ch: Fix various misspelled words

Misspellings were identified by https://github.com/marketplace/actions/check-spelling
* Initial corrections suggested by Google Sheets
* Additional corrections by Google Chrome auto-suggest
* Some manual corrections

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
Authored by Josh Soref on 2020-09-22 07:31:42 -04:00, committed by Rob Bradford
parent 22a2a99e5f
commit 5c3f4dbe6f
32 changed files with 56 additions and 56 deletions


@@ -88,5 +88,5 @@ Fixes #88
 Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
 ```
-Then, after the correspoding PR is merged, Github will automatically close that issue when parsing the
+Then, after the corresponding PR is merged, Github will automatically close that issue when parsing the
 [commit message](https://help.github.com/articles/closing-issues-via-commit-messages/).


@@ -99,7 +99,7 @@ pub fn create_fdt<T: DeviceInfoForFDT + Clone + Debug, S: ::std::hash::BuildHash
 initrd: &Option<InitramfsConfig>,
 pci_space_address: &Option<(u64, u64)>,
 ) -> Result<Vec<u8>> {
-// Alocate stuff necessary for the holding the blob.
+// Allocate stuff necessary for the holding the blob.
 let mut fdt = vec![0; FDT_MAX_SIZE];
 allocate_fdt(&mut fdt)?;
@@ -402,7 +402,7 @@ fn create_gic_node(fdt: &mut Vec<u8>, gic_device: &dyn GICDevice) -> Result<()>
 if gic_device.msi_compatible() {
 append_begin_node(fdt, "msic")?;
-append_property_string(fdt, "compatible", gic_device.msi_compatiblility())?;
+append_property_string(fdt, "compatible", gic_device.msi_compatibility())?;
 append_property_null(fdt, "msi-controller")?;
 append_property_u32(fdt, "phandle", MSI_PHANDLE)?;
 let msi_reg_prop = generate_prop64(gic_device.msi_properties());


@@ -36,7 +36,7 @@ pub trait GICDevice {
 }
 /// Returns the MSI compatibility property of the device
-fn msi_compatiblility(&self) -> &str {
+fn msi_compatibility(&self) -> &str {
 ""
 }


@@ -49,7 +49,7 @@ pub mod kvm {
 true
 }
-fn msi_compatiblility(&self) -> &str {
+fn msi_compatibility(&self) -> &str {
 "arm,gic-v3-its"
 }


@@ -37,7 +37,7 @@
 // | |
 // 144 M +---------------------------------------------------------------|
 // | |
-// | Reserverd (now GIC is here) |
+// | Reserved (now GIC is here) |
 // | |
 // 0GB +---------------------------------------------------------------+
 //


@@ -210,10 +210,10 @@ fn get_fdt_addr(mem: &GuestMemoryMmap) -> u64 {
 pub fn get_host_cpu_phys_bits() -> u8 {
 // The value returned here is used to determine the physical address space size
 // for a VM (IPA size).
-// In recent kernel versions, the maxium IPA size supported by the host can be
+// In recent kernel versions, the maximum IPA size supported by the host can be
 // known by querying cap KVM_CAP_ARM_VM_IPA_SIZE. And the IPA size for a
 // guest can be configured smaller.
-// But in Cloud-Hypervisor we simply use the maxium value for the VM.
+// But in Cloud-Hypervisor we simply use the maximum value for the VM.
 // Reference https://lwn.net/Articles/766767/.
 //
 // The correct way to query KVM_CAP_ARM_VM_IPA_SIZE is via rust-vmm/kvm-ioctls,


@@ -20,7 +20,7 @@ use vm_device::BusDevice;
 // From 0x0 to 0x1C we have following registers:
 const RTCDR: u64 = 0x0; // Data Register.
 const RTCMR: u64 = 0x4; // Match Register.
-const RTCLR: u64 = 0x8; // Load Regiser.
+const RTCLR: u64 = 0x8; // Load Register.
 const RTCCR: u64 = 0xc; // Control Register.
 const RTCIMSC: u64 = 0x10; // Interrupt Mask Set or Clear Register.
 const RTCRIS: u64 = 0x14; // Raw Interrupt Status.


@@ -12,7 +12,7 @@
 * [Reboot a Virtual Machine](#reboot-a-virtual-machine)
 * [Shut a Virtual Machine Down](#shut-a-virtual-machine-down)
 + [Command Line Interface](#command-line-interface)
-+ [REST API and CLI Architecural Relationship](#rest-api-and-cli-architectural-relationship)
++ [REST API and CLI Architectural Relationship](#rest-api-and-cli-architectural-relationship)
 * [Internal API](#internal-api)
 + [Goals and Design](#goals-and-design)
 * [End to End Example](#end-to-end-example)


@@ -63,7 +63,7 @@ As per adding CPUs to the guest, after a reboot the VM will be running with the
 ## Memory Hot Plug
-Extra memory can be added from a runing Cloud Hypervisor instance. This is controlled by two mechanisms:
+Extra memory can be added from a running Cloud Hypervisor instance. This is controlled by two mechanisms:
 1. Allocating some of the guest physical address space for hotplug memory.
 2. Making a HTTP API request to the VMM to ask for a new amount of RAM to be assigned to the VM. In the case of expanding the memory for the VM the new memory will be hotplugged into the running VM, if reducing the size of the memory then change will take effect after the next reboot.
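
For illustration, a minimal sketch of those two mechanisms, assuming an API socket at `/tmp/cloud-hypervisor.sock` (the socket path, image names and sizes are placeholders; check the `--memory` and `ch-remote resize` help output for the exact syntax of the version in use):

```sh
# 1. Reserve guest physical address space for hotplug memory at boot time.
./cloud-hypervisor \
    --api-socket /tmp/cloud-hypervisor.sock \
    --memory size=1G,hotplug_size=8G \
    --kernel vmlinux \
    --cmdline "console=ttyS0 root=/dev/vda1" \
    --disk path=focal.raw

# 2. Ask the VMM for a new total amount of RAM. Growing the memory is hotplugged
#    into the running VM; shrinking only takes effect after the next reboot.
./ch-remote --api-socket /tmp/cloud-hypervisor.sock resize --memory 3G
```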


@@ -8,7 +8,7 @@ the host kernel. The required Linux and KVM changes can be found in the
 [KVM SGX Tree](https://github.com/intel/kvm-sgx).
 Utilizing SGX in the guest requires a kernel/OS with SGX support, e.g. a kernel
-buit using the [SGX Linux Development Tree](https://github.com/jsakkine-intel/linux-sgx.git)
+built using the [SGX Linux Development Tree](https://github.com/jsakkine-intel/linux-sgx.git)
 or the [KVM SGX Tree](https://github.com/intel/kvm-sgx). Running KVM SGX as the
 guest kernel allows nested virtualization of SGX.


@@ -1,6 +1,6 @@
 # How to use networking
-cloud-hypervisor can emulate one or more virtual network interfaces, represented at the hypervisor host by [tap devices](https://www.kernel.org/doc/Documentation/networking/tuntap.txt). This guide briefly describes, in a manual and distribution neutral way, how to setup and use networking with cloud-hypevisor.
+cloud-hypervisor can emulate one or more virtual network interfaces, represented at the hypervisor host by [tap devices](https://www.kernel.org/doc/Documentation/networking/tuntap.txt). This guide briefly describes, in a manual and distribution neutral way, how to setup and use networking with cloud-hypervisor.
 ## Multiple queue support for net devices ##
@@ -14,7 +14,7 @@ Note:
 - Currently, it does not support to use ethtool to change the combined queue numbers in guest.
 - Multiple queue is enabled for vhost-user-net backend in cloud-hypervisor, however, multiple thread is not added to handle mq, thus, the performance for vhost-user-net backend is not supposed to be improved. The multiple thread will be added for backend later.
-- Performance test for vhost-user-net will be covered once vhost-user-net backend has mulitple thread supported.
+- Performance test for vhost-user-net will be covered once vhost-user-net backend has multiple thread supported.
 - Performance test for virtio-net is done by comparing 2 queue pairs with 1 queue pairs, that to run 2 iperf3 sessions in the same test environments, throughput is improved about 37%.
 ## Start cloud-hypervisor with net devices
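
As a rough sketch of what that can look like, with the tap name, kernel, disk image and MAC address as placeholders (verify the `num_queues`/`queue_size` parameter names against the `--net` help of the version in use):

```sh
# Create a tap device with multi-queue support on the host.
sudo ip tuntap add name vunet-tap1 mode tap multi_queue
sudo ip link set vunet-tap1 up

# Attach it to the guest with 2 queue pairs (num_queues counts RX and TX
# queues together, so 2 pairs are requested as num_queues=4).
./cloud-hypervisor \
    --kernel vmlinux \
    --cmdline "console=ttyS0 root=/dev/vda1" \
    --disk path=focal.raw \
    --net tap=vunet-tap1,mac=12:34:56:78:90:01,num_queues=4,queue_size=256
```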


@@ -54,7 +54,7 @@ drwxr-xr-x 47 foo bar 4096 Jul 22 11:47 ../
 In this particular example, we can observe that 2 memory region files were
 created. That is explained by the size of the guest RAM, which is 4GiB in this
 case. Because it exceeds 3GiB (which is where we can find a ~1GiB memory hole),
-Cloud-Hypervisor needs 2 distincts memory regions to be created. Each memory
+Cloud-Hypervisor needs 2 distinct memory regions to be created. Each memory
 region's content is stored through a dedicated file, which explains why we end
 up with 2 different files, the first one containing the guest RAM range 0-3GiB
 and the second one containing the guest RAM range 3-4GiB.
@@ -63,7 +63,7 @@ and the second one containing the guest RAM range 3-4GiB.
 and state. The configuration bits are used to create a similar virtual machine
 with the correct amount of CPUs, RAM, and other expected devices. The state
 bits are used to restore each component in the state it was left before the
-snapshot occured.
+snapshot occurred.
 ## Restore a Cloud-Hypervisor VM
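
A minimal snapshot/restore round trip, assuming an API socket at `/tmp/cloud-hypervisor.sock` and a destination directory of `/home/foo/snapshot` (both placeholders; the exact `ch-remote` and `--restore` syntax should be checked against the running version):

```sh
# Pause the running VM, then snapshot its configuration and state to disk.
./ch-remote --api-socket /tmp/cloud-hypervisor.sock pause
./ch-remote --api-socket /tmp/cloud-hypervisor.sock snapshot file:///home/foo/snapshot

# Create a new VM from the saved snapshot, then resume its execution.
./cloud-hypervisor --api-socket /tmp/cloud-hypervisor.sock \
    --restore source_url=file:///home/foo/snapshot
./ch-remote --api-socket /tmp/cloud-hypervisor.sock resume
```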


@@ -64,7 +64,7 @@ impl PciClassCode {
 }
 }
-/// A PCI sublcass. Each class in `PciClassCode` can specify a unique set of subclasses. This trait
+/// A PCI subclass. Each class in `PciClassCode` can specify a unique set of subclasses. This trait
 /// is implemented by each subclass. It allows use of a trait object to generate configurations.
 pub trait PciSubclass {
 /// Convert this subclass to the value used in the PCI specification.


@@ -785,7 +785,7 @@ impl PciDevice for VfioPciDevice {
 // We need to allocate a guest MMIO address range for that BAR.
 // In case the BAR is mappable directly, this means it might be
 // set as user memory region, which expects to deal with 4K
-// pages. Therefore, the aligment has to be set accordingly.
+// pages. Therefore, the alignment has to be set accordingly.
 let bar_alignment = if (bar_id == VFIO_PCI_ROM_REGION_INDEX)
 || (self.device.get_region_flags(bar_id) & VFIO_REGION_INFO_FLAG_MMAP != 0)
 {


@@ -1056,7 +1056,7 @@ impl QcowFile {
 // Free the previously used cluster if one exists. Modified tables are always
 // witten to new clusters so the L1 table can be committed to disk after they
 // are and L1 never points at an invalid table.
-// The index must be valid from when it was insterted.
+// The index must be valid from when it was inserted.
 let addr = self.l1_table[l1_index];
 if addr != 0 {
 self.unref_clusters.push(addr);
@@ -1354,7 +1354,7 @@ impl QcowFile {
 fn sync_caches(&mut self) -> std::io::Result<()> {
 // Write out all dirty L2 tables.
 for (l1_index, l2_table) in self.l2_cache.iter_mut().filter(|(_k, v)| v.dirty()) {
-// The index must be valid from when we insterted it.
+// The index must be valid from when we inserted it.
 let addr = self.l1_table[*l1_index];
 if addr != 0 {
 self.raw_file.write_pointer_table(


@@ -318,7 +318,7 @@ Highlights for `cloud-hypervisor` version 0.7.0 include:
 Further to our effort to support modifying a running guest we now support
 hotplug and unplug of the following virtio backed devices: block, network,
 pmem, virtio-fs and vsock. This functionality is available on the (default) PCI
-based tranport and is exposed through the HTTP API. The `ch-remote` utility
+based transport and is exposed through the HTTP API. The `ch-remote` utility
 provides a CLI for adding or removing these device types after the VM has
 booted. User can use the `id` parameter on the devices to choose names for
 devices to ease their removal.
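
A hedged sketch of that workflow with `ch-remote` (the socket path, disk image and `id` value are placeholders; subcommand names should be checked against `ch-remote --help`):

```sh
# Hotplug a block device into the running guest, giving it a friendly id.
./ch-remote --api-socket /tmp/cloud-hypervisor.sock add-disk path=/var/images/extra.raw,id=extra0

# Later, unplug it again by referring to that id.
./ch-remote --api-socket /tmp/cloud-hypervisor.sock remove-device extra0
```
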
@@ -434,7 +434,7 @@ When choosing to offload the paravirtualized block and networking I/O to an
 external process (through the `vhost-user` protocol), Cloud Hypervisor now
 automatically spawns its default `vhost-user-blk` and `vhost-user-net` backends
 into their own, separate processes.
-This provides a seamless parvirtualized I/O user experience for those who want
+This provides a seamless paravirtualized I/O user experience for those who want
 to run their guest I/O into separate executions contexts.
 ### Command Line Interface
@@ -580,7 +580,7 @@ default Cloud Hypervisor I/O architecture.
 ### Guest pause and resume
-As an initial requiremnt for enabling live migration, we added support for
+As an initial requirement for enabling live migration, we added support for
 pausing and resuming any VMM components. As an intermediate step towards live
 migration, the upcoming guest snapshotting feature will be based on the pause
 and resume capabilities.


@@ -173,9 +173,9 @@ if [ $RES -ne 0 ]; then
 exit 1
 fi
-# Create tap interface without multipe queues support for vhost_user_net test.
+# Create tap interface without multiple queues support for vhost_user_net test.
 sudo ip tuntap add name vunet-tap0 mode tap
-# Create tap interface with multipe queues support for vhost_user_net test.
+# Create tap interface with multiple queues support for vhost_user_net test.
 sudo ip tuntap add name vunet-tap1 mode tap multi_queue
 BUILD_TARGET="aarch64-unknown-linux-${CH_LIBC}"


@@ -189,9 +189,9 @@ sudo ip tuntap add vfio-tap3 mode tap
 sudo ip link set vfio-tap3 master vfio-br0
 sudo ip link set vfio-tap3 up
-# Create tap interface without multipe queues support for vhost_user_net test.
+# Create tap interface without multiple queues support for vhost_user_net test.
 sudo ip tuntap add name vunet-tap0 mode tap
-# Create tap interface with multipe queues support for vhost_user_net test.
+# Create tap interface with multiple queues support for vhost_user_net test.
 sudo ip tuntap add name vunet-tap1 mode tap multi_queue
 BUILD_TARGET="$(uname -m)-unknown-linux-${CH_LIBC}"


@@ -763,7 +763,7 @@ pub trait FileSystem {
 /// Allocate requested space for file data.
 ///
-/// If this function returns success, then the file sytem must guarantee that it is possible to
+/// If this function returns success, then the file system must guarantee that it is possible to
 /// write up to `length` bytes of data starting at `offset` without failing due to a lack of
 /// free space on the disk.
 ///


@@ -809,7 +809,7 @@ impl Iommu {
 // virtual IOMMU. This list is translated into a virtio-iommu configuration
 // topology, so that it can be understood by the guest driver.
 //
-// The topology is overriden everytime this function is being invoked.
+// The topology is overridden everytime this function is being invoked.
 //
 // This function is dedicated to PCI, which means it will exclusively
 // create VIRTIO_IOMMU_TOPO_PCI_RANGE entries.


@@ -66,7 +66,7 @@ const VIRTIO_MEM_RESP_NACK: u16 = 1;
 // - VIRTIO_MEM_REQ_UNPLUG_ALL
 // VIRTIO_MEM_RESP_BUSY: u16 = 2;
-// Error in request (e.g. addresses/alignemnt), applicable for
+// Error in request (e.g. addresses/alignment), applicable for
 // - VIRTIO_MEM_REQ_PLUG
 // - VIRTIO_MEM_REQ_UNPLUG
 // - VIRTIO_MEM_REQ_STATE
@@ -664,7 +664,7 @@ impl EpollHelperHandler for MemEpollHandler {
 },
 };
 if let Err(e) = self.resize.send(r) {
-error!("Sending \"resize\" reponse: {:?}", e);
+error!("Sending \"resize\" response: {:?}", e);
 return true;
 }
 if signal_error {


@@ -82,7 +82,7 @@ pub enum Error {
 InvalidQueuePairsNum,
 /// No memory passed in.
 NoMemory,
-/// No ueue pairs nummber.
+/// No ueue pairs number.
 NoQueuePairsNum,
 }


@@ -235,7 +235,7 @@ where
 }
 Err(err) if err.kind() == ErrorKind::WouldBlock => {
 // This shouldn't actually happen (receiving EWOULDBLOCK after EPOLLIN), but
-// apparently it does, so we need to handle it greacefully.
+// apparently it does, so we need to handle it gracefully.
 warn!(
 "vsock: unexpected EWOULDBLOCK while reading from backing stream: \
 lp={}, pp={}, err={:?}",
@@ -905,7 +905,7 @@ mod tests {
 let mut ctx = CsmTestContext::new(ConnState::PeerInit);
 assert!(ctx.conn.has_pending_rx());
 ctx.recv();
-// For peer-initiated requests, our connection should always yield a vsock reponse packet,
+// For peer-initiated requests, our connection should always yield a vsock response packet,
 // in order to establish the connection.
 assert_eq!(ctx.pkt.op(), uapi::VSOCK_OP_RESPONSE);
 assert_eq!(ctx.pkt.src_cid(), LOCAL_CID);
@@ -1165,7 +1165,7 @@ mod tests {
 .contains(epoll::Events::EPOLLOUT));
 assert_eq!(ctx.conn.tx_buf.len(), data.len());
-// Unlock the write stream and notify the connection it can now write its bufferred
+// Unlock the write stream and notify the connection it can now write its buffered
 // data.
 ctx.set_stream(TestStream::new());
 ctx.conn.notify(epoll::Events::EPOLLOUT);


@@ -65,7 +65,7 @@ impl TxBuf {
 let data = self.data.get_or_insert_with(||
 // Using uninitialized memory here is quite safe, since we never read from any
 // area of the buffer before writing to it. First we push, then we flush only
-// what had been prviously pushed.
+// what had been previously pushed.
 Box::new(unsafe {mem::MaybeUninit::<[u8; Self::SIZE]>::uninit().assume_init()}));
 // Buffer head, as an offset into the data slice.


@@ -19,7 +19,7 @@ use crate::{
 };
 use anyhow::anyhow;
 /// This is the `VirtioDevice` implementation for our vsock device. It handles the virtio-level
-/// device logic: feature negociation, device configuration, and device activation.
+/// device logic: feature negotiation, device configuration, and device activation.
 /// The run-time device logic (i.e. event-driven data handling) is implemented by
 /// `super::epoll_handler::EpollHandler`.
 ///
@@ -272,7 +272,7 @@ where
 // After the backend has been kicked, it might've freed up some resources, so we
 // can attempt to send it more data to process.
 // In particular, if `self.backend.send_pkt()` halted the TX queue processing (by
-// reurning an error) at some point in the past, now is the time to try walking the
+// returning an error) at some point in the past, now is the time to try walking the
 // TX queue again.
 if let Err(e) = self.process_tx() {
 error!("Failed to process TX queue: {:?}", e);


@@ -70,7 +70,7 @@ const HDROFF_TYPE: usize = 28;
 // Operation ID - one of the VSOCK_OP_* values; e.g.
 // - VSOCK_OP_RW: a data packet;
 // - VSOCK_OP_REQUEST: connection request;
-// - VSOCK_OP_RST: forcefull connection termination;
+// - VSOCK_OP_RST: forceful connection termination;
 // etc (see `super::defs::uapi` for the full list).
 const HDROFF_OP: usize = 30;


@@ -120,7 +120,7 @@ pub struct VsockMuxer {
 impl VsockChannel for VsockMuxer {
 /// Deliver a vsock packet to the guest vsock driver.
 ///
-/// Retuns:
+/// Returns:
 /// - `Ok(())`: `pkt` has been successfully filled in; or
 /// - `Err(VsockError::NoData)`: there was no available data with which to fill in the
 /// packet.
@@ -280,13 +280,13 @@ impl VsockEpollListener for VsockMuxer {
 /// Get the epoll events to be polled upstream.
 ///
 /// Since the polled FD is a nested epoll FD, we're only interested in EPOLLIN events (i.e.
-/// some event occured on one of the FDs registered under our epoll FD).
+/// some event occurred on one of the FDs registered under our epoll FD).
 ///
 fn get_polled_evset(&self) -> epoll::Events {
 epoll::Events::EPOLLIN
 }
-/// Notify the muxer about a pending event having occured under its nested epoll FD.
+/// Notify the muxer about a pending event having occurred under its nested epoll FD.
 ///
 fn notify(&mut self, _: epoll::Events) {
 debug!("vsock: muxer received kick");
@@ -376,7 +376,7 @@ impl VsockMuxer {
 let key_copy = *key;
 let evset_copy = *evset;
 // The handling of this event will most probably mutate the state of the
-// receiving conection. We'll need to check for new pending RX, event set
+// receiving connection. We'll need to check for new pending RX, event set
 // mutation, and all that, so we're wrapping the event delivery inside those
 // checks.
 self.apply_conn_mutation(key_copy, |conn| {
@@ -608,7 +608,7 @@ impl VsockMuxer {
 ///
 fn allocate_local_port(&mut self) -> u32 {
 // TODO: this doesn't seem very space-efficient.
-// Mybe rewrite this to limit port range and use a bitmap?
+// Maybe rewrite this to limit port range and use a bitmap?
 //
 loop {
@@ -626,10 +626,10 @@ impl VsockMuxer {
 self.local_port_set.remove(&port);
 }
-/// Handle a new connection request comming from our peer (the guest vsock driver).
+/// Handle a new connection request coming from our peer (the guest vsock driver).
 ///
 /// This will attempt to connect to a host-side Unix socket, expected to be listening at
-/// the file system path corresponing to the destination port. If successful, a new
+/// the file system path corresponding to the destination port. If successful, a new
 /// connection object will be created and added to the connection pool. On failure, a new
 /// RST packet will be scheduled for delivery to the guest.
 ///
@@ -1232,7 +1232,7 @@ mod tests {
 streams.push(listener.accept());
 }
-// The muxer RX queue should now be full (with connection reponses), but still
+// The muxer RX queue should now be full (with connection responses), but still
 // synchronized.
 assert!(ctx.muxer.rxq.is_synced());


@@ -19,7 +19,7 @@ pub enum Error {
 IoError(io::Error),
 }
-/// Type of Message Singaled Interrupt
+/// Type of Message Signalled Interrupt
 #[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub enum MsiIrqType {
 /// PCI MSI IRQ numbers.


@@ -86,7 +86,7 @@ impl<'a> Iterator for DescIter<'a> {
 }
 }
-/// A virtio descriptor constraints with C representive.
+/// A virtio descriptor constraints with C representative.
 #[repr(C)]
 #[derive(Default, Clone, Copy)]
 pub struct Descriptor {
@@ -540,13 +540,13 @@ impl Queue {
 );
 false
 } else if desc_table.mask(0xf) != 0 {
-error!("virtio queue descriptor table breaks alignment contraints");
+error!("virtio queue descriptor table breaks alignment constraints");
 false
 } else if avail_ring.mask(0x1) != 0 {
-error!("virtio queue available ring breaks alignment contraints");
+error!("virtio queue available ring breaks alignment constraints");
 false
 } else if used_ring.mask(0x3) != 0 {
-error!("virtio queue used ring breaks alignment contraints");
+error!("virtio queue used ring breaks alignment constraints");
 false
 } else {
 true


@@ -55,7 +55,7 @@ pub enum ApiError {
 /// API request send error
 RequestSend(SendError<ApiRequest>),
-/// Wrong reponse payload type
+/// Wrong response payload type
 ResponsePayloadType,
 /// API response receive error


@@ -32,7 +32,7 @@ pub enum Error {
 ParseFsSockMissing,
 /// Cannot have dax=off along with cache_size parameter.
 InvalidCacheSizeWithDaxOff,
-/// Missing persistant memory file parameter.
+/// Missing persistent memory file parameter.
 ParsePmemFileMissing,
 /// Missing vsock socket path parameter.
 ParseVsockSockMissing,
@@ -56,7 +56,7 @@ pub enum Error {
 ParseRNG(OptionParserError),
 /// Error parsing filesystem parameters
 ParseFileSystem(OptionParserError),
-/// Error parsing persistent memorry parameters
+/// Error parsing persistent memory parameters
 ParsePersistentMemory(OptionParserError),
 /// Failed parsing console
 ParseConsole(OptionParserError),


@@ -250,7 +250,7 @@ const LENGTH_OFFSET_HIGH: u64 = 0xC;
 const STATUS_OFFSET: u64 = 0x14;
 const SELECTION_OFFSET: u64 = 0;
-// The MMIO address space size is substracted with the size of a 4k page. This
+// The MMIO address space size is subtracted with the size of a 4k page. This
 // is done on purpose to workaround a Linux bug when the VMM allocates devices
 // at the end of the addressable space.
 fn mmio_address_space_size() -> u64 {