vendor: Remove vendoring

The cargo interaction with the .cargo/config does not meet our
requirements.

Even though .cargo/config explicitly replaces our external sources with
vendored ones, cargo build will rely first on Cargo.lock to update its
local source cache. If a dependency has been force-pushed, the build
fails because of our top-level Cargo.toml description.
This prevents us from actually pinning dependencies, which defeats the
purpose of vendoring.

We're removing vendoring for now, until we understand it better.

Signed-off-by: Samuel Ortiz <sameo@linux.intel.com>
Samuel Ortiz 2019-06-06 10:39:07 +02:00, committed by Rob Bradford
parent 842515c2f1
commit 74a21f24e1
4064 changed files with 1 addition and 442576 deletions

.cargo/config
@@ -1,29 +0,0 @@
[source.crates-io]
replace-with = "vendor+crates-io"
[source."https://github.com/bjzhjing/linux-loader"]
git = "https://github.com/bjzhjing/linux-loader"
rev = "2b95f1e1958a2b6399b590f64344cab5b4a6d608"
replace-with = "vendor+https://github.com/bjzhjing/linux-loader"
[source."https://github.com/rust-vmm/vm-memory"]
git = "https://github.com/rust-vmm/vm-memory"
rev = "281b8bd6cd2927f7a65130194b203a1c2b0ad2e3"
replace-with = "vendor+https://github.com/rust-vmm/vm-memory"
[source."https://github.com/rust-vmm/vmm-sys-util"]
git = "https://github.com/rust-vmm/vmm-sys-util"
rev = "60fe35bea0bdce8b36c6186a740878880f944bdc"
replace-with = "vendor+https://github.com/rust-vmm/vmm-sys-util"
[source."vendor+crates-io"]
directory = "./vendor/registry-40351f815f426200"
[source."vendor+https://github.com/bjzhjing/linux-loader"]
directory = "./vendor/git-4af8f4552cd0d200"
[source."vendor+https://github.com/rust-vmm/vm-memory"]
directory = "./vendor/git-89548d8276566400"
[source."vendor+https://github.com/rust-vmm/vmm-sys-util"]
directory = "./vendor/git-bad78e1967b13e00"

Cargo.lock (generated)
@@ -75,10 +75,7 @@ name = "cloud-hypervisor"
version = "0.1.0"
dependencies = [
"clap 2.27.1 (registry+https://github.com/rust-lang/crates.io-index)",
"linux-loader 0.1.0 (git+https://github.com/bjzhjing/linux-loader?rev=2b95f1e1958a2b6399b590f64344cab5b4a6d608)",
"vm-memory 0.1.0 (git+https://github.com/rust-vmm/vm-memory?rev=281b8bd6cd2927f7a65130194b203a1c2b0ad2e3)",
"vmm 0.1.0",
"vmm-sys-util 0.1.0 (git+https://github.com/rust-vmm/vmm-sys-util?rev=60fe35bea0bdce8b36c6186a740878880f944bdc)",
]
[[package]]
@@ -136,15 +133,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "linux-loader"
version = "0.1.0"
source = "git+https://github.com/bjzhjing/linux-loader#9fa483ce4a7d6a1250be82fa551060ecdbdec4aa"
dependencies = [
"vm-memory 0.1.0 (git+https://github.com/rust-vmm/vm-memory)",
]
[[package]]
name = "linux-loader"
version = "0.1.0"
source = "git+https://github.com/bjzhjing/linux-loader?rev=2b95f1e1958a2b6399b590f64344cab5b4a6d608#2b95f1e1958a2b6399b590f64344cab5b4a6d608"
source = "git+https://github.com/bjzhjing/linux-loader#912dbb58c8a859c10735066d5a2089cd4073904c"
dependencies = [
"vm-memory 0.1.0 (git+https://github.com/rust-vmm/vm-memory)",
]
@@ -444,14 +433,6 @@ dependencies = [
"libc 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "vm-memory"
version = "0.1.0"
source = "git+https://github.com/rust-vmm/vm-memory?rev=281b8bd6cd2927f7a65130194b203a1c2b0ad2e3#281b8bd6cd2927f7a65130194b203a1c2b0ad2e3"
dependencies = [
"libc 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "vm-virtio"
version = "0.1.0"
@@ -500,14 +481,6 @@ dependencies = [
"libc 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "vmm-sys-util"
version = "0.1.0"
source = "git+https://github.com/rust-vmm/vmm-sys-util?rev=60fe35bea0bdce8b36c6186a740878880f944bdc#60fe35bea0bdce8b36c6186a740878880f944bdc"
dependencies = [
"libc 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "winapi"
version = "0.3.7"
@@ -543,7 +516,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum kvm-ioctls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f87b0c7322658f94e9fcf661146c9761a487813f3154e6a67a24fc59c68f5306"
"checksum libc 0.2.58 (registry+https://github.com/rust-lang/crates.io-index)" = "6281b86796ba5e4366000be6e9e18bf35580adf9e63fbe2294aadb587613a319"
"checksum linux-loader 0.1.0 (git+https://github.com/bjzhjing/linux-loader)" = "<none>"
"checksum linux-loader 0.1.0 (git+https://github.com/bjzhjing/linux-loader?rev=2b95f1e1958a2b6399b590f64344cab5b4a6d608)" = "<none>"
"checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6"
"checksum numtoa 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b8f8bdf33df195859076e54ab11ee78a1b208382d3a26ec40d142ffc1ecc49ef"
"checksum proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)" = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759"
@@ -573,9 +545,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"
"checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a"
"checksum vm-memory 0.1.0 (git+https://github.com/rust-vmm/vm-memory)" = "<none>"
"checksum vm-memory 0.1.0 (git+https://github.com/rust-vmm/vm-memory?rev=281b8bd6cd2927f7a65130194b203a1c2b0ad2e3)" = "<none>"
"checksum vmm-sys-util 0.1.0 (git+https://github.com/rust-vmm/vmm-sys-util)" = "<none>"
"checksum vmm-sys-util 0.1.0 (git+https://github.com/rust-vmm/vmm-sys-util?rev=60fe35bea0bdce8b36c6186a740878880f944bdc)" = "<none>"
"checksum winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f10e386af2b13e47c89e7236a7a14a086791a2b88ebad6df9bf42040195cf770"
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

Cargo.toml
@@ -8,15 +8,3 @@ edition = "2018"
clap = "=2.27.1"
vmm = { path = "vmm" }
[dependencies.vm-memory]
git = "https://github.com/rust-vmm/vm-memory"
rev = "281b8bd6cd2927f7a65130194b203a1c2b0ad2e3"
[dependencies.vmm-sys-util]
git = "https://github.com/rust-vmm/vmm-sys-util"
rev = "60fe35bea0bdce8b36c6186a740878880f944bdc"
[dependencies.linux-loader]
git = "https://github.com/bjzhjing/linux-loader"
rev = "2b95f1e1958a2b6399b590f64344cab5b4a6d608"

@@ -1,85 +0,0 @@
# Cloud Hypervisor vendoring
The `cloud-hypervisor` build relies on having all dependencies locally vendored,
for several reasons:
1. Reproducible builds: Separate builds from the same cloud-hypervisor git
commit will build against exactly the same set of dependencies.
1. Network isolated builds: Vendoring allows us to build cloud-hypervisor
in a network isolated environment. All dependencies are locally fetched
and thus `cargo` will not try to fetch crates from external repositories.
1. Simplified custom dependencies: When having to deal with custom, temporary
dependencies, vendoring allows for a centralized and simple way of overriding
an existing dependency with a custom one.
## Workflow
The `cargo vendor` tool does two things:
1. It generates vendored copies of all dependencies that the project crates
describe through their `Cargo.toml` files.
1. It creates a `.cargo/config` amendment to force cargo builds to use the
vendored copies instead of the external ones.
It's important to note that `cargo vendor` cannot force a dependency version
or revision. All dependencies are described through the project crates'
`Cargo.toml` files.
As a consequence, vendoring and dependency revision pinning are two separate
things, and `cargo vendor` only handles the former.
All the `cloud-hypervisor` vendored dependencies are under the `vendor`
directory. For all intents and purposes the `vendor` directory is read-only and
should not be manually modified.
The sections below describe a few vendoring use cases:
### Overriding a crates.io dependency
For overriding a `crates.io` crate with a local or remote crate, we first need
to modify the project's top-level `Cargo.toml` file.
For example, if we want to switch from the `crates.io` `kvm-ioctls` package to
a forked one containing some specific feature or fix, we would add the following
lines to the project top-level `Cargo.toml` file:
```toml
[patch.crates-io]
kvm-ioctls = { git = "https://github.com/sboeuf/kvm-ioctls", branch = "kvm_signal_msi" }
```
Then we need to vendor that change:
```shell
cargo vendor --relative-path --no-merge-sources ./vendor > .cargo/config
```
### Pinning a git dependency
Some crates may depend on unpublished crates that are developed and
maintained in a git repository.
Let's take the `vm-memory` crate as an example. It is a `rust-vmm` crate that
is not yet published on `crates.io`. Several `cloud-hypervisor` crates depend
on it. Provided that none of those dependent crates rely on a specific branch
or revision of the `vm-memory` crate, we may want to pin our project to the
crate revision `281b8bd6cd2927f7a65130194b203a1c2b0ad2e3`.
We need to describe that revision pin and then vendor it. First we need to add
the following lines to the project top-level `Cargo.toml`:
```toml
[dependencies.vm-memory]
git = "https://github.com/rust-vmm/vm-memory"
rev = "281b8bd6cd2927f7a65130194b203a1c2b0ad2e3"
```
And then vendor that change:
```shell
cargo vendor --relative-path --no-merge-sources ./vendor > .cargo/config
```

vendor/.sources (vendored)
@@ -1 +0,0 @@
["git-3e7c44ea7d5fd800","git-4af8f4552cd0d200","git-643dc7ede01ca600","git-89548d8276566400","git-bad78e1967b13e00","git-bda1448fb2afcb00","registry-40351f815f426200"]

@@ -1,168 +0,0 @@
steps:
- label: "build-gnu-x86"
commands:
- cargo build --release
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "build-gnu-x86-mmap"
commands:
- cargo build --release --features=backend-mmap
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "build-gnu-arm-mmap"
commands:
- cargo build --release --features=backend-mmap
retry:
automatic: false
agents:
platform: arm.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "build-musl-arm-mmap"
commands:
- cargo build --release --features=backend-mmap --target aarch64-unknown-linux-musl
retry:
automatic: false
agents:
platform: arm.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "style"
command: cargo fmt --all -- --check
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "unittests-gnu-x86"
commands:
- cargo test --all-features
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "unittests-musl-x86"
commands:
- cargo test --all-features --target x86_64-unknown-linux-musl
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "unittests-gnu-arm"
commands:
- cargo test --all-features
retry:
automatic: false
agents:
platform: arm.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "unittests-musl-arm"
commands:
- cargo test --all-features --target aarch64-unknown-linux-musl
retry:
automatic: false
agents:
platform: arm.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "clippy-x86"
commands:
- cargo clippy --all -- -D warnings
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "clippy-arm"
commands:
- cargo clippy --all -- -D warnings
retry:
automatic: false
agents:
platform: arm.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "check-warnings-x86"
commands:
- RUSTFLAGS="-D warnings" cargo check --all-targets
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "check-warnings-arm"
commands:
- RUSTFLAGS="-D warnings" cargo check --all-targets
retry:
automatic: false
agents:
platform: arm.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true

@@ -1,79 +0,0 @@
steps:
- label: "build-msvc-x86"
commands:
- cargo build --release
retry:
automatic: true
agents:
platform: x86_64
os: windows
plugins:
- petrutlucian94/docker#v3.1.1:
image: "lpetrut/rust_win_buildtools"
always-pull: true
- label: "build-msvc-x86-mmap"
commands:
- cargo build --release --features=backend-mmap
retry:
automatic: true
agents:
platform: x86_64
os: windows
plugins:
- petrutlucian94/docker#v3.1.1:
image: "lpetrut/rust_win_buildtools"
always-pull: true
- label: "style"
command: cargo fmt --all -- --check
retry:
automatic: true
agents:
platform: x86_64
os: windows
plugins:
- petrutlucian94/docker#v3.1.1:
image: "lpetrut/rust_win_buildtools"
always-pull: true
- label: "unittests-msvc-x86"
commands:
- cargo test --all-features
retry:
automatic: true
agents:
platform: x86_64
os: windows
plugins:
- petrutlucian94/docker#v3.1.1:
image: "lpetrut/rust_win_buildtools"
always-pull: true
- label: "clippy-x86"
commands:
- cargo clippy --all
retry:
automatic: true
agents:
platform: x86_64
os: windows
plugins:
- petrutlucian94/docker#v3.1.1:
image: "lpetrut/rust_win_buildtools"
always-pull: true
- label: "check-warnings-x86"
commands:
- cargo check --all-targets
retry:
automatic: true
agents:
platform: x86_64
os: windows
plugins:
- petrutlucian94/docker#v3.1.1:
image: "lpetrut/rust_win_buildtools"
always-pull: true
environment:
- "RUSTFLAGS=-D warnings"

@@ -1 +0,0 @@
{"files":{".buildkite/pipeline.linux.yml":"15e790653d9e63fb98a1aff02697ab123b44fc03c06127b222e5f67e7732722e",".buildkite/pipeline.windows.yml":"15e21819ef7321c79181ae7f04aafc0c3ceff709a800edd8994f15a9454d6405",".cargo/config":"c2f1c2fd93436e068cfb14eef3dff8a79d25d1f03c11baf6acbcfbdc9fd3a465","Cargo.toml":"459c6688ce88cac6767971e8053b03a14e05aec157a927134c34d5497c84cf68","DESIGN.md":"aa60ac0a1d59179c253c7be0e496f956344cd0bf41f01f86c9a28575ea433785","LICENSE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","README.md":"2f4aa7c6dbb257a9345d17cec07b0731cae37dc216d25b41272061861bf984b8","THIRD-PARTY":"a6d3ebd1c2f37d4fd83d0676621f695fc0cc2d8c6e646cdbb831b46e0650c208","TODO.md":"c844f03be6631843e90d13b3410df031b07ee16db4a3c7cbda7e89557e9be46b","src/address.rs":"dbc3fa642269bb6f9fb5e176dddc9fcedbe024949f32c43577f52ef04fe0cf09","src/bytes.rs":"2d0c58f53e40f51b5177a234a5062f700ea153c779661c8c34f5d9340f24dd05","src/endian.rs":"948548db28334bceeefe72b09560a700d62993372bb89f46e39990038690f516","src/guest_memory.rs":"2be506fe58244f51279c473a7d0d8e1fc41ef638e9c16cc9e6f071bcbecb3b9b","src/lib.rs":"f65c3f36632da8df7b165578bfd8db010af11c1736fd9855614435e76dd3c390","src/mmap.rs":"9f449c7dac3830128bc1875ca0cc7e7407534eeb7cc1e1db0b0d118aba8de07b","src/mmap_unix.rs":"9a1d71bf1bb7952c25e3796f992953390b6db5a32ef40f2dda1275f866dd9ff0","src/mmap_windows.rs":"bd0091dd90eddede03739ab89648a38f513310437dafdf1f449603bb15a7a2ed","src/volatile_memory.rs":"06bdb496a75f3a190b7092daffce01158acdf73654badd72e697b674eedc4fac"},"package":null}

@@ -1,2 +0,0 @@
[target.aarch64-unknown-linux-musl]
rustflags = [ "-C", "target-feature=+crt-static", "-C", "link-arg=-lgcc"]

@@ -1,17 +0,0 @@
[package]
name = "vm-memory"
version = "0.1.0"
authors = ["Liu Jiang <gerry@linux.alibaba.com>"]
repository = "https://github.com/rust-vmm/vm-memory"
license = "Apache-2.0"
[features]
default = []
backend-mmap = []
[dependencies]
libc = ">=0.2.39"
[dev-dependencies]
matches = ">=0"
tempfile = ">=3.0.2"

@@ -1,79 +0,0 @@
## Objectives
For a typical hypervisor, there are several components, such as the boot loader, virtual device drivers, virtio backend drivers and vhost drivers, that need to access the VM's physical memory. The `vm-memory` crate aims to provide a set of stable traits to decouple VM memory consumers from VM memory providers. Based on these traits, VM memory consumers can access the VM's physical memory without knowing the implementation details of the VM memory provider. Thus hypervisor components such as boot loaders, virtual device drivers, virtio backend drivers and vhost drivers can be shared and reused by multiple hypervisors.
## API Principles
- Define consumer side interfaces to access VM's physical memory.
- Do not define provider side interfaces to supply VM physical memory.
The `vm-memory` crate focuses on defining consumer side interfaces to access the VM's physical memory, and it doesn't define how the underlying VM memory provider is implemented. Lightweight hypervisors like crosvm and firecracker may make some assumptions about the structure of the VM's physical memory and implement a lightweight backend to access it. For hypervisors like qemu, a high performance and full functionality backend may be implemented with fewer assumptions.
## Architecture
The `vm-memory` crate is derived from two upstream projects:
- [crosvm project](https://chromium.googlesource.com/chromiumos/platform/crosvm/) commit 186eb8b0db644892e8ffba8344efe3492bb2b823
- [firecracker project](https://firecracker-microvm.github.io/) commit 80128ea61b305a27df1f751d70415b04b503eae7
To be hypervisor neutral, the high level abstraction has been heavily refactored. The new `vm-memory` crate can be divided into four logical parts:
### Abstraction of Generic Address Space
Build generic abstractions to describe and access an address space as below:
- AddressValue: Stores the raw value of an address. Typically u32, u64 or usize is used to store the raw value. But pointers, such as \*u8, can't be used because they don't implement the Add and Sub traits.
- Address: Encapsulates an AddressValue object and defines methods to access it.
- Bytes: Common trait for volatile access to memory. The `Bytes` trait can be parameterized with newtypes that represent addresses, in order to enforce that addresses are used with the right "kind" of volatile memory.
- VolatileMemory: Basic implementation of volatile access to memory, implements `Bytes<usize>`.
To make the abstraction as generic as possible, all of the above core traits only define methods to access the address space, and they never define methods to manage (create, delete, insert, remove etc.) address spaces. In this way, the address space consumers (virtio device drivers, vhost-user drivers, boot loaders etc.) may be decoupled from the address space provider (typically a hypervisor).
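For instance, arithmetic on an address type goes through explicit, checked methods rather than operators. A minimal sketch using `GuestAddress` (defined later in this crate), assuming the traits and types are re-exported at the crate root:

```rust
extern crate vm_memory;

use vm_memory::{Address, GuestAddress};

fn main() {
    let base = GuestAddress(0x1000);

    // Addition is explicit and checked; `None` would signal overflow.
    let end = base.checked_add(0x200).unwrap();
    assert_eq!(end.raw_value(), 0x1200);

    // Offsets between two addresses are checked for underflow.
    assert_eq!(end.checked_offset_from(base), Some(0x200));

    // `mask` returns the bitwise AND of the address with the given mask,
    // e.g. to extract a page offset.
    assert_eq!(GuestAddress(0x1234).mask(0xfff), 0x234);
}
```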
### Specialization for Virtual Machine Physical Address Space
The generic address space abstractions are specialized to access the VM's physical memory with the following traits:
- GuestAddress: represents a guest physical address (GPA). On ARM64, a 32-bit hypervisor may be used to support a 64-bit VM. For simplicity, u64 is used to store the raw value no matter if it is a 32-bit or 64-bit virtual machine.
- GuestMemoryRegion: used to represent a continuous region of VM's physical memory.
- GuestMemory: used to represent a collection of GuestMemoryRegion objects. The main responsibilities of the GuestMemory trait are:
  - hide the details of accessing the VM's physical address space (for example complex hierarchical structures).
  - map a request address to a GuestMemoryRegion object and relay the request to it.
  - handle cases where an access request spans two or more GuestMemoryRegion objects.
The VM memory consumers, such as virtio device drivers, vhost drivers and boot loaders, should only rely on traits and structs defined here to access the VM's physical memory.
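For illustration, a consumer written purely against these traits stays provider-agnostic. A minimal sketch mirroring the README example included later in this commit, again assuming crate-root re-exports:

```rust
extern crate vm_memory;

use vm_memory::{Bytes, GuestAddress, GuestMemory};

// Works with any GuestMemory implementation the VM memory provider supplies.
fn virt_device_io<T: GuestMemory>(mem: &T) {
    let sample_buf = &[1, 2, 3, 4, 5];
    // `write` returns how many bytes were actually written.
    assert_eq!(mem.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5);
    let buf = &mut [0u8; 5];
    assert_eq!(mem.read(buf, GuestAddress(0xffc)).unwrap(), 5);
    assert_eq!(buf, sample_buf);
}
```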
### A Sample and Default Backend Implementation Based on mmap()
Provide a default and sample implementation of the GuestMemory trait by mmapping the VM's physical memory into the current process. Three data structures are defined here:
- MmapRegion: mmaps a contiguous range of the VM's physical memory into the current process and provides methods to access the mmapped memory.
- GuestRegionMmap: a wrapper structure to map a VM physical address into an (mmap\_region, offset) tuple.
- GuestMemoryMmap: manages a collection of GuestRegionMmap objects for a VM.
One of the main responsibilities of the GuestMemoryMmap object is to handle use cases where an access request crosses a memory region boundary. This scenario may be triggered when memory hotplug is supported. So there's a tradeoff between functionality and code complexity:
- Use the following pattern for simplicity; it fails when the request crosses a region boundary. This is the current default behavior in the crosvm and firecracker projects.
```rust
let guest_memory_mmap: GuestMemoryMmap = ...
let addr: GuestAddress = ...
let buf = &mut [0u8; 5];
let result = guest_memory_mmap.find_region(addr).unwrap().write(buf, addr);
```
- Use the following pattern for full functionality, supporting requests that cross region boundaries:
```rust
let guest_memory_mmap: GuestMemoryMmap = ...
let addr: GuestAddress = ...
let buf = &mut [0u8; 5];
let result = guest_memory_mmap.write(buf, addr);
```
### Utilities and Helpers
The following utility and helper traits/macros are imported from the [crosvm project](https://chromium.googlesource.com/chromiumos/platform/crosvm/) with minor changes:
- ByteValued (originally `DataInit`): Types for which it is safe to initialize from raw data. A type `T` is `ByteValued` if and only if it can be initialized by reading its contents from a byte array. This is generally true for all plain-old-data structs. It is notably not true for any type that includes a reference.
- {Le,Be}\_{16,32,64}: Explicit endian types useful for embedding in structs or reinterpreting data.
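For example, the endian types convert to and from native ordering via `From` and `to_native`, as in the `src/endian.rs` documentation shown later in this commit; a minimal sketch, again assuming crate-root re-exports:

```rust
extern crate vm_memory;

use vm_memory::{Be32, Le32};

fn main() {
    // Construct explicitly-endian values from native-order integers.
    let b: Be32 = From::from(3);
    let l: Le32 = From::from(3);

    // `to_native` converts back to the native byte order.
    assert_eq!(b.to_native(), 3);
    assert_eq!(l.to_native(), 3);

    // The endian types also compare directly against the native primitive.
    assert!(b == 3);
    assert!(l == 3);
}
```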
## Relationships between Traits, Structs and New Types
Traits:
- Address inherits AddressValue
- GuestMemoryRegion inherits Bytes<MemoryRegionAddress, E = Error> (must be implemented)
- GuestMemory implements Bytes<GuestAddress> (generic implementation)
New Types:
- GuestAddress: Address\<u64\>
- MemoryRegionAddress: Address\<u64\>
Structs:
- MmapRegion implements VolatileMemory
- GuestRegionMmap implements Bytes<MemoryRegionAddress> + GuestMemoryRegion
- GuestMemoryMmap implements GuestMemory
- VolatileSlice: Bytes<usize, E = volatile_memory::Error> + VolatileMemory

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,47 +0,0 @@
# vm-memory
A library to access a virtual machine's physical memory.
For a typical hypervisor, there are several components, such as the boot loader, virtual device drivers, virtio backend drivers and vhost drivers, that need to access the VM's physical memory. The `vm-memory` crate provides a set of traits to decouple VM memory consumers from VM memory providers. Based on these traits, VM memory consumers can access the VM's physical memory without knowing the implementation details of the VM memory provider. Thus hypervisor components based on these traits can be shared and reused by multiple hypervisors.
## Platform Support
- Arch: x86, AMD64, ARM64
- OS: Linux/Unix/Windows
## Usage
First, add the following to your `Cargo.toml`:
```toml
[dependencies]
vm-memory = "0.1"
```
Next, add this to your crate root:
```rust
extern crate vm_memory;
```
## Example
- Create VM physical memory objects in hypervisor specific ways. Use the default GuestMemoryMmap as an example:
```rust
fn provide_mem_to_virt_dev() {
let gm = GuestMemoryMmap::new(&[(GuestAddress(0), 0x1000), (GuestAddress(0x1000), 0x1000)]).unwrap();
virt_device_io(&gm);
}
```
- Consumers access VM's physical memory
```rust
fn virt_device_io<T: GuestMemory>(mem: &T) {
let sample_buf = &[1, 2, 3, 4, 5];
assert_eq!(mem.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5);
let buf = &mut [0u8; 5];
assert_eq!(mem.read(buf, GuestAddress(0xffc)).unwrap(), 5);
assert_eq!(buf, sample_buf);
}
```
## Documentation & References
- [Design of The `vm-memory` Crate](DESIGN.md)
- [TODO List](TODO.md)
- [The rust-vmm Project](https://github.com/rust-vmm/)
## License
This project is licensed under
- Apache License, Version 2.0 (LICENSE or http://www.apache.org/licenses/LICENSE-2.0)

@@ -1,27 +0,0 @@
// Copyright 2017 The Chromium OS Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -1,3 +0,0 @@
### TODO List
- Abstraction layer to separate VM memory management from VM memory accessors.
- Help needed to refine documentation and usage examples.

@@ -1,170 +0,0 @@
// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Traits to represent an address within an address space.
//!
//! Two traits are defined to present an address within an address space:
//! - [AddressValue](trait.AddressValue.html): stores the raw value of an address. Typically u32,
//! u64 or usize is used to store the raw value. But pointers, such as *u8, can't be used because
//! they don't implement the Add and Sub traits.
//! - [Address](trait.Address.html): encapsulates an AddressValue object and defines methods to
//! access and manipulate it.
use std::cmp::{Eq, Ord, PartialEq, PartialOrd};
use std::ops::{Add, BitAnd, BitOr, Sub};
/// Simple helper trait used to store a raw address value.
pub trait AddressValue {
/// Type of the address raw value.
type V: Copy
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ Add<Output = Self::V>
+ Sub<Output = Self::V>
+ BitAnd<Output = Self::V>
+ BitOr<Output = Self::V>;
}
/// Trait to represent an address within an address space.
///
/// To simplify the design and implementation, assume the same raw data type (AddressValue::V)
/// could be used to store address, size and offset for the address space. Thus the Address trait
/// could be used to manage address, size and offset. On the other hand, type aliases may be
/// defined to improve code readability.
///
/// One design rule is applied to the Address trait: operators (+, -, &, | etc.) are not
/// supported, forcing clients to explicitly invoke the corresponding methods. There is one
/// exception:
/// Address (BitAnd|BitOr) AddressValue is supported.
pub trait Address:
AddressValue
+ Sized
+ Default
+ Copy
+ Eq
+ PartialEq
+ Ord
+ PartialOrd
+ BitAnd<<Self as AddressValue>::V, Output = Self>
+ BitOr<<Self as AddressValue>::V, Output = Self>
{
/// Create an address from a raw address value.
fn new(addr: Self::V) -> Self;
/// Get the raw value of the address.
fn raw_value(&self) -> Self::V;
/// Returns the bitwise and of the address with the given mask.
fn mask(&self, mask: Self::V) -> Self::V {
self.raw_value() & mask
}
/// Returns the offset from this address to the given base address, or None if there is
/// underflow.
fn checked_offset_from(&self, base: Self) -> Option<Self::V>;
/// Returns the offset from this address to the given base address.
/// Only use this when the subtraction `self - base` is guaranteed not to underflow.
fn unchecked_offset_from(&self, base: Self) -> Self::V {
self.raw_value() - base.raw_value()
}
/// Returns the result of the add or None if there is overflow.
fn checked_add(&self, other: Self::V) -> Option<Self>;
/// Returns the result of the add and a flag identifying whether there was overflow
fn overflowing_add(&self, other: Self::V) -> (Self, bool);
/// Returns the result of the base address + the size.
/// Only use this when `offset` is guaranteed not to overflow.
fn unchecked_add(&self, offset: Self::V) -> Self;
/// Returns the result of the subtraction or None if there is underflow.
fn checked_sub(&self, other: Self::V) -> Option<Self>;
/// Returns the result of the subtraction and a flag identifying whether there was overflow
fn overflowing_sub(&self, other: Self::V) -> (Self, bool);
/// Returns the result of the subtraction.
/// Only use this when `other` is guaranteed not to underflow.
fn unchecked_sub(&self, other: Self::V) -> Self;
}
macro_rules! impl_address_ops {
($T:ident, $V:ty) => {
impl AddressValue for $T {
type V = $V;
}
impl Address for $T {
fn new(value: $V) -> $T {
$T(value)
}
fn raw_value(&self) -> $V {
self.0
}
fn checked_offset_from(&self, base: $T) -> Option<$V> {
self.0.checked_sub(base.0)
}
fn checked_add(&self, other: $V) -> Option<$T> {
self.0.checked_add(other).map($T)
}
fn overflowing_add(&self, other: $V) -> ($T, bool) {
let (t, ovf) = self.0.overflowing_add(other);
($T(t), ovf)
}
fn unchecked_add(&self, offset: $V) -> $T {
$T(self.0 + offset)
}
fn checked_sub(&self, other: $V) -> Option<$T> {
self.0.checked_sub(other).map($T)
}
fn overflowing_sub(&self, other: $V) -> ($T, bool) {
let (t, ovf) = self.0.overflowing_sub(other);
($T(t), ovf)
}
fn unchecked_sub(&self, other: $V) -> $T {
$T(self.0 - other)
}
}
impl Default for $T {
fn default() -> $T {
Self::new(0 as $V)
}
}
impl BitAnd<$V> for $T {
type Output = $T;
fn bitand(self, other: $V) -> $T {
$T(self.0 & other)
}
}
impl BitOr<$V> for $T {
type Output = $T;
fn bitor(self, other: $V) -> $T {
$T(self.0 | other)
}
}
};
}

@@ -1,285 +0,0 @@
// Portions Copyright 2019 Red Hat, Inc.
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Define the ByteValued trait to mark that it is safe to instantiate the struct with random data.
use std::io::{Read, Write};
use std::mem::size_of;
use std::result::Result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
/// Types for which it is safe to initialize from raw data.
///
/// A type `T` is `ByteValued` if and only if it can be initialized by reading its contents from a
/// byte array. This is generally true for all plain-old-data structs. It is notably not true for
/// any type that includes a reference.
///
/// Implementing this trait guarantees that it is safe to instantiate the struct with random data.
pub unsafe trait ByteValued: Copy + Default + Send + Sync {
/// Converts a slice of raw data into a reference of `Self`.
///
/// The value of `data` is not copied. Instead a reference is made from the given slice. The
/// value of `Self` will depend on the representation of the type in memory, and may change in
/// an unstable fashion.
///
/// This will return `None` if the length of data does not match the size of `Self`, or if the
/// data is not aligned for the type of `Self`.
fn from_slice(data: &[u8]) -> Option<&Self> {
// Early out to avoid an unneeded `align_to` call.
if data.len() != size_of::<Self>() {
return None;
}
// Safe because the ByteValued trait asserts any data is valid for this type, and we ensured
// the size of the pointer's buffer is the correct size. The `align_to` method ensures that
// we don't have any unaligned references. This aliases a pointer, but because the pointer
// is from a const slice reference, there are no mutable aliases. Finally, the reference
// returned can not outlive data because they have equal implicit lifetime constraints.
match unsafe { data.align_to::<Self>() } {
([], [mid], []) => Some(mid),
_ => None,
}
}
/// Converts a mutable slice of raw data into a mutable reference of `Self`.
///
/// Because `Self` is made from a reference to the mutable slice, mutations to the returned
/// reference are immediately reflected in `data`. The value of the returned `Self` will depend
/// on the representation of the type in memory, and may change in an unstable fashion.
///
/// This will return `None` if the length of data does not match the size of `Self`, or if the
/// data is not aligned for the type of `Self`.
fn from_mut_slice(data: &mut [u8]) -> Option<&mut Self> {
// Early out to avoid an unneeded `align_to_mut` call.
if data.len() != size_of::<Self>() {
return None;
}
// Safe because the ByteValued trait asserts any data is valid for this type, and we ensured
// the size of the pointer's buffer is the correct size. The `align_to` method ensures that
// we don't have any unaligned references. This aliases a pointer, but because the pointer
// is from a mut slice reference, we borrow the passed in mutable reference. Finally, the
// reference returned can not outlive data because they have equal implicit lifetime
// constraints.
match unsafe { data.align_to_mut::<Self>() } {
([], [mid], []) => Some(mid),
_ => None,
}
}
/// Converts a reference to `self` into a slice of bytes.
///
/// The value of `self` is not copied. Instead, the slice is made from a reference to `self`.
/// The value of bytes in the returned slice will depend on the representation of the type in
/// memory, and may change in an unstable fashion.
fn as_slice(&self) -> &[u8] {
// Safe because the entire size of self is accessible as bytes because the trait guarantees
// it. The lifetime of the returned slice is the same as the passed reference, so that no
// dangling pointers will result from this pointer alias.
unsafe { from_raw_parts(self as *const Self as *const u8, size_of::<Self>()) }
}
/// Converts a mutable reference to `self` into a mutable slice of bytes.
///
/// Because the slice is made from a reference to `self`, mutations to the returned slice are
/// immediately reflected in `self`. The value of bytes in the returned slice will depend on
/// the representation of the type in memory, and may change in an unstable fashion.
fn as_mut_slice(&mut self) -> &mut [u8] {
// Safe because the entire size of self is accessible as bytes because the trait guarantees
// it. The trait also guarantees that any combination of bytes is valid for this type, so
// modifying them in the form of a byte slice is valid. The lifetime of the returned slice
// is the same as the passed reference, so that no dangling pointers will result from this
// pointer alias. Although this does alias a mutable pointer, we do so by exclusively
// borrowing the given mutable reference.
unsafe { from_raw_parts_mut(self as *mut Self as *mut u8, size_of::<Self>()) }
}
}
/// A container to host a range of bytes and access its content.
///
/// Candidates which may implement this trait include:
/// - anonymous memory areas
/// - mmapped memory areas
/// - data files
/// - a proxy to access memory on a remote host
pub trait Bytes<A> {
/// Associated error codes
type E;
/// Writes a slice into the container at the specified address.
/// Returns the number of bytes written. The number of bytes written can
/// be less than the length of the slice if there isn't enough room in the
/// container.
fn write(&self, buf: &[u8], addr: A) -> Result<usize, Self::E>;
/// Reads to a slice from the container at the specified address.
/// Returns the number of bytes read. The number of bytes read can be less than the length
/// of the slice if there isn't enough room within the container.
fn read(&self, buf: &mut [u8], addr: A) -> Result<usize, Self::E>;
/// Writes the entire contents of a slice into the container at the specified address.
///
/// Returns an error if there isn't enough room within the container to complete the entire
/// write. Part of the data may have been written nevertheless.
fn write_slice(&self, buf: &[u8], addr: A) -> Result<(), Self::E>;
/// Reads from the container at the specified address to fill the entire buffer.
///
/// Returns an error if there isn't enough room within the container to fill the entire buffer.
/// Part of the buffer may have been filled nevertheless.
fn read_slice(&self, buf: &mut [u8], addr: A) -> Result<(), Self::E>;
/// Writes an object into the container at the specified address.
/// Returns Ok(()) if the object fits, or Err if it extends past the end.
fn write_obj<T: ByteValued>(&self, val: T, addr: A) -> Result<(), Self::E> {
self.write_slice(val.as_slice(), addr)
}
/// Reads an object from the container at the given address.
/// Reading from a volatile area isn't strictly safe as it could change mid-read.
/// However, as long as the type T is plain old data and can handle random initialization,
/// everything will be OK.
fn read_obj<T: ByteValued>(&self, addr: A) -> Result<T, Self::E> {
let mut result: T = Default::default();
self.read_slice(result.as_mut_slice(), addr).map(|_| result)
}
/// Reads data from a readable object like a File and writes it into the container.
///
/// # Arguments
/// * `addr` - Begin writing at this address.
/// * `src` - Copy from `src` into the container.
/// * `count` - Copy `count` bytes from `src` into the container.
fn read_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<usize, Self::E>
where
F: Read;
/// Reads data from a readable object like a File and writes it into the container.
///
/// # Arguments
/// * `addr` - Begin writing at this address.
/// * `src` - Copy from `src` into the container.
/// * `count` - Copy `count` bytes from `src` into the container.
fn read_exact_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<(), Self::E>
where
F: Read;
/// Reads data from the container to a writable object.
///
/// # Arguments
/// * `addr` - Begin reading from this addr.
/// * `dst` - Copy from the container to `dst`.
/// * `count` - Copy `count` bytes from the container to `dst`.
fn write_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<usize, Self::E>
where
F: Write;
/// Reads data from the container to a writable object.
///
/// # Arguments
/// * `addr` - Begin reading from this addr.
/// * `dst` - Copy from the container to `dst`.
/// * `count` - Copy `count` bytes from the container to `dst`.
fn write_all_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E>
where
F: Write;
}
// All intrinsic types and arrays of intrinsic types are ByteValued. They are just numbers.
macro_rules! array_data_init {
($T:ty, $($N:expr)+) => {
$(
unsafe impl ByteValued for [$T; $N] {}
)+
}
}
macro_rules! data_init_type {
($T:ty) => {
unsafe impl ByteValued for $T {}
array_data_init! {
$T,
0 1 2 3 4 5 6 7 8 9
10 11 12 13 14 15 16 17 18 19
20 21 22 23 24 25 26 27 28 29
30 31 32
}
};
}
data_init_type!(u8);
data_init_type!(u16);
data_init_type!(u32);
data_init_type!(u64);
data_init_type!(usize);
data_init_type!(i8);
data_init_type!(i16);
data_init_type!(i32);
data_init_type!(i64);
data_init_type!(isize);
#[cfg(test)]
mod tests {
use std::fmt::Debug;
use std::mem::{align_of, size_of};
use ByteValued;
fn from_slice_alignment<T>()
where
T: ByteValued + PartialEq + Debug + Default,
{
let mut v = [0u8; 32];
let pre_len = {
let (pre, _, _) = unsafe { v.align_to::<T>() };
pre.len()
};
{
let aligned_v = &mut v[pre_len..pre_len + size_of::<T>()];
{
let from_aligned = T::from_slice(aligned_v);
let val: T = Default::default();
assert_eq!(from_aligned, Some(&val));
}
{
let from_aligned_mut = T::from_mut_slice(aligned_v);
let mut val: T = Default::default();
assert_eq!(from_aligned_mut, Some(&mut val));
}
}
for i in 1..size_of::<T>() {
let begin = pre_len + i;
let end = begin + size_of::<T>();
let unaligned_v = &mut v[begin..end];
{
let from_unaligned = T::from_slice(unaligned_v);
if align_of::<T>() != 1 {
assert_eq!(from_unaligned, None);
}
}
{
let from_unaligned_mut = T::from_mut_slice(unaligned_v);
if align_of::<T>() != 1 {
assert_eq!(from_unaligned_mut, None);
}
}
}
}
#[test]
fn test_slice_alignment() {
from_slice_alignment::<u8>();
from_slice_alignment::<u16>();
from_slice_alignment::<u32>();
from_slice_alignment::<u64>();
from_slice_alignment::<usize>();
from_slice_alignment::<i8>();
from_slice_alignment::<i16>();
from_slice_alignment::<i32>();
from_slice_alignment::<i64>();
from_slice_alignment::<isize>();
}
}

@@ -1,148 +0,0 @@
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Explicit endian types useful for embedding in structs or reinterpreting data.
//!
//! Each endian type is guaranteed to have the same size and alignment as a regular unsigned
//! primitive of the equal size.
//!
//! # Examples
//!
//! ```
//! # use vm_memory::*;
//! let b: Be32 = From::from(3);
//! let l: Le32 = From::from(3);
//!
//! assert_eq!(b.to_native(), 3);
//! assert_eq!(l.to_native(), 3);
//! assert!(b == 3);
//! assert!(l == 3);
//!
//! let b_trans: u32 = unsafe { std::mem::transmute(b) };
//! let l_trans: u32 = unsafe { std::mem::transmute(l) };
//!
//! #[cfg(target_endian = "little")]
//! assert_eq!(l_trans, 3);
//! #[cfg(target_endian = "big")]
//! assert_eq!(b_trans, 3);
//!
//! assert_ne!(b_trans, l_trans);
//! ```
use std::mem::{align_of, size_of};
use bytes::ByteValued;
macro_rules! const_assert {
($condition:expr) => {
let _ = [(); 0 - !$condition as usize];
};
}
macro_rules! endian_type {
($old_type:ident, $new_type:ident, $to_new:ident, $from_new:ident) => {
/// An unsigned integer type with an explicit endianness.
///
/// See module level documentation for examples.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
pub struct $new_type($old_type);
impl $new_type {
fn _assert() {
const_assert!(align_of::<$new_type>() == align_of::<$old_type>());
const_assert!(size_of::<$new_type>() == size_of::<$old_type>());
}
/// Converts `self` to the native endianness.
pub fn to_native(self) -> $old_type {
$old_type::$from_new(self.0)
}
}
unsafe impl ByteValued for $new_type {}
impl PartialEq<$old_type> for $new_type {
fn eq(&self, other: &$old_type) -> bool {
self.0 == $old_type::$to_new(*other)
}
}
impl PartialEq<$new_type> for $old_type {
fn eq(&self, other: &$new_type) -> bool {
$old_type::$to_new(other.0) == *self
}
}
impl Into<$old_type> for $new_type {
fn into(self) -> $old_type {
$old_type::$from_new(self.0)
}
}
impl From<$old_type> for $new_type {
fn from(v: $old_type) -> $new_type {
$new_type($old_type::$to_new(v))
}
}
};
}
endian_type!(u16, Le16, to_le, from_le);
endian_type!(u32, Le32, to_le, from_le);
endian_type!(u64, Le64, to_le, from_le);
endian_type!(usize, LeSize, to_le, from_le);
endian_type!(u16, Be16, to_be, from_be);
endian_type!(u32, Be32, to_be, from_be);
endian_type!(u64, Be64, to_be, from_be);
endian_type!(usize, BeSize, to_be, from_be);
#[cfg(test)]
mod tests {
use super::*;
use std::convert::From;
use std::mem::transmute;
#[cfg(target_endian = "little")]
const NATIVE_LITTLE: bool = true;
#[cfg(target_endian = "big")]
const NATIVE_LITTLE: bool = false;
const NATIVE_BIG: bool = !NATIVE_LITTLE;
macro_rules! endian_test {
($old_type:ty, $new_type:ty, $test_name:ident, $native:expr) => {
mod $test_name {
use super::*;
#[allow(overflowing_literals)]
#[test]
fn equality() {
let v = 0x0123_4567_89AB_CDEF as $old_type;
let endian_v: $new_type = From::from(v);
let endian_into: $old_type = endian_v.into();
let endian_transmute: $old_type = unsafe { transmute(endian_v) };
if $native {
assert_eq!(endian_v, endian_transmute);
} else {
assert_eq!(endian_v, endian_transmute.swap_bytes());
}
assert_eq!(v, endian_into);
assert!(v == endian_v);
assert!(endian_v == v);
}
}
};
}
endian_test!(u16, Le16, test_le16, NATIVE_LITTLE);
endian_test!(u32, Le32, test_le32, NATIVE_LITTLE);
endian_test!(u64, Le64, test_le64, NATIVE_LITTLE);
endian_test!(usize, LeSize, test_le_size, NATIVE_LITTLE);
endian_test!(u16, Be16, test_be16, NATIVE_BIG);
endian_test!(u32, Be32, test_be32, NATIVE_BIG);
endian_test!(u64, Be64, test_be64, NATIVE_BIG);
endian_test!(usize, BeSize, test_be_size, NATIVE_BIG);
}

@@ -1,515 +0,0 @@
// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Traits to track and access guest's physical memory.
//!
//! To make the abstraction as generic as possible, all the core traits declared here only define
//! methods to access guest's memory, and never define methods to manage (create, delete, insert,
//! remove etc.) guest's memory. In this way, the guest memory consumers (virtio device drivers,
//! vhost drivers and boot loaders etc) may be decoupled from the guest memory provider (typically
//! a hypervisor).
//!
//! Traits and Structs
//! - [GuestAddress](struct.GuestAddress.html): represents a guest physical address (GPA).
//! - [MemoryRegionAddress](struct.MemoryRegionAddress.html): represents an offset inside a region.
//! - [GuestMemoryRegion](trait.GuestMemoryRegion.html): represents a continuous region of guest's
//! physical memory.
//! - [GuestMemory](trait.GuestMemory.html): represents a collection of GuestMemoryRegion objects.
//! The main responsibilities of the GuestMemory trait are:
//! - hide the details of accessing guest's physical addresses.
//! - map a request address to a GuestMemoryRegion object and relay the request to it.
//! - handle cases where an access request spans two or more GuestMemoryRegion objects.
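//!
//! As a quick orientation, a minimal sketch (assuming the optional `backend-mmap`
//! feature, which provides the `GuestMemoryMmap` implementation of these traits):
//!
//! ```
//! # #[cfg(feature = "backend-mmap")]
//! # fn example() {
//! # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
//! let mem = GuestMemoryMmap::new(&[(GuestAddress(0x1000), 0x400)]).unwrap();
//! // Object reads/writes go through the Bytes<GuestAddress> impl defined below.
//! mem.write_obj(0xdead_beefu32, GuestAddress(0x1010)).unwrap();
//! let v: u32 = mem.read_obj(GuestAddress(0x1010)).unwrap();
//! assert_eq!(v, 0xdead_beef);
//! # }
//! ```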
use std::convert::From;
use std::fmt::{self, Display};
use std::io::{self, Read, Write};
use std::ops::{BitAnd, BitOr};
use address::{Address, AddressValue};
use bytes::Bytes;
use volatile_memory;
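// Largest chunk copied through an intermediate buffer when a region cannot
// expose its memory directly as a slice (see read_from()/write_to() below).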
static MAX_ACCESS_CHUNK: usize = 4096;
/// Errors associated with handling guest memory accesses.
#[allow(missing_docs)]
#[derive(Debug)]
pub enum Error {
/// Failure in finding a guest address in any memory regions mapped by this guest.
InvalidGuestAddress(GuestAddress),
/// Couldn't read/write from the given source.
IOError(io::Error),
/// Incomplete read or write
PartialBuffer { expected: usize, completed: usize },
/// Requested backend address is out of range.
InvalidBackendAddress,
}
impl From<volatile_memory::Error> for Error {
fn from(e: volatile_memory::Error) -> Self {
match e {
volatile_memory::Error::OutOfBounds { .. } => Error::InvalidBackendAddress,
volatile_memory::Error::Overflow { .. } => Error::InvalidBackendAddress,
volatile_memory::Error::IOError(e) => Error::IOError(e),
volatile_memory::Error::PartialBuffer {
expected,
completed,
} => Error::PartialBuffer {
expected,
completed,
},
}
}
}
/// Result of guest memory operations
pub type Result<T> = std::result::Result<T, Error>;
impl std::error::Error for Error {}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Guest memory error: ")?;
match self {
Error::InvalidGuestAddress(addr) => {
write!(f, "invalid guest address {}", addr.raw_value())
}
Error::IOError(error) => write!(f, "{}", error),
Error::PartialBuffer {
expected,
completed,
} => write!(
f,
"only used {} bytes in {} long buffer",
completed, expected,
),
Error::InvalidBackendAddress => write!(f, "invalid backend address"),
}
}
}
/// Represents a guest physical address (GPA).
///
/// Notes:
/// - On ARM64, a 32-bit hypervisor may be used to support a 64-bit guest. For simplicity,
/// u64 is used to store the raw value no matter whether the guest is a 32-bit or 64-bit virtual
/// machine.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct GuestAddress(pub u64);
impl_address_ops!(GuestAddress, u64);
/// Represents an offset inside a region.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct MemoryRegionAddress(pub u64);
impl_address_ops!(MemoryRegionAddress, u64);
/// Type of the raw value stored in a GuestAddress object.
pub type GuestUsize = <GuestAddress as AddressValue>::V;
/// Represents a continuous region of guest physical memory.
#[allow(clippy::len_without_is_empty)]
pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = Error> {
/// Get the size of the region.
fn len(&self) -> GuestUsize;
/// Get minimum (inclusive) address managed by the region.
fn start_addr(&self) -> GuestAddress;
/// Get maximum (inclusive) address managed by the region.
fn end_addr(&self) -> GuestAddress {
// unchecked_add is safe as the region bounds were checked when it was created.
self.start_addr().unchecked_add(self.len() - 1)
}
/// Returns the given address if it is within the memory range accessible
/// through this region.
fn check_address(&self, addr: MemoryRegionAddress) -> Option<MemoryRegionAddress> {
if self.address_in_range(addr) {
Some(addr)
} else {
None
}
}
/// Returns true if the given address is within the memory range accessible
/// through this region.
fn address_in_range(&self, addr: MemoryRegionAddress) -> bool {
addr.raw_value() < self.len()
}
/// Returns the address plus the offset if it is in range.
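///
/// A sketch of the bounds behavior (assuming the `backend-mmap` feature for a
/// concrete region type):
///
/// ```
/// # #[cfg(feature = "backend-mmap")]
/// # fn example() {
/// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion, MemoryRegionAddress};
/// let mem = GuestMemoryMmap::new(&[(GuestAddress(0), 0x400)]).unwrap();
/// let region = mem.find_region(GuestAddress(0)).unwrap();
/// assert!(region.checked_offset(MemoryRegionAddress(0x300), 0xff).is_some());
/// assert!(region.checked_offset(MemoryRegionAddress(0x300), 0x100).is_none());
/// # }
/// ```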
fn checked_offset(
&self,
base: MemoryRegionAddress,
offset: usize,
) -> Option<MemoryRegionAddress> {
base.checked_add(offset as u64)
.and_then(|addr| self.check_address(addr))
}
/// Convert an absolute address within an address space (GuestMemory)
/// into a relative address within this region, or return None if
/// it is out of bounds.
fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
addr.checked_offset_from(self.start_addr())
.and_then(|offset| self.check_address(MemoryRegionAddress(offset)))
}
/// Return a slice corresponding to the data in the region; unsafe because of
/// possible aliasing. Return None if the region does not support slice-based
/// access.
unsafe fn as_slice(&self) -> Option<&[u8]> {
None
}
/// Return a mutable slice corresponding to the data in the region; unsafe because of
/// possible aliasing. Return None if the region does not support slice-based
/// access.
unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
None
}
}
/// Represents a container for a collection of GuestMemoryRegion objects.
///
/// The main responsibilities of the GuestMemory trait are:
/// - hide the details of accessing guest's physical addresses.
/// - map a request address to a GuestMemoryRegion object and relay the request to it.
/// - handle cases where an access request spans two or more GuestMemoryRegion objects.
///
/// Note: all regions in a GuestMemory object must not intersect with each other.
pub trait GuestMemory {
/// Type of objects hosted by the address space.
type R: GuestMemoryRegion;
/// Returns the number of regions in the collection.
fn num_regions(&self) -> usize;
/// Return the region containing the specified address or None.
fn find_region(&self, addr: GuestAddress) -> Option<&Self::R>;
/// Perform the specified action on each region.
/// It only walks the children of the current region and does not step into sub-regions.
fn with_regions<F, E>(&self, cb: F) -> std::result::Result<(), E>
where
F: Fn(usize, &Self::R) -> std::result::Result<(), E>;
/// Perform the specified action on each region mutably.
/// It only walks the children of the current region and does not step into sub-regions.
fn with_regions_mut<F, E>(&self, cb: F) -> std::result::Result<(), E>
where
F: FnMut(usize, &Self::R) -> std::result::Result<(), E>;
/// Applies two functions, specified as callbacks, on the inner memory regions.
///
/// # Arguments
/// * `init` - Starting value of the accumulator for the `foldf` function.
/// * `mapf` - "Map" function, applied to all the inner memory regions. It returns an array of
/// the same size as the memory regions array, containing the function's results
/// for each region.
/// * `foldf` - "Fold" function, applied to the array returned by `mapf`. It acts as an
///            operator, applying itself to the `init` value and to each subsequent element
/// in the array returned by `mapf`.
///
/// # Examples
///
/// * Compute the total size of all memory mappings in KB by iterating over the memory regions
///   and dividing their sizes by 1024, then summing up the values in an accumulator.
///
/// ```
/// # #[cfg(feature = "backend-mmap")]
/// # fn test_map_fold() -> Result<(), ()> {
/// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, mmap::GuestMemoryMmap};
/// let start_addr1 = GuestAddress(0x0);
/// let start_addr2 = GuestAddress(0x400);
/// let mem = GuestMemoryMmap::new(&vec![(start_addr1, 1024), (start_addr2, 2048)]).unwrap();
/// let total_size = mem.map_and_fold(
/// 0,
/// |(_, region)| region.len() / 1024,
/// |acc, size| acc + size
/// );
/// println!("Total memory size = {} KB", total_size);
/// Ok(())
/// # }
/// ```
fn map_and_fold<F, G, T>(&self, init: T, mapf: F, foldf: G) -> T
where
F: Fn((usize, &Self::R)) -> T,
G: Fn(T, T) -> T;
/// Get the maximum (inclusive) address managed by this address space.
fn end_addr(&self) -> GuestAddress {
self.map_and_fold(
GuestAddress(0),
|(_, region)| region.end_addr(),
std::cmp::max,
)
}
/// Convert an absolute address within this address space (GuestMemory)
/// into the containing region and the relative address within it, or
/// return None if it is out of bounds.
fn to_region_addr(&self, addr: GuestAddress) -> Option<(&Self::R, MemoryRegionAddress)> {
self.find_region(addr)
.map(|r| (r, r.to_region_addr(addr).unwrap()))
}
/// Returns true if the given address is within the memory range available to the guest.
fn address_in_range(&self, addr: GuestAddress) -> bool {
self.find_region(addr).is_some()
}
/// Returns the given address if it is within the memory range available to the guest.
fn check_address(&self, addr: GuestAddress) -> Option<GuestAddress> {
self.find_region(addr).map(|_| addr)
}
/// Returns the address plus the offset if it is in range.
fn checked_offset(&self, base: GuestAddress, offset: usize) -> Option<GuestAddress> {
base.checked_add(offset as u64)
.and_then(|addr| self.check_address(addr))
}
/// Invoke callback `f` to handle data in the address range [addr, addr + count).
///
/// The address range [addr, addr + count) may span more than one GuestMemoryRegion object, or
/// even have holes within it. So try_access() invokes the callback 'f' for each GuestMemoryRegion
/// object involved and returns:
/// - error code returned by the callback 'f'
/// - size of data already handled when encountering the first hole
/// - size of data already handled when the whole range has been handled
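///
/// A sketch (assuming the `backend-mmap` feature): counting how many bytes of a
/// range are backed by regions, without copying any data:
///
/// ```
/// # #[cfg(feature = "backend-mmap")]
/// # fn example() {
/// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
/// let mem = GuestMemoryMmap::new(&[(GuestAddress(0), 0x400)]).unwrap();
/// // The callback claims every byte offered to it, so the result is the number
/// // of bytes of [0x100, 0x300) that fall inside mapped regions.
/// let handled = mem
///     .try_access(0x200, GuestAddress(0x100), |_, len, _, _| Ok(len))
///     .unwrap();
/// assert_eq!(handled, 0x200);
/// # }
/// ```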
fn try_access<F>(&self, count: usize, addr: GuestAddress, mut f: F) -> Result<usize>
where
F: FnMut(usize, usize, MemoryRegionAddress, &Self::R) -> Result<usize>,
{
let mut cur = addr;
let mut total = 0;
while let Some(region) = self.find_region(cur) {
let start = region.to_region_addr(cur).unwrap();
// Capacity left in the region, from the relative start address onward;
// otherwise a range starting mid-region could overrun the region's end.
let cap = region.len() as usize - start.raw_value() as usize;
let len = std::cmp::min(cap, count - total);
match f(total, len, start, region) {
// no more data
Ok(0) => break,
// made some progress
Ok(len) => {
total += len;
if total == count {
break;
}
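// Advance the cursor past the bytes just handled; a wrap that lands
// exactly on address zero is tolerated, while any other overflow panics.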
cur = match cur.overflowing_add(len as GuestUsize) {
(GuestAddress(0), _) => GuestAddress(0),
(result, false) => result,
(_, true) => panic!("guest address overflow"),
}
}
// error happened
e => return e,
}
}
if total == 0 {
Err(Error::InvalidGuestAddress(addr))
} else {
Ok(total)
}
}
}
impl<T: GuestMemory> Bytes<GuestAddress> for T {
type E = Error;
fn write(&self, buf: &[u8], addr: GuestAddress) -> Result<usize> {
self.try_access(
buf.len(),
addr,
|offset, _count, caddr, region| -> Result<usize> {
region.write(&buf[offset as usize..], caddr)
},
)
}
fn read(&self, buf: &mut [u8], addr: GuestAddress) -> Result<usize> {
self.try_access(
buf.len(),
addr,
|offset, _count, caddr, region| -> Result<usize> {
region.read(&mut buf[offset as usize..], caddr)
},
)
}
fn write_slice(&self, buf: &[u8], addr: GuestAddress) -> Result<()> {
let res = self.write(buf, addr)?;
if res != buf.len() {
return Err(Error::PartialBuffer {
expected: buf.len(),
completed: res,
});
}
Ok(())
}
fn read_slice(&self, buf: &mut [u8], addr: GuestAddress) -> Result<()> {
let res = self.read(buf, addr)?;
if res != buf.len() {
return Err(Error::PartialBuffer {
expected: buf.len(),
completed: res,
});
}
Ok(())
}
fn read_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<usize>
where
F: Read,
{
self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
// Check if something bad happened before doing unsafe things.
assert!(offset < count);
if let Some(dst) = unsafe { region.as_mut_slice() } {
// This is safe because `start` and `len` are within the `region`.
let start = caddr.raw_value() as usize;
let end = start + len;
src.read_exact(&mut dst[start..end])
.map_err(Error::IOError)?;
Ok(len)
} else {
let len = std::cmp::min(len, MAX_ACCESS_CHUNK);
let mut buf = vec![0u8; len].into_boxed_slice();
let bytes_read = src.read(&mut buf[..]).map_err(Error::IOError)?;
let bytes_written = region.write(&buf[0..bytes_read], caddr)?;
assert_eq!(bytes_written, bytes_read);
Ok(len)
}
})
}
fn read_exact_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<()>
where
F: Read,
{
let res = self.read_from(addr, src, count)?;
if res != count {
return Err(Error::PartialBuffer {
expected: count,
completed: res,
});
}
Ok(())
}
fn write_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<usize>
where
F: Write,
{
self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
// Check if something bad happened before doing unsafe things.
assert!(offset < count);
if let Some(src) = unsafe { region.as_slice() } {
// This is safe because `start` and `len` are within the `region`.
let start = caddr.raw_value() as usize;
let end = start + len;
// It is safe to read from volatile memory. Accessing the guest
// memory as a slice is OK because nothing assumes another thread
// won't change what is loaded.
let bytes_written = dst.write(&src[start..end]).map_err(Error::IOError)?;
Ok(bytes_written)
} else {
let len = std::cmp::min(len, MAX_ACCESS_CHUNK);
let mut buf = vec![0u8; len].into_boxed_slice();
let bytes_read = region.read(&mut buf, caddr)?;
assert_eq!(bytes_read, len);
let bytes_written = dst.write(&buf).map_err(Error::IOError)?;
Ok(bytes_written)
}
})
}
fn write_all_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()>
where
F: Write,
{
let res = self.write_to(addr, dst, count)?;
if res != count {
return Err(Error::PartialBuffer {
expected: count,
completed: res,
});
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn offset_from() {
let base = GuestAddress(0x100);
let addr = GuestAddress(0x150);
assert_eq!(addr.unchecked_offset_from(base), 0x50u64);
assert_eq!(addr.checked_offset_from(base), Some(0x50u64));
assert_eq!(base.checked_offset_from(addr), None);
}
#[test]
fn equals() {
let a = GuestAddress(0x300);
let b = GuestAddress(0x300);
let c = GuestAddress(0x301);
assert_eq!(a, GuestAddress(a.raw_value()));
assert_eq!(a, b);
assert_eq!(b, a);
assert_ne!(a, c);
assert_ne!(c, a);
}
#[test]
#[allow(clippy::eq_op)]
fn cmp() {
let a = GuestAddress(0x300);
let b = GuestAddress(0x301);
assert!(a < b);
assert!(b > a);
assert!(!(a < a));
}
#[test]
fn mask() {
let a = GuestAddress(0x5050);
assert_eq!(GuestAddress(0x5000), a & 0xff00u64);
assert_eq!(0x5000, a.mask(0xff00u64));
assert_eq!(GuestAddress(0x5055), a | 0x0005u64);
}
#[test]
fn add_sub() {
let a = GuestAddress(0x50);
let b = GuestAddress(0x60);
assert_eq!(Some(GuestAddress(0xb0)), a.checked_add(0x60));
assert_eq!(0x10, b.unchecked_offset_from(a));
}
#[test]
fn checked_add_overflow() {
let a = GuestAddress(0xffff_ffff_ffff_ff55);
assert_eq!(Some(GuestAddress(0xffff_ffff_ffff_ff57)), a.checked_add(2));
assert!(a.checked_add(0xf0).is_none());
}
#[test]
fn checked_sub_underflow() {
let a = GuestAddress(0xff);
assert_eq!(Some(GuestAddress(0x0f)), a.checked_sub(0xf0));
assert!(a.checked_sub(0xffff).is_none());
}
}

View File

@ -1,57 +0,0 @@
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Traits for allocating, handling and interacting with the VM's physical memory.
//!
//! For a typical hypervisor, there are several components, such as the boot loader, virtual device
//! drivers, virtio backend drivers and vhost drivers, that need to access the VM's physical memory.
//! This crate aims to provide a set of stable traits to decouple VM memory consumers from VM
//! memory providers. Based on these traits, VM memory consumers could access VM's physical memory
//! without knowing the implementation details of the VM memory provider. Thus hypervisor
//! components, such as boot loader, virtual device drivers, virtio backend drivers and vhost
//! drivers etc, could be shared and reused by multiple hypervisors.
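//!
//! A minimal end-to-end sketch (assuming the optional `backend-mmap` feature):
//!
//! ```
//! # #[cfg(feature = "backend-mmap")]
//! # fn example() {
//! use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
//! let mem = GuestMemoryMmap::new(&[(GuestAddress(0), 0x1000)]).unwrap();
//! mem.write_slice(&[1, 2, 3, 4], GuestAddress(0x100)).unwrap();
//! let mut buf = [0u8; 4];
//! mem.read_slice(&mut buf, GuestAddress(0x100)).unwrap();
//! assert_eq!(buf, [1, 2, 3, 4]);
//! # }
//! ```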
#![deny(missing_docs)]
extern crate libc;
#[cfg(test)]
#[macro_use]
extern crate matches;
#[macro_use]
pub mod address;
pub use address::{Address, AddressValue};
pub mod bytes;
pub use bytes::{ByteValued, Bytes};
pub mod endian;
pub use endian::{Be16, Be32, Be64, BeSize, Le16, Le32, Le64, LeSize};
pub mod guest_memory;
pub use guest_memory::{
Error as GuestMemoryError, GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize,
MemoryRegionAddress, Result as GuestMemoryResult,
};
#[cfg(all(feature = "backend-mmap", unix))]
mod mmap_unix;
#[cfg(all(feature = "backend-mmap", windows))]
mod mmap_windows;
#[cfg(feature = "backend-mmap")]
pub mod mmap;
#[cfg(feature = "backend-mmap")]
pub use mmap::{GuestMemoryMmap, GuestRegionMmap, MmapError, MmapRegion};
pub mod volatile_memory;
pub use volatile_memory::{
Error as VolatileMemoryError, Result as VolatileMemoryResult, VolatileMemory, VolatileRef,
VolatileSlice,
};

View File

@ -1,681 +0,0 @@
// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! A default implementation of the GuestMemory trait by mmap()-ing guest's memory into the current
//! process.
//!
//! The main structs to access guest's memory are:
//! - [MmapRegion](struct.MmapRegion.html): mmap a continuous region of guest's memory into the
//! current process
//! - [GuestRegionMmap](struct.GuestRegionMmap.html): tracks a mapping of memory in the current
//! process and the corresponding base address. It relays guest memory access requests to the
//! underlying [MmapRegion](struct.MmapRegion.html) object.
//! - [GuestMemoryMmap](struct.GuestMemoryMmap.html): provides methods to access a collection of
//! GuestRegionMmap objects.
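//!
//! A brief sketch of how these pieces fit together:
//!
//! ```
//! # use vm_memory::{GuestAddress, GuestMemoryMmap};
//! let mem = GuestMemoryMmap::new(&[(GuestAddress(0x1000), 0x400)]).unwrap();
//! // Translate a guest physical address into a pointer in the current process.
//! assert!(mem.get_host_address(GuestAddress(0x1000)).is_some());
//! assert!(mem.get_host_address(GuestAddress(0x2000)).is_none());
//! ```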
use std::io::{self, Read, Write};
use std::ops::Deref;
use std::sync::Arc;
use address::Address;
use guest_memory::*;
use volatile_memory::VolatileMemory;
use Bytes;
#[cfg(unix)]
pub use mmap_unix::MmapRegion;
#[cfg(windows)]
pub use mmap_windows::MmapRegion;
// For MmapRegion
pub(crate) trait AsSlice {
unsafe fn as_slice(&self) -> &[u8];
#[allow(clippy::mut_from_ref)]
unsafe fn as_mut_slice(&self) -> &mut [u8];
}
/// Errors that can happen when creating a memory map
#[derive(Debug)]
pub enum MmapError {
/// Syscall returned the given error.
SystemCallFailed(io::Error),
/// No memory region found.
NoMemoryRegion,
/// Some of the memory regions intersect with each other.
MemoryRegionOverlap,
}
/// Tracks a mapping of memory in the current process and the corresponding base address
/// in the guest's memory space.
#[derive(Debug)]
pub struct GuestRegionMmap {
mapping: MmapRegion,
guest_base: GuestAddress,
}
impl GuestRegionMmap {
/// Create a new memory-mapped memory region for guest's physical memory.
/// Note: the caller needs to ensure that (mapping.len() + guest_base) doesn't wrap around.
pub fn new(mapping: MmapRegion, guest_base: GuestAddress) -> Self {
GuestRegionMmap {
mapping,
guest_base,
}
}
/// Convert an address relative to this region (a MemoryRegionAddress)
/// into a host pointer, or return None if it is out of bounds.
pub fn get_host_address(&self, addr: MemoryRegionAddress) -> Option<*mut u8> {
// wrapping_offset is not unsafe because it only manipulates the raw
// pointer without dereferencing it. This is safe here because addr has
// just been range-checked by check_address.
self.check_address(addr)
.map(|addr| self.as_ptr().wrapping_offset(addr.raw_value() as isize))
}
}
impl Deref for GuestRegionMmap {
type Target = MmapRegion;
fn deref(&self) -> &MmapRegion {
&self.mapping
}
}
impl Bytes<MemoryRegionAddress> for GuestRegionMmap {
type E = Error;
/// # Examples
/// * Write a slice at guest address 0x1200.
///
/// ```
/// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
/// # let start_addr = GuestAddress(0x1000);
/// # let mut gm = GuestMemoryMmap::new(&vec![(start_addr, 0x400)]).unwrap();
/// let res = gm.write(&[1,2,3,4,5], GuestAddress(0x1200)).unwrap();
/// assert_eq!(5, res);
/// ```
fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<usize> {
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.write(buf, maddr)
.map_err(Into::into)
}
/// # Examples
/// * Read a slice of length 16 at guest address 0x1200.
///
/// ```
/// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
/// # let start_addr = GuestAddress(0x1000);
/// # let mut gm = GuestMemoryMmap::new(&vec![(start_addr, 0x400)]).unwrap();
/// let buf = &mut [0u8; 16];
/// let res = gm.read(buf, GuestAddress(0x1200)).unwrap();
/// assert_eq!(16, res);
/// ```
fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<usize> {
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.read(buf, maddr)
.map_err(Into::into)
}
fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<()> {
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.write_slice(buf, maddr)
.map_err(Into::into)
}
fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<()> {
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.read_slice(buf, maddr)
.map_err(Into::into)
}
/// # Examples
///
/// * Read bytes from /dev/urandom
///
/// ```
/// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
/// # use std::fs::File;
/// # use std::path::Path;
/// # let start_addr = GuestAddress(0x1000);
/// # let gm = GuestMemoryMmap::new(&vec![(start_addr, 0x400)]).unwrap();
/// let mut file = if cfg!(unix) {
/// File::open(Path::new("/dev/urandom")).unwrap()
/// } else {
/// File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
/// };
/// let addr = GuestAddress(0x1010);
/// gm.read_from(addr, &mut file, 128).unwrap();
/// let read_addr = addr.checked_add(8).unwrap();
/// let _: u32 = gm.read_obj(read_addr).unwrap();
/// ```
fn read_from<F>(&self, addr: MemoryRegionAddress, src: &mut F, count: usize) -> Result<usize>
where
F: Read,
{
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.read_from::<F>(maddr, src, count)
.map_err(Into::into)
}
/// # Examples
///
/// * Read bytes from /dev/urandom
///
/// ```
/// # extern crate tempfile;
/// # use self::tempfile::tempfile;
/// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
/// # use std::fs::File;
/// # use std::path::Path;
/// # let start_addr = GuestAddress(0x1000);
/// # let gm = GuestMemoryMmap::new(&vec![(start_addr, 0x400)]).unwrap();
/// let mut file = if cfg!(unix) {
/// File::open(Path::new("/dev/urandom")).unwrap()
/// } else {
/// File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
/// };
/// let addr = GuestAddress(0x1010);
/// gm.read_exact_from(addr, &mut file, 128).unwrap();
/// let read_addr = addr.checked_add(8).unwrap();
/// let _: u32 = gm.read_obj(read_addr).unwrap();
/// ```
fn read_exact_from<F>(&self, addr: MemoryRegionAddress, src: &mut F, count: usize) -> Result<()>
where
F: Read,
{
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.read_exact_from::<F>(maddr, src, count)
.map_err(Into::into)
}
/// Writes data from the region to a writable object.
///
/// # Examples
///
/// * Write 128 bytes to a temp file
///
/// ```
/// # extern crate tempfile;
/// # use self::tempfile::tempfile;
/// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
/// # use std::fs::OpenOptions;
/// # let start_addr = GuestAddress(0x1000);
/// # let gm = GuestMemoryMmap::new(&vec![(start_addr, 0x400)]).unwrap();
/// let mut file = tempfile().unwrap();
/// let mut mem = [0u8; 1024];
/// gm.write_to(start_addr, &mut file, 128).unwrap();
/// ```
fn write_to<F>(&self, addr: MemoryRegionAddress, dst: &mut F, count: usize) -> Result<usize>
where
F: Write,
{
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.write_to::<F>(maddr, dst, count)
.map_err(Into::into)
}
/// Writes data from the region to a writable object.
///
/// # Examples
///
/// * Write 128 bytes to a temp file
///
/// ```
/// # extern crate tempfile;
/// # use self::tempfile::tempfile;
/// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
/// # use std::fs::OpenOptions;
/// # let start_addr = GuestAddress(0x1000);
/// # let gm = GuestMemoryMmap::new(&vec![(start_addr, 0x400)]).unwrap();
/// let mut file = tempfile().unwrap();
/// let mut mem = [0u8; 1024];
/// gm.write_all_to(start_addr, &mut file, 128).unwrap();
/// ```
fn write_all_to<F>(&self, addr: MemoryRegionAddress, dst: &mut F, count: usize) -> Result<()>
where
F: Write,
{
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.write_all_to::<F>(maddr, dst, count)
.map_err(Into::into)
}
}
impl GuestMemoryRegion for GuestRegionMmap {
fn len(&self) -> GuestUsize {
self.mapping.len() as GuestUsize
}
fn start_addr(&self) -> GuestAddress {
self.guest_base
}
unsafe fn as_slice(&self) -> Option<&[u8]> {
Some(self.mapping.as_slice())
}
unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
Some(self.mapping.as_mut_slice())
}
}
/// Tracks memory regions allocated/mapped for the guest in the current process.
#[derive(Clone, Debug)]
pub struct GuestMemoryMmap {
regions: Arc<Vec<GuestRegionMmap>>,
}
impl GuestMemoryMmap {
/// Creates a container and allocates anonymous memory for guest memory regions.
/// Valid memory regions are specified as a sequence of (Address, Size) tuples sorted by Address.
pub fn new(ranges: &[(GuestAddress, usize)]) -> std::result::Result<Self, MmapError> {
if ranges.is_empty() {
return Err(MmapError::NoMemoryRegion);
}
let mut regions = Vec::<GuestRegionMmap>::new();
for range in ranges.iter() {
if let Some(last) = regions.last() {
if last
.guest_base
.checked_add(last.mapping.len() as GuestUsize)
.map_or(true, |a| a > range.0)
{
return Err(MmapError::MemoryRegionOverlap);
}
}
let mapping = MmapRegion::new(range.1).map_err(MmapError::SystemCallFailed)?;
regions.push(GuestRegionMmap {
mapping,
guest_base: range.0,
});
}
Ok(Self {
regions: Arc::new(regions),
})
}
/// Creates a container and adds an existing set of mappings to it.
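///
/// A sketch of typical use, placing one anonymous mapping at a guest base address:
///
/// ```
/// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap, MmapRegion};
/// let region = GuestRegionMmap::new(MmapRegion::new(0x400).unwrap(), GuestAddress(0x1000));
/// let mem = GuestMemoryMmap::from_regions(vec![region]).unwrap();
/// assert_eq!(mem.num_regions(), 1);
/// ```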
pub fn from_regions(ranges: Vec<GuestRegionMmap>) -> std::result::Result<Self, MmapError> {
if ranges.is_empty() {
return Err(MmapError::NoMemoryRegion);
}
for rangei in 1..ranges.len() {
let range = &ranges[rangei];
let last = &ranges[rangei - 1];
if last
.guest_base
.checked_add(last.mapping.len() as GuestUsize)
.map_or(true, |a| a > range.start_addr())
{
return Err(MmapError::MemoryRegionOverlap);
}
}
Ok(Self {
regions: Arc::new(ranges),
})
}
/// Convert an absolute address within this address space (GuestMemory)
/// into a host pointer, or return None if it is out of bounds.
pub fn get_host_address(&self, addr: GuestAddress) -> Option<*mut u8> {
self.to_region_addr(addr)
.and_then(|(r, addr)| r.get_host_address(addr))
}
}
impl GuestMemory for GuestMemoryMmap {
type R = GuestRegionMmap;
fn num_regions(&self) -> usize {
self.regions.len()
}
fn find_region(&self, addr: GuestAddress) -> Option<&GuestRegionMmap> {
for region in self.regions.iter() {
if addr >= region.start_addr() && addr <= region.end_addr() {
return Some(region);
}
}
None
}
fn with_regions<F, E>(&self, cb: F) -> std::result::Result<(), E>
where
F: Fn(usize, &Self::R) -> std::result::Result<(), E>,
{
for (index, region) in self.regions.iter().enumerate() {
cb(index, region)?;
}
Ok(())
}
fn with_regions_mut<F, E>(&self, mut cb: F) -> std::result::Result<(), E>
where
F: FnMut(usize, &Self::R) -> std::result::Result<(), E>,
{
for (index, region) in self.regions.iter().enumerate() {
cb(index, region)?;
}
Ok(())
}
fn map_and_fold<F, G, T>(&self, init: T, mapf: F, foldf: G) -> T
where
F: Fn((usize, &Self::R)) -> T,
G: Fn(T, T) -> T,
{
self.regions.iter().enumerate().map(mapf).fold(init, foldf)
}
}
#[cfg(test)]
mod tests {
extern crate tempfile;
use self::tempfile::tempfile;
use super::*;
use std::fs::File;
use std::mem;
use std::path::Path;
use Bytes;
#[test]
fn basic_map() {
let m = MmapRegion::new(1024).unwrap();
assert_eq!(1024, m.len());
}
#[test]
fn map_invalid_size() {
let e = MmapRegion::new(0).unwrap_err();
assert_eq!(e.raw_os_error(), Some(libc::EINVAL));
}
#[test]
fn slice_addr() {
let m = MmapRegion::new(5).unwrap();
let s = m.get_slice(2, 3).unwrap();
assert_eq!(s.as_ptr(), unsafe { m.as_ptr().offset(2) });
}
#[test]
fn mapped_file_read() {
let mut f = tempfile().unwrap();
let sample_buf = &[1, 2, 3, 4, 5];
assert!(f.write_all(sample_buf).is_ok());
let mem_map = MmapRegion::from_fd(&f, sample_buf.len(), 0).unwrap();
let buf = &mut [0u8; 16];
assert_eq!(
mem_map.as_volatile_slice().read(buf, 0).unwrap(),
sample_buf.len()
);
assert_eq!(buf[0..sample_buf.len()], sample_buf[..]);
}
#[test]
fn test_regions() {
// No regions provided should return error.
assert_eq!(
format!("{:?}", GuestMemoryMmap::new(&[]).err().unwrap()),
format!("{:?}", MmapError::NoMemoryRegion)
);
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x800);
let guest_mem =
GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
assert_eq!(guest_mem.num_regions(), 2);
assert_eq!(guest_mem.end_addr(), GuestAddress(0xbff));
assert!(guest_mem.find_region(GuestAddress(0x200)).is_some());
assert!(guest_mem.find_region(GuestAddress(0x600)).is_none());
assert!(guest_mem.find_region(GuestAddress(0xa00)).is_some());
assert!(guest_mem.find_region(GuestAddress(0xc00)).is_none());
}
#[test]
fn test_address_in_range() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x800);
let guest_mem =
GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
assert!(guest_mem.address_in_range(GuestAddress(0x200)));
assert!(!guest_mem.address_in_range(GuestAddress(0x600)));
assert!(guest_mem.address_in_range(GuestAddress(0xa00)));
assert!(!guest_mem.address_in_range(GuestAddress(0xc00)));
}
#[test]
fn test_check_address() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x800);
let guest_mem =
GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
assert_eq!(
guest_mem.check_address(GuestAddress(0x200)),
Some(GuestAddress(0x200))
);
assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None);
assert_eq!(
guest_mem.check_address(GuestAddress(0xa00)),
Some(GuestAddress(0xa00))
);
assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None);
}
#[test]
fn test_to_region_addr() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x800);
let guest_mem =
GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none());
let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap();
let (r1, addr1) = guest_mem.to_region_addr(GuestAddress(0xa00)).unwrap();
assert!(r0.as_ptr() == r1.as_ptr());
assert_eq!(addr0, MemoryRegionAddress(0));
assert_eq!(addr1, MemoryRegionAddress(0x200));
}
#[test]
fn test_get_host_address() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x800);
let guest_mem =
GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_none());
let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap();
let ptr1 = guest_mem.get_host_address(GuestAddress(0xa00)).unwrap();
assert_eq!(
ptr0,
guest_mem.find_region(GuestAddress(0x800)).unwrap().as_ptr()
);
assert_eq!(unsafe { ptr0.offset(0x200) }, ptr1);
}
#[test]
fn test_deref() {
let start_addr = GuestAddress(0x0);
let guest_mem = GuestMemoryMmap::new(&[(start_addr, 0x400)]).unwrap();
let sample_buf = &[1, 2, 3, 4, 5];
assert_eq!(guest_mem.write(sample_buf, start_addr).unwrap(), 5);
let slice = guest_mem
.find_region(GuestAddress(0))
.unwrap()
.as_volatile_slice();
let buf = &mut [0, 0, 0, 0, 0];
assert_eq!(slice.read(buf, 0).unwrap(), 5);
assert_eq!(buf, sample_buf);
}
#[test]
fn mapped_file_regions() {
let mut f = tempfile().unwrap();
let empty_buf = &[0; 16384];
assert!(f.write_all(empty_buf).is_ok());
let mem_map = MmapRegion::from_fd(&f, empty_buf.len(), 0).unwrap();
let guest_reg = GuestRegionMmap::new(mem_map, GuestAddress(0x8000));
let mut region_vec = Vec::new();
region_vec.push(guest_reg);
let guest_mem = GuestMemoryMmap::from_regions(region_vec).unwrap();
assert_eq!(guest_mem.num_regions(), 1);
assert!(guest_mem.find_region(GuestAddress(0)).is_none());
assert!(guest_mem.find_region(GuestAddress(0x8000)).is_some());
}
#[test]
fn overlap_memory() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x1000);
let res = GuestMemoryMmap::new(&[(start_addr1, 0x2000), (start_addr2, 0x2000)]);
assert_eq!(
format!("{:?}", res.err().unwrap()),
format!("{:?}", MmapError::MemoryRegionOverlap)
);
}
#[test]
fn test_read_u64() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x1000);
let bad_addr = GuestAddress(0x2001);
let bad_addr2 = GuestAddress(0x1ffc);
let max_addr = GuestAddress(0x2000);
let gm = GuestMemoryMmap::new(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
let val1: u64 = 0xaa55_aa55_aa55_aa55;
let val2: u64 = 0x55aa_55aa_55aa_55aa;
assert_eq!(
format!("{:?}", gm.write_obj(val1, bad_addr).err().unwrap()),
format!("InvalidGuestAddress({:?})", bad_addr,)
);
assert_eq!(
format!("{:?}", gm.write_obj(val1, bad_addr2).err().unwrap()),
format!(
"PartialBuffer {{ expected: {:?}, completed: {:?} }}",
mem::size_of::<u64>(),
max_addr.checked_offset_from(bad_addr2).unwrap()
)
);
gm.write_obj(val1, GuestAddress(0x500)).unwrap();
gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap();
let num1: u64 = gm.read_obj(GuestAddress(0x500)).unwrap();
let num2: u64 = gm.read_obj(GuestAddress(0x1000 + 32)).unwrap();
assert_eq!(val1, num1);
assert_eq!(val2, num2);
}
#[test]
fn write_and_read() {
let mut start_addr = GuestAddress(0x1000);
let gm = GuestMemoryMmap::new(&[(start_addr, 0x400)]).unwrap();
let sample_buf = &[1, 2, 3, 4, 5];
assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 5);
let buf = &mut [0u8; 5];
assert_eq!(gm.read(buf, start_addr).unwrap(), 5);
assert_eq!(buf, sample_buf);
start_addr = GuestAddress(0x13ff);
assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 1);
assert_eq!(gm.read(buf, start_addr).unwrap(), 1);
assert_eq!(buf[0], sample_buf[0]);
}
#[test]
fn read_to_and_write_from_mem() {
let gm = GuestMemoryMmap::new(&[(GuestAddress(0x1000), 0x400)]).unwrap();
let addr = GuestAddress(0x1010);
let mut file = if cfg!(unix) {
File::open(Path::new("/dev/zero")).unwrap()
} else {
File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
};
gm.write_obj(!0u32, addr).unwrap();
gm.read_exact_from(addr, &mut file, mem::size_of::<u32>())
.unwrap();
let value: u32 = gm.read_obj(addr).unwrap();
if cfg!(unix) {
assert_eq!(value, 0);
} else {
assert_eq!(value, 0x0090_5a4d);
}
let mut sink = Vec::new();
gm.write_all_to(addr, &mut sink, mem::size_of::<u32>())
.unwrap();
if cfg!(unix) {
assert_eq!(sink, vec![0; mem::size_of::<u32>()]);
} else {
assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
};
}
#[test]
fn create_vec_with_regions() {
let region_size = 0x400;
let regions = vec![
(GuestAddress(0x0), region_size),
(GuestAddress(0x1000), region_size),
];
let mut iterated_regions = Vec::new();
let gm = GuestMemoryMmap::new(&regions).unwrap();
let res: Result<()> = gm.with_regions(|_, region| {
assert_eq!(region.len(), region_size as GuestUsize);
Ok(())
});
assert!(res.is_ok());
let res: Result<()> = gm.with_regions_mut(|_, region| {
iterated_regions.push((region.start_addr(), region.len() as usize));
Ok(())
});
assert!(res.is_ok());
assert_eq!(regions, iterated_regions);
assert_eq!(gm.clone().regions[0].guest_base, regions[0].0);
assert_eq!(gm.clone().regions[1].guest_base, regions[1].0);
}
#[test]
fn test_access_cross_boundary() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x1000);
let gm = GuestMemoryMmap::new(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
let sample_buf = &[1, 2, 3, 4, 5];
assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5);
let buf = &mut [0u8; 5];
assert_eq!(gm.read(buf, GuestAddress(0xffc)).unwrap(), 5);
assert_eq!(buf, sample_buf);
}
}

View File

@ -1,168 +0,0 @@
// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! A default Unix implementation of the GuestMemory trait by mmap()-ing guest's memory into
//! the current process.
//!
//! The main structs to access guest's memory are:
//! - [MmapRegion](struct.MmapRegion.html): mmap a continuous region of guest's memory into the
//! current process
//! - [GuestRegionMmap](struct.GuestRegionMmap.html): tracks a mapping of memory in the current
//! process and the corresponding base address. It relays guest memory access requests to the
//! underlying [MmapRegion](struct.MmapRegion.html) object.
//! - [GuestMemoryMmap](struct.GuestMemoryMmap.html): provides methods to access a collection of
//! GuestRegionMmap objects.
use libc;
use std::io;
use std::ptr::null_mut;
use mmap::AsSlice;
use volatile_memory::{self, compute_offset, VolatileMemory, VolatileSlice};
use std::os::unix::io::AsRawFd;
/// A backend driver to access guest's physical memory by mmapping guest's memory into the current
/// process.
/// For a combination of a 32-bit hypervisor and a 64-bit virtual machine, only part of the guest's
/// physical memory may be mapped into the current process due to the limited process virtual address
/// space size.
#[derive(Debug)]
pub struct MmapRegion {
addr: *mut u8,
size: usize,
}
// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for MmapRegion {}
unsafe impl Sync for MmapRegion {}
impl MmapRegion {
/// Creates an anonymous shared mapping of `size` bytes.
///
/// # Arguments
/// * `size` - Size of memory region in bytes.
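///
/// A quick sketch (`len()` here comes from the `VolatileMemory` impl below):
///
/// ```
/// # use vm_memory::{MmapRegion, VolatileMemory};
/// let m = MmapRegion::new(1024).unwrap();
/// assert_eq!(m.len(), 1024);
/// ```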
pub fn new(size: usize) -> io::Result<Self> {
// This is safe because we are creating an anonymous mapping in a place not already used by
// any other area in this process.
let addr = unsafe {
libc::mmap(
null_mut(),
size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
-1,
0,
)
};
if addr == libc::MAP_FAILED {
return Err(io::Error::last_os_error());
}
Ok(Self {
addr: addr as *mut u8,
size,
})
}
/// Maps the `size` bytes starting at `offset` bytes of the given `fd`.
///
/// # Arguments
/// * `fd` - File descriptor to mmap from.
/// * `size` - Size of memory region in bytes.
/// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
pub fn from_fd(fd: &AsRawFd, size: usize, offset: libc::off_t) -> io::Result<Self> {
// This is safe because we are creating a mapping in a place not already used by any other
// area in this process.
let addr = unsafe {
libc::mmap(
null_mut(),
size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
fd.as_raw_fd(),
offset,
)
};
if addr == libc::MAP_FAILED {
return Err(io::Error::last_os_error());
}
Ok(Self {
addr: addr as *mut u8,
size,
})
}
/// Returns a pointer to the beginning of the memory region. Should only be
/// used for passing this region to ioctls for setting guest memory.
pub fn as_ptr(&self) -> *mut u8 {
self.addr
}
}
impl AsSlice for MmapRegion {
// Returns the region as a slice of bytes; unsafe because of possible
// aliasing with other users of the mapping.
unsafe fn as_slice(&self) -> &[u8] {
// This is safe because we mapped the area at addr ourselves, so this slice will not
// overflow. However, it is possible to alias.
std::slice::from_raw_parts(self.addr, self.size)
}
// The lint is allowed because interior mutability is intended here.
#[allow(clippy::mut_from_ref)]
unsafe fn as_mut_slice(&self) -> &mut [u8] {
// This is safe because we mapped the area at addr ourselves, so this slice will not
// overflow. However, it is possible to alias.
std::slice::from_raw_parts_mut(self.addr, self.size)
}
}
impl VolatileMemory for MmapRegion {
fn len(&self) -> usize {
self.size
}
fn get_slice(&self, offset: usize, count: usize) -> volatile_memory::Result<VolatileSlice> {
let end = compute_offset(offset, count)?;
if end > self.size {
return Err(volatile_memory::Error::OutOfBounds { addr: end });
}
// Safe because we checked that offset + count was within our range and we only ever hand
// out volatile accessors.
Ok(unsafe { VolatileSlice::new((self.addr as usize + offset) as *mut _, count) })
}
}
impl Drop for MmapRegion {
fn drop(&mut self) {
// This is safe because we mmap the area at addr ourselves, and nobody
// else is holding a reference to it.
unsafe {
libc::munmap(self.addr as *mut libc::c_void, self.size);
}
}
}
#[cfg(test)]
mod tests {
use mmap_unix::MmapRegion;
use std::os::unix::io::FromRawFd;
#[test]
fn map_invalid_fd() {
let fd = unsafe { std::fs::File::from_raw_fd(-1) };
let e = MmapRegion::from_fd(&fd, 1024, 0).unwrap_err();
assert_eq!(e.raw_os_error(), Some(libc::EBADF));
}
}

View File

@ -1,225 +0,0 @@
// Copyright (C) 2019 CrowdStrike, Inc. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
//! A default Windows implementation of the GuestMemory trait using VirtualAlloc() and MapViewOfFile().
//!
//! The main structs to access guest's memory are:
//! - [MmapRegion](struct.MmapRegion.html): mmap a continuous region of guest's memory into the
//! current process
//! - [GuestRegionMmap](struct.GuestRegionMmap.html): tracks a mapping of memory in the current
//! process and the corresponding base address. It relays guest memory access requests to the
//! underlying [MmapRegion](struct.MmapRegion.html) object.
//! - [GuestMemoryMmap](struct.GuestMemoryMmap.html): provides methods to access a collection of
//! GuestRegionMmap objects.
use libc;
use std::io;
use std::ptr::null_mut;
use mmap::AsSlice;
use volatile_memory::{self, compute_offset, VolatileMemory, VolatileSlice};
use libc::{c_void, size_t};
use std;
use std::os::windows::io::{AsRawHandle, RawHandle};
use std::ptr::null;
#[allow(non_snake_case)]
#[link(name = "kernel32")]
extern "stdcall" {
pub fn VirtualAlloc(
lpAddress: *mut c_void,
dwSize: size_t,
flAllocationType: u32,
flProtect: u32,
) -> *mut c_void;
pub fn VirtualFree(lpAddress: *mut c_void, dwSize: size_t, dwFreeType: u32) -> u32;
pub fn CreateFileMappingA(
hFile: RawHandle, // HANDLE
lpFileMappingAttributes: *const c_void, // LPSECURITY_ATTRIBUTES
flProtect: u32, // DWORD
dwMaximumSizeHigh: u32, // DWORD
dwMaximumSizeLow: u32, // DWORD
lpName: *const u8, // LPCSTR
) -> RawHandle; // HANDLE
pub fn MapViewOfFile(
hFileMappingObject: RawHandle,
dwDesiredAccess: u32,
dwFileOffsetHigh: u32,
dwFileOffsetLow: u32,
dwNumberOfBytesToMap: size_t,
) -> *mut c_void;
pub fn CloseHandle(hObject: RawHandle) -> u32; // BOOL
}
const MM_HIGHEST_VAD_ADDRESS: u64 = 0x000007FFFFFDFFFF;
const MEM_COMMIT: u32 = 0x00001000;
const MEM_RELEASE: u32 = 0x00008000;
const FILE_MAP_ALL_ACCESS: u32 = 0xf001f;
const PAGE_READWRITE: u32 = 0x04;
pub const MAP_FAILED: *mut c_void = 0 as *mut c_void;
pub const INVALID_HANDLE_VALUE: RawHandle = (-1isize) as RawHandle;
#[allow(dead_code)]
pub const ERROR_INVALID_PARAMETER: i32 = 87;
/// A backend driver to access guest's physical memory by mmapping guest's memory into the current
/// process.
/// For a combination of a 32-bit hypervisor and a 64-bit virtual machine, only part of the guest's
/// physical memory may be mapped into the current process due to the limited process virtual address
/// space size.
#[derive(Debug)]
pub struct MmapRegion {
addr: *mut u8,
size: usize,
}
// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for MmapRegion {}
unsafe impl Sync for MmapRegion {}
impl MmapRegion {
/// Creates an anonymous shared mapping of `size` bytes.
///
/// # Arguments
/// * `size` - Size of memory region in bytes.
pub fn new(size: usize) -> io::Result<Self> {
if (size == 0) || (size > MM_HIGHEST_VAD_ADDRESS as usize) {
return Err(io::Error::from_raw_os_error(libc::EINVAL));
}
// This is safe because we are creating an anonymous mapping in a place not already used by
// any other area in this process.
let addr = unsafe { VirtualAlloc(0 as *mut c_void, size, MEM_COMMIT, PAGE_READWRITE) };
if addr == MAP_FAILED {
return Err(io::Error::last_os_error());
}
Ok(Self {
addr: addr as *mut u8,
size,
})
}
/// Maps the `size` bytes starting at `offset` bytes of the given `file`.
///
/// # Arguments
/// * `file` - Raw handle to a file to map into the address space.
/// * `size` - Size of memory region in bytes.
/// * `offset` - Offset in bytes from the beginning of `file` to start the mapping.
pub fn from_fd(file: &AsRawHandle, size: usize, offset: libc::off_t) -> io::Result<Self> {
let handle = file.as_raw_handle();
if handle == INVALID_HANDLE_VALUE {
return Err(io::Error::from_raw_os_error(libc::EBADF));
}
let mapping = unsafe {
CreateFileMappingA(
handle,
null(),
PAGE_READWRITE,
(size >> 32) as u32,
size as u32,
null(),
)
};
if mapping == 0 as RawHandle {
return Err(io::Error::last_os_error());
}
// This is safe because we are creating a mapping in a place not already used by any other
// area in this process.
let addr = unsafe {
MapViewOfFile(
mapping,
FILE_MAP_ALL_ACCESS,
(offset as u64 >> 32) as u32,
offset as u32,
size,
)
};
unsafe {
CloseHandle(mapping);
}
if addr == null_mut() {
return Err(io::Error::last_os_error());
}
Ok(Self {
addr: addr as *mut u8,
size,
})
}
/// Returns a pointer to the beginning of the memory region. Should only be
/// used for passing this region to ioctls for setting guest memory.
pub fn as_ptr(&self) -> *mut u8 {
self.addr
}
}
impl AsSlice for MmapRegion {
// Returns the region as a slice of bytes; unsafe because of possible
// aliasing with other users of the mapping.
unsafe fn as_slice(&self) -> &[u8] {
// This is safe because we mapped the area at addr ourselves, so this slice will not
// overflow. However, it is possible to alias.
std::slice::from_raw_parts(self.addr, self.size)
}
// The lint is allowed because interior mutability is intended here.
#[allow(clippy::mut_from_ref)]
unsafe fn as_mut_slice(&self) -> &mut [u8] {
// This is safe because we mapped the area at addr ourselves, so this slice will not
// overflow. However, it is possible to alias.
std::slice::from_raw_parts_mut(self.addr, self.size)
}
}
impl VolatileMemory for MmapRegion {
fn len(&self) -> usize {
self.size
}
fn get_slice(&self, offset: usize, count: usize) -> volatile_memory::Result<VolatileSlice> {
let end = compute_offset(offset, count)?;
if end > self.size {
return Err(volatile_memory::Error::OutOfBounds { addr: end });
}
// Safe because we checked that offset + count was within our range and we only ever hand
// out volatile accessors.
Ok(unsafe { VolatileSlice::new((self.addr as usize + offset) as *mut _, count) })
}
}
impl Drop for MmapRegion {
fn drop(&mut self) {
// This is safe because we mmap the area at addr ourselves, and nobody
// else is holding a reference to it.
unsafe {
VirtualFree(self.addr as *mut libc::c_void, self.size, MEM_RELEASE);
}
}
}
#[cfg(test)]
mod tests {
use mmap_windows::{MmapRegion, INVALID_HANDLE_VALUE};
use std::os::windows::io::FromRawHandle;
#[test]
fn map_invalid_handle() {
let fd = unsafe { std::fs::File::from_raw_handle(INVALID_HANDLE_VALUE) };
let e = MmapRegion::from_fd(&fd, 1024, 0).unwrap_err();
assert_eq!(e.raw_os_error(), Some(libc::EBADF));
}
}

View File

@ -1,967 +0,0 @@
// Portions Copyright 2019 Red Hat, Inc.
//
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Types for volatile access to memory.
//!
//! Two of the core rules for safe Rust are no data races and no aliased mutable references.
//! `VolatileRef` and `VolatileSlice`, along with types that produce those which implement
//! `VolatileMemory`, allow us to sidestep those rules by wrapping pointers that absolutely have to
//! be accessed volatile. Some systems really do need to operate on shared memory and can't have the
//! compiler reordering or eliding access because it has no visibility into what other systems are
//! doing with that hunk of memory.
//!
//! For the purposes of maintaining safety, volatile memory has some rules of its own:
//! 1. No references or slices to volatile memory (`&` or `&mut`).
//! 2. Access should always be done with a volatile read or write.
//! The first rule is because having references of any kind to memory considered volatile would
//! violate pointer aliasing. The second is because non-volatile accesses are inherently undefined if
//! done concurrently without synchronization. With volatile access we know that the compiler has
//! not reordered or elided the access.
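//!
//! A small sketch of the intended access pattern, assuming the `store`/`load`
//! accessors on `VolatileRef` defined later in this file; no `&`/`&mut` to the
//! underlying memory is ever held:
//!
//! ```
//! # use vm_memory::VolatileMemory;
//! let mut mem = [0u8; 32];
//! let mem_ref = &mut mem[..];
//! let vslice = mem_ref.as_volatile_slice();
//! let vref = vslice.get_ref::<u32>(4).unwrap();
//! vref.store(0x1234_5678u32);
//! assert_eq!(vref.load(), 0x1234_5678u32);
//! ```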
use std::cmp::min;
use std::fmt;
use std::io::{self, Read, Write};
use std::marker::PhantomData;
use std::mem::size_of;
use std::ptr::copy;
use std::ptr::{read_volatile, write_volatile};
use std::result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
use std::usize;
use bytes::{ByteValued, Bytes};
/// VolatileMemory related error codes
#[allow(missing_docs)]
#[derive(Debug)]
pub enum Error {
/// `addr` is out of bounds of the volatile memory slice.
OutOfBounds { addr: usize },
/// Taking a slice at `base` with `offset` would overflow `usize`.
Overflow { base: usize, offset: usize },
/// Writing to memory failed
IOError(io::Error),
/// Incomplete read or write
PartialBuffer { expected: usize, completed: usize },
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Error::OutOfBounds { addr } => write!(f, "address 0x{:x} is out of bounds", addr),
Error::Overflow { base, offset } => write!(
f,
"address 0x{:x} offset by 0x{:x} would overflow",
base, offset
),
Error::IOError(error) => write!(f, "{}", error),
Error::PartialBuffer {
expected,
completed,
} => write!(
f,
"only used {} bytes in {} long buffer",
completed, expected
),
}
}
}
/// Result of volatile memory operations
pub type Result<T> = result::Result<T, Error>;
/// Convenience function for computing `base + offset` which returns
/// `Err(Error::Overflow)` instead of panicking in the case `base + offset` exceeds
/// `usize::MAX`.
///
/// # Examples
///
/// ```
/// # use vm_memory::volatile_memory::*;
/// # fn get_slice(offset: usize, count: usize) -> Result<()> {
/// let mem_end = compute_offset(offset, count)?;
/// if mem_end > 100 {
/// return Err(Error::OutOfBounds{addr: mem_end});
/// }
/// # Ok(())
/// # }
/// ```
pub fn compute_offset(base: usize, offset: usize) -> Result<usize> {
match base.checked_add(offset) {
None => Err(Error::Overflow { base, offset }),
Some(m) => Ok(m),
}
}
/// Trait for types that support raw volatile access to their data.
pub trait VolatileMemory {
/// Gets the size of this slice.
fn len(&self) -> usize;
/// Check whether the region is empty.
fn is_empty(&self) -> bool {
self.len() == 0
}
/// Gets a slice of memory at `offset` that is `count` bytes in length and supports volatile
/// access.
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice>;
/// Gets a slice of memory for the entire region that supports volatile access.
fn as_volatile_slice(&self) -> VolatileSlice {
self.get_slice(0, self.len()).unwrap()
}
/// Gets a `VolatileRef` at `offset`.
fn get_ref<T: ByteValued>(&self, offset: usize) -> Result<VolatileRef<T>> {
let slice = self.get_slice(offset, size_of::<T>())?;
unsafe {
// This is safe because the pointer is range-checked by get_slice, and
// the lifetime is the same as self.
Ok(VolatileRef::<T>::new(slice.addr))
}
}
/// Check that addr + count is valid and return the sum.
fn compute_end_offset(&self, base: usize, offset: usize) -> Result<usize> {
let mem_end = compute_offset(base, offset)?;
if mem_end > self.len() {
return Err(Error::OutOfBounds { addr: mem_end });
}
Ok(mem_end)
}
}
impl<'a> VolatileMemory for &'a mut [u8] {
fn len(&self) -> usize {
<[u8]>::len(self)
}
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
let _ = self.compute_end_offset(offset, count)?;
unsafe {
// This is safe because the pointer is range-checked by compute_end_offset, and
// the lifetime is the same as the original slice.
Ok(VolatileSlice::new(
(self.as_ptr() as usize + offset) as *mut _,
count,
))
}
}
}
#[repr(C, packed)]
struct Packed<T>(T);
/// A slice of raw memory that supports volatile access.
#[derive(Copy, Clone, Debug)]
pub struct VolatileSlice<'a> {
addr: *mut u8,
size: usize,
phantom: PhantomData<&'a u8>,
}
impl<'a> VolatileSlice<'a> {
/// Creates a slice of raw memory that must support volatile access.
///
/// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long
/// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller
/// must also guarantee that all other users of the given chunk of memory are using volatile
/// accesses.
pub unsafe fn new(addr: *mut u8, size: usize) -> VolatileSlice<'a> {
VolatileSlice {
addr,
size,
phantom: PhantomData,
}
}
/// Gets the address of this slice's memory.
pub fn as_ptr(&self) -> *mut u8 {
self.addr
}
/// Gets the size of this slice.
pub fn len(&self) -> usize {
self.size
}
/// Check whether the slice is empty.
pub fn is_empty(&self) -> bool {
self.size == 0
}
/// Creates a copy of this slice with the address increased by `count` bytes, and the size
/// reduced by `count` bytes.
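///
/// A short sketch of both the in-range and out-of-range cases:
///
/// ```
/// # use vm_memory::VolatileMemory;
/// let mut mem = [0u8; 32];
/// let mem_ref = &mut mem[..];
/// let vslice = mem_ref.as_volatile_slice();
/// assert_eq!(vslice.offset(16).unwrap().len(), 16);
/// assert!(vslice.offset(33).is_err());
/// ```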
pub fn offset(self, count: usize) -> Result<VolatileSlice<'a>> {
let new_addr = (self.addr as usize)
.checked_add(count)
.ok_or(Error::Overflow {
base: self.addr as usize,
offset: count,
})?;
let new_size = self
.size
.checked_sub(count)
.ok_or(Error::OutOfBounds { addr: new_addr })?;
unsafe {
// Safe because the memory has the same lifetime and points to a subset of the
// memory of the original slice.
Ok(VolatileSlice::new(new_addr as *mut u8, new_size))
}
}
/// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
/// `buf`.
///
/// The copy happens from smallest to largest address in `T` sized chunks using volatile reads.
///
/// # Examples
///
/// ```
/// # use std::fs::File;
/// # use std::path::Path;
/// # use vm_memory::VolatileMemory;
/// # fn test_write_null() -> Result<(), ()> {
/// let mut mem = [0u8; 32];
/// let mem_ref = &mut mem[..];
/// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
/// let mut buf = [5u8; 16];
/// vslice.copy_to(&mut buf[..]);
/// for v in &buf[..] {
///     assert_eq!(*v, 0);
/// }
/// # Ok(())
/// # }
/// ```
pub fn copy_to<T>(&self, buf: &mut [T]) -> usize
where
T: ByteValued,
{
let mut addr = self.addr;
let mut i = 0;
for v in buf.iter_mut().take(self.size / size_of::<T>()) {
unsafe {
*v = read_volatile(addr as *const Packed<T>).0;
addr = addr.add(size_of::<T>());
};
i += 1;
}
i
}
/// Copies `self.len()` or `slice.len()` bytes, whichever is smaller, to `slice`.
///
/// The copies happen in an undefined order.
/// # Examples
///
/// ```
/// # use vm_memory::VolatileMemory;
/// # fn test_write_null() -> Result<(), ()> {
/// let mut mem = [0u8; 32];
/// let mem_ref = &mut mem[..];
/// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
/// vslice.copy_to_volatile_slice(vslice.get_slice(16, 16).map_err(|_| ())?);
/// # Ok(())
/// # }
/// ```
pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) {
unsafe {
// Safe because the pointers are range-checked when the slices
// are created, and they never escape the VolatileSlices.
// FIXME: ... however, is it really okay to mix non-volatile
// operations such as copy with read_volatile and write_volatile?
copy(self.addr, slice.addr, min(self.size, slice.size));
}
}
/// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
/// this slice's memory.
///
/// The copy happens from smallest to largest address in `T` sized chunks using volatile writes.
///
/// # Examples
///
/// ```
/// # use std::fs::File;
/// # use std::path::Path;
/// # use vm_memory::VolatileMemory;
/// # fn test_write_null() -> Result<(), ()> {
/// let mut mem = [0u8; 32];
/// let mem_ref = &mut mem[..];
/// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
/// let buf = [5u8; 64];
/// vslice.copy_from(&buf[..]);
/// for i in 0..4 {
/// assert_eq!(vslice.get_ref::<u32>(i * 4).map_err(|_| ())?.load(), 0x05050505);
/// }
/// # Ok(())
/// # }
/// ```
pub fn copy_from<T>(&self, buf: &[T])
where
T: ByteValued,
{
let mut addr = self.addr;
for &v in buf.iter().take(self.size / size_of::<T>()) {
unsafe {
// Safe because the pointers are range-checked when the slices
// are created, and they never escape the VolatileSlices.
write_volatile(addr as *mut Packed<T>, Packed::<T>(v));
addr = addr.add(size_of::<T>());
}
}
}
// These functions are private because it is not valid in general to take
// non-volatile slices of volatile memory; they are only used internally by
// the read/write functions.
unsafe fn as_slice(&self) -> &[u8] {
from_raw_parts(self.addr, self.size)
}
// Interior mutability is expected here, so the lint is allowed.
#[allow(clippy::mut_from_ref)]
unsafe fn as_mut_slice(&self) -> &mut [u8] {
from_raw_parts_mut(self.addr, self.size)
}
}
impl<'a> Bytes<usize> for VolatileSlice<'a> {
type E = Error;
/// Writes a slice to the region at the specified address.
/// Returns the number of bytes written. The number of bytes written can
/// be less than the length of the slice if there isn't enough room in the
/// region.
///
/// # Examples
/// * Write a slice at offset 1020 of a 1024-byte region; only 4 bytes fit.
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let res = vslice.write(&[1,2,3,4,5], 1020);
/// assert!(res.is_ok());
/// assert_eq!(res.unwrap(), 4);
/// ```
fn write(&self, buf: &[u8], addr: usize) -> Result<usize> {
if addr >= self.size {
return Err(Error::OutOfBounds { addr });
}
unsafe {
// Guest memory can't strictly be modeled as a slice because it is
// volatile. Writing to it with what compiles down to a memcpy
// won't hurt anything as long as we get the bounds checks right.
let mut slice: &mut [u8] = &mut self.as_mut_slice()[addr..];
Ok(slice.write(buf).map_err(Error::IOError)?)
}
}
/// Reads to a slice from the region at the specified address.
/// Returns the number of bytes read. The number of bytes read can be less than the length
/// of the slice if there isn't enough room in the region.
///
/// # Examples
/// * Read a slice of size 16 at offset 1010 of a 1024-byte region; only 14 bytes are available.
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let buf = &mut [0u8; 16];
/// let res = vslice.read(buf, 1010);
/// assert!(res.is_ok());
/// assert_eq!(res.unwrap(), 14);
/// ```
fn read(&self, mut buf: &mut [u8], addr: usize) -> Result<usize> {
if addr >= self.size {
return Err(Error::OutOfBounds { addr });
}
unsafe {
// Guest memory can't strictly be modeled as a slice because it is
// volatile. Writing to it with what compiles down to a memcpy
// won't hurt anything as long as we get the bounds checks right.
let slice: &[u8] = &self.as_slice()[addr..];
Ok(buf.write(slice).map_err(Error::IOError)?)
}
}
/// Writes a slice to the region at the specified address.
///
/// # Examples
/// * Write a slice at offset 256.
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let res = vslice.write_slice(&[1,2,3,4,5], 256);
/// assert!(res.is_ok());
/// assert_eq!(res.unwrap(), ());
/// ```
fn write_slice(&self, buf: &[u8], addr: usize) -> Result<()> {
let len = self.write(buf, addr)?;
if len != buf.len() {
return Err(Error::PartialBuffer {
expected: buf.len(),
completed: len,
});
}
Ok(())
}
/// Reads to a slice from the region at the specified address.
///
/// # Examples
/// * Read a slice of size 16 at offset 256.
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let buf = &mut [0u8; 16];
/// let res = vslice.read_slice(buf, 256);
/// assert!(res.is_ok());
/// assert_eq!(res.unwrap(), ());
/// ```
fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<()> {
let len = self.read(buf, addr)?;
if len != buf.len() {
return Err(Error::PartialBuffer {
expected: buf.len(),
completed: len,
});
}
Ok(())
}
/// Reads up to `count` bytes from a readable object like a File and writes them to the region.
///
/// # Examples
///
/// * Read bytes from /dev/urandom
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # use std::fs::File;
/// # use std::path::Path;
/// # fn test_read_random() -> Result<u32, ()> {
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
/// vslice.read_from(32, &mut file, 128).map_err(|_| ())?;
/// let rand_val: u32 = vslice.read_obj(40).map_err(|_| ())?;
/// # Ok(rand_val)
/// # }
/// ```
fn read_from<F>(&self, addr: usize, src: &mut F, count: usize) -> Result<usize>
where
F: Read,
{
let end = self.compute_end_offset(addr, count)?;
unsafe {
// It is safe to overwrite the volatile memory. Accessing the guest
// memory as a mutable slice is OK because nothing assumes another
// thread won't change what is loaded.
let dst = &mut self.as_mut_slice()[addr..end];
src.read(dst).map_err(Error::IOError)
}
}
/// Reads exactly `count` bytes from a readable object like a File and writes them to the region.
///
/// # Examples
///
/// * Read bytes from /dev/urandom
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # use std::fs::File;
/// # use std::path::Path;
/// # fn test_read_random() -> Result<u32, ()> {
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
/// vslice.read_exact_from(32, &mut file, 128).map_err(|_| ())?;
/// let rand_val: u32 = vslice.read_obj(40).map_err(|_| ())?;
/// # Ok(rand_val)
/// # }
/// ```
fn read_exact_from<F>(&self, addr: usize, src: &mut F, count: usize) -> Result<()>
where
F: Read,
{
let end = self.compute_end_offset(addr, count)?;
unsafe {
// It is safe to overwrite the volatile memory. Accessing the guest
// memory as a mutable slice is OK because nothing assumes another
// thread won't change what is loaded.
let dst = &mut self.as_mut_slice()[addr..end];
src.read_exact(dst).map_err(Error::IOError)?;
}
Ok(())
}
/// Reads data from the region to a writable object.
///
/// # Examples
///
/// * Write 128 bytes to /dev/null
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # use std::fs::File;
/// # use std::path::Path;
/// # fn test_write_null() -> Result<(), ()> {
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let mut file = File::open(Path::new("/dev/null")).map_err(|_| ())?;
/// vslice.write_to(32, &mut file, 128).map_err(|_| ())?;
/// # Ok(())
/// # }
/// ```
fn write_to<F>(&self, addr: usize, dst: &mut F, count: usize) -> Result<usize>
where
F: Write,
{
let end = self.compute_end_offset(addr, count)?;
unsafe {
// It is safe to read from volatile memory. Accessing the guest
// memory as a slice is OK because nothing assumes another thread
// won't change what is loaded.
let src = &self.as_mut_slice()[addr..end];
dst.write(src).map_err(Error::IOError)
}
}
/// Reads data from the region to a writable object.
///
/// # Examples
///
/// * Write 128 bytes to /dev/null
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # use std::fs::File;
/// # use std::path::Path;
/// # fn test_write_null() -> Result<(), ()> {
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let mut file = File::open(Path::new("/dev/null")).map_err(|_| ())?;
/// vslice.write_all_to(32, &mut file, 128).map_err(|_| ())?;
/// # Ok(())
/// # }
/// ```
fn write_all_to<F>(&self, addr: usize, dst: &mut F, count: usize) -> Result<()>
where
F: Write,
{
let end = self.compute_end_offset(addr, count)?;
unsafe {
// It is safe to read from volatile memory. Accessing the guest
// memory as a slice is OK because nothing assumes another thread
// won't change what is loaded.
let src = &self.as_mut_slice()[addr..end];
dst.write_all(src).map_err(Error::IOError)?;
}
Ok(())
}
}
impl<'a> VolatileMemory for VolatileSlice<'a> {
fn len(&self) -> usize {
self.size
}
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
let _ = self.compute_end_offset(offset, count)?;
Ok(unsafe {
// This is safe because the pointer is range-checked by compute_end_offset, and
// the lifetime is the same as self.
VolatileSlice::new((self.addr as usize + offset) as *mut u8, count)
})
}
}
/// A memory location that supports volatile access of a `T`.
///
/// # Examples
///
/// ```
/// # use vm_memory::VolatileRef;
/// let mut v = 5u32;
/// assert_eq!(v, 5);
/// let v_ref = unsafe { VolatileRef::<u32>::new(&mut v as *mut u32 as *mut u8) };
/// assert_eq!(v_ref.load(), 5);
/// v_ref.store(500);
/// assert_eq!(v, 500);
/// ```
#[derive(Debug)]
pub struct VolatileRef<'a, T: ByteValued>
where
T: 'a,
{
addr: *mut Packed<T>,
phantom: PhantomData<&'a T>,
}
#[allow(clippy::len_without_is_empty)]
impl<'a, T: ByteValued> VolatileRef<'a, T> {
/// Creates a reference to raw memory that must support volatile access of `T` sized chunks.
///
/// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a
/// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller
/// must also guarantee that all other users of the given chunk of memory are using volatile
/// accesses.
pub unsafe fn new(addr: *mut u8) -> VolatileRef<'a, T> {
VolatileRef {
addr: addr as *mut Packed<T>,
phantom: PhantomData,
}
}
/// Gets the address of this ref's memory.
pub fn as_ptr(&self) -> *mut u8 {
self.addr as *mut u8
}
/// Gets the size of the referenced type `T`.
///
/// # Examples
///
/// ```
/// # use std::mem::size_of;
/// # use vm_memory::VolatileRef;
/// let v_ref = unsafe { VolatileRef::<u32>::new(0 as *mut _) };
/// assert_eq!(v_ref.len(), size_of::<u32>() as usize);
/// ```
pub fn len(&self) -> usize {
size_of::<T>()
}
/// Does a volatile write of the value `v` to the address of this ref.
#[inline(always)]
pub fn store(&self, v: T) {
unsafe { write_volatile(self.addr, Packed::<T>(v)) };
}
/// Does a volatile read of the value at the address of this ref.
#[inline(always)]
pub fn load(&self) -> T {
// For the purposes of demonstrating why read_volatile is necessary, try replacing the code
// in this function with the commented code below and running `cargo test --release`.
// unsafe { *(self.addr as *const T) }
unsafe { read_volatile(self.addr).0 }
}
/// Converts this `T` reference to a raw slice with the same size and address.
pub fn to_slice(&self) -> VolatileSlice<'a> {
unsafe { VolatileSlice::new(self.addr as *mut u8, size_of::<T>()) }
}
}
#[cfg(test)]
mod tests {
extern crate tempfile;
use super::*;
use self::tempfile::tempfile;
use std::sync::Arc;
use std::thread::{sleep, spawn};
use std::time::Duration;
use std::fs::File;
use std::path::Path;
#[derive(Clone)]
struct VecMem {
mem: Arc<Vec<u8>>,
}
impl VecMem {
fn new(size: usize) -> VecMem {
let mut mem = Vec::new();
mem.resize(size, 0);
VecMem { mem: Arc::new(mem) }
}
}
impl VolatileMemory for VecMem {
fn len(&self) -> usize {
self.mem.len()
}
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
let _ = self.compute_end_offset(offset, count)?;
Ok(unsafe {
VolatileSlice::new((self.mem.as_ptr() as usize + offset) as *mut _, count)
})
}
}
#[test]
fn ref_store() {
let mut a = [0u8; 1];
{
let a_ref = &mut a[..];
let v_ref = a_ref.get_ref(0).unwrap();
v_ref.store(2u8);
}
assert_eq!(a[0], 2);
}
#[test]
fn ref_load() {
let mut a = [5u8; 1];
{
let a_ref = &mut a[..];
let c = {
let v_ref = a_ref.get_ref::<u8>(0).unwrap();
assert_eq!(v_ref.load(), 5u8);
v_ref
};
// To make sure we can take a v_ref out of the scope we made it in:
c.load();
// but not too far:
// c
} //.load()
;
}
#[test]
fn ref_to_slice() {
let mut a = [1u8; 5];
let a_ref = &mut a[..];
let v_ref = a_ref.get_ref(1).unwrap();
v_ref.store(0x1234_5678u32);
let ref_slice = v_ref.to_slice();
assert_eq!(v_ref.as_ptr() as usize, ref_slice.as_ptr() as usize);
assert_eq!(v_ref.len(), ref_slice.len());
assert!(!ref_slice.is_empty());
}
#[test]
fn observe_mutate() {
let a = VecMem::new(1);
let a_clone = a.clone();
let v_ref = a.get_ref::<u8>(0).unwrap();
v_ref.store(99);
spawn(move || {
sleep(Duration::from_millis(10));
let clone_v_ref = a_clone.get_ref::<u8>(0).unwrap();
clone_v_ref.store(0);
});
// Technically this is a race condition, but we have to observe the v_ref's value changing
// somehow, and this helps to ensure the sleep actually happens before the store rather than
// being reordered by the compiler.
assert_eq!(v_ref.load(), 99);
// Granted we could have a machine that manages to perform this many volatile loads in the
// amount of time the spawned thread sleeps, but the most likely reason the retry limit will
// get reached is because v_ref.load() is not actually performing the required volatile read
// or v_ref.store() is not doing a volatile write. A timer based solution was avoided
// because that might use a syscall which could hint the optimizer to reload v_ref's pointer
// regardless of volatile status. Note that we use a longer retry duration for optimized
// builds.
#[cfg(debug_assertions)]
const RETRY_MAX: usize = 500_000_000;
#[cfg(not(debug_assertions))]
const RETRY_MAX: usize = 10_000_000_000;
let mut retry = 0;
while v_ref.load() == 99 && retry < RETRY_MAX {
retry += 1;
}
assert_ne!(retry, RETRY_MAX, "maximum retry exceeded");
assert_eq!(v_ref.load(), 0);
}
#[test]
fn slice_len() {
let a = VecMem::new(100);
let s = a.get_slice(0, 27).unwrap();
assert_eq!(s.len(), 27);
let s = a.get_slice(34, 27).unwrap();
assert_eq!(s.len(), 27);
let s = s.get_slice(20, 5).unwrap();
assert_eq!(s.len(), 5);
}
#[test]
fn slice_is_empty() {
let a = VecMem::new(100);
let s = a.get_slice(0, 27).unwrap();
assert!(!s.is_empty());
let s = a.get_slice(34, 0).unwrap();
assert!(s.is_empty());
}
#[test]
fn slice_overflow_error() {
use std::usize::MAX;
let a = VecMem::new(1);
let res = a.get_slice(MAX, 1).unwrap_err();
assert_matches!(
res,
Error::Overflow {
base: MAX,
offset: 1,
}
);
}
#[test]
fn slice_oob_error() {
let a = VecMem::new(100);
a.get_slice(50, 50).unwrap();
let res = a.get_slice(55, 50).unwrap_err();
assert_matches!(res, Error::OutOfBounds { addr: 105 });
}
#[test]
fn ref_overflow_error() {
use std::usize::MAX;
let a = VecMem::new(1);
let res = a.get_ref::<u8>(MAX).unwrap_err();
assert_matches!(
res,
Error::Overflow {
base: MAX,
offset: 1,
}
);
}
#[test]
fn ref_oob_error() {
let a = VecMem::new(100);
a.get_ref::<u8>(99).unwrap();
let res = a.get_ref::<u16>(99).unwrap_err();
assert_matches!(res, Error::OutOfBounds { addr: 101 });
}
#[test]
fn ref_oob_too_large() {
let a = VecMem::new(3);
let res = a.get_ref::<u32>(0).unwrap_err();
assert_matches!(res, Error::OutOfBounds { addr: 4 });
}
#[test]
fn slice_store() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
let r = a.get_ref(2).unwrap();
r.store(9u16);
assert_eq!(s.read_obj::<u16>(2).unwrap(), 9);
}
#[test]
fn test_write_past_end() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
let res = s.write(&[1, 2, 3, 4, 5, 6], 0);
assert!(res.is_ok());
assert_eq!(res.unwrap(), 5);
}
#[test]
fn slice_read_and_write() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
let sample_buf = [1, 2, 3];
assert!(s.write(&sample_buf, 5).is_err());
assert!(s.write(&sample_buf, 2).is_ok());
let mut buf = [0u8; 3];
assert!(s.read(&mut buf, 5).is_err());
assert!(s.read_slice(&mut buf, 2).is_ok());
assert_eq!(buf, sample_buf);
}
#[test]
fn obj_read_and_write() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
assert!(s.write_obj(55u16, 4).is_err());
assert!(s.write_obj(55u16, core::usize::MAX).is_err());
assert!(s.write_obj(55u16, 2).is_ok());
assert_eq!(s.read_obj::<u16>(2).unwrap(), 55u16);
assert!(s.read_obj::<u16>(4).is_err());
assert!(s.read_obj::<u16>(core::usize::MAX).is_err());
}
#[test]
fn mem_read_and_write() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
assert!(s.write_obj(!0u32, 1).is_ok());
let mut file = if cfg!(unix) {
File::open(Path::new("/dev/zero")).unwrap()
} else {
File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
};
assert!(s.read_exact_from(2, &mut file, size_of::<u32>()).is_err());
assert!(s
.read_exact_from(core::usize::MAX, &mut file, size_of::<u32>())
.is_err());
assert!(s.read_exact_from(1, &mut file, size_of::<u32>()).is_ok());
let mut f = tempfile().unwrap();
assert!(s.read_exact_from(1, &mut f, size_of::<u32>()).is_err());
format!("{:?}", s.read_exact_from(1, &mut f, size_of::<u32>()));
let value = s.read_obj::<u32>(1).unwrap();
if cfg!(unix) {
assert_eq!(value, 0);
} else {
assert_eq!(value, 0x0090_5a4d);
}
let mut sink = Vec::new();
assert!(s.write_all_to(1, &mut sink, size_of::<u32>()).is_ok());
assert!(s.write_all_to(2, &mut sink, size_of::<u32>()).is_err());
assert!(s
.write_all_to(core::usize::MAX, &mut sink, size_of::<u32>())
.is_err());
format!("{:?}", s.write_all_to(2, &mut sink, size_of::<u32>()));
if cfg!(unix) {
assert_eq!(sink, vec![0; size_of::<u32>()]);
} else {
assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
};
}
#[test]
fn unaligned_read_and_write() {
let a = VecMem::new(7);
let s = a.as_volatile_slice();
let sample_buf: [u8; 7] = [1, 2, 0xAA, 0xAA, 0xAA, 0xAA, 4];
assert!(s.write_slice(&sample_buf, 0).is_ok());
let r = a.get_ref::<u32>(2).unwrap();
assert_eq!(r.load(), 0xAAAA_AAAA);
r.store(0x5555_5555);
let sample_buf: [u8; 7] = [1, 2, 0x55, 0x55, 0x55, 0x55, 4];
let mut buf: [u8; 7] = Default::default();
assert!(s.read_slice(&mut buf, 0).is_ok());
assert_eq!(buf, sample_buf);
}
}


@ -1,61 +0,0 @@
steps:
- label: "build-gnu-x86"
commands:
- cargo build --release
retry:
automatic: false
agents:
platform: x86_64.metal
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "style"
command: cargo fmt --all -- --check
retry:
automatic: false
agents:
platform: x86_64.metal
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "unittests-gnu-x86"
commands:
- cargo test
retry:
automatic: false
agents:
platform: x86_64.metal
plugins:
- docker#v3.0.1:
privileged: true
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "clippy-x86"
commands:
- cargo clippy --all -- -D warnings
retry:
automatic: false
agents:
platform: x86_64.metal
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "coverage-x86"
commands:
- pytest tests/test_coverage.py
retry:
automatic: false
agents:
platform: x86_64.metal
plugins:
- docker#v3.0.1:
privileged: true
image: "fandree/rust-vmm-dev"
always-pull: true


@ -1 +0,0 @@
{"files":{".buildkite/pipeline.yml":"95725fe62c2dc4076b32df9b26c828bcf4e3876092c0401e718e16ce4f513be0",".gitignore/.gitignore":"6b937008e0a58a438c32574627db6961a4d2aff4bf5c703d4e45cb56ef3b168a","Cargo.toml":"f2754a32f2dacc0f8695ee9780193badc85995954ed452d0df4ca1ef8517d089","DESIGN.md":"d877f3093cf94402528a54a97e833fc79d4cbc07934031035f60a2d8d0a542d4","LICENSE-APACHE":"000b4962e6b27176a0ff89cce4be555b16472cafb5671eb2804a8fdac6854793","LICENSE-BSD-3-Clause":"a6d3ebd1c2f37d4fd83d0676621f695fc0cc2d8c6e646cdbb831b46e0650c208","README.md":"3e585f0ceb915bf4734075d2f0a91b626ff2363079baebcf7592a8d51d2c7931","src/cmdline/mod.rs":"30f57b492618155be6129311d0182688e5d1ecd9a3b5fdc9b742cd0c0e1f85da","src/lib.rs":"67d3e4a9ed48495c00fe6698fc606693b15a006fb0599cb18b946ea3929cd0f6","src/loader/bootparam.rs":"054b5a196873965cb48b9e9fa99c2505f523f9c71ca8ca0fad3af02491811fe6","src/loader/elf.rs":"f4e4eb6ffcb114bbf3242a9c72ea7e99631487d63bbbfbcf641bea42b3aba2e1","src/loader/mod.rs":"555c8214415955ab2e47bac0dbc392cd8ed4155e54099767de93e1136cfddd12","src/loader/struct_util.rs":"a98c2854ebd3a0c0ed11e01e474f2da15fb9200754129223710d62cc88ab253a","src/loader/test_elf.bin":"dda390f7546c7ba36bab412a93add96533806154824b77aafc8c153231f3f161","tests/conftest.py":"ee2baacd9da7ac04ffbfd580a7d4fe8a8b166a16e1eeed2d488f7b820abab433","tests/coverage":"69eaa47e0089d5a3e2844fe7ca823b67ff712e382f6c34b0300797a415b7f3e9","tests/test_coverage.py":"2f4bbb571bf52ad796f9f47a3aaace61ee92ab9788980496bb1652826f224150"},"package":null}


@ -1,3 +0,0 @@
/target
**/*.rs.bk
Cargo.lock


@ -1,16 +0,0 @@
[package]
name = "linux-loader"
version = "0.1.0"
authors = ["cathy <cathy.zhang@intel.com>"]
edition = "2018"
license = "Apache-2.0 AND BSD-3-Clause"
[features]
default = ["elf"]
elf = []
bzImage = []
[dependencies.vm-memory]
git = "https://github.com/rust-vmm/vm-memory"
features = ["backend-mmap"]


@ -1,64 +0,0 @@
# ELF Image parsing and loading
The boot process is described below from two perspectives: the loader side and the VMM side.
## Loader side
It follows the ELF standard, which is specified in elf.rs. The ELF header and program headers are interpreted, and the PT_LOAD segments are loaded into guest memory.
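For orientation, here is a minimal sketch of a VMM driving this loader (assuming the vm-memory `GuestMemoryMmap` backend and a `vmlinux` file on disk; error handling elided):

```rust
use std::fs::File;

use linux_loader::loader::{Elf, KernelLoader};
use vm_memory::{GuestAddress, GuestMemoryMmap};

// Map 128 MiB of guest memory at physical address 0 (the constructor shape
// is assumed from vm-memory's mmap backend).
let guest_mem = GuestMemoryMmap::new(&[(GuestAddress(0), 128 << 20)]).unwrap();
let mut kernel = File::open("vmlinux").unwrap();
// No relocation offset and no lower bound check: each PT_LOAD segment lands
// at its phdr.p_paddr.
let result = Elf::load(&guest_mem, None, &mut kernel, None).unwrap();
// result.kernel_load is the guest address the VMM should use for RIP.
```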
### Where kernel is loaded
There are two ways of deciding where the program segments will be loaded, as shown below.
- One way is to provide an option and let the VMM specify where to load the image, based on its memory layout.
- The other way is to load the image at phdr.p_paddr by default.
## Vmm side
### Construct zero page
According to the 64-bit boot protocol, the boot parameters (traditionally known as the "zero page") must be set up, including the setup_header, the e820 table and other fields. However, ELF has no setup_header; nothing returned by the ELF loader can be used to fill the boot parameters, so the VMM is entirely responsible for constructing them.
### Configure vcpu
- RIP, the guest memory address where the kernel was loaded, as returned by the loader
- 64 bit mode with paging enabled
- GDT must be configured and loaded
# bzImage
The bzImage boot process is likewise described from both perspectives.
## Loader side
### What will be returned from loader
bzImage consists of two parts, the setup and the compressed kernel. The compressed kernel part is loaded into guest memory, and the loader returns the following three items to the VMM.
- The start address of the loaded kernel
- The offset in guest memory at which kernel loading ends
- The setup header, which begins at offset 0x01f1 of the bzImage; this is returned in addition to what the ELF loader provides.
### Where kernel is loaded
As with the ELF loader, there are two ways of deciding where the compressed kernel is loaded.
- The VMM specifies where to load the kernel image.
- Load at code32_start (the boot load address) by default.
### Additional checking
As the boot protocol specifies, the kernel is a bzImage kernel if the protocol version is >= 2.00 and the 0x01 bit (LOAD_HIGH) in the loadflags field is set. This check is added to validate the bzImage, as sketched below.
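A sketch of that check, using the field names from the generated `bootparam::setup_header` (the LOAD_HIGH mask is written out here as the literal 0x01):

```rust
// A bzImage requires boot protocol >= 2.00 and LOAD_HIGH set in loadflags.
fn is_bzimage(hdr: &bootparam::setup_header) -> bool {
    hdr.version >= 0x0200 && (hdr.loadflags & 0x01) != 0
}
```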
## Vmm side
### Construct zero page
While the VMM builds the "zero page" with the e820 table and other fields, the bzImage loader returns the setup header to fill in the boot parameters. In particular, setup_header.init_size must be copied into the zero page, as it is used during the head_64.S boot process.
### Configure vcpu
- RIP, the start address of the loaded 64-bit kernel returned by the loader, plus 0x200. Per the 64-bit boot protocol, the kernel is started by jumping to the 64-bit entry point, which is the start address of the loaded kernel plus 0x200 (see the sketch after this list).
- 64 bit mode with paging enabled
- GDT must be configured and loaded
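A hedged sketch of that RIP computation, assuming `result` is the `KernelLoaderResult` returned by the bzImage loader:

```rust
// 64-bit boot protocol: the 64-bit entry point sits 0x200 bytes past the
// start address of the loaded kernel.
let rip = result.kernel_load.raw_value() + 0x200;
```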


@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,27 +0,0 @@
// Copyright 2017 The Chromium OS Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,105 +0,0 @@
# Linux-loader
## Short description
* Parsing and loading vmlinux (raw ELF image) and bzImage images
* Linux command line parsing and generation
* Definitions and helpers for the Linux boot protocol
## How to build
```
cd linux-loader
cargo build
```
## Tests
Our Continuous Integration (CI) pipeline is implemented on top of
[Buildkite](https://buildkite.com/).
For the complete list of tests, check our
[CI pipeline](https://buildkite.com/rust-vmm/vm-virtio-ci).
Each individual test runs in a container. To reproduce a test locally, you can
use the dev-container on both x86 and arm64.
```bash
docker run -it \
--security-opt seccomp=unconfined \
--volume $(pwd):/linux-loader \
fandree/rust-vmm-dev
cd linux-loader/
cargo test
```
### Test Profiles
The integration tests support two test profiles:
- **devel**: this is the recommended profile for running the integration tests
on a local development machine.
- **ci** (default option): this is the profile used when running the
integration tests as part of the Continuous Integration (CI).
The test profiles are applicable to tests that run using pytest. Currently only
the [coverage test](tests/test_coverage.py) follows this model as all the other
integration tests are run using the
[Buildkite pipeline](https://buildkite.com/rust-vmm/vm-virtio-ci).
The difference between the two profiles is how tests are declared passed or failed:
- with the **devel** profile the coverage test passes if the current coverage
is equal or higher than the upstream coverage value. In case the current
coverage is higher, the coverage file is updated to the new coverage value.
- with the **ci** profile the coverage test passes only if the current coverage
is equal to the upstream coverage value.
Further details about the coverage test can be found in the
[Adaptive Coverage](#adaptive-coverage) section.
### Adaptive Coverage
The line coverage is saved in [tests/coverage](tests/coverage). To update the
coverage before submitting a PR, run the coverage test:
```bash
docker run -it \
--security-opt seccomp=unconfined \
--volume $(pwd):/linux-loader \
fandree/rust-vmm-dev
cd linux-loader/
pytest --profile=devel tests/test_coverage.py
```
If the PR coverage is higher than the upstream coverage, the coverage file
needs to be manually added to the commit before submitting the PR:
```bash
git add tests/coverage
```
Failing to do so will generate a fail on the CI pipeline when publishing the
PR.
**NOTE:** The coverage file is only updated in the `devel` test profile. In
the `ci` profile the coverage test will fail if the current coverage is higher
than the coverage reported in [tests/coverage](tests/coverage).
### bzImage test
As we don't want to distribute an entire kernel bzImage, the `load_bzImage` test is ignored by
default. In order to test the bzImage support, one needs to locally build a bzImage, copy it
to the `src/loader` directory and run the ignored test:
```shell
# Assuming your linux-loader and linux-stable are both under $LINUX_LOADER
$ git clone git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git $LINUX_LOADER/linux-stable
$ cd linux-stable
$ make bzImage
$ cp linux-stable/arch/x86/boot/bzImage $LINUX_LOADER/linux-loader/src/loader/
$ cd $LINUX_LOADER/linux-loader
$ docker run -it \
--security-opt seccomp=unconfined \
--volume $(pwd):/linux-loader \
fandree/rust-vmm-dev
$ cd linux-loader/
$ cargo test -- --ignored
```


@ -1,235 +0,0 @@
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
//
//! Helper for creating valid kernel command line strings.
use std::fmt;
use std::result;
/// The error type for command line building operations.
#[derive(PartialEq, Debug)]
pub enum Error {
/// Operation would have resulted in a non-printable ASCII character.
InvalidAscii,
/// Key/Value Operation would have had a space in it.
HasSpace,
/// Key/Value Operation would have had an equals sign in it.
HasEquals,
/// Operation would have made the command line too large.
TooLarge,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"{}",
match *self {
Error::InvalidAscii => "string contains non-printable ASCII character",
Error::HasSpace => "string contains a space",
Error::HasEquals => "string contains an equals sign",
Error::TooLarge => "inserting string would make command line too long",
}
)
}
}
/// Specialized Result type for command line operations.
pub type Result<T> = result::Result<T, Error>;
fn valid_char(c: char) -> bool {
match c {
' '...'~' => true,
_ => false,
}
}
fn valid_str(s: &str) -> Result<()> {
if s.chars().all(valid_char) {
Ok(())
} else {
Err(Error::InvalidAscii)
}
}
fn valid_element(s: &str) -> Result<()> {
if !s.chars().all(valid_char) {
Err(Error::InvalidAscii)
} else if s.contains(' ') {
Err(Error::HasSpace)
} else if s.contains('=') {
Err(Error::HasEquals)
} else {
Ok(())
}
}
/// A builder for a kernel command line string that validates the string as it is being built. A
/// `CString` can be constructed from this directly using `CString::new`.
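// A minimal usage sketch (assuming the crate is consumed as `linux_loader`):
/// # Examples
///
/// ```
/// # use linux_loader::cmdline::Cmdline;
/// let mut cl = Cmdline::new(100);
/// cl.insert("console", "ttyS0").unwrap();
/// cl.insert_str("noapic").unwrap();
/// assert_eq!(cl.as_str(), "console=ttyS0 noapic");
/// ```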
#[derive(Clone)]
pub struct Cmdline {
line: String,
capacity: usize,
}
impl Cmdline {
/// Constructs an empty Cmdline with the given capacity, which includes the nul terminator.
/// Capacity must be greater than 0.
pub fn new(capacity: usize) -> Cmdline {
assert_ne!(capacity, 0);
Cmdline {
line: String::with_capacity(capacity),
capacity,
}
}
fn has_capacity(&self, more: usize) -> Result<()> {
let needs_space = if self.line.is_empty() { 0 } else { 1 };
if self.line.len() + more + needs_space < self.capacity {
Ok(())
} else {
Err(Error::TooLarge)
}
}
fn start_push(&mut self) {
if !self.line.is_empty() {
self.line.push(' ');
}
}
fn end_push(&mut self) {
// This assert is always true because of the `has_capacity` check that each insert method
// uses.
assert!(self.line.len() < self.capacity);
}
/// Validates and inserts a key value pair into this command line
pub fn insert<T: AsRef<str>>(&mut self, key: T, val: T) -> Result<()> {
let k = key.as_ref();
let v = val.as_ref();
valid_element(k)?;
valid_element(v)?;
self.has_capacity(k.len() + v.len() + 1)?;
self.start_push();
self.line.push_str(k);
self.line.push('=');
self.line.push_str(v);
self.end_push();
Ok(())
}
/// Validates and inserts a string to the end of the current command line
pub fn insert_str<T: AsRef<str>>(&mut self, slug: T) -> Result<()> {
let s = slug.as_ref();
valid_str(s)?;
self.has_capacity(s.len())?;
self.start_push();
self.line.push_str(s);
self.end_push();
Ok(())
}
/// Returns the cmdline in progress without nul termination
pub fn as_str(&self) -> &str {
self.line.as_str()
}
}
impl Into<Vec<u8>> for Cmdline {
fn into(self) -> Vec<u8> {
self.line.into_bytes()
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::ffi::CString;
#[test]
fn insert_hello_world() {
let mut cl = Cmdline::new(100);
assert_eq!(cl.as_str(), "");
assert!(cl.insert("hello", "world").is_ok());
assert_eq!(cl.as_str(), "hello=world");
let s = CString::new(cl).expect("failed to create CString from Cmdline");
assert_eq!(s, CString::new("hello=world").unwrap());
}
#[test]
fn insert_multi() {
let mut cl = Cmdline::new(100);
assert!(cl.insert("hello", "world").is_ok());
assert!(cl.insert("foo", "bar").is_ok());
assert_eq!(cl.as_str(), "hello=world foo=bar");
}
#[test]
fn insert_space() {
let mut cl = Cmdline::new(100);
assert_eq!(cl.insert("a ", "b"), Err(Error::HasSpace));
assert_eq!(cl.insert("a", "b "), Err(Error::HasSpace));
assert_eq!(cl.insert("a ", "b "), Err(Error::HasSpace));
assert_eq!(cl.insert(" a", "b"), Err(Error::HasSpace));
assert_eq!(cl.as_str(), "");
}
#[test]
fn insert_equals() {
let mut cl = Cmdline::new(100);
assert_eq!(cl.insert("a=", "b"), Err(Error::HasEquals));
assert_eq!(cl.insert("a", "b="), Err(Error::HasEquals));
assert_eq!(cl.insert("a=", "b "), Err(Error::HasEquals));
assert_eq!(cl.insert("=a", "b"), Err(Error::HasEquals));
assert_eq!(cl.insert("a", "=b"), Err(Error::HasEquals));
assert_eq!(cl.as_str(), "");
}
#[test]
fn insert_emoji() {
let mut cl = Cmdline::new(100);
assert_eq!(cl.insert("heart", "💖"), Err(Error::InvalidAscii));
assert_eq!(cl.insert("💖", "love"), Err(Error::InvalidAscii));
assert_eq!(cl.as_str(), "");
}
#[test]
fn insert_string() {
let mut cl = Cmdline::new(13);
assert_eq!(cl.as_str(), "");
assert!(cl.insert_str("noapic").is_ok());
assert_eq!(cl.as_str(), "noapic");
assert!(cl.insert_str("nopci").is_ok());
assert_eq!(cl.as_str(), "noapic nopci");
}
#[test]
fn insert_too_large() {
let mut cl = Cmdline::new(4);
assert_eq!(cl.insert("hello", "world"), Err(Error::TooLarge));
assert_eq!(cl.insert("a", "world"), Err(Error::TooLarge));
assert_eq!(cl.insert("hello", "b"), Err(Error::TooLarge));
assert!(cl.insert("a", "b").is_ok());
assert_eq!(cl.insert("a", "b"), Err(Error::TooLarge));
assert_eq!(cl.insert_str("a"), Err(Error::TooLarge));
assert_eq!(cl.as_str(), "a=b");
let mut cl = Cmdline::new(10);
assert!(cl.insert("ab", "ba").is_ok()); // adds 5 length
assert_eq!(cl.insert("c", "da"), Err(Error::TooLarge)); // adds 5 (including space) length
assert!(cl.insert("c", "d").is_ok()); // adds 4 (including space) length
}
}


@ -1,13 +0,0 @@
// Copyright (c) 2019 Intel Corporation. All rights reserved.
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
pub mod cmdline;
pub mod loader;
extern crate vm_memory;

File diff suppressed because it is too large


@ -1,340 +0,0 @@
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
/*
* automatically generated by rust-bindgen
* From upstream linux include/uapi/linux/elf.h at commit:
* 806276b7f07a39a1cc3f38bb1ef5c573d4594a38
*/
pub const EI_MAG0: ::std::os::raw::c_uint = 0;
pub const EI_MAG1: ::std::os::raw::c_uint = 1;
pub const EI_MAG2: ::std::os::raw::c_uint = 2;
pub const EI_MAG3: ::std::os::raw::c_uint = 3;
pub const EI_DATA: ::std::os::raw::c_uint = 5;
pub const ELFMAG0: ::std::os::raw::c_uint = 127;
pub const ELFDATA2LSB: ::std::os::raw::c_uint = 1;
pub const PT_LOAD: ::std::os::raw::c_uint = 1;
pub const ELFMAG1: u8 = b'E';
pub const ELFMAG2: u8 = b'L';
pub const ELFMAG3: u8 = b'F';
type Elf64_Addr = __u64;
type Elf64_Half = __u16;
type Elf64_Off = __u64;
type Elf64_Word = __u32;
type Elf64_Xword = __u64;
type __s8 = ::std::os::raw::c_schar;
type __u8 = ::std::os::raw::c_uchar;
type __s16 = ::std::os::raw::c_short;
type __u16 = ::std::os::raw::c_ushort;
type __s32 = ::std::os::raw::c_int;
type __u32 = ::std::os::raw::c_uint;
type __s64 = ::std::os::raw::c_longlong;
type __u64 = ::std::os::raw::c_ulonglong;
#[repr(C)]
#[derive(Debug, Default, Copy)]
pub struct elf64_hdr {
pub e_ident: [::std::os::raw::c_uchar; 16usize],
pub e_type: Elf64_Half,
pub e_machine: Elf64_Half,
pub e_version: Elf64_Word,
pub e_entry: Elf64_Addr,
pub e_phoff: Elf64_Off,
pub e_shoff: Elf64_Off,
pub e_flags: Elf64_Word,
pub e_ehsize: Elf64_Half,
pub e_phentsize: Elf64_Half,
pub e_phnum: Elf64_Half,
pub e_shentsize: Elf64_Half,
pub e_shnum: Elf64_Half,
pub e_shstrndx: Elf64_Half,
}
impl Clone for elf64_hdr {
fn clone(&self) -> Self {
*self
}
}
pub type Elf64_Ehdr = elf64_hdr;
#[repr(C)]
#[derive(Debug, Default, Copy)]
pub struct elf64_phdr {
pub p_type: Elf64_Word,
pub p_flags: Elf64_Word,
pub p_offset: Elf64_Off,
pub p_vaddr: Elf64_Addr,
pub p_paddr: Elf64_Addr,
pub p_filesz: Elf64_Xword,
pub p_memsz: Elf64_Xword,
pub p_align: Elf64_Xword,
}
impl Clone for elf64_phdr {
fn clone(&self) -> Self {
*self
}
}
pub type Elf64_Phdr = elf64_phdr;
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn bindgen_test_layout_elf64_phdr() {
assert_eq!(
::std::mem::size_of::<elf64_phdr>(),
56usize,
concat!("Size of: ", stringify!(elf64_phdr))
);
assert_eq!(
::std::mem::align_of::<elf64_phdr>(),
8usize,
concat!("Alignment of ", stringify!(elf64_phdr))
);
assert_eq!(
unsafe { &(*(0 as *const elf64_phdr)).p_type as *const _ as usize },
0usize,
concat!(
"Alignment of field: ",
stringify!(elf64_phdr),
"::",
stringify!(p_type)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_phdr)).p_flags as *const _ as usize },
4usize,
concat!(
"Alignment of field: ",
stringify!(elf64_phdr),
"::",
stringify!(p_flags)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_phdr)).p_offset as *const _ as usize },
8usize,
concat!(
"Alignment of field: ",
stringify!(elf64_phdr),
"::",
stringify!(p_offset)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_phdr)).p_vaddr as *const _ as usize },
16usize,
concat!(
"Alignment of field: ",
stringify!(elf64_phdr),
"::",
stringify!(p_vaddr)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_phdr)).p_paddr as *const _ as usize },
24usize,
concat!(
"Alignment of field: ",
stringify!(elf64_phdr),
"::",
stringify!(p_paddr)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_phdr)).p_filesz as *const _ as usize },
32usize,
concat!(
"Alignment of field: ",
stringify!(elf64_phdr),
"::",
stringify!(p_filesz)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_phdr)).p_memsz as *const _ as usize },
40usize,
concat!(
"Alignment of field: ",
stringify!(elf64_phdr),
"::",
stringify!(p_memsz)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_phdr)).p_align as *const _ as usize },
48usize,
concat!(
"Alignment of field: ",
stringify!(elf64_phdr),
"::",
stringify!(p_align)
)
);
}
#[test]
fn bindgen_test_layout_elf64_hdr() {
assert_eq!(
::std::mem::size_of::<elf64_hdr>(),
64usize,
concat!("Size of: ", stringify!(elf64_hdr))
);
assert_eq!(
::std::mem::align_of::<elf64_hdr>(),
8usize,
concat!("Alignment of ", stringify!(elf64_hdr))
);
assert_eq!(
unsafe { &(*(0 as *const elf64_hdr)).e_ident as *const _ as usize },
0usize,
concat!(
"Alignment of field: ",
stringify!(elf64_hdr),
"::",
stringify!(e_ident)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_hdr)).e_type as *const _ as usize },
16usize,
concat!(
"Alignment of field: ",
stringify!(elf64_hdr),
"::",
stringify!(e_type)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_hdr)).e_machine as *const _ as usize },
18usize,
concat!(
"Alignment of field: ",
stringify!(elf64_hdr),
"::",
stringify!(e_machine)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_hdr)).e_version as *const _ as usize },
20usize,
concat!(
"Alignment of field: ",
stringify!(elf64_hdr),
"::",
stringify!(e_version)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_hdr)).e_entry as *const _ as usize },
24usize,
concat!(
"Alignment of field: ",
stringify!(elf64_hdr),
"::",
stringify!(e_entry)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_hdr)).e_phoff as *const _ as usize },
32usize,
concat!(
"Alignment of field: ",
stringify!(elf64_hdr),
"::",
stringify!(e_phoff)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_hdr)).e_shoff as *const _ as usize },
40usize,
concat!(
"Alignment of field: ",
stringify!(elf64_hdr),
"::",
stringify!(e_shoff)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_hdr)).e_flags as *const _ as usize },
48usize,
concat!(
"Alignment of field: ",
stringify!(elf64_hdr),
"::",
stringify!(e_flags)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_hdr)).e_ehsize as *const _ as usize },
52usize,
concat!(
"Alignment of field: ",
stringify!(elf64_hdr),
"::",
stringify!(e_ehsize)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_hdr)).e_phentsize as *const _ as usize },
54usize,
concat!(
"Alignment of field: ",
stringify!(elf64_hdr),
"::",
stringify!(e_phentsize)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_hdr)).e_phnum as *const _ as usize },
56usize,
concat!(
"Alignment of field: ",
stringify!(elf64_hdr),
"::",
stringify!(e_phnum)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_hdr)).e_shentsize as *const _ as usize },
58usize,
concat!(
"Alignment of field: ",
stringify!(elf64_hdr),
"::",
stringify!(e_shentsize)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_hdr)).e_shnum as *const _ as usize },
60usize,
concat!(
"Alignment of field: ",
stringify!(elf64_hdr),
"::",
stringify!(e_shnum)
)
);
assert_eq!(
unsafe { &(*(0 as *const elf64_hdr)).e_shstrndx as *const _ as usize },
62usize,
concat!(
"Alignment of field: ",
stringify!(elf64_hdr),
"::",
stringify!(e_shstrndx)
)
);
}
}


@ -1,596 +0,0 @@
// Copyright (c) 2019 Intel Corporation. All rights reserved.
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
//! Traits and Structs
//! - [KernelLoader](trait.KernelLoader.html): load kernel image into guest memory
//! - [KernelLoaderResult](struct.KernelLoaderResult.html): the structure which loader
//! returns to VMM to assist zero page construction and boot environment setup
//! - [Elf](struct.Elf.html): elf image loader
//! - [BzImage](struct.BzImage.html): bzImage loader
extern crate vm_memory;
use std::error::{self, Error as KernelLoaderError};
use std::ffi::CStr;
use std::fmt::{self, Display};
use std::io::{Read, Seek, SeekFrom};
use std::mem;
use vm_memory::{Address, Bytes, GuestAddress, GuestMemory, GuestUsize};
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[allow(non_upper_case_globals)]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::all))]
pub mod bootparam;
#[allow(dead_code)]
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[allow(non_upper_case_globals)]
#[cfg_attr(feature = "cargo-clippy", allow(clippy::all))]
mod elf;
mod struct_util;
#[derive(Debug, PartialEq)]
pub enum Error {
BigEndianElfOnLittle,
CommandLineCopy,
CommandLineOverflow,
InvalidElfMagicNumber,
InvalidProgramHeaderSize,
InvalidProgramHeaderOffset,
InvalidProgramHeaderAddress,
InvalidEntryAddress,
InvalidBzImage,
InvalidKernelStartAddress,
InitrdImageSizeTooLarge,
MemoryOverflow,
ReadElfHeader,
ReadKernelImage,
ReadProgramHeader,
ReadBzImageHeader,
ReadBzImageCompressedKernel,
ReadInitrdImage,
SeekKernelStart,
SeekElfStart,
SeekProgramHeader,
SeekBzImageEnd,
SeekBzImageHeader,
SeekBzImageCompressedKernel,
SeekInitrdImage,
}
pub type Result<T> = std::result::Result<T, Error>;
impl error::Error for Error {
fn description(&self) -> &str {
match self {
Error::BigEndianElfOnLittle => {
"Trying to load big-endian binary on little-endian machine"
}
Error::CommandLineCopy => "Failed writing command line to guest memory",
Error::CommandLineOverflow => "Command line overflowed guest memory",
Error::InvalidElfMagicNumber => "Invalid Elf magic number",
Error::InvalidProgramHeaderSize => "Invalid program header size",
Error::InvalidProgramHeaderOffset => "Invalid program header offset",
Error::InvalidProgramHeaderAddress => "Invalid Program Header Address",
Error::InvalidEntryAddress => "Invalid entry address",
Error::InvalidBzImage => "Invalid bzImage",
Error::InvalidKernelStartAddress => "Invalid kernel start address",
Error::InitrdImageSizeTooLarge => "Initrd image size too large",
Error::MemoryOverflow => "Memory to load kernel image is not enough",
Error::ReadElfHeader => "Unable to read elf header",
Error::ReadKernelImage => "Unable to read kernel image",
Error::ReadProgramHeader => "Unable to read program header",
Error::ReadBzImageHeader => "Unable to read bzImage header",
Error::ReadBzImageCompressedKernel => "Unable to read bzImage compressed kernel",
Error::ReadInitrdImage => "Unable to read initrd image",
Error::SeekKernelStart => "Unable to seek to kernel start",
Error::SeekElfStart => "Unable to seek to elf start",
Error::SeekProgramHeader => "Unable to seek to program header",
Error::SeekBzImageEnd => "Unable to seek bzImage end",
Error::SeekBzImageHeader => "Unable to seek bzImage header",
Error::SeekBzImageCompressedKernel => "Unable to seek bzImage compressed kernel",
Error::SeekInitrdImage => "Unable to seek initrd image",
}
}
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Kernel Loader Error: {}", Error::description(self))
}
}
/// * `kernel_load` - The address in `guest_mem` at which loading of the kernel image started.
/// * `kernel_end` - The offset in `guest_mem` at which the loaded kernel image ends; returned
///                  so that an initrd can be loaded adjacent to the kernel image.
/// * `setup_header` - The setup_header from the Linux boot protocol, bzImage only; the VMM
///                    uses it to set setup_header.init_size, which is mandatory for bzImage
///                    direct boot.
#[derive(Debug, Default, Copy, Clone, PartialEq)]
pub struct KernelLoaderResult {
pub kernel_load: GuestAddress,
pub kernel_end: GuestUsize,
pub setup_header: Option<bootparam::setup_header>,
}
pub trait KernelLoader {
fn load<F, M: GuestMemory>(
guest_mem: &M,
kernel_start: Option<GuestAddress>,
kernel_image: &mut F,
lowest_kernel_start: Option<GuestAddress>,
) -> Result<KernelLoaderResult>
where
F: Read + Seek;
}
#[cfg(feature = "elf")]
pub struct Elf;
#[cfg(feature = "elf")]
impl KernelLoader for Elf {
/// Loads a kernel from a vmlinux ELF image into guest memory.
///
/// The kernel is loaded into guest memory at the offset phdr.p_paddr specified by the ELF image.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory region the kernel is written to.
/// * `kernel_start` - The offset into `guest_mem` at which to load the kernel.
/// * `kernel_image` - Input vmlinux image.
/// * `lowest_kernel_start` - The start of high memory; the kernel must be loaded above this address.
///
/// # Returns
/// * KernelLoaderResult
fn load<F, M: GuestMemory>(
guest_mem: &M,
kernel_start: Option<GuestAddress>,
kernel_image: &mut F,
lowest_kernel_start: Option<GuestAddress>,
) -> Result<KernelLoaderResult>
where
F: Read + Seek,
{
let mut ehdr: elf::Elf64_Ehdr = Default::default();
kernel_image
.seek(SeekFrom::Start(0))
.map_err(|_| Error::SeekElfStart)?;
unsafe {
// read_struct is safe when reading a POD struct. It can be used and dropped without issue.
struct_util::read_struct(kernel_image, &mut ehdr).map_err(|_| Error::ReadElfHeader)?;
}
// Sanity checks
if ehdr.e_ident[elf::EI_MAG0 as usize] != elf::ELFMAG0 as u8
|| ehdr.e_ident[elf::EI_MAG1 as usize] != elf::ELFMAG1
|| ehdr.e_ident[elf::EI_MAG2 as usize] != elf::ELFMAG2
|| ehdr.e_ident[elf::EI_MAG3 as usize] != elf::ELFMAG3
{
return Err(Error::InvalidElfMagicNumber);
}
if ehdr.e_ident[elf::EI_DATA as usize] != elf::ELFDATA2LSB as u8 {
return Err(Error::BigEndianElfOnLittle);
}
if ehdr.e_phentsize as usize != mem::size_of::<elf::Elf64_Phdr>() {
return Err(Error::InvalidProgramHeaderSize);
}
if (ehdr.e_phoff as usize) < mem::size_of::<elf::Elf64_Ehdr>() {
// If the program header is backwards, bail.
return Err(Error::InvalidProgramHeaderOffset);
}
if let Some(low) = lowest_kernel_start {
if (ehdr.e_entry as u64) < low.raw_value() {
return Err(Error::InvalidEntryAddress);
}
}
let mut loader_result: KernelLoaderResult = Default::default();
// The address at which the kernel will start being loaded.
loader_result.kernel_load = match kernel_start {
Some(start) => GuestAddress(start.raw_value() + (ehdr.e_entry as u64)),
None => GuestAddress(ehdr.e_entry as u64),
};
kernel_image
.seek(SeekFrom::Start(ehdr.e_phoff))
.map_err(|_| Error::SeekProgramHeader)?;
let phdrs: Vec<elf::Elf64_Phdr> = unsafe {
// Reading the structs is safe for a slice of POD structs.
struct_util::read_struct_slice(kernel_image, ehdr.e_phnum as usize)
.map_err(|_| Error::ReadProgramHeader)?
};
// Read in each section pointed to by the program headers.
for phdr in &phdrs {
if phdr.p_type != elf::PT_LOAD || phdr.p_filesz == 0 {
continue;
}
kernel_image
.seek(SeekFrom::Start(phdr.p_offset))
.map_err(|_| Error::SeekKernelStart)?;
// If the VMM did not specify where to load the kernel, load each segment
// at the physical address p_paddr from its program header.
let mem_offset = match kernel_start {
Some(start) => start
.checked_add(phdr.p_paddr as u64)
.ok_or(Error::InvalidProgramHeaderAddress)?,
None => GuestAddress(phdr.p_paddr as u64),
};
guest_mem
.read_exact_from(mem_offset, kernel_image, phdr.p_filesz as usize)
.map_err(|_| Error::ReadKernelImage)?;
loader_result.kernel_end = mem_offset.raw_value()
.checked_add(phdr.p_memsz as GuestUsize)
.ok_or(Error::MemoryOverflow)?;
}
loader_result.setup_header = None;
Ok(loader_result)
}
}
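// A minimal usage sketch (illustrative, not part of the original source; the file name
// and memory size are assumptions):
//
// let gm = GuestMemoryMmap::new(&[(GuestAddress(0), 0x100_0000)]).unwrap();
// let mut kernel = std::fs::File::open("vmlinux").unwrap();
// let result = Elf::load(&gm, None, &mut kernel, Some(GuestAddress(0x10_0000))).unwrap();
// assert!(result.setup_header.is_none()); // ELF loading never produces a setup header.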
#[cfg(feature = "bzImage")]
pub struct BzImage;
#[cfg(feature = "bzImage")]
impl KernelLoader for BzImage {
/// Loads a bzImage kernel.
///
/// The kernel is loaded into guest memory at `code32_start`, the default load
/// address stored in the bzImage setup header.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory region the kernel is written to.
/// * `kernel_start` - The offset into `guest_mem` at which to load the kernel.
/// * `kernel_image` - Input bzImage.
/// * `lowest_kernel_start` - The start of high memory; the kernel must be loaded above it.
///
/// # Returns
/// * KernelLoaderResult
fn load<F, M: GuestMemory>(
guest_mem: &M,
kernel_start: Option<GuestAddress>,
kernel_image: &mut F,
lowest_kernel_start: Option<GuestAddress>,
) -> Result<KernelLoaderResult>
where
F: Read + Seek,
{
let mut kernel_size = kernel_image
.seek(SeekFrom::End(0))
.map_err(|_| Error::SeekBzImageEnd)? as usize;
let mut boot_header: bootparam::setup_header = Default::default();
kernel_image
.seek(SeekFrom::Start(0x1F1))
.map_err(|_| Error::SeekBzImageHeader)?;
unsafe {
// read_struct is safe when reading a POD struct. It can be used and dropped without issue.
struct_util::read_struct(kernel_image, &mut boot_header)
.map_err(|_| Error::ReadBzImageHeader)?;
}
// If the "HdrS" magic number is not found at offset 0x202, the boot protocol version is
// "old" and the image is assumed to be a zImage, not a bzImage.
if boot_header.header != 0x5372_6448 {
return Err(Error::InvalidBzImage);
}
// Follow the "loading the rest of the kernel" section of the Linux boot protocol.
if (boot_header.version < 0x0200) || ((boot_header.loadflags & 0x1) == 0x0) {
return Err(Error::InvalidBzImage);
}
let mut setup_size = boot_header.setup_sects as usize;
if setup_size == 0 {
setup_size = 4;
}
setup_size = (setup_size + 1) * 512;
kernel_size -= setup_size;
// Validate the bzImage by checking that code32_start, the default kernel load address,
// is not lower than the start of high memory.
if let Some(low) = lowest_kernel_start {
if u64::from(boot_header.code32_start) < low.raw_value() {
return Err(Error::InvalidKernelStartAddress);
}
}
let mem_offset = match kernel_start {
Some(start) => start,
None => GuestAddress(u64::from(boot_header.code32_start)),
};
boot_header.code32_start = mem_offset.raw_value() as u32;
let mut loader_result: KernelLoaderResult = Default::default();
loader_result.setup_header = Some(boot_header);
loader_result.kernel_load = mem_offset;
// Seek to the compressed vmlinux.bin and read it into guest memory.
kernel_image
.seek(SeekFrom::Start(setup_size as u64))
.map_err(|_| Error::SeekBzImageCompressedKernel)?;
guest_mem
.read_exact_from(mem_offset, kernel_image, kernel_size)
.map_err(|_| Error::ReadBzImageCompressedKernel)?;
loader_result.kernel_end = mem_offset.raw_value()
.checked_add(kernel_size as GuestUsize)
.ok_or(Error::MemoryOverflow)?;
Ok(loader_result)
}
}
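// A minimal usage sketch (illustrative; "bzImage" is an assumed file name). With
// kernel_start == None, the load address comes from code32_start in the setup header:
//
// let gm = GuestMemoryMmap::new(&[(GuestAddress(0), 0x100_0000)]).unwrap();
// let mut image = std::fs::File::open("bzImage").unwrap();
// let result = BzImage::load(&gm, None, &mut image, None).unwrap();
// let hdr = result.setup_header.unwrap(); // needed later for direct boot setup.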
/// Writes the command line string to the given memory slice.
///
/// # Arguments
///
/// * `guest_mem` - The guest memory that will be partially overwritten by the command line.
/// * `guest_addr` - The address in `guest_mem` at which to load the command line.
/// * `cmdline` - The kernel command line.
pub fn load_cmdline<M: GuestMemory>(
guest_mem: &M,
guest_addr: GuestAddress,
cmdline: &CStr,
) -> Result<()> {
let len = cmdline.to_bytes().len();
if len == 0 {
return Ok(());
}
let end = guest_addr
.checked_add(len as u64 + 1)
.ok_or(Error::CommandLineOverflow)?; // Extra for null termination.
if end > guest_mem.end_addr() {
return Err(Error::CommandLineOverflow);
}
guest_mem
.write_slice(cmdline.to_bytes_with_nul(), guest_addr)
.map_err(|_| Error::CommandLineCopy)?;
Ok(())
}
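// Illustrative usage (the command line string and address are assumptions; `gm` is a
// previously created guest memory object):
//
// let cmdline = std::ffi::CString::new("console=ttyS0 reboot=k").unwrap();
// load_cmdline(&gm, GuestAddress(0x2_0000), &cmdline).unwrap();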
#[cfg(test)]
mod test {
use super::*;
use std::io::Cursor;
use vm_memory::{Address, GuestAddress, GuestMemoryMmap};
const MEM_SIZE: u64 = 0x1000000;
fn create_guest_mem() -> GuestMemoryMmap {
GuestMemoryMmap::new(&[(GuestAddress(0x0), (MEM_SIZE as usize))]).unwrap()
}
#[allow(non_snake_case)]
#[cfg(feature = "bzImage")]
fn make_bzImage() -> Vec<u8> {
let mut v = Vec::new();
v.extend_from_slice(include_bytes!("bzImage"));
v
}
// Elf64 image that prints hello world on x86_64.
fn make_elf_bin() -> Vec<u8> {
let mut v = Vec::new();
v.extend_from_slice(include_bytes!("test_elf.bin"));
v
}
#[allow(safe_packed_borrows)]
#[allow(non_snake_case)]
#[test]
#[ignore]
#[cfg(feature = "bzImage")]
fn load_bzImage() {
let gm = create_guest_mem();
let image = make_bzImage();
let mut kernel_start = GuestAddress(0x200000);
let mut lowest_kernel_start = GuestAddress(0x0);
// load bzImage with good kernel_start and himem_start setting
let mut loader_result = BzImage::load(
&gm,
Some(kernel_start),
&mut Cursor::new(&image),
Some(lowest_kernel_start),
)
.unwrap();
assert_eq!(0x53726448, loader_result.setup_header.unwrap().header);
println!(
"bzImage is loaded at {:8x} \n",
loader_result.kernel_load.raw_value()
);
println!(
"bzImage version is {:2x} \n",
loader_result.setup_header.unwrap().version
);
println!(
"bzImage loadflags is {:x} \n",
loader_result.setup_header.unwrap().loadflags
);
println!(
"bzImage kernel size is {:4x} \n",
(loader_result.kernel_end as u32)
);
// load bzImage without kernel_start
loader_result = BzImage::load(
&gm,
None,
&mut Cursor::new(&image),
Some(lowest_kernel_start),
)
.unwrap();
assert_eq!(0x53726448, loader_result.setup_header.unwrap().header);
println!(
"bzImage is loaded at {:8x} \n",
loader_result.kernel_load.raw_value()
);
// load bzImage without himem_start
loader_result = BzImage::load(&gm, None, &mut Cursor::new(&image), None).unwrap();
assert_eq!(0x53726448, loader_result.setup_header.unwrap().header);
println!(
"bzImage is loaded at {:8x} \n",
loader_result.kernel_load.raw_value()
);
// load bzImage with a bad himem setting
kernel_start = GuestAddress(0x1000);
lowest_kernel_start = GuestAddress(0x200000);
let x = BzImage::load(
&gm,
Some(kernel_start),
&mut Cursor::new(&image),
Some(lowest_kernel_start),
);
assert!(x.is_err());
println!("load bzImage with bad himem setting \n");
}
#[test]
fn load_elf() {
let gm = create_guest_mem();
let image = make_elf_bin();
let kernel_addr = GuestAddress(0x200000);
let mut lowest_kernel_start = GuestAddress(0x0);
let mut loader_result = Elf::load(
&gm,
Some(kernel_addr),
&mut Cursor::new(&image),
Some(lowest_kernel_start),
)
.unwrap();
println!(
"load elf at address {:8x} \n",
loader_result.kernel_load.raw_value()
);
loader_result = Elf::load(&gm, Some(kernel_addr), &mut Cursor::new(&image), None).unwrap();
println!(
"load elf at address {:8x} \n",
loader_result.kernel_load.raw_value()
);
loader_result = Elf::load(
&gm,
None,
&mut Cursor::new(&image),
Some(lowest_kernel_start),
)
.unwrap();
println!(
"load elf at address {:8x} \n",
loader_result.kernel_load.raw_value()
);
lowest_kernel_start = GuestAddress(0xa00000);
assert_eq!(
Err(Error::InvalidEntryAddress),
Elf::load(
&gm,
None,
&mut Cursor::new(&image),
Some(lowest_kernel_start)
)
);
}
#[test]
fn cmdline_overflow() {
let gm = create_guest_mem();
let cmdline_address = GuestAddress(MEM_SIZE - 5);
assert_eq!(
Err(Error::CommandLineOverflow),
load_cmdline(
&gm,
cmdline_address,
CStr::from_bytes_with_nul(b"12345\0").unwrap()
)
);
}
#[test]
fn cmdline_write_end() {
let gm = create_guest_mem();
let mut cmdline_address = GuestAddress(45);
assert_eq!(
Ok(()),
load_cmdline(
&gm,
cmdline_address,
CStr::from_bytes_with_nul(b"1234\0").unwrap()
)
);
let val: u8 = gm.read_obj(cmdline_address).unwrap();
assert_eq!(val, b'1');
cmdline_address = cmdline_address.unchecked_add(1);
let val: u8 = gm.read_obj(cmdline_address).unwrap();
assert_eq!(val, b'2');
cmdline_address = cmdline_address.unchecked_add(1);
let val: u8 = gm.read_obj(cmdline_address).unwrap();
assert_eq!(val, b'3');
cmdline_address = cmdline_address.unchecked_add(1);
let val: u8 = gm.read_obj(cmdline_address).unwrap();
assert_eq!(val, b'4');
cmdline_address = cmdline_address.unchecked_add(1);
let val: u8 = gm.read_obj(cmdline_address).unwrap();
assert_eq!(val, b'\0');
}
#[test]
fn bad_magic() {
let gm = create_guest_mem();
let kernel_addr = GuestAddress(0x0);
let mut bad_image = make_elf_bin();
bad_image[0x1] = 0x33;
assert_eq!(
Err(Error::InvalidElfMagicNumber),
Elf::load(&gm, Some(kernel_addr), &mut Cursor::new(&bad_image), None)
);
}
#[test]
fn bad_endian() {
// Only little endian is supported
let gm = create_guest_mem();
let kernel_addr = GuestAddress(0x0);
let mut bad_image = make_elf_bin();
bad_image[0x5] = 2;
assert_eq!(
Err(Error::BigEndianElfOnLittle),
Elf::load(&gm, Some(kernel_addr), &mut Cursor::new(&bad_image), None)
);
}
#[test]
fn bad_phoff() {
// The program header has to be past the end of the ELF header.
let gm = create_guest_mem();
let kernel_addr = GuestAddress(0x0);
let mut bad_image = make_elf_bin();
bad_image[0x20] = 0x10;
assert_eq!(
Err(Error::InvalidProgramHeaderOffset),
Elf::load(&gm, Some(kernel_addr), &mut Cursor::new(&bad_image), None)
);
}
}


@ -1,152 +0,0 @@
// Copyright (c) 2019 Intel Corporation. All rights reserved.
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
use std;
use std::io::Read;
use std::mem;
#[derive(Debug)]
pub enum Error {
ReadStruct,
}
pub type Result<T> = std::result::Result<T, Error>;
/// Reads a struct from an input buffer.
/// This is unsafe because the struct is initialized to unverified data read from the input.
/// `read_struct` should only be called to fill plain old data structs. It is not endian safe.
///
/// # Arguments
///
/// * `f` - The input to read from. Often this is a file.
/// * `out` - The struct to fill with data read from `f`.
pub unsafe fn read_struct<T: Copy, F: Read>(f: &mut F, out: &mut T) -> Result<()> {
let out_slice = std::slice::from_raw_parts_mut(out as *mut T as *mut u8, mem::size_of::<T>());
f.read_exact(out_slice).map_err(|_| Error::ReadStruct)?;
Ok(())
}
/// Reads an array of structs from an input buffer. Returns a Vec of structs initialized with data
/// from the specified input.
/// This is unsafe because the structs are initialized to unverified data read from the input.
/// `read_struct_slice` should only be called for plain old data structs. It is not endian safe.
///
/// # Arguments
///
/// * `f` - The input to read from. Often this is a file.
/// * `len` - The number of structs to fill with data read from `f`.
pub unsafe fn read_struct_slice<T: Copy, F: Read>(f: &mut F, len: usize) -> Result<Vec<T>> {
let mut out: Vec<T> = Vec::with_capacity(len);
out.set_len(len);
let out_slice = std::slice::from_raw_parts_mut(
out.as_ptr() as *mut T as *mut u8,
mem::size_of::<T>() * len,
);
f.read_exact(out_slice).map_err(|_| Error::ReadStruct)?;
Ok(out)
}
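// Illustrative sketch of the intended contract: callers pick a plain-old-data,
// #[repr(C)] type and take responsibility for the unsafety ("Header" and "reader"
// are assumptions, not part of this module):
//
// #[derive(Clone, Copy, Default)]
// #[repr(C)]
// struct Header { magic: u32, len: u32 }
// let mut hdr = Header::default();
// unsafe { read_struct(&mut reader, &mut hdr)?; } // reader: impl Read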
#[cfg(test)]
mod tests {
use super::*;
use std::io::Cursor;
use std::mem;
#[derive(Clone, Copy, Debug, Default, PartialEq)]
struct TestRead {
a: u64,
b: u8,
c: u8,
d: u8,
e: u8,
}
#[test]
fn struct_basic_read() {
let orig = TestRead {
a: 0x7766554433221100,
b: 0x88,
c: 0x99,
d: 0xaa,
e: 0xbb,
};
let source = unsafe {
// Don't worry it's a test
std::slice::from_raw_parts(
&orig as *const _ as *const u8,
std::mem::size_of::<TestRead>(),
)
};
assert_eq!(mem::size_of::<TestRead>(), mem::size_of_val(&source));
let mut tr: TestRead = Default::default();
unsafe {
read_struct(&mut Cursor::new(source), &mut tr).unwrap();
}
assert_eq!(orig, tr);
}
#[test]
fn struct_read_past_end() {
let orig = TestRead {
a: 0x7766554433221100,
b: 0x88,
c: 0x99,
d: 0xaa,
e: 0xbb,
};
let source = unsafe {
// Don't worry it's a test
std::slice::from_raw_parts(
&orig as *const _ as *const u8,
std::mem::size_of::<TestRead>() - 1,
)
};
let mut tr: TestRead = Default::default();
unsafe {
assert!(read_struct(&mut Cursor::new(source), &mut tr).is_err());
format!("{:?}", read_struct(&mut Cursor::new(source), &mut tr));
}
}
#[test]
fn struct_slice_read() {
let orig = vec![
TestRead {
a: 0x7766554433221100,
b: 0x88,
c: 0x99,
d: 0xaa,
e: 0xbb,
},
TestRead {
a: 0x7867564534231201,
b: 0x02,
c: 0x13,
d: 0x24,
e: 0x35,
},
TestRead {
a: 0x7a69584736251403,
b: 0x04,
c: 0x15,
d: 0x26,
e: 0x37,
},
];
let source = unsafe {
// Don't worry it's a test
std::slice::from_raw_parts(
orig.as_ptr() as *const u8,
std::mem::size_of::<TestRead>() * 3,
)
};
let tr: Vec<TestRead> = unsafe { read_struct_slice(&mut Cursor::new(source), 3).unwrap() };
assert_eq!(orig, tr);
}
}


@ -1,28 +0,0 @@
import pytest
PROFILE_CI = "ci"
PROFILE_DEVEL = "devel"
def pytest_addoption(parser):
parser.addoption(
"--profile",
default=PROFILE_CI,
choices=[PROFILE_CI, PROFILE_DEVEL],
help="Profile for running the test: {} or {}".format(
PROFILE_CI,
PROFILE_DEVEL
)
)
@pytest.fixture
def profile(request):
return request.config.getoption("--profile")
# This is used for defining global variables in pytest.
def pytest_configure():
pytest.profile_ci = PROFILE_CI
pytest.profile_devel = PROFILE_DEVEL


@ -1 +0,0 @@
67.0


@ -1,108 +0,0 @@
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
"""Test the coverage and update the threshold when coverage is increased."""
import os, re, shutil, subprocess
import pytest
def _get_current_coverage():
"""Helper function that returns the coverage computed with kcov."""
kcov_output_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"kcov_output"
)
# By default the build output for kcov and unit tests are both in the debug
# directory. This causes some linker errors that I haven't investigated.
# Error: error: linking with `cc` failed: exit code: 1
# An easy fix is to have separate build directories for kcov & unit tests.
kcov_build_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"kcov_build"
)
# Remove kcov output and build directory to be sure we are always working
# on a clean environment.
shutil.rmtree(kcov_output_dir, ignore_errors=True)
shutil.rmtree(kcov_build_dir, ignore_errors=True)
exclude_pattern = (
'${CARGO_HOME:-$HOME/.cargo/},'
'usr/lib/,'
'lib/'
)
exclude_region = "'mod tests {'"
kcov_cmd = "CARGO_TARGET_DIR={} cargo kcov --all " \
"--output {} -- " \
"--exclude-region={} " \
"--exclude-pattern={} " \
"--verify".format(
kcov_build_dir,
kcov_output_dir,
exclude_region,
exclude_pattern
)
subprocess.run(kcov_cmd, shell=True, check=True)
# Read the coverage reported by kcov.
coverage_file = os.path.join(kcov_output_dir, 'index.js')
with open(coverage_file) as cov_output:
coverage = float(re.findall(
r'"covered":"(\d+\.\d)"',
cov_output.read()
)[0])
# Remove coverage related directories.
shutil.rmtree(kcov_output_dir, ignore_errors=True)
shutil.rmtree(kcov_build_dir, ignore_errors=True)
return coverage
def _get_previous_coverage():
"""Helper function that returns the last reported coverage."""
coverage_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'coverage'
)
# The first and only line of the file contains the coverage.
with open(coverage_path) as f:
coverage = f.readline()
return float(coverage.strip())
def _update_coverage(cov_value):
"""Updates the coverage in the coverage file."""
coverage_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'coverage'
)
with open(coverage_path, "w") as f:
f.write(str(cov_value))
def test_coverage(profile):
current_coverage = _get_current_coverage()
previous_coverage = _get_previous_coverage()
if previous_coverage < current_coverage:
if profile == pytest.profile_ci:
# In the CI Profile we expect the coverage to be manually updated.
assert False, "Coverage is increased from {} to {}. " \
"Please update the coverage in " \
"tests/coverage.".format(
previous_coverage,
current_coverage
)
elif profile == pytest.profile_devel:
_update_coverage(current_coverage)
else:
# This should never happen because pytest should only accept
# the valid test profiles specified with `choices` in
# `pytest_addoption`.
assert False, "Invalid test profile."
elif previous_coverage > current_coverage:
diff = float(previous_coverage - current_coverage)
assert False, "Coverage drops by {:.2f}%. Please add unit tests for" \
"the uncovered lines.".format(diff)


@ -1,153 +0,0 @@
steps:
- label: "build-gnu-x86"
commands:
- cargo build --release
retry:
automatic: false
agents:
platform: x86_64.metal
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "build-gnu-arm"
commands:
- cargo build --release
retry:
automatic: false
agents:
platform: arm.metal
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "build-musl-arm"
commands:
- cargo build --release --target aarch64-unknown-linux-musl
retry:
automatic: false
agents:
platform: arm.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "unittests-musl-x86"
commands:
- cargo test --all-features --target x86_64-unknown-linux-musl
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "unittests-musl-arm"
commands:
- cargo test --all-features --target aarch64-unknown-linux-musl
retry:
automatic: false
agents:
platform: arm.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "style"
command: cargo fmt --all -- --check
retry:
automatic: false
agents:
platform: x86_64.metal
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "unittests-gnu-x86"
commands:
- cargo test
retry:
automatic: false
agents:
platform: x86_64.metal
plugins:
- docker#v3.0.1:
privileged: true
image: "rustvmm/dev:v2"
always-pull: true
tmpfs: [ "/tmp:exec" ]
- label: "unittests-gnu-arm"
commands:
- cargo test
retry:
automatic: false
agents:
platform: arm.metal
plugins:
- docker#v3.0.1:
privileged: true
image: "rustvmm/dev:v2"
always-pull: true
tmpfs: [ "/tmp:exec" ]
- label: "clippy-x86"
commands:
- cargo clippy --all -- -D warnings
retry:
automatic: false
agents:
platform: x86_64.metal
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "clippy-arm"
commands:
- cargo clippy --all -- -D warnings
retry:
automatic: false
agents:
platform: arm.metal
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "check-warnings-x86"
commands:
- RUSTFLAGS="-D warnings" cargo check --all-targets
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true
- label: "check-warnings-arm"
commands:
- RUSTFLAGS="-D warnings" cargo check --all-targets
retry:
automatic: false
agents:
platform: arm.metal
os: linux
plugins:
- docker#v3.0.1:
image: "rustvmm/dev:v2"
always-pull: true


@ -1 +0,0 @@
{"files":{".buildkite/pipeline.yml":"e603f3ef6b6357911abe831a0a03f0444cb1ad057385fdd6923d01d6f9f460ba",".cargo/config":"62f04eec5a565cf7017d26222f6f424004ea6f850c22c9c7352ede255a2c59c4","Cargo.toml":"d19b52349ada25dbbae4e2bea4850cf1b09ea546d5efee03ddfbbf9f184c0c72","LICENSE-APACHE":"c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4","LICENSE-BSD-3-Clause":"a6d3ebd1c2f37d4fd83d0676621f695fc0cc2d8c6e646cdbb831b46e0650c208","README.md":"94512ed84e8e89faba431d9107e85d229363c187705ab9244f0ab5269b5961b5","src/errno.rs":"b93845c200fc596b0ef2414671d965cf5c4fdbf6bba6e5e5f0d032a32c73f8ac","src/eventfd.rs":"7bd871242f49c14d8783714e6814456f51a9c280dcadf1625e1bd2313d2b5f7f","src/file_traits.rs":"398c529e7ebce143ecb9f9bd2f5f47ea3e953ac34cc211ad71cdcf1898cc7d38","src/ioctl.rs":"5c4abf75e7b6786e7da3191ac1e4460e1ec7d073a53331a6d9597bb9ccc3f88a","src/lib.rs":"ee0818e0ca6fdc340c52d514eeb2e3aeb4f7ba8e4e522bb946cdbce4779926f1","src/poll.rs":"1498c14ba022ede57c4faf17bee49cf5ac9d1c8d3883db441697ee224dac7818","src/seek_hole.rs":"de43f21bc2c5d9eb7f06e21e3c20f93476bf6016e4d041df71a02b9e54b3c3ca","src/signal.rs":"c9bb7282ec31f10c9a9e84ec4348eaa028e47d191237c465c9b45eaea994b5a8","src/syslog.rs":"fbf4bde16b1059b5b39c5318e8bb918dc431e8e0ccbc82c0d765b9ce4a8d5f96","src/tempdir.rs":"4993460e81f7df6398e0f2b07cc3d81e728aa7e0559c7f3d83b6df1876bc3776","src/terminal.rs":"85efb1df641730fa1981bac6fd65bd75f7d532bb8680a56e94d6d006eeb363e9","src/timerfd.rs":"fd3c52e3918d881c16cb1498f8f66253ee758275a6a66ed8eb11c78e69f69e55","src/write_zeroes.rs":"c2951bbdb3ab07727eda29e9a91a51e427fdf6fed0b611ea6a3732edbd9a1246"},"package":null}


@ -1,2 +0,0 @@
[target.aarch64-unknown-linux-musl]
rustflags = [ "-C", "target-feature=+crt-static", "-C", "link-arg=-lgcc"]


@ -1,9 +0,0 @@
[package]
name = "vmm-sys-util"
version = "0.1.0"
authors = ["Jing Liu <jing2.liu@linux.intel.com>"]
license = "Apache-2.0"
[dependencies]
libc = ">=0.2.39"


@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -1,27 +0,0 @@
// Copyright 2017 The Chromium OS Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,2 +0,0 @@
# vmm-sys-util
This crate is a collection of modules that provides helpers and utilities.


@ -1,80 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use std::fmt::{Display, Formatter};
use std::io;
use std::result;
use libc::__errno_location;
/// An error number, retrieved from [`errno`](http://man7.org/linux/man-pages/man3/errno.3.html),
/// set by a libc function that returned an error.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Error(i32);
pub type Result<T> = result::Result<T, Error>;
impl Error {
/// Constructs a new error with the given `errno`.
pub fn new(e: i32) -> Error {
Error(e)
}
/// Constructs an error from the current `errno`.
///
/// The result of this only has any meaning just after a libc call that returned a value
/// indicating `errno` was set.
pub fn last() -> Error {
Error(unsafe { *__errno_location() })
}
/// Gets the `errno` for this error.
pub fn errno(self) -> i32 {
self.0
}
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
io::Error::from_raw_os_error(self.0).fmt(f)
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::new(e.raw_os_error().unwrap_or_default())
}
}
/// Returns the last `errno` as a [`Result`] that is always an error.
///
/// [`Result`]: type.Result.html
pub fn errno_result<T>() -> Result<T> {
Err(Error::last())
}
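// Illustrative sketch of the intended pattern right after a failing libc call
// ("path" is an assumed, already-built CString):
//
// let fd = unsafe { libc::open(path.as_ptr(), libc::O_RDONLY) };
// if fd < 0 {
//     return errno_result(); // captures the errno set by open(2)
// }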
#[cfg(test)]
mod tests {
use super::*;
use libc;
use std::fs::File;
use std::io::{self, Write};
use std::os::unix::io::FromRawFd;
#[test]
pub fn test_invalid_fd() {
let mut file = unsafe { File::from_raw_fd(-1) };
assert!(file.write(b"test").is_err());
let last_err = errno_result::<i32>().unwrap_err();
assert_eq!(last_err, Error::new(libc::EBADF));
assert_eq!(last_err.errno(), libc::EBADF);
assert_eq!(last_err, Error::from(io::Error::last_os_error()));
assert_eq!(last_err, Error::last());
}
}


@ -1,150 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use std::fs::File;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::{io, mem, result};
use libc::{c_void, dup, eventfd, read, write};
/// A safe wrapper around a Linux eventfd (man 2 eventfd).
pub struct EventFd {
eventfd: File,
}
impl EventFd {
/// Creates a new EventFd with an initial count of 0.
///
/// `flag`: Flags for eventfd(2), e.g. `EFD_NONBLOCK`. Note this is not the initial
/// value, which is always 0.
pub fn new(flag: i32) -> result::Result<EventFd, io::Error> {
// This is safe because eventfd merely allocates an eventfd for
// our process and we handle the error case.
let ret = unsafe { eventfd(0, flag) };
if ret < 0 {
Err(io::Error::last_os_error())
} else {
// This is safe because we checked ret for success and know
// the kernel gave us an fd that we own.
Ok(EventFd {
eventfd: unsafe { File::from_raw_fd(ret) },
})
}
}
/// Adds `v` to the eventfd's count. With `EFD_NONBLOCK`, this fails instead of blocking
/// when the addition would overflow the count.
pub fn write(&self, v: u64) -> result::Result<(), io::Error> {
// This is safe because we made this fd and the pointer we pass
// can not overflow because we give the syscall's size parameter properly.
let ret = unsafe {
write(
self.as_raw_fd(),
&v as *const u64 as *const c_void,
mem::size_of::<u64>(),
)
};
if ret <= 0 {
Err(io::Error::last_os_error())
} else {
Ok(())
}
}
/// Reads from the eventfd. With `EFD_NONBLOCK`, this fails instead of blocking when the
/// counter is zero.
pub fn read(&self) -> result::Result<u64, io::Error> {
let mut buf: u64 = 0;
let ret = unsafe {
// This is safe because we made this fd and the pointer we
// pass can not overflow because we give the syscall's size parameter properly.
read(
self.as_raw_fd(),
&mut buf as *mut u64 as *mut c_void,
mem::size_of::<u64>(),
)
};
if ret < 0 {
Err(io::Error::last_os_error())
} else {
Ok(buf)
}
}
/// Clones this EventFd, internally creating a new file descriptor. The new EventFd will share
/// the same underlying count within the kernel.
pub fn try_clone(&self) -> result::Result<EventFd, io::Error> {
// This is safe because we made this fd and properly check that it returns without error.
let ret = unsafe { dup(self.as_raw_fd()) };
if ret < 0 {
Err(io::Error::last_os_error())
} else {
// This is safe because we checked ret for success and know the kernel gave us an fd that we
// own.
Ok(EventFd {
eventfd: unsafe { File::from_raw_fd(ret) },
})
}
}
}
impl AsRawFd for EventFd {
fn as_raw_fd(&self) -> RawFd {
self.eventfd.as_raw_fd()
}
}
impl FromRawFd for EventFd {
unsafe fn from_raw_fd(fd: RawFd) -> Self {
EventFd {
eventfd: File::from_raw_fd(fd),
}
}
}
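// Illustrative sketch: try_clone() yields a second handle to the same kernel counter,
// which is the usual way to signal across threads:
//
// let evt = EventFd::new(libc::EFD_NONBLOCK).unwrap();
// let waker = evt.try_clone().unwrap();
// std::thread::spawn(move || waker.write(1).unwrap());
// // ... the owner of `evt` can now observe the write via evt.read().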
#[cfg(test)]
mod tests {
use super::*;
use libc::EFD_NONBLOCK;
#[test]
fn test_new() {
EventFd::new(EFD_NONBLOCK).unwrap();
EventFd::new(0).unwrap();
}
#[test]
fn test_read_write() {
let evt = EventFd::new(EFD_NONBLOCK).unwrap();
evt.write(55).unwrap();
assert_eq!(evt.read().unwrap(), 55);
}
#[test]
fn test_write_overflow() {
let evt = EventFd::new(EFD_NONBLOCK).unwrap();
evt.write(std::u64::MAX - 1).unwrap();
let r = evt.write(1);
match r {
Err(ref inner) if inner.kind() == io::ErrorKind::WouldBlock => (),
_ => panic!("Unexpected"),
}
}
#[test]
fn test_read_nothing() {
let evt = EventFd::new(EFD_NONBLOCK).unwrap();
let r = evt.read();
match r {
Err(ref inner) if inner.kind() == io::ErrorKind::WouldBlock => (),
_ => panic!("Unexpected"),
}
}
#[test]
fn test_clone() {
let evt = EventFd::new(EFD_NONBLOCK).unwrap();
let evt_clone = evt.try_clone().unwrap();
evt.write(923).unwrap();
assert_eq!(evt_clone.read().unwrap(), 923);
}
}


@ -1,39 +0,0 @@
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: BSD-3-Clause
use std::fs::File;
use std::io::Result;
/// A trait for flushing the contents of a file to disk.
/// This is equivalent to File's `sync_all` method, but
/// wrapped in a trait so that it can be implemented for
/// other types.
pub trait FileSync {
// Flush buffers related to this file to disk.
fn fsync(&mut self) -> Result<()>;
}
impl FileSync for File {
fn fsync(&mut self) -> Result<()> {
self.sync_all()
}
}
/// A trait for setting the size of a file.
/// This is equivalent to File's `set_len` method, but
/// wrapped in a trait so that it can be implemented for
/// other types.
pub trait FileSetLen {
// Set the size of this file.
// This is the moral equivalent of `ftruncate()`.
fn set_len(&self, _len: u64) -> Result<()>;
}
impl FileSetLen for File {
fn set_len(&self, len: u64) -> Result<()> {
File::set_len(self, len)
}
}


@ -1,225 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//! Macros and wrapper functions for dealing with ioctls.
use libc;
use std::os::raw::{c_int, c_uint, c_ulong, c_void};
use std::os::unix::io::AsRawFd;
/// Raw macro to declare the expression that calculates an ioctl number
#[macro_export]
macro_rules! ioctl_expr {
($dir:expr, $ty:expr, $nr:expr, $size:expr) => {
(($dir << $crate::ioctl::_IOC_DIRSHIFT)
| ($ty << $crate::ioctl::_IOC_TYPESHIFT)
| ($nr << $crate::ioctl::_IOC_NRSHIFT)
| ($size << $crate::ioctl::_IOC_SIZESHIFT)) as ::std::os::raw::c_ulong
};
}
/// Raw macro to declare a function that returns an ioctl number.
#[macro_export]
macro_rules! ioctl_ioc_nr {
($name:ident, $dir:expr, $ty:expr, $nr:expr, $size:expr) => {
#[allow(non_snake_case)]
#[allow(clippy::cast_lossless)]
pub fn $name() -> ::std::os::raw::c_ulong {
ioctl_expr!($dir, $ty, $nr, $size)
}
};
($name:ident, $dir:expr, $ty:expr, $nr:expr, $size:expr, $($v:ident),+) => {
#[allow(non_snake_case)]
#[allow(clippy::cast_lossless)]
pub fn $name($($v: ::std::os::raw::c_uint),+) -> ::std::os::raw::c_ulong {
ioctl_expr!($dir, $ty, $nr, $size)
}
};
}
/// Declare an ioctl that transfers no data.
#[macro_export]
macro_rules! ioctl_io_nr {
($name:ident, $ty:expr, $nr:expr) => {
ioctl_ioc_nr!($name, $crate::ioctl::_IOC_NONE, $ty, $nr, 0);
};
($name:ident, $ty:expr, $nr:expr, $($v:ident),+) => {
ioctl_ioc_nr!($name, $crate::ioctl::_IOC_NONE, $ty, $nr, 0, $($v),+);
};
}
/// Declare an ioctl that reads data.
#[macro_export]
macro_rules! ioctl_ior_nr {
($name:ident, $ty:expr, $nr:expr, $size:ty) => {
ioctl_ioc_nr!(
$name,
$crate::ioctl::_IOC_READ,
$ty,
$nr,
::std::mem::size_of::<$size>() as u32
);
};
($name:ident, $ty:expr, $nr:expr, $size:ty, $($v:ident),+) => {
ioctl_ioc_nr!(
$name,
$crate::ioctl::_IOC_READ,
$ty,
$nr,
::std::mem::size_of::<$size>() as u32,
$($v),+
);
};
}
/// Declare an ioctl that writes data.
#[macro_export]
macro_rules! ioctl_iow_nr {
($name:ident, $ty:expr, $nr:expr, $size:ty) => {
ioctl_ioc_nr!(
$name,
$crate::ioctl::_IOC_WRITE,
$ty,
$nr,
::std::mem::size_of::<$size>() as u32
);
};
($name:ident, $ty:expr, $nr:expr, $size:ty, $($v:ident),+) => {
ioctl_ioc_nr!(
$name,
$crate::ioctl::_IOC_WRITE,
$ty,
$nr,
::std::mem::size_of::<$size>() as u32,
$($v),+
);
};
}
/// Declare an ioctl that reads and writes data.
#[macro_export]
macro_rules! ioctl_iowr_nr {
($name:ident, $ty:expr, $nr:expr, $size:ty) => {
ioctl_ioc_nr!(
$name,
$crate::ioctl::_IOC_READ | $crate::ioctl::_IOC_WRITE,
$ty,
$nr,
::std::mem::size_of::<$size>() as u32
);
};
($name:ident, $ty:expr, $nr:expr, $size:ty, $($v:ident),+) => {
ioctl_ioc_nr!(
$name,
$crate::ioctl::_IOC_READ | $crate::ioctl::_IOC_WRITE,
$ty,
$nr,
::std::mem::size_of::<$size>() as u32,
$($v),+
);
};
}
pub const _IOC_NRBITS: c_uint = 8;
pub const _IOC_TYPEBITS: c_uint = 8;
pub const _IOC_SIZEBITS: c_uint = 14;
pub const _IOC_DIRBITS: c_uint = 2;
pub const _IOC_NRMASK: c_uint = 255;
pub const _IOC_TYPEMASK: c_uint = 255;
pub const _IOC_SIZEMASK: c_uint = 16383;
pub const _IOC_DIRMASK: c_uint = 3;
pub const _IOC_NRSHIFT: c_uint = 0;
pub const _IOC_TYPESHIFT: c_uint = 8;
pub const _IOC_SIZESHIFT: c_uint = 16;
pub const _IOC_DIRSHIFT: c_uint = 30;
pub const _IOC_NONE: c_uint = 0;
pub const _IOC_WRITE: c_uint = 1;
pub const _IOC_READ: c_uint = 2;
pub const IOC_IN: c_uint = 1_073_741_824;
pub const IOC_OUT: c_uint = 2_147_483_648;
pub const IOC_INOUT: c_uint = 3_221_225_472;
pub const IOCSIZE_MASK: c_uint = 1_073_676_288;
pub const IOCSIZE_SHIFT: c_uint = 16;
// The type of the `req` parameter is `c_int` for the `musl` libc and `c_ulong` for
// other libc implementations. This alias lets the wrappers below build against both.
#[cfg(target_env = "musl")]
type IoctlRequest = c_int;
#[cfg(not(target_env = "musl"))]
type IoctlRequest = c_ulong;
/// Run an ioctl with no arguments.
pub unsafe fn ioctl<F: AsRawFd>(fd: &F, req: c_ulong) -> c_int {
libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, 0)
}
/// Run an ioctl with a single value argument.
pub unsafe fn ioctl_with_val<F: AsRawFd>(fd: &F, req: c_ulong, arg: c_ulong) -> c_int {
libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, arg)
}
/// Run an ioctl with an immutable reference.
pub unsafe fn ioctl_with_ref<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: &T) -> c_int {
libc::ioctl(
fd.as_raw_fd(),
req as IoctlRequest,
arg as *const T as *const c_void,
)
}
/// Run an ioctl with a mutable reference.
pub unsafe fn ioctl_with_mut_ref<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: &mut T) -> c_int {
libc::ioctl(
fd.as_raw_fd(),
req as IoctlRequest,
arg as *mut T as *mut c_void,
)
}
/// Run an ioctl with a raw pointer.
pub unsafe fn ioctl_with_ptr<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: *const T) -> c_int {
libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, arg as *const c_void)
}
/// Run an ioctl with a mutable raw pointer.
pub unsafe fn ioctl_with_mut_ptr<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: *mut T) -> c_int {
libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, arg as *mut c_void)
}
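// Illustrative sketch tying the macros and wrappers together ("kvm" is an assumed open
// handle to /dev/kvm; KVM_GET_API_VERSION is a real argument-less KVM ioctl):
//
// const KVMIO: ::std::os::raw::c_uint = 0xAE;
// ioctl_io_nr!(KVM_GET_API_VERSION, KVMIO, 0x00);
// let version = unsafe { ioctl(&kvm, KVM_GET_API_VERSION()) };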
#[cfg(test)]
mod tests {
const TUNTAP: ::std::os::raw::c_uint = 0x54;
const VHOST: ::std::os::raw::c_uint = 0xAF;
const EVDEV: ::std::os::raw::c_uint = 0x45;
const KVMIO: ::std::os::raw::c_uint = 0xAE;
ioctl_io_nr!(KVM_CREATE_VM, KVMIO, 0x01);
ioctl_ior_nr!(TUNGETFEATURES, TUNTAP, 0xcf, ::std::os::raw::c_uint);
ioctl_iow_nr!(TUNSETQUEUE, TUNTAP, 0xd9, ::std::os::raw::c_int);
ioctl_io_nr!(VHOST_SET_OWNER, VHOST, 0x01);
ioctl_iowr_nr!(VHOST_GET_VRING_BASE, VHOST, 0x12, ::std::os::raw::c_int);
ioctl_iowr_nr!(KVM_GET_MSR_INDEX_LIST, KVMIO, 0x2, ::std::os::raw::c_int);
ioctl_ior_nr!(EVIOCGBIT, EVDEV, 0x20 + evt, [u8; 128], evt);
ioctl_io_nr!(FAKE_IOCTL_2_ARG, EVDEV, 0x01 + x + y, x, y);
#[test]
fn test_ioctl_macros() {
assert_eq!(0x0000_AE01, KVM_CREATE_VM());
assert_eq!(0x0000_AF01, VHOST_SET_OWNER());
assert_eq!(0x8004_54CF, TUNGETFEATURES());
assert_eq!(0x4004_54D9, TUNSETQUEUE());
assert_eq!(0xC004_AE02, KVM_GET_MSR_INDEX_LIST());
assert_eq!(0xC004_AF12, VHOST_GET_VRING_BASE());
assert_eq!(0x8080_4522, EVIOCGBIT(2));
assert_eq!(0x0000_4509, FAKE_IOCTL_2_ARG(3, 5));
}
}


@ -1,78 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
extern crate libc;
mod tempdir;
#[macro_use]
pub mod ioctl;
pub mod errno;
pub mod eventfd;
pub mod file_traits;
pub mod seek_hole;
pub mod signal;
pub mod terminal;
pub mod timerfd;
pub mod write_zeroes;
#[macro_use]
pub mod syslog;
pub mod poll;
pub use crate::tempdir::*;
pub use errno::*;
pub use eventfd::*;
pub use poll::*;
use std::os::unix::io::AsRawFd;
pub use crate::file_traits::{FileSetLen, FileSync};
pub use crate::seek_hole::SeekHole;
pub use crate::write_zeroes::{PunchHole, WriteZeroes};
pub enum FallocateMode {
PunchHole,
ZeroRange,
}
/// Safe wrapper for `fallocate()`.
pub fn fallocate(
file: &dyn AsRawFd,
mode: FallocateMode,
keep_size: bool,
offset: u64,
len: u64,
) -> Result<()> {
let offset = if offset > libc::off64_t::max_value() as u64 {
return Err(Error::new(libc::EINVAL));
} else {
offset as libc::off64_t
};
let len = if len > libc::off64_t::max_value() as u64 {
return Err(Error::new(libc::EINVAL));
} else {
len as libc::off64_t
};
let mut mode = match mode {
FallocateMode::PunchHole => libc::FALLOC_FL_PUNCH_HOLE,
FallocateMode::ZeroRange => libc::FALLOC_FL_ZERO_RANGE,
};
if keep_size {
mode |= libc::FALLOC_FL_KEEP_SIZE;
}
// Safe since we pass in a valid fd and fallocate mode, validate offset and len,
// and check the return value.
let ret = unsafe { libc::fallocate64(file.as_raw_fd(), mode, offset, len) };
if ret < 0 {
errno_result()
} else {
Ok(())
}
}
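// Illustrative usage ("file" is an assumed open, writable File): punch a 4 KiB hole at
// the start of the file without changing its length:
//
// fallocate(&file, FallocateMode::PunchHole, true, 0, 4096)?;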


@ -1,711 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use std::cell::{Cell, Ref, RefCell};
use std::cmp::min;
use std::fs::File;
use std::i32;
use std::i64;
use std::marker::PhantomData;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::ptr::null_mut;
use std::slice;
use std::thread;
use std::time::Duration;
use libc::{
c_int, epoll_create1, epoll_ctl, epoll_event, epoll_wait, EINTR, EPOLLHUP, EPOLLIN, EPOLLOUT,
EPOLL_CLOEXEC, EPOLL_CTL_ADD, EPOLL_CTL_DEL, EPOLL_CTL_MOD,
};
use crate::{errno_result, Error, Result};
macro_rules! handle_eintr_errno {
($x:expr) => {{
let mut res;
loop {
res = $x;
if res != -1 || Error::last() != Error::new(EINTR) {
break;
}
}
res
}};
}
const POLL_CONTEXT_MAX_EVENTS: usize = 16;
/// EpollEvents wraps raw epoll_events; it should only be used with EpollContext.
pub struct EpollEvents(RefCell<[epoll_event; POLL_CONTEXT_MAX_EVENTS]>);
impl EpollEvents {
pub fn new() -> EpollEvents {
EpollEvents(RefCell::new(
[epoll_event { events: 0, u64: 0 }; POLL_CONTEXT_MAX_EVENTS],
))
}
}
impl Default for EpollEvents {
fn default() -> Self {
Self::new()
}
}
/// Trait for a token that can be associated with an `fd` in a `PollContext`.
///
/// Simple enums that have no or primitive variant data can use the `#[derive(PollToken)]`
/// custom derive to implement this trait.
pub trait PollToken {
/// Converts this token into a u64 that can be turned back into a token via `from_raw_token`.
fn as_raw_token(&self) -> u64;
/// Converts a raw token as returned from `as_raw_token` back into a token.
///
/// It is invalid to give a raw token that was not returned via `as_raw_token` from the same
/// `Self`. The implementation can expect that this will never happen as a result of its usage
/// in `PollContext`.
fn from_raw_token(data: u64) -> Self;
}
impl PollToken for usize {
fn as_raw_token(&self) -> u64 {
*self as u64
}
fn from_raw_token(data: u64) -> Self {
data as Self
}
}
impl PollToken for u64 {
fn as_raw_token(&self) -> u64 {
*self as u64
}
fn from_raw_token(data: u64) -> Self {
data as Self
}
}
impl PollToken for u32 {
fn as_raw_token(&self) -> u64 {
u64::from(*self)
}
fn from_raw_token(data: u64) -> Self {
data as Self
}
}
impl PollToken for u16 {
fn as_raw_token(&self) -> u64 {
u64::from(*self)
}
fn from_raw_token(data: u64) -> Self {
data as Self
}
}
impl PollToken for u8 {
fn as_raw_token(&self) -> u64 {
u64::from(*self)
}
fn from_raw_token(data: u64) -> Self {
data as Self
}
}
impl PollToken for () {
fn as_raw_token(&self) -> u64 {
0
}
fn from_raw_token(_data: u64) -> Self {}
}
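// Illustrative hand-written token (real code would normally use the #[derive(PollToken)]
// custom derive mentioned above):
//
// enum Token { Exit, Stdin }
// impl PollToken for Token {
//     fn as_raw_token(&self) -> u64 {
//         match self { Token::Exit => 0, Token::Stdin => 1 }
//     }
//     fn from_raw_token(data: u64) -> Self {
//         match data { 0 => Token::Exit, _ => Token::Stdin }
//     }
// }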
/// An event returned by `PollContext::wait`.
pub struct PollEvent<'a, T> {
event: &'a epoll_event,
token: PhantomData<T>, // Needed to satisfy usage of T
}
impl<'a, T: PollToken> PollEvent<'a, T> {
/// Gets the token associated in `PollContext::add` with this event.
pub fn token(&self) -> T {
T::from_raw_token(self.event.u64)
}
/// True if the `fd` associated with this token in `PollContext::add` is readable.
pub fn readable(&self) -> bool {
self.event.events & (EPOLLIN as u32) != 0
}
/// True if the `fd` associated with this token in `PollContext::add` has been hungup on.
pub fn hungup(&self) -> bool {
self.event.events & (EPOLLHUP as u32) != 0
}
}
/// An iterator over some (sub)set of events returned by `PollContext::wait`.
pub struct PollEventIter<'a, I, T>
where
I: Iterator<Item = &'a epoll_event>,
{
mask: u32,
iter: I,
tokens: PhantomData<[T]>, // Needed to satisfy usage of T
}
impl<'a, I, T> Iterator for PollEventIter<'a, I, T>
where
I: Iterator<Item = &'a epoll_event>,
T: PollToken,
{
type Item = PollEvent<'a, T>;
fn next(&mut self) -> Option<Self::Item> {
let mask = self.mask;
self.iter
.find(|event| (event.events & mask) != 0)
.map(|event| PollEvent {
event,
token: PhantomData,
})
}
}
/// The list of events returned by `PollContext::wait`.
pub struct PollEvents<'a, T> {
count: usize,
events: Ref<'a, [epoll_event; POLL_CONTEXT_MAX_EVENTS]>,
tokens: PhantomData<[T]>, // Needed to satisfy usage of T
}
impl<'a, T: PollToken> PollEvents<'a, T> {
/// Copies the events to an owned structure so the reference to this (and by extension
/// `PollContext`) can be dropped.
pub fn to_owned(&self) -> PollEventsOwned<T> {
PollEventsOwned {
count: self.count,
events: RefCell::new(*self.events),
tokens: PhantomData,
}
}
/// Iterates over each event.
pub fn iter(&self) -> PollEventIter<slice::Iter<epoll_event>, T> {
PollEventIter {
mask: 0xffff_ffff,
iter: self.events[..self.count].iter(),
tokens: PhantomData,
}
}
/// Iterates over each readable event.
pub fn iter_readable(&self) -> PollEventIter<slice::Iter<epoll_event>, T> {
PollEventIter {
mask: EPOLLIN as u32,
iter: self.events[..self.count].iter(),
tokens: PhantomData,
}
}
/// Iterates over each hungup event.
pub fn iter_hungup(&self) -> PollEventIter<slice::Iter<epoll_event>, T> {
PollEventIter {
mask: EPOLLHUP as u32,
iter: self.events[..self.count].iter(),
tokens: PhantomData,
}
}
}
/// A deep copy of the event records from `PollEvents`.
pub struct PollEventsOwned<T> {
count: usize,
events: RefCell<[epoll_event; POLL_CONTEXT_MAX_EVENTS]>,
tokens: PhantomData<T>, // Needed to satisfy usage of T
}
impl<T: PollToken> PollEventsOwned<T> {
/// Takes a reference to the events so that they can be iterated via methods in `PollEvents`.
pub fn as_ref(&self) -> PollEvents<T> {
PollEvents {
count: self.count,
events: self.events.borrow(),
tokens: PhantomData,
}
}
}
/// The set of epoll events to watch for, as taken by `PollContext`.
pub struct WatchingEvents(u32);
impl WatchingEvents {
/// Returns empty Events.
#[inline(always)]
pub fn empty() -> WatchingEvents {
WatchingEvents(0)
}
/// Build Events from raw epoll events (defined in epoll_ctl(2)).
#[inline(always)]
pub fn new(raw: u32) -> WatchingEvents {
WatchingEvents(raw)
}
/// Set read events.
#[inline(always)]
pub fn set_read(self) -> WatchingEvents {
WatchingEvents(self.0 | EPOLLIN as u32)
}
/// Set write events.
#[inline(always)]
pub fn set_write(self) -> WatchingEvents {
WatchingEvents(self.0 | EPOLLOUT as u32)
}
/// Get the underlying epoll events.
pub fn get_raw(&self) -> u32 {
self.0
}
}
/// EpollContext wraps Linux epoll. It provides an interface similar to PollContext.
/// It is thread safe while PollContext is not. It requires the user to pass in a
/// reference to EpollEvents while PollContext does not. Always use PollContext if you
/// don't need to access the same epoll from different threads.
pub struct EpollContext<T> {
epoll_ctx: File,
// Needed to satisfy usage of T
tokens: PhantomData<[T]>,
}
impl<T: PollToken> EpollContext<T> {
/// Creates a new `EpollContext`.
pub fn new() -> Result<EpollContext<T>> {
// Safe because we check the return value.
let epoll_fd = unsafe { epoll_create1(EPOLL_CLOEXEC) };
if epoll_fd < 0 {
return errno_result();
}
Ok(EpollContext {
epoll_ctx: unsafe { File::from_raw_fd(epoll_fd) },
tokens: PhantomData,
})
}
/// Adds the given `fd` to this context and associates the given `token` with the `fd`'s
/// readable events.
///
    /// An `fd` can only be added once and does not need to be kept open. If the `fd` is closed and
    /// no duplicates of it (i.e. the same descriptor added under a different FD number) were added
    /// to this context, events will no longer be reported by `wait`.
pub fn add(&self, fd: &AsRawFd, token: T) -> Result<()> {
self.add_fd_with_events(fd, WatchingEvents::empty().set_read(), token)
}
/// Adds the given `fd` to this context, watching for the specified events and associates the
/// given 'token' with those events.
///
    /// An `fd` can only be added once and does not need to be kept open. If the `fd` is closed and
    /// no duplicates of it (i.e. the same descriptor added under a different FD number) were added
    /// to this context, events will no longer be reported by `wait`.
pub fn add_fd_with_events(&self, fd: &AsRawFd, events: WatchingEvents, token: T) -> Result<()> {
let mut evt = epoll_event {
events: events.get_raw(),
u64: token.as_raw_token(),
};
// Safe because we give a valid epoll FD and FD to watch, as well as a valid epoll_event
// structure. Then we check the return value.
let ret = unsafe {
epoll_ctl(
self.epoll_ctx.as_raw_fd(),
EPOLL_CTL_ADD,
fd.as_raw_fd(),
&mut evt,
)
};
if ret < 0 {
return errno_result();
};
Ok(())
}
/// If `fd` was previously added to this context, the watched events will be replaced with
/// `events` and the token associated with it will be replaced with the given `token`.
pub fn modify(&self, fd: &AsRawFd, events: WatchingEvents, token: T) -> Result<()> {
let mut evt = epoll_event {
events: events.0,
u64: token.as_raw_token(),
};
// Safe because we give a valid epoll FD and FD to modify, as well as a valid epoll_event
// structure. Then we check the return value.
let ret = unsafe {
epoll_ctl(
self.epoll_ctx.as_raw_fd(),
EPOLL_CTL_MOD,
fd.as_raw_fd(),
&mut evt,
)
};
if ret < 0 {
return errno_result();
};
Ok(())
}
/// Deletes the given `fd` from this context.
///
    /// If an `fd`'s token shows up in the list of hangup events, it should be removed either with
    /// this method or by closing/dropping the `fd` (the latter works only if the fd was never
    /// dup()'d or inherited across fork()). Failure to do so will cause the `wait` method to
    /// always return immediately, causing ~100% CPU load.
pub fn delete(&self, fd: &AsRawFd) -> Result<()> {
// Safe because we give a valid epoll FD and FD to stop watching. Then we check the return
// value.
let ret = unsafe {
epoll_ctl(
self.epoll_ctx.as_raw_fd(),
EPOLL_CTL_DEL,
fd.as_raw_fd(),
null_mut(),
)
};
if ret < 0 {
return errno_result();
};
Ok(())
}
/// Waits for any events to occur in FDs that were previously added to this context.
///
    /// The events are level-triggered, meaning that if any events are left unhandled (i.e. not
    /// reading from readable fds and not removing hungup fds), subsequent calls to `wait` will
    /// return immediately. Perpetually failing to handle an event while calling `wait`
    /// degenerates the caller's loop into busy polling, pinning a CPU at ~100% usage.
pub fn wait<'a>(&self, events: &'a EpollEvents) -> Result<PollEvents<'a, T>> {
self.wait_timeout(events, Duration::new(i64::MAX as u64, 0))
}
    /// Like `wait`, except it will block for at most the given `timeout`.
///
/// This may return earlier than `timeout` with zero events if the duration indicated exceeds
/// system limits.
pub fn wait_timeout<'a>(
&self,
events: &'a EpollEvents,
timeout: Duration,
) -> Result<PollEvents<'a, T>> {
let timeout_millis = if timeout.as_secs() as i64 == i64::max_value() {
// We make the convenient assumption that 2^63 seconds is an effectively unbounded time
// frame. This is meant to mesh with `wait` calling us with no timeout.
-1
} else {
            // In cases where the number of milliseconds would overflow an i32, we substitute the
            // maximum timeout, which is ~24.8 days.
let millis = timeout
.as_secs()
.checked_mul(1_000)
.and_then(|ms| ms.checked_add(u64::from(timeout.subsec_nanos()) / 1_000_000))
.unwrap_or(i32::max_value() as u64);
min(i32::max_value() as u64, millis) as i32
};
let ret = {
let mut epoll_events = events.0.borrow_mut();
let max_events = epoll_events.len() as c_int;
// Safe because we give an epoll context and a properly sized epoll_events array
// pointer, which we trust the kernel to fill in properly.
unsafe {
handle_eintr_errno!(epoll_wait(
self.epoll_ctx.as_raw_fd(),
&mut epoll_events[0],
max_events,
timeout_millis
))
}
};
if ret < 0 {
return errno_result();
}
let epoll_events = events.0.borrow();
let events = PollEvents {
count: ret as usize,
events: epoll_events,
tokens: PhantomData,
};
Ok(events)
}
}
impl<T: PollToken> AsRawFd for EpollContext<T> {
fn as_raw_fd(&self) -> RawFd {
self.epoll_ctx.as_raw_fd()
}
}
impl<T: PollToken> IntoRawFd for EpollContext<T> {
fn into_raw_fd(self) -> RawFd {
self.epoll_ctx.into_raw_fd()
}
}
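// Illustrative sketch (not part of the original source): with `EpollContext`
// the caller owns the `EpollEvents` scratch buffer, which is what allows the
// context itself to be shared across threads. Assumes this crate's `EventFd`.
#[allow(dead_code)]
fn epoll_context_example(evt: &crate::eventfd::EventFd) -> Result<()> {
    let ctx: EpollContext<u32> = EpollContext::new()?;
    ctx.add(evt, 1)?;
    evt.write(1)?;
    // Each thread would keep its own `EpollEvents` buffer.
    let events = EpollEvents::new();
    for event in ctx.wait(&events)?.iter_readable() {
        assert_eq!(event.token(), 1);
    }
    Ok(())
}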
/// Used to poll multiple objects that have file descriptors.
///
/// # Example
///
/// ```
/// # use vmm_sys_util::{Result, EventFd, PollContext, PollEvents};
/// # fn test() -> Result<()> {
/// let evt1 = EventFd::new(0)?;
/// let evt2 = EventFd::new(0)?;
/// evt2.write(1)?;
///
/// let ctx: PollContext<u32> = PollContext::new()?;
/// ctx.add(&evt1, 1)?;
/// ctx.add(&evt2, 2)?;
///
/// let pollevents: PollEvents<u32> = ctx.wait()?;
/// let tokens: Vec<u32> = pollevents.iter_readable().map(|e| e.token()).collect();
/// assert_eq!(&tokens[..], &[2]);
/// # Ok(())
/// # }
/// ```
pub struct PollContext<T> {
epoll_ctx: EpollContext<T>,
// We use a RefCell here so that the `wait` method only requires an immutable self reference
// while returning the events (encapsulated by PollEvents). Without the RefCell, `wait` would
// hold a mutable reference that lives as long as its returned reference (i.e. the PollEvents),
// even though that reference is immutable. This is terribly inconvenient for the caller because
// the borrow checking would prevent them from using `delete` and `add` while the events are in
// scope.
events: EpollEvents,
// Hangup busy loop detection variables. See `check_for_hungup_busy_loop`.
hangups: Cell<usize>,
max_hangups: Cell<usize>,
}
impl<T: PollToken> PollContext<T> {
/// Creates a new `PollContext`.
pub fn new() -> Result<PollContext<T>> {
Ok(PollContext {
epoll_ctx: EpollContext::new()?,
events: EpollEvents::new(),
hangups: Cell::new(0),
max_hangups: Cell::new(0),
})
}
/// Adds the given `fd` to this context and associates the given `token` with the `fd`'s
/// readable events.
///
    /// An `fd` can only be added once and does not need to be kept open. If the `fd` is closed and
    /// no duplicates of it (i.e. the same descriptor added under a different FD number) were added
    /// to this context, events will no longer be reported by `wait`.
pub fn add(&self, fd: &AsRawFd, token: T) -> Result<()> {
self.add_fd_with_events(fd, WatchingEvents::empty().set_read(), token)
}
/// Adds the given `fd` to this context, watching for the specified events and associates the
/// given 'token' with those events.
///
    /// An `fd` can only be added once and does not need to be kept open. If the `fd` is closed and
    /// no duplicates of it (i.e. the same descriptor added under a different FD number) were added
    /// to this context, events will no longer be reported by `wait`.
pub fn add_fd_with_events(&self, fd: &AsRawFd, events: WatchingEvents, token: T) -> Result<()> {
self.epoll_ctx.add_fd_with_events(fd, events, token)?;
self.hangups.set(0);
self.max_hangups.set(self.max_hangups.get() + 1);
Ok(())
}
/// If `fd` was previously added to this context, the watched events will be replaced with
/// `events` and the token associated with it will be replaced with the given `token`.
pub fn modify(&self, fd: &AsRawFd, events: WatchingEvents, token: T) -> Result<()> {
self.epoll_ctx.modify(fd, events, token)
}
/// Deletes the given `fd` from this context.
///
    /// If an `fd`'s token shows up in the list of hangup events, it should be removed either with
    /// this method or by closing/dropping the `fd` (the latter works only if the fd was never
    /// dup()'d or inherited across fork()). Failure to do so will cause the `wait` method to
    /// always return immediately, causing ~100% CPU load.
pub fn delete(&self, fd: &AsRawFd) -> Result<()> {
self.epoll_ctx.delete(fd)?;
self.hangups.set(0);
self.max_hangups.set(self.max_hangups.get() - 1);
Ok(())
}
    // This method determines whether the user of `wait` is misusing the `PollContext` by leaving
    // FDs in this `PollContext` that have been shut down or hung up on. Such an FD will cause
    // `wait` to return instantly with a hangup event. If that FD is perpetually left in this
    // context, a busy loop burning ~100% of one CPU will silently occur with no visible
    // malfunction.
//
// How do we know if the client of this context is ignoring hangups? A naive implementation
// would trigger if consecutive wait calls yield hangup events, but there are legitimate cases
// for this, such as two distinct sockets becoming hungup across two consecutive wait calls. A
// smarter implementation would only trigger if `delete` wasn't called between waits that
// yielded hangups. Sadly `delete` isn't the only way to remove an FD from this context. The
// other way is for the client to close the hungup FD, which automatically removes it from this
    // context. For a client that always uses close, such an implementation would trigger too
    // eagerly.
//
// The implementation used here keeps an upper bound of FDs in this context using a counter
// hooked into add/delete (which is imprecise because close can also remove FDs without us
// knowing). The number of consecutive (no add or delete in between) hangups yielded by wait
// calls is counted and compared to the upper bound. If the upper bound is exceeded by the
// consecutive hangups, the implementation triggers the check and logs.
//
    // This implementation has false negatives because the upper bound can be far too high, in the
    // worst case when only close is used instead of delete. However, this method has the
    // advantage of eventually triggering on every genuine busy loop case, requires no dynamic
    // allocations, is fast and constant time to compute, and has no false positives.
fn check_for_hungup_busy_loop(&self, new_hangups: usize) {
let old_hangups = self.hangups.get();
let max_hangups = self.max_hangups.get();
if old_hangups <= max_hangups && old_hangups + new_hangups > max_hangups {
warn!(
"busy poll wait loop with hungup FDs detected on thread {}",
thread::current().name().unwrap_or("")
);
// This panic is helpful for tests of this functionality.
#[cfg(test)]
panic!("hungup busy loop detected");
}
self.hangups.set(old_hangups + new_hangups);
}
/// Waits for any events to occur in FDs that were previously added to this context.
///
    /// The events are level-triggered, meaning that if any events are left unhandled (i.e. not
    /// reading from readable fds and not removing hungup fds), subsequent calls to `wait` will
    /// return immediately. Perpetually failing to handle an event while calling `wait`
    /// degenerates the caller's loop into busy polling, pinning a CPU at ~100% usage.
///
/// # Panics
/// Panics if the returned `PollEvents` structure is not dropped before subsequent `wait` calls.
pub fn wait(&self) -> Result<PollEvents<T>> {
self.wait_timeout(Duration::new(i64::MAX as u64, 0))
}
    /// Like `wait`, except it will block for at most the given `timeout`.
///
/// This may return earlier than `timeout` with zero events if the duration indicated exceeds
/// system limits.
pub fn wait_timeout(&self, timeout: Duration) -> Result<PollEvents<T>> {
let events = self.epoll_ctx.wait_timeout(&self.events, timeout)?;
let hangups = events.iter_hungup().count();
self.check_for_hungup_busy_loop(hangups);
Ok(events)
}
}
impl<T: PollToken> AsRawFd for PollContext<T> {
fn as_raw_fd(&self) -> RawFd {
self.epoll_ctx.as_raw_fd()
}
}
impl<T: PollToken> IntoRawFd for PollContext<T> {
fn into_raw_fd(self) -> RawFd {
self.epoll_ctx.into_raw_fd()
}
}
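// Illustrative sketch (not part of the original source): the pattern the
// hangup detector above expects, i.e. removing an FD from the context as soon
// as it reports a hangup. `fd` must be the same object that was added with
// token 1.
#[allow(dead_code)]
fn handle_hangups(ctx: &PollContext<u32>, fd: &AsRawFd) -> Result<()> {
    let events = ctx.wait()?;
    for event in events.iter_hungup() {
        if event.token() == 1 {
            // Deleting the hungup FD keeps `wait` from returning immediately
            // on every subsequent call.
            ctx.delete(fd)?;
        }
    }
    Ok(())
}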
#[cfg(test)]
mod tests {
use super::*;
use crate::eventfd::EventFd;
use std::os::unix::net::UnixStream;
use std::time::Instant;
#[test]
fn test_poll_context() {
let evt1 = EventFd::new(0).unwrap();
let evt2 = EventFd::new(0).unwrap();
evt1.write(1).unwrap();
evt2.write(1).unwrap();
let ctx: PollContext<u32> = PollContext::new().unwrap();
ctx.add(&evt1, 1).unwrap();
ctx.add(&evt2, 2).unwrap();
let mut evt_count = 0;
while evt_count < 2 {
for event in ctx.wait().unwrap().iter_readable() {
evt_count += 1;
match event.token() {
1 => {
evt1.read().unwrap();
ctx.delete(&evt1).unwrap();
}
2 => {
evt2.read().unwrap();
ctx.delete(&evt2).unwrap();
}
_ => panic!("unexpected token"),
};
}
}
assert_eq!(evt_count, 2);
}
#[test]
fn test_poll_context_overflow() {
const EVT_COUNT: usize = POLL_CONTEXT_MAX_EVENTS * 2 + 1;
let ctx: PollContext<usize> = PollContext::new().unwrap();
let mut evts = Vec::with_capacity(EVT_COUNT);
for i in 0..EVT_COUNT {
let evt = EventFd::new(0).unwrap();
evt.write(1).unwrap();
ctx.add(&evt, i).unwrap();
evts.push(evt);
}
let mut evt_count = 0;
while evt_count < EVT_COUNT {
for event in ctx.wait().unwrap().iter_readable() {
evts[event.token()].read().unwrap();
evt_count += 1;
}
}
}
#[test]
#[should_panic]
fn test_poll_context_hungup() {
let (s1, s2) = UnixStream::pair().unwrap();
let ctx: PollContext<u32> = PollContext::new().unwrap();
ctx.add(&s1, 1).unwrap();
// Causes s1 to receive hangup events, which we purposefully ignore to trip the detection
// logic in `PollContext`.
drop(s2);
// Should easily panic within this many iterations.
for _ in 0..1000 {
ctx.wait().unwrap();
}
}
#[test]
fn test_poll_context_timeout() {
let ctx: PollContext<u32> = PollContext::new().unwrap();
let dur = Duration::from_millis(10);
let start_inst = Instant::now();
ctx.wait_timeout(dur).unwrap();
assert!(start_inst.elapsed() >= dur);
}
}


@ -1,215 +0,0 @@
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: BSD-3-Clause
use std::fs::File;
use std::io::{Error, Result};
use std::os::unix::io::AsRawFd;
#[cfg(target_env = "musl")]
use libc::{c_int, lseek64, ENXIO};
#[cfg(target_env = "gnu")]
use libc::{lseek64, ENXIO, SEEK_DATA, SEEK_HOLE};
/// A trait for seeking to the next hole or non-hole position in a file.
pub trait SeekHole {
/// Seek to the first hole in a file at a position greater than or equal to `offset`.
/// If no holes exist after `offset`, the seek position will be set to the end of the file.
/// If `offset` is at or after the end of the file, the seek position is unchanged, and None is returned.
/// Returns the current seek position after the seek or an error.
fn seek_hole(&mut self, offset: u64) -> Result<Option<u64>>;
/// Seek to the first data in a file at a position greater than or equal to `offset`.
/// If no data exists after `offset`, the seek position is unchanged, and None is returned.
/// Returns the current offset after the seek or an error.
fn seek_data(&mut self, offset: u64) -> Result<Option<u64>>;
}
#[cfg(target_env = "musl")]
pub const SEEK_DATA: c_int = 3;
#[cfg(target_env = "musl")]
pub const SEEK_HOLE: c_int = 4;
/// Safe wrapper for `libc::lseek64()`
fn lseek(file: &mut File, offset: i64, whence: i32) -> Result<Option<u64>> {
// This is safe because we pass a known-good file descriptor.
let res = unsafe { lseek64(file.as_raw_fd(), offset, whence) };
if res < 0 {
// Convert ENXIO into None; pass any other error as-is.
let err = Error::last_os_error();
if let Some(errno) = Error::raw_os_error(&err) {
if errno == ENXIO {
return Ok(None);
}
}
Err(err)
} else {
Ok(Some(res as u64))
}
}
impl SeekHole for File {
fn seek_hole(&mut self, offset: u64) -> Result<Option<u64>> {
lseek(self, offset as i64, SEEK_HOLE)
}
fn seek_data(&mut self, offset: u64) -> Result<Option<u64>> {
lseek(self, offset as i64, SEEK_DATA)
}
}
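// Illustrative sketch (not part of the original source): walking the data
// extents of a (possibly sparse) file with the `SeekHole` trait.
#[allow(dead_code)]
fn data_extents(file: &mut File) -> Result<Vec<(u64, u64)>> {
    let mut extents = Vec::new();
    let mut offset = 0;
    loop {
        let start = match file.seek_data(offset)? {
            Some(s) => s,
            None => break, // no data at or after `offset`
        };
        let end = match file.seek_hole(start)? {
            Some(e) => e,
            None => break, // defensive: a hole (or EOF) always follows data
        };
        extents.push((start, end));
        offset = end;
    }
    Ok(extents)
}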
#[cfg(test)]
mod tests {
use super::*;
use crate::TempDir;
use std::fs::File;
use std::io::{Seek, SeekFrom, Write};
use std::path::PathBuf;
fn seek_cur(file: &mut File) -> u64 {
file.seek(SeekFrom::Current(0)).unwrap()
}
#[test]
fn seek_data() {
let tempdir = TempDir::new("/tmp/seek_data_test").unwrap();
let mut path = PathBuf::from(tempdir.as_path().unwrap());
path.push("test_file");
let mut file = File::create(&path).unwrap();
// Empty file
assert_eq!(file.seek_data(0).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
// File with non-zero length consisting entirely of a hole
file.set_len(0x10000).unwrap();
assert_eq!(file.seek_data(0).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
// seek_data at or after the end of the file should return None
assert_eq!(file.seek_data(0x10000).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
assert_eq!(file.seek_data(0x10001).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
// Write some data to [0x10000, 0x20000)
let b = [0x55u8; 0x10000];
file.seek(SeekFrom::Start(0x10000)).unwrap();
file.write_all(&b).unwrap();
assert_eq!(file.seek_data(0).unwrap(), Some(0x10000));
assert_eq!(seek_cur(&mut file), 0x10000);
// seek_data within data should return the same offset
assert_eq!(file.seek_data(0x10000).unwrap(), Some(0x10000));
assert_eq!(seek_cur(&mut file), 0x10000);
assert_eq!(file.seek_data(0x10001).unwrap(), Some(0x10001));
assert_eq!(seek_cur(&mut file), 0x10001);
assert_eq!(file.seek_data(0x1FFFF).unwrap(), Some(0x1FFFF));
assert_eq!(seek_cur(&mut file), 0x1FFFF);
// Extend the file to add another hole after the data
file.set_len(0x30000).unwrap();
assert_eq!(file.seek_data(0).unwrap(), Some(0x10000));
assert_eq!(seek_cur(&mut file), 0x10000);
assert_eq!(file.seek_data(0x1FFFF).unwrap(), Some(0x1FFFF));
assert_eq!(seek_cur(&mut file), 0x1FFFF);
assert_eq!(file.seek_data(0x20000).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0x1FFFF);
}
#[test]
#[allow(clippy::cyclomatic_complexity)]
fn seek_hole() {
let tempdir = TempDir::new("/tmp/seek_hole_test").unwrap();
let mut path = PathBuf::from(tempdir.as_path().unwrap());
path.push("test_file");
let mut file = File::create(&path).unwrap();
// Empty file
assert_eq!(file.seek_hole(0).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
// File with non-zero length consisting entirely of a hole
file.set_len(0x10000).unwrap();
assert_eq!(file.seek_hole(0).unwrap(), Some(0));
assert_eq!(seek_cur(&mut file), 0);
assert_eq!(file.seek_hole(0xFFFF).unwrap(), Some(0xFFFF));
assert_eq!(seek_cur(&mut file), 0xFFFF);
// seek_hole at or after the end of the file should return None
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x10000).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
assert_eq!(file.seek_hole(0x10001).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
// Write some data to [0x10000, 0x20000)
let b = [0x55u8; 0x10000];
file.seek(SeekFrom::Start(0x10000)).unwrap();
file.write_all(&b).unwrap();
// seek_hole within a hole should return the same offset
assert_eq!(file.seek_hole(0).unwrap(), Some(0));
assert_eq!(seek_cur(&mut file), 0);
assert_eq!(file.seek_hole(0xFFFF).unwrap(), Some(0xFFFF));
assert_eq!(seek_cur(&mut file), 0xFFFF);
// seek_hole within data should return the next hole (EOF)
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x10000).unwrap(), Some(0x20000));
assert_eq!(seek_cur(&mut file), 0x20000);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x10001).unwrap(), Some(0x20000));
assert_eq!(seek_cur(&mut file), 0x20000);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x1FFFF).unwrap(), Some(0x20000));
assert_eq!(seek_cur(&mut file), 0x20000);
// seek_hole at EOF after data should return None
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x20000).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
// Extend the file to add another hole after the data
file.set_len(0x30000).unwrap();
assert_eq!(file.seek_hole(0).unwrap(), Some(0));
assert_eq!(seek_cur(&mut file), 0);
assert_eq!(file.seek_hole(0xFFFF).unwrap(), Some(0xFFFF));
assert_eq!(seek_cur(&mut file), 0xFFFF);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x10000).unwrap(), Some(0x20000));
assert_eq!(seek_cur(&mut file), 0x20000);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x1FFFF).unwrap(), Some(0x20000));
assert_eq!(seek_cur(&mut file), 0x20000);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x20000).unwrap(), Some(0x20000));
assert_eq!(seek_cur(&mut file), 0x20000);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x20001).unwrap(), Some(0x20001));
assert_eq!(seek_cur(&mut file), 0x20001);
// seek_hole at EOF after a hole should return None
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x30000).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
// Write some data to [0x20000, 0x30000)
file.seek(SeekFrom::Start(0x20000)).unwrap();
file.write_all(&b).unwrap();
// seek_hole within [0x20000, 0x30000) should now find the hole at EOF
assert_eq!(file.seek_hole(0x20000).unwrap(), Some(0x30000));
assert_eq!(seek_cur(&mut file), 0x30000);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x20001).unwrap(), Some(0x30000));
assert_eq!(seek_cur(&mut file), 0x30000);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x30000).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
}
}


@ -1,423 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use libc::{
c_int, c_void, pthread_kill, pthread_sigmask, pthread_t, sigaction, sigaddset, sigemptyset,
siginfo_t, sigismember, sigpending, sigset_t, sigtimedwait, timespec, EAGAIN, EINTR, EINVAL,
SIGHUP, SIGSYS, SIG_BLOCK, SIG_UNBLOCK,
};
use errno;
use std::fmt::{self, Display};
use std::io;
use std::mem;
use std::os::unix::thread::JoinHandleExt;
use std::ptr::{null, null_mut};
use std::result;
use std::thread::JoinHandle;
#[derive(Debug)]
pub enum Error {
/// Couldn't create a sigset.
CreateSigset(errno::Error),
/// The wrapped signal has already been blocked.
SignalAlreadyBlocked(c_int),
/// Failed to check if the requested signal is in the blocked set already.
CompareBlockedSignals(errno::Error),
/// The signal could not be blocked.
BlockSignal(errno::Error),
/// The signal mask could not be retrieved.
RetrieveSignalMask(i32),
/// The signal could not be unblocked.
UnblockSignal(errno::Error),
/// Failed to wait for given signal.
ClearWaitPending(errno::Error),
/// Failed to get pending signals.
ClearGetPending(errno::Error),
/// Failed to check if given signal is in the set of pending signals.
ClearCheckPending(errno::Error),
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match self {
CreateSigset(e) => write!(f, "couldn't create a sigset: {}", e),
SignalAlreadyBlocked(num) => write!(f, "signal {} already blocked", num),
CompareBlockedSignals(e) => write!(
f,
"failed to check whether requested signal is in the blocked set: {}",
e,
),
BlockSignal(e) => write!(f, "signal could not be blocked: {}", e),
RetrieveSignalMask(errno) => write!(
f,
"failed to retrieve signal mask: {}",
io::Error::from_raw_os_error(*errno),
),
UnblockSignal(e) => write!(f, "signal could not be unblocked: {}", e),
ClearWaitPending(e) => write!(f, "failed to wait for given signal: {}", e),
ClearGetPending(e) => write!(f, "failed to get pending signals: {}", e),
ClearCheckPending(e) => write!(
f,
"failed to check whether given signal is in the pending set: {}",
e,
),
}
}
}
pub type SignalResult<T> = result::Result<T, Error>;
type SiginfoHandler = extern "C" fn(num: c_int, info: *mut siginfo_t, _unused: *mut c_void) -> ();
pub enum SignalHandler {
Siginfo(SiginfoHandler),
    // TODO: add a `SimpleHandler` when `libc` adds `sa_handler` support to `sigaction`.
}
impl SignalHandler {
fn set_flags(act: &mut sigaction, flag: c_int) {
act.sa_flags = flag;
}
}
/// Fills a `sigaction` structure from the signal handler.
/// Refer to http://man7.org/linux/man-pages/man7/signal.7.html
impl Into<sigaction> for SignalHandler {
fn into(self) -> sigaction {
let mut act: sigaction = unsafe { mem::zeroed() };
match self {
SignalHandler::Siginfo(function) => {
act.sa_sigaction = function as *const () as usize;
}
}
act
}
}
extern "C" {
fn __libc_current_sigrtmin() -> c_int;
fn __libc_current_sigrtmax() -> c_int;
}
/// Returns the minimum (inclusive) real-time signal number.
#[allow(non_snake_case)]
fn SIGRTMIN() -> c_int {
unsafe { __libc_current_sigrtmin() }
}
/// Returns the maximum (inclusive) real-time signal number.
#[allow(non_snake_case)]
fn SIGRTMAX() -> c_int {
unsafe { __libc_current_sigrtmax() }
}
/// Verifies that a signal number is valid: VCPU signals must fall within the OS range for
/// real-time signals, while all others must lie between the minimum (SIGHUP) and maximum
/// (SIGSYS) values.
pub fn validate_signal_num(num: c_int, for_vcpu: bool) -> errno::Result<c_int> {
if for_vcpu {
let actual_num = num + SIGRTMIN();
if actual_num <= SIGRTMAX() {
return Ok(actual_num);
}
} else if SIGHUP <= num && num <= SIGSYS {
return Ok(num);
}
Err(errno::Error::new(EINVAL))
}
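// Illustrative sketch (not part of the original source): vcpu signal numbers
// are offsets from SIGRTMIN, while ordinary signals pass through unchanged.
#[allow(dead_code)]
fn validate_signal_num_examples() {
    // vcpu signal 0 maps to the first real-time signal.
    assert_eq!(validate_signal_num(0, true).unwrap(), SIGRTMIN());
    // A plain signal such as SIGHUP is accepted as-is.
    assert_eq!(validate_signal_num(SIGHUP, false).unwrap(), SIGHUP);
}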
/// Registers `handler` as the signal handler of signum `num`.
///
/// Uses `sigaction` to register the handler.
///
/// This is considered unsafe because the given handler will be called asynchronously, interrupting
/// whatever the thread was doing and therefore must only do async-signal-safe operations.
/// `flag`: `SA_SIGINFO`, optionally combined with `SA_RESTART` if interrupted calls should be
/// restarted once the signal has been handled.
pub unsafe fn register_signal_handler(
num: i32,
handler: SignalHandler,
for_vcpu: bool,
flag: c_int,
) -> errno::Result<()> {
let num = validate_signal_num(num, for_vcpu)?;
let mut act: sigaction = handler.into();
SignalHandler::set_flags(&mut act, flag);
match sigaction(num, &act, null_mut()) {
0 => Ok(()),
_ => errno::errno_result(),
}
}
/// Creates `sigset` from an array of signal numbers.
///
/// This is a helper function used when we want to manipulate signals.
pub fn create_sigset(signals: &[c_int]) -> errno::Result<sigset_t> {
// sigset will actually be initialized by sigemptyset below.
let mut sigset: sigset_t = unsafe { mem::zeroed() };
// Safe - return value is checked.
let ret = unsafe { sigemptyset(&mut sigset) };
if ret < 0 {
return errno::errno_result();
}
for signal in signals {
// Safe - return value is checked.
let ret = unsafe { sigaddset(&mut sigset, *signal) };
if ret < 0 {
return errno::errno_result();
}
}
Ok(sigset)
}
/// Retrieves the signal mask of the current thread as a vector of c_ints.
pub fn get_blocked_signals() -> SignalResult<Vec<c_int>> {
let mut mask = Vec::new();
// Safe - return values are checked.
unsafe {
let mut old_sigset: sigset_t = mem::zeroed();
let ret = pthread_sigmask(SIG_BLOCK, null(), &mut old_sigset as *mut sigset_t);
if ret < 0 {
return Err(Error::RetrieveSignalMask(ret));
}
for num in 0..=SIGRTMAX() {
if sigismember(&old_sigset, num) > 0 {
mask.push(num);
}
}
}
Ok(mask)
}
/// Masks given signal.
///
/// If the signal is already blocked, the call will fail with `Error::SignalAlreadyBlocked`.
pub fn block_signal(num: c_int) -> SignalResult<()> {
let sigset = create_sigset(&[num]).map_err(Error::CreateSigset)?;
// Safe - return values are checked.
unsafe {
let mut old_sigset: sigset_t = mem::zeroed();
let ret = pthread_sigmask(SIG_BLOCK, &sigset, &mut old_sigset as *mut sigset_t);
if ret < 0 {
return Err(Error::BlockSignal(errno::Error::last()));
}
let ret = sigismember(&old_sigset, num);
if ret < 0 {
return Err(Error::CompareBlockedSignals(errno::Error::last()));
} else if ret > 0 {
return Err(Error::SignalAlreadyBlocked(num));
}
}
Ok(())
}
/// Unmasks given signal.
pub fn unblock_signal(num: c_int) -> SignalResult<()> {
let sigset = create_sigset(&[num]).map_err(Error::CreateSigset)?;
// Safe - return value is checked.
let ret = unsafe { pthread_sigmask(SIG_UNBLOCK, &sigset, null_mut()) };
if ret < 0 {
return Err(Error::UnblockSignal(errno::Error::last()));
}
Ok(())
}
/// Clears pending signal.
pub fn clear_signal(num: c_int) -> SignalResult<()> {
let sigset = create_sigset(&[num]).map_err(Error::CreateSigset)?;
while {
// This is safe as we are rigorously checking return values
// of libc calls.
unsafe {
let mut siginfo: siginfo_t = mem::zeroed();
let ts = timespec {
tv_sec: 0,
tv_nsec: 0,
};
// Attempt to consume one instance of pending signal. If signal
// is not pending, the call will fail with EAGAIN or EINTR.
let ret = sigtimedwait(&sigset, &mut siginfo, &ts);
if ret < 0 {
let e = errno::Error::last();
match e.errno() {
EAGAIN | EINTR => {}
_ => {
return Err(Error::ClearWaitPending(errno::Error::last()));
}
}
}
            // This sigset will actually be filled by the `sigpending` call below.
let mut chkset: sigset_t = mem::zeroed();
// See if more instances of the signal are pending.
let ret = sigpending(&mut chkset);
if ret < 0 {
return Err(Error::ClearGetPending(errno::Error::last()));
}
let ret = sigismember(&chkset, num);
if ret < 0 {
return Err(Error::ClearCheckPending(errno::Error::last()));
}
            // This is the do-while loop condition.
ret != 0
}
} {}
Ok(())
}
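// Illustrative sketch (not part of the original source): a typical
// block -> consume-pending -> unblock sequence for a signal that a thread
// wants to handle synchronously, using the first real-time signal.
#[allow(dead_code)]
fn mask_round_trip() -> SignalResult<()> {
    let num = SIGRTMIN();
    block_signal(num)?;
    // ... the signal may be raised and left pending here ...
    clear_signal(num)?; // consume any pending instances
    unblock_signal(num)
}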
/// Trait for threads that can be signalled via `pthread_kill`.
///
/// Note that this is only useful for signals between SIGRTMIN and SIGRTMAX because these are
/// guaranteed not to be used by the C runtime.
///
/// This is marked unsafe because the implementation of this trait must guarantee that the returned
/// pthread_t is valid and has a lifetime at least that of the trait object.
pub unsafe trait Killable {
fn pthread_handle(&self) -> pthread_t;
/// Sends the signal `num + SIGRTMIN` to this killable thread.
///
/// The value of `num + SIGRTMIN` must not exceed `SIGRTMAX`.
fn kill(&self, num: i32) -> errno::Result<()> {
let num = validate_signal_num(num, true)?;
// Safe because we ensure we are using a valid pthread handle, a valid signal number, and
// check the return result.
let ret = unsafe { pthread_kill(self.pthread_handle(), num) };
if ret < 0 {
return errno::errno_result();
}
Ok(())
}
}
// Safe because we fulfill our contract of returning a genuine pthread handle.
unsafe impl<T> Killable for JoinHandle<T> {
fn pthread_handle(&self) -> pthread_t {
// JoinHandleExt::as_pthread_t gives c_ulong, convert it to the
// type that the libc crate expects
assert_eq!(mem::size_of::<pthread_t>(), mem::size_of::<usize>());
self.as_pthread_t() as usize as pthread_t
}
}
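// Illustrative sketch (not part of the original source): signalling a worker
// thread through its `JoinHandle`. A handler for vcpu signal 0 must already be
// registered (see `register_signal_handler`), or the default action will
// terminate the whole process.
#[allow(dead_code)]
fn kill_worker() -> errno::Result<()> {
    let worker = std::thread::spawn(|| { /* worker loop */ });
    // Sends SIGRTMIN + 0 to the worker thread.
    worker.kill(0)
}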
#[cfg(test)]
mod tests {
use super::*;
use libc::SA_SIGINFO;
use std::thread;
use std::time::Duration;
static mut SIGNAL_HANDLER_CALLED: bool = false;
extern "C" fn handle_signal(_: c_int, _: *mut siginfo_t, _: *mut c_void) {
unsafe {
SIGNAL_HANDLER_CALLED = true;
}
}
#[test]
fn test_register_signal_handler() {
unsafe {
// testing bad value
assert!(register_signal_handler(
SIGRTMAX(),
SignalHandler::Siginfo(handle_signal),
true,
SA_SIGINFO
)
.is_err());
format!(
"{:?}",
register_signal_handler(
SIGRTMAX(),
SignalHandler::Siginfo(handle_signal),
true,
SA_SIGINFO
)
);
assert!(register_signal_handler(
0,
SignalHandler::Siginfo(handle_signal),
true,
SA_SIGINFO
)
.is_ok());
assert!(register_signal_handler(
libc::SIGSYS,
SignalHandler::Siginfo(handle_signal),
false,
SA_SIGINFO
)
.is_ok());
}
}
#[test]
#[allow(clippy::empty_loop)]
fn test_killing_thread() {
let killable = thread::spawn(|| thread::current().id());
let killable_id = killable.join().unwrap();
assert_ne!(killable_id, thread::current().id());
// We install a signal handler for the specified signal; otherwise the whole process will
// be brought down when the signal is received, as part of the default behaviour. Signal
// handlers are global, so we install this before starting the thread.
unsafe {
register_signal_handler(0, SignalHandler::Siginfo(handle_signal), true, SA_SIGINFO)
.expect("failed to register vcpu signal handler");
}
let killable = thread::spawn(|| loop {});
let res = killable.kill(SIGRTMAX());
assert!(res.is_err());
format!("{:?}", res);
unsafe {
assert!(!SIGNAL_HANDLER_CALLED);
}
assert!(killable.kill(0).is_ok());
// We're waiting to detect that the signal handler has been called.
const MAX_WAIT_ITERS: u32 = 20;
let mut iter_count = 0;
loop {
thread::sleep(Duration::from_millis(100));
if unsafe { SIGNAL_HANDLER_CALLED } {
break;
}
iter_count += 1;
// timeout if we wait too long
assert!(iter_count <= MAX_WAIT_ITERS);
}
// Our signal handler doesn't do anything which influences the killable thread, so the
// previous signal is effectively ignored. If we were to join killable here, we would block
// forever as the loop keeps running. Since we don't join, the thread will become detached
// as the handle is dropped, and will be killed when the process/main thread exits.
}
}


@ -1,643 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//! Facilities for sending log message to syslog.
//!
//! Every function exported by this module is thread-safe. Each function will silently fail until
//! `syslog::init()` is called and returns `Ok`.
//!
//! # Examples
//!
//! ```
//! #[macro_use]
//! extern crate vmm_sys_util;
//!
//! use vmm_sys_util::syslog::init;
//! fn main() {
//! if let Err(e) = init() {
//! println!("failed to initiailize syslog: {}", e);
//! return;
//! }
//! warn!("this is your {} warning", "final");
//! error!("something went horribly wrong: {}", "out of RAMs");
//! }
//! ```
use std::env;
use std::ffi::CString;
use std::ffi::{OsStr, OsString};
use std::fmt::{self, Display};
use std::fs::File;
use std::io;
use std::io::{stderr, Cursor, ErrorKind, Write};
use std::mem;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::os::unix::net::UnixDatagram;
use std::path::PathBuf;
use std::ptr::null;
use std::str::from_utf8;
use std::sync::{Mutex as StdMutex, MutexGuard, Once, ONCE_INIT};
use libc::{
c_char, c_long, closelog, fcntl, gethostname, localtime_r, openlog, pid_t, syscall, time,
time_t, tm, F_GETFD, LOG_NDELAY, LOG_PERROR, LOG_PID, LOG_USER,
};
/// Temporarily define the linux-x86_64 `getpid` syscall number here.
#[allow(non_upper_case_globals)]
pub const SYS_getpid: c_long = 39;
const SYSLOG_PATH: &str = "/dev/log";
/// The priority (i.e. severity) of a syslog message.
///
/// See syslog man pages for information on their semantics.
#[derive(Copy, Clone, Debug)]
pub enum Priority {
Emergency = 0,
Alert = 1,
Critical = 2,
Error = 3,
Warning = 4,
Notice = 5,
Info = 6,
Debug = 7,
}
impl fmt::Display for Priority {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Priority::Emergency => write!(f, "EMERGENCY"),
Priority::Alert => write!(f, "ALERT"),
Priority::Critical => write!(f, "CRITICAL"),
Priority::Error => write!(f, "ERROR"),
Priority::Warning => write!(f, "WARNING"),
Priority::Notice => write!(f, "NOTICE"),
Priority::Info => write!(f, "INFO"),
Priority::Debug => write!(f, "DEBUG"),
}
}
}
/// The facility of a syslog message.
///
/// See syslog man pages for information on their semantics.
pub enum Facility {
Kernel = 0,
User = 1 << 3,
Mail = 2 << 3,
Daemon = 3 << 3,
Auth = 4 << 3,
Syslog = 5 << 3,
Lpr = 6 << 3,
News = 7 << 3,
Uucp = 8 << 3,
Local0 = 16 << 3,
Local1 = 17 << 3,
Local2 = 18 << 3,
Local3 = 19 << 3,
Local4 = 20 << 3,
Local5 = 21 << 3,
Local6 = 22 << 3,
Local7 = 23 << 3,
}
/// Errors returned by `syslog::init()`.
#[derive(Debug)]
pub enum Error {
/// Initialization was never attempted.
NeverInitialized,
/// Initialization has previously failed and can not be retried.
Poisoned,
/// Error while creating socket.
Socket(io::Error),
/// Error while attempting to connect socket.
Connect(io::Error),
    /// There was an error using `open` to get the lowest file descriptor.
    GetLowestFd(io::Error),
    /// The guess of libc's file descriptor for the syslog connection was invalid.
    InvalidFd,
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match self {
NeverInitialized => write!(f, "initialization was never attempted"),
Poisoned => write!(f, "initialization previously failed and cannot be retried"),
Socket(e) => write!(f, "failed to create socket: {}", e),
Connect(e) => write!(f, "failed to connect socket: {}", e),
GetLowestFd(e) => write!(f, "failed to get lowest file descriptor: {}", e),
InvalidFd => write!(f, "guess of fd for syslog connection was invalid"),
}
}
}
fn get_hostname() -> Result<String, ()> {
let mut hostname: [u8; 256] = [b'\0'; 256];
// Safe because we give a valid pointer to a buffer of the indicated length and check for the
// result.
let ret = unsafe { gethostname(hostname.as_mut_ptr() as *mut c_char, hostname.len()) };
if ret == -1 {
return Err(());
}
let len = hostname.iter().position(|&v| v == b'\0').ok_or(())?;
Ok(from_utf8(&hostname[..len]).map_err(|_| ())?.to_string())
}
fn get_proc_name() -> Option<String> {
env::args_os()
.next()
.map(PathBuf::from)
.and_then(|s| s.file_name().map(OsStr::to_os_string))
.map(OsString::into_string)
.and_then(Result::ok)
}
// Uses libc's openlog function to get a socket to the syslogger. By getting the socket this way, as
// opposed to connecting to the syslogger directly, libc's internal state gets initialized for other
// libraries (e.g. minijail) that make use of libc's syslog function. Note that this function
// depends on no other threads or signal handlers being active in this process because they might
// create FDs.
//
// TODO(zachr): Once https://android-review.googlesource.com/470998 lands, there won't be any
// libraries in use that hard depend on libc's syslogger. Remove this and go back to making the
// connection directly once minijail is ready.
fn openlog_and_get_socket() -> Result<UnixDatagram, Error> {
// closelog first in case there was already a file descriptor open. Safe because it takes no
// arguments and just closes an open file descriptor. Does nothing if the file descriptor
// was not already open.
unsafe {
closelog();
}
let file_path = CString::new("/dev/null").unwrap();
unsafe {
// Ordinarily libc's FD for the syslog connection can't be accessed, but we can guess that the
// FD that openlog will be getting is the lowest unused FD. To guarantee that an FD is opened in
// this function we use the LOG_NDELAY to tell openlog to connect to the syslog now. To get the
// lowest unused FD, we open a dummy file (which the manual says will always return the lowest
// fd), and then close that fd. Voilà, we now know the lowest numbered FD. The call to openlog
// will make use of that FD, and then we just wrap a `UnixDatagram` around it for ease of use.
let fd = libc::open(file_path.as_ptr(), libc::O_RDONLY);
if fd < 0 {
let err = io::Error::last_os_error();
return Err(Error::GetLowestFd(err));
}
// Safe because openlog accesses no pointers because `ident` is null, only valid flags are
// used, and it returns no error.
openlog(null(), LOG_NDELAY | LOG_PERROR | LOG_PID, LOG_USER);
// For safety, ensure the fd we guessed is valid. The `fcntl` call itself only reads the
// file descriptor table of the current process, which is trivially safe.
if fcntl(fd, F_GETFD) >= 0 {
Ok(UnixDatagram::from_raw_fd(fd))
} else {
Err(Error::InvalidFd)
}
}
}
struct State {
stderr: bool,
socket: Option<UnixDatagram>,
file: Option<File>,
hostname: Option<String>,
proc_name: Option<String>,
}
impl State {
fn new() -> Result<State, Error> {
let s = openlog_and_get_socket()?;
Ok(State {
stderr: true,
socket: Some(s),
file: None,
hostname: get_hostname().ok(),
proc_name: get_proc_name(),
})
}
}
static STATE_ONCE: Once = ONCE_INIT;
static mut STATE: *const StdMutex<State> = 0 as *const _;
fn new_mutex_ptr<T>(inner: T) -> *const StdMutex<T> {
Box::into_raw(Box::new(StdMutex::new(inner)))
}
/// Initialize the syslog connection and internal variables.
///
/// This should only be called once per process before any other threads have been spawned or any
/// signal handlers have been registered. Every call made after the first will have no effect
/// besides returning `Ok` or `Err` appropriately.
pub fn init() -> Result<(), Error> {
let mut err = Error::Poisoned;
STATE_ONCE.call_once(|| match State::new() {
// Safe because STATE mutation is guarded by `Once`.
Ok(state) => unsafe { STATE = new_mutex_ptr(state) },
Err(e) => err = e,
});
if unsafe { STATE.is_null() } {
Err(err)
} else {
Ok(())
}
}
fn lock() -> Result<MutexGuard<'static, State>, Error> {
// Safe because we assume that STATE is always in either a valid or NULL state.
let state_ptr = unsafe { STATE };
if state_ptr.is_null() {
return Err(Error::NeverInitialized);
}
// Safe because STATE only mutates once and we checked for NULL.
let state = unsafe { &*state_ptr };
let guard = match state.lock() {
Ok(guard) => guard,
_ => panic!("mutex is poisoned"),
};
Ok(guard)
}
// Attempts to lock and retrieve the state. Returns from the function silently on failure.
macro_rules! lock {
() => {
match lock() {
Ok(s) => s,
_ => return,
};
};
}
/// Replaces the hostname reported in each syslog message.
///
/// The default hostname is whatever `gethostname()` returned when `vmm_sys_util::syslog::init()` was first
/// called.
///
/// Does nothing if syslog was never initialized.
pub fn set_hostname<T: Into<String>>(hostname: T) {
let mut state = lock!();
state.hostname = Some(hostname.into());
}
/// Replaces the process name reported in each syslog message.
///
/// The default process name is the _file name_ of `argv[0]`. For example, if this program was
/// invoked as
///
/// ```bash
/// $ path/to/app --delete everything
/// ```
///
/// the default process name would be _app_.
///
/// Does nothing if syslog was never initialized.
pub fn set_proc_name<T: Into<String>>(proc_name: T) {
let mut state = lock!();
state.proc_name = Some(proc_name.into());
}
/// Enables or disables echoing log messages to the syslog.
///
/// The default behavior is **enabled**.
///
/// If `enable` goes from `true` to `false`, the syslog connection is closed. The connection is
/// reopened if `enable` is set to `true` after it became `false`.
///
/// Returns an error if syslog was never initialized or the syslog connection failed to be
/// established.
///
/// # Arguments
/// * `enable` - `true` to enable echoing to syslog, `false` to disable echoing to syslog.
pub fn echo_syslog(enable: bool) -> Result<(), Error> {
let state_ptr = unsafe { STATE };
if state_ptr.is_null() {
return Err(Error::NeverInitialized);
}
let mut state = lock().map_err(|_| Error::Poisoned)?;
match state.socket.take() {
Some(_) if enable => {}
Some(s) => {
// Because `openlog_and_get_socket` actually just "borrows" the syslog FD, this module
// does not own the syslog connection and therefore should not destroy it.
mem::forget(s);
}
None if enable => {
let s = openlog_and_get_socket()?;
state.socket = Some(s);
}
_ => {}
}
Ok(())
}
/// Replaces the optional `File` to echo log messages to.
///
/// The default behavior is to not echo to a file. Passing `None` to this function restores that
/// behavior.
///
/// Does nothing if syslog was never initialized.
///
/// # Arguments
/// * `file` - `Some(file)` to echo to `file`, `None` to disable echoing to the file previously passed to `echo_file`.
pub fn echo_file(file: Option<File>) {
let mut state = lock!();
state.file = file;
}
/// Enables or disables echoing log messages to the `std::io::stderr()`.
///
/// The default behavior is **enabled**.
///
/// Does nothing if syslog was never initialized.
///
/// # Arguments
/// * `enable` - `true` to enable echoing to stderr, `false` to disable echoing to stderr.
pub fn echo_stderr(enable: bool) {
let mut state = lock!();
state.stderr = enable;
}
/// Retrieves the file descriptors owned by the global syslogger.
///
/// Does nothing if syslog was never initialized. If there are any file descriptors, they will be
/// pushed into `fds`.
///
/// Note that the `stderr` file descriptor is never added, as it is not owned by syslog.
#[allow(clippy::redundant_closure)]
pub fn push_fds(fds: &mut Vec<RawFd>) {
let state = lock!();
fds.extend(state.socket.iter().map(|s| s.as_raw_fd()));
fds.extend(state.file.iter().map(|f| f.as_raw_fd()));
}
/// Should only be called after `init()` was called.
fn send_buf(socket: &UnixDatagram, buf: &[u8]) {
const SEND_RETRY: usize = 2;
for _ in 0..SEND_RETRY {
match socket.send(&buf[..]) {
Ok(_) => break,
Err(e) => match e.kind() {
ErrorKind::ConnectionRefused
| ErrorKind::ConnectionReset
| ErrorKind::ConnectionAborted
| ErrorKind::NotConnected => {
let res = socket.connect(SYSLOG_PATH);
if res.is_err() {
break;
}
}
_ => {}
},
}
}
}
fn get_localtime() -> tm {
unsafe {
// Safe because tm is just a struct of plain data.
let mut tm: tm = mem::zeroed();
let mut now: time_t = 0;
// Safe because we give time a valid pointer and can never fail.
time(&mut now as *mut _);
// Safe because we give localtime_r valid pointers and can never fail.
localtime_r(&now, &mut tm as *mut _);
tm
}
}
/// Records a log message with the given details.
///
/// Note that this will fail silently if syslog was not initialized.
///
/// # Arguments
/// * `pri` - The `Priority` (i.e. severity) of the log message.
/// * `fac` - The `Facility` of the log message. Usually `Facility::User` should be used.
/// * `file_name` - Name of the file that generated the log.
/// * `line` - Line number within `file_name` that generated the log.
/// * `args` - The log's message to record, in the form of `format_args!()` return value
///
/// # Examples
///
/// ```
/// # use vmm_sys_util::syslog::{init, log, Priority, Facility};
/// # fn main() {
/// # if let Err(e) = init() {
/// # println!("failed to initiailize syslog: {}", e);
/// # return;
/// # }
/// log(Priority::Error,
/// Facility::User,
/// file!(),
/// line!(),
/// format_args!("hello syslog"));
/// # }
/// ```
#[allow(clippy::redundant_closure)]
pub fn log(pri: Priority, fac: Facility, file_name: &str, line: u32, args: fmt::Arguments) {
const MONTHS: [&str; 12] = [
"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
];
let mut state = lock!();
let mut buf = [0u8; 1024];
if let Some(ref socket) = state.socket {
let tm = get_localtime();
let prifac = (pri as u8) | (fac as u8);
let (res, len) = {
let mut buf_cursor = Cursor::new(&mut buf[..]);
(
write!(
&mut buf_cursor,
"<{}>{} {:02} {:02}:{:02}:{:02} {} {}[{}]: [{}:{}] {}",
prifac,
MONTHS[tm.tm_mon as usize],
tm.tm_mday,
tm.tm_hour,
tm.tm_min,
tm.tm_sec,
state.hostname.as_ref().map(|s| s.as_ref()).unwrap_or("-"),
state.proc_name.as_ref().map(|s| s.as_ref()).unwrap_or("-"),
unsafe { syscall(SYS_getpid as c_long) as pid_t },
//getpid(),
file_name,
line,
args
),
buf_cursor.position() as usize,
)
};
if res.is_ok() {
send_buf(&socket, &buf[..len]);
}
}
let (res, len) = {
let mut buf_cursor = Cursor::new(&mut buf[..]);
(
writeln!(&mut buf_cursor, "[{}:{}:{}] {}", pri, file_name, line, args),
buf_cursor.position() as usize,
)
};
if res.is_ok() {
if let Some(ref mut file) = state.file {
let _ = file.write_all(&buf[..len]);
}
if state.stderr {
let _ = stderr().write_all(&buf[..len]);
}
}
}
/// A macro for logging at an arbitrary priority level.
///
/// Note that this will fail silently if syslog was not initialized.
#[macro_export]
macro_rules! log {
($pri:expr, $($args:tt)+) => ({
$crate::syslog::log($pri, $crate::syslog::Facility::User, file!(), line!(), format_args!($($args)+))
})
}
/// A macro for logging an error.
///
/// Note that this will fail silently if syslog was not initialized.
#[macro_export]
macro_rules! error {
($($args:tt)+) => (log!($crate::syslog::Priority::Error, $($args)*))
}
/// A macro for logging a warning.
///
/// Note that this will fail silently if syslog was not initialized.
#[macro_export]
macro_rules! warn {
($($args:tt)+) => (log!($crate::syslog::Priority::Warning, $($args)*))
}
/// A macro for logging info.
///
/// Note that this will fail silently if syslog was not initialized.
#[macro_export]
macro_rules! info {
($($args:tt)+) => (log!($crate::syslog::Priority::Info, $($args)*))
}
/// A macro for logging debug information.
///
/// Note that this will fail silently if syslog was not initialized.
#[macro_export]
macro_rules! debug {
($($args:tt)+) => (log!($crate::syslog::Priority::Debug, $($args)*))
}
#[cfg(test)]
mod tests {
use super::*;
use libc::{shm_open, shm_unlink, O_CREAT, O_EXCL, O_RDWR};
use std::ffi::CStr;
use std::io::{Read, Seek, SeekFrom};
use std::os::unix::io::FromRawFd;
#[test]
fn test_init_syslog() {
init().unwrap();
}
#[test]
fn test_fds() {
init().unwrap();
let mut fds = Vec::new();
push_fds(&mut fds);
assert!(!fds.is_empty());
for fd in fds {
assert!(fd >= 0);
}
}
#[test]
fn test_syslog_log() {
init().unwrap();
log(
Priority::Error,
Facility::User,
file!(),
line!(),
format_args!("hello syslog"),
);
}
#[test]
fn test_proc_name() {
init().unwrap();
log(
Priority::Error,
Facility::User,
file!(),
line!(),
format_args!("before proc name"),
);
set_proc_name("sys_util-test");
log(
Priority::Error,
Facility::User,
file!(),
line!(),
format_args!("after proc name"),
);
}
#[test]
#[allow(clippy::zero_prefixed_literal)]
fn test_syslog_file() {
init().unwrap();
let shm_name = CStr::from_bytes_with_nul(b"/crosvm_shm\0").unwrap();
let mut file = unsafe {
shm_unlink(shm_name.as_ptr());
let fd = shm_open(shm_name.as_ptr(), O_RDWR | O_CREAT | O_EXCL, 0666);
assert!(fd >= 0, "error creating shared memory;");
File::from_raw_fd(fd)
};
let syslog_file = file.try_clone().expect("error cloning shared memory file");
echo_file(Some(syslog_file));
const TEST_STR: &str = "hello shared memory file";
log(
Priority::Error,
Facility::User,
file!(),
line!(),
format_args!("{}", TEST_STR),
);
file.seek(SeekFrom::Start(0))
.expect("error seeking shared memory file");
let mut buf = String::new();
file.read_to_string(&mut buf)
.expect("error reading shared memory file");
assert!(buf.contains(TEST_STR));
}
#[test]
fn test_macros() {
init().unwrap();
error!("this is an error {}", 3);
warn!("this is a warning {}", "uh oh");
info!("this is info {}", true);
debug!("this is debug info {:?}", Some("helpful stuff"));
}
}


@ -1,104 +0,0 @@
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: BSD-3-Clause
use std::ffi::CString;
use std::ffi::OsStr;
use std::ffi::OsString;
use std::fs;
use std::os::unix::ffi::OsStringExt;
use std::path::Path;
use std::path::PathBuf;
use libc;
use crate::{errno_result, Result};
/// Create and remove a temporary directory. The directory will be maintained for the lifetime of
/// the `TempDir` object.
pub struct TempDir {
path: Option<PathBuf>,
}
impl TempDir {
    /// Creates a new temporary directory.
/// The directory will be removed when the object goes out of scope.
///
/// # Examples
///
/// ```
/// # use std::path::Path;
/// # use std::path::PathBuf;
/// # use vmm_sys_util::TempDir;
/// # fn test_create_temp_dir() -> Result<(), ()> {
/// let t = TempDir::new("/tmp/testdir").map_err(|_| ())?;
/// assert!(t.as_path().unwrap().exists());
/// # Ok(())
/// # }
/// ```
pub fn new<P: AsRef<OsStr>>(prefix: P) -> Result<TempDir> {
let mut dir_string = prefix.as_ref().to_os_string();
dir_string.push("XXXXXX");
        // Unwrap this result: the bytes of a valid path cannot contain an interior null.
let dir_name = CString::new(dir_string.into_vec()).unwrap();
let mut dir_bytes = dir_name.into_bytes_with_nul();
let ret = unsafe {
// Creating the directory isn't unsafe. The fact that it modifies the guts of the path
// is also OK because it only overwrites the last 6 Xs added above.
libc::mkdtemp(dir_bytes.as_mut_ptr() as *mut libc::c_char)
};
if ret.is_null() {
return errno_result();
}
        dir_bytes.pop(); // Remove the null because `from_vec` can't handle it.
Ok(TempDir {
path: Some(PathBuf::from(OsString::from_vec(dir_bytes))),
})
}
/// Removes the temporary directory. Calling this is optional as dropping a `TempDir` object
/// will also remove the directory. Calling remove explicitly allows for better error handling.
pub fn remove(mut self) -> Result<()> {
let path = self.path.take();
path.map_or(Ok(()), fs::remove_dir_all)?;
Ok(())
}
    /// Returns the path to the tempdir if it is currently valid.
pub fn as_path(&self) -> Option<&Path> {
self.path.as_ref().map(|ref p| p.as_path())
}
}
impl Drop for TempDir {
fn drop(&mut self) {
if let Some(ref p) = self.path {
// Nothing can be done here if this returns an error.
let _ = fs::remove_dir_all(p);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn create_dir() {
let t = TempDir::new("/tmp/asdf").unwrap();
let path = t.as_path().unwrap();
assert!(path.exists());
assert!(path.is_dir());
assert!(path.starts_with("/tmp/"));
}
#[test]
fn remove_dir() {
let t = TempDir::new("/tmp/asdf").unwrap();
let path = t.as_path().unwrap().to_owned();
assert!(t.remove().is_ok());
assert!(!path.exists());
}
}


@ -1,155 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use std::io::StdinLock;
use std::mem::zeroed;
use std::os::unix::io::RawFd;
use libc::{
c_int, fcntl, isatty, read, tcgetattr, tcsetattr, termios, ECHO, F_GETFL, F_SETFL, ICANON,
ISIG, O_NONBLOCK, STDIN_FILENO, TCSANOW,
};
use crate::errno::{errno_result, Result};
fn modify_mode<F: FnOnce(&mut termios)>(fd: RawFd, f: F) -> Result<()> {
// Safe because we check the return value of isatty.
if unsafe { isatty(fd) } != 1 {
return Ok(());
}
// The following pair are safe because termios gets totally overwritten by tcgetattr and we
// check the return result.
let mut termios: termios = unsafe { zeroed() };
let ret = unsafe { tcgetattr(fd, &mut termios as *mut _) };
if ret < 0 {
return errno_result();
}
let mut new_termios = termios;
f(&mut new_termios);
// Safe because the syscall will only read the extent of termios and we check the return result.
let ret = unsafe { tcsetattr(fd, TCSANOW, &new_termios as *const _) };
if ret < 0 {
return errno_result();
}
Ok(())
}
fn get_flags(fd: RawFd) -> Result<c_int> {
// Safe because no third parameter is expected and we check the return result.
let ret = unsafe { fcntl(fd, F_GETFL) };
if ret < 0 {
return errno_result();
}
Ok(ret)
}
fn set_flags(fd: RawFd, flags: c_int) -> Result<()> {
// Safe because we supply the third parameter and we check the return result.
let ret = unsafe { fcntl(fd, F_SETFL, flags) };
if ret < 0 {
return errno_result();
}
Ok(())
}
/// Trait for file descriptors that are TTYs, according to `isatty(3)`.
///
/// This is marked unsafe because the implementation must promise that the returned RawFd is a valid
/// fd and that the lifetime of the returned fd is at least that of the trait object.
pub unsafe trait Terminal {
/// Gets the file descriptor of the TTY.
fn tty_fd(&self) -> RawFd;
/// Set this terminal's mode to canonical mode (`ICANON | ECHO | ISIG`).
fn set_canon_mode(&self) -> Result<()> {
modify_mode(self.tty_fd(), |t| t.c_lflag |= ICANON | ECHO | ISIG)
}
/// Set this terminal's mode to raw mode (`!(ICANON | ECHO | ISIG)`).
fn set_raw_mode(&self) -> Result<()> {
modify_mode(self.tty_fd(), |t| t.c_lflag &= !(ICANON | ECHO | ISIG))
}
/// Sets the non-blocking mode of this terminal's file descriptor.
///
/// If `non_block` is `true`, then `read_raw` will not block. If `non_block` is `false`, then
/// `read_raw` may block if there is nothing to read.
fn set_non_block(&self, non_block: bool) -> Result<()> {
let old_flags = get_flags(self.tty_fd())?;
let new_flags = if non_block {
old_flags | O_NONBLOCK
} else {
old_flags & !O_NONBLOCK
};
if new_flags != old_flags {
set_flags(self.tty_fd(), new_flags)?
}
Ok(())
}
/// Reads up to `out.len()` bytes from this terminal without any buffering.
///
/// This may block, depending on whether non-blocking mode was enabled with `set_non_block`
/// and whether there are any bytes to read. If there is at least one byte that is readable,
/// this will not block.
fn read_raw(&self, out: &mut [u8]) -> Result<usize> {
// Safe because read will only modify the pointer up to the length we give it and we check
// the return result.
let ret = unsafe { read(self.tty_fd(), out.as_mut_ptr() as *mut _, out.len()) };
if ret < 0 {
return errno_result();
}
Ok(ret as usize)
}
}
// Safe because we return a genuine terminal fd that never changes and shares our lifetime.
unsafe impl<'a> Terminal for StdinLock<'a> {
fn tty_fd(&self) -> RawFd {
STDIN_FILENO
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs::File;
use std::io;
use std::os::unix::io::AsRawFd;
use std::path::Path;
unsafe impl Terminal for File {
fn tty_fd(&self) -> RawFd {
self.as_raw_fd()
}
}
#[test]
fn test_a_tty() {
let stdin_handle = io::stdin();
let stdin = stdin_handle.lock();
assert!(stdin.set_canon_mode().is_ok());
assert!(stdin.set_raw_mode().is_ok());
assert!(stdin.set_raw_mode().is_ok());
assert!(stdin.set_canon_mode().is_ok());
assert!(stdin.set_non_block(true).is_ok());
let mut out = [0u8; 0];
assert!(stdin.read_raw(&mut out[..]).is_ok());
}
#[test]
fn test_a_non_tty() {
let file = File::open(Path::new("/dev/zero")).unwrap();
assert!(file.set_canon_mode().is_ok());
}
}
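// Usage sketch (hypothetical `poll_stdin_demo` helper; assumes the `Terminal`
// impl for `StdinLock` above): switch stdin to raw, non-blocking mode, poll
// for one byte, then restore canonical mode.
fn poll_stdin_demo() -> Result<usize> {
    let stdin_handle = std::io::stdin();
    let stdin = stdin_handle.lock();
    stdin.set_raw_mode()?;
    stdin.set_non_block(true)?;
    let mut buf = [0u8; 1];
    // In non-blocking mode, read_raw() fails (e.g. EAGAIN) when no byte is pending.
    let n = stdin.read_raw(&mut buf).unwrap_or(0);
    stdin.set_canon_mode()?;
    Ok(n)
}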

View File

@ -1,173 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-clause file.
use std::fs::File;
use std::mem;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::ptr;
use std::time::Duration;
use libc::{self, timerfd_create, timerfd_gettime, timerfd_settime, CLOCK_MONOTONIC, TFD_CLOEXEC};
use crate::errno::{errno_result, Result};
/// A safe wrapper around a Linux timerfd (man 2 timerfd_create).
pub struct TimerFd(File);
impl TimerFd {
/// Creates a new [`TimerFd`](struct.TimerFd.html).
///
/// The timer is initially disarmed and must be armed by calling [`reset`](fn.reset.html).
pub fn new() -> Result<TimerFd> {
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe { timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC) };
if ret < 0 {
return errno_result();
}
// Safe because we uniquely own the file descriptor.
Ok(TimerFd(unsafe { File::from_raw_fd(ret) }))
}
/// Sets the timer to expire after `dur`.
///
/// If `interval` is not `None`, it represents the period for repeated expirations after the
/// initial expiration. Otherwise the timer will expire just once. Any previously set duration
/// and repeating interval are cancelled.
pub fn reset(&mut self, dur: Duration, interval: Option<Duration>) -> Result<()> {
// Safe because we are zero-initializing a struct with only primitive member fields.
let mut spec: libc::itimerspec = unsafe { mem::zeroed() };
spec.it_value.tv_sec = dur.as_secs() as libc::time_t;
// nsec always fits in i32 because subsec_nanos is defined to be less than one billion.
let nsec = dur.subsec_nanos() as i32;
spec.it_value.tv_nsec = libc::c_long::from(nsec);
if let Some(int) = interval {
spec.it_interval.tv_sec = int.as_secs() as libc::time_t;
// nsec always fits in i32 because subsec_nanos is defined to be less than one billion.
let nsec = int.subsec_nanos() as i32;
spec.it_interval.tv_nsec = libc::c_long::from(nsec);
}
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe { timerfd_settime(self.as_raw_fd(), 0, &spec, ptr::null_mut()) };
if ret < 0 {
return errno_result();
}
Ok(())
}
/// Waits until the timer expires.
///
/// The return value represents the number of times the timer
/// has expired since the last time `wait` was called. If the timer has not yet expired once
/// this call will block until it does.
pub fn wait(&mut self) -> Result<u64> {
let mut count = 0u64;
// Safe because this will only modify |buf| and we check the return value.
let ret = unsafe {
libc::read(
self.as_raw_fd(),
&mut count as *mut _ as *mut libc::c_void,
mem::size_of_val(&count),
)
};
if ret < 0 {
return errno_result();
}
// The bytes in the buffer are guaranteed to be in native byte-order so we don't need to
// use from_le or from_be.
Ok(count)
}
/// Returns `true` if the timer is currently armed.
pub fn is_armed(&self) -> Result<bool> {
// Safe because we are zero-initializing a struct with only primitive member fields.
let mut spec: libc::itimerspec = unsafe { mem::zeroed() };
// Safe because timerfd_gettime is trusted to only modify `spec`.
let ret = unsafe { timerfd_gettime(self.as_raw_fd(), &mut spec) };
if ret < 0 {
return errno_result();
}
Ok(spec.it_value.tv_sec != 0 || spec.it_value.tv_nsec != 0)
}
/// Disarms the timer.
pub fn clear(&mut self) -> Result<()> {
// Safe because we are zero-initializing a struct with only primitive member fields.
let spec: libc::itimerspec = unsafe { mem::zeroed() };
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe { timerfd_settime(self.as_raw_fd(), 0, &spec, ptr::null_mut()) };
if ret < 0 {
return errno_result();
}
Ok(())
}
}
impl AsRawFd for TimerFd {
fn as_raw_fd(&self) -> RawFd {
self.0.as_raw_fd()
}
}
impl FromRawFd for TimerFd {
unsafe fn from_raw_fd(fd: RawFd) -> Self {
TimerFd(File::from_raw_fd(fd))
}
}
impl IntoRawFd for TimerFd {
fn into_raw_fd(self) -> RawFd {
self.0.into_raw_fd()
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::thread::sleep;
use std::time::{Duration, Instant};
#[test]
fn test_one_shot() {
let mut tfd = TimerFd::new().expect("failed to create timerfd");
assert_eq!(tfd.is_armed().unwrap(), false);
let dur = Duration::from_millis(200);
let now = Instant::now();
tfd.reset(dur, None).expect("failed to arm timer");
assert_eq!(tfd.is_armed().unwrap(), true);
let count = tfd.wait().expect("unable to wait for timer");
assert_eq!(count, 1);
assert!(now.elapsed() >= dur);
}
#[test]
fn test_repeating() {
let mut tfd = TimerFd::new().expect("failed to create timerfd");
let dur = Duration::from_millis(200);
let interval = Duration::from_millis(100);
tfd.reset(dur, Some(interval)).expect("failed to arm timer");
sleep(dur * 3);
let count = tfd.wait().expect("unable to wait for timer");
assert!(count >= 5, "count = {}", count);
}
}
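// Usage sketch (hypothetical `tick_demo` helper; assumes the `TimerFd` API
// above): arm a periodic timer, sleep past several expirations, then collect
// the expiration count with a single wait().
fn tick_demo() -> Result<u64> {
    let mut tfd = TimerFd::new()?;
    // First expiration after 10ms, then one every 10ms.
    tfd.reset(Duration::from_millis(10), Some(Duration::from_millis(10)))?;
    std::thread::sleep(Duration::from_millis(35));
    // Returns how many times the timer fired since the last wait().
    tfd.wait()
}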

View File

@ -1,172 +0,0 @@
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: BSD-3-Clause
use std::cmp::min;
use std::fs::File;
use std::io::{self, Seek, SeekFrom, Write};
use crate::fallocate;
use crate::FallocateMode;
/// A trait for deallocating space in a file.
pub trait PunchHole {
/// Replace a range of bytes with a hole.
fn punch_hole(&mut self, offset: u64, length: u64) -> io::Result<()>;
}
impl PunchHole for File {
fn punch_hole(&mut self, offset: u64, length: u64) -> io::Result<()> {
fallocate(self, FallocateMode::PunchHole, true, offset, length)
.map_err(|e| io::Error::from_raw_os_error(e.errno()))
}
}
/// A trait for writing zeroes to a stream.
pub trait WriteZeroes {
/// Write `length` bytes of zeroes to the stream, returning how many bytes were written.
fn write_zeroes(&mut self, length: usize) -> io::Result<usize>;
}
impl<T: PunchHole + Seek + Write> WriteZeroes for T {
fn write_zeroes(&mut self, length: usize) -> io::Result<usize> {
// Try to punch a hole first.
let offset = self.seek(SeekFrom::Current(0))?;
if let Ok(()) = self.punch_hole(offset, length as u64) {
// Advance the seek cursor as if we had done a real write().
self.seek(SeekFrom::Current(length as i64))?;
return Ok(length);
}
// punch_hole() failed; fall back to writing a buffer of zeroes
// until we have written up to length.
let buf_size = min(length, 0x10000);
let buf = vec![0u8; buf_size];
let mut nwritten: usize = 0;
while nwritten < length {
let remaining = length - nwritten;
let write_size = min(remaining, buf_size);
nwritten += self.write(&buf[0..write_size])?;
}
Ok(length)
}
}
#[cfg(test)]
#[allow(clippy::unused_io_amount)]
mod tests {
use super::*;
use std::fs::OpenOptions;
use std::io::{Read, Seek, SeekFrom};
use std::path::PathBuf;
use crate::TempDir;
#[test]
fn simple_test() {
let tempdir = TempDir::new("/tmp/write_zeroes_test").unwrap();
let mut path = PathBuf::from(tempdir.as_path().unwrap());
path.push("file");
let mut f = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)
.unwrap();
f.set_len(16384).unwrap();
// Write buffer of non-zero bytes to offset 1234
let orig_data = [0x55u8; 5678];
f.seek(SeekFrom::Start(1234)).unwrap();
f.write(&orig_data).unwrap();
// Read back the data plus some overlap on each side
let mut readback = [0u8; 16384];
f.seek(SeekFrom::Start(0)).unwrap();
f.read(&mut readback).unwrap();
// Bytes before the write should still be 0
for read in readback[0..1234].iter() {
assert_eq!(*read, 0);
}
// Bytes that were just written should be 0x55
for read in readback[1234..(1234 + 5678)].iter() {
assert_eq!(*read, 0x55);
}
// Bytes after the written area should still be 0
for read in readback[(1234 + 5678)..].iter() {
assert_eq!(*read, 0);
}
// Overwrite some of the data with zeroes
f.seek(SeekFrom::Start(2345)).unwrap();
f.write_zeroes(4321).expect("write_zeroes failed");
// Verify seek position after write_zeroes()
assert_eq!(f.seek(SeekFrom::Current(0)).unwrap(), 2345 + 4321);
// Read back the data and verify that it is now zero
f.seek(SeekFrom::Start(0)).unwrap();
f.read(&mut readback).unwrap();
// Bytes before the write should still be 0
for read in readback[0..1234].iter() {
assert_eq!(*read, 0);
}
// Original data should still exist before the write_zeroes region
for read in readback[1234..2345].iter() {
assert_eq!(*read, 0x55);
}
// The write_zeroes region should now be zero
for read in readback[2345..(2345 + 4321)].iter() {
assert_eq!(*read, 0);
}
// Original data should still exist after the write_zeroes region
for read in readback[(2345 + 4321)..(1234 + 5678)].iter() {
assert_eq!(*read, 0x55);
}
// The rest of the file should still be 0
for read in readback[(1234 + 5678)..].iter() {
assert_eq!(*read, 0);
}
}
#[test]
fn large_write_zeroes() {
let tempdir = TempDir::new("/tmp/write_zeroes_test").unwrap();
let mut path = PathBuf::from(tempdir.as_path().unwrap());
path.push("file");
let mut f = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)
.unwrap();
f.set_len(16384).unwrap();
// Write buffer of non-zero bytes
let orig_data = [0x55u8; 0x20000];
f.seek(SeekFrom::Start(0)).unwrap();
f.write(&orig_data).unwrap();
// Overwrite some of the data with zeroes
f.seek(SeekFrom::Start(0)).unwrap();
f.write_zeroes(0x10001).expect("write_zeroes failed");
// Verify seek position after write_zeroes()
assert_eq!(f.seek(SeekFrom::Current(0)).unwrap(), 0x10001);
// Read back the data and verify that it is now zero
let mut readback = [0u8; 0x20000];
f.seek(SeekFrom::Start(0)).unwrap();
f.read(&mut readback).unwrap();
// The write_zeroes region should now be zero
for read in readback[0..0x10001].iter() {
assert_eq!(*read, 0);
}
// Original data should still exist after the write_zeroes region
for read in readback[0x10001..0x20000].iter() {
assert_eq!(*read, 0x55);
}
}
}
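// Usage sketch (hypothetical `zero_range` helper; assumes the traits above):
// zero a byte range of a file, letting write_zeroes() try punch_hole() first
// and silently fall back to writing a zero-filled buffer.
fn zero_range(f: &mut File, offset: u64, len: usize) -> io::Result<usize> {
    f.seek(SeekFrom::Start(offset))?;
    f.write_zeroes(len)
}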

View File

@ -1,155 +0,0 @@
steps:
- label: "build-gnu-x86"
commands:
- cargo build --release
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "build-gnu-x86-mmap"
commands:
- cargo build --release --features=backend-mmap
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "build-gnu-arm-mmap"
commands:
- cargo build --release --features=backend-mmap
retry:
automatic: false
agents:
platform: arm.metal
os: linux
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "build-musl-arm-mmap"
commands:
- cargo build --release --features=backend-mmap --target aarch64-unknown-linux-musl
retry:
automatic: false
agents:
platform: arm.metal
os: linux
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "style"
command: cargo fmt --all -- --check
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "unittests-gnu-x86"
commands:
- cargo test --all-features
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "unittests-musl-x86"
commands:
- cargo test --all-features --target x86_64-unknown-linux-musl
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "unittests-gnu-arm"
commands:
- cargo test --all-features
retry:
automatic: false
agents:
platform: arm.metal
os: linux
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "unittests-musl-arm"
commands:
- cargo test --all-features --target aarch64-unknown-linux-musl
retry:
automatic: false
agents:
platform: arm.metal
os: linux
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "clippy-x86"
commands:
- cargo clippy --all
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "check-warnings-x86"
commands:
- RUSTFLAGS="-D warnings" cargo check --all-targets
retry:
automatic: false
agents:
platform: x86_64.metal
os: linux
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "check-warnings-arm"
commands:
- RUSTFLAGS="-D warnings" cargo check --all-targets
retry:
automatic: false
agents:
platform: arm.metal
os: linux
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true

View File

@ -1,79 +0,0 @@
steps:
- label: "build-msvc-x86"
commands:
- cargo build --release
retry:
automatic: true
agents:
platform: x86_64
os: windows
plugins:
- petrutlucian94/docker#v3.1.1:
image: "lpetrut/rust_win_buildtools"
always-pull: true
- label: "build-msvc-x86-mmap"
commands:
- cargo build --release --features=backend-mmap
retry:
automatic: true
agents:
platform: x86_64
os: windows
plugins:
- petrutlucian94/docker#v3.1.1:
image: "lpetrut/rust_win_buildtools"
always-pull: true
- label: "style"
command: cargo fmt --all -- --check
retry:
automatic: true
agents:
platform: x86_64
os: windows
plugins:
- petrutlucian94/docker#v3.1.1:
image: "lpetrut/rust_win_buildtools"
always-pull: true
- label: "unittests-msvc-x86"
commands:
- cargo test --all-features
retry:
automatic: true
agents:
platform: x86_64
os: windows
plugins:
- petrutlucian94/docker#v3.1.1:
image: "lpetrut/rust_win_buildtools"
always-pull: true
- label: "clippy-x86"
commands:
- cargo clippy --all
retry:
automatic: true
agents:
platform: x86_64
os: windows
plugins:
- petrutlucian94/docker#v3.1.1:
image: "lpetrut/rust_win_buildtools"
always-pull: true
- label: "check-warnings-x86"
commands:
- cargo check --all-targets
retry:
automatic: true
agents:
platform: x86_64
os: windows
plugins:
- petrutlucian94/docker#v3.1.1:
image: "lpetrut/rust_win_buildtools"
always-pull: true
environment:
- "RUSTFLAGS=-D warnings"

View File

@ -1 +0,0 @@
{"files":{".buildkite/pipeline.linux.yml":"a792da4b923a91eb9a590da2d057f89b37b86ebcf483c344813a1abd5957854d",".buildkite/pipeline.windows.yml":"15e21819ef7321c79181ae7f04aafc0c3ceff709a800edd8994f15a9454d6405",".cargo/config":"c2f1c2fd93436e068cfb14eef3dff8a79d25d1f03c11baf6acbcfbdc9fd3a465","Cargo.toml":"459c6688ce88cac6767971e8053b03a14e05aec157a927134c34d5497c84cf68","DESIGN.md":"aa60ac0a1d59179c253c7be0e496f956344cd0bf41f01f86c9a28575ea433785","LICENSE":"cfc7749b96f63bd31c3c42b5c471bf756814053e847c10f3eb003417bc523d30","README.md":"2f4aa7c6dbb257a9345d17cec07b0731cae37dc216d25b41272061861bf984b8","THIRD-PARTY":"a6d3ebd1c2f37d4fd83d0676621f695fc0cc2d8c6e646cdbb831b46e0650c208","TODO.md":"c844f03be6631843e90d13b3410df031b07ee16db4a3c7cbda7e89557e9be46b","src/address.rs":"dbc3fa642269bb6f9fb5e176dddc9fcedbe024949f32c43577f52ef04fe0cf09","src/bytes.rs":"2d0c58f53e40f51b5177a234a5062f700ea153c779661c8c34f5d9340f24dd05","src/endian.rs":"948548db28334bceeefe72b09560a700d62993372bb89f46e39990038690f516","src/guest_memory.rs":"2be506fe58244f51279c473a7d0d8e1fc41ef638e9c16cc9e6f071bcbecb3b9b","src/lib.rs":"f65c3f36632da8df7b165578bfd8db010af11c1736fd9855614435e76dd3c390","src/mmap.rs":"9f449c7dac3830128bc1875ca0cc7e7407534eeb7cc1e1db0b0d118aba8de07b","src/mmap_unix.rs":"9a1d71bf1bb7952c25e3796f992953390b6db5a32ef40f2dda1275f866dd9ff0","src/mmap_windows.rs":"bd0091dd90eddede03739ab89648a38f513310437dafdf1f449603bb15a7a2ed","src/volatile_memory.rs":"06bdb496a75f3a190b7092daffce01158acdf73654badd72e697b674eedc4fac"},"package":null}

View File

@ -1,2 +0,0 @@
[target.aarch64-unknown-linux-musl]
rustflags = [ "-C", "target-feature=+crt-static", "-C", "link-arg=-lgcc"]

View File

@ -1,17 +0,0 @@
[package]
name = "vm-memory"
version = "0.1.0"
authors = ["Liu Jiang <gerry@linux.alibaba.com>"]
repository = "https://github.com/rust-vmm/vm-memory"
license = "Apache-2.0"
[features]
default = []
backend-mmap = []
[dependencies]
libc = ">=0.2.39"
[dev-dependencies]
matches = ">=0"
tempfile = ">=3.0.2"

View File

@ -1,79 +0,0 @@
## Objectives
For a typical hypervisor, there are several components, such as the boot loader, virtual device drivers, virtio backend drivers and vhost drivers, that need to access the VM's physical memory. The `vm-memory` crate aims to provide a set of stable traits to decouple VM memory consumers from VM memory providers. Based on these traits, VM memory consumers can access the VM's physical memory without knowing the implementation details of the VM memory provider. Thus hypervisor components such as boot loaders, virtual device drivers, virtio backend drivers and vhost drivers can be shared and reused by multiple hypervisors.
## API Principles
- Define consumer-side interfaces to access the VM's physical memory.
- Do not define provider-side interfaces to supply VM physical memory.
The `vm-memory` crate focuses on defining consumer-side interfaces to access the VM's physical memory; it does not define how the underlying VM memory provider is implemented. Lightweight hypervisors like crosvm and Firecracker may make assumptions about the structure of the VM's physical memory and implement a lightweight backend to access it, while hypervisors like QEMU may implement a high-performance, full-functionality backend with fewer assumptions.
## Architecture
The `vm-memory` crate is derived from two upstream projects:
- [crosvm project](https://chromium.googlesource.com/chromiumos/platform/crosvm/) commit 186eb8b0db644892e8ffba8344efe3492bb2b823
- [firecracker project](https://firecracker-microvm.github.io/) commit 80128ea61b305a27df1f751d70415b04b503eae7
To be hypervisor-neutral, the high-level abstractions have been heavily refactored. The new `vm-memory` crate can be divided into four logical parts:
### Abstraction of Generic Address Space
Build generic abstractions to describe and access an address space as below:
- AddressValue: Stores the raw value of an address. Typically u32, u64 or usize is used to store the raw value. Pointers, such as \*u8, can't be used because they don't implement the Add and Sub traits.
- Address: Encapsulates an AddressValue object and defines methods to access it.
- Bytes: Common trait for volatile access to memory. The `Bytes` trait can be parameterized with newtypes that represent addresses, in order to enforce that addresses are used with the right "kind" of volatile memory.
- VolatileMemory: Basic implementation of volatile access to memory, implements `Bytes<usize>`.
To make the abstraction as generic as possible, all of the above core traits only define methods to access the address space; they never define methods to manage (create, delete, insert, remove etc.) address spaces. In this way, address space consumers (virtio device drivers, vhost-user drivers, boot loaders etc.) are decoupled from the address space provider (typically a hypervisor).
### Specialization for Virtual Machine Physical Address Space
The generic address space crates are specialized to access VM's physical memory with following traits:
- GuestAddress: represents a guest physical address (GPA). On ARM64, a 32-bit hypervisor may be used to support a 64-bit VM. For simplicity, u64 is used to store the raw value regardless of whether it is a 32-bit or 64-bit virtual machine.
- GuestMemoryRegion: used to represent a continuous region of VM's physical memory.
- GuestMemory: used to represent a collection of GuestMemoryRegion objects. The main responsibilities of the GuestMemory trait are:
- hide the detail of accessing VM's physical address (for example complex hierarchical structures).
- map a request address to a GuestMemoryRegion object and relay the request to it.
- handle cases where an access request spans two or more GuestMemoryRegion objects.
The VM memory consumers, such as virtio device drivers, vhost drivers and boot loaders etc, should only rely on traits and structs defined here to access VM's physical memory.
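As a quick illustration of this decoupling, here is a minimal sketch (the `read_magic` function is a made-up consumer, not a crate API) of a function written purely against the GuestMemory trait, so it works with any provider implementation:
```rust
use vm_memory::{Bytes, GuestAddress, GuestMemory};

// Generic over the provider: GuestMemoryMmap today, any other GuestMemory tomorrow.
fn read_magic<M: GuestMemory>(
    mem: &M,
    addr: GuestAddress,
) -> Result<u32, <M as Bytes<GuestAddress>>::E> {
    // read_obj() comes from Bytes<GuestAddress>, which every GuestMemory implements.
    mem.read_obj::<u32>(addr)
}
```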
### A Sample and Default Backend Implementation Based on mmap()
Provide a default and sample implementation of the GuestMemory trait by mmapping the VM's physical memory into the current process. Three data structures are defined here:
- MmapRegion: mmaps a continuous range of the VM's physical memory into the current process and provides methods to access the mmapped memory.
- GuestRegionMmap: a wrapper structure that maps a VM physical address to an (mmap\_region, offset) tuple.
- GuestMemoryMmap: manages a collection of GuestRegionMmap objects for a VM.
One of the main responsibilities of the GuestMemoryMmap object is to handle the use cases where an access request crosses a memory region boundary. This scenario may be triggered when memory hotplug is supported. So there's a tradeoff between functionality and code complexity:
- use the following pattern for simplicity; it fails when the request crosses a region boundary. This is the current default behavior in the crosvm and firecracker projects:
```rust
let guest_memory_mmap: GuestMemoryMmap = ...
let addr: GuestAddress = ...
let buf = &mut [0u8; 5];
let result = guest_memory_mmap.find_region(addr).unwrap().write(buf, addr);
```
- use the following pattern to support requests that cross region boundaries:
```rust
let guest_memory_mmap: GuestMemoryMmap = ...
let addr: GuestAddress = ...
let buf = &mut [0u8; 5];
let result = guest_memory_mmap.write(buf, addr);
```
### Utilities and Helpers
The following utility and helper traits/macros are imported from the [crosvm project](https://chromium.googlesource.com/chromiumos/platform/crosvm/) with minor changes:
- ByteValued (originally `DataInit`): Types for which it is safe to initialize from raw data. A type `T` is `ByteValued` if and only if it can be initialized by reading its contents from a byte array. This is generally true for all plain-old-data structs. It is notably not true for any type that includes a reference.
- {Le,Be}\_{16,32,64}: Explicit endian types useful for embedding in structs or reinterpreting data.
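A short sketch of how these helpers combine (`DemoHeader` and `read_header` are illustrative assumptions, not crate types): a plain-old-data struct with explicit-endian fields can be marked ByteValued and then read directly out of guest memory:
```rust
use vm_memory::{Bytes, ByteValued, GuestAddress, GuestMemoryMmap, Le32};

// Plain old data: fixed layout, and every byte pattern is a valid value.
#[repr(C)]
#[derive(Copy, Clone, Default)]
struct DemoHeader {
    magic: Le32,
    len: Le32,
}
// Safe only because DemoHeader is plain old data, as the trait requires.
unsafe impl ByteValued for DemoHeader {}

// GuestMemoryMmap requires the backend-mmap feature.
fn read_header(mem: &GuestMemoryMmap, addr: GuestAddress) -> Option<DemoHeader> {
    mem.read_obj::<DemoHeader>(addr).ok()
}
```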
## Relationships between Traits, Structs and new Types
Traits:
- Address inherits AddressValue
- GuestMemoryRegion inherits Bytes<MemoryRegionAddress, E = Error> (must be implemented)
- GuestMemory implements Bytes<GuestAddress> (generic implementation)
New Types:
- GuestAddress: Address\<u64\>
- MemoryRegionAddress: Address\<u64\>
Structs:
- MmapRegion implements VolatileMemory
- GuestRegionMmap implements Bytes<MemoryRegionAddress> + GuestMemoryRegion
- GuestMemoryMmap implements GuestMemory
- VolatileSlice: Bytes<usize, E = volatile_memory::Error> + VolatileMemory

View File

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,47 +0,0 @@
# vm-memory
A library to access a virtual machine's physical memory.
For a typical hypervisor, several components, such as the boot loader, virtual device drivers, virtio backend drivers and vhost drivers, need to access the VM's physical memory. The `vm-memory` crate provides a set of traits to decouple VM memory consumers from VM memory providers. Based on these traits, VM memory consumers can access the VM's physical memory without knowing the implementation details of the VM memory provider, so hypervisor components built on these traits can be shared and reused by multiple hypervisors.
## Platform Support
- Arch: x86, AMD64, ARM64
- OS: Linux/Unix/Windows
## Usage
First, add the following to your `Cargo.toml`:
```toml
[dependencies]
vm-memory = "0.1"
```
Next, add this to your crate root:
```rust
extern crate vm_memory;
```
## Example
- Create VM physical memory objects in hypervisor-specific ways, using the default GuestMemoryMmap as an example:
```rust
fn provide_mem_to_virt_dev() {
let gm = GuestMemoryMmap::new(&[(GuestAddress(0), 0x1000), (GuestAddress(0x1000), 0x1000)]).unwrap();
virt_device_io(&gm);
}
```
- Consumers access the VM's physical memory:
```rust
fn virt_device_io<T: GuestMemory>(mem: &T) {
let sample_buf = &[1, 2, 3, 4, 5];
assert_eq!(mem.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5);
let buf = &mut [0u8; 5];
assert_eq!(mem.read(buf, GuestAddress(0xffc)).unwrap(), 5);
assert_eq!(buf, sample_buf);
}
```
## Documentation & References
- [Design of The `vm-memory` Crate](DESIGN.md)
- [TODO List](TODO.md)
- [The rust-vmm Project](https://github.com/rust-vmm/)
## License
This project is licensed under
- Apache License, Version 2.0, (LICENSE or http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,27 +0,0 @@
// Copyright 2017 The Chromium OS Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,3 +0,0 @@
### TODO List
- Abstraction layer to separate VM memory management from VM memory accessors.
- Help needed to refine documentation and usage examples.

View File

@ -1,170 +0,0 @@
// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Traits to represent an address within an address space.
//!
//! Two traits are defined to present an address within an address space:
//! - [AddressValue](trait.AddressValue.html): stores the raw value of an address. Typically u32,
//! u64 or usize is used to store the raw value. But pointers, such as *u8, can't be used because
//! it doesn't implement the Add and Sub traits.
//! - [Address](trait.Address.html): encapsulates an AddressValue object and defines methods to
//! access and manipulate it.
use std::cmp::{Eq, Ord, PartialEq, PartialOrd};
use std::ops::{Add, BitAnd, BitOr, Sub};
/// Simple helper trait used to store a raw address value.
pub trait AddressValue {
/// Type of the address raw value.
type V: Copy
+ PartialEq
+ Eq
+ PartialOrd
+ Ord
+ Add<Output = Self::V>
+ Sub<Output = Self::V>
+ BitAnd<Output = Self::V>
+ BitOr<Output = Self::V>;
}
/// Trait to represent an address within an address space.
///
/// To simplify the design and implementation, assume the same raw data type (AddressValue::V)
/// could be used to store address, size and offset for the address space. Thus the Address trait
/// could be used to manage address, size and offset. On the other hand, type aliases may be
/// defined to improve code readability.
///
/// One design rule applied to the Address trait is that operators (+, -, &, | etc.) are not
/// supported, forcing clients to explicitly invoke the corresponding methods. There is one
/// exception:
/// Address (BitAnd|BitOr) AddressValue is supported.
pub trait Address:
AddressValue
+ Sized
+ Default
+ Copy
+ Eq
+ PartialEq
+ Ord
+ PartialOrd
+ BitAnd<<Self as AddressValue>::V, Output = Self>
+ BitOr<<Self as AddressValue>::V, Output = Self>
{
/// Create an address from a raw address value.
fn new(addr: Self::V) -> Self;
/// Get the raw value of the address.
fn raw_value(&self) -> Self::V;
/// Returns the bitwise and of the address with the given mask.
fn mask(&self, mask: Self::V) -> Self::V {
self.raw_value() & mask
}
/// Returns the offset from this address to the given base address, or None if there is
/// underflow.
fn checked_offset_from(&self, base: Self) -> Option<Self::V>;
/// Returns the offset from this address to the given base address.
/// Only use this when the subtraction is guaranteed not to underflow.
fn unchecked_offset_from(&self, base: Self) -> Self::V {
self.raw_value() - base.raw_value()
}
/// Returns the result of the add or None if there is overflow.
fn checked_add(&self, other: Self::V) -> Option<Self>;
/// Returns the result of the add and a flag identifying whether there was overflow
fn overflowing_add(&self, other: Self::V) -> (Self, bool);
/// Returns the result of the base address + the size.
/// Only use this when the addition is guaranteed not to overflow.
fn unchecked_add(&self, offset: Self::V) -> Self;
/// Returns the result of the subtraction or None if there is underflow.
fn checked_sub(&self, other: Self::V) -> Option<Self>;
/// Returns the result of the subtraction and a flag identifying whether there was overflow
fn overflowing_sub(&self, other: Self::V) -> (Self, bool);
/// Returns the result of the subtraction.
/// Only use this when the subtraction is guaranteed not to underflow.
fn unchecked_sub(&self, other: Self::V) -> Self;
}
macro_rules! impl_address_ops {
($T:ident, $V:ty) => {
impl AddressValue for $T {
type V = $V;
}
impl Address for $T {
fn new(value: $V) -> $T {
$T(value)
}
fn raw_value(&self) -> $V {
self.0
}
fn checked_offset_from(&self, base: $T) -> Option<$V> {
self.0.checked_sub(base.0)
}
fn checked_add(&self, other: $V) -> Option<$T> {
self.0.checked_add(other).map($T)
}
fn overflowing_add(&self, other: $V) -> ($T, bool) {
let (t, ovf) = self.0.overflowing_add(other);
($T(t), ovf)
}
fn unchecked_add(&self, offset: $V) -> $T {
$T(self.0 + offset)
}
fn checked_sub(&self, other: $V) -> Option<$T> {
self.0.checked_sub(other).map($T)
}
fn overflowing_sub(&self, other: $V) -> ($T, bool) {
let (t, ovf) = self.0.overflowing_sub(other);
($T(t), ovf)
}
fn unchecked_sub(&self, other: $V) -> $T {
$T(self.0 - other)
}
}
impl Default for $T {
fn default() -> $T {
Self::new(0 as $V)
}
}
impl BitAnd<$V> for $T {
type Output = $T;
fn bitand(self, other: $V) -> $T {
$T(self.0 & other)
}
}
impl BitOr<$V> for $T {
type Output = $T;
fn bitor(self, other: $V) -> $T {
$T(self.0 | other)
}
}
};
}
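// Usage sketch (illustrative addition, mirroring how the crate defines
// GuestAddress and MemoryRegionAddress elsewhere): a newtype over u64 picks
// up the whole AddressValue/Address API from the macro.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct DemoAddress(pub u64);
impl_address_ops!(DemoAddress, u64);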

View File

@ -1,285 +0,0 @@
// Portions Copyright 2019 Red Hat, Inc.
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Define the ByteValued trait to mark that it is safe to instantiate the struct with random data.
use std::io::{Read, Write};
use std::mem::size_of;
use std::result::Result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
/// Types for which it is safe to initialize from raw data.
///
/// A type `T` is `ByteValued` if and only if it can be initialized by reading its contents from a
/// byte array. This is generally true for all plain-old-data structs. It is notably not true for
/// any type that includes a reference.
///
/// Implementing this trait guarantees that it is safe to instantiate the struct with random data.
pub unsafe trait ByteValued: Copy + Default + Send + Sync {
/// Converts a slice of raw data into a reference of `Self`.
///
/// The value of `data` is not copied. Instead a reference is made from the given slice. The
/// value of `Self` will depend on the representation of the type in memory, and may change in
/// an unstable fashion.
///
/// This will return `None` if the length of data does not match the size of `Self`, or if the
/// data is not aligned for the type of `Self`.
fn from_slice(data: &[u8]) -> Option<&Self> {
// Early out to avoid an unneeded `align_to` call.
if data.len() != size_of::<Self>() {
return None;
}
// Safe because the ByteValued trait asserts any data is valid for this type, and we ensured
// the size of the pointer's buffer is the correct size. The `align_to` method ensures that
// we don't have any unaligned references. This aliases a pointer, but because the pointer
// is from a const slice reference, there are no mutable aliases. Finally, the reference
// returned can not outlive data because they have equal implicit lifetime constraints.
match unsafe { data.align_to::<Self>() } {
([], [mid], []) => Some(mid),
_ => None,
}
}
/// Converts a mutable slice of raw data into a mutable reference of `Self`.
///
/// Because `Self` is made from a reference to the mutable slice, mutations to the returned
/// reference are immediately reflected in `data`. The value of the returned `Self` will depend
/// on the representation of the type in memory, and may change in an unstable fashion.
///
/// This will return `None` if the length of data does not match the size of `Self`, or if the
/// data is not aligned for the type of `Self`.
fn from_mut_slice(data: &mut [u8]) -> Option<&mut Self> {
// Early out to avoid an unneeded `align_to_mut` call.
if data.len() != size_of::<Self>() {
return None;
}
// Safe because the ByteValued trait asserts any data is valid for this type, and we ensured
// the size of the pointer's buffer is the correct size. The `align_to` method ensures that
// we don't have any unaligned references. This aliases a pointer, but because the pointer
// is from a mut slice reference, we borrow the passed in mutable reference. Finally, the
// reference returned can not outlive data because they have equal implicit lifetime
// constraints.
match unsafe { data.align_to_mut::<Self>() } {
([], [mid], []) => Some(mid),
_ => None,
}
}
/// Converts a reference to `self` into a slice of bytes.
///
/// The value of `self` is not copied. Instead, the slice is made from a reference to `self`.
/// The value of bytes in the returned slice will depend on the representation of the type in
/// memory, and may change in an unstable fashion.
fn as_slice(&self) -> &[u8] {
// Safe because the entire size of self is accessible as bytes because the trait guarantees
// it. The lifetime of the returned slice is the same as the passed reference, so that no
// dangling pointers will result from this pointer alias.
unsafe { from_raw_parts(self as *const Self as *const u8, size_of::<Self>()) }
}
/// Converts a mutable reference to `self` into a mutable slice of bytes.
///
/// Because the slice is made from a reference to `self`, mutations to the returned slice are
/// immediately reflected in `self`. The value of bytes in the returned slice will depend on
/// the representation of the type in memory, and may change in an unstable fashion.
fn as_mut_slice(&mut self) -> &mut [u8] {
// Safe because the entire size of self is accessible as bytes because the trait guarantees
// it. The trait also guarantees that any combination of bytes is valid for this type, so
// modifying them in the form of a byte slice is valid. The lifetime of the returned slice
// is the same as the passed reference, so that no dangling pointers will result from this
// pointer alias. Although this does alias a mutable pointer, we do so by exclusively
// borrowing the given mutable reference.
unsafe { from_raw_parts_mut(self as *mut Self as *mut u8, size_of::<Self>()) }
}
}
/// A container to host a range of bytes and access its content.
///
/// Candidates which may implement this trait include:
/// - anonymous memory areas
/// - mmapped memory areas
/// - data files
/// - a proxy to access memory on remote
pub trait Bytes<A> {
/// Associated error codes
type E;
/// Writes a slice into the container at the specified address.
/// Returns the number of bytes written. The number of bytes written can
/// be less than the length of the slice if there isn't enough room in the
/// container.
fn write(&self, buf: &[u8], addr: A) -> Result<usize, Self::E>;
/// Reads to a slice from the container at the specified address.
/// Returns the number of bytes read. The number of bytes read can be less than the length
/// of the slice if there isn't enough room within the container.
fn read(&self, buf: &mut [u8], addr: A) -> Result<usize, Self::E>;
/// Writes the entire contents of a slice into the container at the specified address.
///
/// Returns an error if there isn't enough room within the container to complete the entire
/// write. Part of the data may have been written nevertheless.
fn write_slice(&self, buf: &[u8], addr: A) -> Result<(), Self::E>;
/// Reads from the container at the specified address to fill the entire buffer.
///
/// Returns an error if there isn't enough room within the container to fill the entire buffer.
/// Part of the buffer may have been filled nevertheless.
fn read_slice(&self, buf: &mut [u8], addr: A) -> Result<(), Self::E>;
/// Writes an object into the container at the specified address.
/// Returns Ok(()) if the object fits, or Err if it extends past the end.
fn write_obj<T: ByteValued>(&self, val: T, addr: A) -> Result<(), Self::E> {
self.write_slice(val.as_slice(), addr)
}
/// Reads an object from the container at the given address.
/// Reading from a volatile area isn't strictly safe as it could change mid-read.
/// However, as long as the type T is plain old data and can handle random initialization,
/// everything will be OK.
fn read_obj<T: ByteValued>(&self, addr: A) -> Result<T, Self::E> {
let mut result: T = Default::default();
self.read_slice(result.as_mut_slice(), addr).map(|_| result)
}
/// Reads data from a readable object like a File and writes it into the container.
///
/// # Arguments
/// * `addr` - Begin writing at this address.
/// * `src` - Copy from `src` into the container.
/// * `count` - Copy `count` bytes from `src` into the container.
fn read_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<usize, Self::E>
where
F: Read;
/// Reads data from a readable object like a File and writes it into the container.
///
/// # Arguments
/// * `addr` - Begin writing at this address.
/// * `src` - Copy from `src` into the container.
/// * `count` - Copy `count` bytes from `src` into the container.
fn read_exact_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<(), Self::E>
where
F: Read;
/// Reads data from the container to a writable object.
///
/// # Arguments
/// * `addr` - Begin reading from this addr.
/// * `dst` - Copy from the container to `dst`.
/// * `count` - Copy `count` bytes from the container to `dst`.
fn write_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<usize, Self::E>
where
F: Write;
/// Reads data from the container to a writable object.
///
/// # Arguments
/// * `addr` - Begin reading from this addr.
/// * `dst` - Copy from the container to `dst`.
/// * `count` - Copy `count` bytes from the container to `dst`.
fn write_all_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E>
where
F: Write;
}
// All intrinsic types and arrays of intrinsic types are ByteValued. They are just numbers.
macro_rules! array_data_init {
($T:ty, $($N:expr)+) => {
$(
unsafe impl ByteValued for [$T; $N] {}
)+
}
}
macro_rules! data_init_type {
($T:ty) => {
unsafe impl ByteValued for $T {}
array_data_init! {
$T,
0 1 2 3 4 5 6 7 8 9
10 11 12 13 14 15 16 17 18 19
20 21 22 23 24 25 26 27 28 29
30 31 32
}
};
}
data_init_type!(u8);
data_init_type!(u16);
data_init_type!(u32);
data_init_type!(u64);
data_init_type!(usize);
data_init_type!(i8);
data_init_type!(i16);
data_init_type!(i32);
data_init_type!(i64);
data_init_type!(isize);
#[cfg(test)]
mod tests {
use std::fmt::Debug;
use std::mem::{align_of, size_of};
use ByteValued;
fn from_slice_alignment<T>()
where
T: ByteValued + PartialEq + Debug + Default,
{
let mut v = [0u8; 32];
let pre_len = {
let (pre, _, _) = unsafe { v.align_to::<T>() };
pre.len()
};
{
let aligned_v = &mut v[pre_len..pre_len + size_of::<T>()];
{
let from_aligned = T::from_slice(aligned_v);
let val: T = Default::default();
assert_eq!(from_aligned, Some(&val));
}
{
let from_aligned_mut = T::from_mut_slice(aligned_v);
let mut val: T = Default::default();
assert_eq!(from_aligned_mut, Some(&mut val));
}
}
for i in 1..size_of::<T>() {
let begin = pre_len + i;
let end = begin + size_of::<T>();
let unaligned_v = &mut v[begin..end];
{
let from_unaligned = T::from_slice(unaligned_v);
if align_of::<T>() != 1 {
assert_eq!(from_unaligned, None);
}
}
{
let from_unaligned_mut = T::from_mut_slice(unaligned_v);
if align_of::<T>() != 1 {
assert_eq!(from_unaligned_mut, None);
}
}
}
}
#[test]
fn test_slice_alignment() {
from_slice_alignment::<u8>();
from_slice_alignment::<u16>();
from_slice_alignment::<u32>();
from_slice_alignment::<u64>();
from_slice_alignment::<usize>();
from_slice_alignment::<i8>();
from_slice_alignment::<i16>();
from_slice_alignment::<i32>();
from_slice_alignment::<i64>();
from_slice_alignment::<isize>();
}
}
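// Usage sketch (hypothetical `round_trip` helper): a write_obj/read_obj
// round trip through any Bytes implementation, at any address type A.
fn round_trip<A: Copy, B: Bytes<A>>(mem: &B, addr: A) -> Result<u32, B::E> {
    mem.write_obj(0xdead_beef_u32, addr)?;
    mem.read_obj::<u32>(addr)
}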

View File

@ -1,148 +0,0 @@
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Explicit endian types useful for embedding in structs or reinterpreting data.
//!
//! Each endian type is guaranteed to have the same size and alignment as a regular unsigned
//! primitive of equal size.
//!
//! # Examples
//!
//! ```
//! # use vm_memory::*;
//! let b: Be32 = From::from(3);
//! let l: Le32 = From::from(3);
//!
//! assert_eq!(b.to_native(), 3);
//! assert_eq!(l.to_native(), 3);
//! assert!(b == 3);
//! assert!(l == 3);
//!
//! let b_trans: u32 = unsafe { std::mem::transmute(b) };
//! let l_trans: u32 = unsafe { std::mem::transmute(l) };
//!
//! #[cfg(target_endian = "little")]
//! assert_eq!(l_trans, 3);
//! #[cfg(target_endian = "big")]
//! assert_eq!(b_trans, 3);
//!
//! assert_ne!(b_trans, l_trans);
//! ```
use std::mem::{align_of, size_of};
use bytes::ByteValued;
macro_rules! const_assert {
($condition:expr) => {
let _ = [(); 0 - !$condition as usize];
};
}
macro_rules! endian_type {
($old_type:ident, $new_type:ident, $to_new:ident, $from_new:ident) => {
/// An unsigned integer type with an explicit endianness.
///
/// See module level documentation for examples.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Default)]
pub struct $new_type($old_type);
impl $new_type {
fn _assert() {
const_assert!(align_of::<$new_type>() == align_of::<$old_type>());
const_assert!(size_of::<$new_type>() == size_of::<$old_type>());
}
/// Converts `self` to the native endianness.
pub fn to_native(self) -> $old_type {
$old_type::$from_new(self.0)
}
}
unsafe impl ByteValued for $new_type {}
impl PartialEq<$old_type> for $new_type {
fn eq(&self, other: &$old_type) -> bool {
self.0 == $old_type::$to_new(*other)
}
}
impl PartialEq<$new_type> for $old_type {
fn eq(&self, other: &$new_type) -> bool {
$old_type::$to_new(other.0) == *self
}
}
impl Into<$old_type> for $new_type {
fn into(self) -> $old_type {
$old_type::$from_new(self.0)
}
}
impl From<$old_type> for $new_type {
fn from(v: $old_type) -> $new_type {
$new_type($old_type::$to_new(v))
}
}
};
}
endian_type!(u16, Le16, to_le, from_le);
endian_type!(u32, Le32, to_le, from_le);
endian_type!(u64, Le64, to_le, from_le);
endian_type!(usize, LeSize, to_le, from_le);
endian_type!(u16, Be16, to_be, from_be);
endian_type!(u32, Be32, to_be, from_be);
endian_type!(u64, Be64, to_be, from_be);
endian_type!(usize, BeSize, to_be, from_be);
#[cfg(test)]
mod tests {
use super::*;
use std::convert::From;
use std::mem::transmute;
#[cfg(target_endian = "little")]
const NATIVE_LITTLE: bool = true;
#[cfg(target_endian = "big")]
const NATIVE_LITTLE: bool = false;
const NATIVE_BIG: bool = !NATIVE_LITTLE;
macro_rules! endian_test {
($old_type:ty, $new_type:ty, $test_name:ident, $native:expr) => {
mod $test_name {
use super::*;
#[allow(overflowing_literals)]
#[test]
fn equality() {
let v = 0x0123_4567_89AB_CDEF as $old_type;
let endian_v: $new_type = From::from(v);
let endian_into: $old_type = endian_v.into();
let endian_transmute: $old_type = unsafe { transmute(endian_v) };
if $native {
assert_eq!(endian_v, endian_transmute);
} else {
assert_eq!(endian_v, endian_transmute.swap_bytes());
}
assert_eq!(v, endian_into);
assert!(v == endian_v);
assert!(endian_v == v);
}
}
};
}
endian_test!(u16, Le16, test_le16, NATIVE_LITTLE);
endian_test!(u32, Le32, test_le32, NATIVE_LITTLE);
endian_test!(u64, Le64, test_le64, NATIVE_LITTLE);
endian_test!(usize, LeSize, test_le_size, NATIVE_LITTLE);
endian_test!(u16, Be16, test_be16, NATIVE_BIG);
endian_test!(u32, Be32, test_be32, NATIVE_BIG);
endian_test!(u64, Be64, test_be64, NATIVE_BIG);
endian_test!(usize, BeSize, test_be_size, NATIVE_BIG);
}

View File

@ -1,515 +0,0 @@
// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Traits to track and access guest's physical memory.
//!
//! To make the abstraction as generic as possible, all the core traits declared here only define
//! methods to access guest's memory, and never define methods to manage (create, delete, insert,
//! remove etc.) guest's memory. This way, the guest memory consumers (virtio device drivers,
//! vhost drivers and boot loaders etc) may be decoupled from the guest memory provider (typically
//! a hypervisor).
//!
//! Traits and Structs
//! - [GuestAddress](struct.GuestAddress.html): represents a guest physical address (GPA).
//! - [MemoryRegionAddress](struct.MemoryRegionAddress.html): represents an offset inside a region.
//! - [GuestMemoryRegion](trait.GuestMemoryRegion.html): represents a contiguous region of guest's
//! physical memory.
//! - [GuestMemory](trait.GuestMemory.html): represents a collection of GuestMemoryRegion objects.
//! The main responsibilities of the GuestMemory trait are:
//! - hide the details of accessing guest's physical address.
//! - map a request address to a GuestMemoryRegion object and relay the request to it.
//! - handle cases where an access request spans two or more GuestMemoryRegion objects.
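//!
//! As a quick sketch of the consumer-side API (a hedged example: it assumes the crate is
//! built with the optional `backend-mmap` feature, and the addresses are arbitrary):
//!
//! ```
//! # #[cfg(feature = "backend-mmap")]
//! # fn consumer_sketch() {
//! # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
//! let mem = GuestMemoryMmap::new(&[(GuestAddress(0x1000), 0x400)]).unwrap();
//! mem.write_obj(0xdead_beefu32, GuestAddress(0x1000)).unwrap();
//! let val: u32 = mem.read_obj(GuestAddress(0x1000)).unwrap();
//! assert_eq!(val, 0xdead_beef);
//! # }
//! ```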
use std::convert::From;
use std::fmt::{self, Display};
use std::io::{self, Read, Write};
use std::ops::{BitAnd, BitOr};
use address::{Address, AddressValue};
use bytes::Bytes;
use volatile_memory;
static MAX_ACCESS_CHUNK: usize = 4096;
/// Errors associated with handling guest memory accesses.
#[allow(missing_docs)]
#[derive(Debug)]
pub enum Error {
/// Failure in finding a guest address in any memory regions mapped by this guest.
InvalidGuestAddress(GuestAddress),
/// Couldn't read/write from the given source.
IOError(io::Error),
/// Incomplete read or write
PartialBuffer { expected: usize, completed: usize },
/// Requested backend address is out of range.
InvalidBackendAddress,
}
impl From<volatile_memory::Error> for Error {
fn from(e: volatile_memory::Error) -> Self {
match e {
volatile_memory::Error::OutOfBounds { .. } => Error::InvalidBackendAddress,
volatile_memory::Error::Overflow { .. } => Error::InvalidBackendAddress,
volatile_memory::Error::IOError(e) => Error::IOError(e),
volatile_memory::Error::PartialBuffer {
expected,
completed,
} => Error::PartialBuffer {
expected,
completed,
},
}
}
}
/// Result of guest memory operations
pub type Result<T> = std::result::Result<T, Error>;
impl std::error::Error for Error {}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Guest memory error: ")?;
match self {
Error::InvalidGuestAddress(addr) => {
write!(f, "invalid guest address {}", addr.raw_value())
}
Error::IOError(error) => write!(f, "{}", error),
Error::PartialBuffer {
expected,
completed,
} => write!(
f,
"only used {} bytes in {} long buffer",
completed, expected,
),
Error::InvalidBackendAddress => write!(f, "invalid backend address"),
}
}
}
/// Represents a guest physical address (GPA).
///
/// Notes:
/// - On ARM64, a 32-bit hypervisor may be used to support a 64-bit guest. For simplicity,
/// u64 is used to store the raw value no matter whether the guest is a 32-bit or 64-bit virtual
/// machine.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct GuestAddress(pub u64);
impl_address_ops!(GuestAddress, u64);
/// Represents an offset inside a region.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
pub struct MemoryRegionAddress(pub u64);
impl_address_ops!(MemoryRegionAddress, u64);
/// Type of the raw value stored in a GuestAddress object.
pub type GuestUsize = <GuestAddress as AddressValue>::V;
/// Represents a contiguous region of guest physical memory.
#[allow(clippy::len_without_is_empty)]
pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = Error> {
/// Get the size of the region.
fn len(&self) -> GuestUsize;
/// Get minimum (inclusive) address managed by the region.
fn start_addr(&self) -> GuestAddress;
/// Get maximum (inclusive) address managed by the region.
fn end_addr(&self) -> GuestAddress {
// unchecked_add is safe as the region bounds were checked when it was created.
self.start_addr().unchecked_add(self.len() - 1)
}
/// Returns the given address if it is within the memory range accessible
/// through this region.
fn check_address(&self, addr: MemoryRegionAddress) -> Option<MemoryRegionAddress> {
if self.address_in_range(addr) {
Some(addr)
} else {
None
}
}
/// Returns true if the given address is within the memory range accessible
/// through this region.
fn address_in_range(&self, addr: MemoryRegionAddress) -> bool {
addr.raw_value() < self.len()
}
/// Returns the address plus the offset if it is in range.
fn checked_offset(
&self,
base: MemoryRegionAddress,
offset: usize,
) -> Option<MemoryRegionAddress> {
base.checked_add(offset as u64)
.and_then(|addr| self.check_address(addr))
}
/// Converts an absolute address within the address space (GuestMemory)
/// into an address relative to this region, or returns None if
/// it is out of bounds.
fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
addr.checked_offset_from(self.start_addr())
.and_then(|offset| self.check_address(MemoryRegionAddress(offset)))
}
/// Return a slice corresponding to the data in the region; unsafe because of
/// possible aliasing. Return None if the region does not support slice-based
/// access.
unsafe fn as_slice(&self) -> Option<&[u8]> {
None
}
/// Return a mutable slice corresponding to the data in the region; unsafe because of
/// possible aliasing. Return None if the region does not support slice-based
/// access.
unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
None
}
}
/// Represents a container for a collection of GuestMemoryRegion objects.
///
/// The main responsibilities of the GuestMemory trait are:
/// - hide the details of accessing guest's physical address.
/// - map a request address to a GuestMemoryRegion object and relay the request to it.
/// - handle cases where an access request spans two or more GuestMemoryRegion objects.
///
/// Note: all regions in a GuestMemory object must not intersect with each other.
pub trait GuestMemory {
/// Type of objects hosted by the address space.
type R: GuestMemoryRegion;
/// Returns the number of regions in the collection.
fn num_regions(&self) -> usize;
/// Return the region containing the specified address or None.
fn find_region(&self, addr: GuestAddress) -> Option<&Self::R>;
/// Perform the specified action on each region.
/// It only walks the children of the current region and does not step into sub-regions.
fn with_regions<F, E>(&self, cb: F) -> std::result::Result<(), E>
where
F: Fn(usize, &Self::R) -> std::result::Result<(), E>;
/// Perform the specified action on each region mutably.
/// It only walks the children of the current region and does not step into sub-regions.
fn with_regions_mut<F, E>(&self, cb: F) -> std::result::Result<(), E>
where
F: FnMut(usize, &Self::R) -> std::result::Result<(), E>;
/// Applies two functions, specified as callbacks, on the inner memory regions.
///
/// # Arguments
/// * `init` - Starting value of the accumulator for the `foldf` function.
/// * `mapf` - "Map" function, applied to all the inner memory regions. It returns an array of
/// the same size as the memory regions array, containing the function's results
/// for each region.
/// * `foldf` - "Fold" function, applied to the array returned by `mapf`. It acts as an
/// operator, applying itself to the `init` value and to each subsequent element
/// in the array returned by `mapf`.
///
/// # Examples
///
/// * Compute the total size of all memory mappings in KB by iterating over the memory regions
/// and dividing their sizes by 1024, then summing up the values in an accumulator.
///
/// ```
/// # #[cfg(feature = "backend-mmap")]
/// # fn test_map_fold() -> Result<(), ()> {
/// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, mmap::GuestMemoryMmap};
/// let start_addr1 = GuestAddress(0x0);
/// let start_addr2 = GuestAddress(0x400);
/// let mem = GuestMemoryMmap::new(&vec![(start_addr1, 1024), (start_addr2, 2048)]).unwrap();
/// let total_size = mem.map_and_fold(
/// 0,
/// |(_, region)| region.len() / 1024,
/// |acc, size| acc + size
/// );
/// println!("Total memory size = {} KB", total_size);
/// Ok(())
/// # }
/// ```
fn map_and_fold<F, G, T>(&self, init: T, mapf: F, foldf: G) -> T
where
F: Fn((usize, &Self::R)) -> T,
G: Fn(T, T) -> T;
/// Get the maximum (inclusive) address managed by any of the regions in this collection.
fn end_addr(&self) -> GuestAddress {
self.map_and_fold(
GuestAddress(0),
|(_, region)| region.end_addr(),
std::cmp::max,
)
}
/// Converts an absolute address into the region containing it and the
/// corresponding address relative to that region, or returns None if
/// it is out of bounds.
fn to_region_addr(&self, addr: GuestAddress) -> Option<(&Self::R, MemoryRegionAddress)> {
self.find_region(addr)
.map(|r| (r, r.to_region_addr(addr).unwrap()))
}
/// Returns true if the given address is within the memory range available to the guest.
fn address_in_range(&self, addr: GuestAddress) -> bool {
self.find_region(addr).is_some()
}
/// Returns the given address if it is within the memory range available to the guest.
fn check_address(&self, addr: GuestAddress) -> Option<GuestAddress> {
self.find_region(addr).map(|_| addr)
}
/// Returns the address plus the offset if it is in range.
fn checked_offset(&self, base: GuestAddress, offset: usize) -> Option<GuestAddress> {
base.checked_add(offset as u64)
.and_then(|addr| self.check_address(addr))
}
/// Invoke callback `f` to handle data in the address range [addr, addr + count).
///
/// The address range [addr, addr + count) may span more than one GuestMemoryRegion object, or
/// even have holes within it. So try_access() invokes the callback 'f' for each GuestMemoryRegion
/// object involved and returns:
/// - error code returned by the callback 'f'
/// - size of data already handled when encountering the first hole
/// - size of data already handled when the whole range has been handled
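///
/// For illustration, a hedged sketch of caller-side usage (it assumes the optional
/// `backend-mmap` feature; the region layout and the zero-filling callback are just
/// example values):
///
/// ```
/// # #[cfg(feature = "backend-mmap")]
/// # fn try_access_sketch() {
/// # use vm_memory::{Bytes, GuestAddress, GuestMemory, GuestMemoryMmap};
/// let mem = GuestMemoryMmap::new(&[(GuestAddress(0), 0x1000)]).unwrap();
/// // Zero 16 bytes starting at guest address 0x10, one region at a time.
/// let handled = mem
///     .try_access(16, GuestAddress(0x10), |offset, len, region_addr, region| {
///         // `offset` bytes were handled in earlier iterations; write the next
///         // `len` bytes into this region starting at `region_addr`.
///         region.write(&vec![0u8; len][..], region_addr)
///     })
///     .unwrap();
/// assert_eq!(handled, 16);
/// # }
/// ```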
fn try_access<F>(&self, count: usize, addr: GuestAddress, mut f: F) -> Result<usize>
where
F: FnMut(usize, usize, MemoryRegionAddress, &Self::R) -> Result<usize>,
{
let mut cur = addr;
let mut total = 0;
while let Some(region) = self.find_region(cur) {
let start = region.to_region_addr(cur).unwrap();
let cap = region.len() as usize;
let len = std::cmp::min(cap, count - total);
match f(total, len, start, region) {
// no more data
Ok(0) => break,
// made some progress
Ok(len) => {
total += len;
if total == count {
break;
}
cur = match cur.overflowing_add(len as GuestUsize) {
(GuestAddress(0), _) => GuestAddress(0),
(result, false) => result,
(_, true) => panic!("guest address overflow"),
}
}
// error happened
e => return e,
}
}
if total == 0 {
Err(Error::InvalidGuestAddress(addr))
} else {
Ok(total)
}
}
}
impl<T: GuestMemory> Bytes<GuestAddress> for T {
type E = Error;
fn write(&self, buf: &[u8], addr: GuestAddress) -> Result<usize> {
self.try_access(
buf.len(),
addr,
|offset, _count, caddr, region| -> Result<usize> {
region.write(&buf[offset as usize..], caddr)
},
)
}
fn read(&self, buf: &mut [u8], addr: GuestAddress) -> Result<usize> {
self.try_access(
buf.len(),
addr,
|offset, _count, caddr, region| -> Result<usize> {
region.read(&mut buf[offset as usize..], caddr)
},
)
}
fn write_slice(&self, buf: &[u8], addr: GuestAddress) -> Result<()> {
let res = self.write(buf, addr)?;
if res != buf.len() {
return Err(Error::PartialBuffer {
expected: buf.len(),
completed: res,
});
}
Ok(())
}
fn read_slice(&self, buf: &mut [u8], addr: GuestAddress) -> Result<()> {
let res = self.read(buf, addr)?;
if res != buf.len() {
return Err(Error::PartialBuffer {
expected: buf.len(),
completed: res,
});
}
Ok(())
}
fn read_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<usize>
where
F: Read,
{
self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
// Check if something bad happened before doing unsafe things.
assert!(offset < count);
if let Some(dst) = unsafe { region.as_mut_slice() } {
// This is safe because `start` and `len` are within the `region`.
let start = caddr.raw_value() as usize;
let end = start + len;
src.read_exact(&mut dst[start..end])
.map_err(Error::IOError)?;
Ok(len)
} else {
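// The region cannot expose its data as a slice, so bounce through a
// temporary buffer of at most MAX_ACCESS_CHUNK bytes and relay it with
// region.write().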
let len = std::cmp::min(len, MAX_ACCESS_CHUNK);
let mut buf = vec![0u8; len].into_boxed_slice();
let bytes_read = src.read(&mut buf[..]).map_err(Error::IOError)?;
let bytes_written = region.write(&buf[0..bytes_read], caddr)?;
assert_eq!(bytes_written, bytes_read);
Ok(len)
}
})
}
fn read_exact_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<()>
where
F: Read,
{
let res = self.read_from(addr, src, count)?;
if res != count {
return Err(Error::PartialBuffer {
expected: count,
completed: res,
});
}
Ok(())
}
fn write_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<usize>
where
F: Write,
{
self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
// Check if something bad happened before doing unsafe things.
assert!(offset < count);
if let Some(src) = unsafe { region.as_slice() } {
// This is safe because `start` and `len` are within the `region`.
let start = caddr.raw_value() as usize;
let end = start + len;
// It is safe to read from volatile memory. Accessing the guest
// memory as a slice is OK because nothing assumes another thread
// won't change what is loaded.
let bytes_written = dst.write(&src[start..end]).map_err(Error::IOError)?;
Ok(bytes_written)
} else {
let len = std::cmp::min(len, MAX_ACCESS_CHUNK);
let mut buf = vec![0u8; len].into_boxed_slice();
let bytes_read = region.read(&mut buf, caddr)?;
assert_eq!(bytes_read, len);
let bytes_written = dst.write(&buf).map_err(Error::IOError)?;
Ok(bytes_written)
}
})
}
fn write_all_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()>
where
F: Write,
{
let res = self.write_to(addr, dst, count)?;
if res != count {
return Err(Error::PartialBuffer {
expected: count,
completed: res,
});
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn offset_from() {
let base = GuestAddress(0x100);
let addr = GuestAddress(0x150);
assert_eq!(addr.unchecked_offset_from(base), 0x50u64);
assert_eq!(addr.checked_offset_from(base), Some(0x50u64));
assert_eq!(base.checked_offset_from(addr), None);
}
#[test]
fn equals() {
let a = GuestAddress(0x300);
let b = GuestAddress(0x300);
let c = GuestAddress(0x301);
assert_eq!(a, GuestAddress(a.raw_value()));
assert_eq!(a, b);
assert_eq!(b, a);
assert_ne!(a, c);
assert_ne!(c, a);
}
#[test]
#[allow(clippy::eq_op)]
fn cmp() {
let a = GuestAddress(0x300);
let b = GuestAddress(0x301);
assert!(a < b);
assert!(b > a);
assert!(!(a < a));
}
#[test]
fn mask() {
let a = GuestAddress(0x5050);
assert_eq!(GuestAddress(0x5000), a & 0xff00u64);
assert_eq!(0x5000, a.mask(0xff00u64));
assert_eq!(GuestAddress(0x5055), a | 0x0005u64);
}
#[test]
fn add_sub() {
let a = GuestAddress(0x50);
let b = GuestAddress(0x60);
assert_eq!(Some(GuestAddress(0xb0)), a.checked_add(0x60));
assert_eq!(0x10, b.unchecked_offset_from(a));
}
#[test]
fn checked_add_overflow() {
let a = GuestAddress(0xffff_ffff_ffff_ff55);
assert_eq!(Some(GuestAddress(0xffff_ffff_ffff_ff57)), a.checked_add(2));
assert!(a.checked_add(0xf0).is_none());
}
#[test]
fn checked_sub_underflow() {
let a = GuestAddress(0xff);
assert_eq!(Some(GuestAddress(0x0f)), a.checked_sub(0xf0));
assert!(a.checked_sub(0xffff).is_none());
}
}

View File

@ -1,57 +0,0 @@
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Traits for allocating, handling and interacting with the VM's physical memory.
//!
//! For a typical hypervisor, there are several components, such as boot loader, virtual device
//! drivers, virtio backend drivers and vhost drivers etc, that need to access VM's physical memory.
//! This crate aims to provide a set of stable traits to decouple VM memory consumers from VM
//! memory providers. Based on these traits, VM memory consumers could access VM's physical memory
//! without knowing the implementation details of the VM memory provider. Thus hypervisor
//! components, such as boot loader, virtual device drivers, virtio backend drivers and vhost
//! drivers etc, could be shared and reused by multiple hypervisors.
#![deny(missing_docs)]
extern crate libc;
#[cfg(test)]
#[macro_use]
extern crate matches;
#[macro_use]
pub mod address;
pub use address::{Address, AddressValue};
pub mod bytes;
pub use bytes::{ByteValued, Bytes};
pub mod endian;
pub use endian::{Be16, Be32, Be64, BeSize, Le16, Le32, Le64, LeSize};
pub mod guest_memory;
pub use guest_memory::{
Error as GuestMemoryError, GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize,
MemoryRegionAddress, Result as GuestMemoryResult,
};
#[cfg(all(feature = "backend-mmap", unix))]
mod mmap_unix;
#[cfg(all(feature = "backend-mmap", windows))]
mod mmap_windows;
#[cfg(feature = "backend-mmap")]
pub mod mmap;
#[cfg(feature = "backend-mmap")]
pub use mmap::{GuestMemoryMmap, GuestRegionMmap, MmapError, MmapRegion};
pub mod volatile_memory;
pub use volatile_memory::{
Error as VolatileMemoryError, Result as VolatileMemoryResult, VolatileMemory, VolatileRef,
VolatileSlice,
};

View File

@ -1,681 +0,0 @@
// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! A default implementation of the GuestMemory trait by mmap()-ing guest's memory into the current
//! process.
//!
//! The main structs to access guest's memory are:
//! - [MmapRegion](struct.MmapRegion.html): mmap a contiguous region of guest's memory into the
//! current process
//! - [GuestRegionMmap](struct.GuestRegionMmap.html): tracks a mapping of memory in the current
//! process and the corresponding base address. It relays guest memory access requests to the
//! underlying [MmapRegion](struct.MmapRegion.html) object.
//! - [GuestMemoryMmap](struct.GuestMemoryMmap.html): provides methods to access a collection of
//! GuestRegionMmap objects.
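//!
//! A brief construction sketch (hedged: the guest base address and sizes are arbitrary
//! example values):
//!
//! ```
//! # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap, MmapRegion};
//! let mapping = MmapRegion::new(0x1000).unwrap();
//! let region = GuestRegionMmap::new(mapping, GuestAddress(0x8000));
//! let mem = GuestMemoryMmap::from_regions(vec![region]).unwrap();
//! assert_eq!(mem.num_regions(), 1);
//! ```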
use std::io::{self, Read, Write};
use std::ops::Deref;
use std::sync::Arc;
use address::Address;
use guest_memory::*;
use volatile_memory::VolatileMemory;
use Bytes;
#[cfg(unix)]
pub use mmap_unix::MmapRegion;
#[cfg(windows)]
pub use mmap_windows::MmapRegion;
// For MmapRegion
pub(crate) trait AsSlice {
unsafe fn as_slice(&self) -> &[u8];
#[allow(clippy::mut_from_ref)]
unsafe fn as_mut_slice(&self) -> &mut [u8];
}
/// Errors that can happen when creating a memory map
#[derive(Debug)]
pub enum MmapError {
/// Syscall returned the given error.
SystemCallFailed(io::Error),
/// No memory region found.
NoMemoryRegion,
/// Some of the memory regions intersect with each other.
MemoryRegionOverlap,
}
/// Tracks a mapping of memory in the current process and the corresponding base address
/// in the guest's memory space.
#[derive(Debug)]
pub struct GuestRegionMmap {
mapping: MmapRegion,
guest_base: GuestAddress,
}
impl GuestRegionMmap {
/// Create a new memory-mapped memory region for guest's physical memory.
/// Note: the caller needs to ensure that (mapping.len() + guest_base) doesn't wrap around.
pub fn new(mapping: MmapRegion, guest_base: GuestAddress) -> Self {
GuestRegionMmap {
mapping,
guest_base,
}
}
/// Converts an address relative to this region into a host pointer, or
/// returns None if it is out of bounds.
pub fn get_host_address(&self, addr: MemoryRegionAddress) -> Option<*mut u8> {
// wrapping_offset is safe here because it only computes a new pointer
// without dereferencing it, and this is safe because we've just
// range-checked addr using check_address.
self.check_address(addr)
.map(|addr| self.as_ptr().wrapping_offset(addr.raw_value() as isize))
}
}
impl Deref for GuestRegionMmap {
type Target = MmapRegion;
fn deref(&self) -> &MmapRegion {
&self.mapping
}
}
impl Bytes<MemoryRegionAddress> for GuestRegionMmap {
type E = Error;
/// # Examples
/// * Write a slice at guest address 0x1200.
///
/// ```
/// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
/// # let start_addr = GuestAddress(0x1000);
/// # let mut gm = GuestMemoryMmap::new(&vec![(start_addr, 0x400)]).unwrap();
/// let res = gm.write(&[1,2,3,4,5], GuestAddress(0x1200)).unwrap();
/// assert_eq!(5, res);
/// ```
fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<usize> {
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.write(buf, maddr)
.map_err(Into::into)
}
/// # Examples
/// * Read a slice of length 16 at guest address 0x1200.
///
/// ```
/// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
/// # let start_addr = GuestAddress(0x1000);
/// # let mut gm = GuestMemoryMmap::new(&vec![(start_addr, 0x400)]).unwrap();
/// let buf = &mut [0u8; 16];
/// let res = gm.read(buf, GuestAddress(0x1200)).unwrap();
/// assert_eq!(16, res);
/// ```
fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<usize> {
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.read(buf, maddr)
.map_err(Into::into)
}
fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<()> {
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.write_slice(buf, maddr)
.map_err(Into::into)
}
fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<()> {
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.read_slice(buf, maddr)
.map_err(Into::into)
}
/// # Examples
///
/// * Read bytes from /dev/urandom
///
/// ```
/// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
/// # use std::fs::File;
/// # use std::path::Path;
/// # let start_addr = GuestAddress(0x1000);
/// # let gm = GuestMemoryMmap::new(&vec![(start_addr, 0x400)]).unwrap();
/// let mut file = if cfg!(unix) {
/// File::open(Path::new("/dev/urandom")).unwrap()
/// } else {
/// File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
/// };
/// let addr = GuestAddress(0x1010);
/// gm.read_from(addr, &mut file, 128).unwrap();
/// let read_addr = addr.checked_add(8).unwrap();
/// let _: u32 = gm.read_obj(read_addr).unwrap();
/// ```
fn read_from<F>(&self, addr: MemoryRegionAddress, src: &mut F, count: usize) -> Result<usize>
where
F: Read,
{
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.read_from::<F>(maddr, src, count)
.map_err(Into::into)
}
/// # Examples
///
/// * Read bytes from /dev/urandom
///
/// ```
/// # extern crate tempfile;
/// # use self::tempfile::tempfile;
/// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
/// # use std::fs::File;
/// # use std::path::Path;
/// # let start_addr = GuestAddress(0x1000);
/// # let gm = GuestMemoryMmap::new(&vec![(start_addr, 0x400)]).unwrap();
/// let mut file = if cfg!(unix) {
/// File::open(Path::new("/dev/urandom")).unwrap()
/// } else {
/// File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
/// };
/// let addr = GuestAddress(0x1010);
/// gm.read_exact_from(addr, &mut file, 128).unwrap();
/// let read_addr = addr.checked_add(8).unwrap();
/// let _: u32 = gm.read_obj(read_addr).unwrap();
/// ```
fn read_exact_from<F>(&self, addr: MemoryRegionAddress, src: &mut F, count: usize) -> Result<()>
where
F: Read,
{
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.read_exact_from::<F>(maddr, src, count)
.map_err(Into::into)
}
/// Writes data from the region to a writable object.
///
/// # Examples
///
/// * Write 128 bytes to a temp file
///
/// ```
/// # extern crate tempfile;
/// # use self::tempfile::tempfile;
/// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
/// # use std::fs::OpenOptions;
/// # let start_addr = GuestAddress(0x1000);
/// # let gm = GuestMemoryMmap::new(&vec![(start_addr, 0x400)]).unwrap();
/// let mut file = tempfile().unwrap();
/// gm.write_to(start_addr, &mut file, 128).unwrap();
/// ```
fn write_to<F>(&self, addr: MemoryRegionAddress, dst: &mut F, count: usize) -> Result<usize>
where
F: Write,
{
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.write_to::<F>(maddr, dst, count)
.map_err(Into::into)
}
/// Writes data from the region to a writable object.
///
/// # Examples
///
/// * Write 128 bytes to a temp file
///
/// ```
/// # extern crate tempfile;
/// # use self::tempfile::tempfile;
/// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
/// # use std::fs::OpenOptions;
/// # let start_addr = GuestAddress(0x1000);
/// # let gm = GuestMemoryMmap::new(&vec![(start_addr, 0x400)]).unwrap();
/// let mut file = tempfile().unwrap();
/// gm.write_all_to(start_addr, &mut file, 128).unwrap();
/// ```
fn write_all_to<F>(&self, addr: MemoryRegionAddress, dst: &mut F, count: usize) -> Result<()>
where
F: Write,
{
let maddr = addr.raw_value() as usize;
self.as_volatile_slice()
.write_all_to::<F>(maddr, dst, count)
.map_err(Into::into)
}
}
impl GuestMemoryRegion for GuestRegionMmap {
fn len(&self) -> GuestUsize {
self.mapping.len() as GuestUsize
}
fn start_addr(&self) -> GuestAddress {
self.guest_base
}
unsafe fn as_slice(&self) -> Option<&[u8]> {
Some(self.mapping.as_slice())
}
unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
Some(self.mapping.as_mut_slice())
}
}
/// Tracks memory regions allocated/mapped for the guest in the current process.
#[derive(Clone, Debug)]
pub struct GuestMemoryMmap {
regions: Arc<Vec<GuestRegionMmap>>,
}
impl GuestMemoryMmap {
/// Creates a container and allocates anonymous memory for guest memory regions.
/// Valid memory regions are specified as a Vec of (Address, Size) tuples sorted by Address.
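///
/// A small sketch with arbitrary example addresses, mirroring the tests below:
///
/// ```
/// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
/// let gm = GuestMemoryMmap::new(&[
///     (GuestAddress(0x0), 0x400),
///     (GuestAddress(0x800), 0x400),
/// ]).unwrap();
/// assert_eq!(gm.num_regions(), 2);
/// ```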
pub fn new(ranges: &[(GuestAddress, usize)]) -> std::result::Result<Self, MmapError> {
if ranges.is_empty() {
return Err(MmapError::NoMemoryRegion);
}
let mut regions = Vec::<GuestRegionMmap>::new();
for range in ranges.iter() {
if let Some(last) = regions.last() {
if last
.guest_base
.checked_add(last.mapping.len() as GuestUsize)
.map_or(true, |a| a > range.0)
{
return Err(MmapError::MemoryRegionOverlap);
}
}
let mapping = MmapRegion::new(range.1).map_err(MmapError::SystemCallFailed)?;
regions.push(GuestRegionMmap {
mapping,
guest_base: range.0,
});
}
Ok(Self {
regions: Arc::new(regions),
})
}
/// Creates a container and adds an existing set of mappings to it.
pub fn from_regions(ranges: Vec<GuestRegionMmap>) -> std::result::Result<Self, MmapError> {
if ranges.is_empty() {
return Err(MmapError::NoMemoryRegion);
}
for rangei in 1..ranges.len() {
let range = &ranges[rangei];
let last = &ranges[rangei - 1];
if last
.guest_base
.checked_add(last.mapping.len() as GuestUsize)
.map_or(true, |a| a > range.start_addr())
{
return Err(MmapError::MemoryRegionOverlap);
}
}
Ok(Self {
regions: Arc::new(ranges),
})
}
/// Converts an absolute guest address into a host pointer, or returns
/// None if it is out of bounds.
pub fn get_host_address(&self, addr: GuestAddress) -> Option<*mut u8> {
self.to_region_addr(addr)
.and_then(|(r, addr)| r.get_host_address(addr))
}
}
impl GuestMemory for GuestMemoryMmap {
type R = GuestRegionMmap;
fn num_regions(&self) -> usize {
self.regions.len()
}
fn find_region(&self, addr: GuestAddress) -> Option<&GuestRegionMmap> {
for region in self.regions.iter() {
if addr >= region.start_addr() && addr <= region.end_addr() {
return Some(region);
}
}
None
}
fn with_regions<F, E>(&self, cb: F) -> std::result::Result<(), E>
where
F: Fn(usize, &Self::R) -> std::result::Result<(), E>,
{
for (index, region) in self.regions.iter().enumerate() {
cb(index, region)?;
}
Ok(())
}
fn with_regions_mut<F, E>(&self, mut cb: F) -> std::result::Result<(), E>
where
F: FnMut(usize, &Self::R) -> std::result::Result<(), E>,
{
for (index, region) in self.regions.iter().enumerate() {
cb(index, region)?;
}
Ok(())
}
fn map_and_fold<F, G, T>(&self, init: T, mapf: F, foldf: G) -> T
where
F: Fn((usize, &Self::R)) -> T,
G: Fn(T, T) -> T,
{
self.regions.iter().enumerate().map(mapf).fold(init, foldf)
}
}
#[cfg(test)]
mod tests {
extern crate tempfile;
use self::tempfile::tempfile;
use super::*;
use std::fs::File;
use std::mem;
use std::path::Path;
use Bytes;
#[test]
fn basic_map() {
let m = MmapRegion::new(1024).unwrap();
assert_eq!(1024, m.len());
}
#[test]
fn map_invalid_size() {
let e = MmapRegion::new(0).unwrap_err();
assert_eq!(e.raw_os_error(), Some(libc::EINVAL));
}
#[test]
fn slice_addr() {
let m = MmapRegion::new(5).unwrap();
let s = m.get_slice(2, 3).unwrap();
assert_eq!(s.as_ptr(), unsafe { m.as_ptr().offset(2) });
}
#[test]
fn mapped_file_read() {
let mut f = tempfile().unwrap();
let sample_buf = &[1, 2, 3, 4, 5];
assert!(f.write_all(sample_buf).is_ok());
let mem_map = MmapRegion::from_fd(&f, sample_buf.len(), 0).unwrap();
let buf = &mut [0u8; 16];
assert_eq!(
mem_map.as_volatile_slice().read(buf, 0).unwrap(),
sample_buf.len()
);
assert_eq!(buf[0..sample_buf.len()], sample_buf[..]);
}
#[test]
fn test_regions() {
// No regions provided should return error.
assert_eq!(
format!("{:?}", GuestMemoryMmap::new(&[]).err().unwrap()),
format!("{:?}", MmapError::NoMemoryRegion)
);
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x800);
let guest_mem =
GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
assert_eq!(guest_mem.num_regions(), 2);
assert_eq!(guest_mem.end_addr(), GuestAddress(0xbff));
assert!(guest_mem.find_region(GuestAddress(0x200)).is_some());
assert!(guest_mem.find_region(GuestAddress(0x600)).is_none());
assert!(guest_mem.find_region(GuestAddress(0xa00)).is_some());
assert!(guest_mem.find_region(GuestAddress(0xc00)).is_none());
}
#[test]
fn test_address_in_range() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x800);
let guest_mem =
GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
assert!(guest_mem.address_in_range(GuestAddress(0x200)));
assert!(!guest_mem.address_in_range(GuestAddress(0x600)));
assert!(guest_mem.address_in_range(GuestAddress(0xa00)));
assert!(!guest_mem.address_in_range(GuestAddress(0xc00)));
}
#[test]
fn test_check_address() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x800);
let guest_mem =
GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
assert_eq!(
guest_mem.check_address(GuestAddress(0x200)),
Some(GuestAddress(0x200))
);
assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None);
assert_eq!(
guest_mem.check_address(GuestAddress(0xa00)),
Some(GuestAddress(0xa00))
);
assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None);
}
#[test]
fn test_to_region_addr() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x800);
let guest_mem =
GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none());
let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap();
let (r1, addr1) = guest_mem.to_region_addr(GuestAddress(0xa00)).unwrap();
assert!(r0.as_ptr() == r1.as_ptr());
assert_eq!(addr0, MemoryRegionAddress(0));
assert_eq!(addr1, MemoryRegionAddress(0x200));
}
#[test]
fn test_get_host_address() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x800);
let guest_mem =
GuestMemoryMmap::new(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_none());
let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap();
let ptr1 = guest_mem.get_host_address(GuestAddress(0xa00)).unwrap();
assert_eq!(
ptr0,
guest_mem.find_region(GuestAddress(0x800)).unwrap().as_ptr()
);
assert_eq!(unsafe { ptr0.offset(0x200) }, ptr1);
}
#[test]
fn test_deref() {
let start_addr = GuestAddress(0x0);
let guest_mem = GuestMemoryMmap::new(&[(start_addr, 0x400)]).unwrap();
let sample_buf = &[1, 2, 3, 4, 5];
assert_eq!(guest_mem.write(sample_buf, start_addr).unwrap(), 5);
let slice = guest_mem
.find_region(GuestAddress(0))
.unwrap()
.as_volatile_slice();
let buf = &mut [0, 0, 0, 0, 0];
assert_eq!(slice.read(buf, 0).unwrap(), 5);
assert_eq!(buf, sample_buf);
}
#[test]
fn mapped_file_regions() {
let mut f = tempfile().unwrap();
let empty_buf = &[0; 16384];
assert!(f.write_all(empty_buf).is_ok());
let mem_map = MmapRegion::from_fd(&f, empty_buf.len(), 0).unwrap();
let guest_reg = GuestRegionMmap::new(mem_map, GuestAddress(0x8000));
let mut region_vec = Vec::new();
region_vec.push(guest_reg);
let guest_mem = GuestMemoryMmap::from_regions(region_vec).unwrap();
assert_eq!(guest_mem.num_regions(), 1);
assert!(guest_mem.find_region(GuestAddress(0)).is_none());
assert!(guest_mem.find_region(GuestAddress(0x8000)).is_some());
}
#[test]
fn overlap_memory() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x1000);
let res = GuestMemoryMmap::new(&[(start_addr1, 0x2000), (start_addr2, 0x2000)]);
assert_eq!(
format!("{:?}", res.err().unwrap()),
format!("{:?}", MmapError::MemoryRegionOverlap)
);
}
#[test]
fn test_read_u64() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x1000);
let bad_addr = GuestAddress(0x2001);
let bad_addr2 = GuestAddress(0x1ffc);
let max_addr = GuestAddress(0x2000);
let gm = GuestMemoryMmap::new(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
let val1: u64 = 0xaa55_aa55_aa55_aa55;
let val2: u64 = 0x55aa_55aa_55aa_55aa;
assert_eq!(
format!("{:?}", gm.write_obj(val1, bad_addr).err().unwrap()),
format!("InvalidGuestAddress({:?})", bad_addr,)
);
assert_eq!(
format!("{:?}", gm.write_obj(val1, bad_addr2).err().unwrap()),
format!(
"PartialBuffer {{ expected: {:?}, completed: {:?} }}",
mem::size_of::<u64>(),
max_addr.checked_offset_from(bad_addr2).unwrap()
)
);
gm.write_obj(val1, GuestAddress(0x500)).unwrap();
gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap();
let num1: u64 = gm.read_obj(GuestAddress(0x500)).unwrap();
let num2: u64 = gm.read_obj(GuestAddress(0x1000 + 32)).unwrap();
assert_eq!(val1, num1);
assert_eq!(val2, num2);
}
#[test]
fn write_and_read() {
let mut start_addr = GuestAddress(0x1000);
let gm = GuestMemoryMmap::new(&[(start_addr, 0x400)]).unwrap();
let sample_buf = &[1, 2, 3, 4, 5];
assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 5);
let buf = &mut [0u8; 5];
assert_eq!(gm.read(buf, start_addr).unwrap(), 5);
assert_eq!(buf, sample_buf);
start_addr = GuestAddress(0x13ff);
assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 1);
assert_eq!(gm.read(buf, start_addr).unwrap(), 1);
assert_eq!(buf[0], sample_buf[0]);
}
#[test]
fn read_to_and_write_from_mem() {
let gm = GuestMemoryMmap::new(&[(GuestAddress(0x1000), 0x400)]).unwrap();
let addr = GuestAddress(0x1010);
let mut file = if cfg!(unix) {
File::open(Path::new("/dev/zero")).unwrap()
} else {
File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
};
gm.write_obj(!0u32, addr).unwrap();
gm.read_exact_from(addr, &mut file, mem::size_of::<u32>())
.unwrap();
let value: u32 = gm.read_obj(addr).unwrap();
if cfg!(unix) {
assert_eq!(value, 0);
} else {
assert_eq!(value, 0x0090_5a4d);
}
let mut sink = Vec::new();
gm.write_all_to(addr, &mut sink, mem::size_of::<u32>())
.unwrap();
if cfg!(unix) {
assert_eq!(sink, vec![0; mem::size_of::<u32>()]);
} else {
assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
};
}
#[test]
fn create_vec_with_regions() {
let region_size = 0x400;
let regions = vec![
(GuestAddress(0x0), region_size),
(GuestAddress(0x1000), region_size),
];
let mut iterated_regions = Vec::new();
let gm = GuestMemoryMmap::new(&regions).unwrap();
let res: Result<()> = gm.with_regions(|_, region| {
assert_eq!(region.len(), region_size as GuestUsize);
Ok(())
});
assert!(res.is_ok());
let res: Result<()> = gm.with_regions_mut(|_, region| {
iterated_regions.push((region.start_addr(), region.len() as usize));
Ok(())
});
assert!(res.is_ok());
assert_eq!(regions, iterated_regions);
assert_eq!(gm.clone().regions[0].guest_base, regions[0].0);
assert_eq!(gm.clone().regions[1].guest_base, regions[1].0);
}
#[test]
fn test_access_cross_boundary() {
let start_addr1 = GuestAddress(0x0);
let start_addr2 = GuestAddress(0x1000);
let gm = GuestMemoryMmap::new(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
let sample_buf = &[1, 2, 3, 4, 5];
assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5);
let buf = &mut [0u8; 5];
assert_eq!(gm.read(buf, GuestAddress(0xffc)).unwrap(), 5);
assert_eq!(buf, sample_buf);
}
}

View File

@ -1,168 +0,0 @@
// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! A default Unix implementation of the GuestMemory trait by mmap()-ing guest's memory into
//! the current process.
//!
//! The main structs to access guest's memory are:
//! - [MmapRegion](struct.MmapRegion.html): mmap a contiguous region of guest's memory into the
//! current process
//! - [GuestRegionMmap](struct.GuestRegionMmap.html): tracks a mapping of memory in the current
//! process and the corresponding base address. It relays guest memory access requests to the
//! underlying [MmapRegion](struct.MmapRegion.html) object.
//! - [GuestMemoryMmap](struct.GuestMemoryMmap.html): provides methods to access a collection of
//! GuestRegionMmap objects.
use libc;
use std::io;
use std::ptr::null_mut;
use mmap::AsSlice;
use volatile_memory::{self, compute_offset, VolatileMemory, VolatileSlice};
use std::os::unix::io::AsRawFd;
/// A backend driver to access guest's physical memory by mmapping guest's memory into the current
/// process.
/// For a combination of 32-bit hypervisor and 64-bit virtual machine, only part of the guest's
/// physical memory may be mapped into the current process due to limited process virtual address
/// space size.
#[derive(Debug)]
pub struct MmapRegion {
addr: *mut u8,
size: usize,
}
// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for MmapRegion {}
unsafe impl Sync for MmapRegion {}
impl MmapRegion {
/// Creates an anonymous shared mapping of `size` bytes.
///
/// # Arguments
/// * `size` - Size of memory region in bytes.
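///
/// A short usage sketch, mirroring the `basic_map` test below:
///
/// ```
/// # use vm_memory::{MmapRegion, VolatileMemory};
/// let m = MmapRegion::new(1024).unwrap();
/// assert_eq!(m.len(), 1024);
/// ```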
pub fn new(size: usize) -> io::Result<Self> {
// This is safe because we are creating an anonymous mapping in a place not already used by
// any other area in this process.
let addr = unsafe {
libc::mmap(
null_mut(),
size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
-1,
0,
)
};
if addr == libc::MAP_FAILED {
return Err(io::Error::last_os_error());
}
Ok(Self {
addr: addr as *mut u8,
size,
})
}
/// Maps the `size` bytes starting at `offset` bytes of the given `fd`.
///
/// # Arguments
/// * `fd` - File descriptor to mmap from.
/// * `size` - Size of memory region in bytes.
/// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
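///
/// A hedged sketch using a temporary file (it assumes the `tempfile` crate is
/// available, as it is for this crate's tests):
///
/// ```
/// # extern crate tempfile;
/// # use std::io::Write;
/// # use vm_memory::{MmapRegion, VolatileMemory};
/// let mut f = tempfile::tempfile().unwrap();
/// f.write_all(&[0u8; 1024]).unwrap();
/// let m = MmapRegion::from_fd(&f, 1024, 0).unwrap();
/// assert_eq!(m.len(), 1024);
/// ```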
pub fn from_fd(fd: &AsRawFd, size: usize, offset: libc::off_t) -> io::Result<Self> {
// This is safe because we are creating a mapping in a place not already used by any other
// area in this process.
let addr = unsafe {
libc::mmap(
null_mut(),
size,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED,
fd.as_raw_fd(),
offset,
)
};
if addr == libc::MAP_FAILED {
return Err(io::Error::last_os_error());
}
Ok(Self {
addr: addr as *mut u8,
size,
})
}
/// Returns a pointer to the beginning of the memory region. Should only be
/// used for passing this region to ioctls for setting guest memory.
pub fn as_ptr(&self) -> *mut u8 {
self.addr
}
}
impl AsSlice for MmapRegion {
// Returns the region as a slice; unsafe because of possible aliasing with
// other users of the same guest memory.
unsafe fn as_slice(&self) -> &[u8] {
// This is safe because we mapped the area at addr ourselves, so this slice will not
// overflow. However, it is possible to alias.
std::slice::from_raw_parts(self.addr, self.size)
}
// safe because it's expected interior mutability
#[allow(clippy::mut_from_ref)]
unsafe fn as_mut_slice(&self) -> &mut [u8] {
// This is safe because we mapped the area at addr ourselves, so this slice will not
// overflow. However, it is possible to alias.
std::slice::from_raw_parts_mut(self.addr, self.size)
}
}
impl VolatileMemory for MmapRegion {
fn len(&self) -> usize {
self.size
}
fn get_slice(&self, offset: usize, count: usize) -> volatile_memory::Result<VolatileSlice> {
let end = compute_offset(offset, count)?;
if end > self.size {
return Err(volatile_memory::Error::OutOfBounds { addr: end });
}
// Safe because we checked that offset + count was within our range and we only ever hand
// out volatile accessors.
Ok(unsafe { VolatileSlice::new((self.addr as usize + offset) as *mut _, count) })
}
}
impl Drop for MmapRegion {
fn drop(&mut self) {
// This is safe because we mmap the area at addr ourselves, and nobody
// else is holding a reference to it.
unsafe {
libc::munmap(self.addr as *mut libc::c_void, self.size);
}
}
}
#[cfg(test)]
mod tests {
use mmap_unix::MmapRegion;
use std::os::unix::io::FromRawFd;
#[test]
fn map_invalid_fd() {
let fd = unsafe { std::fs::File::from_raw_fd(-1) };
let e = MmapRegion::from_fd(&fd, 1024, 0).unwrap_err();
assert_eq!(e.raw_os_error(), Some(libc::EBADF));
}
}

View File

@ -1,225 +0,0 @@
// Copyright (C) 2019 CrowdStrike, Inc. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
//! A default Windows implementation of the GuestMemory trait using VirtualAlloc() and MapViewOfFile().
//!
//! The main structs to access guest's memory are:
//! - [MmapRegion](struct.MmapRegion.html): mmap a contiguous region of guest's memory into the
//! current process
//! - [GuestRegionMmap](struct.GuestRegionMmap.html): tracks a mapping of memory in the current
//! process and the corresponding base address. It relays guest memory access requests to the
//! underlying [MmapRegion](struct.MmapRegion.html) object.
//! - [GuestMemoryMmap](struct.GuestMemoryMmap.html): provides methods to access a collection of
//! GuestRegionMmap objects.
use libc;
use std::io;
use std::ptr::null_mut;
use mmap::AsSlice;
use volatile_memory::{self, compute_offset, VolatileMemory, VolatileSlice};
use libc::{c_void, size_t};
use std;
use std::os::windows::io::{AsRawHandle, RawHandle};
use std::ptr::null;
#[allow(non_snake_case)]
#[link(name = "kernel32")]
extern "stdcall" {
pub fn VirtualAlloc(
lpAddress: *mut c_void,
dwSize: size_t,
flAllocationType: u32,
flProtect: u32,
) -> *mut c_void;
pub fn VirtualFree(lpAddress: *mut c_void, dwSize: size_t, dwFreeType: u32) -> u32;
pub fn CreateFileMappingA(
hFile: RawHandle, // HANDLE
lpFileMappingAttributes: *const c_void, // LPSECURITY_ATTRIBUTES
flProtect: u32, // DWORD
dwMaximumSizeHigh: u32, // DWORD
dwMaximumSizeLow: u32, // DWORD
lpName: *const u8, // LPCSTR
) -> RawHandle; // HANDLE
pub fn MapViewOfFile(
hFileMappingObject: RawHandle,
dwDesiredAccess: u32,
dwFileOffsetHigh: u32,
dwFileOffsetLow: u32,
dwNumberOfBytesToMap: size_t,
) -> *mut c_void;
pub fn CloseHandle(hObject: RawHandle) -> u32; // BOOL
}
const MM_HIGHEST_VAD_ADDRESS: u64 = 0x000007FFFFFDFFFF;
const MEM_COMMIT: u32 = 0x00001000;
const MEM_RELEASE: u32 = 0x00008000;
const FILE_MAP_ALL_ACCESS: u32 = 0xf001f;
const PAGE_READWRITE: u32 = 0x04;
pub const MAP_FAILED: *mut c_void = 0 as *mut c_void;
pub const INVALID_HANDLE_VALUE: RawHandle = (-1isize) as RawHandle;
#[allow(dead_code)]
pub const ERROR_INVALID_PARAMETER: i32 = 87;
/// A backend driver to access guest's physical memory by mmapping guest's memory into the current
/// process.
/// For a combination of 32-bit hypervisor and 64-bit virtual machine, only part of the guest's
/// physical memory may be mapped into the current process due to limited process virtual address
/// space size.
#[derive(Debug)]
pub struct MmapRegion {
addr: *mut u8,
size: usize,
}
// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for MmapRegion {}
unsafe impl Sync for MmapRegion {}
impl MmapRegion {
/// Creates an anonymous shared mapping of `size` bytes.
///
/// # Arguments
/// * `size` - Size of memory region in bytes.
pub fn new(size: usize) -> io::Result<Self> {
if (size == 0) || (size > MM_HIGHEST_VAD_ADDRESS as usize) {
return Err(io::Error::from_raw_os_error(libc::EINVAL));
}
// This is safe because we are creating an anonymous mapping in a place not already used by
// any other area in this process.
let addr = unsafe { VirtualAlloc(0 as *mut c_void, size, MEM_COMMIT, PAGE_READWRITE) };
if addr == MAP_FAILED {
return Err(io::Error::last_os_error());
}
Ok(Self {
addr: addr as *mut u8,
size,
})
}
/// Maps the `size` bytes starting at `offset` bytes of the given `fd`.
///
/// # Arguments
/// * `file` - Raw handle to a file to map into the address space.
/// * `size` - Size of memory region in bytes.
/// * `offset` - Offset in bytes from the beginning of `file` to start the mapping.
pub fn from_fd(file: &AsRawHandle, size: usize, offset: libc::off_t) -> io::Result<Self> {
let handle = file.as_raw_handle();
if handle == INVALID_HANDLE_VALUE {
return Err(io::Error::from_raw_os_error(libc::EBADF));
}
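// CreateFileMappingA takes the maximum mapping size split into 32-bit high and
// low halves, so pass the upper and lower words of `size` separately.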
let mapping = unsafe {
CreateFileMappingA(
handle,
null(),
PAGE_READWRITE,
(size >> 32) as u32,
size as u32,
null(),
)
};
if mapping == 0 as RawHandle {
return Err(io::Error::last_os_error());
}
// This is safe because we are creating a mapping in a place not already used by any other
// area in this process.
let addr = unsafe {
MapViewOfFile(
mapping,
FILE_MAP_ALL_ACCESS,
(offset as u64 >> 32) as u32,
offset as u32,
size,
)
};
unsafe {
CloseHandle(mapping);
}
if addr == null_mut() {
return Err(io::Error::last_os_error());
}
Ok(Self {
addr: addr as *mut u8,
size,
})
}
/// Returns a pointer to the beginning of the memory region. Should only be
/// used for passing this region to ioctls for setting guest memory.
pub fn as_ptr(&self) -> *mut u8 {
self.addr
}
}
impl AsSlice for MmapRegion {
// Returns the region as a slice; unsafe because of possible aliasing with
// other users of the same guest memory.
unsafe fn as_slice(&self) -> &[u8] {
// This is safe because we mapped the area at addr ourselves, so this slice will not
// overflow. However, it is possible to alias.
std::slice::from_raw_parts(self.addr, self.size)
}
// safe because it's expected interior mutability
#[allow(clippy::mut_from_ref)]
unsafe fn as_mut_slice(&self) -> &mut [u8] {
// This is safe because we mapped the area at addr ourselves, so this slice will not
// overflow. However, it is possible to alias.
std::slice::from_raw_parts_mut(self.addr, self.size)
}
}
impl VolatileMemory for MmapRegion {
fn len(&self) -> usize {
self.size
}
fn get_slice(&self, offset: usize, count: usize) -> volatile_memory::Result<VolatileSlice> {
let end = compute_offset(offset, count)?;
if end > self.size {
return Err(volatile_memory::Error::OutOfBounds { addr: end });
}
// Safe because we checked that offset + count was within our range and we only ever hand
// out volatile accessors.
Ok(unsafe { VolatileSlice::new((self.addr as usize + offset) as *mut _, count) })
}
}
impl Drop for MmapRegion {
fn drop(&mut self) {
// This is safe because we mmap the area at addr ourselves, and nobody
// else is holding a reference to it.
unsafe {
VirtualFree(self.addr as *mut libc::c_void, self.size, MEM_RELEASE);
}
}
}
#[cfg(test)]
mod tests {
use mmap_windows::{MmapRegion, INVALID_HANDLE_VALUE};
use std::os::windows::io::FromRawHandle;
#[test]
fn map_invalid_handle() {
let fd = unsafe { std::fs::File::from_raw_handle(INVALID_HANDLE_VALUE) };
let e = MmapRegion::from_fd(&fd, 1024, 0).unwrap_err();
assert_eq!(e.raw_os_error(), Some(libc::EBADF));
}
}

View File

@ -1,967 +0,0 @@
// Portions Copyright 2019 Red Hat, Inc.
//
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the THIRD-PARTY file.
//! Types for volatile access to memory.
//!
//! Two of the core rules for safe Rust are no data races and no aliased mutable references.
//! `VolatileRef` and `VolatileSlice`, along with types that produce those which implement
//! `VolatileMemory`, allow us to sidestep that rule by wrapping pointers that absolutely have to be
//! accessed volatile. Some systems really do need to operate on shared memory and can't have the
//! compiler reordering or eliding access because it has no visibility into what other systems are
//! doing with that hunk of memory.
//!
//! For the purposes of maintaining safety, volatile memory has some rules of its own:
//! 1. No references or slices to volatile memory (`&` or `&mut`).
//! 2. Access should always be done with a volatile read or write.
//! The first rule is because having references of any kind to memory considered volatile would
//! violate pointer aliasing. The second is because non-volatile accesses are inherently undefined if
//! done concurrently without synchronization. With volatile access we know that the compiler has
//! not reordered or elided the access.
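//!
//! A small, hedged sketch of the intended access pattern (the offset and value are
//! arbitrary example values):
//!
//! ```
//! # use vm_memory::{Bytes, VolatileMemory};
//! let mut mem = [0u8; 32];
//! let mem_ref = &mut mem[..];
//! let vslice = mem_ref.as_volatile_slice();
//! // All access goes through volatile reads and writes, never through plain references.
//! vslice.write_obj(0x55u8, 4).unwrap();
//! assert_eq!(vslice.read_obj::<u8>(4).unwrap(), 0x55);
//! ```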
use std::cmp::min;
use std::fmt;
use std::io::{self, Read, Write};
use std::marker::PhantomData;
use std::mem::size_of;
use std::ptr::copy;
use std::ptr::{read_volatile, write_volatile};
use std::result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
use std::usize;
use bytes::{ByteValued, Bytes};
/// VolatileMemory related error codes
#[allow(missing_docs)]
#[derive(Debug)]
pub enum Error {
/// `addr` is out of bounds of the volatile memory slice.
OutOfBounds { addr: usize },
/// Taking a slice at `base` with `offset` would overflow `usize`.
Overflow { base: usize, offset: usize },
/// Writing to memory failed
IOError(io::Error),
/// Incomplete read or write
PartialBuffer { expected: usize, completed: usize },
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Error::OutOfBounds { addr } => write!(f, "address 0x{:x} is out of bounds", addr),
Error::Overflow { base, offset } => write!(
f,
"address 0x{:x} offset by 0x{:x} would overflow",
base, offset
),
Error::IOError(error) => write!(f, "{}", error),
Error::PartialBuffer {
expected,
completed,
} => write!(
f,
"only used {} bytes in {} long buffer",
completed, expected
),
}
}
}
/// Result of volatile memory operations
pub type Result<T> = result::Result<T, Error>;
/// Convenience function for computing `base + offset` which returns
/// `Err(Error::Overflow)` instead of panicking in the case `base + offset` exceeds
/// `usize::MAX`.
///
/// # Examples
///
/// ```
/// # use vm_memory::volatile_memory::*;
/// # fn get_slice(offset: usize, count: usize) -> Result<()> {
/// let mem_end = compute_offset(offset, count)?;
/// if mem_end > 100 {
/// return Err(Error::OutOfBounds{addr: mem_end});
/// }
/// # Ok(())
/// # }
/// ```
pub fn compute_offset(base: usize, offset: usize) -> Result<usize> {
match base.checked_add(offset) {
None => Err(Error::Overflow { base, offset }),
Some(m) => Ok(m),
}
}
/// Trait for types that support raw volatile access to their data.
pub trait VolatileMemory {
/// Gets the size of this slice.
fn len(&self) -> usize;
/// Check whether the region is empty.
fn is_empty(&self) -> bool {
self.len() == 0
}
/// Gets a slice of memory at `offset` that is `count` bytes in length and supports volatile
/// access.
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice>;
/// Gets a slice of memory for the entire region that supports volatile access.
fn as_volatile_slice(&self) -> VolatileSlice {
self.get_slice(0, self.len()).unwrap()
}
/// Gets a `VolatileRef` at `offset`.
fn get_ref<T: ByteValued>(&self, offset: usize) -> Result<VolatileRef<T>> {
let slice = self.get_slice(offset, size_of::<T>())?;
unsafe {
// This is safe because the pointer is range-checked by get_slice, and
// the lifetime is the same as self.
Ok(VolatileRef::<T>::new(slice.addr))
}
}
/// Check that `base + offset` is valid and return the sum.
fn compute_end_offset(&self, base: usize, offset: usize) -> Result<usize> {
let mem_end = compute_offset(base, offset)?;
if mem_end > self.len() {
return Err(Error::OutOfBounds { addr: mem_end });
}
Ok(mem_end)
}
}
impl<'a> VolatileMemory for &'a mut [u8] {
fn len(&self) -> usize {
<[u8]>::len(self)
}
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
let _ = self.compute_end_offset(offset, count)?;
unsafe {
// This is safe because the pointer is range-checked by compute_end_offset, and
// the lifetime is the same as the original slice.
Ok(VolatileSlice::new(
(self.as_ptr() as usize + offset) as *mut _,
count,
))
}
}
}
#[repr(C, packed)]
struct Packed<T>(T);
/// A slice of raw memory that supports volatile access.
#[derive(Copy, Clone, Debug)]
pub struct VolatileSlice<'a> {
addr: *mut u8,
size: usize,
phantom: PhantomData<&'a u8>,
}
impl<'a> VolatileSlice<'a> {
/// Creates a slice of raw memory that must support volatile access.
///
/// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long
/// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller
/// must also guarantee that all other users of the given chunk of memory are using volatile
/// accesses.
pub unsafe fn new(addr: *mut u8, size: usize) -> VolatileSlice<'a> {
VolatileSlice {
addr,
size,
phantom: PhantomData,
}
}
/// Gets the address of this slice's memory.
pub fn as_ptr(&self) -> *mut u8 {
self.addr
}
/// Gets the size of this slice.
pub fn len(&self) -> usize {
self.size
}
/// Check whether the slice is empty.
pub fn is_empty(&self) -> bool {
self.size == 0
}
/// Creates a copy of this slice with the address increased by `count` bytes, and the size
/// reduced by `count` bytes.
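///
/// A minimal sketch:
///
/// ```
/// # use vm_memory::VolatileMemory;
/// let mut mem = [0u8; 32];
/// let mem_ref = &mut mem[..];
/// let vslice = mem_ref.as_volatile_slice();
/// let tail = vslice.offset(16).unwrap();
/// assert_eq!(tail.len(), 16);
/// ```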
pub fn offset(self, count: usize) -> Result<VolatileSlice<'a>> {
let new_addr = (self.addr as usize)
.checked_add(count)
.ok_or(Error::Overflow {
base: self.addr as usize,
offset: count,
})?;
let new_size = self
.size
.checked_sub(count)
.ok_or(Error::OutOfBounds { addr: new_addr })?;
unsafe {
// Safe because the memory has the same lifetime and points to a subset of the
// memory of the original slice.
Ok(VolatileSlice::new(new_addr as *mut u8, new_size))
}
}
/// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
/// `buf`.
///
/// The copy happens from smallest to largest address in `T` sized chunks using volatile reads.
///
/// # Examples
///
/// ```
/// # use std::fs::File;
/// # use std::path::Path;
/// # use vm_memory::VolatileMemory;
/// # fn test_write_null() -> Result<(), ()> {
/// let mut mem = [0u8; 32];
/// let mem_ref = &mut mem[..];
/// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
/// let mut buf = [5u8; 16];
/// vslice.copy_to(&mut buf[..]);
/// for v in &buf[..] {
/// assert_eq!(*v, 0);
/// }
/// # Ok(())
/// # }
/// ```
pub fn copy_to<T>(&self, buf: &mut [T]) -> usize
where
T: ByteValued,
{
let mut addr = self.addr;
let mut i = 0;
for v in buf.iter_mut().take(self.size / size_of::<T>()) {
unsafe {
*v = read_volatile(addr as *const Packed<T>).0;
addr = addr.add(size_of::<T>());
};
i += 1;
}
i
}
/// Copies `self.len()` or `slice.len()` bytes, whichever is smaller, to `slice`.
///
/// The copies happen in an undefined order.
/// # Examples
///
/// ```
/// # use vm_memory::VolatileMemory;
/// # fn test_write_null() -> Result<(), ()> {
/// let mut mem = [0u8; 32];
/// let mem_ref = &mut mem[..];
/// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
/// vslice.copy_to_volatile_slice(vslice.get_slice(16, 16).map_err(|_| ())?);
/// # Ok(())
/// # }
/// ```
pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) {
unsafe {
// Safe because the pointers are range-checked when the slices
// are created, and they never escape the VolatileSlices.
// FIXME: ... however, is it really okay to mix non-volatile
// operations such as copy with read_volatile and write_volatile?
copy(self.addr, slice.addr, min(self.size, slice.size));
}
}
/// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
/// this slice's memory.
///
/// The copy happens from smallest to largest address in `T` sized chunks using volatile writes.
///
/// # Examples
///
/// ```
/// # use std::fs::File;
/// # use std::path::Path;
/// # use vm_memory::VolatileMemory;
/// # fn test_write_null() -> Result<(), ()> {
/// let mut mem = [0u8; 32];
/// let mem_ref = &mut mem[..];
/// let vslice = mem_ref.get_slice(0, 32).map_err(|_| ())?;
/// let buf = [5u8; 64];
/// vslice.copy_from(&buf[..]);
/// for i in 0..4 {
/// assert_eq!(vslice.get_ref::<u32>(i * 4).map_err(|_| ())?.load(), 0x05050505);
/// }
/// # Ok(())
/// # }
/// ```
pub fn copy_from<T>(&self, buf: &[T])
where
T: ByteValued,
{
let mut addr = self.addr;
for &v in buf.iter().take(self.size / size_of::<T>()) {
unsafe {
// Safe because the pointers are range-checked when the slices
// are created, and they never escape the VolatileSlices.
write_volatile(addr as *mut Packed<T>, Packed::<T>(v));
addr = addr.add(size_of::<T>());
}
}
}
// These functions are private and are only used by the read/write functions. It is not valid in
// general to take slices of volatile memory.
unsafe fn as_slice(&self) -> &[u8] {
from_raw_parts(self.addr, self.size)
}
// safe because it's expected interior mutability
#[allow(clippy::mut_from_ref)]
unsafe fn as_mut_slice(&self) -> &mut [u8] {
from_raw_parts_mut(self.addr, self.size)
}
}
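// A short illustrative sketch (not part of the original API; sizes are
// arbitrary) combining `offset` with the volatile copy helpers above.
#[allow(dead_code)]
fn example_offset_and_copy() {
    let mut mem = [0u8; 8];
    let mem_ref = &mut mem[..];
    let vslice = mem_ref.get_slice(0, 8).unwrap();
    // Fill the upper half of the slice with 0xAA bytes.
    vslice.offset(4).unwrap().copy_from(&[0xAAu8; 4]);
    // Copy the whole slice back out and check both halves.
    let mut out = [0u8; 8];
    assert_eq!(vslice.copy_to(&mut out[..]), 8);
    assert_eq!(out, [0, 0, 0, 0, 0xAA, 0xAA, 0xAA, 0xAA]);
}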
impl<'a> Bytes<usize> for VolatileSlice<'a> {
type E = Error;
/// Writes a slice to the region at the specified address.
/// Returns the number of bytes written. The number of bytes written can
/// be less than the length of the slice if there isn't enough room in the
/// region.
///
/// # Examples
/// * Write a slice at offset 256.
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let res = vslice.write(&[1,2,3,4,5], 1020);
/// assert!(res.is_ok());
/// assert_eq!(res.unwrap(), 4);
/// ```
fn write(&self, buf: &[u8], addr: usize) -> Result<usize> {
if addr >= self.size {
return Err(Error::OutOfBounds { addr });
}
unsafe {
// Guest memory can't strictly be modeled as a slice because it is
// volatile. Writing to it with what compiles down to a memcpy
// won't hurt anything as long as we get the bounds checks right.
let mut slice: &mut [u8] = &mut self.as_mut_slice()[addr..];
Ok(slice.write(buf).map_err(Error::IOError)?)
}
}
/// Reads to a slice from the region at the specified address.
/// Returns the number of bytes read. The number of bytes read can be less than the length
/// of the slice if there isn't enough room in the region.
///
/// # Examples
/// * Read a slice of size 16 at offset 256.
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let buf = &mut [0u8; 16];
/// let res = vslice.read(buf, 1010);
/// assert!(res.is_ok());
/// assert_eq!(res.unwrap(), 14);
/// ```
fn read(&self, mut buf: &mut [u8], addr: usize) -> Result<usize> {
if addr >= self.size {
return Err(Error::OutOfBounds { addr });
}
unsafe {
// Guest memory can't strictly be modeled as a slice because it is
// volatile. Reading from it with what compiles down to a memcpy
// won't hurt anything as long as we get the bounds checks right.
let slice: &[u8] = &self.as_slice()[addr..];
Ok(buf.write(slice).map_err(Error::IOError)?)
}
}
/// Writes a slice to the region at the specified address.
///
/// # Examples
/// * Write a slice at offset 256.
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let res = vslice.write_slice(&[1,2,3,4,5], 256);
/// assert!(res.is_ok());
/// assert_eq!(res.unwrap(), ());
/// ```
fn write_slice(&self, buf: &[u8], addr: usize) -> Result<()> {
let len = self.write(buf, addr)?;
if len != buf.len() {
return Err(Error::PartialBuffer {
expected: buf.len(),
completed: len,
});
}
Ok(())
}
/// Reads to a slice from the region at the specified address.
///
/// # Examples
/// * Read a slice of size 16 at offset 256.
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let buf = &mut [0u8; 16];
/// let res = vslice.read_slice(buf, 256);
/// assert!(res.is_ok());
/// assert_eq!(res.unwrap(), ());
/// ```
fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<()> {
let len = self.read(buf, addr)?;
if len != buf.len() {
return Err(Error::PartialBuffer {
expected: buf.len(),
completed: len,
});
}
Ok(())
}
/// Reads data from a readable object like a File and writes it to the region.
///
/// # Examples
///
/// * Read bytes from /dev/urandom
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # use std::fs::File;
/// # use std::path::Path;
/// # fn test_read_random() -> Result<u32, ()> {
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
/// vslice.read_from(32, &mut file, 128).map_err(|_| ())?;
/// let rand_val: u32 = vslice.read_obj(40).map_err(|_| ())?;
/// # Ok(rand_val)
/// # }
/// ```
fn read_from<F>(&self, addr: usize, src: &mut F, count: usize) -> Result<usize>
where
F: Read,
{
let end = self.compute_end_offset(addr, count)?;
unsafe {
// It is safe to overwrite the volatile memory. Accessing the guest
// memory as a mutable slice is OK because nothing assumes another
// thread won't change what is loaded.
let dst = &mut self.as_mut_slice()[addr..end];
src.read(dst).map_err(Error::IOError)
}
}
/// Reads data from a readable object like a File and writes it to the region.
///
/// # Examples
///
/// * Read bytes from /dev/urandom
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # use std::fs::File;
/// # use std::path::Path;
/// # fn test_read_random() -> Result<u32, ()> {
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
/// vslice.read_exact_from(32, &mut file, 128).map_err(|_| ())?;
/// let rand_val: u32 = vslice.read_obj(40).map_err(|_| ())?;
/// # Ok(rand_val)
/// # }
/// ```
fn read_exact_from<F>(&self, addr: usize, src: &mut F, count: usize) -> Result<()>
where
F: Read,
{
let end = self.compute_end_offset(addr, count)?;
unsafe {
// It is safe to overwrite the volatile memory. Accessing the guest
// memory as a mutable slice is OK because nothing assumes another
// thread won't change what is loaded.
let dst = &mut self.as_mut_slice()[addr..end];
src.read_exact(dst).map_err(Error::IOError)?;
}
Ok(())
}
/// Reads data from the region to a writable object.
///
/// # Examples
///
/// * Write 128 bytes to /dev/null
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # use std::fs::File;
/// # use std::path::Path;
/// # fn test_write_null() -> Result<(), ()> {
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let mut file = File::open(Path::new("/dev/null")).map_err(|_| ())?;
/// vslice.write_to(32, &mut file, 128).map_err(|_| ())?;
/// # Ok(())
/// # }
/// ```
fn write_to<F>(&self, addr: usize, dst: &mut F, count: usize) -> Result<usize>
where
F: Write,
{
let end = self.compute_end_offset(addr, count)?;
unsafe {
// It is safe to read from volatile memory. Accessing the guest
// memory as a slice is OK because nothing assumes another thread
// won't change what is loaded.
let src = &self.as_mut_slice()[addr..end];
dst.write(src).map_err(Error::IOError)
}
}
/// Reads data from the region to a writable object.
///
/// # Examples
///
/// * Write 128 bytes to /dev/null
///
/// ```
/// # use vm_memory::{Bytes, VolatileMemory};
/// # use std::fs::File;
/// # use std::path::Path;
/// # fn test_write_null() -> Result<(), ()> {
/// # let mut mem = [0u8; 1024];
/// # let mut mem_ref = &mut mem[..];
/// # let vslice = mem_ref.as_volatile_slice();
/// let mut file = File::open(Path::new("/dev/null")).map_err(|_| ())?;
/// vslice.write_all_to(32, &mut file, 128).map_err(|_| ())?;
/// # Ok(())
/// # }
/// ```
fn write_all_to<F>(&self, addr: usize, dst: &mut F, count: usize) -> Result<()>
where
F: Write,
{
let end = self.compute_end_offset(addr, count)?;
unsafe {
// It is safe to read from volatile memory. Accessing the guest
// memory as a slice is OK because nothing assumes another thread
// won't change what is loaded.
let src = &self.as_mut_slice()[addr..end];
dst.write_all(src).map_err(Error::IOError)?;
}
Ok(())
}
}
impl<'a> VolatileMemory for VolatileSlice<'a> {
fn len(&self) -> usize {
self.size
}
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
let _ = self.compute_end_offset(offset, count)?;
Ok(unsafe {
// This is safe because the pointer is range-checked by compute_end_offset, and
// the lifetime is the same as self.
VolatileSlice::new((self.addr as usize + offset) as *mut u8, count)
})
}
}
/// A memory location that supports volatile access of a `T`.
///
/// # Examples
///
/// ```
/// # use vm_memory::VolatileRef;
/// let mut v = 5u32;
/// assert_eq!(v, 5);
/// let v_ref = unsafe { VolatileRef::<u32>::new(&mut v as *mut u32 as *mut u8) };
/// assert_eq!(v_ref.load(), 5);
/// v_ref.store(500);
/// assert_eq!(v, 500);
/// ```
#[derive(Debug)]
pub struct VolatileRef<'a, T: ByteValued>
where
T: 'a,
{
addr: *mut Packed<T>,
phantom: PhantomData<&'a T>,
}
#[allow(clippy::len_without_is_empty)]
impl<'a, T: ByteValued> VolatileRef<'a, T> {
/// Creates a reference to raw memory that must support volatile access of `T` sized chunks.
///
/// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a
/// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller
/// must also guarantee that all other users of the given chunk of memory are using volatile
/// accesses.
pub unsafe fn new(addr: *mut u8) -> VolatileRef<'a, T> {
VolatileRef {
addr: addr as *mut Packed<T>,
phantom: PhantomData,
}
}
/// Gets the address of this reference's memory.
pub fn as_ptr(&self) -> *mut u8 {
self.addr as *mut u8
}
/// Gets the size of the referenced type `T`.
///
/// # Examples
///
/// ```
/// # use std::mem::size_of;
/// # use vm_memory::VolatileRef;
/// let v_ref = unsafe { VolatileRef::<u32>::new(0 as *mut _) };
/// assert_eq!(v_ref.len(), size_of::<u32>() as usize);
/// ```
pub fn len(&self) -> usize {
size_of::<T>()
}
/// Does a volatile write of the value `v` to the address of this ref.
#[inline(always)]
pub fn store(&self, v: T) {
unsafe { write_volatile(self.addr, Packed::<T>(v)) };
}
/// Does a volatile read of the value at the address of this ref.
#[inline(always)]
pub fn load(&self) -> T {
// For the purposes of demonstrating why read_volatile is necessary, try replacing the code
// in this function with the commented code below and running `cargo test --release`.
// unsafe { *(self.addr as *const T) }
unsafe { read_volatile(self.addr).0 }
}
/// Converts this `T` reference to a raw slice with the same size and address.
pub fn to_slice(&self) -> VolatileSlice<'a> {
unsafe { VolatileSlice::new(self.addr as *mut u8, size_of::<T>()) }
}
}
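// A small illustrative sketch (not part of the original API): obtaining a
// `VolatileRef` through the checked `get_ref` path rather than the raw
// constructor. Any `ByteValued` type works; `u32` is arbitrary.
#[allow(dead_code)]
fn example_volatile_ref() {
    let mut mem = [0u8; 4];
    let mem_ref = &mut mem[..];
    let v_ref = mem_ref.get_ref::<u32>(0).unwrap();
    v_ref.store(0xdead_beef);
    assert_eq!(v_ref.load(), 0xdead_beef);
    // `to_slice` views the same four bytes as a `VolatileSlice`.
    assert_eq!(v_ref.to_slice().len(), 4);
}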
#[cfg(test)]
mod tests {
extern crate tempfile;
use super::*;
use self::tempfile::tempfile;
use std::sync::Arc;
use std::thread::{sleep, spawn};
use std::time::Duration;
use std::fs::File;
use std::path::Path;
#[derive(Clone)]
struct VecMem {
mem: Arc<Vec<u8>>,
}
impl VecMem {
fn new(size: usize) -> VecMem {
let mut mem = Vec::new();
mem.resize(size, 0);
VecMem { mem: Arc::new(mem) }
}
}
impl VolatileMemory for VecMem {
fn len(&self) -> usize {
self.mem.len()
}
fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
let _ = self.compute_end_offset(offset, count)?;
Ok(unsafe {
VolatileSlice::new((self.mem.as_ptr() as usize + offset) as *mut _, count)
})
}
}
#[test]
fn ref_store() {
let mut a = [0u8; 1];
{
let a_ref = &mut a[..];
let v_ref = a_ref.get_ref(0).unwrap();
v_ref.store(2u8);
}
assert_eq!(a[0], 2);
}
#[test]
fn ref_load() {
let mut a = [5u8; 1];
{
let a_ref = &mut a[..];
let c = {
let v_ref = a_ref.get_ref::<u8>(0).unwrap();
assert_eq!(v_ref.load(), 5u8);
v_ref
};
// To make sure we can take a v_ref out of the scope we made it in:
c.load();
// but not too far:
// c
} //.load()
;
}
#[test]
fn ref_to_slice() {
let mut a = [1u8; 5];
let a_ref = &mut a[..];
let v_ref = a_ref.get_ref(1).unwrap();
v_ref.store(0x1234_5678u32);
let ref_slice = v_ref.to_slice();
assert_eq!(v_ref.as_ptr() as usize, ref_slice.as_ptr() as usize);
assert_eq!(v_ref.len(), ref_slice.len());
assert!(!ref_slice.is_empty());
}
#[test]
fn observe_mutate() {
let a = VecMem::new(1);
let a_clone = a.clone();
let v_ref = a.get_ref::<u8>(0).unwrap();
v_ref.store(99);
spawn(move || {
sleep(Duration::from_millis(10));
let clone_v_ref = a_clone.get_ref::<u8>(0).unwrap();
clone_v_ref.store(0);
});
// Technically this is a race condition but we have to observe the v_ref's value changing
// somehow and this helps to ensure the sleep actually happens before the store rather than
// being reordered by the compiler.
assert_eq!(v_ref.load(), 99);
// Granted we could have a machine that manages to perform this many volatile loads in the
// amount of time the spawned thread sleeps, but the most likely reason the retry limit will
// get reached is because v_ref.load() is not actually performing the required volatile read
// or v_ref.store() is not doing a volatile write. A timer based solution was avoided
// because that might use a syscall which could hint the optimizer to reload v_ref's pointer
// regardless of volatile status. Note that we use a longer retry duration for optimized
// builds.
#[cfg(debug_assertions)]
const RETRY_MAX: usize = 500_000_000;
#[cfg(not(debug_assertions))]
const RETRY_MAX: usize = 10_000_000_000;
let mut retry = 0;
while v_ref.load() == 99 && retry < RETRY_MAX {
retry += 1;
}
assert_ne!(retry, RETRY_MAX, "maximum retry exceeded");
assert_eq!(v_ref.load(), 0);
}
#[test]
fn slice_len() {
let a = VecMem::new(100);
let s = a.get_slice(0, 27).unwrap();
assert_eq!(s.len(), 27);
let s = a.get_slice(34, 27).unwrap();
assert_eq!(s.len(), 27);
let s = s.get_slice(20, 5).unwrap();
assert_eq!(s.len(), 5);
}
#[test]
fn slice_is_empty() {
let a = VecMem::new(100);
let s = a.get_slice(0, 27).unwrap();
assert!(!s.is_empty());
let s = a.get_slice(34, 0).unwrap();
assert!(s.is_empty());
}
#[test]
fn slice_overflow_error() {
use std::usize::MAX;
let a = VecMem::new(1);
let res = a.get_slice(MAX, 1).unwrap_err();
assert_matches!(
res,
Error::Overflow {
base: MAX,
offset: 1,
}
);
}
#[test]
fn slice_oob_error() {
let a = VecMem::new(100);
a.get_slice(50, 50).unwrap();
let res = a.get_slice(55, 50).unwrap_err();
assert_matches!(res, Error::OutOfBounds { addr: 105 });
}
#[test]
fn ref_overflow_error() {
use std::usize::MAX;
let a = VecMem::new(1);
let res = a.get_ref::<u8>(MAX).unwrap_err();
assert_matches!(
res,
Error::Overflow {
base: MAX,
offset: 1,
}
);
}
#[test]
fn ref_oob_error() {
let a = VecMem::new(100);
a.get_ref::<u8>(99).unwrap();
let res = a.get_ref::<u16>(99).unwrap_err();
assert_matches!(res, Error::OutOfBounds { addr: 101 });
}
#[test]
fn ref_oob_too_large() {
let a = VecMem::new(3);
let res = a.get_ref::<u32>(0).unwrap_err();
assert_matches!(res, Error::OutOfBounds { addr: 4 });
}
#[test]
fn slice_store() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
let r = a.get_ref(2).unwrap();
r.store(9u16);
assert_eq!(s.read_obj::<u16>(2).unwrap(), 9);
}
#[test]
fn test_write_past_end() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
let res = s.write(&[1, 2, 3, 4, 5, 6], 0);
assert!(res.is_ok());
assert_eq!(res.unwrap(), 5);
}
#[test]
fn slice_read_and_write() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
let sample_buf = [1, 2, 3];
assert!(s.write(&sample_buf, 5).is_err());
assert!(s.write(&sample_buf, 2).is_ok());
let mut buf = [0u8; 3];
assert!(s.read(&mut buf, 5).is_err());
assert!(s.read_slice(&mut buf, 2).is_ok());
assert_eq!(buf, sample_buf);
}
#[test]
fn obj_read_and_write() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
assert!(s.write_obj(55u16, 4).is_err());
assert!(s.write_obj(55u16, core::usize::MAX).is_err());
assert!(s.write_obj(55u16, 2).is_ok());
assert_eq!(s.read_obj::<u16>(2).unwrap(), 55u16);
assert!(s.read_obj::<u16>(4).is_err());
assert!(s.read_obj::<u16>(core::usize::MAX).is_err());
}
#[test]
fn mem_read_and_write() {
let a = VecMem::new(5);
let s = a.as_volatile_slice();
assert!(s.write_obj(!0u32, 1).is_ok());
let mut file = if cfg!(unix) {
File::open(Path::new("/dev/zero")).unwrap()
} else {
File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
};
assert!(s.read_exact_from(2, &mut file, size_of::<u32>()).is_err());
assert!(s
.read_exact_from(core::usize::MAX, &mut file, size_of::<u32>())
.is_err());
assert!(s.read_exact_from(1, &mut file, size_of::<u32>()).is_ok());
let mut f = tempfile().unwrap();
assert!(s.read_exact_from(1, &mut f, size_of::<u32>()).is_err());
format!("{:?}", s.read_exact_from(1, &mut f, size_of::<u32>()));
let value = s.read_obj::<u32>(1).unwrap();
if cfg!(unix) {
assert_eq!(value, 0);
} else {
assert_eq!(value, 0x0090_5a4d);
}
let mut sink = Vec::new();
assert!(s.write_all_to(1, &mut sink, size_of::<u32>()).is_ok());
assert!(s.write_all_to(2, &mut sink, size_of::<u32>()).is_err());
assert!(s
.write_all_to(core::usize::MAX, &mut sink, size_of::<u32>())
.is_err());
format!("{:?}", s.write_all_to(2, &mut sink, size_of::<u32>()));
if cfg!(unix) {
assert_eq!(sink, vec![0; size_of::<u32>()]);
} else {
assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
};
}
#[test]
fn unaligned_read_and_write() {
let a = VecMem::new(7);
let s = a.as_volatile_slice();
let sample_buf: [u8; 7] = [1, 2, 0xAA, 0xAA, 0xAA, 0xAA, 4];
assert!(s.write_slice(&sample_buf, 0).is_ok());
let r = a.get_ref::<u32>(2).unwrap();
assert_eq!(r.load(), 0xAAAA_AAAA);
r.store(0x5555_5555);
let sample_buf: [u8; 7] = [1, 2, 0x55, 0x55, 0x55, 0x55, 4];
let mut buf: [u8; 7] = Default::default();
assert!(s.read_slice(&mut buf, 0).is_ok());
assert_eq!(buf, sample_buf);
}
}

View File

@ -1,75 +0,0 @@
steps:
- label: "build-gnu-x86"
commands:
- cargo build --release
retry:
automatic: false
agents:
platform: x86_64.metal
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "build-gnu-arm"
commands:
- cargo build --release
retry:
automatic: false
agents:
platform: arm.metal
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "style"
command: cargo fmt --all -- --check
retry:
automatic: false
agents:
platform: x86_64.metal
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true
- label: "unittests-gnu-x86"
commands:
- cargo test
retry:
automatic: false
agents:
platform: x86_64.metal
plugins:
- docker#v3.0.1:
privileged: true
image: "fandree/rust-vmm-dev"
always-pull: true
tmpfs: [ "/tmp:exec" ]
- label: "unittests-gnu-arm"
commands:
- cargo test
retry:
automatic: false
agents:
platform: arm.metal
plugins:
- docker#v3.0.1:
privileged: true
image: "fandree/rust-vmm-dev"
always-pull: true
tmpfs: [ "/tmp:exec" ]
- label: "clippy-x86"
commands:
- cargo clippy --all -- -D warnings
retry:
automatic: false
agents:
platform: x86_64.metal
plugins:
- docker#v3.0.1:
image: "fandree/rust-vmm-dev"
always-pull: true

View File

@ -1 +0,0 @@
{"files":{".buildkite/pipeline.yml":"0194df2d3534e3c6938ba2945e05696a12b8ed63d4941b3a4173c5bcf6e587c4","Cargo.toml":"d19b52349ada25dbbae4e2bea4850cf1b09ea546d5efee03ddfbbf9f184c0c72","LICENSE-APACHE":"c71d239df91726fc519c6eb72d318ec65820627232b2f796219e87dcf35d0ab4","LICENSE-BSD-3-Clause":"a6d3ebd1c2f37d4fd83d0676621f695fc0cc2d8c6e646cdbb831b46e0650c208","README.md":"94512ed84e8e89faba431d9107e85d229363c187705ab9244f0ab5269b5961b5","src/errno.rs":"b93845c200fc596b0ef2414671d965cf5c4fdbf6bba6e5e5f0d032a32c73f8ac","src/eventfd.rs":"7bd871242f49c14d8783714e6814456f51a9c280dcadf1625e1bd2313d2b5f7f","src/file_traits.rs":"398c529e7ebce143ecb9f9bd2f5f47ea3e953ac34cc211ad71cdcf1898cc7d38","src/ioctl.rs":"5c4abf75e7b6786e7da3191ac1e4460e1ec7d073a53331a6d9597bb9ccc3f88a","src/lib.rs":"ee0818e0ca6fdc340c52d514eeb2e3aeb4f7ba8e4e522bb946cdbce4779926f1","src/poll.rs":"1498c14ba022ede57c4faf17bee49cf5ac9d1c8d3883db441697ee224dac7818","src/seek_hole.rs":"de43f21bc2c5d9eb7f06e21e3c20f93476bf6016e4d041df71a02b9e54b3c3ca","src/signal.rs":"724f679cb62d268a5ec8f0704a8d6b01882f302f508c189e82054657ed8b31bf","src/syslog.rs":"fbf4bde16b1059b5b39c5318e8bb918dc431e8e0ccbc82c0d765b9ce4a8d5f96","src/tempdir.rs":"4993460e81f7df6398e0f2b07cc3d81e728aa7e0559c7f3d83b6df1876bc3776","src/terminal.rs":"85efb1df641730fa1981bac6fd65bd75f7d532bb8680a56e94d6d006eeb363e9","src/timerfd.rs":"fd3c52e3918d881c16cb1498f8f66253ee758275a6a66ed8eb11c78e69f69e55","src/write_zeroes.rs":"c2951bbdb3ab07727eda29e9a91a51e427fdf6fed0b611ea6a3732edbd9a1246"},"package":null}

View File

@ -1,9 +0,0 @@
[package]
name = "vmm-sys-util"
version = "0.1.0"
authors = ["Jing Liu <jing2.liu@linux.intel.com>"]
license = "Apache-2.0"
[dependencies]
libc = ">=0.2.39"

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,27 +0,0 @@
// Copyright 2017 The Chromium OS Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,2 +0,0 @@
# vmm-sys-util
This crate is a collection of modules that provide helpers and utilities.

View File

@ -1,80 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use std::fmt::{Display, Formatter};
use std::io;
use std::result;
use libc::__errno_location;
/// An error number, retrieved from [`errno`](http://man7.org/linux/man-pages/man3/errno.3.html),
/// set by a libc function that returned an error.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Error(i32);
pub type Result<T> = result::Result<T, Error>;
impl Error {
/// Constructs a new error with the given `errno`.
pub fn new(e: i32) -> Error {
Error(e)
}
/// Constructs an error from the current `errno`.
///
/// The result of this only has any meaning just after a libc call that returned a value
/// indicating `errno` was set.
pub fn last() -> Error {
Error(unsafe { *__errno_location() })
}
/// Gets the `errno` for this error.
pub fn errno(self) -> i32 {
self.0
}
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
io::Error::from_raw_os_error(self.0).fmt(f)
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::new(e.raw_os_error().unwrap_or_default())
}
}
/// Returns the last `errno` as a [`Result`] that is always an error.
///
/// [`Result`]: type.Result.html
pub fn errno_result<T>() -> Result<T> {
Err(Error::last())
}
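// A brief sketch (illustrative only) of the intended pattern: call into libc,
// then convert a failure into an `Error` via `errno_result`. Safe only when
// the caller actually owns `fd`.
#[allow(dead_code)]
fn example_close(fd: i32) -> Result<()> {
    use libc::close;
    // Safe because we only pass along a caller-provided fd and check the
    // return value.
    let ret = unsafe { close(fd) };
    if ret < 0 {
        errno_result()
    } else {
        Ok(())
    }
}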
#[cfg(test)]
mod tests {
use super::*;
use libc;
use std::fs::File;
use std::io::{self, Write};
use std::os::unix::io::FromRawFd;
#[test]
pub fn test_invalid_fd() {
let mut file = unsafe { File::from_raw_fd(-1) };
assert!(file.write(b"test").is_err());
let last_err = errno_result::<i32>().unwrap_err();
assert_eq!(last_err, Error::new(libc::EBADF));
assert_eq!(last_err.errno(), libc::EBADF);
assert_eq!(last_err, Error::from(io::Error::last_os_error()));
assert_eq!(last_err, Error::last());
}
}

View File

@ -1,150 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use std::fs::File;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::{io, mem, result};
use libc::{c_void, dup, eventfd, read, write};
/// A safe wrapper around a Linux eventfd (man 2 eventfd).
pub struct EventFd {
eventfd: File,
}
impl EventFd {
/// Creates a new EventFd with an initial count of zero.
///
/// `flag`: Flags to pass to eventfd(2); pass 0 for a blocking EventFd.
pub fn new(flag: i32) -> result::Result<EventFd, io::Error> {
// This is safe because eventfd merely allocates an eventfd for
// our process and we handle the error case.
let ret = unsafe { eventfd(0, flag) };
if ret < 0 {
Err(io::Error::last_os_error())
} else {
// This is safe because we checked ret for success and know
// the kernel gave us an fd that we own.
Ok(EventFd {
eventfd: unsafe { File::from_raw_fd(ret) },
})
}
}
/// Adds `v` to the eventfd's count. For a non-blocking EventFd, returns an error instead of
/// blocking when the addition would overflow the count.
pub fn write(&self, v: u64) -> result::Result<(), io::Error> {
// This is safe because we made this fd and the buffer we pass
// cannot be overrun because we give the syscall the matching size.
let ret = unsafe {
write(
self.as_raw_fd(),
&v as *const u64 as *const c_void,
mem::size_of::<u64>(),
)
};
if ret <= 0 {
Err(io::Error::last_os_error())
} else {
Ok(())
}
}
/// Tries to read from the eventfd. For a non-blocking EventFd, returns an error instead of
/// blocking when the counter is zero.
pub fn read(&self) -> result::Result<u64, io::Error> {
let mut buf: u64 = 0;
let ret = unsafe {
// This is safe because we made this fd and the buffer we
// pass cannot be overrun because we give the syscall the matching size.
read(
self.as_raw_fd(),
&mut buf as *mut u64 as *mut c_void,
mem::size_of::<u64>(),
)
};
if ret < 0 {
Err(io::Error::last_os_error())
} else {
Ok(buf)
}
}
/// Clones this EventFd, internally creating a new file descriptor. The new EventFd will share
/// the same underlying count within the kernel.
pub fn try_clone(&self) -> result::Result<EventFd, io::Error> {
// This is safe because we made this fd and properly check that it returns without error.
let ret = unsafe { dup(self.as_raw_fd()) };
if ret < 0 {
Err(io::Error::last_os_error())
} else {
// This is safe because we checked ret for success and know the kernel gave us an fd that we
// own.
Ok(EventFd {
eventfd: unsafe { File::from_raw_fd(ret) },
})
}
}
}
impl AsRawFd for EventFd {
fn as_raw_fd(&self) -> RawFd {
self.eventfd.as_raw_fd()
}
}
impl FromRawFd for EventFd {
unsafe fn from_raw_fd(fd: RawFd) -> Self {
EventFd {
eventfd: File::from_raw_fd(fd),
}
}
}
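// A short sketch (illustrative only) of the typical cross-thread signalling
// pattern: clone the EventFd, move the clone into a worker, and block on
// `read` in the parent. Blocking behaviour assumes the fd was created
// without EFD_NONBLOCK.
#[allow(dead_code)]
fn example_signal_across_threads() -> result::Result<(), io::Error> {
    use std::thread;
    let evt = EventFd::new(0)?;
    let evt_clone = evt.try_clone()?;
    let handle = thread::spawn(move || {
        // Both fds share one kernel counter, so this wakes the parent.
        evt_clone.write(1).unwrap();
    });
    assert_eq!(evt.read()?, 1);
    handle.join().unwrap();
    Ok(())
}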
#[cfg(test)]
mod tests {
use super::*;
use libc::EFD_NONBLOCK;
#[test]
fn test_new() {
EventFd::new(EFD_NONBLOCK).unwrap();
EventFd::new(0).unwrap();
}
#[test]
fn test_read_write() {
let evt = EventFd::new(EFD_NONBLOCK).unwrap();
evt.write(55).unwrap();
assert_eq!(evt.read().unwrap(), 55);
}
#[test]
fn test_write_overflow() {
let evt = EventFd::new(EFD_NONBLOCK).unwrap();
evt.write(std::u64::MAX - 1).unwrap();
let r = evt.write(1);
match r {
Err(ref inner) if inner.kind() == io::ErrorKind::WouldBlock => (),
_ => panic!("Unexpected"),
}
}
#[test]
fn test_read_nothing() {
let evt = EventFd::new(EFD_NONBLOCK).unwrap();
let r = evt.read();
match r {
Err(ref inner) if inner.kind() == io::ErrorKind::WouldBlock => (),
_ => panic!("Unexpected"),
}
}
#[test]
fn test_clone() {
let evt = EventFd::new(EFD_NONBLOCK).unwrap();
let evt_clone = evt.try_clone().unwrap();
evt.write(923).unwrap();
assert_eq!(evt_clone.read().unwrap(), 923);
}
}

View File

@ -1,39 +0,0 @@
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: BSD-3-Clause
use std::fs::File;
use std::io::Result;
/// A trait for flushing the contents of a file to disk.
/// This is equivalent to File's `sync_all` method, but
/// wrapped in a trait so that it can be implemented for
/// other types.
pub trait FileSync {
// Flush buffers related to this file to disk.
fn fsync(&mut self) -> Result<()>;
}
impl FileSync for File {
fn fsync(&mut self) -> Result<()> {
self.sync_all()
}
}
/// A trait for setting the size of a file.
/// This is equivalent to File's `set_len` method, but
/// wrapped in a trait so that it can be implemented for
/// other types.
pub trait FileSetLen {
// Set the size of this file.
// This is the moral equivalent of `ftruncate()`.
fn set_len(&self, _len: u64) -> Result<()>;
}
impl FileSetLen for File {
fn set_len(&self, len: u64) -> Result<()> {
File::set_len(self, len)
}
}
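// A minimal usage sketch (illustrative only; the 4 KiB length is arbitrary)
// combining both traits on a plain `File`.
#[allow(dead_code)]
fn example_truncate_and_sync(file: &mut File) -> Result<()> {
    // Resize the file to exactly 4 KiB via the trait, then flush it to disk.
    FileSetLen::set_len(file, 4096)?;
    file.fsync()
}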

View File

@ -1,225 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//! Macros and wrapper functions for dealing with ioctls.
use libc;
use std::os::raw::{c_int, c_uint, c_ulong, c_void};
use std::os::unix::io::AsRawFd;
/// Raw macro to declare the expression that calculates an ioctl number
#[macro_export]
macro_rules! ioctl_expr {
($dir:expr, $ty:expr, $nr:expr, $size:expr) => {
(($dir << $crate::ioctl::_IOC_DIRSHIFT)
| ($ty << $crate::ioctl::_IOC_TYPESHIFT)
| ($nr << $crate::ioctl::_IOC_NRSHIFT)
| ($size << $crate::ioctl::_IOC_SIZESHIFT)) as ::std::os::raw::c_ulong
};
}
/// Raw macro to declare a function that returns an ioctl number.
#[macro_export]
macro_rules! ioctl_ioc_nr {
($name:ident, $dir:expr, $ty:expr, $nr:expr, $size:expr) => {
#[allow(non_snake_case)]
#[allow(clippy::cast_lossless)]
pub fn $name() -> ::std::os::raw::c_ulong {
ioctl_expr!($dir, $ty, $nr, $size)
}
};
($name:ident, $dir:expr, $ty:expr, $nr:expr, $size:expr, $($v:ident),+) => {
#[allow(non_snake_case)]
#[allow(clippy::cast_lossless)]
pub fn $name($($v: ::std::os::raw::c_uint),+) -> ::std::os::raw::c_ulong {
ioctl_expr!($dir, $ty, $nr, $size)
}
};
}
/// Declare an ioctl that transfers no data.
#[macro_export]
macro_rules! ioctl_io_nr {
($name:ident, $ty:expr, $nr:expr) => {
ioctl_ioc_nr!($name, $crate::ioctl::_IOC_NONE, $ty, $nr, 0);
};
($name:ident, $ty:expr, $nr:expr, $($v:ident),+) => {
ioctl_ioc_nr!($name, $crate::ioctl::_IOC_NONE, $ty, $nr, 0, $($v),+);
};
}
/// Declare an ioctl that reads data.
#[macro_export]
macro_rules! ioctl_ior_nr {
($name:ident, $ty:expr, $nr:expr, $size:ty) => {
ioctl_ioc_nr!(
$name,
$crate::ioctl::_IOC_READ,
$ty,
$nr,
::std::mem::size_of::<$size>() as u32
);
};
($name:ident, $ty:expr, $nr:expr, $size:ty, $($v:ident),+) => {
ioctl_ioc_nr!(
$name,
$crate::ioctl::_IOC_READ,
$ty,
$nr,
::std::mem::size_of::<$size>() as u32,
$($v),+
);
};
}
/// Declare an ioctl that writes data.
#[macro_export]
macro_rules! ioctl_iow_nr {
($name:ident, $ty:expr, $nr:expr, $size:ty) => {
ioctl_ioc_nr!(
$name,
$crate::ioctl::_IOC_WRITE,
$ty,
$nr,
::std::mem::size_of::<$size>() as u32
);
};
($name:ident, $ty:expr, $nr:expr, $size:ty, $($v:ident),+) => {
ioctl_ioc_nr!(
$name,
$crate::ioctl::_IOC_WRITE,
$ty,
$nr,
::std::mem::size_of::<$size>() as u32,
$($v),+
);
};
}
/// Declare an ioctl that reads and writes data.
#[macro_export]
macro_rules! ioctl_iowr_nr {
($name:ident, $ty:expr, $nr:expr, $size:ty) => {
ioctl_ioc_nr!(
$name,
$crate::ioctl::_IOC_READ | $crate::ioctl::_IOC_WRITE,
$ty,
$nr,
::std::mem::size_of::<$size>() as u32
);
};
($name:ident, $ty:expr, $nr:expr, $size:ty, $($v:ident),+) => {
ioctl_ioc_nr!(
$name,
$crate::ioctl::_IOC_READ | $crate::ioctl::_IOC_WRITE,
$ty,
$nr,
::std::mem::size_of::<$size>() as u32,
$($v),+
);
};
}
pub const _IOC_NRBITS: c_uint = 8;
pub const _IOC_TYPEBITS: c_uint = 8;
pub const _IOC_SIZEBITS: c_uint = 14;
pub const _IOC_DIRBITS: c_uint = 2;
pub const _IOC_NRMASK: c_uint = 255;
pub const _IOC_TYPEMASK: c_uint = 255;
pub const _IOC_SIZEMASK: c_uint = 16383;
pub const _IOC_DIRMASK: c_uint = 3;
pub const _IOC_NRSHIFT: c_uint = 0;
pub const _IOC_TYPESHIFT: c_uint = 8;
pub const _IOC_SIZESHIFT: c_uint = 16;
pub const _IOC_DIRSHIFT: c_uint = 30;
pub const _IOC_NONE: c_uint = 0;
pub const _IOC_WRITE: c_uint = 1;
pub const _IOC_READ: c_uint = 2;
pub const IOC_IN: c_uint = 1_073_741_824;
pub const IOC_OUT: c_uint = 2_147_483_648;
pub const IOC_INOUT: c_uint = 3_221_225_472;
pub const IOCSIZE_MASK: c_uint = 1_073_676_288;
pub const IOCSIZE_SHIFT: c_uint = 16;
// The type of the `req` parameter differs for the `musl` library. This alias
// enables a successful build with both musl and non-musl libraries.
#[cfg(target_env = "musl")]
type IoctlRequest = c_int;
#[cfg(not(target_env = "musl"))]
type IoctlRequest = c_ulong;
/// Run an ioctl with no arguments.
pub unsafe fn ioctl<F: AsRawFd>(fd: &F, req: c_ulong) -> c_int {
libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, 0)
}
/// Run an ioctl with a single value argument.
pub unsafe fn ioctl_with_val<F: AsRawFd>(fd: &F, req: c_ulong, arg: c_ulong) -> c_int {
libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, arg)
}
/// Run an ioctl with an immutable reference.
pub unsafe fn ioctl_with_ref<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: &T) -> c_int {
libc::ioctl(
fd.as_raw_fd(),
req as IoctlRequest,
arg as *const T as *const c_void,
)
}
/// Run an ioctl with a mutable reference.
pub unsafe fn ioctl_with_mut_ref<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: &mut T) -> c_int {
libc::ioctl(
fd.as_raw_fd(),
req as IoctlRequest,
arg as *mut T as *mut c_void,
)
}
/// Run an ioctl with a raw pointer.
pub unsafe fn ioctl_with_ptr<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: *const T) -> c_int {
libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, arg as *const c_void)
}
/// Run an ioctl with a mutable raw pointer.
pub unsafe fn ioctl_with_mut_ptr<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: *mut T) -> c_int {
libc::ioctl(fd.as_raw_fd(), req as IoctlRequest, arg as *mut c_void)
}
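// A small sketch of how the macros expand: declare a number-returning
// function and inspect the encoded request value. The constants here are
// illustrative only and do not correspond to a real driver.
ioctl_ioc_nr!(EXAMPLE_IOCTL, _IOC_READ, 0x12, 0x01, 4);
#[allow(dead_code)]
fn example_ioctl_nr() {
    // dir=read (2) << 30 | size=4 << 16 | type=0x12 << 8 | nr=0x01 << 0.
    assert_eq!(0x8004_1201, EXAMPLE_IOCTL());
}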
#[cfg(test)]
mod tests {
const TUNTAP: ::std::os::raw::c_uint = 0x54;
const VHOST: ::std::os::raw::c_uint = 0xAF;
const EVDEV: ::std::os::raw::c_uint = 0x45;
const KVMIO: ::std::os::raw::c_uint = 0xAE;
ioctl_io_nr!(KVM_CREATE_VM, KVMIO, 0x01);
ioctl_ior_nr!(TUNGETFEATURES, TUNTAP, 0xcf, ::std::os::raw::c_uint);
ioctl_iow_nr!(TUNSETQUEUE, TUNTAP, 0xd9, ::std::os::raw::c_int);
ioctl_io_nr!(VHOST_SET_OWNER, VHOST, 0x01);
ioctl_iowr_nr!(VHOST_GET_VRING_BASE, VHOST, 0x12, ::std::os::raw::c_int);
ioctl_iowr_nr!(KVM_GET_MSR_INDEX_LIST, KVMIO, 0x2, ::std::os::raw::c_int);
ioctl_ior_nr!(EVIOCGBIT, EVDEV, 0x20 + evt, [u8; 128], evt);
ioctl_io_nr!(FAKE_IOCTL_2_ARG, EVDEV, 0x01 + x + y, x, y);
#[test]
fn test_ioctl_macros() {
assert_eq!(0x0000_AE01, KVM_CREATE_VM());
assert_eq!(0x0000_AF01, VHOST_SET_OWNER());
assert_eq!(0x8004_54CF, TUNGETFEATURES());
assert_eq!(0x4004_54D9, TUNSETQUEUE());
assert_eq!(0xC004_AE02, KVM_GET_MSR_INDEX_LIST());
assert_eq!(0xC004_AF12, VHOST_GET_VRING_BASE());
assert_eq!(0x8080_4522, EVIOCGBIT(2));
assert_eq!(0x0000_4509, FAKE_IOCTL_2_ARG(3, 5));
}
}

View File

@ -1,78 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
extern crate libc;
mod tempdir;
#[macro_use]
pub mod ioctl;
pub mod errno;
pub mod eventfd;
pub mod file_traits;
pub mod seek_hole;
pub mod signal;
pub mod terminal;
pub mod timerfd;
pub mod write_zeroes;
#[macro_use]
pub mod syslog;
pub mod poll;
pub use crate::tempdir::*;
pub use errno::*;
pub use eventfd::*;
pub use poll::*;
use std::os::unix::io::AsRawFd;
pub use crate::file_traits::{FileSetLen, FileSync};
pub use crate::seek_hole::SeekHole;
pub use crate::write_zeroes::{PunchHole, WriteZeroes};
pub enum FallocateMode {
PunchHole,
ZeroRange,
}
/// Safe wrapper for `fallocate()`.
pub fn fallocate(
file: &dyn AsRawFd,
mode: FallocateMode,
keep_size: bool,
offset: u64,
len: u64,
) -> Result<()> {
let offset = if offset > libc::off64_t::max_value() as u64 {
return Err(Error::new(libc::EINVAL));
} else {
offset as libc::off64_t
};
let len = if len > libc::off64_t::max_value() as u64 {
return Err(Error::new(libc::EINVAL));
} else {
len as libc::off64_t
};
let mut mode = match mode {
FallocateMode::PunchHole => libc::FALLOC_FL_PUNCH_HOLE,
FallocateMode::ZeroRange => libc::FALLOC_FL_ZERO_RANGE,
};
if keep_size {
mode |= libc::FALLOC_FL_KEEP_SIZE;
}
// Safe since we pass in a valid fd and fallocate mode, validate offset and len,
// and check the return value.
let ret = unsafe { libc::fallocate64(file.as_raw_fd(), mode, offset, len) };
if ret < 0 {
errno_result()
} else {
Ok(())
}
}
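// A short usage sketch (illustrative only): punch a hole in the middle of a
// file while keeping its apparent size. Linux-only, and the underlying file
// system must support FALLOC_FL_PUNCH_HOLE; the offsets are arbitrary.
#[allow(dead_code)]
fn example_punch_hole(file: &std::fs::File) -> Result<()> {
    // keep_size = true leaves the file length unchanged.
    fallocate(file, FallocateMode::PunchHole, true, 4096, 4096)
}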

View File

@ -1,711 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use std::cell::{Cell, Ref, RefCell};
use std::cmp::min;
use std::fs::File;
use std::i32;
use std::i64;
use std::marker::PhantomData;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::ptr::null_mut;
use std::slice;
use std::thread;
use std::time::Duration;
use libc::{
c_int, epoll_create1, epoll_ctl, epoll_event, epoll_wait, EINTR, EPOLLHUP, EPOLLIN, EPOLLOUT,
EPOLL_CLOEXEC, EPOLL_CTL_ADD, EPOLL_CTL_DEL, EPOLL_CTL_MOD,
};
use crate::{errno_result, Error, Result};
macro_rules! handle_eintr_errno {
($x:expr) => {{
let mut res;
loop {
res = $x;
if res != -1 || Error::last() != Error::new(EINTR) {
break;
}
}
res
}};
}
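// A sketch (illustrative only) of what the macro is for: restarting a syscall
// that fails with EINTR instead of surfacing that spurious error. `fd` must
// be a valid, readable file descriptor.
#[allow(dead_code)]
fn read_retrying_eintr(fd: c_int, buf: &mut [u8]) -> isize {
    // Safe because the pointer/length pair is derived from a valid slice and
    // the macro checks errno on each failed attempt.
    handle_eintr_errno!(unsafe {
        ::libc::read(fd, buf.as_mut_ptr() as *mut ::libc::c_void, buf.len())
    })
}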
const POLL_CONTEXT_MAX_EVENTS: usize = 16;
/// EpollEvents wraps raw epoll_events; it should only be used with EpollContext.
pub struct EpollEvents(RefCell<[epoll_event; POLL_CONTEXT_MAX_EVENTS]>);
impl EpollEvents {
pub fn new() -> EpollEvents {
EpollEvents(RefCell::new(
[epoll_event { events: 0, u64: 0 }; POLL_CONTEXT_MAX_EVENTS],
))
}
}
impl Default for EpollEvents {
fn default() -> Self {
Self::new()
}
}
/// Trait for a token that can be associated with an `fd` in a `PollContext`.
///
/// Simple enums that have no or primitive variant data can use the `#[derive(PollToken)]`
/// custom derive to implement this trait.
pub trait PollToken {
/// Converts this token into a u64 that can be turned back into a token via `from_raw_token`.
fn as_raw_token(&self) -> u64;
/// Converts a raw token as returned from `as_raw_token` back into a token.
///
/// It is invalid to give a raw token that was not returned via `as_raw_token` from the same
/// `Self`. The implementation can expect that this will never happen as a result of its usage
/// in `PollContext`.
fn from_raw_token(data: u64) -> Self;
}
impl PollToken for usize {
fn as_raw_token(&self) -> u64 {
*self as u64
}
fn from_raw_token(data: u64) -> Self {
data as Self
}
}
impl PollToken for u64 {
fn as_raw_token(&self) -> u64 {
*self as u64
}
fn from_raw_token(data: u64) -> Self {
data as Self
}
}
impl PollToken for u32 {
fn as_raw_token(&self) -> u64 {
u64::from(*self)
}
fn from_raw_token(data: u64) -> Self {
data as Self
}
}
impl PollToken for u16 {
fn as_raw_token(&self) -> u64 {
u64::from(*self)
}
fn from_raw_token(data: u64) -> Self {
data as Self
}
}
impl PollToken for u8 {
fn as_raw_token(&self) -> u64 {
u64::from(*self)
}
fn from_raw_token(data: u64) -> Self {
data as Self
}
}
impl PollToken for () {
fn as_raw_token(&self) -> u64 {
0
}
fn from_raw_token(_data: u64) -> Self {}
}
/// An event returned by `PollContext::wait`.
pub struct PollEvent<'a, T> {
event: &'a epoll_event,
token: PhantomData<T>, // Needed to satisfy usage of T
}
impl<'a, T: PollToken> PollEvent<'a, T> {
/// Gets the token associated in `PollContext::add` with this event.
pub fn token(&self) -> T {
T::from_raw_token(self.event.u64)
}
/// True if the `fd` associated with this token in `PollContext::add` is readable.
pub fn readable(&self) -> bool {
self.event.events & (EPOLLIN as u32) != 0
}
/// True if the `fd` associated with this token in `PollContext::add` has been hungup on.
pub fn hungup(&self) -> bool {
self.event.events & (EPOLLHUP as u32) != 0
}
}
/// An iterator over some (sub)set of events returned by `PollContext::wait`.
pub struct PollEventIter<'a, I, T>
where
I: Iterator<Item = &'a epoll_event>,
{
mask: u32,
iter: I,
tokens: PhantomData<[T]>, // Needed to satisfy usage of T
}
impl<'a, I, T> Iterator for PollEventIter<'a, I, T>
where
I: Iterator<Item = &'a epoll_event>,
T: PollToken,
{
type Item = PollEvent<'a, T>;
fn next(&mut self) -> Option<Self::Item> {
let mask = self.mask;
self.iter
.find(|event| (event.events & mask) != 0)
.map(|event| PollEvent {
event,
token: PhantomData,
})
}
}
/// The list of event returned by `PollContext::wait`.
pub struct PollEvents<'a, T> {
count: usize,
events: Ref<'a, [epoll_event; POLL_CONTEXT_MAX_EVENTS]>,
tokens: PhantomData<[T]>, // Needed to satisfy usage of T
}
impl<'a, T: PollToken> PollEvents<'a, T> {
/// Copies the events to an owned structure so the reference to this (and by extension
/// `PollContext`) can be dropped.
pub fn to_owned(&self) -> PollEventsOwned<T> {
PollEventsOwned {
count: self.count,
events: RefCell::new(*self.events),
tokens: PhantomData,
}
}
/// Iterates over each event.
pub fn iter(&self) -> PollEventIter<slice::Iter<epoll_event>, T> {
PollEventIter {
mask: 0xffff_ffff,
iter: self.events[..self.count].iter(),
tokens: PhantomData,
}
}
/// Iterates over each readable event.
pub fn iter_readable(&self) -> PollEventIter<slice::Iter<epoll_event>, T> {
PollEventIter {
mask: EPOLLIN as u32,
iter: self.events[..self.count].iter(),
tokens: PhantomData,
}
}
/// Iterates over each hungup event.
pub fn iter_hungup(&self) -> PollEventIter<slice::Iter<epoll_event>, T> {
PollEventIter {
mask: EPOLLHUP as u32,
iter: self.events[..self.count].iter(),
tokens: PhantomData,
}
}
}
/// A deep copy of the event records from `PollEvents`.
pub struct PollEventsOwned<T> {
count: usize,
events: RefCell<[epoll_event; POLL_CONTEXT_MAX_EVENTS]>,
tokens: PhantomData<T>, // Needed to satisfy usage of T
}
impl<T: PollToken> PollEventsOwned<T> {
/// Takes a reference to the events so that they can be iterated via methods in `PollEvents`.
pub fn as_ref(&self) -> PollEvents<T> {
PollEvents {
count: self.count,
events: self.events.borrow(),
tokens: PhantomData,
}
}
}
/// The set of events to watch for, as taken by PollContext.
pub struct WatchingEvents(u32);
impl WatchingEvents {
/// Returns empty Events.
#[inline(always)]
pub fn empty() -> WatchingEvents {
WatchingEvents(0)
}
/// Build Events from raw epoll events (defined in epoll_ctl(2)).
#[inline(always)]
pub fn new(raw: u32) -> WatchingEvents {
WatchingEvents(raw)
}
/// Set read events.
#[inline(always)]
pub fn set_read(self) -> WatchingEvents {
WatchingEvents(self.0 | EPOLLIN as u32)
}
/// Set write events.
#[inline(always)]
pub fn set_write(self) -> WatchingEvents {
WatchingEvents(self.0 | EPOLLOUT as u32)
}
/// Get the underlying epoll events.
pub fn get_raw(&self) -> u32 {
self.0
}
}
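// A quick sketch (illustrative only) of the builder-style API: start empty
// and OR in the directions you care about.
#[allow(dead_code)]
fn example_watching_events() {
    let events = WatchingEvents::empty().set_read().set_write();
    assert_eq!(events.get_raw(), (EPOLLIN | EPOLLOUT) as u32);
}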
/// EpollContext wraps Linux epoll. It provides a similar interface to PollContext.
/// It is thread safe, while PollContext is not. It requires the user to pass in a reference to
/// EpollEvents, while PollContext does not. Always use PollContext if you don't need to access the
/// same epoll from different threads.
pub struct EpollContext<T> {
epoll_ctx: File,
// Needed to satisfy usage of T
tokens: PhantomData<[T]>,
}
impl<T: PollToken> EpollContext<T> {
/// Creates a new `EpollContext`.
pub fn new() -> Result<EpollContext<T>> {
// Safe because we check the return value.
let epoll_fd = unsafe { epoll_create1(EPOLL_CLOEXEC) };
if epoll_fd < 0 {
return errno_result();
}
Ok(EpollContext {
epoll_ctx: unsafe { File::from_raw_fd(epoll_fd) },
tokens: PhantomData,
})
}
/// Adds the given `fd` to this context and associates the given `token` with the `fd`'s
/// readable events.
///
/// An `fd` can only be added once and does not need to be kept open. If the `fd` is dropped and
/// there were no duplicated file descriptors (i.e. adding the same descriptor with a different
/// FD number) added to this context, events will not be reported by `wait` anymore.
pub fn add(&self, fd: &AsRawFd, token: T) -> Result<()> {
self.add_fd_with_events(fd, WatchingEvents::empty().set_read(), token)
}
/// Adds the given `fd` to this context, watching for the specified events and associates the
/// given 'token' with those events.
///
/// An `fd` can only be added once and does not need to be kept open. If the `fd` is dropped and
/// there were no duplicated file descriptors (i.e. adding the same descriptor with a different
/// FD number) added to this context, events will not be reported by `wait` anymore.
pub fn add_fd_with_events(&self, fd: &AsRawFd, events: WatchingEvents, token: T) -> Result<()> {
let mut evt = epoll_event {
events: events.get_raw(),
u64: token.as_raw_token(),
};
// Safe because we give a valid epoll FD and FD to watch, as well as a valid epoll_event
// structure. Then we check the return value.
let ret = unsafe {
epoll_ctl(
self.epoll_ctx.as_raw_fd(),
EPOLL_CTL_ADD,
fd.as_raw_fd(),
&mut evt,
)
};
if ret < 0 {
return errno_result();
};
Ok(())
}
/// If `fd` was previously added to this context, the watched events will be replaced with
/// `events` and the token associated with it will be replaced with the given `token`.
pub fn modify(&self, fd: &AsRawFd, events: WatchingEvents, token: T) -> Result<()> {
let mut evt = epoll_event {
events: events.get_raw(),
u64: token.as_raw_token(),
};
// Safe because we give a valid epoll FD and FD to modify, as well as a valid epoll_event
// structure. Then we check the return value.
let ret = unsafe {
epoll_ctl(
self.epoll_ctx.as_raw_fd(),
EPOLL_CTL_MOD,
fd.as_raw_fd(),
&mut evt,
)
};
if ret < 0 {
return errno_result();
};
Ok(())
}
/// Deletes the given `fd` from this context.
///
/// If an `fd`'s token shows up in the list of hangup events, it should be removed using this
/// method or by closing/dropping (if and only if the fd was never dup()'d/fork()'d) the `fd`.
/// Failure to do so will cause the `wait` method to always return immediately, causing ~100%
/// CPU load.
pub fn delete(&self, fd: &AsRawFd) -> Result<()> {
// Safe because we give a valid epoll FD and FD to stop watching. Then we check the return
// value.
let ret = unsafe {
epoll_ctl(
self.epoll_ctx.as_raw_fd(),
EPOLL_CTL_DEL,
fd.as_raw_fd(),
null_mut(),
)
};
if ret < 0 {
return errno_result();
};
Ok(())
}
/// Waits for any events to occur in FDs that were previously added to this context.
///
/// The events are level-triggered, meaning that if any events are unhandled (i.e. not reading
/// for readable events and not closing for hungup events), subsequent calls to `wait` will
/// return immediately. The consequence of perpetually failing to handle an event while calling
/// `wait` is that the caller's loop degenerates to busy-loop polling, pinning a CPU to
/// ~100% usage.
pub fn wait<'a>(&self, events: &'a EpollEvents) -> Result<PollEvents<'a, T>> {
self.wait_timeout(events, Duration::new(i64::MAX as u64, 0))
}
/// Like `wait` except will only block for a maximum of the given `timeout`.
///
/// This may return earlier than `timeout` with zero events if the duration indicated exceeds
/// system limits.
pub fn wait_timeout<'a>(
&self,
events: &'a EpollEvents,
timeout: Duration,
) -> Result<PollEvents<'a, T>> {
let timeout_millis = if timeout.as_secs() as i64 == i64::max_value() {
// We make the convenient assumption that 2^63 seconds is an effectively unbounded time
// frame. This is meant to mesh with `wait` calling us with no timeout.
-1
} else {
// In cases where the number of milliseconds would overflow an i32, we substitute the
// maximum timeout, which is ~24.8 days.
let millis = timeout
.as_secs()
.checked_mul(1_000)
.and_then(|ms| ms.checked_add(u64::from(timeout.subsec_nanos()) / 1_000_000))
.unwrap_or(i32::max_value() as u64);
min(i32::max_value() as u64, millis) as i32
};
let ret = {
let mut epoll_events = events.0.borrow_mut();
let max_events = epoll_events.len() as c_int;
// Safe because we give an epoll context and a properly sized epoll_events array
// pointer, which we trust the kernel to fill in properly.
unsafe {
handle_eintr_errno!(epoll_wait(
self.epoll_ctx.as_raw_fd(),
&mut epoll_events[0],
max_events,
timeout_millis
))
}
};
if ret < 0 {
return errno_result();
}
let epoll_events = events.0.borrow();
let events = PollEvents {
count: ret as usize,
events: epoll_events,
tokens: PhantomData,
};
Ok(events)
}
}
impl<T: PollToken> AsRawFd for EpollContext<T> {
fn as_raw_fd(&self) -> RawFd {
self.epoll_ctx.as_raw_fd()
}
}
impl<T: PollToken> IntoRawFd for EpollContext<T> {
fn into_raw_fd(self) -> RawFd {
self.epoll_ctx.into_raw_fd()
}
}
/// Used to poll multiple objects that have file descriptors.
///
/// # Example
///
/// ```
/// # use vmm_sys_util::{Result, EventFd, PollContext, PollEvents};
/// # fn test() -> Result<()> {
/// let evt1 = EventFd::new(0)?;
/// let evt2 = EventFd::new(0)?;
/// evt2.write(1)?;
///
/// let ctx: PollContext<u32> = PollContext::new()?;
/// ctx.add(&evt1, 1)?;
/// ctx.add(&evt2, 2)?;
///
/// let pollevents: PollEvents<u32> = ctx.wait()?;
/// let tokens: Vec<u32> = pollevents.iter_readable().map(|e| e.token()).collect();
/// assert_eq!(&tokens[..], &[2]);
/// # Ok(())
/// # }
/// ```
pub struct PollContext<T> {
epoll_ctx: EpollContext<T>,
// We use a RefCell here so that the `wait` method only requires an immutable self reference
// while returning the events (encapsulated by PollEvents). Without the RefCell, `wait` would
// hold a mutable reference that lives as long as its returned reference (i.e. the PollEvents),
// even though that reference is immutable. This is terribly inconvenient for the caller because
// the borrow checking would prevent them from using `delete` and `add` while the events are in
// scope.
events: EpollEvents,
// Hangup busy loop detection variables. See `check_for_hungup_busy_loop`.
hangups: Cell<usize>,
max_hangups: Cell<usize>,
}
impl<T: PollToken> PollContext<T> {
/// Creates a new `PollContext`.
pub fn new() -> Result<PollContext<T>> {
Ok(PollContext {
epoll_ctx: EpollContext::new()?,
events: EpollEvents::new(),
hangups: Cell::new(0),
max_hangups: Cell::new(0),
})
}
/// Adds the given `fd` to this context and associates the given `token` with the `fd`'s
/// readable events.
///
/// An `fd` can only be added once and does not need to be kept open. If the `fd` is dropped and
/// there were no duplicated file descriptors (i.e. adding the same descriptor with a different
/// FD number) added to this context, events will not be reported by `wait` anymore.
pub fn add(&self, fd: &AsRawFd, token: T) -> Result<()> {
self.add_fd_with_events(fd, WatchingEvents::empty().set_read(), token)
}
/// Adds the given `fd` to this context, watching for the specified events, and associates the
/// given `token` with those events.
///
/// An `fd` can only be added once and does not need to be kept open. If the `fd` is dropped and
/// there were no duplicated file descriptors (i.e. adding the same descriptor with a different
/// FD number) added to this context, events will not be reported by `wait` anymore.
pub fn add_fd_with_events(&self, fd: &AsRawFd, events: WatchingEvents, token: T) -> Result<()> {
self.epoll_ctx.add_fd_with_events(fd, events, token)?;
self.hangups.set(0);
self.max_hangups.set(self.max_hangups.get() + 1);
Ok(())
}
/// If `fd` was previously added to this context, the watched events will be replaced with
/// `events` and the token associated with it will be replaced with the given `token`.
pub fn modify(&self, fd: &AsRawFd, events: WatchingEvents, token: T) -> Result<()> {
self.epoll_ctx.modify(fd, events, token)
}
/// Deletes the given `fd` from this context.
///
/// If an `fd`'s token shows up in the list of hangup events, it should be removed using this
/// method or by closing/dropping (if and only if the fd was never dup()'d/fork()'d) the `fd`.
/// Failure to do so will cause the `wait` method to always return immediately, causing ~100%
/// CPU load.
pub fn delete(&self, fd: &AsRawFd) -> Result<()> {
self.epoll_ctx.delete(fd)?;
self.hangups.set(0);
self.max_hangups.set(self.max_hangups.get() - 1);
Ok(())
}
// This method determines whether the user of `wait` is misusing the `PollContext` by leaving FDs
// in this `PollContext` that have been shut down or hungup on. Such an FD will cause `wait` to
// return instantly with a hungup event. If that FD is perpetually left in this context, a busy
// loop burning ~100% of one CPU will silently occur with no human-visible malfunction.
//
// How do we know if the client of this context is ignoring hangups? A naive implementation
// would trigger if consecutive wait calls yield hangup events, but there are legitimate cases
// for this, such as two distinct sockets becoming hungup across two consecutive wait calls. A
// smarter implementation would only trigger if `delete` wasn't called between waits that
// yielded hangups. Sadly `delete` isn't the only way to remove an FD from this context. The
// other way is for the client to close the hungup FD, which automatically removes it from this
// context. Assuming that the client always uses close, this implementation would trigger too
// eagerly.
//
// The implementation used here keeps an upper bound of FDs in this context using a counter
// hooked into add/delete (which is imprecise because close can also remove FDs without us
// knowing). The number of consecutive (no add or delete in between) hangups yielded by wait
// calls is counted and compared to the upper bound. If the upper bound is exceeded by the
// consecutive hangups, the implementation triggers the check and logs.
//
// This implementation has false negatives because the upper bound can be far too high, in the
// worst case caused by only using close instead of delete. However, this method has the
// advantage of eventually triggering on all genuine busy loop cases, requires no dynamic
// allocations, is fast and constant time to compute, and has no false positives.
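//
// A worked example (illustrative): after three `add` calls, max_hangups == 3. If four
// consecutive `wait` calls each yield one unhandled hangup with no intervening `add` or
// `delete`, the consecutive hangup count reaches 4 > 3 and the warning fires.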
fn check_for_hungup_busy_loop(&self, new_hangups: usize) {
let old_hangups = self.hangups.get();
let max_hangups = self.max_hangups.get();
if old_hangups <= max_hangups && old_hangups + new_hangups > max_hangups {
warn!(
"busy poll wait loop with hungup FDs detected on thread {}",
thread::current().name().unwrap_or("")
);
// This panic is helpful for tests of this functionality.
#[cfg(test)]
panic!("hungup busy loop detected");
}
self.hangups.set(old_hangups + new_hangups);
}
/// Waits for any events to occur in FDs that were previously added to this context.
///
/// The events are level-triggered, meaning that if any events are unhandled (i.e. not reading
/// for readable events and not closing for hungup events), subsequent calls to `wait` will
/// return immediately. The consequence of perpetually failing to handle an event while calling
/// `wait` is that the caller's loop degenerates to busy-loop polling, pinning a CPU to
/// ~100% usage.
///
/// # Panics
/// Panics if the returned `PollEvents` structure is not dropped before subsequent `wait` calls.
pub fn wait(&self) -> Result<PollEvents<T>> {
self.wait_timeout(Duration::new(i64::MAX as u64, 0))
}
/// Like `wait` except will only block for a maximum of the given `timeout`.
///
/// This may return earlier than `timeout` with zero events if the duration indicated exceeds
/// system limits.
pub fn wait_timeout(&self, timeout: Duration) -> Result<PollEvents<T>> {
let events = self.epoll_ctx.wait_timeout(&self.events, timeout)?;
let hangups = events.iter_hungup().count();
self.check_for_hungup_busy_loop(hangups);
Ok(events)
}
}
impl<T: PollToken> AsRawFd for PollContext<T> {
fn as_raw_fd(&self) -> RawFd {
self.epoll_ctx.as_raw_fd()
}
}
impl<T: PollToken> IntoRawFd for PollContext<T> {
fn into_raw_fd(self) -> RawFd {
self.epoll_ctx.into_raw_fd()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::eventfd::EventFd;
use std::os::unix::net::UnixStream;
use std::time::Instant;
#[test]
fn test_poll_context() {
let evt1 = EventFd::new(0).unwrap();
let evt2 = EventFd::new(0).unwrap();
evt1.write(1).unwrap();
evt2.write(1).unwrap();
let ctx: PollContext<u32> = PollContext::new().unwrap();
ctx.add(&evt1, 1).unwrap();
ctx.add(&evt2, 2).unwrap();
let mut evt_count = 0;
while evt_count < 2 {
for event in ctx.wait().unwrap().iter_readable() {
evt_count += 1;
match event.token() {
1 => {
evt1.read().unwrap();
ctx.delete(&evt1).unwrap();
}
2 => {
evt2.read().unwrap();
ctx.delete(&evt2).unwrap();
}
_ => panic!("unexpected token"),
};
}
}
assert_eq!(evt_count, 2);
}
#[test]
fn test_poll_context_overflow() {
const EVT_COUNT: usize = POLL_CONTEXT_MAX_EVENTS * 2 + 1;
let ctx: PollContext<usize> = PollContext::new().unwrap();
let mut evts = Vec::with_capacity(EVT_COUNT);
for i in 0..EVT_COUNT {
let evt = EventFd::new(0).unwrap();
evt.write(1).unwrap();
ctx.add(&evt, i).unwrap();
evts.push(evt);
}
let mut evt_count = 0;
while evt_count < EVT_COUNT {
for event in ctx.wait().unwrap().iter_readable() {
evts[event.token()].read().unwrap();
evt_count += 1;
}
}
}
#[test]
#[should_panic]
fn test_poll_context_hungup() {
let (s1, s2) = UnixStream::pair().unwrap();
let ctx: PollContext<u32> = PollContext::new().unwrap();
ctx.add(&s1, 1).unwrap();
// Causes s1 to receive hangup events, which we purposefully ignore to trip the detection
// logic in `PollContext`.
drop(s2);
// Should easily panic within this many iterations.
for _ in 0..1000 {
ctx.wait().unwrap();
}
}
#[test]
fn test_poll_context_timeout() {
let ctx: PollContext<u32> = PollContext::new().unwrap();
let dur = Duration::from_millis(10);
let start_inst = Instant::now();
ctx.wait_timeout(dur).unwrap();
assert!(start_inst.elapsed() >= dur);
}
}

View File

@ -1,215 +0,0 @@
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: BSD-3-Clause
use std::fs::File;
use std::io::{Error, Result};
use std::os::unix::io::AsRawFd;
#[cfg(target_env = "musl")]
use libc::{c_int, lseek64, ENXIO};
#[cfg(target_env = "gnu")]
use libc::{lseek64, ENXIO, SEEK_DATA, SEEK_HOLE};
/// A trait for seeking to the next hole or non-hole position in a file.
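///
/// # Examples
///
/// A sketch (`no_run`; the `vmm_sys_util::seek_hole` import path is an assumption, and the
/// underlying filesystem must support `SEEK_HOLE`/`SEEK_DATA`):
///
/// ```no_run
/// use std::fs::File;
/// # use vmm_sys_util::seek_hole::SeekHole;
/// let mut file = File::create("/tmp/sparse_file").unwrap();
/// // A freshly extended file is one big hole, so there is no data to find.
/// file.set_len(0x10000).unwrap();
/// assert_eq!(file.seek_data(0).unwrap(), None);
/// ```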
pub trait SeekHole {
/// Seek to the first hole in a file at a position greater than or equal to `offset`.
/// If no holes exist after `offset`, the seek position will be set to the end of the file.
/// If `offset` is at or after the end of the file, the seek position is unchanged, and None is returned.
/// Returns the current seek position after the seek or an error.
fn seek_hole(&mut self, offset: u64) -> Result<Option<u64>>;
/// Seek to the first data in a file at a position greater than or equal to `offset`.
/// If no data exists after `offset`, the seek position is unchanged, and None is returned.
/// Returns the current offset after the seek or an error.
fn seek_data(&mut self, offset: u64) -> Result<Option<u64>>;
}
#[cfg(target_env = "musl")]
pub const SEEK_DATA: c_int = 3;
#[cfg(target_env = "musl")]
pub const SEEK_HOLE: c_int = 4;
/// Safe wrapper for `libc::lseek64()`
fn lseek(file: &mut File, offset: i64, whence: i32) -> Result<Option<u64>> {
// This is safe because we pass a known-good file descriptor.
let res = unsafe { lseek64(file.as_raw_fd(), offset, whence) };
if res < 0 {
// Convert ENXIO into None; pass any other error as-is.
let err = Error::last_os_error();
if let Some(errno) = Error::raw_os_error(&err) {
if errno == ENXIO {
return Ok(None);
}
}
Err(err)
} else {
Ok(Some(res as u64))
}
}
impl SeekHole for File {
fn seek_hole(&mut self, offset: u64) -> Result<Option<u64>> {
lseek(self, offset as i64, SEEK_HOLE)
}
fn seek_data(&mut self, offset: u64) -> Result<Option<u64>> {
lseek(self, offset as i64, SEEK_DATA)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::TempDir;
use std::fs::File;
use std::io::{Seek, SeekFrom, Write};
use std::path::PathBuf;
fn seek_cur(file: &mut File) -> u64 {
file.seek(SeekFrom::Current(0)).unwrap()
}
#[test]
fn seek_data() {
let tempdir = TempDir::new("/tmp/seek_data_test").unwrap();
let mut path = PathBuf::from(tempdir.as_path().unwrap());
path.push("test_file");
let mut file = File::create(&path).unwrap();
// Empty file
assert_eq!(file.seek_data(0).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
// File with non-zero length consisting entirely of a hole
file.set_len(0x10000).unwrap();
assert_eq!(file.seek_data(0).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
// seek_data at or after the end of the file should return None
assert_eq!(file.seek_data(0x10000).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
assert_eq!(file.seek_data(0x10001).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
// Write some data to [0x10000, 0x20000)
let b = [0x55u8; 0x10000];
file.seek(SeekFrom::Start(0x10000)).unwrap();
file.write_all(&b).unwrap();
assert_eq!(file.seek_data(0).unwrap(), Some(0x10000));
assert_eq!(seek_cur(&mut file), 0x10000);
// seek_data within data should return the same offset
assert_eq!(file.seek_data(0x10000).unwrap(), Some(0x10000));
assert_eq!(seek_cur(&mut file), 0x10000);
assert_eq!(file.seek_data(0x10001).unwrap(), Some(0x10001));
assert_eq!(seek_cur(&mut file), 0x10001);
assert_eq!(file.seek_data(0x1FFFF).unwrap(), Some(0x1FFFF));
assert_eq!(seek_cur(&mut file), 0x1FFFF);
// Extend the file to add another hole after the data
file.set_len(0x30000).unwrap();
assert_eq!(file.seek_data(0).unwrap(), Some(0x10000));
assert_eq!(seek_cur(&mut file), 0x10000);
assert_eq!(file.seek_data(0x1FFFF).unwrap(), Some(0x1FFFF));
assert_eq!(seek_cur(&mut file), 0x1FFFF);
assert_eq!(file.seek_data(0x20000).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0x1FFFF);
}
#[test]
#[allow(clippy::cyclomatic_complexity)]
fn seek_hole() {
let tempdir = TempDir::new("/tmp/seek_hole_test").unwrap();
let mut path = PathBuf::from(tempdir.as_path().unwrap());
path.push("test_file");
let mut file = File::create(&path).unwrap();
// Empty file
assert_eq!(file.seek_hole(0).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
// File with non-zero length consisting entirely of a hole
file.set_len(0x10000).unwrap();
assert_eq!(file.seek_hole(0).unwrap(), Some(0));
assert_eq!(seek_cur(&mut file), 0);
assert_eq!(file.seek_hole(0xFFFF).unwrap(), Some(0xFFFF));
assert_eq!(seek_cur(&mut file), 0xFFFF);
// seek_hole at or after the end of the file should return None
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x10000).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
assert_eq!(file.seek_hole(0x10001).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
// Write some data to [0x10000, 0x20000)
let b = [0x55u8; 0x10000];
file.seek(SeekFrom::Start(0x10000)).unwrap();
file.write_all(&b).unwrap();
// seek_hole within a hole should return the same offset
assert_eq!(file.seek_hole(0).unwrap(), Some(0));
assert_eq!(seek_cur(&mut file), 0);
assert_eq!(file.seek_hole(0xFFFF).unwrap(), Some(0xFFFF));
assert_eq!(seek_cur(&mut file), 0xFFFF);
// seek_hole within data should return the next hole (EOF)
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x10000).unwrap(), Some(0x20000));
assert_eq!(seek_cur(&mut file), 0x20000);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x10001).unwrap(), Some(0x20000));
assert_eq!(seek_cur(&mut file), 0x20000);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x1FFFF).unwrap(), Some(0x20000));
assert_eq!(seek_cur(&mut file), 0x20000);
// seek_hole at EOF after data should return None
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x20000).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
// Extend the file to add another hole after the data
file.set_len(0x30000).unwrap();
assert_eq!(file.seek_hole(0).unwrap(), Some(0));
assert_eq!(seek_cur(&mut file), 0);
assert_eq!(file.seek_hole(0xFFFF).unwrap(), Some(0xFFFF));
assert_eq!(seek_cur(&mut file), 0xFFFF);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x10000).unwrap(), Some(0x20000));
assert_eq!(seek_cur(&mut file), 0x20000);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x1FFFF).unwrap(), Some(0x20000));
assert_eq!(seek_cur(&mut file), 0x20000);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x20000).unwrap(), Some(0x20000));
assert_eq!(seek_cur(&mut file), 0x20000);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x20001).unwrap(), Some(0x20001));
assert_eq!(seek_cur(&mut file), 0x20001);
// seek_hole at EOF after a hole should return None
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x30000).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
// Write some data to [0x20000, 0x30000)
file.seek(SeekFrom::Start(0x20000)).unwrap();
file.write_all(&b).unwrap();
// seek_hole within [0x20000, 0x30000) should now find the hole at EOF
assert_eq!(file.seek_hole(0x20000).unwrap(), Some(0x30000));
assert_eq!(seek_cur(&mut file), 0x30000);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x20001).unwrap(), Some(0x30000));
assert_eq!(seek_cur(&mut file), 0x30000);
file.seek(SeekFrom::Start(0)).unwrap();
assert_eq!(file.seek_hole(0x30000).unwrap(), None);
assert_eq!(seek_cur(&mut file), 0);
}
}

View File

@ -1,420 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use libc::{
c_int, c_void, pthread_kill, pthread_sigmask, pthread_t, sigaction, sigaddset, sigemptyset,
siginfo_t, sigismember, sigpending, sigset_t, sigtimedwait, timespec, EAGAIN, EINTR, EINVAL,
SIGHUP, SIGSYS, SIG_BLOCK, SIG_UNBLOCK,
};
use errno;
use std::fmt::{self, Display};
use std::io;
use std::mem;
use std::os::unix::thread::JoinHandleExt;
use std::ptr::{null, null_mut};
use std::result;
use std::thread::JoinHandle;
#[derive(Debug)]
pub enum Error {
/// Couldn't create a sigset.
CreateSigset(errno::Error),
/// The wrapped signal has already been blocked.
SignalAlreadyBlocked(c_int),
/// Failed to check if the requested signal is in the blocked set already.
CompareBlockedSignals(errno::Error),
/// The signal could not be blocked.
BlockSignal(errno::Error),
/// The signal mask could not be retrieved.
RetrieveSignalMask(i32),
/// The signal could not be unblocked.
UnblockSignal(errno::Error),
/// Failed to wait for given signal.
ClearWaitPending(errno::Error),
/// Failed to get pending signals.
ClearGetPending(errno::Error),
/// Failed to check if given signal is in the set of pending signals.
ClearCheckPending(errno::Error),
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match self {
CreateSigset(e) => write!(f, "couldn't create a sigset: {}", e),
SignalAlreadyBlocked(num) => write!(f, "signal {} already blocked", num),
CompareBlockedSignals(e) => write!(
f,
"failed to check whether requested signal is in the blocked set: {}",
e,
),
BlockSignal(e) => write!(f, "signal could not be blocked: {}", e),
RetrieveSignalMask(errno) => write!(
f,
"failed to retrieve signal mask: {}",
io::Error::from_raw_os_error(*errno),
),
UnblockSignal(e) => write!(f, "signal could not be unblocked: {}", e),
ClearWaitPending(e) => write!(f, "failed to wait for given signal: {}", e),
ClearGetPending(e) => write!(f, "failed to get pending signals: {}", e),
ClearCheckPending(e) => write!(
f,
"failed to check whether given signal is in the pending set: {}",
e,
),
}
}
}
pub type SignalResult<T> = result::Result<T, Error>;
type SiginfoHandler = extern "C" fn(num: c_int, info: *mut siginfo_t, _unused: *mut c_void) -> ();
pub enum SignalHandler {
Siginfo(SiginfoHandler),
// TODO add a `SimpleHandler` when `libc` adds `sa_handler` support to `sigaction`.
}
impl SignalHandler {
fn set_flags(act: &mut sigaction, flag: c_int) {
act.sa_flags = flag;
}
}
/// Fills a `sigaction` structure from the signal handler.
/// Refer to http://man7.org/linux/man-pages/man7/signal.7.html
impl Into<sigaction> for SignalHandler {
fn into(self) -> sigaction {
let mut act: sigaction = unsafe { mem::zeroed() };
match self {
SignalHandler::Siginfo(function) => {
act.sa_sigaction = function as *const () as usize;
}
}
act
}
}
extern "C" {
fn __libc_current_sigrtmin() -> c_int;
fn __libc_current_sigrtmax() -> c_int;
}
/// Returns the minimum (inclusive) real-time signal number.
#[allow(non_snake_case)]
fn SIGRTMIN() -> c_int {
unsafe { __libc_current_sigrtmin() }
}
/// Returns the maximum (inclusive) real-time signal number.
#[allow(non_snake_case)]
fn SIGRTMAX() -> c_int {
unsafe { __libc_current_sigrtmax() }
}
/// Verifies that a signal number is valid: for VCPU signals, it needs to be enclosed within the OS
/// limits for realtime signals, and the remaining ones need to be between the minimum (SIGHUP) and
/// maximum (SIGSYS) values.
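///
/// # Examples
///
/// A sketch (the `vmm_sys_util::signal` import path is an assumption):
///
/// ```
/// # use vmm_sys_util::signal::validate_signal_num;
/// // vCPU signal numbers are offsets from SIGRTMIN.
/// assert!(validate_signal_num(0, true).is_ok());
/// // SIGKILL (9) lies between SIGHUP and SIGSYS.
/// assert!(validate_signal_num(9, false).is_ok());
/// ```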
pub fn validate_signal_num(num: c_int, for_vcpu: bool) -> errno::Result<c_int> {
if for_vcpu {
let actual_num = num + SIGRTMIN();
if actual_num <= SIGRTMAX() {
return Ok(actual_num);
}
} else if SIGHUP <= num && num <= SIGSYS {
return Ok(num);
}
Err(errno::Error::new(EINVAL))
}
/// Registers `handler` as the signal handler of signum `num`.
///
/// Uses `sigaction` to register the handler.
///
/// This is considered unsafe because the given handler will be called asynchronously, interrupting
/// whatever the thread was doing, and therefore must only perform async-signal-safe operations.
/// `flag`: `SA_SIGINFO`, optionally combined with `SA_RESTART` to restart interrupted calls
/// after the signal is handled.
pub unsafe fn register_signal_handler(
num: i32,
handler: SignalHandler,
for_vcpu: bool,
flag: c_int,
) -> errno::Result<()> {
let num = validate_signal_num(num, for_vcpu)?;
let mut act: sigaction = handler.into();
SignalHandler::set_flags(&mut act, flag);
match sigaction(num, &act, null_mut()) {
0 => Ok(()),
_ => errno::errno_result(),
}
}
/// Creates `sigset` from an array of signal numbers.
///
/// This is a helper function used when we want to manipulate signals.
pub fn create_sigset(signals: &[c_int]) -> errno::Result<sigset_t> {
// sigset will actually be initialized by sigemptyset below.
let mut sigset: sigset_t = unsafe { mem::zeroed() };
// Safe - return value is checked.
let ret = unsafe { sigemptyset(&mut sigset) };
if ret < 0 {
return errno::errno_result();
}
for signal in signals {
// Safe - return value is checked.
let ret = unsafe { sigaddset(&mut sigset, *signal) };
if ret < 0 {
return errno::errno_result();
}
}
Ok(sigset)
}
/// Retrieves the signal mask of the current thread as a vector of c_ints.
pub fn get_blocked_signals() -> SignalResult<Vec<c_int>> {
let mut mask = Vec::new();
// Safe - return values are checked.
unsafe {
let mut old_sigset: sigset_t = mem::zeroed();
let ret = pthread_sigmask(SIG_BLOCK, null(), &mut old_sigset as *mut sigset_t);
if ret < 0 {
return Err(Error::RetrieveSignalMask(ret));
}
for num in 0..=SIGRTMAX() {
if sigismember(&old_sigset, num) > 0 {
mask.push(num);
}
}
}
Ok(mask)
}
/// Masks the given signal.
///
/// If the signal is already blocked, the call will fail with an
/// `Error::SignalAlreadyBlocked` result.
pub fn block_signal(num: c_int) -> SignalResult<()> {
let sigset = create_sigset(&[num]).map_err(Error::CreateSigset)?;
// Safe - return values are checked.
unsafe {
let mut old_sigset: sigset_t = mem::zeroed();
let ret = pthread_sigmask(SIG_BLOCK, &sigset, &mut old_sigset as *mut sigset_t);
if ret < 0 {
return Err(Error::BlockSignal(errno::Error::last()));
}
let ret = sigismember(&old_sigset, num);
if ret < 0 {
return Err(Error::CompareBlockedSignals(errno::Error::last()));
} else if ret > 0 {
return Err(Error::SignalAlreadyBlocked(num));
}
}
Ok(())
}
/// Unmasks the given signal.
pub fn unblock_signal(num: c_int) -> SignalResult<()> {
let sigset = create_sigset(&[num]).map_err(Error::CreateSigset)?;
// Safe - return value is checked.
let ret = unsafe { pthread_sigmask(SIG_UNBLOCK, &sigset, null_mut()) };
if ret < 0 {
return Err(Error::UnblockSignal(errno::Error::last()));
}
Ok(())
}
/// Clears all pending instances of the given signal.
pub fn clear_signal(num: c_int) -> SignalResult<()> {
let sigset = create_sigset(&[num]).map_err(Error::CreateSigset)?;
while {
// This is safe as we are rigorously checking return values
// of libc calls.
unsafe {
let mut siginfo: siginfo_t = mem::zeroed();
let ts = timespec {
tv_sec: 0,
tv_nsec: 0,
};
// Attempt to consume one instance of pending signal. If signal
// is not pending, the call will fail with EAGAIN or EINTR.
let ret = sigtimedwait(&sigset, &mut siginfo, &ts);
if ret < 0 {
let e = errno::Error::last();
match e.errno() {
EAGAIN | EINTR => {}
_ => {
return Err(Error::ClearWaitPending(errno::Error::last()));
}
}
}
// This sigset will be actually filled with `sigpending` call.
let mut chkset: sigset_t = mem::zeroed();
// See if more instances of the signal are pending.
let ret = sigpending(&mut chkset);
if ret < 0 {
return Err(Error::ClearGetPending(errno::Error::last()));
}
let ret = sigismember(&chkset, num);
if ret < 0 {
return Err(Error::ClearCheckPending(errno::Error::last()));
}
// This is do-while loop condition.
ret != 0
}
} {}
Ok(())
}
/// Trait for threads that can be signalled via `pthread_kill`.
///
/// Note that this is only useful for signals between SIGRTMIN and SIGRTMAX because these are
/// guaranteed to not be used by the C runtime.
///
/// This is marked unsafe because the implementation of this trait must guarantee that the returned
/// pthread_t is valid and has a lifetime at least that of the trait object.
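///
/// # Examples
///
/// A sketch (`no_run`: a handler for the signal must already be registered, e.g. via
/// `register_signal_handler`, or the default action will kill the process; the
/// `vmm_sys_util::signal` import path is an assumption):
///
/// ```no_run
/// use std::thread;
/// # use vmm_sys_util::signal::Killable;
/// let handle = thread::spawn(|| loop {});
/// // Sends SIGRTMIN + 0 to the spawned thread.
/// handle.kill(0).unwrap();
/// ```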
pub unsafe trait Killable {
fn pthread_handle(&self) -> pthread_t;
/// Sends the signal `num + SIGRTMIN` to this killable thread.
///
/// The value of `num + SIGRTMIN` must not exceed `SIGRTMAX`.
fn kill(&self, num: i32) -> errno::Result<()> {
let num = validate_signal_num(num, true)?;
// Safe because we ensure we are using a valid pthread handle, a valid signal number, and
// check the return result.
let ret = unsafe { pthread_kill(self.pthread_handle(), num) };
if ret < 0 {
return errno::errno_result();
}
Ok(())
}
}
// Safe because we fulfill our contract of returning a genuine pthread handle.
unsafe impl<T> Killable for JoinHandle<T> {
fn pthread_handle(&self) -> pthread_t {
self.as_pthread_t()
}
}
#[cfg(test)]
mod tests {
use super::*;
use libc::SA_SIGINFO;
use std::thread;
use std::time::Duration;
static mut SIGNAL_HANDLER_CALLED: bool = false;
extern "C" fn handle_signal(_: c_int, _: *mut siginfo_t, _: *mut c_void) {
unsafe {
SIGNAL_HANDLER_CALLED = true;
}
}
#[test]
fn test_register_signal_handler() {
unsafe {
// testing bad value
assert!(register_signal_handler(
SIGRTMAX(),
SignalHandler::Siginfo(handle_signal),
true,
SA_SIGINFO
)
.is_err());
format!(
"{:?}",
register_signal_handler(
SIGRTMAX(),
SignalHandler::Siginfo(handle_signal),
true,
SA_SIGINFO
)
);
assert!(register_signal_handler(
0,
SignalHandler::Siginfo(handle_signal),
true,
SA_SIGINFO
)
.is_ok());
assert!(register_signal_handler(
libc::SIGSYS,
SignalHandler::Siginfo(handle_signal),
false,
SA_SIGINFO
)
.is_ok());
}
}
#[test]
#[allow(clippy::empty_loop)]
fn test_killing_thread() {
let killable = thread::spawn(|| thread::current().id());
let killable_id = killable.join().unwrap();
assert_ne!(killable_id, thread::current().id());
// We install a signal handler for the specified signal; otherwise the whole process will
// be brought down when the signal is received, as part of the default behaviour. Signal
// handlers are global, so we install this before starting the thread.
unsafe {
register_signal_handler(0, SignalHandler::Siginfo(handle_signal), true, SA_SIGINFO)
.expect("failed to register vcpu signal handler");
}
let killable = thread::spawn(|| loop {});
let res = killable.kill(SIGRTMAX());
assert!(res.is_err());
format!("{:?}", res);
unsafe {
assert!(!SIGNAL_HANDLER_CALLED);
}
assert!(killable.kill(0).is_ok());
// We're waiting to detect that the signal handler has been called.
const MAX_WAIT_ITERS: u32 = 20;
let mut iter_count = 0;
loop {
thread::sleep(Duration::from_millis(100));
if unsafe { SIGNAL_HANDLER_CALLED } {
break;
}
iter_count += 1;
// timeout if we wait too long
assert!(iter_count <= MAX_WAIT_ITERS);
}
// Our signal handler doesn't do anything which influences the killable thread, so the
// previous signal is effectively ignored. If we were to join killable here, we would block
// forever as the loop keeps running. Since we don't join, the thread will become detached
// as the handle is dropped, and will be killed when the process/main thread exits.
}
}

View File

@ -1,643 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//! Facilities for sending log message to syslog.
//!
//! Every function exported by this module is thread-safe. Each function will silently fail until
//! `syslog::init()` is called and returns `Ok`.
//!
//! # Examples
//!
//! ```
//! #[macro_use]
//! extern crate vmm_sys_util;
//!
//! use vmm_sys_util::syslog::init;
//! fn main() {
//! if let Err(e) = init() {
//! println!("failed to initiailize syslog: {}", e);
//! return;
//! }
//! warn!("this is your {} warning", "final");
//! error!("something went horribly wrong: {}", "out of RAMs");
//! }
//! ```
use std::env;
use std::ffi::CString;
use std::ffi::{OsStr, OsString};
use std::fmt::{self, Display};
use std::fs::File;
use std::io;
use std::io::{stderr, Cursor, ErrorKind, Write};
use std::mem;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::os::unix::net::UnixDatagram;
use std::path::PathBuf;
use std::ptr::null;
use std::str::from_utf8;
use std::sync::{Mutex as StdMutex, MutexGuard, Once, ONCE_INIT};
use libc::{
c_char, c_long, closelog, fcntl, gethostname, localtime_r, openlog, pid_t, syscall, time,
time_t, tm, F_GETFD, LOG_NDELAY, LOG_PERROR, LOG_PID, LOG_USER,
};
/// Temporarily define the linux-x86_64 `getpid` syscall number here.
#[allow(non_upper_case_globals)]
pub const SYS_getpid: c_long = 39;
const SYSLOG_PATH: &str = "/dev/log";
/// The priority (i.e. severity) of a syslog message.
///
/// See syslog man pages for information on their semantics.
#[derive(Copy, Clone, Debug)]
pub enum Priority {
Emergency = 0,
Alert = 1,
Critical = 2,
Error = 3,
Warning = 4,
Notice = 5,
Info = 6,
Debug = 7,
}
impl fmt::Display for Priority {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Priority::Emergency => write!(f, "EMERGENCY"),
Priority::Alert => write!(f, "ALERT"),
Priority::Critical => write!(f, "CRITICAL"),
Priority::Error => write!(f, "ERROR"),
Priority::Warning => write!(f, "WARNING"),
Priority::Notice => write!(f, "NOTICE"),
Priority::Info => write!(f, "INFO"),
Priority::Debug => write!(f, "DEBUG"),
}
}
}
/// The facility of a syslog message.
///
/// See syslog man pages for information on their semantics.
pub enum Facility {
Kernel = 0,
User = 1 << 3,
Mail = 2 << 3,
Daemon = 3 << 3,
Auth = 4 << 3,
Syslog = 5 << 3,
Lpr = 6 << 3,
News = 7 << 3,
Uucp = 8 << 3,
Local0 = 16 << 3,
Local1 = 17 << 3,
Local2 = 18 << 3,
Local3 = 19 << 3,
Local4 = 20 << 3,
Local5 = 21 << 3,
Local6 = 22 << 3,
Local7 = 23 << 3,
}
/// Errors returned by `syslog::init()`.
#[derive(Debug)]
pub enum Error {
/// Initialization was never attempted.
NeverInitialized,
/// Initialization has previously failed and can not be retried.
Poisoned,
/// Error while creating socket.
Socket(io::Error),
/// Error while attempting to connect socket.
Connect(io::Error),
/// There was an error using `open` to get the lowest file descriptor.
GetLowestFd(io::Error),
/// The guess of libc's file descriptor for the syslog connection was invalid.
InvalidFd,
}
impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Error::*;
match self {
NeverInitialized => write!(f, "initialization was never attempted"),
Poisoned => write!(f, "initialization previously failed and cannot be retried"),
Socket(e) => write!(f, "failed to create socket: {}", e),
Connect(e) => write!(f, "failed to connect socket: {}", e),
GetLowestFd(e) => write!(f, "failed to get lowest file descriptor: {}", e),
InvalidFd => write!(f, "guess of fd for syslog connection was invalid"),
}
}
}
fn get_hostname() -> Result<String, ()> {
let mut hostname: [u8; 256] = [b'\0'; 256];
// Safe because we give a valid pointer to a buffer of the indicated length and check for the
// result.
let ret = unsafe { gethostname(hostname.as_mut_ptr() as *mut c_char, hostname.len()) };
if ret == -1 {
return Err(());
}
let len = hostname.iter().position(|&v| v == b'\0').ok_or(())?;
Ok(from_utf8(&hostname[..len]).map_err(|_| ())?.to_string())
}
fn get_proc_name() -> Option<String> {
env::args_os()
.next()
.map(PathBuf::from)
.and_then(|s| s.file_name().map(OsStr::to_os_string))
.map(OsString::into_string)
.and_then(Result::ok)
}
// Uses libc's openlog function to get a socket to the syslogger. By getting the socket this way, as
// opposed to connecting to the syslogger directly, libc's internal state gets initialized for other
// libraries (e.g. minijail) that make use of libc's syslog function. Note that this function
// depends on no other threads or signal handlers being active in this process because they might
// create FDs.
//
// TODO(zachr): Once https://android-review.googlesource.com/470998 lands, there won't be any
// libraries in use that hard depend on libc's syslogger. Remove this and go back to making the
// connection directly once minijail is ready.
fn openlog_and_get_socket() -> Result<UnixDatagram, Error> {
// closelog first in case there was already a file descriptor open. Safe because it takes no
// arguments and just closes an open file descriptor. Does nothing if the file descriptor
// was not already open.
unsafe {
closelog();
}
let file_path = CString::new("/dev/null").unwrap();
unsafe {
// Ordinarily libc's FD for the syslog connection can't be accessed, but we can guess that the
// FD that openlog will be getting is the lowest unused FD. To guarantee that an FD is opened in
// this function we use LOG_NDELAY to tell openlog to connect to the syslog now. To get the
// lowest unused FD, we open a dummy file (which the manual says will always return the lowest
// fd), and then close that fd. Voilà, we now know the lowest numbered FD. The call to openlog
// will make use of that FD, and then we just wrap a `UnixDatagram` around it for ease of use.
let fd = libc::open(file_path.as_ptr(), libc::O_RDONLY);
if fd < 0 {
let err = io::Error::last_os_error();
return Err(Error::GetLowestFd(err));
}
// Safe because openlog dereferences no pointers since `ident` is null, only valid flags are
// used, and it returns no error.
openlog(null(), LOG_NDELAY | LOG_PERROR | LOG_PID, LOG_USER);
// For safety, ensure the fd we guessed is valid. The `fcntl` call itself only reads the
// file descriptor table of the current process, which is trivially safe.
if fcntl(fd, F_GETFD) >= 0 {
Ok(UnixDatagram::from_raw_fd(fd))
} else {
Err(Error::InvalidFd)
}
}
}
struct State {
stderr: bool,
socket: Option<UnixDatagram>,
file: Option<File>,
hostname: Option<String>,
proc_name: Option<String>,
}
impl State {
fn new() -> Result<State, Error> {
let s = openlog_and_get_socket()?;
Ok(State {
stderr: true,
socket: Some(s),
file: None,
hostname: get_hostname().ok(),
proc_name: get_proc_name(),
})
}
}
static STATE_ONCE: Once = ONCE_INIT;
static mut STATE: *const StdMutex<State> = 0 as *const _;
fn new_mutex_ptr<T>(inner: T) -> *const StdMutex<T> {
Box::into_raw(Box::new(StdMutex::new(inner)))
}
/// Initialize the syslog connection and internal variables.
///
/// This should only be called once per process before any other threads have been spawned or any
/// signal handlers have been registered. Every call made after the first will have no effect
/// besides returning `Ok` or `Err` as appropriate.
pub fn init() -> Result<(), Error> {
let mut err = Error::Poisoned;
STATE_ONCE.call_once(|| match State::new() {
// Safe because STATE mutation is guarded by `Once`.
Ok(state) => unsafe { STATE = new_mutex_ptr(state) },
Err(e) => err = e,
});
if unsafe { STATE.is_null() } {
Err(err)
} else {
Ok(())
}
}
fn lock() -> Result<MutexGuard<'static, State>, Error> {
// Safe because we assume that STATE is always in either a valid or NULL state.
let state_ptr = unsafe { STATE };
if state_ptr.is_null() {
return Err(Error::NeverInitialized);
}
// Safe because STATE only mutates once and we checked for NULL.
let state = unsafe { &*state_ptr };
let guard = match state.lock() {
Ok(guard) => guard,
_ => panic!("mutex is poisoned"),
};
Ok(guard)
}
// Attempts to lock and retrieve the state. Returns from the function silently on failure.
macro_rules! lock {
() => {
match lock() {
Ok(s) => s,
_ => return,
};
};
}
/// Replaces the hostname reported in each syslog message.
///
/// The default hostname is whatever `gethostname()` returned when `vmm_sys_util::syslog::init()` was first
/// called.
///
/// Does nothing if syslog was never initialized.
pub fn set_hostname<T: Into<String>>(hostname: T) {
let mut state = lock!();
state.hostname = Some(hostname.into());
}
/// Replaces the process name reported in each syslog message.
///
/// The default process name is the _file name_ of `argv[0]`. For example, if this program was
/// invoked as
///
/// ```bash
/// $ path/to/app --delete everything
/// ```
///
/// the default process name would be _app_.
///
/// Does nothing if syslog was never initialized.
pub fn set_proc_name<T: Into<String>>(proc_name: T) {
let mut state = lock!();
state.proc_name = Some(proc_name.into());
}
/// Enables or disables echoing log messages to the syslog.
///
/// The default behavior is **enabled**.
///
/// If `enable` goes from `true` to `false`, the syslog connection is closed. The connection is
/// reopened if `enable` is set to `true` after it became `false`.
///
/// Returns an error if syslog was never initialized or the syslog connection failed to be
/// established.
///
/// # Arguments
/// * `enable` - `true` to enable echoing to syslog, `false` to disable echoing to syslog.
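///
/// # Examples
///
/// A minimal sketch (`no_run`; requires a prior successful `init()`):
///
/// ```no_run
/// # use vmm_sys_util::syslog::{echo_syslog, init};
/// init().unwrap();
/// echo_syslog(false).unwrap(); // close the syslog connection
/// echo_syslog(true).unwrap(); // reopen it
/// ```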
pub fn echo_syslog(enable: bool) -> Result<(), Error> {
let state_ptr = unsafe { STATE };
if state_ptr.is_null() {
return Err(Error::NeverInitialized);
}
let mut state = lock().map_err(|_| Error::Poisoned)?;
match state.socket.take() {
Some(_) if enable => {}
Some(s) => {
// Because `openlog_and_get_socket` actually just "borrows" the syslog FD, this module
// does not own the syslog connection and therefore should not destroy it.
mem::forget(s);
}
None if enable => {
let s = openlog_and_get_socket()?;
state.socket = Some(s);
}
_ => {}
}
Ok(())
}
/// Replaces the optional `File` to echo log messages to.
///
/// The default behavior is to not echo to a file. Passing `None` to this function restores that
/// behavior.
///
/// Does nothing if syslog was never initialized.
///
/// # Arguments
/// * `file` - `Some(file)` to echo to `file`, `None` to disable echoing to the file previously passed to `echo_file`.
pub fn echo_file(file: Option<File>) {
let mut state = lock!();
state.file = file;
}
/// Enables or disables echoing log messages to the `std::io::stderr()`.
///
/// The default behavior is **enabled**.
///
/// Does nothing if syslog was never initialized.
///
/// # Arguments
/// * `enable` - `true` to enable echoing to stderr, `false` to disable echoing to stderr.
pub fn echo_stderr(enable: bool) {
let mut state = lock!();
state.stderr = enable;
}
/// Retrieves the file descriptors owned by the global syslogger.
///
/// Does nothing if syslog was never initialized. If there are any file descriptors, they will be
/// pushed into `fds`.
///
/// Note that the `stderr` file descriptor is never added, as it is not owned by syslog.
#[allow(clippy::redundant_closure)]
pub fn push_fds(fds: &mut Vec<RawFd>) {
let state = lock!();
fds.extend(state.socket.iter().map(|s| s.as_raw_fd()));
fds.extend(state.file.iter().map(|f| f.as_raw_fd()));
}
/// Should only be called after `init()` was called.
fn send_buf(socket: &UnixDatagram, buf: &[u8]) {
const SEND_RETRY: usize = 2;
for _ in 0..SEND_RETRY {
match socket.send(&buf[..]) {
Ok(_) => break,
Err(e) => match e.kind() {
ErrorKind::ConnectionRefused
| ErrorKind::ConnectionReset
| ErrorKind::ConnectionAborted
| ErrorKind::NotConnected => {
let res = socket.connect(SYSLOG_PATH);
if res.is_err() {
break;
}
}
_ => {}
},
}
}
}
fn get_localtime() -> tm {
unsafe {
// Safe because tm is just a struct of plain data.
let mut tm: tm = mem::zeroed();
let mut now: time_t = 0;
// Safe because we give `time` a valid pointer and it can never fail.
time(&mut now as *mut _);
// Safe because we give `localtime_r` valid pointers and it can never fail.
localtime_r(&now, &mut tm as *mut _);
tm
}
}
/// Records a log message with the given details.
///
/// Note that this will fail silently if syslog was not initialized.
///
/// # Arguments
/// * `pri` - The `Priority` (i.e. severity) of the log message.
/// * `fac` - The `Facility` of the log message. Usually `Facility::User` should be used.
/// * `file_name` - Name of the file that generated the log.
/// * `line` - Line number within `file_name` that generated the log.
/// * `args` - The log's message to record, in the form of a `format_args!()` return value.
///
/// # Examples
///
/// ```
/// # use vmm_sys_util::syslog::{init, log, Priority, Facility};
/// # fn main() {
/// # if let Err(e) = init() {
/// # println!("failed to initiailize syslog: {}", e);
/// # return;
/// # }
/// log(Priority::Error,
/// Facility::User,
/// file!(),
/// line!(),
/// format_args!("hello syslog"));
/// # }
/// ```
#[allow(clippy::redundant_closure)]
pub fn log(pri: Priority, fac: Facility, file_name: &str, line: u32, args: fmt::Arguments) {
const MONTHS: [&str; 12] = [
"Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
];
let mut state = lock!();
let mut buf = [0u8; 1024];
if let Some(ref socket) = state.socket {
let tm = get_localtime();
let prifac = (pri as u8) | (fac as u8);
let (res, len) = {
let mut buf_cursor = Cursor::new(&mut buf[..]);
(
write!(
&mut buf_cursor,
"<{}>{} {:02} {:02}:{:02}:{:02} {} {}[{}]: [{}:{}] {}",
prifac,
MONTHS[tm.tm_mon as usize],
tm.tm_mday,
tm.tm_hour,
tm.tm_min,
tm.tm_sec,
state.hostname.as_ref().map(|s| s.as_ref()).unwrap_or("-"),
state.proc_name.as_ref().map(|s| s.as_ref()).unwrap_or("-"),
unsafe { syscall(SYS_getpid as c_long) as pid_t },
//getpid(),
file_name,
line,
args
),
buf_cursor.position() as usize,
)
};
if res.is_ok() {
send_buf(&socket, &buf[..len]);
}
}
let (res, len) = {
let mut buf_cursor = Cursor::new(&mut buf[..]);
(
writeln!(&mut buf_cursor, "[{}:{}:{}] {}", pri, file_name, line, args),
buf_cursor.position() as usize,
)
};
if res.is_ok() {
if let Some(ref mut file) = state.file {
let _ = file.write_all(&buf[..len]);
}
if state.stderr {
let _ = stderr().write_all(&buf[..len]);
}
}
}
/// A macro for logging at an arbitrary priority level.
///
/// Note that this will fail silently if syslog was not initialized.
#[macro_export]
macro_rules! log {
($pri:expr, $($args:tt)+) => ({
$crate::syslog::log($pri, $crate::syslog::Facility::User, file!(), line!(), format_args!($($args)+))
})
}
/// A macro for logging an error.
///
/// Note that this will fail silently if syslog was not initialized.
#[macro_export]
macro_rules! error {
($($args:tt)+) => (log!($crate::syslog::Priority::Error, $($args)*))
}
/// A macro for logging a warning.
///
/// Note that this will fail silently if syslog was not initialized.
#[macro_export]
macro_rules! warn {
($($args:tt)+) => (log!($crate::syslog::Priority::Warning, $($args)*))
}
/// A macro for logging info.
///
/// Note that this will fail silently if syslog was not initialized.
#[macro_export]
macro_rules! info {
($($args:tt)+) => (log!($crate::syslog::Priority::Info, $($args)*))
}
/// A macro for logging debug information.
///
/// Note that this will fail silently if syslog was not initialized.
#[macro_export]
macro_rules! debug {
($($args:tt)+) => (log!($crate::syslog::Priority::Debug, $($args)*))
}
#[cfg(test)]
mod tests {
use super::*;
use libc::{shm_open, shm_unlink, O_CREAT, O_EXCL, O_RDWR};
use std::ffi::CStr;
use std::io::{Read, Seek, SeekFrom};
use std::os::unix::io::FromRawFd;
#[test]
fn test_init_syslog() {
init().unwrap();
}
#[test]
fn test_fds() {
init().unwrap();
let mut fds = Vec::new();
push_fds(&mut fds);
assert!(!fds.is_empty());
for fd in fds {
assert!(fd >= 0);
}
}
#[test]
fn test_syslog_log() {
init().unwrap();
log(
Priority::Error,
Facility::User,
file!(),
line!(),
format_args!("hello syslog"),
);
}
#[test]
fn test_proc_name() {
init().unwrap();
log(
Priority::Error,
Facility::User,
file!(),
line!(),
format_args!("before proc name"),
);
set_proc_name("sys_util-test");
log(
Priority::Error,
Facility::User,
file!(),
line!(),
format_args!("after proc name"),
);
}
#[test]
#[allow(clippy::zero_prefixed_literal)]
fn test_syslog_file() {
init().unwrap();
let shm_name = CStr::from_bytes_with_nul(b"/crosvm_shm\0").unwrap();
let mut file = unsafe {
shm_unlink(shm_name.as_ptr());
let fd = shm_open(shm_name.as_ptr(), O_RDWR | O_CREAT | O_EXCL, 0666);
assert!(fd >= 0, "error creating shared memory;");
File::from_raw_fd(fd)
};
let syslog_file = file.try_clone().expect("error cloning shared memory file");
echo_file(Some(syslog_file));
const TEST_STR: &str = "hello shared memory file";
log(
Priority::Error,
Facility::User,
file!(),
line!(),
format_args!("{}", TEST_STR),
);
file.seek(SeekFrom::Start(0))
.expect("error seeking shared memory file");
let mut buf = String::new();
file.read_to_string(&mut buf)
.expect("error reading shared memory file");
assert!(buf.contains(TEST_STR));
}
#[test]
fn test_macros() {
init().unwrap();
error!("this is an error {}", 3);
warn!("this is a warning {}", "uh oh");
info!("this is info {}", true);
debug!("this is debug info {:?}", Some("helpful stuff"));
}
}

View File

@ -1,104 +0,0 @@
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: BSD-3-Clause
use std::ffi::CString;
use std::ffi::OsStr;
use std::ffi::OsString;
use std::fs;
use std::os::unix::ffi::OsStringExt;
use std::path::Path;
use std::path::PathBuf;
use libc;
use crate::{errno_result, Result};
/// Create and remove a temporary directory. The directory will be maintained for the lifetime of
/// the `TempDir` object.
pub struct TempDir {
path: Option<PathBuf>,
}
impl TempDir {
/// Creates a new temporary directory.
/// The directory will be removed when the object goes out of scope.
///
/// # Examples
///
/// ```
/// # use std::path::Path;
/// # use std::path::PathBuf;
/// # use vmm_sys_util::TempDir;
/// # fn test_create_temp_dir() -> Result<(), ()> {
/// let t = TempDir::new("/tmp/testdir").map_err(|_| ())?;
/// assert!(t.as_path().unwrap().exists());
/// # Ok(())
/// # }
/// ```
pub fn new<P: AsRef<OsStr>>(prefix: P) -> Result<TempDir> {
let mut dir_string = prefix.as_ref().to_os_string();
dir_string.push("XXXXXX");
// Unwrap this result because the internal bytes of a valid path can't contain a null.
let dir_name = CString::new(dir_string.into_vec()).unwrap();
let mut dir_bytes = dir_name.into_bytes_with_nul();
let ret = unsafe {
// Creating the directory isn't unsafe. The fact that it modifies the guts of the path
// is also OK because it only overwrites the last 6 Xs added above.
libc::mkdtemp(dir_bytes.as_mut_ptr() as *mut libc::c_char)
};
if ret.is_null() {
return errno_result();
}
dir_bytes.pop(); // Remove the null because from_vec can't handle it.
Ok(TempDir {
path: Some(PathBuf::from(OsString::from_vec(dir_bytes))),
})
}
/// Removes the temporary directory. Calling this is optional as dropping a `TempDir` object
/// will also remove the directory. Calling remove explicitly allows for better error handling.
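///
/// # Examples
///
/// A minimal sketch, mirroring the `new` example above:
///
/// ```
/// # use vmm_sys_util::TempDir;
/// let t = TempDir::new("/tmp/testdir").unwrap();
/// t.remove().unwrap();
/// ```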
pub fn remove(mut self) -> Result<()> {
let path = self.path.take();
path.map_or(Ok(()), fs::remove_dir_all)?;
Ok(())
}
/// Returns the path to the tempdir if it is currently valid.
pub fn as_path(&self) -> Option<&Path> {
self.path.as_ref().map(|ref p| p.as_path())
}
}
impl Drop for TempDir {
fn drop(&mut self) {
if let Some(ref p) = self.path {
// Nothing can be done here if this returns an error.
let _ = fs::remove_dir_all(p);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn create_dir() {
let t = TempDir::new("/tmp/asdf").unwrap();
let path = t.as_path().unwrap();
assert!(path.exists());
assert!(path.is_dir());
assert!(path.starts_with("/tmp/"));
}
#[test]
fn remove_dir() {
let t = TempDir::new("/tmp/asdf").unwrap();
let path = t.as_path().unwrap().to_owned();
assert!(t.remove().is_ok());
assert!(!path.exists());
}
}

View File

@ -1,155 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use std::io::StdinLock;
use std::mem::zeroed;
use std::os::unix::io::RawFd;
use libc::{
c_int, fcntl, isatty, read, tcgetattr, tcsetattr, termios, ECHO, F_GETFL, F_SETFL, ICANON,
ISIG, O_NONBLOCK, STDIN_FILENO, TCSANOW,
};
use crate::errno::{errno_result, Result};
fn modify_mode<F: FnOnce(&mut termios)>(fd: RawFd, f: F) -> Result<()> {
// Safe because we check the return value of isatty.
if unsafe { isatty(fd) } != 1 {
return Ok(());
}
// The following pair are safe because termios gets totally overwritten by tcgetattr and we
// check the return result.
let mut termios: termios = unsafe { zeroed() };
let ret = unsafe { tcgetattr(fd, &mut termios as *mut _) };
if ret < 0 {
return errno_result();
}
let mut new_termios = termios;
f(&mut new_termios);
// Safe because the syscall will only read the extent of termios and we check the return result.
let ret = unsafe { tcsetattr(fd, TCSANOW, &new_termios as *const _) };
if ret < 0 {
return errno_result();
}
Ok(())
}
fn get_flags(fd: RawFd) -> Result<c_int> {
// Safe because no third parameter is expected and we check the return result.
let ret = unsafe { fcntl(fd, F_GETFL) };
if ret < 0 {
return errno_result();
}
Ok(ret)
}
fn set_flags(fd: RawFd, flags: c_int) -> Result<()> {
// Safe because we supply the third parameter and we check the return result.
let ret = unsafe { fcntl(fd, F_SETFL, flags) };
if ret < 0 {
return errno_result();
}
Ok(())
}
/// Trait for file descriptors that are TTYs, according to `isatty(3)`.
///
/// This is marked unsafe because the implementation must promise that the returned RawFd is a valid
/// fd and that the lifetime of the returned fd is at least that of the trait object.
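///
/// # Examples
///
/// A sketch on the locked stdin handle (`no_run`: it requires a real TTY; the
/// `vmm_sys_util::terminal` import path is an assumption):
///
/// ```no_run
/// use std::io;
/// # use vmm_sys_util::terminal::Terminal;
/// let stdin_handle = io::stdin();
/// let stdin = stdin_handle.lock();
/// stdin.set_raw_mode().unwrap();
/// stdin.set_canon_mode().unwrap();
/// ```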
pub unsafe trait Terminal {
/// Gets the file descriptor of the TTY.
fn tty_fd(&self) -> RawFd;
/// Set this terminal's mode to canonical mode (`ICANON | ECHO | ISIG`).
fn set_canon_mode(&self) -> Result<()> {
modify_mode(self.tty_fd(), |t| t.c_lflag |= ICANON | ECHO | ISIG)
}
/// Set this terminal's mode to raw mode (`!(ICANON | ECHO | ISIG)`).
fn set_raw_mode(&self) -> Result<()> {
modify_mode(self.tty_fd(), |t| t.c_lflag &= !(ICANON | ECHO | ISIG))
}
/// Sets the non-blocking mode of this terminal's file descriptor.
///
/// If `non_block` is `true`, then `read_raw` will not block. If `non_block` is `false`, then
/// `read_raw` may block if there is nothing to read.
fn set_non_block(&self, non_block: bool) -> Result<()> {
let old_flags = get_flags(self.tty_fd())?;
let new_flags = if non_block {
old_flags | O_NONBLOCK
} else {
old_flags & !O_NONBLOCK
};
if new_flags != old_flags {
set_flags(self.tty_fd(), new_flags)?
}
Ok(())
}
/// Reads up to `out.len()` bytes from this terminal without any buffering.
///
/// This may block, depending on whether non-blocking mode was enabled with `set_non_block` and
/// whether there are any bytes to read. If there is at least one readable byte, this will not block.
fn read_raw(&self, out: &mut [u8]) -> Result<usize> {
// Safe because read will only modify the pointer up to the length we give it and we check
// the return result.
let ret = unsafe { read(self.tty_fd(), out.as_mut_ptr() as *mut _, out.len()) };
if ret < 0 {
return errno_result();
}
Ok(ret as usize)
}
}
// Safe because we return a genuine terminal fd that never changes and shares our lifetime.
unsafe impl<'a> Terminal for StdinLock<'a> {
fn tty_fd(&self) -> RawFd {
STDIN_FILENO
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs::File;
use std::io;
use std::os::unix::io::AsRawFd;
use std::path::Path;
unsafe impl Terminal for File {
fn tty_fd(&self) -> RawFd {
self.as_raw_fd()
}
}
#[test]
fn test_a_tty() {
let stdin_handle = io::stdin();
let stdin = stdin_handle.lock();
assert!(stdin.set_canon_mode().is_ok());
assert!(stdin.set_raw_mode().is_ok());
assert!(stdin.set_raw_mode().is_ok());
assert!(stdin.set_canon_mode().is_ok());
assert!(stdin.set_non_block(true).is_ok());
let mut out = [0u8; 0];
assert!(stdin.read_raw(&mut out[..]).is_ok());
}
#[test]
fn test_a_non_tty() {
let file = File::open(Path::new("/dev/zero")).unwrap();
assert!(file.set_canon_mode().is_ok());
}
}

View File

@ -1,173 +0,0 @@
// Copyright 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
use std::fs::File;
use std::mem;
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::ptr;
use std::time::Duration;
use libc::{self, timerfd_create, timerfd_gettime, timerfd_settime, CLOCK_MONOTONIC, TFD_CLOEXEC};
use crate::errno::{errno_result, Result};
/// A safe wrapper around a Linux timerfd (man 2 timerfd_create).
pub struct TimerFd(File);
impl TimerFd {
/// Creates a new [`TimerFd`](struct.TimerFd.html).
///
/// The timer is initially disarmed and must be armed by calling [`reset`](#method.reset).
pub fn new() -> Result<TimerFd> {
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe { timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC) };
if ret < 0 {
return errno_result();
}
// Safe because we uniquely own the file descriptor.
Ok(TimerFd(unsafe { File::from_raw_fd(ret) }))
}
/// Sets the timer to expire after `dur`.
///
/// If `interval` is not `None`, it represents the period for repeated expirations after the
/// initial expiration. Otherwise the timer will expire just once. Any previously programmed
/// expiration and interval are replaced.
pub fn reset(&mut self, dur: Duration, interval: Option<Duration>) -> Result<()> {
// Safe because we are zero-initializing a struct with only primitive member fields.
let mut spec: libc::itimerspec = unsafe { mem::zeroed() };
spec.it_value.tv_sec = dur.as_secs() as libc::time_t;
// nsec always fits in i32 because subsec_nanos is defined to be less than one billion.
let nsec = dur.subsec_nanos() as i32;
spec.it_value.tv_nsec = libc::c_long::from(nsec);
if let Some(int) = interval {
spec.it_interval.tv_sec = int.as_secs() as libc::time_t;
// nsec always fits in i32 because subsec_nanos is defined to be less than one billion.
let nsec = int.subsec_nanos() as i32;
spec.it_interval.tv_nsec = libc::c_long::from(nsec);
}
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe { timerfd_settime(self.as_raw_fd(), 0, &spec, ptr::null_mut()) };
if ret < 0 {
return errno_result();
}
Ok(())
}
/// Waits until the timer expires.
///
/// The return value represents the number of times the timer has expired since the last time
/// `wait` was called. If the timer has not yet expired once, this call blocks until it does.
pub fn wait(&mut self) -> Result<u64> {
let mut count = 0u64;
// Safe because `read` will only modify `count` and we check the return value.
let ret = unsafe {
libc::read(
self.as_raw_fd(),
&mut count as *mut _ as *mut libc::c_void,
mem::size_of_val(&count),
)
};
if ret < 0 {
return errno_result();
}
// The bytes in the buffer are guaranteed to be in native byte-order so we don't need to
// use from_le or from_be.
Ok(count)
}
/// Returns `true` if the timer is currently armed.
pub fn is_armed(&self) -> Result<bool> {
// Safe because we are zero-initializing a struct with only primitive member fields.
let mut spec: libc::itimerspec = unsafe { mem::zeroed() };
// Safe because timerfd_gettime is trusted to only modify `spec`.
let ret = unsafe { timerfd_gettime(self.as_raw_fd(), &mut spec) };
if ret < 0 {
return errno_result();
}
Ok(spec.it_value.tv_sec != 0 || spec.it_value.tv_nsec != 0)
}
/// Disarms the timer.
pub fn clear(&mut self) -> Result<()> {
// Safe because we are zero-initializing a struct with only primitive member fields.
let spec: libc::itimerspec = unsafe { mem::zeroed() };
// Safe because this doesn't modify any memory and we check the return value.
let ret = unsafe { timerfd_settime(self.as_raw_fd(), 0, &spec, ptr::null_mut()) };
if ret < 0 {
return errno_result();
}
Ok(())
}
}
impl AsRawFd for TimerFd {
fn as_raw_fd(&self) -> RawFd {
self.0.as_raw_fd()
}
}
impl FromRawFd for TimerFd {
unsafe fn from_raw_fd(fd: RawFd) -> Self {
TimerFd(File::from_raw_fd(fd))
}
}
impl IntoRawFd for TimerFd {
fn into_raw_fd(self) -> RawFd {
self.0.into_raw_fd()
}
}
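// A short usage sketch of the API above, relying only on the imports at
// the top of this file; `tick_three_times` is a hypothetical helper, not
// part of this crate. It arms a periodic 10ms timer, waits for three
// ticks, then disarms it.
fn tick_three_times() -> Result<()> {
    let mut timer = TimerFd::new()?;
    // First expiration after 10ms, then every 10ms thereafter.
    timer.reset(Duration::from_millis(10), Some(Duration::from_millis(10)))?;
    for _ in 0..3 {
        // `wait` blocks until at least one expiration has occurred and
        // returns how many elapsed since the previous call.
        let expirations = timer.wait()?;
        println!("timer fired {} time(s)", expirations);
    }
    // Disarm so the timer no longer fires.
    timer.clear()
}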
#[cfg(test)]
mod tests {
use super::*;
use std::thread::sleep;
use std::time::{Duration, Instant};
#[test]
fn test_one_shot() {
let mut tfd = TimerFd::new().expect("failed to create timerfd");
assert_eq!(tfd.is_armed().unwrap(), false);
let dur = Duration::from_millis(200);
let now = Instant::now();
tfd.reset(dur, None).expect("failed to arm timer");
assert_eq!(tfd.is_armed().unwrap(), true);
let count = tfd.wait().expect("unable to wait for timer");
assert_eq!(count, 1);
assert!(now.elapsed() >= dur);
}
#[test]
fn test_repeating() {
let mut tfd = TimerFd::new().expect("failed to create timerfd");
let dur = Duration::from_millis(200);
let interval = Duration::from_millis(100);
tfd.reset(dur, Some(interval)).expect("failed to arm timer");
sleep(dur * 3);
let count = tfd.wait().expect("unable to wait for timer");
assert!(count >= 5, "count = {}", count);
}
}


@ -1,172 +0,0 @@
// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: BSD-3-Clause
use std::cmp::min;
use std::fs::File;
use std::io::{self, Seek, SeekFrom, Write};
use crate::fallocate;
use crate::FallocateMode;
/// A trait for deallocating space in a file.
pub trait PunchHole {
/// Replace a range of bytes with a hole.
fn punch_hole(&mut self, offset: u64, length: u64) -> io::Result<()>;
}
impl PunchHole for File {
fn punch_hole(&mut self, offset: u64, length: u64) -> io::Result<()> {
fallocate(self, FallocateMode::PunchHole, true, offset, length as u64)
.map_err(|e| io::Error::from_raw_os_error(e.errno()))
}
}
/// A trait for writing zeroes to a stream.
pub trait WriteZeroes {
/// Write `length` bytes of zeroes to the stream, returning how many bytes were written.
fn write_zeroes(&mut self, length: usize) -> io::Result<usize>;
}
impl<T: PunchHole + Seek + Write> WriteZeroes for T {
fn write_zeroes(&mut self, length: usize) -> io::Result<usize> {
// Try to punch a hole first.
let offset = self.seek(SeekFrom::Current(0))?;
if let Ok(()) = self.punch_hole(offset, length as u64) {
// Advance the seek cursor as if we had done a real write().
self.seek(SeekFrom::Current(length as i64))?;
return Ok(length);
}
// punch_hole() failed or is unsupported; fall back to writing a buffer
// of zeroes until `length` bytes have been written.
let buf_size = min(length, 0x10000);
let buf = vec![0u8; buf_size];
let mut nwritten: usize = 0;
while nwritten < length {
let remaining = length - nwritten;
let write_size = min(remaining, buf_size);
nwritten += self.write(&buf[0..write_size])?;
}
Ok(length)
}
}
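// A small sketch of the blanket impl above: zero out a byte range of an
// existing file, punching a hole when the filesystem supports it and
// falling back to explicit zero writes otherwise. `zero_range` and its
// parameters are illustrative, not part of this crate.
fn zero_range(file: &mut File, offset: u64, length: usize) -> io::Result<()> {
    file.seek(SeekFrom::Start(offset))?;
    let mut remaining = length;
    // The trait contract only promises to report how many bytes were
    // zeroed, so loop until the whole range is covered.
    while remaining > 0 {
        remaining -= file.write_zeroes(remaining)?;
    }
    Ok(())
}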
#[cfg(test)]
#[allow(clippy::unused_io_amount)]
mod tests {
use super::*;
use std::fs::OpenOptions;
use std::io::{Read, Seek, SeekFrom};
use std::path::PathBuf;
use crate::TempDir;
#[test]
fn simple_test() {
let tempdir = TempDir::new("/tmp/write_zeroes_test").unwrap();
let mut path = PathBuf::from(tempdir.as_path().unwrap());
path.push("file");
let mut f = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)
.unwrap();
f.set_len(16384).unwrap();
// Write buffer of non-zero bytes to offset 1234
let orig_data = [0x55u8; 5678];
f.seek(SeekFrom::Start(1234)).unwrap();
f.write(&orig_data).unwrap();
// Read back the whole file, covering the written region and the areas around it
let mut readback = [0u8; 16384];
f.seek(SeekFrom::Start(0)).unwrap();
f.read(&mut readback).unwrap();
// Bytes before the write should still be 0
for read in readback[0..1234].iter() {
assert_eq!(*read, 0);
}
// Bytes that were just written should be 0x55
for read in readback[1234..(1234 + 5678)].iter() {
assert_eq!(*read, 0x55);
}
// Bytes after the written area should still be 0
for read in readback[(1234 + 5678)..].iter() {
assert_eq!(*read, 0);
}
// Overwrite some of the data with zeroes
f.seek(SeekFrom::Start(2345)).unwrap();
f.write_zeroes(4321).expect("write_zeroes failed");
// Verify seek position after write_zeroes()
assert_eq!(f.seek(SeekFrom::Current(0)).unwrap(), 2345 + 4321);
// Read back the data and verify that it is now zero
f.seek(SeekFrom::Start(0)).unwrap();
f.read(&mut readback).unwrap();
// Bytes before the write should still be 0
for read in readback[0..1234].iter() {
assert_eq!(*read, 0);
}
// Original data should still exist before the write_zeroes region
for read in readback[1234..2345].iter() {
assert_eq!(*read, 0x55);
}
// The write_zeroes region should now be zero
for read in readback[2345..(2345 + 4321)].iter() {
assert_eq!(*read, 0);
}
// Original data should still exist after the write_zeroes region
for read in readback[(2345 + 4321)..(1234 + 5678)].iter() {
assert_eq!(*read, 0x55);
}
// The rest of the file should still be 0
for read in readback[(1234 + 5678)..].iter() {
assert_eq!(*read, 0);
}
}
#[test]
fn large_write_zeroes() {
let tempdir = TempDir::new("/tmp/write_zeroes_test").unwrap();
let mut path = PathBuf::from(tempdir.as_path().unwrap());
path.push("file");
let mut f = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(&path)
.unwrap();
f.set_len(16384).unwrap();
// Write buffer of non-zero bytes
let orig_data = [0x55u8; 0x20000];
f.seek(SeekFrom::Start(0)).unwrap();
f.write(&orig_data).unwrap();
// Overwrite some of the data with zeroes
f.seek(SeekFrom::Start(0)).unwrap();
f.write_zeroes(0x10001).expect("write_zeroes failed");
// Verify seek position after write_zeroes()
assert_eq!(f.seek(SeekFrom::Current(0)).unwrap(), 0x10001);
// Read back the data and verify that it is now zero
let mut readback = [0u8; 0x20000];
f.seek(SeekFrom::Start(0)).unwrap();
f.read(&mut readback).unwrap();
// The write_zeroes region should now be zero
for read in readback[0..0x10001].iter() {
assert_eq!(*read, 0);
}
// Original data should still exist after the write_zeroes region
for read in readback[0x10001..0x20000].iter() {
assert_eq!(*read, 0x55);
}
}
}

Some files were not shown because too many files have changed in this diff.