misc: Fix clippy issues

Signed-off-by: Bo Chen <chen.bo@intel.com>
Bo Chen 2022-11-01 14:52:40 -07:00 committed by Sebastien Boeuf
parent 9266ea4995
commit a9ec0f33c0
35 changed files with 78 additions and 92 deletions
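
The diffs below are mechanical fixes for `cargo clippy` warnings. Most remove a cast to a type the expression already has (clippy's `unnecessary_cast`); the rest drop an `Ok(expr?)` wrapper (`needless_question_mark`), a redundant `&` on call arguments (`needless_borrow`), or replace `if b { 0 } else { 1 }` with a bool cast (`bool_to_int_with_if`). As a minimal sketch of the dominant pattern — made-up names, not code from this commit:

// `clippy::unnecessary_cast`: `size` is already `u64`, so `as u64` is a no-op.
fn round_up_to_page(size: u64) -> u64 {
    // Before: (size as u64 + 0xfff) & !0xfff
    // After: drop the redundant cast; behavior is unchanged.
    (size + 0xfff) & !0xfff
}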


@@ -764,11 +764,11 @@ pub fn arch_memory_regions(size: GuestUsize) -> Vec<(GuestAddress, usize, Region
.checked_add(layout::MEM_32BIT_DEVICES_SIZE)
.expect("32-bit reserved region is too large");
-let requested_memory_size = GuestAddress(size as u64);
+let requested_memory_size = GuestAddress(size);
let mut regions = Vec::new();
// case1: guest memory fits before the gap
-if size as u64 <= layout::MEM_32BIT_RESERVED_START.raw_value() {
+if size <= layout::MEM_32BIT_RESERVED_START.raw_value() {
regions.push((GuestAddress(0), size as usize, RegionType::Ram));
// case2: guest memory extends beyond the gap
} else {
@@ -865,7 +865,7 @@ fn configure_pvh(
start_info.0.magic = XEN_HVM_START_MAGIC_VALUE;
start_info.0.version = 1; // pvh has version 1
start_info.0.nr_modules = 0;
-start_info.0.cmdline_paddr = cmdline_addr.raw_value() as u64;
+start_info.0.cmdline_paddr = cmdline_addr.raw_value();
start_info.0.memmap_paddr = layout::MEMMAP_START.raw_value();
if let Some(rsdp_addr) = rsdp_addr {
@@ -934,7 +934,7 @@ fn configure_pvh(
add_memmap_entry(
&mut memmap,
sgx_epc_region.start().raw_value(),
-sgx_epc_region.size() as u64,
+sgx_epc_region.size(),
E820_RESERVED,
);
}
@@ -1126,7 +1126,7 @@ fn update_cpuid_sgx(
for (i, epc_section) in epc_sections.iter().enumerate() {
let subleaf_idx = i + 2;
let start = epc_section.start().raw_value();
-let size = epc_section.size() as u64;
+let size = epc_section.size();
let eax = (start & 0xffff_f000) as u32 | 0x1;
let ebx = (start >> 32) as u32;
let ecx = (size & 0xffff_f000) as u32 | (leaf.ecx & 0xf);


@@ -124,7 +124,7 @@ pub fn configure_segments_and_sregs(
mem: &GuestMemoryMmap,
sregs: &mut SpecialRegisters,
) -> Result<()> {
-let gdt_table: [u64; BOOT_GDT_MAX as usize] = {
+let gdt_table: [u64; BOOT_GDT_MAX] = {
// Configure GDT entries as specified by PVH boot protocol
[
gdt_entry(0, 0, 0), // NULL


@@ -26,7 +26,7 @@ impl DiskFile for QcowDiskSync {
fn size(&mut self) -> DiskFileResult<u64> {
let mut file = self.qcow_file.lock().unwrap();
-Ok(file.seek(SeekFrom::End(0)).map_err(DiskFileError::Size)? as u64)
+file.seek(SeekFrom::End(0)).map_err(DiskFileError::Size)
}
fn new_async_io(&self, _ring_depth: u32) -> DiskFileResult<Box<dyn AsyncIo>> {
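
The `size()` fixes in this and the following two `DiskFile` implementations share one shape: `seek(SeekFrom::End(0))` already yields a `u64`, so the `as u64` cast is a no-op, and once it is gone the body's type is exactly the function's return type, making `Ok(...?)` redundant (clippy's `needless_question_mark`). A self-contained sketch of the same transformation, assuming a plain `std::io::Result`:

use std::fs::File;
use std::io::{Seek, SeekFrom};

// Before: Ok(file.seek(SeekFrom::End(0))? as u64)
// After: return the expression directly; `Ok(expr?)` is just `expr`
// when the error types already line up.
fn file_size(file: &mut File) -> std::io::Result<u64> {
    file.seek(SeekFrom::End(0))
}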


@@ -23,10 +23,9 @@ impl RawFileDisk {
impl DiskFile for RawFileDisk {
fn size(&mut self) -> DiskFileResult<u64> {
-Ok(self
-.file
+self.file
.seek(SeekFrom::End(0))
-.map_err(DiskFileError::Size)? as u64)
+.map_err(DiskFileError::Size)
}
fn new_async_io(&self, ring_depth: u32) -> DiskFileResult<Box<dyn AsyncIo>> {


@@ -22,10 +22,9 @@ impl RawFileDiskSync {
impl DiskFile for RawFileDiskSync {
fn size(&mut self) -> DiskFileResult<u64> {
-Ok(self
-.file
+self.file
.seek(SeekFrom::End(0))
-.map_err(DiskFileError::Size)? as u64)
+.map_err(DiskFileError::Size)
}
fn new_async_io(&self, _ring_depth: u32) -> DiskFileResult<Box<dyn AsyncIo>> {
@@ -118,7 +117,7 @@ impl AsyncIo for RawFileSync {
}
if let Some(user_data) = user_data {
-self.completion_list.push((user_data, result as i32));
+self.completion_list.push((user_data, result));
self.eventfd.write(1).unwrap();
}


@@ -114,7 +114,7 @@ impl Aml for AcpiGedDevice {
&aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory(
aml::AddressSpaceCachable::NotCacheable,
true,
-self.address.0 as u64,
+self.address.0,
self.address.0 + GED_DEVICE_ACPI_SIZE as u64 - 1,
)]),
),


@@ -199,7 +199,7 @@ impl Serial {
}
fn handle_write(&mut self, offset: u8, v: u8) -> Result<()> {
-match offset as u8 {
+match offset {
DLAB_LOW if self.is_dlab_set() => {
self.baud_divisor = (self.baud_divisor & 0xff00) | u16::from(v)
}


@@ -56,7 +56,7 @@ fn get_op<T: CpuStateManager>(
OpKind::Immediate16 => insn.immediate16() as u64,
OpKind::Immediate32 => insn.immediate32() as u64,
OpKind::Immediate32to64 => insn.immediate32to64() as u64,
-OpKind::Immediate64 => insn.immediate64() as u64,
+OpKind::Immediate64 => insn.immediate64(),
k => return Err(PlatformError::InvalidOperand(anyhow!("{:?}", k))),
};


@@ -1093,7 +1093,7 @@ impl vm::Vm for MshvVm {
flags,
guest_pfn: guest_phys_addr >> PAGE_SHIFT,
size: memory_size,
-userspace_addr: userspace_addr as u64,
+userspace_addr,
}
.into()
}


@@ -413,7 +413,7 @@ impl VfioCommon {
resources: Option<Vec<Resource>>,
) -> Result<Vec<PciBarConfiguration>, PciDeviceError> {
let mut bars = Vec::new();
-let mut bar_id = VFIO_PCI_BAR0_REGION_INDEX as u32;
+let mut bar_id = VFIO_PCI_BAR0_REGION_INDEX;
// Going through all regular regions to compute the BAR size.
// We're not saving the BAR address to restore it, because we
@@ -526,7 +526,7 @@ impl VfioCommon {
}
// Invert and add 1 to to find size
-region_size = (!combined_size + 1) as u64;
+region_size = !combined_size + 1;
} else {
region_type = PciBarRegionType::Memory32BitRegion;
@@ -599,7 +599,7 @@ impl VfioCommon {
start: bar_addr,
length: region_size,
type_: region_type,
-index: bar_id as u32,
+index: bar_id,
user_memory_regions: Vec::new(),
});


@@ -276,12 +276,7 @@ impl VfioUserPciDevice {
self.client
.lock()
.unwrap()
-.dma_map(
-offset,
-region.start_addr().raw_value(),
-region.len() as u64,
-fd,
-)
+.dma_map(offset, region.start_addr().raw_value(), region.len(), fd)
.map_err(VfioUserPciDeviceError::DmaMap)
}
@@ -292,7 +287,7 @@ impl VfioUserPciDevice {
self.client
.lock()
.unwrap()
-.dma_unmap(region.start_addr().raw_value(), region.len() as u64)
+.dma_unmap(region.start_addr().raw_value(), region.len())
.map_err(VfioUserPciDeviceError::DmaUnmap)
}
}


@@ -566,7 +566,7 @@ impl QcowFile {
// Set the refcount for each refcount table cluster.
let cluster_size = 0x01u64 << qcow.header.cluster_bits;
-let refcount_table_base = qcow.header.refcount_table_offset as u64;
+let refcount_table_base = qcow.header.refcount_table_offset;
let end_cluster_addr =
refcount_table_base + u64::from(qcow.header.refcount_table_clusters) * cluster_size;
@@ -963,7 +963,7 @@ impl QcowFile {
// Gets the offset of the given guest address in the host file. If L1, L2, or data clusters have
// yet to be allocated, return None.
fn file_offset_read(&mut self, address: u64) -> std::io::Result<Option<u64>> {
-if address >= self.virtual_size() as u64 {
+if address >= self.virtual_size() {
return Err(std::io::Error::from_raw_os_error(EINVAL));
}
@@ -1006,7 +1006,7 @@ impl QcowFile {
// Gets the offset of the given guest address in the host file. If L1, L2, or data clusters need
// to be allocated, they will be.
fn file_offset_write(&mut self, address: u64) -> std::io::Result<u64> {
-if address >= self.virtual_size() as u64 {
+if address >= self.virtual_size() {
return Err(std::io::Error::from_raw_os_error(EINVAL));
}
@@ -1123,7 +1123,7 @@ impl QcowFile {
// Returns true if the cluster containing `address` is already allocated.
fn cluster_allocated(&mut self, address: u64) -> std::io::Result<bool> {
-if address >= self.virtual_size() as u64 {
+if address >= self.virtual_size() {
return Err(std::io::Error::from_raw_os_error(EINVAL));
}
@@ -1195,7 +1195,7 @@ impl QcowFile {
// Deallocate the storage for the cluster starting at `address`.
// Any future reads of this cluster will return all zeroes.
fn deallocate_cluster(&mut self, address: u64) -> std::io::Result<()> {
-if address >= self.virtual_size() as u64 {
+if address >= self.virtual_size() {
return Err(std::io::Error::from_raw_os_error(EINVAL));
}
@@ -1417,7 +1417,7 @@ impl Drop for QcowFile {
impl Read for QcowFile {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
-let address: u64 = self.current_offset as u64;
+let address: u64 = self.current_offset;
let read_count: usize = self.limit_range_file(address, buf.len());
let mut nread: usize = 0;
@@ -1479,7 +1479,7 @@ impl Seek for QcowFile {
impl Write for QcowFile {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
-let address: u64 = self.current_offset as u64;
+let address: u64 = self.current_offset;
let write_count: usize = self.limit_range_file(address, buf.len());
let mut nwritten: usize = 0;


@@ -599,7 +599,7 @@ fn main() {
}
};
-let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0;
+let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO) } != 0;
if on_tty {
// Don't forget to set the terminal in canonical mode
// before to exit.


@@ -1421,7 +1421,7 @@ pub fn parse_fio_output(output: &str, fio_ops: &FioOps, num_jobs: u32) -> Result
.expect("'fio' parse error: missing entry 'read.runtime'")
as f64
/ 1000_f64;
-total_bps += bytes as f64 / runtime as f64;
+total_bps += bytes as f64 / runtime;
}
if write {
let bytes = j["write"]["io_bytes"]
@@ -1432,7 +1432,7 @@ pub fn parse_fio_output(output: &str, fio_ops: &FioOps, num_jobs: u32) -> Result
.expect("'fio' parse error: missing entry 'write.runtime'")
as f64
/ 1000_f64;
-total_bps += bytes as f64 / runtime as f64;
+total_bps += bytes as f64 / runtime;
}
}
@@ -1477,7 +1477,7 @@ pub fn parse_fio_output_iops(output: &str, fio_ops: &FioOps, num_jobs: u32) -> R
.expect("'fio' parse error: missing entry 'read.runtime'")
as f64
/ 1000_f64;
-total_iops += ios as f64 / runtime as f64;
+total_iops += ios as f64 / runtime;
}
if write {
let ios = j["write"]["total_ios"]
@@ -1488,7 +1488,7 @@ pub fn parse_fio_output_iops(output: &str, fio_ops: &FioOps, num_jobs: u32) -> R
.expect("'fio' parse error: missing entry 'write.runtime'")
as f64
/ 1000_f64;
-total_iops += ios as f64 / runtime as f64;
+total_iops += ios as f64 / runtime;
}
}


@@ -1839,7 +1839,7 @@ fn _test_virtio_iommu(acpi: bool) {
// contains "0000:00:02.0" which is the first disk.
//
// Verify the iommu group of the first disk.
-let iommu_group = if acpi { 0 } else { 1 };
+let iommu_group = !acpi as i32;
assert_eq!(
guest
.ssh_command(
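
The `iommu_group` change is clippy's `bool_to_int_with_if`: casting a `bool` to an integer yields 0 for `false` and 1 for `true`, so `if acpi { 0 } else { 1 }` is exactly `!acpi as i32`. A standalone check of that equivalence (not code from this commit):

fn main() {
    for acpi in [false, true] {
        assert_eq!(if acpi { 0 } else { 1 }, !acpi as i32);
    }
}
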
@@ -2873,7 +2873,7 @@ mod common_parallel {
let mut os_path = os_dir.as_path().to_path_buf();
os_path.push("osdisk.img");
rate_limited_copy(
-&guest.disk_config.disk(DiskType::OperatingSystem).unwrap(),
+guest.disk_config.disk(DiskType::OperatingSystem).unwrap(),
os_path.as_path(),
)
.expect("copying of OS disk failed");
@@ -3216,7 +3216,7 @@ mod common_parallel {
format!(
"file={},size={}",
guest.disk_config.disk(DiskType::OperatingSystem).unwrap(),
-fs::metadata(&guest.disk_config.disk(DiskType::OperatingSystem).unwrap())
+fs::metadata(guest.disk_config.disk(DiskType::OperatingSystem).unwrap())
.unwrap()
.len()
)
@@ -3763,7 +3763,7 @@ mod common_parallel {
// We copy our cloudinit into the vfio mount point, for the nested
// cloud-hypervisor guest to use.
rate_limited_copy(
-&guest.disk_config.disk(DiskType::CloudInit).unwrap(),
+guest.disk_config.disk(DiskType::CloudInit).unwrap(),
&cloud_init_vfio_base_path,
)
.expect("copying of cloud-init disk failed");
@@ -6222,7 +6222,7 @@ mod common_parallel {
);
let tap_index =
-fs::read_to_string(&format!("/sys/class/net/{}/ifindex", guest_macvtap_name)).unwrap();
+fs::read_to_string(format!("/sys/class/net/{}/ifindex", guest_macvtap_name)).unwrap();
let tap_device = format!("/dev/tap{}", tap_index.trim());
assert!(
@@ -7892,7 +7892,7 @@ mod vfio {
// We copy our cloudinit into the vfio mount point, for the nested
// cloud-hypervisor guest to use.
rate_limited_copy(
-&guest.disk_config.disk(DiskType::CloudInit).unwrap(),
+guest.disk_config.disk(DiskType::CloudInit).unwrap(),
&cloud_init_vfio_base_path,
)
.expect("copying of cloud-init disk failed");
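
The `rate_limited_copy`, `fs::metadata`, and `fs::read_to_string` changes in this test file are clippy's `needless_borrow`: each callee takes a generic `AsRef` parameter, so borrowing an owned argument adds nothing. A sketch under that assumption, with a hypothetical helper:

use std::path::Path;

// Hypothetical stand-in for a callee that, like `fs::metadata`,
// accepts any `AsRef<Path>`.
fn takes_path<P: AsRef<Path>>(p: P) {
    let _ = p.as_ref();
}

fn main() {
    let disk = String::from("osdisk.img");
    takes_path(&disk); // compiles, but clippy flags the needless `&`
    takes_path(disk); // the fixed form passes the owned value directly
}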


@@ -234,7 +234,7 @@ impl VhostUserBlkBackend {
}
};
-let nsectors = (image.lock().unwrap().seek(SeekFrom::End(0)).unwrap() as u64) / SECTOR_SIZE;
+let nsectors = (image.lock().unwrap().seek(SeekFrom::End(0)).unwrap()) / SECTOR_SIZE;
let config = VirtioBlockConfig {
capacity: nsectors,
blk_size: BLK_SIZE,
@@ -305,7 +305,7 @@ impl VhostUserBackendMut<VringRwLock<GuestMemoryAtomic<GuestMemoryMmap>>, Atomic
}
fn max_queue_size(&self) -> usize {
-self.queue_size as usize
+self.queue_size
}
fn features(&self) -> u64 {


@@ -151,7 +151,7 @@ impl BalloonEpollHandler {
libc::fallocate64(
f_off.file().as_raw_fd(),
libc::FALLOC_FL_PUNCH_HOLE | libc::FALLOC_FL_KEEP_SIZE,
-(offset as u64 + f_off.start()) as libc::off64_t,
+(offset + f_off.start()) as libc::off64_t,
range_len as libc::off64_t,
)
};


@@ -229,7 +229,7 @@ impl BlockEpollHandler {
.map_err(Error::RequestStatus)?;
queue
-.add_used(mem.deref(), desc_index as u16, len)
+.add_used(mem.deref(), desc_index, len)
.map_err(Error::QueueAddUsed)?;
used_descs = true;
}


@@ -225,7 +225,7 @@ impl ConsoleEpollHandler {
while let Some(mut desc_chain) = recv_queue.pop_descriptor_chain(self.mem.memory()) {
let desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?;
-let len = cmp::min(desc.len() as u32, in_buffer.len() as u32);
+let len = cmp::min(desc.len(), in_buffer.len() as u32);
let source_slice = in_buffer.drain(..len as usize).collect::<Vec<u8>>();
desc_chain


@@ -246,7 +246,7 @@ impl VirtioMemConfig {
size,
self.region_size
)));
-} else if size % (self.block_size as u64) != 0 {
+} else if size % self.block_size != 0 {
return Err(Error::ResizeError(anyhow!(
"new size 0x{:x} is not aligned on block_size 0x{:x}",
size,


@@ -140,7 +140,7 @@ impl Blk {
}
let config_len = mem::size_of::<VirtioBlockConfig>();
-let config_space: Vec<u8> = vec![0u8; config_len as usize];
+let config_space: Vec<u8> = vec![0u8; config_len];
let (_, config_space) = vu
.socket_handle()
.get_config(


@@ -232,20 +232,13 @@ impl VhostUserMasterReqHandler for SlaveReqHandler {
pwrite64(
fd.as_raw_fd(),
ptr as *const c_void,
-len as usize,
+len,
foffset as off64_t,
)
}
} else {
debug!("read: foffset={}, len={}", foffset, len);
-unsafe {
-pread64(
-fd.as_raw_fd(),
-ptr as *mut c_void,
-len as usize,
-foffset as off64_t,
-)
-}
+unsafe { pread64(fd.as_raw_fd(), ptr as *mut c_void, len, foffset as off64_t) }
};
if ret < 0 {


@@ -68,7 +68,7 @@ impl VhostUserHandle {
let vhost_user_net_reg = VhostUserMemoryRegionInfo {
guest_phys_addr: region.start_addr().raw_value(),
-memory_size: region.len() as u64,
+memory_size: region.len(),
userspace_addr: region.as_ptr() as u64,
mmap_offset,
mmap_handle,
@@ -92,7 +92,7 @@ impl VhostUserHandle {
let region = VhostUserMemoryRegionInfo {
guest_phys_addr: region.start_addr().raw_value(),
-memory_size: region.len() as u64,
+memory_size: region.len(),
userspace_addr: region.as_ptr() as u64,
mmap_offset,
mmap_handle,
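
The recurring `region.len() as u64` fixes here and in the vmm diffs below rely on the vm-memory crate's `len()` returning `GuestUsize`, which that crate defines as `u64`, making the cast a no-op. A sketch with stand-in types, not the real crate API:

type GuestUsize = u64; // assumed to match vm-memory's definition

struct Region {
    len: GuestUsize,
}

impl Region {
    fn len(&self) -> GuestUsize {
        self.len
    }
}

fn main() {
    let r = Region { len: 0x2000 };
    let memory_size: u64 = r.len(); // no `as u64` needed
    assert_eq!(memory_size, 0x2000);
}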


@@ -647,7 +647,7 @@ where
/// buffer.
///
fn peer_avail_credit(&self) -> usize {
-(Wrapping(self.peer_buf_alloc as u32) - (self.rx_cnt - self.peer_fwd_cnt)).0 as usize
+(Wrapping(self.peer_buf_alloc) - (self.rx_cnt - self.peer_fwd_cnt)).0 as usize
}
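
Here only the `u32` cast on `peer_buf_alloc` goes away; the wrapping credit arithmetic is untouched. That computation, sketched with made-up values: the peer advertises a buffer of `buf_alloc` bytes, we have sent `rx_cnt` bytes, the peer has consumed `fwd_cnt`, and the credit is what remains.

use std::num::Wrapping;

fn main() {
    let peer_buf_alloc = Wrapping(64 * 1024u32); // peer's receive buffer
    let rx_cnt = Wrapping(10_000u32); // bytes we have sent so far
    let peer_fwd_cnt = Wrapping(4_000u32); // bytes the peer has consumed
    // In-flight bytes are rx_cnt - fwd_cnt; subtracting them from
    // buf_alloc gives how much we may still send without overruns.
    let credit = (peer_buf_alloc - (rx_cnt - peer_fwd_cnt)).0 as usize;
    assert_eq!(credit, 65_536 - 6_000);
}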
/// Prepare a packet header for transmission to our peer.
@@ -1118,7 +1118,7 @@ mod tests {
// CONN_TX_BUF_SIZE - CONN_CREDIT_UPDATE_THRESHOLD, we initialize
// fwd_cnt at 6 bytes below the threshold.
let initial_fwd_cnt =
-csm_defs::CONN_TX_BUF_SIZE as u32 - csm_defs::CONN_CREDIT_UPDATE_THRESHOLD as u32 - 6;
+csm_defs::CONN_TX_BUF_SIZE - csm_defs::CONN_CREDIT_UPDATE_THRESHOLD - 6;
ctx.conn.fwd_cnt = Wrapping(initial_fwd_cnt);
// Use a 4-byte packet for triggering the credit update threshold.


@@ -285,9 +285,9 @@ mod tests {
pub fn create_epoll_handler_context(&self) -> EpollHandlerContext {
const QSIZE: u16 = 2;
-let guest_rxvq = GuestQ::new(GuestAddress(0x0010_0000), &self.mem, QSIZE as u16);
-let guest_txvq = GuestQ::new(GuestAddress(0x0020_0000), &self.mem, QSIZE as u16);
-let guest_evvq = GuestQ::new(GuestAddress(0x0030_0000), &self.mem, QSIZE as u16);
+let guest_rxvq = GuestQ::new(GuestAddress(0x0010_0000), &self.mem, QSIZE);
+let guest_txvq = GuestQ::new(GuestAddress(0x0020_0000), &self.mem, QSIZE);
+let guest_evvq = GuestQ::new(GuestAddress(0x0030_0000), &self.mem, QSIZE);
let rxvq = guest_rxvq.create_queue();
let txvq = guest_txvq.create_queue();
let evvq = guest_evvq.create_queue();


@@ -881,7 +881,7 @@ mod tests {
peer_port: u32,
data: &[u8],
) -> &mut VsockPacket {
-assert!(data.len() <= self.pkt.buf().unwrap().len() as usize);
+assert!(data.len() <= self.pkt.buf().unwrap().len());
self.init_pkt(local_port, peer_port, uapi::VSOCK_OP_RW)
.set_len(data.len() as u32);
self.pkt.buf_mut().unwrap()[..data.len()].copy_from_slice(data);


@@ -274,7 +274,7 @@ fn create_srat_table(numa_nodes: &NumaNodes) -> Sdt {
assert_eq!(std::mem::size_of::<MemoryAffinity>(), 40);
for (node_id, node) in numa_nodes.iter() {
-let proximity_domain = *node_id as u32;
+let proximity_domain = *node_id;
for region in &node.memory_regions {
srat.append(MemoryAffinity::from_region(
@@ -346,7 +346,7 @@ fn create_slit_table(numa_nodes: &NumaNodes) -> Sdt {
let dist: u8 = if *node_id == *i {
10
} else if let Some(distance) = distances.get(i) {
-*distance as u8
+*distance
} else {
20
};


@@ -126,7 +126,7 @@ impl CpuSegment {
CpuSegment {
selector: reg.selector as u32,
-limit: reg.limit as u32,
+limit: reg.limit,
flags,
pad: 0,
base: reg.base,


@@ -1941,7 +1941,7 @@ impl Aml for CpuManager {
&aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory(
aml::AddressSpaceCachable::NotCacheable,
true,
-acpi_address.0 as u64,
+acpi_address.0,
acpi_address.0 + CPU_MANAGER_ACPI_SIZE as u64 - 1,
)]),
),
@@ -2387,8 +2387,8 @@ impl CpuElf64Writable for CpuManager {
eflags: gregs.rflags,
rsp: gregs.rsp,
ss: sregs.ss.selector as u64,
-fs_base: sregs.fs.base as u64,
-gs_base: sregs.gs.base as u64,
+fs_base: sregs.fs.base,
+gs_base: sregs.gs.base,
ds: sregs.ds.selector as u64,
es: sregs.es.selector as u64,
fs: sregs.fs.selector as u64,


@@ -3059,7 +3059,7 @@ impl DeviceManager {
vfio_container
.vfio_dma_map(
region.start_addr().raw_value(),
-region.len() as u64,
+region.len(),
region.as_ptr() as u64,
)
.map_err(DeviceManagerError::VfioDmaMap)?;
@@ -3576,7 +3576,7 @@ impl DeviceManager {
vfio_container
.vfio_dma_map(
new_region.start_addr().raw_value(),
-new_region.len() as u64,
+new_region.len(),
new_region.as_ptr() as u64,
)
.map_err(DeviceManagerError::UpdateMemoryForVfioPciDevice)?;
@@ -4197,7 +4197,7 @@ impl Aml for DeviceManager {
&aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory(
aml::AddressSpaceCachable::NotCacheable,
true,
-self.acpi_address.0 as u64,
+self.acpi_address.0,
self.acpi_address.0 + DEVICE_MANAGER_ACPI_SIZE as u64 - 1,
)]),
),


@@ -270,7 +270,7 @@ impl InterruptManager for LegacyUserspaceInterruptManager {
fn create_group(&self, config: Self::GroupConfig) -> Result<Arc<dyn InterruptSourceGroup>> {
Ok(Arc::new(LegacyUserspaceInterruptGroup::new(
self.ioapic.clone(),
-config.irq as u32,
+config.irq,
)))
}


@@ -424,7 +424,7 @@ impl Vmm {
Ok(signals) => {
self.signals = Some(signals.handle());
let exit_evt = self.exit_evt.try_clone().map_err(Error::EventFdClone)?;
-let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0;
+let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO) } != 0;
let signal_handler_seccomp_filter = get_seccomp_filter(
&self.seccomp_action,
@@ -1451,7 +1451,7 @@ impl Vmm {
send_data_migration: VmSendMigrationData,
) -> result::Result<(), MigratableError> {
let path = Self::socket_url_to_path(&send_data_migration.destination_url)?;
-let mut socket = UnixStream::connect(&path).map_err(|e| {
+let mut socket = UnixStream::connect(path).map_err(|e| {
MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e))
})?;


@@ -790,7 +790,7 @@ impl MemoryManager {
for (region, virtio_mem) in regions {
let slot = self.create_userspace_mapping(
region.start_addr().raw_value(),
-region.len() as u64,
+region.len(),
region.as_ptr() as u64,
self.mergeable,
false,
@@ -1430,7 +1430,7 @@ impl MemoryManager {
// Map it into the guest
let slot = self.create_userspace_mapping(
region.start_addr().0,
-region.len() as u64,
+region.len(),
region.as_ptr() as u64,
self.mergeable,
false,
@@ -1487,7 +1487,7 @@ impl MemoryManager {
slot.active = true;
slot.inserting = true;
slot.base = region.start_addr().0;
-slot.length = region.len() as u64;
+slot.length = region.len();
self.next_hotplug_slot += 1;
@@ -1872,7 +1872,7 @@ impl MemoryManager {
table.push(MemoryRange {
gpa: region.start_addr().raw_value(),
-length: region.len() as u64,
+length: region.len(),
});
}
}
@@ -2265,7 +2265,7 @@ impl Aml for MemoryManager {
&aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory(
aml::AddressSpaceCachable::NotCacheable,
true,
-acpi_address.0 as u64,
+acpi_address.0,
acpi_address.0 + MEMORY_MANAGER_ACPI_SIZE as u64 - 1,
)]),
),
@@ -2343,8 +2343,8 @@ impl Aml for MemoryManager {
#[cfg(target_arch = "x86_64")]
{
if let Some(sgx_epc_region) = &self.sgx_epc_region {
-let min = sgx_epc_region.start().raw_value() as u64;
-let max = min + sgx_epc_region.size() as u64 - 1;
+let min = sgx_epc_region.start().raw_value();
+let max = min + sgx_epc_region.size() - 1;
// SGX EPC region
aml::Device::new(
"_SB_.EPC_".into(),


@@ -159,7 +159,7 @@ impl PciSegment {
// There are 32 devices on the PCI bus, let's assign them an IRQ.
for i in 0..32 {
-pci_irq_slots[i] = irqs[(i % num_irqs) as usize];
+pci_irq_slots[i] = irqs[(i % num_irqs)];
}
Ok(())


@@ -586,7 +586,7 @@ impl Vm {
)
.map_err(Error::CpuManager)?;
-let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0;
+let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO) } != 0;
#[cfg(feature = "tdx")]
let kernel = config
@@ -3336,7 +3336,7 @@ pub fn test_vm() {
let mem_region = vm.make_user_memory_region(
index as u32,
region.start_addr().raw_value(),
-region.len() as u64,
+region.len(),
region.as_ptr() as u64,
false,
false,