diff --git a/arch/src/x86_64/mod.rs b/arch/src/x86_64/mod.rs index 9af0eaf05..335867e12 100644 --- a/arch/src/x86_64/mod.rs +++ b/arch/src/x86_64/mod.rs @@ -764,11 +764,11 @@ pub fn arch_memory_regions(size: GuestUsize) -> Vec<(GuestAddress, usize, Region .checked_add(layout::MEM_32BIT_DEVICES_SIZE) .expect("32-bit reserved region is too large"); - let requested_memory_size = GuestAddress(size as u64); + let requested_memory_size = GuestAddress(size); let mut regions = Vec::new(); // case1: guest memory fits before the gap - if size as u64 <= layout::MEM_32BIT_RESERVED_START.raw_value() { + if size <= layout::MEM_32BIT_RESERVED_START.raw_value() { regions.push((GuestAddress(0), size as usize, RegionType::Ram)); // case2: guest memory extends beyond the gap } else { @@ -865,7 +865,7 @@ fn configure_pvh( start_info.0.magic = XEN_HVM_START_MAGIC_VALUE; start_info.0.version = 1; // pvh has version 1 start_info.0.nr_modules = 0; - start_info.0.cmdline_paddr = cmdline_addr.raw_value() as u64; + start_info.0.cmdline_paddr = cmdline_addr.raw_value(); start_info.0.memmap_paddr = layout::MEMMAP_START.raw_value(); if let Some(rsdp_addr) = rsdp_addr { @@ -934,7 +934,7 @@ fn configure_pvh( add_memmap_entry( &mut memmap, sgx_epc_region.start().raw_value(), - sgx_epc_region.size() as u64, + sgx_epc_region.size(), E820_RESERVED, ); } @@ -1126,7 +1126,7 @@ fn update_cpuid_sgx( for (i, epc_section) in epc_sections.iter().enumerate() { let subleaf_idx = i + 2; let start = epc_section.start().raw_value(); - let size = epc_section.size() as u64; + let size = epc_section.size(); let eax = (start & 0xffff_f000) as u32 | 0x1; let ebx = (start >> 32) as u32; let ecx = (size & 0xffff_f000) as u32 | (leaf.ecx & 0xf); diff --git a/arch/src/x86_64/regs.rs b/arch/src/x86_64/regs.rs index 1bb2af994..762777515 100644 --- a/arch/src/x86_64/regs.rs +++ b/arch/src/x86_64/regs.rs @@ -124,7 +124,7 @@ pub fn configure_segments_and_sregs( mem: &GuestMemoryMmap, sregs: &mut SpecialRegisters, ) -> Result<()> { - let gdt_table: [u64; BOOT_GDT_MAX as usize] = { + let gdt_table: [u64; BOOT_GDT_MAX] = { // Configure GDT entries as specified by PVH boot protocol [ gdt_entry(0, 0, 0), // NULL diff --git a/block_util/src/qcow_sync.rs b/block_util/src/qcow_sync.rs index b4b4b1cb3..990c4ec2d 100644 --- a/block_util/src/qcow_sync.rs +++ b/block_util/src/qcow_sync.rs @@ -26,7 +26,7 @@ impl DiskFile for QcowDiskSync { fn size(&mut self) -> DiskFileResult<u64> { let mut file = self.qcow_file.lock().unwrap(); - Ok(file.seek(SeekFrom::End(0)).map_err(DiskFileError::Size)? as u64) + file.seek(SeekFrom::End(0)).map_err(DiskFileError::Size) } fn new_async_io(&self, _ring_depth: u32) -> DiskFileResult<Box<dyn AsyncIo>> { diff --git a/block_util/src/raw_async.rs b/block_util/src/raw_async.rs index 1c47c2f52..aab48ed87 100644 --- a/block_util/src/raw_async.rs +++ b/block_util/src/raw_async.rs @@ -23,10 +23,9 @@ impl RawFileDisk { impl DiskFile for RawFileDisk { fn size(&mut self) -> DiskFileResult<u64> { - Ok(self - .file + self.file .seek(SeekFrom::End(0)) - .map_err(DiskFileError::Size)? as u64) + .map_err(DiskFileError::Size) } fn new_async_io(&self, ring_depth: u32) -> DiskFileResult<Box<dyn AsyncIo>> { diff --git a/block_util/src/raw_sync.rs b/block_util/src/raw_sync.rs index 0fcbdc6a9..ed6f1be96 100644 --- a/block_util/src/raw_sync.rs +++ b/block_util/src/raw_sync.rs @@ -22,10 +22,9 @@ impl RawFileDiskSync { impl DiskFile for RawFileDiskSync { fn size(&mut self) -> DiskFileResult<u64> { - Ok(self - .file + self.file .seek(SeekFrom::End(0)) - .map_err(DiskFileError::Size)?
as u64) + .map_err(DiskFileError::Size) } fn new_async_io(&self, _ring_depth: u32) -> DiskFileResult<Box<dyn AsyncIo>> { @@ -118,7 +117,7 @@ impl AsyncIo for RawFileSync { } if let Some(user_data) = user_data { - self.completion_list.push((user_data, result as i32)); + self.completion_list.push((user_data, result)); self.eventfd.write(1).unwrap(); } diff --git a/devices/src/acpi.rs b/devices/src/acpi.rs index 3c71e0405..34de716a6 100644 --- a/devices/src/acpi.rs +++ b/devices/src/acpi.rs @@ -114,7 +114,7 @@ impl Aml for AcpiGedDevice { &aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory( aml::AddressSpaceCachable::NotCacheable, true, - self.address.0 as u64, + self.address.0, self.address.0 + GED_DEVICE_ACPI_SIZE as u64 - 1, )]), ), diff --git a/devices/src/legacy/serial.rs b/devices/src/legacy/serial.rs index 2fb528159..6c870b5b1 100644 --- a/devices/src/legacy/serial.rs +++ b/devices/src/legacy/serial.rs @@ -199,7 +199,7 @@ impl Serial { } fn handle_write(&mut self, offset: u8, v: u8) -> Result<()> { - match offset as u8 { + match offset { DLAB_LOW if self.is_dlab_set() => { self.baud_divisor = (self.baud_divisor & 0xff00) | u16::from(v) } diff --git a/hypervisor/src/arch/x86/emulator/instructions/mod.rs b/hypervisor/src/arch/x86/emulator/instructions/mod.rs index ef10df58f..3fac495ce 100644 --- a/hypervisor/src/arch/x86/emulator/instructions/mod.rs +++ b/hypervisor/src/arch/x86/emulator/instructions/mod.rs @@ -56,7 +56,7 @@ fn get_op( OpKind::Immediate16 => insn.immediate16() as u64, OpKind::Immediate32 => insn.immediate32() as u64, OpKind::Immediate32to64 => insn.immediate32to64() as u64, - OpKind::Immediate64 => insn.immediate64() as u64, + OpKind::Immediate64 => insn.immediate64(), k => return Err(PlatformError::InvalidOperand(anyhow!("{:?}", k))), }; diff --git a/hypervisor/src/mshv/mod.rs b/hypervisor/src/mshv/mod.rs index 766dc4925..507cef207 100644 --- a/hypervisor/src/mshv/mod.rs +++ b/hypervisor/src/mshv/mod.rs @@ -1093,7 +1093,7 @@ impl vm::Vm for MshvVm { flags, guest_pfn: guest_phys_addr >> PAGE_SHIFT, size: memory_size, - userspace_addr: userspace_addr as u64, + userspace_addr, } .into() } diff --git a/pci/src/vfio.rs b/pci/src/vfio.rs index ce6e4d185..de2948b33 100644 --- a/pci/src/vfio.rs +++ b/pci/src/vfio.rs @@ -413,7 +413,7 @@ impl VfioCommon { resources: Option<Vec<Resource>>, ) -> Result<Vec<PciBarConfiguration>, PciDeviceError> { let mut bars = Vec::new(); - let mut bar_id = VFIO_PCI_BAR0_REGION_INDEX as u32; + let mut bar_id = VFIO_PCI_BAR0_REGION_INDEX; // Going through all regular regions to compute the BAR size.
// We're not saving the BAR address to restore it, because we @@ -526,7 +526,7 @@ impl VfioCommon { } // Invert and add 1 to find size - region_size = (!combined_size + 1) as u64; + region_size = !combined_size + 1; } else { region_type = PciBarRegionType::Memory32BitRegion; @@ -599,7 +599,7 @@ impl VfioCommon { start: bar_addr, length: region_size, type_: region_type, - index: bar_id as u32, + index: bar_id, user_memory_regions: Vec::new(), }); diff --git a/pci/src/vfio_user.rs b/pci/src/vfio_user.rs index 5e8259ab3..95c654196 100644 --- a/pci/src/vfio_user.rs +++ b/pci/src/vfio_user.rs @@ -276,12 +276,7 @@ impl VfioUserPciDevice { self.client .lock() .unwrap() - .dma_map( - offset, - region.start_addr().raw_value(), - region.len() as u64, - fd, - ) + .dma_map(offset, region.start_addr().raw_value(), region.len(), fd) .map_err(VfioUserPciDeviceError::DmaMap) } @@ -292,7 +287,7 @@ impl VfioUserPciDevice { self.client .lock() .unwrap() - .dma_unmap(region.start_addr().raw_value(), region.len() as u64) + .dma_unmap(region.start_addr().raw_value(), region.len()) .map_err(VfioUserPciDeviceError::DmaUnmap) } } diff --git a/qcow/src/qcow.rs b/qcow/src/qcow.rs index c0157baa4..c22e32dcb 100644 --- a/qcow/src/qcow.rs +++ b/qcow/src/qcow.rs @@ -566,7 +566,7 @@ impl QcowFile { // Set the refcount for each refcount table cluster. let cluster_size = 0x01u64 << qcow.header.cluster_bits; - let refcount_table_base = qcow.header.refcount_table_offset as u64; + let refcount_table_base = qcow.header.refcount_table_offset; let end_cluster_addr = refcount_table_base + u64::from(qcow.header.refcount_table_clusters) * cluster_size; @@ -963,7 +963,7 @@ impl QcowFile { // Gets the offset of the given guest address in the host file. If L1, L2, or data clusters have // yet to be allocated, return None. fn file_offset_read(&mut self, address: u64) -> std::io::Result<Option<u64>> { - if address >= self.virtual_size() as u64 { + if address >= self.virtual_size() { return Err(std::io::Error::from_raw_os_error(EINVAL)); } @@ -1006,7 +1006,7 @@ impl QcowFile { // Gets the offset of the given guest address in the host file. If L1, L2, or data clusters need // to be allocated, they will be. fn file_offset_write(&mut self, address: u64) -> std::io::Result<u64> { - if address >= self.virtual_size() as u64 { + if address >= self.virtual_size() { return Err(std::io::Error::from_raw_os_error(EINVAL)); } @@ -1123,7 +1123,7 @@ impl QcowFile { // Returns true if the cluster containing `address` is already allocated. fn cluster_allocated(&mut self, address: u64) -> std::io::Result<bool> { - if address >= self.virtual_size() as u64 { + if address >= self.virtual_size() { return Err(std::io::Error::from_raw_os_error(EINVAL)); } @@ -1195,7 +1195,7 @@ impl QcowFile { // Deallocate the storage for the cluster starting at `address`. // Any future reads of this cluster will return all zeroes.
fn deallocate_cluster(&mut self, address: u64) -> std::io::Result<()> { - if address >= self.virtual_size() as u64 { + if address >= self.virtual_size() { return Err(std::io::Error::from_raw_os_error(EINVAL)); } @@ -1417,7 +1417,7 @@ impl Drop for QcowFile { impl Read for QcowFile { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { - let address: u64 = self.current_offset as u64; + let address: u64 = self.current_offset; let read_count: usize = self.limit_range_file(address, buf.len()); let mut nread: usize = 0; @@ -1479,7 +1479,7 @@ impl Seek for QcowFile { impl Write for QcowFile { fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> { - let address: u64 = self.current_offset as u64; + let address: u64 = self.current_offset; let write_count: usize = self.limit_range_file(address, buf.len()); let mut nwritten: usize = 0; diff --git a/src/main.rs b/src/main.rs index b892f38e3..4d715d246 100644 --- a/src/main.rs +++ b/src/main.rs @@ -599,7 +599,7 @@ fn main() { } }; - let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0; + let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO) } != 0; if on_tty { // Don't forget to set the terminal in canonical mode // before exiting. diff --git a/test_infra/src/lib.rs b/test_infra/src/lib.rs index 8a4361d92..3d7f73d1c 100644 --- a/test_infra/src/lib.rs +++ b/test_infra/src/lib.rs @@ -1421,7 +1421,7 @@ pub fn parse_fio_output(output: &str, fio_ops: &FioOps, num_jobs: u32) -> Result .expect("'fio' parse error: missing entry 'read.runtime'") as f64 / 1000_f64; - total_bps += bytes as f64 / runtime as f64; + total_bps += bytes as f64 / runtime; } if write { let bytes = j["write"]["io_bytes"] .expect("'fio' parse error: missing entry 'write.runtime'") as f64 / 1000_f64; - total_bps += bytes as f64 / runtime as f64; + total_bps += bytes as f64 / runtime; } } @@ -1477,7 +1477,7 @@ pub fn parse_fio_output_iops(output: &str, fio_ops: &FioOps, num_jobs: u32) -> R .expect("'fio' parse error: missing entry 'read.runtime'") as f64 / 1000_f64; - total_iops += ios as f64 / runtime as f64; + total_iops += ios as f64 / runtime; } if write { let ios = j["write"]["total_ios"] .expect("'fio' parse error: missing entry 'write.runtime'") as f64 / 1000_f64; - total_iops += ios as f64 / runtime as f64; + total_iops += ios as f64 / runtime; } } diff --git a/tests/integration.rs b/tests/integration.rs index bf63a3601..6db27d4df 100644 --- a/tests/integration.rs +++ b/tests/integration.rs @@ -1839,7 +1839,7 @@ fn _test_virtio_iommu(acpi: bool) { // contains "0000:00:02.0" which is the first disk. // // Verify the iommu group of the first disk.
- let iommu_group = if acpi { 0 } else { 1 }; + let iommu_group = !acpi as i32; assert_eq!( guest .ssh_command( @@ -2873,7 +2873,7 @@ mod common_parallel { let mut os_path = os_dir.as_path().to_path_buf(); os_path.push("osdisk.img"); rate_limited_copy( - &guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), + guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), os_path.as_path(), ) .expect("copying of OS disk failed"); @@ -3216,7 +3216,7 @@ mod common_parallel { format!( "file={},size={}", guest.disk_config.disk(DiskType::OperatingSystem).unwrap(), - fs::metadata(&guest.disk_config.disk(DiskType::OperatingSystem).unwrap()) + fs::metadata(guest.disk_config.disk(DiskType::OperatingSystem).unwrap()) .unwrap() .len() ) @@ -3763,7 +3763,7 @@ mod common_parallel { // We copy our cloudinit into the vfio mount point, for the nested // cloud-hypervisor guest to use. rate_limited_copy( - &guest.disk_config.disk(DiskType::CloudInit).unwrap(), + guest.disk_config.disk(DiskType::CloudInit).unwrap(), &cloud_init_vfio_base_path, ) .expect("copying of cloud-init disk failed"); @@ -6222,7 +6222,7 @@ mod common_parallel { ); let tap_index = - fs::read_to_string(&format!("/sys/class/net/{}/ifindex", guest_macvtap_name)).unwrap(); + fs::read_to_string(format!("/sys/class/net/{}/ifindex", guest_macvtap_name)).unwrap(); let tap_device = format!("/dev/tap{}", tap_index.trim()); assert!( @@ -7892,7 +7892,7 @@ mod vfio { // We copy our cloudinit into the vfio mount point, for the nested // cloud-hypervisor guest to use. rate_limited_copy( - &guest.disk_config.disk(DiskType::CloudInit).unwrap(), + guest.disk_config.disk(DiskType::CloudInit).unwrap(), &cloud_init_vfio_base_path, ) .expect("copying of cloud-init disk failed"); diff --git a/vhost_user_block/src/lib.rs b/vhost_user_block/src/lib.rs index 059d74b20..c6863fce3 100644 --- a/vhost_user_block/src/lib.rs +++ b/vhost_user_block/src/lib.rs @@ -234,7 +234,7 @@ impl VhostUserBlkBackend { } }; - let nsectors = (image.lock().unwrap().seek(SeekFrom::End(0)).unwrap() as u64) / SECTOR_SIZE; + let nsectors = image.lock().unwrap().seek(SeekFrom::End(0)).unwrap() / SECTOR_SIZE; let config = VirtioBlockConfig { capacity: nsectors, blk_size: BLK_SIZE, @@ -305,7 +305,7 @@ impl VhostUserBackendMut<VringRwLock<GuestMemoryAtomic<GuestMemoryMmap>>, Atomic } fn max_queue_size(&self) -> usize { - self.queue_size as usize + self.queue_size } fn features(&self) -> u64 { diff --git a/virtio-devices/src/balloon.rs b/virtio-devices/src/balloon.rs index 2e52d0b20..eb0823f62 100644 --- a/virtio-devices/src/balloon.rs +++ b/virtio-devices/src/balloon.rs @@ -151,7 +151,7 @@ impl BalloonEpollHandler { libc::fallocate64( f_off.file().as_raw_fd(), libc::FALLOC_FL_PUNCH_HOLE | libc::FALLOC_FL_KEEP_SIZE, - (offset as u64 + f_off.start()) as libc::off64_t, + (offset + f_off.start()) as libc::off64_t, range_len as libc::off64_t, ) }; diff --git a/virtio-devices/src/block.rs b/virtio-devices/src/block.rs index fdc9b664b..ae2a5518b 100644 --- a/virtio-devices/src/block.rs +++ b/virtio-devices/src/block.rs @@ -229,7 +229,7 @@ impl BlockEpollHandler { .map_err(Error::RequestStatus)?; queue - .add_used(mem.deref(), desc_index as u16, len) + .add_used(mem.deref(), desc_index, len) .map_err(Error::QueueAddUsed)?; used_descs = true; } diff --git a/virtio-devices/src/console.rs b/virtio-devices/src/console.rs index 024cb3c02..fe3dab4ad 100644 --- a/virtio-devices/src/console.rs +++ b/virtio-devices/src/console.rs @@ -225,7 +225,7 @@ impl ConsoleEpollHandler { while let Some(mut desc_chain) =
recv_queue.pop_descriptor_chain(self.mem.memory()) { let desc = desc_chain.next().ok_or(Error::DescriptorChainTooShort)?; - let len = cmp::min(desc.len() as u32, in_buffer.len() as u32); + let len = cmp::min(desc.len(), in_buffer.len() as u32); let source_slice = in_buffer.drain(..len as usize).collect::<Vec<u8>>(); desc_chain diff --git a/virtio-devices/src/mem.rs b/virtio-devices/src/mem.rs index e00e8efd9..96378e1ad 100644 --- a/virtio-devices/src/mem.rs +++ b/virtio-devices/src/mem.rs @@ -246,7 +246,7 @@ impl VirtioMemConfig { size, self.region_size ))); - } else if size % (self.block_size as u64) != 0 { + } else if size % self.block_size != 0 { return Err(Error::ResizeError(anyhow!( "new size 0x{:x} is not aligned on block_size 0x{:x}", size, diff --git a/virtio-devices/src/vhost_user/blk.rs b/virtio-devices/src/vhost_user/blk.rs index a8ae96197..e67568b80 100644 --- a/virtio-devices/src/vhost_user/blk.rs +++ b/virtio-devices/src/vhost_user/blk.rs @@ -140,7 +140,7 @@ impl Blk { } let config_len = mem::size_of::<VirtioBlockConfig>(); - let config_space: Vec<u8> = vec![0u8; config_len as usize]; + let config_space: Vec<u8> = vec![0u8; config_len]; let (_, config_space) = vu .socket_handle() .get_config( diff --git a/virtio-devices/src/vhost_user/fs.rs b/virtio-devices/src/vhost_user/fs.rs index 81c469b8b..b2a07d04a 100644 --- a/virtio-devices/src/vhost_user/fs.rs +++ b/virtio-devices/src/vhost_user/fs.rs @@ -232,20 +232,13 @@ impl VhostUserMasterReqHandler for SlaveReqHandler { pwrite64( fd.as_raw_fd(), ptr as *const c_void, - len as usize, + len, foffset as off64_t, ) } } else { debug!("read: foffset={}, len={}", foffset, len); - unsafe { - pread64( - fd.as_raw_fd(), - ptr as *mut c_void, - len as usize, - foffset as off64_t, - ) - } + unsafe { pread64(fd.as_raw_fd(), ptr as *mut c_void, len, foffset as off64_t) } }; if ret < 0 { diff --git a/virtio-devices/src/vhost_user/vu_common_ctrl.rs b/virtio-devices/src/vhost_user/vu_common_ctrl.rs index 0ab0bdda2..b077a5817 100644 --- a/virtio-devices/src/vhost_user/vu_common_ctrl.rs +++ b/virtio-devices/src/vhost_user/vu_common_ctrl.rs @@ -68,7 +68,7 @@ impl VhostUserHandle { let vhost_user_net_reg = VhostUserMemoryRegionInfo { guest_phys_addr: region.start_addr().raw_value(), - memory_size: region.len() as u64, + memory_size: region.len(), userspace_addr: region.as_ptr() as u64, mmap_offset, mmap_handle, @@ -92,7 +92,7 @@ impl VhostUserHandle { let region = VhostUserMemoryRegionInfo { guest_phys_addr: region.start_addr().raw_value(), - memory_size: region.len() as u64, + memory_size: region.len(), userspace_addr: region.as_ptr() as u64, mmap_offset, mmap_handle, diff --git a/virtio-devices/src/vsock/csm/connection.rs b/virtio-devices/src/vsock/csm/connection.rs index 6ae59cb98..3d5c7b9f9 100644 --- a/virtio-devices/src/vsock/csm/connection.rs +++ b/virtio-devices/src/vsock/csm/connection.rs @@ -647,7 +647,7 @@ where /// buffer. /// fn peer_avail_credit(&self) -> usize { - (Wrapping(self.peer_buf_alloc as u32) - (self.rx_cnt - self.peer_fwd_cnt)).0 as usize + (Wrapping(self.peer_buf_alloc) - (self.rx_cnt - self.peer_fwd_cnt)).0 as usize } /// Prepare a packet header for transmission to our peer. @@ -1118,7 +1118,7 @@ mod tests { // CONN_TX_BUF_SIZE - CONN_CREDIT_UPDATE_THRESHOLD, we initialize // fwd_cnt at 6 bytes below the threshold.
let initial_fwd_cnt = - csm_defs::CONN_TX_BUF_SIZE as u32 - csm_defs::CONN_CREDIT_UPDATE_THRESHOLD as u32 - 6; + csm_defs::CONN_TX_BUF_SIZE - csm_defs::CONN_CREDIT_UPDATE_THRESHOLD - 6; ctx.conn.fwd_cnt = Wrapping(initial_fwd_cnt); // Use a 4-byte packet for triggering the credit update threshold. diff --git a/virtio-devices/src/vsock/mod.rs b/virtio-devices/src/vsock/mod.rs index 53c43a2c4..742980612 100644 --- a/virtio-devices/src/vsock/mod.rs +++ b/virtio-devices/src/vsock/mod.rs @@ -285,9 +285,9 @@ mod tests { pub fn create_epoll_handler_context(&self) -> EpollHandlerContext { const QSIZE: u16 = 2; - let guest_rxvq = GuestQ::new(GuestAddress(0x0010_0000), &self.mem, QSIZE as u16); - let guest_txvq = GuestQ::new(GuestAddress(0x0020_0000), &self.mem, QSIZE as u16); - let guest_evvq = GuestQ::new(GuestAddress(0x0030_0000), &self.mem, QSIZE as u16); + let guest_rxvq = GuestQ::new(GuestAddress(0x0010_0000), &self.mem, QSIZE); + let guest_txvq = GuestQ::new(GuestAddress(0x0020_0000), &self.mem, QSIZE); + let guest_evvq = GuestQ::new(GuestAddress(0x0030_0000), &self.mem, QSIZE); let rxvq = guest_rxvq.create_queue(); let txvq = guest_txvq.create_queue(); let evvq = guest_evvq.create_queue(); diff --git a/virtio-devices/src/vsock/unix/muxer.rs b/virtio-devices/src/vsock/unix/muxer.rs index 319f046b5..1cfd7e256 100644 --- a/virtio-devices/src/vsock/unix/muxer.rs +++ b/virtio-devices/src/vsock/unix/muxer.rs @@ -881,7 +881,7 @@ mod tests { peer_port: u32, data: &[u8], ) -> &mut VsockPacket { - assert!(data.len() <= self.pkt.buf().unwrap().len() as usize); + assert!(data.len() <= self.pkt.buf().unwrap().len()); self.init_pkt(local_port, peer_port, uapi::VSOCK_OP_RW) .set_len(data.len() as u32); self.pkt.buf_mut().unwrap()[..data.len()].copy_from_slice(data); diff --git a/vmm/src/acpi.rs b/vmm/src/acpi.rs index fefcb90d1..2527af7d1 100644 --- a/vmm/src/acpi.rs +++ b/vmm/src/acpi.rs @@ -274,7 +274,7 @@ fn create_srat_table(numa_nodes: &NumaNodes) -> Sdt { assert_eq!(std::mem::size_of::<MemoryAffinity>(), 40); for (node_id, node) in numa_nodes.iter() { - let proximity_domain = *node_id as u32; + let proximity_domain = *node_id; for region in &node.memory_regions { srat.append(MemoryAffinity::from_region( @@ -346,7 +346,7 @@ fn create_slit_table(numa_nodes: &NumaNodes) -> Sdt { let dist: u8 = if *node_id == *i { 10 } else if let Some(distance) = distances.get(i) { - *distance as u8 + *distance } else { 20 }; diff --git a/vmm/src/coredump.rs b/vmm/src/coredump.rs index 11376472d..99647c180 100644 --- a/vmm/src/coredump.rs +++ b/vmm/src/coredump.rs @@ -126,7 +126,7 @@ impl CpuSegment { CpuSegment { selector: reg.selector as u32, - limit: reg.limit as u32, + limit: reg.limit, flags, pad: 0, base: reg.base, diff --git a/vmm/src/cpu.rs b/vmm/src/cpu.rs index 150e0789d..0728eb2af 100644 --- a/vmm/src/cpu.rs +++ b/vmm/src/cpu.rs @@ -1941,7 +1941,7 @@ impl Aml for CpuManager { &aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory( aml::AddressSpaceCachable::NotCacheable, true, - acpi_address.0 as u64, + acpi_address.0, acpi_address.0 + CPU_MANAGER_ACPI_SIZE as u64 - 1, )]), ), @@ -2387,8 +2387,8 @@ impl CpuElf64Writable for CpuManager { eflags: gregs.rflags, rsp: gregs.rsp, ss: sregs.ss.selector as u64, - fs_base: sregs.fs.base as u64, - gs_base: sregs.gs.base as u64, + fs_base: sregs.fs.base, + gs_base: sregs.gs.base, ds: sregs.ds.selector as u64, es: sregs.es.selector as u64, fs: sregs.fs.selector as u64, diff --git a/vmm/src/device_manager.rs b/vmm/src/device_manager.rs index e00ae20f3..794cffabc 100644 ---
a/vmm/src/device_manager.rs +++ b/vmm/src/device_manager.rs @@ -3059,7 +3059,7 @@ impl DeviceManager { vfio_container .vfio_dma_map( region.start_addr().raw_value(), - region.len() as u64, + region.len(), region.as_ptr() as u64, ) .map_err(DeviceManagerError::VfioDmaMap)?; @@ -3576,7 +3576,7 @@ impl DeviceManager { vfio_container .vfio_dma_map( new_region.start_addr().raw_value(), - new_region.len() as u64, + new_region.len(), new_region.as_ptr() as u64, ) .map_err(DeviceManagerError::UpdateMemoryForVfioPciDevice)?; @@ -4197,7 +4197,7 @@ impl Aml for DeviceManager { &aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory( aml::AddressSpaceCachable::NotCacheable, true, - self.acpi_address.0 as u64, + self.acpi_address.0, self.acpi_address.0 + DEVICE_MANAGER_ACPI_SIZE as u64 - 1, )]), ), diff --git a/vmm/src/interrupt.rs b/vmm/src/interrupt.rs index 464fac29e..074392e49 100644 --- a/vmm/src/interrupt.rs +++ b/vmm/src/interrupt.rs @@ -270,7 +270,7 @@ impl InterruptManager for LegacyUserspaceInterruptManager { fn create_group(&self, config: Self::GroupConfig) -> Result<Arc<dyn InterruptSourceGroup>> { Ok(Arc::new(LegacyUserspaceInterruptGroup::new( self.ioapic.clone(), - config.irq as u32, + config.irq, ))) } diff --git a/vmm/src/lib.rs b/vmm/src/lib.rs index 2483c53dd..0b7224942 100644 --- a/vmm/src/lib.rs +++ b/vmm/src/lib.rs @@ -424,7 +424,7 @@ impl Vmm { Ok(signals) => { self.signals = Some(signals.handle()); let exit_evt = self.exit_evt.try_clone().map_err(Error::EventFdClone)?; - let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0; + let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO) } != 0; let signal_handler_seccomp_filter = get_seccomp_filter( &self.seccomp_action, @@ -1451,7 +1451,7 @@ impl Vmm { send_data_migration: VmSendMigrationData, ) -> result::Result<(), MigratableError> { let path = Self::socket_url_to_path(&send_data_migration.destination_url)?; - let mut socket = UnixStream::connect(&path).map_err(|e| { + let mut socket = UnixStream::connect(path).map_err(|e| { MigratableError::MigrateSend(anyhow!("Error connecting to UNIX socket: {}", e)) })?; diff --git a/vmm/src/memory_manager.rs b/vmm/src/memory_manager.rs index d422a2ab9..b3d936a31 100644 --- a/vmm/src/memory_manager.rs +++ b/vmm/src/memory_manager.rs @@ -790,7 +790,7 @@ impl MemoryManager { for (region, virtio_mem) in regions { let slot = self.create_userspace_mapping( region.start_addr().raw_value(), - region.len() as u64, + region.len(), region.as_ptr() as u64, self.mergeable, false, @@ -1430,7 +1430,7 @@ impl MemoryManager { // Map it into the guest let slot = self.create_userspace_mapping( region.start_addr().0, - region.len() as u64, + region.len(), region.as_ptr() as u64, self.mergeable, false, @@ -1487,7 +1487,7 @@ impl MemoryManager { slot.active = true; slot.inserting = true; slot.base = region.start_addr().0; - slot.length = region.len() as u64; + slot.length = region.len(); self.next_hotplug_slot += 1; @@ -1872,7 +1872,7 @@ impl MemoryManager { table.push(MemoryRange { gpa: region.start_addr().raw_value(), - length: region.len() as u64, + length: region.len(), }); } } @@ -2265,7 +2265,7 @@ impl Aml for MemoryManager { &aml::ResourceTemplate::new(vec![&aml::AddressSpace::new_memory( aml::AddressSpaceCachable::NotCacheable, true, - acpi_address.0 as u64, + acpi_address.0, acpi_address.0 + MEMORY_MANAGER_ACPI_SIZE as u64 - 1, )]), ), @@ -2343,8 +2343,8 @@ impl Aml for MemoryManager { #[cfg(target_arch = "x86_64")] { if let Some(sgx_epc_region) = &self.sgx_epc_region { - let min =
sgx_epc_region.start().raw_value() as u64; - let max = min + sgx_epc_region.size() as u64 - 1; + let min = sgx_epc_region.start().raw_value(); + let max = min + sgx_epc_region.size() - 1; // SGX EPC region aml::Device::new( "_SB_.EPC_".into(), diff --git a/vmm/src/pci_segment.rs b/vmm/src/pci_segment.rs index ab91fb4a1..c1ea6215d 100644 --- a/vmm/src/pci_segment.rs +++ b/vmm/src/pci_segment.rs @@ -159,7 +159,7 @@ impl PciSegment { // There are 32 devices on the PCI bus, let's assign them an IRQ. for i in 0..32 { - pci_irq_slots[i] = irqs[(i % num_irqs) as usize]; + pci_irq_slots[i] = irqs[i % num_irqs]; } Ok(()) diff --git a/vmm/src/vm.rs b/vmm/src/vm.rs index 72271e728..951ee365f 100644 --- a/vmm/src/vm.rs +++ b/vmm/src/vm.rs @@ -586,7 +586,7 @@ impl Vm { ) .map_err(Error::CpuManager)?; - let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0; + let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO) } != 0; #[cfg(feature = "tdx")] let kernel = config @@ -3336,7 +3336,7 @@ pub fn test_vm() { let mem_region = vm.make_user_memory_region( index as u32, region.start_addr().raw_value(), - region.len() as u64, + region.len(), region.as_ptr() as u64, false, false,
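
Taken together, these hunks apply one mechanical cleanup, most likely driven by clippy lints such as `unnecessary_cast` (casting a value to a type it already has) and `needless_borrow` (an `&` on an argument that is already usable as-is), plus collapsing `Ok(expr? as T)` into a direct return once the cast goes away. What follows is a minimal, self-contained sketch of the before/after pattern; `disk_size` and the `Cursor` stand-in are illustrative names for this note, not code from the patch.

use std::io::{Cursor, Seek, SeekFrom};

// After the cleanup: `Seek::seek` already returns `io::Result<u64>`, so the
// value can be passed through unchanged; no `as u64` cast and no `Ok(...?)`
// rewrap is needed.
fn disk_size(file: &mut impl Seek) -> std::io::Result<u64> {
    file.seek(SeekFrom::End(0))
}

fn main() -> std::io::Result<()> {
    let mut fake_disk = Cursor::new(vec![0u8; 4096]);
    // Before the cleanup the function body would have read:
    //     Ok(file.seek(SeekFrom::End(0))? as u64)
    // where both the cast and the `?`-then-`Ok` round trip are no-ops.
    assert_eq!(disk_size(&mut fake_disk)?, 4096);
    Ok(())
}

The same reasoning covers the remaining hunks: `GuestMemoryRegion::len()` is already `u64`, `size_of::<T>()` is already `usize`, and `STDIN_FILENO` is already an `i32`, so the casts removed here never changed a bit.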