vhost_user_net: Remove unnecessary checks for unconfigured memory

Simplify the check for the unusual situation where the memory is not
configured by using .ok_or() on the option to convert it to a result.
This cleans up a bunch of extra indentation.

Signed-off-by: Rob Bradford <robert.bradford@intel.com>
Author:    Rob Bradford, 2019-10-15 16:02:40 +01:00
Committer: Sebastien Boeuf
Parent:    df336ade57
Commit:    8663b429b3
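For reference, a minimal standalone sketch of the pattern the commit message describes: converting an Option into a Result with .ok_or() and bailing out early with `?`, instead of wrapping the whole function body in `if let Some(..) { .. } else { .. }`. The types and names here (Backend, mem: Option<String>, frame_len_*) are illustrative placeholders, not the actual vhost-user-net types.

// Sketch only: shows the Option -> Result conversion applied by this commit.
#[derive(Debug)]
enum Error {
    NoMemoryConfigured,
}

type Result<T> = std::result::Result<T, Error>;

struct Backend {
    // Stands in for guest memory that may not have been configured yet.
    mem: Option<String>,
}

impl Backend {
    // Before: the nested style that indents the whole body.
    fn frame_len_nested(&self) -> Result<usize> {
        if let Some(mem) = &self.mem {
            Ok(mem.len())
        } else {
            Err(Error::NoMemoryConfigured)
        }
    }

    // After: flat style using ok_or() + `?`, as in this commit.
    fn frame_len_flat(&self) -> Result<usize> {
        let mem = self.mem.as_ref().ok_or(Error::NoMemoryConfigured)?;
        Ok(mem.len())
    }
}

fn main() {
    let b = Backend { mem: None };
    assert!(matches!(b.frame_len_nested(), Err(Error::NoMemoryConfigured)));
    assert!(matches!(b.frame_len_flat(), Err(Error::NoMemoryConfigured)));

    let b = Backend { mem: Some(String::from("guest memory")) };
    assert_eq!(b.frame_len_flat().unwrap(), 12);
}

Both versions behave the same; the flat version simply avoids one level of indentation for the common path, which is what the diff below does for rx_single_frame() and process_tx().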

@@ -83,8 +83,8 @@ pub enum Error {
     InvalidVringAddr,
     /// No vring call fd to notify.
     NoVringCallFdNotify,
-    /// No memory while handling tx.
-    NoMemoryForTx,
+    /// No memory configured.
+    NoMemoryConfigured,
     /// Failed to parse sock parameter.
     ParseSockParam,
     /// Failed to parse ip parameter.
@@ -208,76 +208,73 @@ impl VhostUserNetBackend {
     // Copies a single frame from `self.rx.frame_buf` into the guest. Returns true
     // if a buffer was used, and false if the frame must be deferred until a buffer
     // is made available by the driver.
-    fn rx_single_frame(&mut self, vring: &mut Vring) -> bool {
-        if let Some(mem) = &self.mem {
-            let mut next_desc = vring.mut_queue().iter(&mem).next();
-
-            if next_desc.is_none() {
-                // Queue has no available descriptors
-                if self.rx_tap_listening {
-                    self.vring_worker
-                        .as_ref()
-                        .unwrap()
-                        .unregister_listener(
-                            self.tap.as_raw_fd(),
-                            epoll::Events::EPOLLIN,
-                            u64::from(RX_TAP_EVENT),
-                        )
-                        .unwrap();
-                    self.rx_tap_listening = false;
-                }
-                return false;
-            }
-
-            // We just checked that the head descriptor exists.
-            let head_index = next_desc.as_ref().unwrap().index;
-            let mut write_count = 0;
-
-            // Copy from frame into buffer, which may span multiple descriptors.
-            loop {
-                match next_desc {
-                    Some(desc) => {
-                        if !desc.is_write_only() {
-                            break;
-                        }
-                        let limit = cmp::min(write_count + desc.len as usize, self.rx.bytes_read);
-                        let source_slice = &self.rx.frame_buf[write_count..limit];
-                        let write_result = mem.write_slice(source_slice, desc.addr);
-
-                        match write_result {
-                            Ok(_) => {
-                                write_count = limit;
-                            }
-                            Err(e) => {
-                                error!("Failed to write slice: {:?}", e);
-                                break;
-                            }
-                        };
-
-                        if write_count >= self.rx.bytes_read {
-                            break;
-                        }
-                        next_desc = desc.next_descriptor();
-                    }
-                    None => {
-                        warn!("Receiving buffer is too small to hold frame of current size");
-                        break;
-                    }
-                }
-            }
-
-            vring
-                .mut_queue()
-                .add_used(&mem, head_index, write_count as u32);
-
-            // Mark that we have at least one pending packet and we need to interrupt the guest.
-            self.rx.deferred_irqs = true;
-
-            write_count >= self.rx.bytes_read
-        } else {
-            error!("No memory for single frame handling!\n");
-            false
-        }
+    fn rx_single_frame(&mut self, vring: &mut Vring) -> Result<bool> {
+        let mem = self.mem.as_ref().ok_or(Error::NoMemoryConfigured)?;
+
+        let mut next_desc = vring.mut_queue().iter(&mem).next();
+
+        if next_desc.is_none() {
+            // Queue has no available descriptors
+            if self.rx_tap_listening {
+                self.vring_worker
+                    .as_ref()
+                    .unwrap()
+                    .unregister_listener(
+                        self.tap.as_raw_fd(),
+                        epoll::Events::EPOLLIN,
+                        u64::from(RX_TAP_EVENT),
+                    )
+                    .unwrap();
+                self.rx_tap_listening = false;
+            }
+            return Ok(false);
+        }
+
+        // We just checked that the head descriptor exists.
+        let head_index = next_desc.as_ref().unwrap().index;
+        let mut write_count = 0;
+
+        // Copy from frame into buffer, which may span multiple descriptors.
+        loop {
+            match next_desc {
+                Some(desc) => {
+                    if !desc.is_write_only() {
+                        break;
+                    }
+                    let limit = cmp::min(write_count + desc.len as usize, self.rx.bytes_read);
+                    let source_slice = &self.rx.frame_buf[write_count..limit];
+                    let write_result = mem.write_slice(source_slice, desc.addr);
+
+                    match write_result {
+                        Ok(_) => {
+                            write_count = limit;
+                        }
+                        Err(e) => {
+                            error!("Failed to write slice: {:?}", e);
+                            break;
+                        }
+                    };
+
+                    if write_count >= self.rx.bytes_read {
+                        break;
+                    }
+                    next_desc = desc.next_descriptor();
+                }
+                None => {
+                    warn!("Receiving buffer is too small to hold frame of current size");
+                    break;
+                }
+            }
+        }
+
+        vring
+            .mut_queue()
+            .add_used(&mem, head_index, write_count as u32);
+
+        // Mark that we have at least one pending packet and we need to interrupt the guest.
+        self.rx.deferred_irqs = true;
+
+        Ok(write_count >= self.rx.bytes_read)
     }
 
     fn process_rx(&mut self, vring: &mut Vring) -> Result<()> {
@@ -286,7 +283,7 @@ impl VhostUserNetBackend {
             match self.read_tap() {
                 Ok(count) => {
                     self.rx.bytes_read = count;
-                    if !self.rx_single_frame(vring) {
+                    if !self.rx_single_frame(vring)? {
                         self.rx.deferred_frame = true;
                         break;
                     }
@@ -316,7 +313,7 @@ impl VhostUserNetBackend {
 
     fn resume_rx(&mut self, vring: &mut Vring) -> Result<()> {
         if self.rx.deferred_frame {
-            if self.rx_single_frame(vring) {
+            if self.rx_single_frame(vring)? {
                 self.rx.deferred_frame = false;
                 // process_rx() was interrupted possibly before consuming all
                 // packets in the tap; try continuing now.
@@ -334,67 +331,65 @@ impl VhostUserNetBackend {
     }
 
     fn process_tx(&mut self, vring: &mut Vring) -> Result<()> {
-        if let Some(mem) = &self.mem {
-            let mut used_desc_heads = [(0, 0); QUEUE_SIZE];
-            let mut used_count = 0;
-            while let Some(avail_desc) = vring.mut_queue().iter(&mem).next() {
-                let head_index = avail_desc.index;
-                let mut read_count = 0;
-                let mut next_desc = Some(avail_desc);
-
-                self.tx.iovec.clear();
-                while let Some(desc) = next_desc {
-                    if desc.is_write_only() {
-                        break;
-                    }
-                    self.tx.iovec.push((desc.addr, desc.len as usize));
-                    read_count += desc.len as usize;
-                    next_desc = desc.next_descriptor();
-                }
-                used_desc_heads[used_count] = (head_index, read_count);
-                used_count += 1;
-                read_count = 0;
-                // Copy buffer from across multiple descriptors.
-                // TODO(performance - Issue #420): change this to use `writev()` instead of `write()`
-                // and get rid of the intermediate buffer.
-                for (desc_addr, desc_len) in self.tx.iovec.drain(..) {
-                    let limit = cmp::min((read_count + desc_len) as usize, self.tx.frame_buf.len());
-                    let read_result = mem.read_slice(
-                        &mut self.tx.frame_buf[read_count..limit as usize],
-                        desc_addr,
-                    );
-                    match read_result {
-                        Ok(_) => {
-                            // Increment by number of bytes actually read
-                            read_count += limit - read_count;
-                        }
-                        Err(e) => {
-                            println!("Failed to read slice: {:?}", e);
-                            break;
-                        }
-                    }
-                }
-                let write_result = self.tap.write(&self.tx.frame_buf[..read_count as usize]);
-                match write_result {
-                    Ok(_) => {}
-                    Err(e) => {
-                        println!("net: tx: error failed to write to tap: {}", e);
-                    }
-                };
-            }
-            if used_count > 0 {
-                for &(desc_index, _) in &used_desc_heads[..used_count] {
-                    vring.mut_queue().add_used(&mem, desc_index, 0);
-                }
-                vring.signal_used_queue().unwrap();
-            }
-        } else {
-            error!("No memory for vhost-user-net backend tx handling!\n");
-            return Err(Error::NoMemoryForTx);
-        }
+        let mem = self.mem.as_ref().ok_or(Error::NoMemoryConfigured)?;
+
+        let mut used_desc_heads = [(0, 0); QUEUE_SIZE];
+        let mut used_count = 0;
+        while let Some(avail_desc) = vring.mut_queue().iter(&mem).next() {
+            let head_index = avail_desc.index;
+            let mut read_count = 0;
+            let mut next_desc = Some(avail_desc);
+
+            self.tx.iovec.clear();
+            while let Some(desc) = next_desc {
+                if desc.is_write_only() {
+                    break;
+                }
+                self.tx.iovec.push((desc.addr, desc.len as usize));
+                read_count += desc.len as usize;
+                next_desc = desc.next_descriptor();
+            }
+            used_desc_heads[used_count] = (head_index, read_count);
+            used_count += 1;
+            read_count = 0;
+            // Copy buffer from across multiple descriptors.
+            // TODO(performance - Issue #420): change this to use `writev()` instead of `write()`
+            // and get rid of the intermediate buffer.
+            for (desc_addr, desc_len) in self.tx.iovec.drain(..) {
+                let limit = cmp::min((read_count + desc_len) as usize, self.tx.frame_buf.len());
+                let read_result = mem.read_slice(
+                    &mut self.tx.frame_buf[read_count..limit as usize],
+                    desc_addr,
+                );
+                match read_result {
+                    Ok(_) => {
+                        // Increment by number of bytes actually read
+                        read_count += limit - read_count;
+                    }
+                    Err(e) => {
+                        println!("Failed to read slice: {:?}", e);
+                        break;
+                    }
+                }
+            }
+            let write_result = self.tap.write(&self.tx.frame_buf[..read_count as usize]);
+            match write_result {
+                Ok(_) => {}
+                Err(e) => {
+                    println!("net: tx: error failed to write to tap: {}", e);
+                }
+            };
+        }
+        if used_count > 0 {
+            for &(desc_index, _) in &used_desc_heads[..used_count] {
+                vring.mut_queue().add_used(&mem, desc_index, 0);
+            }
+            vring.signal_used_queue().unwrap();
+        }
 
         Ok(())
     }
@@ -463,7 +458,7 @@ impl VhostUserBackend for VhostUserNetBackend {
                 // Process a deferred frame first if available. Don't read from tap again
                 // until we manage to receive this deferred frame.
                 {
-                    if self.rx_single_frame(&mut vring) {
+                    if self.rx_single_frame(&mut vring).unwrap() {
                         self.rx.deferred_frame = false;
                         self.process_rx(&mut vring).unwrap();
                     } else if self.rx.deferred_irqs {