mirror of
https://github.com/cloud-hypervisor/cloud-hypervisor.git
synced 2024-09-29 01:55:45 +00:00
vmm: Remove unused "poll_queue" from DiskConfig
The parameter "poll_queue" was useful at the time Cloud Hypervisor was responsible for spawning vhost-user backends, as it carried the information about whether the vhost-user-block backend should have this option enabled or not. It has been quite some time since we walked away from this design, as we now expect a management layer to be responsible for running vhost-user backends. That is the reason why we can remove "poll_queue" from the DiskConfig structure. Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
This commit is contained in:
parent
9524a8ffe8
commit
4d74525bdc
@ -811,9 +811,6 @@ components:
|
|||||||
default: false
|
default: false
|
||||||
vhost_socket:
|
vhost_socket:
|
||||||
type: string
|
type: string
|
||||||
poll_queue:
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
rate_limiter_config:
|
rate_limiter_config:
|
||||||
$ref: '#/components/schemas/RateLimiterConfig'
|
$ref: '#/components/schemas/RateLimiterConfig'
|
||||||
pci_segment:
|
pci_segment:
|
||||||
|
@ -959,8 +959,6 @@ pub struct DiskConfig {
|
|||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub vhost_user: bool,
|
pub vhost_user: bool,
|
||||||
pub vhost_socket: Option<String>,
|
pub vhost_socket: Option<String>,
|
||||||
#[serde(default = "default_diskconfig_poll_queue")]
|
|
||||||
pub poll_queue: bool,
|
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub rate_limiter_config: Option<RateLimiterConfig>,
|
pub rate_limiter_config: Option<RateLimiterConfig>,
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
@ -980,10 +978,6 @@ fn default_diskconfig_queue_size() -> u16 {
|
|||||||
DEFAULT_QUEUE_SIZE_VUBLK
|
DEFAULT_QUEUE_SIZE_VUBLK
|
||||||
}
|
}
|
||||||
|
|
||||||
fn default_diskconfig_poll_queue() -> bool {
|
|
||||||
true
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for DiskConfig {
|
impl Default for DiskConfig {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self {
|
Self {
|
||||||
@ -995,7 +989,6 @@ impl Default for DiskConfig {
|
|||||||
queue_size: default_diskconfig_queue_size(),
|
queue_size: default_diskconfig_queue_size(),
|
||||||
vhost_user: false,
|
vhost_user: false,
|
||||||
vhost_socket: None,
|
vhost_socket: None,
|
||||||
poll_queue: default_diskconfig_poll_queue(),
|
|
||||||
id: None,
|
id: None,
|
||||||
disable_io_uring: false,
|
disable_io_uring: false,
|
||||||
rate_limiter_config: None,
|
rate_limiter_config: None,
|
||||||
@ -1008,7 +1001,7 @@ impl DiskConfig {
|
|||||||
pub const SYNTAX: &'static str = "Disk parameters \
|
pub const SYNTAX: &'static str = "Disk parameters \
|
||||||
\"path=<disk_image_path>,readonly=on|off,direct=on|off,iommu=on|off,\
|
\"path=<disk_image_path>,readonly=on|off,direct=on|off,iommu=on|off,\
|
||||||
num_queues=<number_of_queues>,queue_size=<size_of_each_queue>,\
|
num_queues=<number_of_queues>,queue_size=<size_of_each_queue>,\
|
||||||
vhost_user=on|off,socket=<vhost_user_socket_path>,poll_queue=on|off,\
|
vhost_user=on|off,socket=<vhost_user_socket_path>,\
|
||||||
bw_size=<bytes>,bw_one_time_burst=<bytes>,bw_refill_time=<ms>,\
|
bw_size=<bytes>,bw_one_time_burst=<bytes>,bw_refill_time=<ms>,\
|
||||||
ops_size=<io_ops>,ops_one_time_burst=<io_ops>,ops_refill_time=<ms>,\
|
ops_size=<io_ops>,ops_one_time_burst=<io_ops>,ops_refill_time=<ms>,\
|
||||||
id=<device_id>,pci_segment=<segment_id>\"";
|
id=<device_id>,pci_segment=<segment_id>\"";
|
||||||
@ -1024,7 +1017,6 @@ impl DiskConfig {
|
|||||||
.add("num_queues")
|
.add("num_queues")
|
||||||
.add("vhost_user")
|
.add("vhost_user")
|
||||||
.add("socket")
|
.add("socket")
|
||||||
.add("poll_queue")
|
|
||||||
.add("bw_size")
|
.add("bw_size")
|
||||||
.add("bw_one_time_burst")
|
.add("bw_one_time_burst")
|
||||||
.add("bw_refill_time")
|
.add("bw_refill_time")
|
||||||
@ -1066,11 +1058,6 @@ impl DiskConfig {
|
|||||||
.unwrap_or(Toggle(false))
|
.unwrap_or(Toggle(false))
|
||||||
.0;
|
.0;
|
||||||
let vhost_socket = parser.get("socket");
|
let vhost_socket = parser.get("socket");
|
||||||
let poll_queue = parser
|
|
||||||
.convert::<Toggle>("poll_queue")
|
|
||||||
.map_err(Error::ParseDisk)?
|
|
||||||
.unwrap_or_else(|| Toggle(default_diskconfig_poll_queue()))
|
|
||||||
.0;
|
|
||||||
let id = parser.get("id");
|
let id = parser.get("id");
|
||||||
let disable_io_uring = parser
|
let disable_io_uring = parser
|
||||||
.convert::<Toggle>("_disable_io_uring")
|
.convert::<Toggle>("_disable_io_uring")
|
||||||
@ -1132,10 +1119,6 @@ impl DiskConfig {
|
|||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
if parser.is_set("poll_queue") && !vhost_user {
|
|
||||||
warn!("poll_queue parameter currently only has effect when used vhost_user=true");
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(DiskConfig {
|
Ok(DiskConfig {
|
||||||
path,
|
path,
|
||||||
readonly,
|
readonly,
|
||||||
@ -1145,7 +1128,6 @@ impl DiskConfig {
|
|||||||
queue_size,
|
queue_size,
|
||||||
vhost_user,
|
vhost_user,
|
||||||
vhost_socket,
|
vhost_socket,
|
||||||
poll_queue,
|
|
||||||
rate_limiter_config,
|
rate_limiter_config,
|
||||||
id,
|
id,
|
||||||
disable_io_uring,
|
disable_io_uring,
|
||||||
@ -2901,22 +2883,6 @@ mod tests {
|
|||||||
..Default::default()
|
..Default::default()
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
assert_eq!(
|
|
||||||
DiskConfig::parse("path=/path/to_file,poll_queue=false")?,
|
|
||||||
DiskConfig {
|
|
||||||
path: Some(PathBuf::from("/path/to_file")),
|
|
||||||
poll_queue: false,
|
|
||||||
..Default::default()
|
|
||||||
}
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
DiskConfig::parse("path=/path/to_file,poll_queue=true")?,
|
|
||||||
DiskConfig {
|
|
||||||
path: Some(PathBuf::from("/path/to_file")),
|
|
||||||
poll_queue: true,
|
|
||||||
..Default::default()
|
|
||||||
}
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user