vmm: return seccomp rules according to hypervisors

That requires stashing the hypervisor type into various places.

Signed-off-by: Wei Liu <liuwe@microsoft.com>
Authored by Wei Liu on 2022-07-20 22:51:15 +00:00, committed by Liu Wei
parent 9fc3379e8d
commit ad33f7c5e6
6 changed files with 127 additions and 60 deletions
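At its core, the change makes the seccomp helpers take the hypervisor type as a runtime argument (queried via hypervisor.hypervisor_type()) instead of choosing the rule set at compile time through the kvm/mshv cargo features. A minimal sketch of the new call pattern, using only identifiers that appear in this diff (not a complete program):

    // Ask the hypervisor object which backend is in use, then pass that
    // along when building a thread's seccomp filter.
    let hypervisor_type = hypervisor.hypervisor_type();
    let vmm_seccomp_filter = get_seccomp_filter(seccomp_action, Thread::Vmm, hypervisor_type)
        .map_err(Error::CreateSeccompFilter)?;

Every thread that installs a filter (VMM, vCPU, API, PTY foreground, signal handler) now threads this value through, which is why the type is stashed in CpuManager, DeviceManager and the HTTP thread entry points below.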

@@ -7,6 +7,7 @@ use crate::api::http_endpoint::{VmActionHandler, VmCreate, VmInfo, VmmPing, VmmS
 use crate::api::{ApiError, ApiRequest, VmAction};
 use crate::seccomp_filters::{get_seccomp_filter, Thread};
 use crate::{Error as VmmError, Result};
+use hypervisor::HypervisorType;
 use micro_http::{Body, HttpServer, MediaType, Method, Request, Response, StatusCode, Version};
 use once_cell::sync::Lazy;
 use seccompiler::{apply_filter, SeccompAction};
@@ -278,10 +279,11 @@ fn start_http_thread(
     api_sender: Sender<ApiRequest>,
     seccomp_action: &SeccompAction,
     exit_evt: EventFd,
+    hypervisor_type: HypervisorType,
 ) -> Result<thread::JoinHandle<Result<()>>> {
     // Retrieve seccomp filter for API thread
-    let api_seccomp_filter =
-        get_seccomp_filter(seccomp_action, Thread::Api).map_err(VmmError::CreateSeccompFilter)?;
+    let api_seccomp_filter = get_seccomp_filter(seccomp_action, Thread::Api, hypervisor_type)
+        .map_err(VmmError::CreateSeccompFilter)?;

     thread::Builder::new()
         .name("http-server".to_string())
@@ -336,12 +338,20 @@ pub fn start_http_path_thread(
     api_sender: Sender<ApiRequest>,
     seccomp_action: &SeccompAction,
     exit_evt: EventFd,
+    hypervisor_type: HypervisorType,
 ) -> Result<thread::JoinHandle<Result<()>>> {
     let socket_path = PathBuf::from(path);
     let socket_fd = UnixListener::bind(socket_path).map_err(VmmError::CreateApiServerSocket)?;
     let server =
         HttpServer::new_from_fd(socket_fd.into_raw_fd()).map_err(VmmError::CreateApiServer)?;
-    start_http_thread(server, api_notifier, api_sender, seccomp_action, exit_evt)
+    start_http_thread(
+        server,
+        api_notifier,
+        api_sender,
+        seccomp_action,
+        exit_evt,
+        hypervisor_type,
+    )
 }

 pub fn start_http_fd_thread(
@@ -350,7 +360,15 @@ pub fn start_http_fd_thread(
     api_sender: Sender<ApiRequest>,
     seccomp_action: &SeccompAction,
     exit_evt: EventFd,
+    hypervisor_type: HypervisorType,
 ) -> Result<thread::JoinHandle<Result<()>>> {
     let server = HttpServer::new_from_fd(fd).map_err(VmmError::CreateApiServer)?;
-    start_http_thread(server, api_notifier, api_sender, seccomp_action, exit_evt)
+    start_http_thread(
+        server,
+        api_notifier,
+        api_sender,
+        seccomp_action,
+        exit_evt,
+        hypervisor_type,
+    )
 }

@@ -46,7 +46,7 @@ use hypervisor::arch::x86::{SpecialRegisters, StandardRegisters};
 use hypervisor::kvm::kvm_bindings;
 #[cfg(feature = "tdx")]
 use hypervisor::kvm::{TdxExitDetails, TdxExitStatus};
-use hypervisor::{CpuState, HypervisorCpuError, VmExit, VmOps};
+use hypervisor::{CpuState, HypervisorCpuError, HypervisorType, VmExit, VmOps};
 use libc::{c_void, siginfo_t};
 #[cfg(feature = "guest_debug")]
 use linux_loader::elf::Elf64_Nhdr;
@@ -412,6 +412,7 @@ impl Snapshottable for Vcpu {
 }

 pub struct CpuManager {
+    hypervisor_type: HypervisorType,
     config: CpusConfig,
     #[cfg_attr(target_arch = "aarch64", allow(dead_code))]
     interrupt_controller: Option<Arc<Mutex<dyn InterruptController>>>,
@@ -589,6 +590,7 @@ impl CpuManager {
         let guest_memory = memory_manager.lock().unwrap().guest_memory();
         let mut vcpu_states = Vec::with_capacity(usize::from(config.max_vcpus));
         vcpu_states.resize_with(usize::from(config.max_vcpus), VcpuState::default);
+        let hypervisor_type = hypervisor.hypervisor_type();

         #[cfg(target_arch = "x86_64")]
         let sgx_epc_sections = memory_manager
@@ -688,6 +690,7 @@ impl CpuManager {
         };

         let cpu_manager = Arc::new(Mutex::new(CpuManager {
+            hypervisor_type,
             config: config.clone(),
             interrupt_controller: device_manager.interrupt_controller().clone(),
             vm_memory: guest_memory,
@@ -836,8 +839,9 @@ impl CpuManager {
         });

         // Retrieve seccomp filter for vcpu thread
-        let vcpu_seccomp_filter = get_seccomp_filter(&self.seccomp_action, Thread::Vcpu)
-            .map_err(Error::CreateSeccompFilter)?;
+        let vcpu_seccomp_filter =
+            get_seccomp_filter(&self.seccomp_action, Thread::Vcpu, self.hypervisor_type)
+                .map_err(Error::CreateSeccompFilter)?;

         #[cfg(target_arch = "x86_64")]
         let interrupt_controller_clone = self.interrupt_controller.as_ref().cloned();

@@ -50,7 +50,7 @@ use devices::legacy::Serial;
 use devices::{
     interrupt_controller, interrupt_controller::InterruptController, AcpiNotificationFlags,
 };
-use hypervisor::{HypervisorVmError, IoEventAddress};
+use hypervisor::{HypervisorType, HypervisorVmError, IoEventAddress};
 use libc::{
     cfmakeraw, isatty, tcgetattr, tcsetattr, termios, MAP_NORESERVE, MAP_PRIVATE, MAP_SHARED,
     O_TMPFILE, PROT_READ, PROT_WRITE, TCSANOW,
@@ -807,6 +807,9 @@ struct MetaVirtioDevice {
 }

 pub struct DeviceManager {
+    // The underlying hypervisor
+    hypervisor_type: HypervisorType,
+
     // Manage address space related to devices
     address_manager: Arc<AddressManager>,
@@ -945,6 +948,7 @@ pub struct DeviceManager {
 impl DeviceManager {
     #[allow(clippy::too_many_arguments)]
     pub fn new(
+        hypervisor_type: HypervisorType,
         vm: Arc<dyn hypervisor::Vm>,
         config: Arc<Mutex<VmConfig>>,
         memory_manager: Arc<Mutex<MemoryManager>>,
@@ -1035,6 +1039,7 @@ impl DeviceManager {
         }

         let device_manager = DeviceManager {
+            hypervisor_type,
             address_manager: Arc::clone(&address_manager),
             console: Arc::new(Console::default()),
             interrupt_controller: None,
@@ -1803,8 +1808,12 @@ impl DeviceManager {
     }

     fn listen_for_sigwinch_on_tty(&mut self, pty: &File) -> std::io::Result<()> {
-        let seccomp_filter =
-            get_seccomp_filter(&self.seccomp_action, Thread::PtyForeground).unwrap();
+        let seccomp_filter = get_seccomp_filter(
+            &self.seccomp_action,
+            Thread::PtyForeground,
+            self.hypervisor_type,
+        )
+        .unwrap();

         match start_sigwinch_listener(seccomp_filter, pty) {
             Ok(pipe) => {

@@ -280,10 +280,11 @@ pub fn start_vmm_thread(
     let gdb_vm_debug_event = vm_debug_event.try_clone().map_err(Error::EventFdClone)?;

     let http_api_event = api_event.try_clone().map_err(Error::EventFdClone)?;
+    let hypervisor_type = hypervisor.hypervisor_type();

     // Retrieve seccomp filter
-    let vmm_seccomp_filter =
-        get_seccomp_filter(seccomp_action, Thread::Vmm).map_err(Error::CreateSeccompFilter)?;
+    let vmm_seccomp_filter = get_seccomp_filter(seccomp_action, Thread::Vmm, hypervisor_type)
+        .map_err(Error::CreateSeccompFilter)?;

     let vmm_seccomp_action = seccomp_action.clone();
     let exit_evt = EventFd::new(EFD_NONBLOCK).map_err(Error::EventFdCreate)?;
@@ -328,6 +329,7 @@ pub fn start_vmm_thread(
             api_sender,
             seccomp_action,
             exit_evt,
+            hypervisor_type,
         )?;
     } else if let Some(http_fd) = http_fd {
         api::start_http_fd_thread(
@@ -336,6 +338,7 @@ pub fn start_vmm_thread(
             api_sender,
             seccomp_action,
             exit_evt,
+            hypervisor_type,
         )?;
     }
@@ -413,9 +416,12 @@ impl Vmm {
                 let exit_evt = self.exit_evt.try_clone().map_err(Error::EventFdClone)?;
                 let on_tty = unsafe { libc::isatty(libc::STDIN_FILENO as i32) } != 0;
-                let signal_handler_seccomp_filter =
-                    get_seccomp_filter(&self.seccomp_action, Thread::SignalHandler)
-                        .map_err(Error::CreateSeccompFilter)?;
+                let signal_handler_seccomp_filter = get_seccomp_filter(
+                    &self.seccomp_action,
+                    Thread::SignalHandler,
+                    self.hypervisor.hypervisor_type(),
+                )
+                .map_err(Error::CreateSeccompFilter)?;
                 self.threads.push(
                     thread::Builder::new()
                         .name("vmm_signal_handler".to_string())

@@ -4,6 +4,7 @@
 //
 // SPDX-License-Identifier: Apache-2.0

+use hypervisor::HypervisorType;
 use seccompiler::{
     BackendError, BpfProgram, Error, SeccompAction, SeccompCmpArgLen as ArgLen, SeccompCmpOp::Eq,
     SeccompCondition as Cond, SeccompFilter, SeccompRule,
@@ -226,17 +227,22 @@ fn create_vmm_ioctl_seccomp_rule_common_kvm() -> Result<Vec<SeccompRule>, Backen
     ])
 }

-fn create_vmm_ioctl_seccomp_rule_hypervisor() -> Result<Vec<SeccompRule>, BackendError> {
-    #[cfg(feature = "kvm")]
-    let rules = create_vmm_ioctl_seccomp_rule_common_kvm();
-
-    #[cfg(feature = "mshv")]
-    let rules = create_vmm_ioctl_seccomp_rule_common_mshv();
-
-    rules
+fn create_vmm_ioctl_seccomp_rule_hypervisor(
+    hypervisor_type: HypervisorType,
+) -> Result<Vec<SeccompRule>, BackendError> {
+    match hypervisor_type {
+        #[cfg(feature = "kvm")]
+        HypervisorType::Kvm => create_vmm_ioctl_seccomp_rule_common_kvm(),
+        #[cfg(feature = "mshv")]
+        HypervisorType::Mshv => create_vmm_ioctl_seccomp_rule_common_mshv(),
+        #[allow(unreachable_patterns)]
+        _ => panic!("Invalid hypervisor {:?}", hypervisor_type),
+    }
 }

-fn create_vmm_ioctl_seccomp_rule_common() -> Result<Vec<SeccompRule>, BackendError> {
+fn create_vmm_ioctl_seccomp_rule_common(
+    hypervisor_type: HypervisorType,
+) -> Result<Vec<SeccompRule>, BackendError> {
     let mut common_rules = or![
         and![Cond::new(1, ArgLen::Dword, Eq, BLKSSZGET)?],
         and![Cond::new(1, ArgLen::Dword, Eq, BLKPBSZGET)?],
@@ -308,7 +314,7 @@ fn create_vmm_ioctl_seccomp_rule_common() -> Result<Vec<SeccompRule>, BackendErr
         and![Cond::new(1, ArgLen::Dword, Eq, VHOST_VDPA_GET_IOVA_RANGE)?],
     ];

-    let hypervisor_rules = create_vmm_ioctl_seccomp_rule_hypervisor()?;
+    let hypervisor_rules = create_vmm_ioctl_seccomp_rule_hypervisor(hypervisor_type)?;

     common_rules.extend(hypervisor_rules);
@@ -341,7 +347,7 @@ fn create_vmm_ioctl_seccomp_rule_kvm() -> Result<Vec<SeccompRule>, BackendError>
     const KVM_SET_GUEST_DEBUG: u64 = 0x4048_ae9b;
     const KVM_TRANSLATE: u64 = 0xc018_ae85;

-    let common_rules = create_vmm_ioctl_seccomp_rule_common()?;
+    let common_rules = create_vmm_ioctl_seccomp_rule_common(HypervisorType::Kvm)?;

     let mut arch_rules = or![
         and![Cond::new(1, ArgLen::Dword, Eq, KVM_CREATE_PIT2)?],
         and![Cond::new(1, ArgLen::Dword, Eq, KVM_GET_CLOCK,)?],
@@ -377,7 +383,7 @@ fn create_vmm_ioctl_seccomp_rule_kvm() -> Result<Vec<SeccompRule>, BackendError>
     const KVM_ARM_PREFERRED_TARGET: u64 = 0x8020_aeaf;
     const KVM_ARM_VCPU_INIT: u64 = 0x4020_aeae;

-    let common_rules = create_vmm_ioctl_seccomp_rule_common()?;
+    let common_rules = create_vmm_ioctl_seccomp_rule_common(HypervisorType::Kvm)?;

     let mut arch_rules = or![
         and![Cond::new(1, ArgLen::Dword, Eq, KVM_ARM_PREFERRED_TARGET,)?],
         and![Cond::new(1, ArgLen::Dword, Eq, KVM_ARM_VCPU_INIT,)?],
@@ -389,17 +395,20 @@ fn create_vmm_ioctl_seccomp_rule_kvm() -> Result<Vec<SeccompRule>, BackendError>
 #[cfg(all(target_arch = "x86_64", feature = "mshv"))]
 fn create_vmm_ioctl_seccomp_rule_mshv() -> Result<Vec<SeccompRule>, BackendError> {
-    create_vmm_ioctl_seccomp_rule_common()
+    create_vmm_ioctl_seccomp_rule_common(HypervisorType::Mshv)
 }

-fn create_vmm_ioctl_seccomp_rule() -> Result<Vec<SeccompRule>, BackendError> {
-    #[cfg(feature = "kvm")]
-    let rules = create_vmm_ioctl_seccomp_rule_kvm();
-
-    #[cfg(feature = "mshv")]
-    let rules = create_vmm_ioctl_seccomp_rule_mshv();
-
-    rules
+fn create_vmm_ioctl_seccomp_rule(
+    hypervisor_type: HypervisorType,
+) -> Result<Vec<SeccompRule>, BackendError> {
+    match hypervisor_type {
+        #[cfg(feature = "kvm")]
+        HypervisorType::Kvm => create_vmm_ioctl_seccomp_rule_kvm(),
+        #[cfg(feature = "mshv")]
+        HypervisorType::Mshv => create_vmm_ioctl_seccomp_rule_mshv(),
+        #[allow(unreachable_patterns)]
+        _ => panic!("Invalid hypervisor {:?}", hypervisor_type),
+    }
 }

 fn create_api_ioctl_seccomp_rule() -> Result<Vec<SeccompRule>, BackendError> {
@@ -465,7 +474,9 @@ fn pty_foreground_thread_rules() -> Result<Vec<(i64, Vec<SeccompRule>)>, Backend
 // The filter containing the white listed syscall rules required by the VMM to
 // function.
-fn vmm_thread_rules() -> Result<Vec<(i64, Vec<SeccompRule>)>, BackendError> {
+fn vmm_thread_rules(
+    hypervisor_type: HypervisorType,
+) -> Result<Vec<(i64, Vec<SeccompRule>)>, BackendError> {
     Ok(vec![
         (libc::SYS_accept4, vec![]),
         #[cfg(target_arch = "x86_64")]
@@ -506,7 +517,10 @@ fn vmm_thread_rules() -> Result<Vec<(i64, Vec<SeccompRule>)>, BackendError> {
         (libc::SYS_gettid, vec![]),
         (libc::SYS_gettimeofday, vec![]),
         (libc::SYS_getuid, vec![]),
-        (libc::SYS_ioctl, create_vmm_ioctl_seccomp_rule()?),
+        (
+            libc::SYS_ioctl,
+            create_vmm_ioctl_seccomp_rule(hypervisor_type)?,
+        ),
         (libc::SYS_io_uring_enter, vec![]),
         (libc::SYS_io_uring_setup, vec![]),
         (libc::SYS_io_uring_register, vec![]),
@@ -619,17 +633,22 @@ fn create_vcpu_ioctl_seccomp_rule_mshv() -> Result<Vec<SeccompRule>, BackendErro
     ])
 }

-fn create_vcpu_ioctl_seccomp_rule_hypervisor() -> Result<Vec<SeccompRule>, BackendError> {
-    #[cfg(feature = "kvm")]
-    let rules = create_vcpu_ioctl_seccomp_rule_kvm();
-
-    #[cfg(feature = "mshv")]
-    let rules = create_vcpu_ioctl_seccomp_rule_mshv();
-
-    rules
+fn create_vcpu_ioctl_seccomp_rule_hypervisor(
+    hypervisor_type: HypervisorType,
+) -> Result<Vec<SeccompRule>, BackendError> {
+    match hypervisor_type {
+        #[cfg(feature = "kvm")]
+        HypervisorType::Kvm => create_vcpu_ioctl_seccomp_rule_kvm(),
+        #[cfg(feature = "mshv")]
+        HypervisorType::Mshv => create_vcpu_ioctl_seccomp_rule_mshv(),
+        #[allow(unreachable_patterns)]
+        _ => panic!("Invalid hypervisor {:?}", hypervisor_type),
+    }
 }

-fn create_vcpu_ioctl_seccomp_rule() -> Result<Vec<SeccompRule>, BackendError> {
+fn create_vcpu_ioctl_seccomp_rule(
+    hypervisor_type: HypervisorType,
+) -> Result<Vec<SeccompRule>, BackendError> {
     let mut rules = or![
         and![Cond::new(1, ArgLen::Dword, Eq, VFIO_DEVICE_SET_IRQS)?],
         and![Cond::new(1, ArgLen::Dword, Eq, VFIO_GROUP_UNSET_CONTAINER)?],
@@ -645,14 +664,16 @@ fn create_vcpu_ioctl_seccomp_rule() -> Result<Vec<SeccompRule>, BackendError> {
         )?],
     ];

-    let hypervisor_rules = create_vcpu_ioctl_seccomp_rule_hypervisor()?;
+    let hypervisor_rules = create_vcpu_ioctl_seccomp_rule_hypervisor(hypervisor_type)?;

     rules.extend(hypervisor_rules);

     Ok(rules)
 }

-fn vcpu_thread_rules() -> Result<Vec<(i64, Vec<SeccompRule>)>, BackendError> {
+fn vcpu_thread_rules(
+    hypervisor_type: HypervisorType,
+) -> Result<Vec<(i64, Vec<SeccompRule>)>, BackendError> {
     Ok(vec![
         (libc::SYS_brk, vec![]),
         (libc::SYS_clock_gettime, vec![]),
@@ -665,7 +686,10 @@ fn vcpu_thread_rules() -> Result<Vec<(i64, Vec<SeccompRule>)>, BackendError> {
         (libc::SYS_futex, vec![]),
         (libc::SYS_getrandom, vec![]),
         (libc::SYS_getpid, vec![]),
-        (libc::SYS_ioctl, create_vcpu_ioctl_seccomp_rule()?),
+        (
+            libc::SYS_ioctl,
+            create_vcpu_ioctl_seccomp_rule(hypervisor_type)?,
+        ),
         (libc::SYS_lseek, vec![]),
         (libc::SYS_madvise, vec![]),
         (libc::SYS_mmap, vec![]),
@@ -727,12 +751,15 @@ fn api_thread_rules() -> Result<Vec<(i64, Vec<SeccompRule>)>, BackendError> {
     ])
 }

-fn get_seccomp_rules(thread_type: Thread) -> Result<Vec<(i64, Vec<SeccompRule>)>, BackendError> {
+fn get_seccomp_rules(
+    thread_type: Thread,
+    hypervisor_type: HypervisorType,
+) -> Result<Vec<(i64, Vec<SeccompRule>)>, BackendError> {
     match thread_type {
         Thread::Api => Ok(api_thread_rules()?),
         Thread::SignalHandler => Ok(signal_handler_thread_rules()?),
-        Thread::Vcpu => Ok(vcpu_thread_rules()?),
-        Thread::Vmm => Ok(vmm_thread_rules()?),
+        Thread::Vcpu => Ok(vcpu_thread_rules(hypervisor_type)?),
+        Thread::Vmm => Ok(vmm_thread_rules(hypervisor_type)?),
         Thread::PtyForeground => Ok(pty_foreground_thread_rules()?),
     }
 }
@@ -741,11 +768,12 @@ fn get_seccomp_rules(thread_type: Thread) -> Result<Vec<(i64, Vec<SeccompRule>)>
 pub fn get_seccomp_filter(
     seccomp_action: &SeccompAction,
     thread_type: Thread,
+    hypervisor_type: HypervisorType,
 ) -> Result<BpfProgram, Error> {
     match seccomp_action {
         SeccompAction::Allow => Ok(vec![]),
         SeccompAction::Log => SeccompFilter::new(
-            get_seccomp_rules(thread_type)
+            get_seccomp_rules(thread_type, hypervisor_type)
                 .map_err(Error::Backend)?
                 .into_iter()
                 .collect(),
@@ -756,7 +784,7 @@ pub fn get_seccomp_filter(
         .and_then(|filter| filter.try_into())
         .map_err(Error::Backend),
         _ => SeccompFilter::new(
-            get_seccomp_rules(thread_type)
+            get_seccomp_rules(thread_type, hypervisor_type)
                 .map_err(Error::Backend)?
                 .into_iter()
                 .collect(),

@@ -471,7 +471,6 @@ pub struct Vm {
     numa_nodes: NumaNodes,
     seccomp_action: SeccompAction,
     exit_evt: EventFd,
-    #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
     hypervisor: Arc<dyn hypervisor::Hypervisor>,
     stop_on_boot: bool,
     #[cfg(target_arch = "x86_64")]
@@ -534,6 +533,7 @@ impl Vm {
         let stop_on_boot = false;

         let device_manager = DeviceManager::new(
+            hypervisor.hypervisor_type(),
            vm.clone(),
            config.clone(),
            memory_manager.clone(),
@@ -617,7 +617,6 @@ impl Vm {
             numa_nodes,
             seccomp_action: seccomp_action.clone(),
             exit_evt,
-            #[cfg(all(feature = "kvm", target_arch = "x86_64"))]
             hypervisor,
             stop_on_boot,
             #[cfg(target_arch = "x86_64")]
@@ -1969,9 +1968,12 @@ impl Vm {
             Ok(signals) => {
                 self.signals = Some(signals.handle());
                 let exit_evt = self.exit_evt.try_clone().map_err(Error::EventFdClone)?;
-                let signal_handler_seccomp_filter =
-                    get_seccomp_filter(&self.seccomp_action, Thread::SignalHandler)
-                        .map_err(Error::CreateSeccompFilter)?;
+                let signal_handler_seccomp_filter = get_seccomp_filter(
+                    &self.seccomp_action,
+                    Thread::SignalHandler,
+                    self.hypervisor.hypervisor_type(),
+                )
+                .map_err(Error::CreateSeccompFilter)?;
                 self.threads.push(
                     thread::Builder::new()
                         .name("vm_signal_handler".to_string())