vmm: config: Move TDX to rely on PayloadConfig

Remove the --tdx option that was used to specify that we want to run a
TD VM. Instead, rely on the --platform option by adding a "tdx" boolean
parameter. This is the new way of enabling TDX with Cloud Hypervisor.
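
As a rough illustration of the new toggle (a standalone sketch, not the
actual OptionParser/Toggle machinery used by PlatformConfig::parse in
this diff), parsing a `--platform` style string for a boolean `tdx`
parameter could look like this:

```rust
// Standalone sketch: accept a comma-separated key=value list such as the
// string passed to --platform, and report whether tdx is switched on.
// Simplified compared to the real OptionParser/Toggle implementation.
fn platform_tdx_enabled(platform: &str) -> bool {
    platform.split(',').any(|opt| {
        let mut kv = opt.splitn(2, '=');
        match (kv.next(), kv.next()) {
            // A bare "tdx" or "tdx=on"/"tdx=true" enables TDX in this sketch.
            (Some("tdx"), None) => true,
            (Some("tdx"), Some(v)) => matches!(v, "on" | "true"),
            _ => false,
        }
    })
}

fn main() {
    // Mirrors the new CLI usage: --platform tdx=on
    assert!(platform_tdx_enabled("tdx=on"));
    assert!(!platform_tdx_enabled("num_pci_segments=16"));
}
```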

Along with this change, the firmware path is now retrieved from the
recently introduced PayloadConfig structure.
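
A minimal sketch of the new lookup, using simplified stand-ins for the
vmm types (the real VmConfig and PayloadConfig carry many more fields),
assuming TDX has already been enabled through the platform config:

```rust
use std::path::PathBuf;

// Simplified stand-ins for the vmm configuration structures.
struct PayloadConfig {
    firmware: Option<PathBuf>,
}

struct VmConfig {
    payload: Option<PayloadConfig>,
}

// With TDX enabled, the firmware path now comes from PayloadConfig
// instead of the removed TdxConfig structure.
fn tdx_firmware_path(config: &VmConfig) -> Result<PathBuf, String> {
    config
        .payload
        .as_ref()
        .and_then(|p| p.firmware.clone())
        .ok_or_else(|| "No TDX firmware specified".to_string())
}

fn main() {
    let config = VmConfig {
        payload: Some(PayloadConfig {
            firmware: Some(PathBuf::from("OVMF.fd")),
        }),
    };
    println!("{:?}", tdx_firmware_path(&config));
}
```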

Fixes #4556

Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
Sebastien Boeuf 2022-09-02 17:19:32 +02:00 committed by Rob Bradford
parent b05bd28321
commit 3793ffe888
6 changed files with 91 additions and 100 deletions


@@ -61,7 +61,8 @@ meaning it will be printing guest kernel logs to the `virtio-console` device.
```bash
./cloud-hypervisor \
--tdx firmware=edk2-staging/Build/OvmfCh/RELEASE_GCC5/FV/OVMF.fd \
--platform tdx=on \
--firmware edk2-staging/Build/OvmfCh/RELEASE_GCC5/FV/OVMF.fd \
--cpus boot=1 \
--memory size=1G \
--disk path=tdx_guest_img
@@ -72,7 +73,8 @@ firmware:
```bash
./cloud-hypervisor \
--tdx firmware=edk2-staging/Build/OvmfCh/DEBUG_GCC5/FV/OVMF.fd \
--platform tdx=on \
--firmware edk2-staging/Build/OvmfCh/DEBUG_GCC5/FV/OVMF.fd \
--cpus boot=1 \
--memory size=1G \
--disk path=tdx_guest_img \
@@ -95,7 +97,8 @@ option as well.
```bash
./cloud-hypervisor \
--tdx firmware=tdshim \
--platform tdx=on \
--firmware tdshim \
--kernel bzImage \
--cmdline "root=/dev/vda3 console=hvc0 rw" \
--cpus boot=1 \


@@ -400,15 +400,6 @@ fn create_app<'a>(
.group("vmm-config"),
);
#[cfg(feature = "tdx")]
let app = app.arg(
Arg::new("tdx")
.long("tdx")
.help("TDX Support: firmware=<tdvf path>")
.takes_value(true)
.group("vm-config"),
);
app
}
@@ -577,11 +568,6 @@ fn start_vmm(cmd_arguments: ArgMatches) -> Result<Option<String>, Error> {
let payload_present =
cmd_arguments.is_present("kernel") || cmd_arguments.is_present("firmware");
// Can't test for "vm-config" group as some have default values. The kernel (or tdx if enabled)
// is the only required option for booting the VM.
#[cfg(feature = "tdx")]
let payload_present = payload_present || cmd_arguments.is_present("tdx");
if payload_present {
let vm_params = config::VmParams::from_arg_matches(&cmd_arguments);
let vm_config = config::VmConfig::parse(vm_params).map_err(Error::ParsingConfig)?;
@@ -744,8 +730,6 @@ mod unit_tests {
sgx_epc: None,
numa: None,
watchdog: false,
#[cfg(feature = "tdx")]
tdx: None,
#[cfg(feature = "gdb")]
gdb: false,
platform: None,


@@ -559,8 +559,6 @@ components:
type: array
items:
$ref: "#/components/schemas/SgxEpcConfig"
tdx:
$ref: "#/components/schemas/TdxConfig"
numa:
type: array
items:
@@ -650,6 +648,9 @@ components:
type: array
items:
type: string
tdx:
type: boolean
default: false
MemoryZoneConfig:
required:
@@ -1006,15 +1007,6 @@ components:
type: boolean
default: false
TdxConfig:
required:
- firmware
type: object
properties:
firmware:
type: string
description: Path to the firmware that will be used to boot the TDx guest up.
NumaDistance:
required:
- destination


@@ -153,6 +153,9 @@ pub enum ValidationError {
/// CPU Hotplug is not permitted with TDX
#[cfg(feature = "tdx")]
TdxNoCpuHotplug,
/// Missing firmware for TDX
#[cfg(feature = "tdx")]
TdxFirmwareMissing,
/// Insufficient vCPUs for queues
TooManyQueues,
/// Need shared memory for vfio-user
@@ -219,6 +222,10 @@ impl fmt::Display for ValidationError {
TdxNoCpuHotplug => {
write!(f, "CPU hotplug is not permitted with TDX")
}
#[cfg(feature = "tdx")]
TdxFirmwareMissing => {
write!(f, "No TDX firmware specified")
}
TooManyQueues => {
write!(f, "Number of vCPUs is insufficient for number of queues")
}
@@ -363,8 +370,6 @@ pub struct VmParams<'a> {
pub sgx_epc: Option<Vec<&'a str>>,
pub numa: Option<Vec<&'a str>>,
pub watchdog: bool,
#[cfg(feature = "tdx")]
pub tdx: Option<&'a str>,
#[cfg(feature = "gdb")]
pub gdb: bool,
pub platform: Option<&'a str>,
@@ -397,8 +402,6 @@ impl<'a> VmParams<'a> {
let numa: Option<Vec<&str>> = args.values_of("numa").map(|x| x.collect());
let watchdog = args.is_present("watchdog");
let platform = args.value_of("platform");
#[cfg(feature = "tdx")]
let tdx = args.value_of("tdx");
#[cfg(feature = "gdb")]
let gdb = args.is_present("gdb");
VmParams {
@@ -425,8 +428,6 @@ impl<'a> VmParams<'a> {
sgx_epc,
numa,
watchdog,
#[cfg(feature = "tdx")]
tdx,
#[cfg(feature = "gdb")]
gdb,
platform,
@@ -642,16 +643,22 @@ pub struct PlatformConfig {
pub uuid: Option<String>,
#[serde(default)]
pub oem_strings: Option<Vec<String>>,
#[cfg(feature = "tdx")]
#[serde(default)]
pub tdx: bool,
}
impl PlatformConfig {
pub fn parse(platform: &str) -> Result<Self> {
let mut parser = OptionParser::new();
parser.add("num_pci_segments");
parser.add("iommu_segments");
parser.add("serial_number");
parser.add("uuid");
parser.add("oem_strings");
parser
.add("num_pci_segments")
.add("iommu_segments")
.add("serial_number")
.add("uuid")
.add("oem_strings");
#[cfg(feature = "tdx")]
parser.add("tdx");
parser.parse(platform).map_err(Error::ParsePlatform)?;
let num_pci_segments: u16 = parser
@@ -670,12 +677,20 @@ impl PlatformConfig {
.convert::<StringList>("oem_strings")
.map_err(Error::ParsePlatform)?
.map(|v| v.0);
#[cfg(feature = "tdx")]
let tdx = parser
.convert::<Toggle>("tdx")
.map_err(Error::ParsePlatform)?
.unwrap_or(Toggle(false))
.0;
Ok(PlatformConfig {
num_pci_segments,
iommu_segments,
serial_number,
uuid,
oem_strings,
#[cfg(feature = "tdx")]
tdx,
})
}
@@ -706,6 +721,8 @@ impl Default for PlatformConfig {
serial_number: None,
uuid: None,
oem_strings: None,
#[cfg(feature = "tdx")]
tdx: false,
}
}
}
@@ -2083,26 +2100,6 @@ impl VsockConfig {
}
}
#[cfg(feature = "tdx")]
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, Default)]
pub struct TdxConfig {
pub firmware: PathBuf,
}
#[cfg(feature = "tdx")]
impl TdxConfig {
pub fn parse(tdx: &str) -> Result<Self> {
let mut parser = OptionParser::new();
parser.add("firmware");
parser.parse(tdx).map_err(Error::ParseTdx)?;
let firmware = parser
.get("firmware")
.map(PathBuf::from)
.ok_or(Error::FirmwarePathMissing)?;
Ok(TdxConfig { firmware })
}
}
#[cfg(target_arch = "x86_64")]
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, Default)]
pub struct SgxEpcConfig {
@@ -2296,8 +2293,6 @@ pub struct VmConfig {
pub numa: Option<Vec<NumaConfig>>,
#[serde(default)]
pub watchdog: bool,
#[cfg(feature = "tdx")]
pub tdx: Option<TdxConfig>,
#[cfg(feature = "gdb")]
pub gdb: bool,
pub platform: Option<PlatformConfig>,
@@ -2341,16 +2336,16 @@ impl VmConfig {
})
}
#[cfg(not(feature = "tdx"))]
self.payload
.as_ref()
.ok_or(ValidationError::KernelMissing)?;
#[cfg(feature = "tdx")]
{
let tdx_enabled = self.tdx.is_some();
if !tdx_enabled && self.payload.is_none() {
return Err(ValidationError::KernelMissing);
let tdx_enabled = self.platform.as_ref().map(|p| p.tdx).unwrap_or(false);
// At this point we know payload isn't None.
if tdx_enabled && self.payload.as_ref().unwrap().firmware.is_none() {
return Err(ValidationError::TdxFirmwareMissing);
}
if tdx_enabled && (self.cpus.max_vcpus != self.cpus.boot_vcpus) {
return Err(ValidationError::TdxNoCpuHotplug);
@@ -2686,9 +2681,6 @@ impl VmConfig {
None
};
#[cfg(feature = "tdx")]
let tdx = vm_params.tdx.map(TdxConfig::parse).transpose()?;
#[cfg(feature = "gdb")]
let gdb = vm_params.gdb;
@@ -2716,8 +2708,6 @@ impl VmConfig {
sgx_epc,
numa,
watchdog: vm_params.watchdog,
#[cfg(feature = "tdx")]
tdx,
#[cfg(feature = "gdb")]
gdb,
platform,
@@ -2725,6 +2715,11 @@ impl VmConfig {
config.validate().map_err(Error::Validation)?;
Ok(config)
}
#[cfg(feature = "tdx")]
pub fn is_tdx_enabled(&self) -> bool {
self.platform.as_ref().map(|p| p.tdx).unwrap_or(false)
}
}
#[cfg(test)]
@@ -3323,8 +3318,6 @@ mod tests {
sgx_epc: None,
numa: None,
watchdog: false,
#[cfg(feature = "tdx")]
tdx: None,
#[cfg(feature = "gdb")]
gdb: false,
platform: None,


@@ -1419,8 +1419,6 @@ impl Vmm {
let vm_config = vm.get_config();
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
let common_cpuid = {
#[cfg(feature = "tdx")]
let tdx_enabled = vm_config.lock().unwrap().tdx.is_some();
let phys_bits = vm::physical_bits(vm_config.lock().unwrap().cpus.max_phys_bits);
arch::generate_common_cpuid(
hypervisor,
@@ -1429,7 +1427,7 @@ impl Vmm {
phys_bits,
vm_config.lock().unwrap().cpus.kvm_hyperv,
#[cfg(feature = "tdx")]
tdx_enabled,
vm_config.lock().unwrap().is_tdx_enabled(),
)
.map_err(|e| {
MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e))
@@ -1612,8 +1610,6 @@ impl Vmm {
let dest_cpuid = &{
let vm_config = &src_vm_config.lock().unwrap();
#[cfg(feature = "tdx")]
let tdx_enabled = vm_config.tdx.is_some();
let phys_bits = vm::physical_bits(vm_config.cpus.max_phys_bits);
arch::generate_common_cpuid(
self.hypervisor.clone(),
@@ -1622,7 +1618,7 @@ impl Vmm {
phys_bits,
vm_config.cpus.kvm_hyperv,
#[cfg(feature = "tdx")]
tdx_enabled,
vm_config.is_tdx_enabled(),
)
.map_err(|e| {
MigratableError::MigrateReceive(anyhow!("Error generating common cpuid: {:?}", e))
@@ -2053,8 +2049,6 @@ mod unit_tests {
sgx_epc: None,
numa: None,
watchdog: false,
#[cfg(feature = "tdx")]
tdx: None,
#[cfg(feature = "gdb")]
gdb: false,
platform: None,


@@ -285,6 +285,10 @@ pub enum Error {
#[error("Error finalizing TDX VM: {0}")]
FinalizeTdx(#[source] hypervisor::HypervisorVmError),
#[cfg(feature = "tdx")]
#[error("TDX firmware missing")]
TdxFirmwareMissing,
#[cfg(feature = "tdx")]
#[error("Invalid TDX payload type")]
InvalidPayloadType,
@@ -515,7 +519,9 @@ impl Vm {
Self::create_numa_nodes(config.lock().unwrap().numa.clone(), &memory_manager)?;
#[cfg(feature = "tdx")]
let force_iommu = config.lock().unwrap().tdx.is_some();
let tdx_enabled = config.lock().unwrap().is_tdx_enabled();
#[cfg(feature = "tdx")]
let force_iommu = tdx_enabled;
#[cfg(not(feature = "tdx"))]
let force_iommu = false;
@@ -559,8 +565,6 @@ impl Vm {
});
let exit_evt_clone = exit_evt.try_clone().map_err(Error::EventFdClone)?;
#[cfg(feature = "tdx")]
let tdx_enabled = config.lock().unwrap().tdx.is_some();
let cpus_config = { &config.lock().unwrap().cpus.clone() };
let cpu_manager = cpu::CpuManager::new(
cpus_config,
@@ -723,7 +727,7 @@ impl Vm {
let timestamp = Instant::now();
#[cfg(feature = "tdx")]
let tdx_enabled = config.lock().unwrap().tdx.is_some();
let tdx_enabled = config.lock().unwrap().is_tdx_enabled();
hypervisor.check_required_extensions().unwrap();
#[cfg(feature = "tdx")]
let vm = hypervisor
@@ -1136,7 +1140,7 @@ impl Vm {
) -> Result<Option<thread::JoinHandle<Result<EntryPoint>>>> {
// Kernel with TDX is loaded in a different manner
#[cfg(feature = "tdx")]
if config.lock().unwrap().tdx.is_some() {
if config.lock().unwrap().is_tdx_enabled() {
return Ok(None);
}
@@ -1784,10 +1788,19 @@ impl Vm {
#[cfg(feature = "tdx")]
fn extract_tdvf_sections(&mut self) -> Result<Vec<TdvfSection>> {
use arch::x86_64::tdx::*;
let firmware_path = self
.config
.lock()
.unwrap()
.payload
.as_ref()
.unwrap()
.firmware
.clone()
.ok_or(Error::TdxFirmwareMissing)?;
// The TDVF file contains a table of sections as well as code
let mut firmware_file =
File::open(&self.config.lock().unwrap().tdx.as_ref().unwrap().firmware)
.map_err(Error::LoadTdvf)?;
let mut firmware_file = File::open(firmware_path).map_err(Error::LoadTdvf)?;
// For all the sections allocate some RAM backing them
parse_tdvf_sections(&mut firmware_file).map_err(Error::ParseTdvf)
@@ -1882,9 +1895,17 @@ impl Vm {
}
// The TDVF file contains a table of sections as well as code
let mut firmware_file =
File::open(&self.config.lock().unwrap().tdx.as_ref().unwrap().firmware)
.map_err(Error::LoadTdvf)?;
let firmware_path = self
.config
.lock()
.unwrap()
.payload
.as_ref()
.unwrap()
.firmware
.clone()
.ok_or(Error::TdxFirmwareMissing)?;
let mut firmware_file = File::open(firmware_path).map_err(Error::LoadTdvf)?;
// The guest memory at this point now has all the required regions so it
// is safe to copy from the TDVF file into it.
@@ -2115,7 +2136,7 @@ impl Vm {
fn create_acpi_tables(&self) -> Option<GuestAddress> {
#[cfg(feature = "tdx")]
if self.config.lock().unwrap().tdx.is_some() {
if self.config.lock().unwrap().is_tdx_enabled() {
return None;
}
@@ -2166,10 +2187,13 @@ impl Vm {
// finish.
let entry_point = self.entry_point()?;
#[cfg(feature = "tdx")]
let tdx_enabled = self.config.lock().unwrap().is_tdx_enabled();
// The initial TDX configuration must be done before the vCPUs are
// created
#[cfg(feature = "tdx")]
if self.config.lock().unwrap().tdx.is_some() {
if tdx_enabled {
self.init_tdx()?;
}
@@ -2181,7 +2205,7 @@ impl Vm {
.map_err(Error::CpuManager)?;
#[cfg(feature = "tdx")]
let sections = if self.config.lock().unwrap().tdx.is_some() {
let sections = if tdx_enabled {
self.extract_tdvf_sections()?
} else {
Vec::new()
@@ -2189,7 +2213,7 @@ impl Vm {
// Configuring the TDX regions requires that the vCPUs are created.
#[cfg(feature = "tdx")]
let hob_address = if self.config.lock().unwrap().tdx.is_some() {
let hob_address = if tdx_enabled {
// TDX sections are written to memory.
self.populate_tdx_sections(&sections)?
} else {
@@ -2715,9 +2739,12 @@ impl Snapshottable for Vm {
fn snapshot(&mut self) -> std::result::Result<Snapshot, MigratableError> {
event!("vm", "snapshotting");
#[cfg(feature = "tdx")]
let tdx_enabled = self.config.lock().unwrap().is_tdx_enabled();
#[cfg(feature = "tdx")]
{
if self.config.lock().unwrap().tdx.is_some() {
if tdx_enabled {
return Err(MigratableError::Snapshot(anyhow!(
"Snapshot not possible with TDX VM"
)));
@@ -2733,8 +2760,6 @@ impl Snapshottable for Vm {
#[cfg(all(feature = "kvm", target_arch = "x86_64"))]
let common_cpuid = {
#[cfg(feature = "tdx")]
let tdx_enabled = self.config.lock().unwrap().tdx.is_some();
let phys_bits = physical_bits(self.config.lock().unwrap().cpus.max_phys_bits);
arch::generate_common_cpuid(
self.hypervisor.clone(),