Commit 9041aaa

Update worker thread for TEE

For TEE VMs, use the same sender as we would for macOS or an x86 VM with a split IRQCHIP. Additionally, use a channel for inter-process communication instead of an EventFd.

Signed-off-by: Jake Correnti <[email protected]>

1 parent 52bcaff commit 9041aaa
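The commit swaps the EventFd semaphore handshake for a crossbeam channel that carries a per-request response channel. A minimal sketch of the pattern, using trimmed mirrors of the MemoryProperties/WorkerMessage types from this commit; request_conversion is a hypothetical helper, not code from the diff:

use crossbeam_channel::{unbounded, Sender};

// Trimmed mirrors of the types this commit moves into utils::worker_message.
#[derive(Debug)]
pub struct MemoryProperties {
    pub gpa: u64,
    pub size: u64,
    pub private: bool,
}

#[derive(Debug)]
pub enum WorkerMessage {
    ConvertMemory(Sender<bool>, MemoryProperties),
}

// Hypothetical helper showing the vCPU-side pattern: bundle a fresh response
// channel with the request, then block on the typed reply instead of reading
// an EventFd semaphore. `false` means the worker failed or hung up.
pub fn request_conversion(worker: &Sender<WorkerMessage>, props: MemoryProperties) -> bool {
    let (tx, rx) = unbounded();
    if worker.send(WorkerMessage::ConvertMemory(tx, props)).is_err() {
        return false;
    }
    rx.recv().unwrap_or(false)
}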

File tree

5 files changed: +146 -137 lines changed

src/libkrun/src/lib.rs

Lines changed: 5 additions & 117 deletions
@@ -5,8 +5,6 @@ use std::collections::hash_map::Entry;
 use std::collections::HashMap;
 use std::convert::TryInto;
 use std::env;
-#[cfg(feature = "tee")]
-use std::ffi::c_void;
 use std::ffi::CStr;
 #[cfg(target_os = "linux")]
 use std::ffi::CString;
@@ -53,17 +51,6 @@ use vmm::vmm_config::machine_config::VmConfig;
 use vmm::vmm_config::net::NetworkInterfaceConfig;
 use vmm::vmm_config::vsock::VsockDeviceConfig;
 
-#[cfg(feature = "tee")]
-use kvm_bindings::{kvm_memory_attributes, KVM_MEMORY_ATTRIBUTE_PRIVATE};
-
-#[cfg(feature = "tee")]
-use vm_memory::{guest_memory::GuestMemory, GuestAddress, GuestMemoryRegion, MemoryRegionAddress};
-
-#[cfg(feature = "tee")]
-use libc::{
-    fallocate, madvise, EFD_SEMAPHORE, FALLOC_FL_KEEP_SIZE, FALLOC_FL_PUNCH_HOLE, MADV_DONTNEED,
-};
-
 // Value returned on success. We use libc's errors otherwise.
 const KRUN_SUCCESS: i32 = 0;
 // Maximum number of arguments/environment variables we allow
@@ -1504,24 +1491,12 @@ pub extern "C" fn krun_start_enter(ctx_id: u32) -> i32 {
         }
     }
 
-    #[cfg(feature = "tee")]
-    let (pm_sender, pm_receiver) = unbounded();
-    #[cfg(feature = "tee")]
-    let pm_efd =
-        EventFd::new(EFD_SEMAPHORE).expect("unable to create TEE memory properties eventfd");
-    let (sender, receiver) = unbounded();
+    let (sender, _receiver) = unbounded();
 
     let _vmm = match vmm::builder::build_microvm(
         &ctx_cfg.vmr,
         &mut event_manager,
         ctx_cfg.shutdown_efd,
-        #[cfg(feature = "tee")]
-        (
-            pm_sender,
-            pm_efd
-                .try_clone()
-                .expect("unable to clone TEE memory properties eventfd"),
-        ),
         sender,
     ) {
         Ok(vmm) => vmm,
@@ -1531,105 +1506,18 @@ pub extern "C" fn krun_start_enter(ctx_id: u32) -> i32 {
         }
     };
 
-    #[cfg(feature = "tee")]
-    let mapper_vmm = _vmm.clone();
-
     #[cfg(target_os = "macos")]
     if ctx_cfg.gpu_virgl_flags.is_some() {
-        vmm::worker::start_worker_thread(_vmm.clone(), receiver).unwrap();
+        vmm::worker::start_worker_thread(_vmm.clone(), _receiver).unwrap();
     }
 
     #[cfg(target_arch = "x86_64")]
     if ctx_cfg.vmr.split_irqchip {
-        vmm::worker::start_worker_thread(_vmm.clone(), receiver.clone()).unwrap();
+        vmm::worker::start_worker_thread(_vmm.clone(), _receiver.clone()).unwrap();
     }
 
-    #[cfg(feature = "tee")]
-    let guest_mem = _vmm.lock().unwrap().guest_memory().clone();
-
-    #[cfg(feature = "tee")]
-    std::thread::Builder::new()
-        .name("TEE memory properties worker".into())
-        .spawn(move || loop {
-            match pm_receiver.recv() {
-                Err(e) => error!("Error in pm receiver: {:?}", e),
-                Ok(m) => {
-                    let (guest_memfd, region_start) = mapper_vmm
-                        .lock()
-                        .unwrap()
-                        .kvm_vm()
-                        .guest_memfd_get(m.gpa)
-                        .unwrap_or_else(|| panic!("unable to find KVM guest_memfd for memory region corresponding to GPA 0x{:x}", m.gpa));
-
-                    let attributes: u64 = if m.private {
-                        KVM_MEMORY_ATTRIBUTE_PRIVATE as u64
-                    } else {
-                        0
-                    };
-
-                    let attr = kvm_memory_attributes {
-                        address: m.gpa,
-                        size: m.size,
-                        attributes,
-                        flags: 0,
-                    };
-
-                    mapper_vmm
-                        .lock()
-                        .unwrap()
-                        .kvm_vm()
-                        .fd()
-                        .set_memory_attributes(attr)
-                        .unwrap_or_else(|_| panic!("unable to set memory attributes for memory region corresponding to guest address 0x{:x}", m.gpa));
-
-                    let region = guest_mem.find_region(GuestAddress(m.gpa));
-                    if region.is_none() {
-                        error!("guest memory region corresponding to GPA 0x{:x} not found", m.gpa);
-                        pm_efd.write(1).unwrap();
-                        continue;
-                    }
-
-                    let offset = m.gpa - region_start;
-
-                    if m.private {
-                        let region_addr = MemoryRegionAddress(offset);
-
-                        let host_startaddr = region
-                            .unwrap()
-                            .get_host_address(region_addr)
-                            .expect("host address corresponding to memory region address 0x{:x} not found");
-
-                        let ret = unsafe {
-                            madvise(
-                                host_startaddr as *mut c_void,
-                                m.size.try_into().unwrap(),
-                                MADV_DONTNEED,
-                            )
-                        };
-
-                        if ret < 0 {
-                            error!("unable to advise kernel that memory region corresponding to GPA 0x{:x} will likely not be needed (madvise)", m.gpa);
-                        }
-                    } else {
-                        let ret = unsafe {
-                            fallocate(
-                                guest_memfd,
-                                FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-                                offset as i64,
-                                m.size as i64,
-                            )
-                        };
-
-                        if ret < 0 {
-                            error!("unable to allocate space in guest_memfd for shared memory (fallocate)");
-                        }
-                    }
-
-                    pm_efd.write(1).unwrap();
-                }
-            }
-        })
-        .unwrap();
+    #[cfg(feature = "amd-sev")]
+    vmm::worker::start_worker_thread(_vmm.clone(), _receiver.clone()).unwrap();
 
     loop {
         match event_manager.run() {
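The thread deleted above held the actual conversion logic, which presumably now runs behind WorkerMessage::ConvertMemory in vmm::worker (that file is part of this commit but is not captured here). A sketch of the same conversion, assuming the guest_memfd, region offset, and host address have already been resolved from the GPA as in the removed code; convert_memory is a hypothetical name:

use kvm_bindings::{kvm_memory_attributes, KVM_MEMORY_ATTRIBUTE_PRIVATE};
use kvm_ioctls::VmFd;
use libc::{c_void, fallocate, madvise, FALLOC_FL_KEEP_SIZE, FALLOC_FL_PUNCH_HOLE, MADV_DONTNEED};

// Hypothetical conversion helper mirroring the removed thread: flip the KVM
// memory attributes for [gpa, gpa + size), then reclaim whichever backing is
// no longer reachable by the guest. Returns false on any failure so the
// caller can ack the ConvertMemory request accordingly.
fn convert_memory(
    vm: &VmFd,
    gpa: u64,
    size: u64,
    private: bool,
    guest_memfd: i32,       // memfd backing the private view, resolved from the GPA
    offset: u64,            // gpa - region_start, as in the removed code
    host_addr: *mut c_void, // host mapping of the shared view at this offset
) -> bool {
    let attr = kvm_memory_attributes {
        address: gpa,
        size,
        attributes: if private { KVM_MEMORY_ATTRIBUTE_PRIVATE as u64 } else { 0 },
        flags: 0,
    };
    if vm.set_memory_attributes(attr).is_err() {
        return false;
    }
    let ret = if private {
        // Converting to private: the shared pages won't be touched again,
        // so drop them from the host mapping.
        unsafe { madvise(host_addr, size as usize, MADV_DONTNEED) }
    } else {
        // Converting to shared: punch the private pages out of the guest_memfd.
        unsafe {
            fallocate(
                guest_memfd,
                FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                offset as i64,
                size as i64,
            )
        }
    };
    ret == 0
}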

src/utils/src/worker_message.rs

Lines changed: 8 additions & 0 deletions
@@ -1,3 +1,10 @@
+#[derive(Debug)]
+pub struct MemoryProperties {
+    pub gpa: u64,
+    pub size: u64,
+    pub private: bool,
+}
+
 #[derive(Debug)]
 pub enum WorkerMessage {
     #[cfg(target_arch = "x86_64")]
@@ -11,4 +18,5 @@ pub enum WorkerMessage {
     GpuAddMapping(crossbeam_channel::Sender<bool>, u64, u64, u64),
     #[cfg(target_os = "macos")]
     GpuRemoveMapping(crossbeam_channel::Sender<bool>, u64, u64),
+    ConvertMemory(crossbeam_channel::Sender<bool>, MemoryProperties),
 }
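With the new variant in place, a worker loop can service conversion requests and acknowledge over the bundled Sender<bool>. A minimal sketch; the actual loop lives in vmm::worker (not shown in this capture), and handle_conversion is a stand-in for the real conversion logic:

use crossbeam_channel::Receiver;
use utils::worker_message::{MemoryProperties, WorkerMessage};

// Stand-in for the real conversion (set_memory_attributes plus reclaim).
fn handle_conversion(props: &MemoryProperties) -> bool {
    let _ = props;
    true
}

// Sketch of a worker loop servicing ConvertMemory; the vCPU blocks on the
// reply, so a response must be sent on every path.
fn worker_loop(receiver: Receiver<WorkerMessage>) {
    while let Ok(msg) = receiver.recv() {
        match msg {
            WorkerMessage::ConvertMemory(reply, props) => {
                let _ = reply.send(handle_conversion(&props));
            }
            _ => { /* IRQ-line and GPU-mapping messages handled elsewhere */ }
        }
    }
}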

src/vmm/src/builder.rs

Lines changed: 7 additions & 7 deletions
@@ -23,8 +23,6 @@ use super::{Error, Vmm};
 #[cfg(target_arch = "x86_64")]
 use crate::device_manager::legacy::PortIODeviceManager;
 use crate::device_manager::mmio::MMIODeviceManager;
-#[cfg(feature = "tee")]
-use crate::linux::vstate::MemoryProperties;
 use crate::resources::VmResources;
 use crate::vmm_config::external_kernel::{ExternalKernel, KernelFormat};
 #[cfg(all(target_os = "linux", target_arch = "aarch64"))]
@@ -511,7 +509,6 @@ pub fn build_microvm(
     vm_resources: &super::resources::VmResources,
     event_manager: &mut EventManager,
     _shutdown_efd: Option<EventFd>,
-    #[cfg(feature = "tee")] pm_sender: (Sender<MemoryProperties>, EventFd),
     _sender: Sender<WorkerMessage>,
 ) -> std::result::Result<Arc<Mutex<Vmm>>, StartMicrovmError> {
     let payload = choose_payload(vm_resources)?;
@@ -667,7 +664,10 @@ pub fn build_microvm(
     #[cfg(target_arch = "x86_64")]
     {
         let ioapic: Box<dyn IrqChipT> = if vm_resources.split_irqchip {
-            Box::new(IoApic::new(vm.fd(), _sender).map_err(StartMicrovmError::CreateKvmIrqChip)?)
+            Box::new(
+                IoApic::new(vm.fd(), _sender.clone())
+                    .map_err(StartMicrovmError::CreateKvmIrqChip)?,
+            )
         } else {
             Box::new(KvmIoapic::new(vm.fd()).map_err(StartMicrovmError::CreateKvmIrqChip)?)
         };
@@ -689,7 +689,7 @@ pub fn build_microvm(
             &pio_device_manager.io_bus,
             &exit_evt,
             #[cfg(feature = "tee")]
-            pm_sender,
+            _sender,
         )
         .map_err(StartMicrovmError::Internal)?;
     }
@@ -1456,7 +1456,7 @@ fn create_vcpus_x86_64(
     entry_addr: GuestAddress,
     io_bus: &devices::Bus,
     exit_evt: &EventFd,
-    #[cfg(feature = "tee")] pm_sender: (Sender<MemoryProperties>, EventFd),
+    #[cfg(feature = "tee")] pm_sender: Sender<WorkerMessage>,
 ) -> super::Result<Vec<Vcpu>> {
     let mut vcpus = Vec::with_capacity(vcpu_config.vcpu_count as usize);
     for cpu_index in 0..vcpu_config.vcpu_count {
@@ -1468,7 +1468,7 @@ fn create_vcpus_x86_64(
             io_bus.clone(),
             exit_evt.try_clone().map_err(Error::EventFd)?,
             #[cfg(feature = "tee")]
-            (pm_sender.0.clone(), pm_sender.1.try_clone().unwrap()),
+            pm_sender.clone(),
         )
         .map_err(Error::Vcpu)?;
 
src/vmm/src/linux/vstate.rs

Lines changed: 26 additions & 13 deletions
@@ -50,6 +50,8 @@ use kvm_ioctls::{Cap::*, *};
 use utils::eventfd::EventFd;
 use utils::signal::{register_signal_handler, sigrtmin, Killable};
 use utils::sm::StateMachine;
+#[cfg(feature = "tee")]
+use utils::worker_message::{MemoryProperties, WorkerMessage};
 use vm_memory::{
     Address, GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap, GuestMemoryRegion,
     GuestRegionMmap,
@@ -746,13 +748,6 @@ pub struct VmState {
     ioapic: kvm_irqchip,
 }
 
-#[cfg(feature = "tee")]
-pub struct MemoryProperties {
-    pub gpa: u64,
-    pub size: u64,
-    pub private: bool,
-}
-
 /// Encapsulates configuration parameters for the guest vCPUS.
 #[derive(Debug, Eq, PartialEq)]
 pub struct VcpuConfig {
@@ -796,7 +791,7 @@ pub struct Vcpu {
     response_sender: Sender<VcpuResponse>,
 
     #[cfg(feature = "tee")]
-    pm_sender: (Sender<MemoryProperties>, EventFd),
+    pm_sender: Sender<WorkerMessage>,
 }
 
 impl Vcpu {
@@ -900,7 +895,7 @@ impl Vcpu {
         msr_list: MsrList,
         io_bus: devices::Bus,
         exit_evt: EventFd,
-        #[cfg(feature = "tee")] pm_sender: (Sender<MemoryProperties>, EventFd),
+        #[cfg(feature = "tee")] pm_sender: Sender<WorkerMessage>,
     ) -> Result<Self> {
         let kvm_vcpu = vm_fd.create_vcpu(id as u64).map_err(Error::VcpuFd)?;
         let (event_sender, event_receiver) = unbounded();
@@ -1222,8 +1217,17 @@ impl Vcpu {
 
         let mem_properties = MemoryProperties { gpa, size, private };
 
-        self.pm_sender.0.send(mem_properties).unwrap();
-        let _ = self.pm_sender.1.read().unwrap();
+        let (response_sender, response_receiver) = unbounded();
+        self.pm_sender
+            .send(WorkerMessage::ConvertMemory(
+                response_sender.clone(),
+                mem_properties,
+            ))
+            .unwrap();
+        if !response_receiver.recv().unwrap() {
+            error!("Unable to convert memory with properties: gpa: 0x{:x} size: 0x{:x} to_private: {}", gpa, size, private);
+            return Err(Error::VcpuUnhandledKvmExit);
+        }
         Ok(VcpuEmulation::Handled)
     }
     #[cfg(target_arch = "x86_64")]
@@ -1242,8 +1246,17 @@ impl Vcpu {
 
         let mem_properties = MemoryProperties { gpa, size, private };
 
-        self.pm_sender.0.send(mem_properties).unwrap();
-        let _ = self.pm_sender.1.read().unwrap();
+        let (response_sender, response_receiver) = unbounded();
+        self.pm_sender
+            .send(WorkerMessage::ConvertMemory(
+                response_sender.clone(),
+                mem_properties,
+            ))
+            .unwrap();
+        if !response_receiver.recv().unwrap() {
+            error!("Unable to convert memory with properties: gpa: 0x{:x} size: 0x{:x} to_private: {}", gpa, size, private);
+            return Err(Error::VcpuUnhandledKvmExit);
+        }
 
         Ok(VcpuEmulation::Handled)
     }
