Skip to content

Make snapshots region aware #742

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Jul 30, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 29 additions & 17 deletions src/hyperlight_host/src/hypervisor/hyperv_linux.rs
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,7 @@ use super::{
use super::{HyperlightExit, Hypervisor, InterruptHandle, LinuxInterruptHandle, VirtualCPU};
#[cfg(gdb)]
use crate::HyperlightError;
use crate::hypervisor::get_memory_access_violation;
use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags};
use crate::mem::ptr::{GuestPtr, RawPtr};
use crate::mem::shared_mem::HostSharedMemory;
Expand Down Expand Up @@ -312,12 +313,15 @@ pub(crate) struct HypervLinuxDriver {
page_size: usize,
vm_fd: VmFd,
vcpu_fd: VcpuFd,
entrypoint: u64,
mem_regions: Vec<MemoryRegion>,
orig_rsp: GuestPtr,
entrypoint: u64,
interrupt_handle: Arc<LinuxInterruptHandle>,
mem_mgr: Option<MemMgrWrapper<HostSharedMemory>>,
host_funcs: Option<Arc<Mutex<FunctionRegistry>>>,

sandbox_regions: Vec<MemoryRegion>, // Initially mapped regions when sandbox is created
mmap_regions: Vec<MemoryRegion>, // Later mapped regions

#[cfg(gdb)]
debug: Option<MshvDebug>,
#[cfg(gdb)]
Expand Down Expand Up @@ -447,7 +451,8 @@ impl HypervLinuxDriver {
page_size: 0,
vm_fd,
vcpu_fd,
mem_regions,
sandbox_regions: mem_regions,
mmap_regions: Vec::new(),
entrypoint: entrypoint_ptr.absolute()?,
orig_rsp: rsp_ptr,
interrupt_handle: interrupt_handle.clone(),
Expand Down Expand Up @@ -540,8 +545,11 @@ impl Debug for HypervLinuxDriver {
f.field("Entrypoint", &self.entrypoint)
.field("Original RSP", &self.orig_rsp);

for region in &self.mem_regions {
f.field("Memory Region", &region);
for region in &self.sandbox_regions {
f.field("Sandbox Memory Region", &region);
}
for region in &self.mmap_regions {
f.field("Mapped Memory Region", &region);
}

let regs = self.vcpu_fd.get_regs();
Expand Down Expand Up @@ -631,20 +639,24 @@ impl Hypervisor for HypervLinuxDriver {
}
let mshv_region: mshv_user_mem_region = rgn.to_owned().into();
self.vm_fd.map_user_memory(mshv_region)?;
self.mem_regions.push(rgn.to_owned());
self.mmap_regions.push(rgn.to_owned());
Ok(())
}

#[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
unsafe fn unmap_regions(&mut self, n: u64) -> Result<()> {
for rgn in self
.mem_regions
.split_off(self.mem_regions.len() - n as usize)
{
let mshv_region: mshv_user_mem_region = rgn.to_owned().into();
unsafe fn unmap_region(&mut self, region: &MemoryRegion) -> Result<()> {
if let Some(pos) = self.mmap_regions.iter().position(|r| r == region) {
let removed_region = self.mmap_regions.remove(pos);
let mshv_region: mshv_user_mem_region = removed_region.into();
self.vm_fd.unmap_user_memory(mshv_region)?;
Ok(())
} else {
Err(new_error!("Tried to unmap region that is not mapped"))
}
Ok(())
}

/// Returns an iterator over the regions mapped after sandbox creation
/// (the `mmap_regions` list), excluding the initial sandbox regions.
fn get_mapped_regions(&self) -> Box<dyn ExactSizeIterator<Item = &MemoryRegion> + '_> {
    let later_mapped = self.mmap_regions.iter();
    Box::new(later_mapped)
}

#[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
Expand Down Expand Up @@ -867,9 +879,9 @@ impl Hypervisor for HypervLinuxDriver {
gpa,
&self
);
match self.get_memory_access_violation(
match get_memory_access_violation(
gpa as usize,
&self.mem_regions,
self.sandbox_regions.iter().chain(self.mmap_regions.iter()),
access_info,
) {
Some(access_info_violation) => access_info_violation,
Expand Down Expand Up @@ -999,7 +1011,7 @@ impl Hypervisor for HypervLinuxDriver {
});

Ok(Some(crashdump::CrashDumpContext::new(
&self.mem_regions,
&self.sandbox_regions,
regs,
xsave.buffer.to_vec(),
self.entrypoint,
Expand Down Expand Up @@ -1180,7 +1192,7 @@ impl Drop for HypervLinuxDriver {
#[instrument(skip_all, parent = Span::current(), level = "Trace")]
fn drop(&mut self) {
self.interrupt_handle.dropped.store(true, Ordering::Relaxed);
for region in &self.mem_regions {
for region in self.sandbox_regions.iter().chain(self.mmap_regions.iter()) {
let mshv_region: mshv_user_mem_region = region.to_owned().into();
match self.vm_fd.unmap_user_memory(mshv_region) {
Ok(_) => (),
Expand Down
40 changes: 25 additions & 15 deletions src/hyperlight_host/src/hypervisor/hyperv_windows.rs
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ use super::{
};
use super::{HyperlightExit, Hypervisor, InterruptHandle, VirtualCPU};
use crate::hypervisor::fpu::FP_CONTROL_WORD_DEFAULT;
use crate::hypervisor::get_memory_access_violation;
use crate::hypervisor::wrappers::WHvGeneralRegisters;
use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags};
use crate::mem::ptr::{GuestPtr, RawPtr};
Expand Down Expand Up @@ -281,10 +282,13 @@ pub(crate) struct HypervWindowsDriver {
_surrogate_process: SurrogateProcess, // we need to keep a reference to the SurrogateProcess for the duration of the driver since otherwise it will dropped and the memory mapping will be unmapped and the surrogate process will be returned to the pool
entrypoint: u64,
orig_rsp: GuestPtr,
mem_regions: Vec<MemoryRegion>,
interrupt_handle: Arc<WindowsInterruptHandle>,
mem_mgr: Option<MemMgrWrapper<HostSharedMemory>>,
host_funcs: Option<Arc<Mutex<FunctionRegistry>>>,

sandbox_regions: Vec<MemoryRegion>, // Initially mapped regions when sandbox is created
mmap_regions: Vec<MemoryRegion>, // Later mapped regions

#[cfg(gdb)]
debug: Option<HypervDebug>,
#[cfg(gdb)]
Expand Down Expand Up @@ -358,7 +362,8 @@ impl HypervWindowsDriver {
_surrogate_process: surrogate_process,
entrypoint,
orig_rsp: GuestPtr::try_from(RawPtr::from(rsp))?,
mem_regions,
sandbox_regions: mem_regions,
mmap_regions: Vec::new(),
interrupt_handle: interrupt_handle.clone(),
mem_mgr: None,
host_funcs: None,
Expand Down Expand Up @@ -457,8 +462,11 @@ impl Debug for HypervWindowsDriver {
fs.field("Entrypoint", &self.entrypoint)
.field("Original RSP", &self.orig_rsp);

for region in &self.mem_regions {
fs.field("Memory Region", &region);
for region in &self.sandbox_regions {
fs.field("Sandbox Memory Region", &region);
}
for region in &self.mmap_regions {
fs.field("Mapped Memory Region", &region);
}

// Get the registers
Expand Down Expand Up @@ -631,18 +639,17 @@ impl Hypervisor for HypervWindowsDriver {
}

#[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
unsafe fn map_region(&mut self, _rgn: &MemoryRegion) -> Result<()> {
unsafe fn map_region(&mut self, _region: &MemoryRegion) -> Result<()> {
log_then_return!("Mapping host memory into the guest not yet supported on this platform");
}

#[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
unsafe fn unmap_regions(&mut self, n: u64) -> Result<()> {
if n > 0 {
log_then_return!(
"Mapping host memory into the guest not yet supported on this platform"
);
}
Ok(())
unsafe fn unmap_region(&mut self, _region: &MemoryRegion) -> Result<()> {
log_then_return!("Mapping host memory into the guest not yet supported on this platform");
}

/// Returns an iterator over the later-mapped regions; on this platform the
/// list is always empty since `map_region` is unsupported, but the accessor
/// keeps the trait contract uniform across drivers.
fn get_mapped_regions(&self) -> Box<dyn ExactSizeIterator<Item = &MemoryRegion> + '_> {
    Box::new(self.mmap_regions.as_slice().iter())
}

#[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
Expand Down Expand Up @@ -824,8 +831,11 @@ impl Hypervisor for HypervWindowsDriver {
gpa, access_info, &self
);

match self.get_memory_access_violation(gpa as usize, &self.mem_regions, access_info)
{
match get_memory_access_violation(
gpa as usize,
self.sandbox_regions.iter().chain(self.mmap_regions.iter()),
access_info,
) {
Some(access_info) => access_info,
None => HyperlightExit::Mmio(gpa),
}
Expand Down Expand Up @@ -934,7 +944,7 @@ impl Hypervisor for HypervWindowsDriver {
});

Ok(Some(crashdump::CrashDumpContext::new(
&self.mem_regions,
&self.sandbox_regions,
regs,
xsave,
self.entrypoint,
Expand Down
70 changes: 53 additions & 17 deletions src/hyperlight_host/src/hypervisor/kvm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ use super::{
use super::{HyperlightExit, Hypervisor, InterruptHandle, LinuxInterruptHandle, VirtualCPU};
#[cfg(gdb)]
use crate::HyperlightError;
use crate::hypervisor::get_memory_access_violation;
use crate::mem::memory_region::{MemoryRegion, MemoryRegionFlags};
use crate::mem::ptr::{GuestPtr, RawPtr};
use crate::mem::shared_mem::HostSharedMemory;
Expand Down Expand Up @@ -294,10 +295,15 @@ pub(crate) struct KVMDriver {
vcpu_fd: VcpuFd,
entrypoint: u64,
orig_rsp: GuestPtr,
mem_regions: Vec<MemoryRegion>,
interrupt_handle: Arc<LinuxInterruptHandle>,
mem_mgr: Option<MemMgrWrapper<HostSharedMemory>>,
host_funcs: Option<Arc<Mutex<FunctionRegistry>>>,

sandbox_regions: Vec<MemoryRegion>, // Initially mapped regions when sandbox is created
mmap_regions: Vec<(MemoryRegion, u32)>, // Later mapped regions (region, slot number)
next_slot: u32, // Monotonically increasing slot number
freed_slots: Vec<u32>, // Reusable slots from unmapped regions

#[cfg(gdb)]
debug: Option<KvmDebug>,
#[cfg(gdb)]
Expand Down Expand Up @@ -384,7 +390,10 @@ impl KVMDriver {
vcpu_fd,
entrypoint,
orig_rsp: rsp_gp,
mem_regions,
next_slot: mem_regions.len() as u32,
sandbox_regions: mem_regions,
mmap_regions: Vec::new(),
freed_slots: Vec::new(),
interrupt_handle: interrupt_handle.clone(),
mem_mgr: None,
host_funcs: None,
Expand Down Expand Up @@ -434,8 +443,11 @@ impl Debug for KVMDriver {
let mut f = f.debug_struct("KVM Driver");
// Output each memory region

for region in &self.mem_regions {
f.field("Memory Region", &region);
for region in &self.sandbox_regions {
f.field("Sandbox Memory Region", &region);
}
for region in &self.mmap_regions {
f.field("Mapped Memory Region", &region);
}
let regs = self.vcpu_fd.get_regs();
// check that regs is OK and then set field in debug struct
Expand Down Expand Up @@ -517,25 +529,45 @@ impl Hypervisor for KVMDriver {
}

let mut kvm_region: kvm_userspace_memory_region = region.clone().into();
kvm_region.slot = self.mem_regions.len() as u32;

// Try to reuse a freed slot first, otherwise use next_slot
let slot = if let Some(freed_slot) = self.freed_slots.pop() {
freed_slot
} else {
let slot = self.next_slot;
self.next_slot += 1;
slot
};

kvm_region.slot = slot;
unsafe { self.vm_fd.set_user_memory_region(kvm_region) }?;
self.mem_regions.push(region.to_owned());
self.mmap_regions.push((region.to_owned(), slot));
Ok(())
}

#[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
unsafe fn unmap_regions(&mut self, n: u64) -> Result<()> {
let n_keep = self.mem_regions.len() - n as usize;
for (k, region) in self.mem_regions.split_off(n_keep).iter().enumerate() {
let mut kvm_region: kvm_userspace_memory_region = region.clone().into();
kvm_region.slot = (n_keep + k) as u32;
unsafe fn unmap_region(&mut self, region: &MemoryRegion) -> Result<()> {
if let Some(idx) = self.mmap_regions.iter().position(|(r, _)| r == region) {
let (region, slot) = self.mmap_regions.remove(idx);
let mut kvm_region: kvm_userspace_memory_region = region.into();
kvm_region.slot = slot;
// Setting memory_size to 0 unmaps the slot's region
// From https://docs.kernel.org/virt/kvm/api.html
// > Deleting a slot is done by passing zero for memory_size.
kvm_region.memory_size = 0;
unsafe { self.vm_fd.set_user_memory_region(kvm_region) }?;

// Add the freed slot to the reuse list
self.freed_slots.push(slot);

Ok(())
} else {
Err(new_error!("Tried to unmap region that is not mapped"))
}
Ok(())
}

/// Returns an iterator over the later-mapped regions, dropping the KVM slot
/// number each region is paired with in `mmap_regions`.
fn get_mapped_regions(&self) -> Box<dyn ExactSizeIterator<Item = &MemoryRegion> + '_> {
    let regions_without_slots = self.mmap_regions.iter().map(|(rgn, _slot)| rgn);
    Box::new(regions_without_slots)
}

#[instrument(err(Debug), skip_all, parent = Span::current(), level = "Trace")]
Expand Down Expand Up @@ -717,9 +749,11 @@ impl Hypervisor for KVMDriver {
Ok(VcpuExit::MmioRead(addr, _)) => {
crate::debug!("KVM MMIO Read -Details: Address: {} \n {:#?}", addr, &self);

match self.get_memory_access_violation(
match get_memory_access_violation(
addr as usize,
&self.mem_regions,
self.sandbox_regions
.iter()
.chain(self.mmap_regions.iter().map(|(r, _)| r)),
MemoryRegionFlags::READ,
) {
Some(access_violation_exit) => access_violation_exit,
Expand All @@ -729,9 +763,11 @@ impl Hypervisor for KVMDriver {
Ok(VcpuExit::MmioWrite(addr, _)) => {
crate::debug!("KVM MMIO Write -Details: Address: {} \n {:#?}", addr, &self);

match self.get_memory_access_violation(
match get_memory_access_violation(
addr as usize,
&self.mem_regions,
self.sandbox_regions
.iter()
.chain(self.mmap_regions.iter().map(|(r, _)| r)),
MemoryRegionFlags::WRITE,
) {
Some(access_violation_exit) => access_violation_exit,
Expand Down Expand Up @@ -847,7 +883,7 @@ impl Hypervisor for KVMDriver {
// The [`CrashDumpContext`] accepts xsave as a vector of u8, so we need to convert the
// xsave region to a vector of u8
Ok(Some(crashdump::CrashDumpContext::new(
&self.mem_regions,
&self.sandbox_regions,
regs,
xsave
.region
Expand Down
Loading
Loading