diff --git a/dev_tests/src/ratchet.rs b/dev_tests/src/ratchet.rs index bbc7c35b3..14dc0c010 100644 --- a/dev_tests/src/ratchet.rs +++ b/dev_tests/src/ratchet.rs @@ -37,7 +37,7 @@ fn ratchet_globals() -> Result<()> { ("litebox/", 9), ("litebox_platform_linux_kernel/", 6), ("litebox_platform_linux_userland/", 5), - ("litebox_platform_lvbs/", 20), + ("litebox_platform_lvbs/", 21), ("litebox_platform_multiplex/", 1), ("litebox_platform_windows_userland/", 7), ("litebox_runner_linux_userland/", 1), diff --git a/litebox_common_linux/src/vmap.rs b/litebox_common_linux/src/vmap.rs index 325ce75b3..19fac943d 100644 --- a/litebox_common_linux/src/vmap.rs +++ b/litebox_common_linux/src/vmap.rs @@ -166,12 +166,12 @@ pub enum PhysPointerError { NoMappingInfo, #[error("Overflow occurred during calculation")] Overflow, - #[error("Non-contiguous physical pages in the array")] - NonContiguousPages, #[error("The operation is unsupported on this platform")] UnsupportedOperation, #[error("Unsupported permissions: {0:#x}")] UnsupportedPermissions(u8), #[error("Memory copy failed")] CopyFailed, + #[error("Duplicate physical page address {0:#x} in the input array")] + DuplicatePhysicalAddress(usize), } diff --git a/litebox_platform_lvbs/src/arch/x86/mm/paging.rs b/litebox_platform_lvbs/src/arch/x86/mm/paging.rs index 3fd566f8e..3f5c096c1 100644 --- a/litebox_platform_lvbs/src/arch/x86/mm/paging.rs +++ b/litebox_platform_lvbs/src/arch/x86/mm/paging.rs @@ -12,7 +12,7 @@ use x86_64::{ PageTableFlags, PhysFrame, Size4KiB, Translate, frame::PhysFrameRange, mapper::{ - FlagUpdateError, MapToError, PageTableFrameMapping, TranslateResult, + CleanUp, FlagUpdateError, MapToError, PageTableFrameMapping, TranslateResult, UnmapError as X64UnmapError, }, }, @@ -365,6 +365,130 @@ impl X64PageTable<'_, M, ALIGN> { Ok(M::pa_to_va(frame_range.start.start_address()).as_mut_ptr()) } + /// Map non-contiguous physical frames to virtually contiguous addresses. + /// + /// This function maps each physical frame in `frames` to consecutive virtual addresses + /// starting from `base_va`. Unlike `map_phys_frame_range`, this allows mapping + /// non-contiguous physical pages to a contiguous virtual address range. + /// + /// # Arguments + /// - `frames` - Slice of physical frames to map (non-contiguous, no duplicate) + /// - `base_va` - Starting virtual address for the mapping + /// - `flags` - Page table flags to apply to all mappings + /// + /// # Returns + /// - `Ok(*mut u8)` — pointer to the start of the mapped virtual range + /// - `Err(MapToError::PageAlreadyMapped)` if any VA is already mapped + /// - `Err(MapToError::FrameAllocationFailed)` if page table allocation fails + /// + /// # Behavior + /// - Any existing mapping is treated as an error + /// - On error, all pages mapped by this call are unmapped (atomic) + pub(crate) fn map_non_contiguous_phys_frames( + &self, + frames: &[PhysFrame], + base_va: VirtAddr, + flags: PageTableFlags, + ) -> Result<*mut u8, MapToError> { + let mut allocator = PageTableAllocator::::new(); + let mut mapped_count: usize = 0; + + let mut inner = self.inner.lock(); + + if !base_va.is_aligned(Size4KiB::SIZE) { + return Err(MapToError::FrameAllocationFailed); + } + + // Quick pre-scan: check all target VAs for existing mappings before + // modifying any page table entries. This avoids expensive rollback when + // an overlap is detected partway through. + for i in 0..frames.len() { + let va = base_va + (i as u64) * Size4KiB::SIZE; + match inner.translate(va) { + TranslateResult::Mapped { frame, .. 
} => { + let existing_frame = + PhysFrame::::containing_address(frame.start_address()); + return Err(MapToError::PageAlreadyMapped(existing_frame)); + } + TranslateResult::NotMapped => {} + TranslateResult::InvalidFrameAddress(_) => { + return Err(MapToError::FrameAllocationFailed); + } + } + } + + // All VAs verified as unmapped — proceed with mapping. + let table_flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE; + for (i, &target_frame) in frames.iter().enumerate() { + let va = base_va + (i as u64) * Size4KiB::SIZE; + let page: Page = Page::containing_address(va); + + // Note: Since we lock the entire page table for the duration of this function (`self.inner.lock()`), + // there should be no concurrent modifications to the page table. If we allow concurrent mappings + // in the future, we should re-check the VA here before mapping and return an error + // if it is no longer unmapped. + + match unsafe { + inner.map_to_with_table_flags( + page, + target_frame, + flags, + table_flags, + &mut allocator, + ) + } { + Ok(fl) => { + mapped_count += 1; + if FLUSH_TLB { + fl.flush(); + } + } + Err(e) => { + Self::rollback_mapped_pages(&mut inner, base_va, mapped_count, &mut allocator); + return Err(e); + } + } + } + + Ok(base_va.as_mut_ptr()) + } + + /// Rollback helper: unmap the first `count` pages starting from `base_va` + /// and free any intermediate page-table frames (P1/P2/P3) that became empty. + /// + /// Note: The caller must already hold the page table lock (`self.inner`). + /// This function accepts the locked `MappedPageTable` directly. + fn rollback_mapped_pages( + inner: &mut MappedPageTable<'_, FrameMapping>, + base_va: VirtAddr, + count: usize, + allocator: &mut PageTableAllocator, + ) { + for i in 0..count { + let va = base_va + (i as u64) * Size4KiB::SIZE; + let page: Page = Page::containing_address(va); + if let Ok((_frame, fl)) = inner.unmap(page) + && FLUSH_TLB + { + fl.flush(); + } + } + + // Free any intermediate page-table frames (P1/P2/P3) that are now + // empty after unmapping. + if count > 0 { + let start = Page::::containing_address(base_va); + let end = Page::::containing_address( + base_va + ((count - 1) as u64) * Size4KiB::SIZE, + ); + // Safety: the vmap VA range is used exclusively by this page table + // and all leaf entries have just been unmapped above. + unsafe { + inner.clean_up_addr_range(Page::range_inclusive(start, end), allocator); + } + } + } + /// This function creates a new empty top-level page table. pub(crate) unsafe fn new_top_level() -> Self { let frame = PageTableAllocator::::allocate_frame(true) diff --git a/litebox_platform_lvbs/src/lib.rs b/litebox_platform_lvbs/src/lib.rs index 1892e5f52..3b024c0c5 100644 --- a/litebox_platform_lvbs/src/lib.rs +++ b/litebox_platform_lvbs/src/lib.rs @@ -41,6 +41,9 @@ use x86_64::{ }; use zerocopy::{FromBytes, IntoBytes}; +#[cfg(feature = "optee_syscall")] +use crate::mm::vmap::vmap_allocator; + extern crate alloc; pub mod arch; @@ -1169,23 +1172,21 @@ impl litebox::platform::SystemInfoProvider for LinuxKernel< } } -/// Checks whether the given physical addresses are contiguous with respect to ALIGN. -/// -/// Note: This is a temporary check to let `VmapManager` work with this platform -/// which does not yet support virtually contiguous mapping of non-contiguous physical pages -/// (for now, it maps physical pages with a fixed offset). 
#[cfg(feature = "optee_syscall")] -fn check_contiguity( - addrs: &[PhysPageAddr], -) -> Result<(), PhysPointerError> { +/// Checks whether the given physical addresses are contiguous with respect to ALIGN. +fn is_contiguous(addrs: &[PhysPageAddr]) -> bool { for window in addrs.windows(2) { let first = window[0].as_usize(); let second = window[1].as_usize(); - if second != first.checked_add(ALIGN).ok_or(PhysPointerError::Overflow)? { - return Err(PhysPointerError::NonContiguousPages); + if let Some(expected) = first.checked_add(ALIGN) { + if second != expected { + return false; + } + } else { + return false; } } - Ok(()) + true } #[cfg(feature = "optee_syscall")] @@ -1195,72 +1196,119 @@ impl VmapManager for LinuxKernel pages: &PhysPageAddrArray, perms: PhysPageMapPermissions, ) -> Result, PhysPointerError> { - // TODO: Remove this check once this platform supports virtually contiguous - // non-contiguous physical page mapping. - check_contiguity(pages)?; - if pages.is_empty() { return Err(PhysPointerError::InvalidPhysicalAddress(0)); } - let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64); - let phys_end = x86_64::PhysAddr::new( - pages - .last() - .unwrap() - .as_usize() - .checked_add(ALIGN) - .ok_or(PhysPointerError::Overflow)? as u64, - ); - let frame_range = if ALIGN == PAGE_SIZE { - PhysFrame::range( - PhysFrame::::containing_address(phys_start), - PhysFrame::::containing_address(phys_end), - ) - } else { - unimplemented!("ALIGN other than 4KiB is not supported yet") - }; + + // Reject duplicate page addresses + { + let mut seen = hashbrown::HashSet::with_capacity(pages.len()); + for page in pages { + if !seen.insert(page.as_usize()) { + return Err(PhysPointerError::DuplicatePhysicalAddress(page.as_usize())); + } + } + } + + if ALIGN != PAGE_SIZE { + unimplemented!("ALIGN other than 4KiB is not supported yet"); + } let mut flags = PageTableFlags::PRESENT; if perms.contains(PhysPageMapPermissions::WRITE) { flags |= PageTableFlags::WRITABLE; } - if let Ok(page_addr) = self - .page_table_manager - .current_page_table() - .map_phys_frame_range(frame_range, flags) - { - Ok(PhysPageMapInfo { - base: page_addr, - size: pages.len() * ALIGN, - }) + // If pages are contiguous, use `map_phys_frame_range` which is efficient and doesn't require vmap VA space. + if is_contiguous(pages) { + let phys_start = x86_64::PhysAddr::new(pages[0].as_usize() as u64); + let phys_end = x86_64::PhysAddr::new( + pages + .last() + .unwrap() + .as_usize() + .checked_add(ALIGN) + .ok_or(PhysPointerError::Overflow)? 
as u64, + ); + let frame_range = PhysFrame::range( + PhysFrame::::containing_address(phys_start), + PhysFrame::::containing_address(phys_end), + ); + + match self + .page_table_manager + .current_page_table() + .map_phys_frame_range(frame_range, flags) + { + Ok(page_addr) => Ok(PhysPageMapInfo { + base: page_addr, + size: pages.len() * ALIGN, + }), + Err(MapToError::PageAlreadyMapped(_)) => { + Err(PhysPointerError::AlreadyMapped(pages[0].as_usize())) + } + Err(_) => Err(PhysPointerError::InvalidPhysicalAddress( + pages[0].as_usize(), + )), + } } else { - Err(PhysPointerError::InvalidPhysicalAddress( - pages[0].as_usize(), - )) + let frames: alloc::vec::Vec> = pages + .iter() + .map(|p| PhysFrame::containing_address(x86_64::PhysAddr::new(p.as_usize() as u64))) + .collect(); + + let base_va = vmap_allocator() + .allocate_and_register(&frames) + .ok_or(PhysPointerError::AlreadyMapped(pages[0].as_usize()))?; + + match self + .page_table_manager + .current_page_table() + .map_non_contiguous_phys_frames(&frames, base_va, flags) + { + Ok(page_addr) => Ok(PhysPageMapInfo { + base: page_addr, + size: pages.len() * ALIGN, + }), + Err(e) => { + vmap_allocator().rollback_allocation(base_va); + match e { + MapToError::PageAlreadyMapped(_) => { + Err(PhysPointerError::AlreadyMapped(pages[0].as_usize())) + } + _ => Err(PhysPointerError::InvalidPhysicalAddress( + pages[0].as_usize(), + )), + } + } + } } } unsafe fn vunmap(&self, vmap_info: PhysPageMapInfo) -> Result<(), PhysPointerError> { - if ALIGN == PAGE_SIZE { - let Some(page_range) = PageRange::::new( - vmap_info.base as usize, - vmap_info.base.wrapping_add(vmap_info.size) as usize, - ) else { - return Err(PhysPointerError::UnalignedPhysicalAddress( - vmap_info.base as usize, - ALIGN, - )); - }; - unsafe { - self.page_table_manager - .current_page_table() - .unmap_pages(page_range, false, true) - .map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize)) - } - } else { - unimplemented!("ALIGN other than 4KiB is not supported yet") + if ALIGN != PAGE_SIZE { + unimplemented!("ALIGN other than 4KiB is not supported yet"); } + + let base_va = x86_64::VirtAddr::new(vmap_info.base as u64); + + // Perform both cleanup steps unconditionally so that a failure in one + // does not leave the other in an inconsistent state. + let unmap_result = self + .unmap_vtl0_pages(vmap_info.base, vmap_info.size) + .map_err(|_| PhysPointerError::Unmapped(vmap_info.base as usize)); + + let unregister_result = if crate::mm::vmap::is_vmap_address(base_va) { + crate::mm::vmap::vmap_allocator() + .unregister_allocation(base_va) + .ok_or(PhysPointerError::Unmapped(vmap_info.base as usize)) + .map(|_| ()) + } else { + Ok(()) + }; + + // Report the first error, if any. + unmap_result.and(unregister_result) } fn validate_unowned(&self, pages: &PhysPageAddrArray) -> Result<(), PhysPointerError> { diff --git a/litebox_platform_lvbs/src/mm/mod.rs b/litebox_platform_lvbs/src/mm/mod.rs index 8365ce6f4..5bd3a5471 100644 --- a/litebox_platform_lvbs/src/mm/mod.rs +++ b/litebox_platform_lvbs/src/mm/mod.rs @@ -6,6 +6,7 @@ use crate::arch::{PhysAddr, VirtAddr}; pub(crate) mod pgtable; +pub(crate) mod vmap; #[cfg(test)] pub mod tests; diff --git a/litebox_platform_lvbs/src/mm/vmap.rs b/litebox_platform_lvbs/src/mm/vmap.rs new file mode 100644 index 000000000..0cfa6ffeb --- /dev/null +++ b/litebox_platform_lvbs/src/mm/vmap.rs @@ -0,0 +1,368 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +//! 
Vmap region allocator for mapping non-contiguous physical page frames to virtually contiguous addresses. +//! +//! This module provides functionality similar to Linux kernel's `vmap()` and `vunmap()`: +//! - Reserves a virtual address region for vmap mappings +//! - Maintains PA↔VA mappings using HashMap for duplicate detection and cleanup + +use alloc::boxed::Box; +use hashbrown::HashMap; +use rangemap::RangeSet; +use spin::Once; +use spin::mutex::SpinMutex; +use x86_64::VirtAddr; +use x86_64::structures::paging::{PhysFrame, Size4KiB}; + +use crate::mshv::vtl1_mem_layout::PAGE_SIZE; + +/// Start of the vmap virtual address region. +/// This address is chosen to be within the 4-level paging canonical address space +/// and not conflict with VTL1's direct-mapped physical memory. +const VMAP_START: u64 = 0x6000_0000_0000; + +/// End of the vmap virtual address region. +/// Provides 1 TiB of virtual address space for vmap allocations. +const VMAP_END: u64 = 0x7000_0000_0000; + +/// Number of unmapped guard pages appended after each vmap allocation. +const GUARD_PAGES: usize = 1; + +/// Information about a single vmap allocation. +#[derive(Clone, Debug)] +struct VmapAllocation { + /// Physical frames of the mapped pages (in order). + frames: Box<[PhysFrame]>, +} + +/// Inner state for the vmap region allocator. +/// +/// Uses a bump allocator with a `RangeSet` free list for virtual addresses +/// and HashMap for maintaining bidirectional mappings between physical and virtual addresses. +struct VmapRegionAllocatorInner { + /// Next available virtual address for allocation (bump allocator). + next_va: VirtAddr, + /// Free set of previously allocated and freed VA ranges (auto-coalescing). + free_set: RangeSet, + /// Map from physical frame to virtual address. + pa_to_va_map: HashMap, VirtAddr>, + /// Map from virtual address to physical frame. + va_to_pa_map: HashMap>, + /// Allocation metadata indexed by starting virtual address. + allocations: HashMap, +} + +impl VmapRegionAllocatorInner { + /// Creates a new vmap region allocator inner state. + fn new() -> Self { + Self { + next_va: VirtAddr::new(VMAP_START), + free_set: RangeSet::new(), + pa_to_va_map: HashMap::new(), + va_to_pa_map: HashMap::new(), + allocations: HashMap::new(), + } + } + + /// Allocates a contiguous virtual address range for the given number of pages, + /// plus [`GUARD_PAGES`] unmapped trailing guard pages. + /// + /// The guard pages are reserved in the VA space but never mapped, so an + /// out-of-bounds access past the allocation triggers a page fault. + /// + /// First tries to find a suitable range in the free list, then falls back to + /// bump allocation. + /// + /// Returns `Some(VirtAddr)` with the starting virtual address on success, + /// or `None` if insufficient virtual address space is available. 
+ fn allocate_va_range(&mut self, num_pages: usize) -> Option { + if num_pages == 0 { + return None; + } + + let total_pages = num_pages.checked_add(GUARD_PAGES)?; + let size = (total_pages as u64).checked_mul(PAGE_SIZE as u64)?; + + // Try to find a suitable range in the free set (first-fit) + for range in self.free_set.iter() { + let range_size = range.end - range.start; + if range_size >= size { + let allocated_start = range.start; + // Remove the allocated portion from the free set + self.free_set + .remove(allocated_start..allocated_start + size); + return Some(allocated_start); + } + } + + // Fall back to bump allocation + let end_va = self.next_va + size; + + if end_va > VirtAddr::new(VMAP_END) { + return None; + } + + let allocated_va = self.next_va; + self.next_va = end_va; + Some(allocated_va) + } + + /// Returns a VA range to the free set for reuse. + fn free_va_range(&mut self, start: VirtAddr, num_pages: usize) { + if num_pages == 0 { + return; + } + let total_pages = num_pages + GUARD_PAGES; + let end = start + (total_pages as u64) * (PAGE_SIZE as u64); + self.free_set.insert(start..end); + } +} + +/// Checks if a virtual address is within the vmap region. +pub fn is_vmap_address(va: VirtAddr) -> bool { + (VMAP_START..VMAP_END).contains(&va.as_u64()) +} + +/// Vmap region allocator that manages virtual address allocation and PA↔VA mappings. +pub struct VmapRegionAllocator { + inner: SpinMutex, +} + +impl VmapRegionAllocator { + fn new() -> Self { + Self { + inner: SpinMutex::new(VmapRegionAllocatorInner::new()), + } + } + + /// Atomically allocates VA range, registers mappings, and records allocation. + /// + /// This ensures consistency - either the entire operation succeeds or nothing changes. + /// + /// Returns the base VA on success, or None if: + /// - No VA space available + /// - Any PA is already mapped (duplicate mapping) + pub fn allocate_and_register(&self, frames: &[PhysFrame]) -> Option { + if frames.is_empty() { + return None; + } + + let mut inner = self.inner.lock(); + + // Check for duplicate PA mappings before allocating + for frame in frames { + if inner.pa_to_va_map.contains_key(frame) { + return None; + } + } + + let base_va = inner.allocate_va_range(frames.len())?; + + for (i, &frame) in frames.iter().enumerate() { + let va = VirtAddr::new(base_va.as_u64() + (i as u64) * (PAGE_SIZE as u64)); + inner.pa_to_va_map.insert(frame, va); + inner.va_to_pa_map.insert(va, frame); + } + + inner.allocations.insert( + base_va, + VmapAllocation { + frames: frames.into(), + }, + ); + + Some(base_va) + } + + /// Rolls back a failed allocation by removing mappings and freeing VA range. + /// + /// Call this if page table mapping fails after `allocate_and_register` succeeds. + pub fn rollback_allocation(&self, base_va: VirtAddr) { + let mut inner = self.inner.lock(); + if let Some(allocation) = inner.allocations.remove(&base_va) { + for (i, frame) in allocation.frames.iter().enumerate() { + let va = VirtAddr::new(base_va.as_u64() + (i as u64) * (PAGE_SIZE as u64)); + inner.pa_to_va_map.remove(frame); + inner.va_to_pa_map.remove(&va); + } + inner.free_va_range(base_va, allocation.frames.len()); + } + } + + /// Unregisters all mappings for an allocation starting at the given virtual address. + /// + /// Returns the number of pages that were unmapped, or `None` if no allocation was found. 
+ pub fn unregister_allocation(&self, va_start: VirtAddr) -> Option { + let mut inner = self.inner.lock(); + let allocation = inner.allocations.remove(&va_start)?; + + for (i, frame) in allocation.frames.iter().enumerate() { + let va = VirtAddr::new(va_start.as_u64() + (i as u64) * (PAGE_SIZE as u64)); + inner.pa_to_va_map.remove(frame); + inner.va_to_pa_map.remove(&va); + } + + inner.free_va_range(va_start, allocation.frames.len()); + + Some(allocation.frames.len()) + } +} + +/// Returns a reference to the global vmap region allocator. +pub fn vmap_allocator() -> &'static VmapRegionAllocator { + static ALLOCATOR: Once = Once::new(); + ALLOCATOR.call_once(VmapRegionAllocator::new) +} + +#[cfg(test)] +mod tests { + use super::*; + use x86_64::PhysAddr; + + #[test] + fn test_is_vmap_address() { + assert!(is_vmap_address(VirtAddr::new(VMAP_START))); + assert!(is_vmap_address(VirtAddr::new(VMAP_START + 0x1000))); + assert!(is_vmap_address(VirtAddr::new(VMAP_END - 1))); + assert!(!is_vmap_address(VirtAddr::new(VMAP_START - 1))); + assert!(!is_vmap_address(VirtAddr::new(VMAP_END))); + } + + #[test] + fn test_allocate_va_range() { + let mut allocator = VmapRegionAllocatorInner::new(); + + // Allocate first range (1 data page + 1 guard page = 2 pages consumed) + let va1 = allocator.allocate_va_range(1); + assert!(va1.is_some()); + assert_eq!(va1.unwrap().as_u64(), VMAP_START); + + // Second allocation starts after data + guard pages + let va2 = allocator.allocate_va_range(2); + assert!(va2.is_some()); + assert_eq!( + va2.unwrap().as_u64(), + VMAP_START + (1 + GUARD_PAGES as u64) * PAGE_SIZE as u64 + ); + + // Zero pages should return None + let va3 = allocator.allocate_va_range(0); + assert!(va3.is_none()); + } + + #[test] + fn test_va_range_reuse() { + let mut allocator = VmapRegionAllocatorInner::new(); + + // Allocate and free a 2-page range (consumes 2 + guard pages) + let va1 = allocator.allocate_va_range(2).unwrap(); + allocator.free_va_range(va1, 2); + + // Next allocation of same size should reuse the freed range + let va2 = allocator.allocate_va_range(2).unwrap(); + assert_eq!(va1, va2); + + // Free the 3-page slot (2 data + 1 guard), then allocate 1 page (needs 1+1=2 pages). + // The remaining 1 page in the 3-page slot is not enough for another 1+1 allocation. 
+ allocator.free_va_range(va2, 2); + let va3 = allocator.allocate_va_range(1).unwrap(); + assert_eq!(va3, va1); + } + + #[test] + fn test_allocate_and_register() { + let allocator = VmapRegionAllocator::new(); + + let frames = alloc::vec![ + PhysFrame::::containing_address(PhysAddr::new(0x1000)), + PhysFrame::::containing_address(PhysAddr::new(0x3000)), + PhysFrame::::containing_address(PhysAddr::new(0x5000)), + ]; + + // Allocate and register + let base_va = allocator.allocate_and_register(&frames); + assert!(base_va.is_some()); + let base_va = base_va.unwrap(); + assert_eq!(base_va.as_u64(), VMAP_START); + + // Duplicate PA should fail (proves mappings were recorded) + let duplicate = allocator + .allocate_and_register(&[PhysFrame::containing_address(PhysAddr::new(0x1000))]); + assert!(duplicate.is_none()); + + // Empty input should return None + assert!(allocator.allocate_and_register(&[]).is_none()); + } + + #[test] + fn test_rollback_allocation() { + let allocator = VmapRegionAllocator::new(); + + let frames = alloc::vec![ + PhysFrame::::containing_address(PhysAddr::new(0x1000)), + PhysFrame::::containing_address(PhysAddr::new(0x2000)), + ]; + + let base_va = allocator.allocate_and_register(&frames).unwrap(); + + // Rollback + allocator.rollback_allocation(base_va); + + // Mappings should be gone — re-registering the same PAs must succeed + let new_va = allocator.allocate_and_register(&frames).unwrap(); + assert_eq!(new_va, base_va); + } + + #[test] + fn test_unregister_allocation() { + let allocator = VmapRegionAllocator::new(); + + let frames = alloc::vec![ + PhysFrame::::containing_address(PhysAddr::new(0x1000)), + PhysFrame::::containing_address(PhysAddr::new(0x3000)), + PhysFrame::::containing_address(PhysAddr::new(0x5000)), + ]; + + let base_va = allocator.allocate_and_register(&frames).unwrap(); + + // Unregister + let num_pages = allocator.unregister_allocation(base_va); + assert_eq!(num_pages, Some(3)); + + // Mappings should be gone — re-registering the same PAs must succeed + // and reuse the freed VA range + let new_va = allocator.allocate_and_register(&frames).unwrap(); + assert_eq!(new_va, base_va); + + // Unregistering an unknown VA returns None + assert_eq!( + allocator.unregister_allocation(VirtAddr::new(VMAP_END - 0x1000)), + None + ); + } + + #[test] + fn test_guard_page_gap() { + let allocator = VmapRegionAllocator::new(); + + let frames_a = alloc::vec![PhysFrame::::containing_address(PhysAddr::new( + 0x1000 + )),]; + let frames_b = alloc::vec![PhysFrame::::containing_address(PhysAddr::new( + 0x2000 + )),]; + + let va_a = allocator.allocate_and_register(&frames_a).unwrap(); + let va_b = allocator.allocate_and_register(&frames_b).unwrap(); + + // Allocations should be separated by at least GUARD_PAGES unmapped pages + let gap_pages = (va_b.as_u64() - va_a.as_u64()) / PAGE_SIZE as u64; + assert!( + gap_pages >= (1 + GUARD_PAGES as u64), + "expected at least {} pages between allocations, got {}", + 1 + GUARD_PAGES, + gap_pages + ); + } +}
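
The sketch below is not part of the patch above; it is a minimal, standalone model of the VA allocation strategy that `VmapRegionAllocator` implements: first-fit over a free list of previously released ranges, falling back to a bump pointer, with one trailing guard page per allocation. `MiniVmapAllocator`, `allocate`, and `free` are hypothetical names used only for illustration, and a plain `Vec<Range<u64>>` stands in for `rangemap::RangeSet`, so freed ranges are not auto-coalesced here the way they are in the real allocator.

// Illustrative sketch only -- not part of the diff. Simplified model of the
// first-fit-then-bump VA allocator with trailing guard pages.
use core::ops::Range;

const PAGE_SIZE: u64 = 0x1000;
const GUARD_PAGES: u64 = 1;
const VMAP_START: u64 = 0x6000_0000_0000;
const VMAP_END: u64 = 0x7000_0000_0000;

struct MiniVmapAllocator {
    /// Next unused address for bump allocation.
    next_va: u64,
    /// Previously freed ranges (simplification: not coalesced).
    free_list: Vec<Range<u64>>,
}

impl MiniVmapAllocator {
    fn new() -> Self {
        Self { next_va: VMAP_START, free_list: Vec::new() }
    }

    /// Reserve `num_pages` data pages plus `GUARD_PAGES` unmapped guard pages.
    fn allocate(&mut self, num_pages: u64) -> Option<u64> {
        if num_pages == 0 {
            return None;
        }
        let size = num_pages.checked_add(GUARD_PAGES)?.checked_mul(PAGE_SIZE)?;

        // First fit: reuse a previously freed range if it is large enough.
        if let Some(idx) = self
            .free_list
            .iter()
            .position(|r| r.end - r.start >= size)
        {
            let range = self.free_list[idx].clone();
            let base = range.start;
            if range.end - range.start == size {
                self.free_list.remove(idx);
            } else {
                // Keep the unused tail of the free range available.
                self.free_list[idx] = base + size..range.end;
            }
            return Some(base);
        }

        // Otherwise bump-allocate from the unused part of the region.
        let end = self.next_va.checked_add(size)?;
        if end > VMAP_END {
            return None;
        }
        let base = self.next_va;
        self.next_va = end;
        Some(base)
    }

    /// Return a range (its data pages plus its guard page) to the free list.
    fn free(&mut self, base: u64, num_pages: u64) {
        let size = (num_pages + GUARD_PAGES) * PAGE_SIZE;
        self.free_list.push(base..base + size);
    }
}

// Example: two one-page allocations are separated by a guard page, and a
// freed slot is reused by the next allocation of the same size.
fn main() {
    let mut a = MiniVmapAllocator::new();
    let va1 = a.allocate(1).unwrap();
    let va2 = a.allocate(1).unwrap();
    assert_eq!(va2 - va1, (1 + GUARD_PAGES) * PAGE_SIZE);
    a.free(va1, 1);
    assert_eq!(a.allocate(1).unwrap(), va1);
}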