Merged
Changes from 13 commits
1 change: 1 addition & 0 deletions modules/axhal/src/arch/x86_64/trap.rs
@@ -78,6 +78,7 @@ fn vec_to_str(vec: u64) -> &'static str {
fn err_code_to_flags(err_code: u64) -> Result<MappingFlags, u64> {
let code = PageFaultErrorCode::from_bits_truncate(err_code);
let reserved_bits = (PageFaultErrorCode::CAUSED_BY_WRITE
| PageFaultErrorCode::PROTECTION_VIOLATION
Collaborator comment on the line above: @mingzi47 Sorry to bother you, I was wondering if this change is necessary? Because I'm porting these to axcpu.

| PageFaultErrorCode::USER_MODE
| PageFaultErrorCode::INSTRUCTION_FETCH)
.complement();
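For context on this hunk: `err_code_to_flags` treats any error-code bit it does not explicitly handle as reserved, and adding `PROTECTION_VIOLATION` to the handled set is what lets write faults on present (COW-protected) pages reach the memory manager. The sketch below is a minimal, self-contained illustration of that pattern; the flag definitions and the `check_reserved` helper are illustrative, not the real axhal/x86_64 code.

// Sketch only: illustrative flag set, not the axhal definition.
bitflags::bitflags! {
    #[derive(Clone, Copy)]
    struct FaultCode: u64 {
        const PROTECTION_VIOLATION = 1 << 0;
        const CAUSED_BY_WRITE      = 1 << 1;
        const USER_MODE            = 1 << 2;
        const MALFORMED_TABLE      = 1 << 3;
        const INSTRUCTION_FETCH    = 1 << 4;
    }
}

/// Returns `Err(err_code)` if any known-but-unhandled bit is set.
fn check_reserved(err_code: u64) -> Result<FaultCode, u64> {
    let code = FaultCode::from_bits_truncate(err_code);
    // OR together every bit we handle, then complement: whatever remains of the
    // known flags is "reserved" from this handler's point of view.
    let reserved_bits = (FaultCode::CAUSED_BY_WRITE
        | FaultCode::PROTECTION_VIOLATION
        | FaultCode::USER_MODE
        | FaultCode::INSTRUCTION_FETCH)
        .complement();
    if code.intersects(reserved_bits) {
        Err(err_code)
    } else {
        Ok(code)
    }
}

Before this change, `PROTECTION_VIOLATION` fell into the reserved set, so a write fault on a present, write-protected page (exactly what COW produces) would presumably have been reported as an unhandled error code.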
214 changes: 197 additions & 17 deletions modules/axmm/src/aspace.rs
@@ -9,7 +9,8 @@ use memory_addr::{
use memory_set::{MemoryArea, MemorySet};
use page_table_multiarch::PageSize;

use crate::backend::{Backend, PageIterWrapper};
use crate::backend::{Backend, PageIterWrapper, alloc_frame, dealloc_frame};
use crate::frameinfo::{add_frame_ref, get_frame_info};
use crate::mapping_err_to_ax_err;

/// The virtual memory address space.
Expand Down Expand Up @@ -221,28 +222,73 @@ impl AddrSpace {
Ok(())
}

/// Populates the area with physical frames, returning false if the area
/// contains unmapped area.
pub fn populate_area(&mut self, mut start: VirtAddr, size: usize, align: PageSize) -> AxResult {
/// Ensures that the specified virtual memory region is fully mapped.
///
/// This function walks the given virtual address range and ensures that every page
/// is mapped. If a page is not mapped and the corresponding area is populated lazily
/// (`populate == false`), the backend's page-fault handling is invoked to map it.
/// If `cow_on_write` is true, copy-on-write (COW) handling is applied to pages that
/// are already mapped but still need to be copied before they can be written.
///
/// # Parameters
///
/// - `start`: The starting virtual address of the region to map.
/// - `size`: The size (in bytes) of the region.
/// - `align`: Alignment requirement for the allocated memory, must be a multiple of 4KiB.
/// - `cow_on_write`: Whether to trigger copy-on-write handling for write-intended mappings.
///
/// # Returns
///
/// Returns `Ok(())` if the entire region is successfully mapped, or an appropriate
/// `AxError` variant (`NoMemory`, `BadAddress`) on failure.
///
/// # Errors
///
/// - `AxError::NoMemory`: Physical frame allocation failed.
/// - `AxError::BadAddress`: An invalid mapping state was detected.
pub fn ensure_region_mapped(
&mut self,
mut start: VirtAddr,
size: usize,
align: PageSize,
cow_on_write: bool,
) -> AxResult {
self.validate_region(start, size, align)?;
let end = start + size;

while let Some(area) = self.areas.find(start) {
let backend = area.backend();
if let Backend::Alloc { populate, align } = *backend {
if !populate {
for addr in PageIterWrapper::new(start, area.end().min(end), align).unwrap() {
match self.pt.query(addr) {
Ok(_) => {}
// If the page is not mapped, try map it.
Err(PagingError::NotMapped) => {
for addr in PageIterWrapper::new(start, area.end().min(end), align).unwrap() {
match self.pt.query(addr) {
// If the page is already mapped and the access intends to write, try COW.
Ok((paddr, flags, page_size)) => {
if cow_on_write {
if !area.flags().contains(MappingFlags::WRITE) {
return Err(AxError::BadAddress);
}

if !Self::handle_cow_fault(
addr,
paddr,
flags,
page_size,
&mut self.pt,
) {
return Err(AxError::NoMemory);
}
}
}
// If the page is not mapped, try to map it.
Err(PagingError::NotMapped) => {
if !populate {
if !backend.handle_page_fault(addr, area.flags(), &mut self.pt) {
return Err(AxError::NoMemory);
}
}
Err(_) => return Err(AxError::BadAddress),
};
}
}
Err(_) => return Err(AxError::BadAddress),
};
}
}
start = area.end();
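A usage sketch for the new method (the caller below is hypothetical and not part of this PR): a code path that needs an entire user range resident and privately writable, for example before the kernel writes into it directly, could do the following.

// Hypothetical caller, shown only to illustrate the new signature.
use axerrno::AxResult;
use axmm::AddrSpace;
use memory_addr::VirtAddr;
use page_table_multiarch::PageSize;

fn pin_user_buffer(aspace: &mut AddrSpace, buf: VirtAddr, len: usize) -> AxResult {
    // Resolve every page in [buf, buf + len): lazily-populated areas get frames
    // via their backend's page-fault path, and with `cow_on_write == true` any
    // COW-shared page is copied up front so later writes cannot fault.
    aspace.ensure_region_mapped(buf, len, PageSize::Size4K, /* cow_on_write */ true)
}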
@@ -395,7 +441,7 @@ impl AddrSpace {
align: PageSize,
) -> AxResult {
// Populate the area first, which also checks the address range for us.
self.populate_area(start, size, align)?;
self.ensure_region_mapped(start, size, align, false)?;

self.areas
.protect(start, size, |_| Some(flags), &mut self.pt)
@@ -453,9 +499,29 @@ impl AddrSpace {
if let Some(area) = self.areas.find(vaddr) {
let orig_flags = area.flags();
if orig_flags.contains(access_flags) {
return area
.backend()
.handle_page_fault(vaddr, orig_flags, &mut self.pt);
// Two cases reach this branch:
// - shared pages (the VMA contains a page shared with another address space)
// - copy-on-write (COW) pages
if let Ok((paddr, _, page_size)) = self.pt.query(vaddr) {
if !access_flags.contains(MappingFlags::WRITE) {
return false;
}
let off = page_size.align_offset(vaddr.into());
// Reaching here means:
// 1. the page fault was caused by a write,
// 2. the PTE already exists,
// 3. the page is not shared memory,
// so handle it as a COW fault.
return Self::handle_cow_fault(
vaddr,
paddr.sub(off),
orig_flags,
page_size,
&mut self.pt,
);
} else {
return area
.backend()
.handle_page_fault(vaddr, orig_flags, &mut self.pt);
}
}
}
false
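The branch above distinguishes "PTE already present" (a shared or COW page, where only a write fault is legitimate) from "PTE absent" (lazy allocation). The glue below is a hedged sketch of how an arch trap handler would be expected to drive this method, reusing the `err_code_to_flags` helper patched earlier; none of it is code from this PR, and the exact `handle_page_fault` signature is inferred from the body shown in the hunk.

// Illustrative trap-side glue; the actual ArceOS plumbing lives elsewhere.
use axhal::paging::MappingFlags;
use axmm::AddrSpace;
use memory_addr::VirtAddr;

fn handle_user_page_fault(aspace: &mut AddrSpace, fault_vaddr: VirtAddr, err_code: u64) {
    // Decode the hardware error code into access intentions (READ/WRITE/EXECUTE).
    // `err_code_to_flags` stands in for the axhal helper; assume it is reachable here.
    let access_flags: MappingFlags = match err_code_to_flags(err_code) {
        Ok(flags) => flags,
        Err(code) => panic!("page fault with reserved error bits: {code:#x}"),
    };
    // Present PTE + WRITE intention -> COW copy (or restore WRITE if unshared).
    // Absent PTE -> the area's backend allocates and maps a fresh frame.
    if !aspace.handle_page_fault(fault_vaddr, access_flags) {
        panic!("unrecoverable page fault at {fault_vaddr:?}");
    }
}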
@@ -467,6 +533,7 @@ impl AddrSpace {

for area in self.areas.iter() {
let backend = area.backend();

// Remap the memory area in the new address space.
let new_area =
MemoryArea::new(area.start(), area.size(), area.flags(), backend.clone());
@@ -513,6 +580,119 @@ impl AddrSpace {
}
Ok(new_aspace)
}

/// Creates a copy of the current [`AddrSpace`] with copy-on-write (COW) semantics.
///
/// Pages that require COW have their `WRITE` flag removed in both the old and the
/// new page tables, so the first write to either copy faults and triggers the copy.
pub fn copy_with_cow(&mut self) -> AxResult<Self> {
let mut new_aspace = Self::new_empty(self.base(), self.size())?;
let new_pt = &mut new_aspace.pt;
let old_pt = &mut self.pt;

for area in self.areas.iter() {
let mut backend = area.backend().clone();
if let Backend::Alloc { populate, .. } = &mut backend {
// Forcing `populate = false` prevents the subsequent `new_aspace.areas.map` call
// from eagerly creating page-table entries for these virtual addresses.
*populate = false
}

let new_area =
MemoryArea::new(area.start(), area.size(), area.flags(), backend.clone());
new_aspace
.areas
.map(new_area, new_pt, false)
.map_err(mapping_err_to_ax_err)?;

// Linear-backed regions are usually allocated by the kernel and shared as-is,
// so they are not subject to COW.
if matches!(backend, Backend::Linear { .. }) {
continue;
}

let mut flags = area.flags();
flags.remove(MappingFlags::WRITE);

let align = if let Backend::Alloc { align, .. } = backend {
align
} else {
unreachable!()
};

// If the page is mapped in the old page table:
// - Update its permissions in the old page table using `flags`.
// - Map the same physical page into the new page table at the same
// virtual address, with the same page size and `flags`.
for vaddr in PageIterWrapper::new(area.start(), area.end(), align)
.expect("Failed to create page iterator")
{
if let Ok((paddr, _, page_size)) = old_pt.query(vaddr) {
add_frame_ref(paddr);

old_pt
.protect(vaddr, flags)
.map(|(_, tlb)| tlb.flush())
.expect("protect failed");
new_pt
.map(vaddr, paddr, page_size, flags)
.map(|tlb| tlb.flush())
.expect("map failed");
}
}
}

Ok(new_aspace)
}

/// Handles a Copy-On-Write (COW) page fault.
///
/// # Arguments
/// - `vaddr`: The virtual address that triggered the fault.
/// - `paddr`: It must be an aligned physical address; if it's a huge page,
/// it must be the starting physical address.
/// - `flags`: The mapping flags of the VMA that contains `vaddr`.
/// - `align`: Alignment requirement for the allocated memory, must be a multiple of 4KiB.
/// - `pt`: A mutable reference to the page table that should be updated.
///
/// # Returns
/// - `true` if the page fault was handled successfully.
/// - `false` if the fault handling failed (e.g., allocation failed or invalid ref count).
fn handle_cow_fault(
vaddr: VirtAddr,
paddr: PhysAddr,
flags: MappingFlags,
align: PageSize,
pt: &mut PageTable,
) -> bool {
let frame_info = get_frame_info(paddr);
match frame_info.ref_count() {
0 => unreachable!(),
// There is only one AddrSpace reference to the page,
// so there is no need to copy it.
1 => pt.protect(vaddr, flags).map(|(_, tlb)| tlb.flush()).is_ok(),
// Allocates the new page and copies the contents of the original page,
// remapping the virtual address to the physical address of the new page.
2.. => match alloc_frame(false, align) {
Some(new_frame) => {
unsafe {
core::ptr::copy_nonoverlapping(
phys_to_virt(paddr).as_ptr(),
phys_to_virt(new_frame).as_mut_ptr(),
align.into(),
)
};

dealloc_frame(paddr, align);

pt.remap(vaddr, new_frame, flags)
.map(|(_, tlb)| {
tlb.flush();
})
.is_ok()
}
None => false,
},
}
}
}

impl fmt::Debug for AddrSpace {
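Putting the new pieces together, the intended lifecycle is: fork with `copy_with_cow`, then resolve the first write to any shared page through `handle_page_fault` / `handle_cow_fault`. The end-to-end sketch below is hedged: the fork/task plumbing is assumed and not part of this diff.

// Illustrative only: how a fork path and a later write fault would compose.
use axerrno::AxResult;
use axhal::paging::MappingFlags;
use axmm::AddrSpace;
use memory_addr::VirtAddr;

fn fork_aspace(parent: &mut AddrSpace) -> AxResult<AddrSpace> {
    // Fork time: every mapped frame is shared with the child, its reference
    // count is bumped, and WRITE is dropped in both page tables.
    parent.copy_with_cow()
}

fn write_fault_on_shared_page(aspace: &mut AddrSpace, vaddr: VirtAddr) -> bool {
    // Fault time: the MMU reports a write to a present, write-protected page.
    // With refcount == 1 the WRITE flag is simply restored; with refcount >= 2
    // a new frame is allocated, the contents are copied, the old frame's
    // reference is dropped, and the PTE is remapped writable.
    aspace.handle_page_fault(vaddr, MappingFlags::WRITE)
}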
22 changes: 17 additions & 5 deletions modules/axmm/src/backend/alloc.rs
@@ -4,6 +4,8 @@ use axhal::mem::{phys_to_virt, virt_to_phys};
use axhal::paging::{MappingFlags, PageSize, PageTable};
use memory_addr::{PAGE_SIZE_4K, PhysAddr, VirtAddr};

use crate::frameinfo::{add_frame_ref, dec_frame_ref};

use super::Backend;

/// Allocates a physical frame, with an option to zero it out.
@@ -25,14 +27,15 @@ use super::Backend;
/// - If `zeroed` is `true`, the function uses `unsafe` operations to zero out the memory.
/// - The allocated memory must be accessed via its physical address, which requires
/// conversion using `virt_to_phys`.
fn alloc_frame(zeroed: bool, align: PageSize) -> Option<PhysAddr> {
pub fn alloc_frame(zeroed: bool, align: PageSize) -> Option<PhysAddr> {
let page_size: usize = align.into();
let num_pages = page_size / PAGE_SIZE_4K;
let vaddr = VirtAddr::from(global_allocator().alloc_pages(num_pages, page_size).ok()?);
if zeroed {
unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, page_size) };
}
let paddr = virt_to_phys(vaddr);
add_frame_ref(paddr);
Some(paddr)
}

@@ -43,6 +46,9 @@ fn alloc_frame(zeroed: bool, align: PageSize) -> Option<PhysAddr> {
/// The size of the memory to be freed is determined by the `align` parameter,
/// which must be a multiple of 4KiB.
///
/// This function decreases the reference count associated with the frame.
/// When the reference count reaches 1, it actually frees the frame memory.
///
/// # Parameters
/// - `frame`: The physical address of the memory to be freed.
/// - `align`: The alignment requirement for the memory, must be a multiple of 4KiB.
@@ -52,11 +58,17 @@ fn alloc_frame(zeroed: bool, align: PageSize) -> Option<PhysAddr> {
/// otherwise undefined behavior may occur.
/// - If the deallocation fails, the function will call `panic!`. Details about
/// the failure can be obtained from the global memory allocator’s error messages.
fn dealloc_frame(frame: PhysAddr, align: PageSize) {
let page_size: usize = align.into();
let num_pages = page_size / PAGE_SIZE_4K;
pub fn dealloc_frame(frame: PhysAddr, align: PageSize) {
let vaddr = phys_to_virt(frame);
global_allocator().dealloc_pages(vaddr.as_usize(), num_pages);
match dec_frame_ref(frame) {
0 => unreachable!(),
1 => {
let page_size: usize = align.into();
let num_pages = page_size / PAGE_SIZE_4K;
global_allocator().dealloc_pages(vaddr.as_usize(), num_pages);
}
_ => (),
}
}

impl Backend {
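Both functions now defer to the frame reference counting in `crate::frameinfo`, which is not included in the hunks shown here. The sketch below is an assumption about its shape (one atomic counter per 4 KiB frame, with `dec_frame_ref` returning the count before the decrement so that 1 means "last reference"), written only to make the `match` arms above readable; the real module may differ.

// Hypothetical frame reference counting, assumed for illustration only.
use core::sync::atomic::{AtomicUsize, Ordering};
use memory_addr::{PAGE_SIZE_4K, PhysAddr};

const MAX_FRAMES: usize = 1 << 20; // enough for 4 GiB of 4 KiB frames in this sketch

struct FrameInfo {
    ref_count: AtomicUsize,
}

impl FrameInfo {
    const fn new() -> Self {
        Self { ref_count: AtomicUsize::new(0) }
    }
    fn ref_count(&self) -> usize {
        self.ref_count.load(Ordering::Acquire)
    }
}

const EMPTY_FRAME: FrameInfo = FrameInfo::new();
static FRAME_TABLE: [FrameInfo; MAX_FRAMES] = [EMPTY_FRAME; MAX_FRAMES];

fn frame_index(paddr: PhysAddr) -> usize {
    paddr.as_usize() / PAGE_SIZE_4K
}

fn get_frame_info(paddr: PhysAddr) -> &'static FrameInfo {
    &FRAME_TABLE[frame_index(paddr)]
}

// Called by `alloc_frame` and by `copy_with_cow` when a frame gains a new user.
fn add_frame_ref(paddr: PhysAddr) {
    FRAME_TABLE[frame_index(paddr)].ref_count.fetch_add(1, Ordering::AcqRel);
}

// Called by `dealloc_frame`; returns the count *before* the decrement, so a
// return value of 1 tells the caller it held the last reference and may free.
fn dec_frame_ref(paddr: PhysAddr) -> usize {
    FRAME_TABLE[frame_index(paddr)].ref_count.fetch_sub(1, Ordering::AcqRel)
}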
2 changes: 2 additions & 0 deletions modules/axmm/src/backend/mod.rs
@@ -10,6 +10,8 @@ mod alloc;
mod linear;
mod page_iter_wrapper;

pub use alloc::{alloc_frame, dealloc_frame};

/// A unified enum type for different memory mapping backends.
///
/// Currently, two backends are implemented: