From 4468909a7bae2ada9442419d1610dc797eb0b8d8 Mon Sep 17 00:00:00 2001
From: mingzi
Date: Tue, 27 May 2025 22:20:47 +0800
Subject: [PATCH 01/15] Add basic COW - support x86_64

refactor: improve page fault handling and page management
- Fix whitespace in PageFaultErrorCode flags
- Simplify page fault COW logic
- Rename page ref count methods for clarity
- Move page copy logic to Page struct
- Add better documentation for page management

refactor: improve copy_with_cow
- use `insert_area`
- add `MemorySet` patch

fmt: fmt

chore: update test workflow dependencies
- Add memory_set crate dependency
- Add memory_addr crate dependency

refactor: copy_with_cow and add support for allocating contiguous pages
- Remove my personal git dependencies for memory_set and memory_addr
- Update Cargo.toml and Cargo.lock
- Adjust page allocation to support contiguous pages
- Improve COW handling with page size parameter
---
 modules/axhal/src/arch/x86_64/trap.rs |   1 +
 modules/axmm/src/aspace.rs            | 135 ++++++++++++++++++++++-
 modules/axmm/src/backend/alloc.rs     |  23 ++--
 modules/axmm/src/lib.rs               |   4 +
 modules/axmm/src/page.rs              | 153 ++++++++++++++++++++++++++
 5 files changed, 303 insertions(+), 13 deletions(-)
 create mode 100644 modules/axmm/src/page.rs

diff --git a/modules/axhal/src/arch/x86_64/trap.rs b/modules/axhal/src/arch/x86_64/trap.rs
index 55c42e44b4..c777e90a35 100644
--- a/modules/axhal/src/arch/x86_64/trap.rs
+++ b/modules/axhal/src/arch/x86_64/trap.rs
@@ -78,6 +78,7 @@ fn vec_to_str(vec: u64) -> &'static str {
 fn err_code_to_flags(err_code: u64) -> Result {
     let code = PageFaultErrorCode::from_bits_truncate(err_code);
     let reserved_bits = (PageFaultErrorCode::CAUSED_BY_WRITE
+        | PageFaultErrorCode::PROTECTION_VIOLATION
         | PageFaultErrorCode::USER_MODE
         | PageFaultErrorCode::INSTRUCTION_FETCH)
         .complement();
diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs
index f9691ec10f..d656acef4c 100644
--- a/modules/axmm/src/aspace.rs
+++ b/modules/axmm/src/aspace.rs
@@ -10,6 +10,7 @@ use memory_set::{MemoryArea, MemorySet};
 
 use crate::backend::Backend;
 use crate::mapping_err_to_ax_err;
+use crate::page::page_manager;
 
 /// The virtual memory address space.
 pub struct AddrSpace {
@@ -373,9 +374,20 @@ impl AddrSpace {
         if let Some(area) = self.areas.find(vaddr) {
             let orig_flags = area.flags();
             if orig_flags.contains(access_flags) {
-                return area
-                    .backend()
-                    .handle_page_fault(vaddr, orig_flags, &mut self.pt);
+                if let Ok((paddr, _, page_size)) = self.pt.query(vaddr) {
+                    // TODO: skip Shared
+                    if !access_flags.contains(MappingFlags::WRITE) {
+                        return false;
+                    }
+                    // 1. page fault caused by write
+                    // 2. pte exists
+                    // 3. Not shared memory
+                    return self.handle_page_fault_cow(vaddr, paddr, page_size.into(), orig_flags);
+                } else {
+                    return area
+                        .backend()
+                        .handle_page_fault(vaddr, orig_flags, &mut self.pt);
+                }
             }
         }
         false
@@ -433,6 +445,122 @@ impl AddrSpace {
         }
         Ok(new_aspace)
     }
+
+    /// Creates a copy of the current [`AddrSpace`] with copy-on-write (COW)
+    ///
+    /// For pages that require COW, remove `write` flags.
+    pub fn copy_with_cow(&mut self) -> AxResult {
+        // TODO: huge page
+        let mut new_aspace = Self::new_empty(self.base(), self.size())?;
+        let new_pt = &mut new_aspace.pt;
+        let old_pt = &mut self.pt;
+
+        for area in self.areas.iter() {
+            // Copy the memory area in the new address space.
+ // + let mut is_shared = false; + + let mut backend = area.backend().clone(); + // TODO: Shared mem area + match &mut backend { + // Forcing `populate = false` is to prevent the subsequent `new_aspace.areas.map` + // from mapping page table entries for the virtual addresses. + Backend::Alloc { populate, .. } => { + *populate = false; + } + // Linear-backed regions are usually allocated by the kernel and are shared + Backend::Linear { .. } => is_shared = true, + } + + let new_area = MemoryArea::new(area.start(), area.size(), area.flags(), backend); + new_aspace + .areas + .map(new_area, new_pt, false) + .map_err(mapping_err_to_ax_err)?; + + if is_shared { + continue; + } + + for vaddr in PageIter4K::new(area.start(), area.end()).unwrap() { + if let Ok((paddr, mut flags, size)) = old_pt.query(vaddr) { + // remove `write` flags + flags.remove(MappingFlags::WRITE); + old_pt + .protect(vaddr, flags) + .map(|(_, tlb)| tlb.flush()) + .ok(); + // The same physical page is mapped in the new page table + new_pt + .map(vaddr, paddr, size, flags) + .map(|tlb| tlb.flush()) + .ok(); + // NOTE: Increment the physical page reference count + page_manager().lock().inc_page_ref(paddr); + } + } + } + + Ok(new_aspace) + } + + /// Handles a Copy-On-Write (COW) page fault. + /// + /// # Arguments + /// - `vaddr`: The virtual address that triggered the fault. + /// - `paddr`: The physical address currently mapped to the faulting virtual address. + /// - `orig_flags`: The MemoryArea flags. + /// + /// # Returns + /// - `true` if the page fault was handled successfully. + /// - `false` if the fault handling failed (e.g., allocation failed or invalid ref count). + fn handle_page_fault_cow( + &mut self, + vaddr: VirtAddr, + paddr: PhysAddr, + page_size: usize, + orig_flags: MappingFlags, + ) -> bool { + let mut page_manager = page_manager().lock(); + + if let Some(old_page) = page_manager.find_page(paddr) { + let ref_count = old_page.ref_count(); + + debug!( + "handle_page_fault_cow => ref_count : {}, flags : {:#?}, vaddr: {:#?}", + ref_count, orig_flags, vaddr, + ); + + match ref_count { + 0 => false, + // There is only one AddrSpace reference to the page, + // so there is no need to copy it. + 1 => self + .pt + .protect(vaddr, orig_flags) + .map(|(_, tlb)| tlb.flush()) + .is_ok(), + // Allocates the new page and copies the contents of the original page, + // remapping the virtual address to the physical address of the new page. + // NOTE: Reduce the page's reference count + 2.. => match page_manager.alloc(page_size / PAGE_SIZE_4K, PAGE_SIZE_4K) { + Ok(new_page) => { + new_page.copy_form(old_page); + page_manager.dec_page_ref(paddr); + self.pt + .remap(vaddr, new_page.start_paddr(), orig_flags) + .map(|(_, tlb)| { + tlb.flush(); + }) + .is_ok() + } + Err(_) => false, + }, + } + } else { + false + } + } } impl fmt::Debug for AddrSpace { @@ -447,6 +575,7 @@ impl fmt::Debug for AddrSpace { impl Drop for AddrSpace { fn drop(&mut self) { + debug!("AddrSpace drop ..... 
"); self.clear(); } } diff --git a/modules/axmm/src/backend/alloc.rs b/modules/axmm/src/backend/alloc.rs index d5ec5787e6..d20674171a 100644 --- a/modules/axmm/src/backend/alloc.rs +++ b/modules/axmm/src/backend/alloc.rs @@ -1,22 +1,25 @@ -use axalloc::global_allocator; -use axhal::mem::{phys_to_virt, virt_to_phys}; use axhal::paging::{MappingFlags, PageSize, PageTable}; use memory_addr::{PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr}; +use crate::page::page_manager; + use super::Backend; fn alloc_frame(zeroed: bool) -> Option { - let vaddr = VirtAddr::from(global_allocator().alloc_pages(1, PAGE_SIZE_4K).ok()?); - if zeroed { - unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, PAGE_SIZE_4K) }; - } - let paddr = virt_to_phys(vaddr); - Some(paddr) + page_manager() + .lock() + .alloc(1, PAGE_SIZE_4K) + .ok() + .map(|page| { + if zeroed { + page.zero(); + } + page.start_paddr() + }) } fn dealloc_frame(frame: PhysAddr) { - let vaddr = phys_to_virt(frame); - global_allocator().dealloc_pages(vaddr.as_usize(), 1); + page_manager().lock().dealloc(frame); } impl Backend { diff --git a/modules/axmm/src/lib.rs b/modules/axmm/src/lib.rs index 1be9043498..a38a1919d6 100644 --- a/modules/axmm/src/lib.rs +++ b/modules/axmm/src/lib.rs @@ -8,6 +8,7 @@ extern crate alloc; mod aspace; mod backend; +mod page; pub use self::aspace::AddrSpace; pub use self::backend::Backend; @@ -18,6 +19,7 @@ use kspin::SpinNoIrq; use lazyinit::LazyInit; use memory_addr::{PhysAddr, va}; use memory_set::MappingError; +use page::init_page_manager; static KERNEL_ASPACE: LazyInit> = LazyInit::new(); @@ -63,6 +65,8 @@ pub fn init_memory_management() { debug!("kernel address space init OK: {:#x?}", kernel_aspace); KERNEL_ASPACE.init_once(SpinNoIrq::new(kernel_aspace)); axhal::paging::set_kernel_page_table_root(kernel_page_table_root()); + + init_page_manager(); } /// Initializes kernel paging for secondary CPUs. diff --git a/modules/axmm/src/page.rs b/modules/axmm/src/page.rs new file mode 100644 index 0000000000..61f2820658 --- /dev/null +++ b/modules/axmm/src/page.rs @@ -0,0 +1,153 @@ +use core::sync::atomic::{AtomicUsize, Ordering}; + +use alloc::{collections::btree_map::BTreeMap, sync::Arc}; +use axalloc::GlobalPage; +use axerrno::AxResult; +use axhal::mem::virt_to_phys; +use kspin::{SpinNoIrq, SpinRaw}; +use lazyinit::LazyInit; +use memory_addr::PhysAddr; + +static PAGE_MANAGER: LazyInit> = LazyInit::new(); + +pub(crate) fn init_page_manager() { + PAGE_MANAGER.init_once(SpinNoIrq::new(PageManager::new())); +} + +pub fn page_manager() -> &'static SpinNoIrq { + &PAGE_MANAGER +} + +/// Manages the physical pages allocated in AddrSpace, +/// typically Backend::Alloc physical page frames +pub struct PageManager { + phys2page: BTreeMap>, +} + +impl PageManager { + pub fn new() -> Self { + Self { + phys2page: BTreeMap::new(), + } + } + + /// Allocate contiguous 4K-sized pages. + /// + /// # Parameters + /// + /// - `num_pages`: The number of contiguous physical pages to allocate. + /// - `align_pow2`: The alignment requirement expressed as a power of two. The starting address + /// of the allocated memory will be aligned to `2^align_pow2` bytes. + /// # Returns + /// - newly allocated page, or error. 
+ pub fn alloc(&mut self, num_pages: usize, align_pow2: usize) -> AxResult> { + match GlobalPage::alloc_contiguous(num_pages, align_pow2) { + Ok(page) => { + let page = Arc::new(Page::new(page)); + + assert!( + self.phys2page + .insert(page.start_paddr(), page.clone()) + .is_none() + ); + + Ok(page.clone()) + } + Err(e) => Err(e), + } + } + + /// Decrement the reference count of the page at the given physical address. + /// When the reference count is 0, it is reclaimed by RAII + pub fn dealloc(&mut self, paddr: PhysAddr) { + self.dec_page_ref(paddr); + } + + /// Increment the reference count of the page at the given physical address. + pub fn inc_page_ref(&self, paddr: PhysAddr) { + if let Some(page) = self.find_page(paddr) { + page.inc_ref(); + } + } + + /// Decrement the reference count of the page at the given physical address. + /// When the reference count is 0, it is reclaimed by RAII + pub fn dec_page_ref(&mut self, paddr: PhysAddr) { + if let Some(page) = self.find_page(paddr) { + match page.dec_ref() { + 1 => { + debug!("page manager => sub ref : {:#?}. ref : 0", paddr); + self.phys2page.remove(&paddr); + } + n => trace!("page manager => sub ref : {:#?}, ref : {}", paddr, n - 1), + } + } + } + + /// Find the page for the given physical address. + pub fn find_page(&self, addr: PhysAddr) -> Option> { + if let Some((_, value)) = self.phys2page.range(..=addr).next_back() { + if value.contain_paddr(addr) { + Some(value.clone()) + } else { + None + } + } else { + None + } + } +} + +pub struct Page { + inner: SpinRaw, + // page ref count + ref_count: AtomicUsize, +} + +impl Page { + fn new(page: GlobalPage) -> Self { + Self { + inner: SpinRaw::new(page), + ref_count: AtomicUsize::new(1), + } + } + + fn inc_ref(&self) -> usize { + self.ref_count.fetch_add(1, Ordering::SeqCst) + } + + fn dec_ref(&self) -> usize { + self.ref_count.fetch_sub(1, Ordering::SeqCst) + } + + // Fill physical memory with zero + pub fn zero(&self) { + self.inner.lock().zero(); + } + + /// Get current page reference count. + pub fn ref_count(&self) -> usize { + self.ref_count.load(Ordering::SeqCst) + } + + /// Get the starting physical address of the page. + pub fn start_paddr(&self) -> PhysAddr { + self.inner.lock().start_paddr(virt_to_phys) + } + + /// Check if the physical address is on the page + pub fn contain_paddr(&self, addr: PhysAddr) -> bool { + let page = self.inner.lock(); + let start = page.start_paddr(virt_to_phys); + + start <= addr && addr <= start + page.size() + } + + /// Copy data from another page to this page. 
+ pub fn copy_form(&self, other: Arc) { + self.inner + .lock() + .as_slice_mut() + .copy_from_slice(other.inner.lock().as_slice()); + } +} From 4df0a03758c44ab77b0dea3c8e1e71c3404ad1e7 Mon Sep 17 00:00:00 2001 From: mingzi Date: Tue, 3 Jun 2025 17:46:32 +0800 Subject: [PATCH 02/15] Squash merge cow-backend into cow --- modules/axmm/src/aspace.rs | 143 ++++++++++++++-------------- modules/axmm/src/backend/alloc.rs | 133 ++++++++++++++++++++++---- modules/axmm/src/backend/mod.rs | 64 ++++++++++--- modules/axmm/src/lib.rs | 4 - modules/axmm/src/page.rs | 153 ------------------------------ 5 files changed, 236 insertions(+), 261 deletions(-) delete mode 100644 modules/axmm/src/page.rs diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs index d656acef4c..116e2d08be 100644 --- a/modules/axmm/src/aspace.rs +++ b/modules/axmm/src/aspace.rs @@ -1,5 +1,6 @@ use core::fmt; +use alloc::sync::Arc; use axerrno::{AxError, AxResult, ax_err}; use axhal::mem::phys_to_virt; use axhal::paging::{MappingFlags, PageTable, PagingError}; @@ -7,10 +8,10 @@ use memory_addr::{ MemoryAddr, PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned_4k, }; use memory_set::{MemoryArea, MemorySet}; +use page_table_multiarch::PageSize; -use crate::backend::Backend; +use crate::backend::{Backend, alloc_frame}; use crate::mapping_err_to_ax_err; -use crate::page::page_manager; /// The virtual memory address space. pub struct AddrSpace { @@ -170,7 +171,7 @@ impl AddrSpace { while let Some(area) = self.areas.find(start) { let backend = area.backend(); - if let Backend::Alloc { populate } = backend { + if let Backend::Alloc { populate, .. } = backend { if !*populate { for addr in PageIter4K::new(start, area.end().min(end)).unwrap() { match self.pt.query(addr) { @@ -382,7 +383,7 @@ impl AddrSpace { // 1. page fault caused by write // 2. pte exists // 3. Not shared memory - return self.handle_page_fault_cow(vaddr, paddr, page_size.into(), orig_flags); + return self.handle_page_fault_cow(vaddr, paddr, page_size); } else { return area .backend() @@ -398,7 +399,11 @@ impl AddrSpace { let mut new_aspace = Self::new_empty(self.base(), self.size())?; for area in self.areas.iter() { - let backend = area.backend(); + let backend = match area.backend() { + Backend::Alloc { populate, .. } => Backend::new_alloc(*populate), + Backend::Linear { .. } => area.backend().clone(), + }; + // Remap the memory area in the new address space. let new_area = MemoryArea::new(area.start(), area.size(), area.flags(), backend.clone()); @@ -450,16 +455,11 @@ impl AddrSpace { /// /// For pages that require COW, remove `write` flags. pub fn copy_with_cow(&mut self) -> AxResult { - // TODO: huge page let mut new_aspace = Self::new_empty(self.base(), self.size())?; let new_pt = &mut new_aspace.pt; let old_pt = &mut self.pt; for area in self.areas.iter() { - // Copy the memory area in the new address space. - // - let mut is_shared = false; - let mut backend = area.backend().clone(); // TODO: Shared mem area match &mut backend { @@ -467,9 +467,32 @@ impl AddrSpace { // from mapping page table entries for the virtual addresses. Backend::Alloc { populate, .. } => { *populate = false; + + let mut flags = area.flags(); + flags.remove(MappingFlags::WRITE); + + //If the page is mapped in the old page table: + // - Update its permissions in the old page table using `flags`. + // - Map the same physical page into the new page table at the same + // virtual address, with the same page size and `flags`. 
+ // TODO: huge page + for vaddr in PageIter4K::new(area.start(), area.end()) + .expect("Failed to create page iterator") + { + if let Ok((paddr, _, page_size)) = old_pt.query(vaddr) { + old_pt + .protect(vaddr, flags) + .map(|(_, tlb)| tlb.flush()) + .ok(); + new_pt + .map(vaddr, paddr, page_size, flags) + .map(|tlb| tlb.flush()) + .ok(); + } + } } // Linear-backed regions are usually allocated by the kernel and are shared - Backend::Linear { .. } => is_shared = true, + Backend::Linear { .. } => (), } let new_area = MemoryArea::new(area.start(), area.size(), area.flags(), backend); @@ -477,28 +500,6 @@ impl AddrSpace { .areas .map(new_area, new_pt, false) .map_err(mapping_err_to_ax_err)?; - - if is_shared { - continue; - } - - for vaddr in PageIter4K::new(area.start(), area.end()).unwrap() { - if let Ok((paddr, mut flags, size)) = old_pt.query(vaddr) { - // remove `write` flags - flags.remove(MappingFlags::WRITE); - old_pt - .protect(vaddr, flags) - .map(|(_, tlb)| tlb.flush()) - .ok(); - // The same physical page is mapped in the new page table - new_pt - .map(vaddr, paddr, size, flags) - .map(|tlb| tlb.flush()) - .ok(); - // NOTE: Increment the physical page reference count - page_manager().lock().inc_page_ref(paddr); - } - } } Ok(new_aspace) @@ -509,7 +510,6 @@ impl AddrSpace { /// # Arguments /// - `vaddr`: The virtual address that triggered the fault. /// - `paddr`: The physical address currently mapped to the faulting virtual address. - /// - `orig_flags`: The MemoryArea flags. /// /// # Returns /// - `true` if the page fault was handled successfully. @@ -518,47 +518,46 @@ impl AddrSpace { &mut self, vaddr: VirtAddr, paddr: PhysAddr, - page_size: usize, - orig_flags: MappingFlags, + page_size: PageSize, ) -> bool { - let mut page_manager = page_manager().lock(); - - if let Some(old_page) = page_manager.find_page(paddr) { - let ref_count = old_page.ref_count(); - - debug!( - "handle_page_fault_cow => ref_count : {}, flags : {:#?}, vaddr: {:#?}", - ref_count, orig_flags, vaddr, - ); + let area = self.areas.find(vaddr).unwrap(); + match area.backend() { + Backend::Alloc { tracker, .. } => { + let old_frame = match tracker.find(paddr) { + Some(frame) => frame, + None => return false, + }; - match ref_count { - 0 => false, - // There is only one AddrSpace reference to the page, - // so there is no need to copy it. - 1 => self - .pt - .protect(vaddr, orig_flags) - .map(|(_, tlb)| tlb.flush()) - .is_ok(), - // Allocates the new page and copies the contents of the original page, - // remapping the virtual address to the physical address of the new page. - // NOTE: Reduce the page's reference count - 2.. => match page_manager.alloc(page_size / PAGE_SIZE_4K, PAGE_SIZE_4K) { - Ok(new_page) => { - new_page.copy_form(old_page); - page_manager.dec_page_ref(paddr); - self.pt - .remap(vaddr, new_page.start_paddr(), orig_flags) - .map(|(_, tlb)| { - tlb.flush(); - }) - .is_ok() - } - Err(_) => false, - }, + match Arc::strong_count(&old_frame) { + ..=1 => false, + // There is only one AddrSpace reference to the page, + // so there is no need to copy it. + 2 => self + .pt + .protect(vaddr, area.flags()) + .map(|(_, tlb)| tlb.flush()) + .is_ok(), + + // Allocates the new page and copies the contents of the original page, + // remapping the virtual address to the physical address of the new page. + // NOTE: Reduce the page's reference count + 3.. 
=> match alloc_frame(false, page_size.into()) { + Some(new_frame) => { + new_frame.copy_from(old_frame.clone()); + tracker.remove(old_frame.start_paddr()); + tracker.insert(new_frame.clone()); + self.pt + .remap(vaddr, new_frame.start_paddr(), area.flags()) + .map(|(_, tlb)| { + tlb.flush(); + }) + .is_ok() + } + None => false, + }, + } } - } else { - false + Backend::Linear { .. } => false, } } } diff --git a/modules/axmm/src/backend/alloc.rs b/modules/axmm/src/backend/alloc.rs index d20674171a..f5ff01b8d6 100644 --- a/modules/axmm/src/backend/alloc.rs +++ b/modules/axmm/src/backend/alloc.rs @@ -1,31 +1,115 @@ -use axhal::paging::{MappingFlags, PageSize, PageTable}; +use alloc::{sync::Arc, vec::Vec}; +use axalloc::GlobalPage; +use axhal::{ + mem::virt_to_phys, + paging::{MappingFlags, PageSize, PageTable}, +}; +use kspin::SpinNoIrq; use memory_addr::{PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr}; -use crate::page::page_manager; - use super::Backend; -fn alloc_frame(zeroed: bool) -> Option { - page_manager() - .lock() - .alloc(1, PAGE_SIZE_4K) +pub struct FrameTracker { + inner: SpinNoIrq>>, +} + +impl FrameTracker { + fn new() -> Self { + Self { + inner: SpinNoIrq::new(Vec::new()), + } + } + + pub fn for_each(&self, f: F) + where + F: FnMut(&Arc), + { + self.inner.lock().iter().for_each(f); + } + + pub fn find(&self, paddr: PhysAddr) -> Option> { + self.inner + .lock() + .iter() + .find(|frame| frame.contains(paddr)) + .map(|frame| frame.clone()) + } + + pub fn insert(&self, frame: Arc) { + self.inner.lock().push(frame); + } + + pub fn remove(&self, paddr: PhysAddr) { + let mut vec = self.inner.lock(); + let index = vec + .iter() + .position(|frame| frame.contains(paddr)) + .expect("Tried to remove a frame that was not present"); + vec.remove(index); + } +} + +pub struct Frame { + inner: SpinNoIrq, +} + +impl Frame { + fn new(page: GlobalPage) -> Self { + Self { + inner: SpinNoIrq::new(page), + } + } + + pub fn copy_from(&self, other: Arc) { + self.inner + .lock() + .as_slice_mut() + .copy_from_slice(other.inner.lock().as_slice()); + } + + pub fn contains(&self, paddr: PhysAddr) -> bool { + let start = self.start_paddr(); + let size = self.inner.lock().size(); + // left-closed, right-open interval + start <= paddr && paddr < start + size + } + + pub fn start_paddr(&self) -> PhysAddr { + self.inner.lock().start_paddr(virt_to_phys) + } +} + +/// Allocates a physical memory frame and optionally zeroes it. +/// +/// # Parameters +/// +/// - `zeroed`: A boolean indicating whether the allocated frame should be zero-initialized. +/// +/// # Returns +/// +/// Returns an `Option>`: +/// - `Some(Arc)`: Allocation succeeded; the frame is wrapped in a reference-counted pointer. +/// - `None`: Allocation failed (e.g., out of memory). +pub fn alloc_frame(zeroed: bool, page_size: usize) -> Option> { + let page_num = page_size / PAGE_SIZE_4K; + GlobalPage::alloc_contiguous(page_num, page_size) .ok() - .map(|page| { + .map(|mut page| { if zeroed { page.zero(); } - page.start_paddr() - }) -} -fn dealloc_frame(frame: PhysAddr) { - page_manager().lock().dealloc(frame); + Arc::new(Frame::new(page)) + }) } impl Backend { /// Creates a new allocation mapping backend. 
- pub const fn new_alloc(populate: bool) -> Self { - Self::Alloc { populate } + pub fn new_alloc(populate: bool) -> Self { + Self::Alloc { + populate, + tracker: Arc::new(FrameTracker::new()), + } } pub(crate) fn map_alloc( @@ -34,6 +118,7 @@ impl Backend { flags: MappingFlags, pt: &mut PageTable, populate: bool, + trakcer: Arc, ) -> bool { debug!( "map_alloc: [{:#x}, {:#x}) {:?} (populate={})", @@ -45,8 +130,9 @@ impl Backend { if populate { // allocate all possible physical frames for populated mapping. for addr in PageIter4K::new(start, start + size).unwrap() { - if let Some(frame) = alloc_frame(true) { - if let Ok(tlb) = pt.map(addr, frame, PageSize::Size4K, flags) { + if let Some(page) = alloc_frame(true, PAGE_SIZE_4K) { + if let Ok(tlb) = pt.map(addr, page.start_paddr(), PageSize::Size4K, flags) { + trakcer.insert(page); tlb.ignore(); // TLB flush on map is unnecessary, as there are no outdated mappings. } else { return false; @@ -64,6 +150,7 @@ impl Backend { size: usize, pt: &mut PageTable, _populate: bool, + tracker: Arc, ) -> bool { debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size); for addr in PageIter4K::new(start, start + size).unwrap() { @@ -74,7 +161,7 @@ impl Backend { return false; } tlb.flush(); - dealloc_frame(frame); + tracker.remove(frame); } else { // Deallocation is needn't if the page is not mapped. } @@ -87,15 +174,19 @@ impl Backend { orig_flags: MappingFlags, pt: &mut PageTable, populate: bool, + tracker: Arc, ) -> bool { if populate { false // Populated mappings should not trigger page faults. - } else if let Some(frame) = alloc_frame(true) { + } else if let Some(page) = alloc_frame(true, PAGE_SIZE_4K) { // Allocate a physical frame lazily and map it to the fault address. // `vaddr` does not need to be aligned. It will be automatically // aligned during `pt.map` regardless of the page size. - pt.map(vaddr, frame, PageSize::Size4K, orig_flags) - .map(|tlb| tlb.flush()) + pt.map(vaddr, page.start_paddr(), PageSize::Size4K, orig_flags) + .map(|tlb| { + tracker.insert(page); + tlb.flush() + }) .is_ok() } else { false diff --git a/modules/axmm/src/backend/mod.rs b/modules/axmm/src/backend/mod.rs index be58b3e59d..16f6f04eac 100644 --- a/modules/axmm/src/backend/mod.rs +++ b/modules/axmm/src/backend/mod.rs @@ -1,5 +1,7 @@ //! Memory mapping backends. +use ::alloc::sync::Arc; +use alloc::FrameTracker; use axhal::paging::{MappingFlags, PageTable}; use memory_addr::VirtAddr; use memory_set::MappingBackend; @@ -7,6 +9,8 @@ use memory_set::MappingBackend; mod alloc; mod linear; +pub use alloc::alloc_frame; + /// A unified enum type for different memory mapping backends. /// /// Currently, two backends are implemented: @@ -15,7 +19,6 @@ mod linear; /// contiguous and their addresses should be known when creating the mapping. /// - **Allocation**: used in general, or for lazy mappings. The target physical /// frames are obtained from the global allocator. -#[derive(Clone)] pub enum Backend { /// Linear mapping backend. /// @@ -35,6 +38,9 @@ pub enum Backend { Alloc { /// Whether to populate the physical frames when creating the mapping. populate: bool, + /// Track of the mapped physical frames. + /// The physical frame is wrapped through `Arc` and is released when there is no reference. 
+ tracker: Arc, }, } @@ -43,16 +49,22 @@ impl MappingBackend for Backend { type Flags = MappingFlags; type PageTable = PageTable; fn map(&self, start: VirtAddr, size: usize, flags: MappingFlags, pt: &mut PageTable) -> bool { - match *self { - Self::Linear { pa_va_offset } => Self::map_linear(start, size, flags, pt, pa_va_offset), - Self::Alloc { populate } => Self::map_alloc(start, size, flags, pt, populate), + match self { + Self::Linear { pa_va_offset } => { + Self::map_linear(start, size, flags, pt, *pa_va_offset) + } + Self::Alloc { populate, tracker } => { + Self::map_alloc(start, size, flags, pt, *populate, tracker.clone()) + } } } fn unmap(&self, start: VirtAddr, size: usize, pt: &mut PageTable) -> bool { - match *self { - Self::Linear { pa_va_offset } => Self::unmap_linear(start, size, pt, pa_va_offset), - Self::Alloc { populate } => Self::unmap_alloc(start, size, pt, populate), + match self { + Self::Linear { pa_va_offset } => Self::unmap_linear(start, size, pt, *pa_va_offset), + Self::Alloc { populate, tracker } => { + Self::unmap_alloc(start, size, pt, *populate, tracker.clone()) + } } } @@ -70,6 +82,32 @@ impl MappingBackend for Backend { } } +impl Clone for Backend { + /// The `Backend` enum implements the `Clone` trait to properly duplicate its internal state + /// when cloning the backend. + /// + /// For the `Alloc` variant, create a new allocator backend, clone its `tracker`, + /// and insert every `frame` from the original `tracker` into the new `tracker`. + fn clone(&self) -> Self { + match self { + Backend::Alloc { populate, tracker } => { + let backend = Self::new_alloc(*populate); + let new_tracker = match &backend { + Backend::Alloc { tracker, .. } => tracker.clone(), + _ => unreachable!(), + }; + + tracker.for_each(|frame| { + new_tracker.insert(frame.clone()); + }); + + backend + } + Backend::Linear { pa_va_offset } => Self::new_linear(*pa_va_offset), + } + } +} + impl Backend { pub(crate) fn handle_page_fault( &self, @@ -77,11 +115,15 @@ impl Backend { orig_flags: MappingFlags, page_table: &mut PageTable, ) -> bool { - match *self { + match self { Self::Linear { .. } => false, // Linear mappings should not trigger page faults. - Self::Alloc { populate } => { - Self::handle_page_fault_alloc(vaddr, orig_flags, page_table, populate) - } + Self::Alloc { populate, tracker } => Self::handle_page_fault_alloc( + vaddr, + orig_flags, + page_table, + *populate, + tracker.clone(), + ), } } } diff --git a/modules/axmm/src/lib.rs b/modules/axmm/src/lib.rs index a38a1919d6..1be9043498 100644 --- a/modules/axmm/src/lib.rs +++ b/modules/axmm/src/lib.rs @@ -8,7 +8,6 @@ extern crate alloc; mod aspace; mod backend; -mod page; pub use self::aspace::AddrSpace; pub use self::backend::Backend; @@ -19,7 +18,6 @@ use kspin::SpinNoIrq; use lazyinit::LazyInit; use memory_addr::{PhysAddr, va}; use memory_set::MappingError; -use page::init_page_manager; static KERNEL_ASPACE: LazyInit> = LazyInit::new(); @@ -65,8 +63,6 @@ pub fn init_memory_management() { debug!("kernel address space init OK: {:#x?}", kernel_aspace); KERNEL_ASPACE.init_once(SpinNoIrq::new(kernel_aspace)); axhal::paging::set_kernel_page_table_root(kernel_page_table_root()); - - init_page_manager(); } /// Initializes kernel paging for secondary CPUs. 
diff --git a/modules/axmm/src/page.rs b/modules/axmm/src/page.rs deleted file mode 100644 index 61f2820658..0000000000 --- a/modules/axmm/src/page.rs +++ /dev/null @@ -1,153 +0,0 @@ -use core::sync::atomic::{AtomicUsize, Ordering}; - -use alloc::{collections::btree_map::BTreeMap, sync::Arc}; -use axalloc::GlobalPage; -use axerrno::AxResult; -use axhal::mem::virt_to_phys; -use kspin::{SpinNoIrq, SpinRaw}; -use lazyinit::LazyInit; -use memory_addr::PhysAddr; - -static PAGE_MANAGER: LazyInit> = LazyInit::new(); - -pub(crate) fn init_page_manager() { - PAGE_MANAGER.init_once(SpinNoIrq::new(PageManager::new())); -} - -pub fn page_manager() -> &'static SpinNoIrq { - &PAGE_MANAGER -} - -/// Manages the physical pages allocated in AddrSpace, -/// typically Backend::Alloc physical page frames -pub struct PageManager { - phys2page: BTreeMap>, -} - -impl PageManager { - pub fn new() -> Self { - Self { - phys2page: BTreeMap::new(), - } - } - - /// Allocate contiguous 4K-sized pages. - /// - /// # Parameters - /// - /// - `num_pages`: The number of contiguous physical pages to allocate. - /// - `align_pow2`: The alignment requirement expressed as a power of two. The starting address - /// of the allocated memory will be aligned to `2^align_pow2` bytes. - /// # Returns - /// - newly allocated page, or error. - pub fn alloc(&mut self, num_pages: usize, align_pow2: usize) -> AxResult> { - match GlobalPage::alloc_contiguous(num_pages, align_pow2) { - Ok(page) => { - let page = Arc::new(Page::new(page)); - - assert!( - self.phys2page - .insert(page.start_paddr(), page.clone()) - .is_none() - ); - - Ok(page.clone()) - } - Err(e) => Err(e), - } - } - - /// Decrement the reference count of the page at the given physical address. - /// When the reference count is 0, it is reclaimed by RAII - pub fn dealloc(&mut self, paddr: PhysAddr) { - self.dec_page_ref(paddr); - } - - /// Increment the reference count of the page at the given physical address. - pub fn inc_page_ref(&self, paddr: PhysAddr) { - if let Some(page) = self.find_page(paddr) { - page.inc_ref(); - } - } - - /// Decrement the reference count of the page at the given physical address. - /// When the reference count is 0, it is reclaimed by RAII - pub fn dec_page_ref(&mut self, paddr: PhysAddr) { - if let Some(page) = self.find_page(paddr) { - match page.dec_ref() { - 1 => { - debug!("page manager => sub ref : {:#?}. ref : 0", paddr); - self.phys2page.remove(&paddr); - } - n => trace!("page manager => sub ref : {:#?}, ref : {}", paddr, n - 1), - } - } - } - - /// Find the page for the given physical address. - pub fn find_page(&self, addr: PhysAddr) -> Option> { - if let Some((_, value)) = self.phys2page.range(..=addr).next_back() { - if value.contain_paddr(addr) { - Some(value.clone()) - } else { - None - } - } else { - None - } - } -} - -pub struct Page { - inner: SpinRaw, - // page ref count - ref_count: AtomicUsize, -} - -impl Page { - fn new(page: GlobalPage) -> Self { - Self { - inner: SpinRaw::new(page), - ref_count: AtomicUsize::new(1), - } - } - - fn inc_ref(&self) -> usize { - self.ref_count.fetch_add(1, Ordering::SeqCst) - } - - fn dec_ref(&self) -> usize { - self.ref_count.fetch_sub(1, Ordering::SeqCst) - } - - // Fill physical memory with zero - pub fn zero(&self) { - self.inner.lock().zero(); - } - - /// Get current page reference count. - pub fn ref_count(&self) -> usize { - self.ref_count.load(Ordering::SeqCst) - } - - /// Get the starting physical address of the page. 
- pub fn start_paddr(&self) -> PhysAddr { - self.inner.lock().start_paddr(virt_to_phys) - } - - /// Check if the physical address is on the page - pub fn contain_paddr(&self, addr: PhysAddr) -> bool { - let page = self.inner.lock(); - let start = page.start_paddr(virt_to_phys); - - start <= addr && addr <= start + page.size() - } - - /// Copy data from another page to this page. - pub fn copy_form(&self, other: Arc) { - self.inner - .lock() - .as_slice_mut() - .copy_from_slice(other.inner.lock().as_slice()); - } -} From c5ea6ae4270693e99c76515377957c2a55f39d55 Mon Sep 17 00:00:00 2001 From: mingzi Date: Sat, 7 Jun 2025 16:14:29 +0800 Subject: [PATCH 03/15] refactor: replace Arc-based frame tracking with frame info table - Remove Arc and FrameTracker in favor of atomic ref counting - Add frameinfo module for physical frame management - Simplify COW fault handling with direct frame operations - Update backend allocator to use new frame tracking system - only support with x86 --- modules/axmm/src/aspace.rs | 95 ++++++++++--------- modules/axmm/src/backend/alloc.rs | 147 ++++++++---------------------- modules/axmm/src/backend/mod.rs | 64 +++---------- modules/axmm/src/frameinfo.rs | 88 ++++++++++++++++++ modules/axmm/src/lib.rs | 4 + 5 files changed, 195 insertions(+), 203 deletions(-) create mode 100644 modules/axmm/src/frameinfo.rs diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs index 116e2d08be..38ac0ec6fd 100644 --- a/modules/axmm/src/aspace.rs +++ b/modules/axmm/src/aspace.rs @@ -1,6 +1,5 @@ use core::fmt; -use alloc::sync::Arc; use axerrno::{AxError, AxResult, ax_err}; use axhal::mem::phys_to_virt; use axhal::paging::{MappingFlags, PageTable, PagingError}; @@ -10,7 +9,8 @@ use memory_addr::{ use memory_set::{MemoryArea, MemorySet}; use page_table_multiarch::PageSize; -use crate::backend::{Backend, alloc_frame}; +use crate::backend::{Backend, alloc_frame, dealloc_frame}; +use crate::frameinfo::{add_frame_ref, get_frame_info}; use crate::mapping_err_to_ax_err; /// The virtual memory address space. @@ -383,7 +383,13 @@ impl AddrSpace { // 1. page fault caused by write // 2. pte exists // 3. Not shared memory - return self.handle_page_fault_cow(vaddr, paddr, page_size); + let off = page_size.align_offset(vaddr.into()); + return self.handle_cow_fault( + vaddr, + paddr.sub(off), + orig_flags, + page_size, + ); } else { return area .backend() @@ -480,6 +486,8 @@ impl AddrSpace { .expect("Failed to create page iterator") { if let Ok((paddr, _, page_size)) = old_pt.query(vaddr) { + add_frame_ref(paddr); + old_pt .protect(vaddr, flags) .map(|(_, tlb)| tlb.flush()) @@ -509,55 +517,57 @@ impl AddrSpace { /// /// # Arguments /// - `vaddr`: The virtual address that triggered the fault. - /// - `paddr`: The physical address currently mapped to the faulting virtual address. + /// - `paddr`: It must be an aligned physical address; if it's a huge page, + /// it must be the starting physical address. + /// - `page_size`: The size of the page on which the current physical address is located /// /// # Returns /// - `true` if the page fault was handled successfully. /// - `false` if the fault handling failed (e.g., allocation failed or invalid ref count). - fn handle_page_fault_cow( + fn handle_cow_fault( &mut self, vaddr: VirtAddr, paddr: PhysAddr, + flags: MappingFlags, page_size: PageSize, ) -> bool { - let area = self.areas.find(vaddr).unwrap(); - match area.backend() { - Backend::Alloc { tracker, .. 
} => { - let old_frame = match tracker.find(paddr) { - Some(frame) => frame, - None => return false, - }; - - match Arc::strong_count(&old_frame) { - ..=1 => false, - // There is only one AddrSpace reference to the page, - // so there is no need to copy it. - 2 => self - .pt - .protect(vaddr, area.flags()) - .map(|(_, tlb)| tlb.flush()) - .is_ok(), - - // Allocates the new page and copies the contents of the original page, - // remapping the virtual address to the physical address of the new page. - // NOTE: Reduce the page's reference count - 3.. => match alloc_frame(false, page_size.into()) { - Some(new_frame) => { - new_frame.copy_from(old_frame.clone()); - tracker.remove(old_frame.start_paddr()); - tracker.insert(new_frame.clone()); - self.pt - .remap(vaddr, new_frame.start_paddr(), area.flags()) - .map(|(_, tlb)| { - tlb.flush(); - }) - .is_ok() - } - None => false, - }, + let frame_info = get_frame_info(paddr); + match frame_info.ref_count() { + 0 => unreachable!(), + // There is only one AddrSpace reference to the page, + // so there is no need to copy it. + 1 => self + .pt + .protect(vaddr, flags) + .map(|(_, tlb)| tlb.flush()) + .is_ok(), + // Allocates the new page and copies the contents of the original page, + // remapping the virtual address to the physical address of the new page. + // NOTE: Reduce the page's reference count + 2.. => match alloc_frame(false, page_size.into()) { + Some(new_frame) => { + let new_slice = unsafe { + core::slice::from_raw_parts_mut( + phys_to_virt(new_frame).as_mut_ptr(), + page_size.into(), + ) + }; + let old_slice = unsafe { + core::slice::from_raw_parts(phys_to_virt(paddr).as_ptr(), page_size.into()) + }; + + new_slice.copy_from_slice(old_slice); + dealloc_frame(paddr); + + self.pt + .remap(vaddr, new_frame, flags) + .map(|(_, tlb)| { + tlb.flush(); + }) + .is_ok() } - } - Backend::Linear { .. } => false, + None => false, + }, } } } @@ -574,7 +584,6 @@ impl fmt::Debug for AddrSpace { impl Drop for AddrSpace { fn drop(&mut self) { - debug!("AddrSpace drop ..... 
"); self.clear(); } } diff --git a/modules/axmm/src/backend/alloc.rs b/modules/axmm/src/backend/alloc.rs index f5ff01b8d6..7b0aac8b22 100644 --- a/modules/axmm/src/backend/alloc.rs +++ b/modules/axmm/src/backend/alloc.rs @@ -1,115 +1,52 @@ -use alloc::{sync::Arc, vec::Vec}; -use axalloc::GlobalPage; +use axalloc::global_allocator; use axhal::{ - mem::virt_to_phys, + mem::{phys_to_virt, virt_to_phys}, paging::{MappingFlags, PageSize, PageTable}, }; -use kspin::SpinNoIrq; use memory_addr::{PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr}; -use super::Backend; - -pub struct FrameTracker { - inner: SpinNoIrq>>, -} - -impl FrameTracker { - fn new() -> Self { - Self { - inner: SpinNoIrq::new(Vec::new()), - } - } - - pub fn for_each(&self, f: F) - where - F: FnMut(&Arc), - { - self.inner.lock().iter().for_each(f); - } - - pub fn find(&self, paddr: PhysAddr) -> Option> { - self.inner - .lock() - .iter() - .find(|frame| frame.contains(paddr)) - .map(|frame| frame.clone()) - } - - pub fn insert(&self, frame: Arc) { - self.inner.lock().push(frame); - } - - pub fn remove(&self, paddr: PhysAddr) { - let mut vec = self.inner.lock(); - let index = vec - .iter() - .position(|frame| frame.contains(paddr)) - .expect("Tried to remove a frame that was not present"); - vec.remove(index); - } -} - -pub struct Frame { - inner: SpinNoIrq, -} - -impl Frame { - fn new(page: GlobalPage) -> Self { - Self { - inner: SpinNoIrq::new(page), - } - } - - pub fn copy_from(&self, other: Arc) { - self.inner - .lock() - .as_slice_mut() - .copy_from_slice(other.inner.lock().as_slice()); - } - - pub fn contains(&self, paddr: PhysAddr) -> bool { - let start = self.start_paddr(); - let size = self.inner.lock().size(); - // left-closed, right-open interval - start <= paddr && paddr < start + size - } +use crate::frameinfo::{add_frame_ref, dec_frame_ref}; - pub fn start_paddr(&self) -> PhysAddr { - self.inner.lock().start_paddr(virt_to_phys) - } -} +use super::Backend; -/// Allocates a physical memory frame and optionally zeroes it. +/// Allocates a single physical frame with optional zero-initialization and alignment. /// /// # Parameters -/// -/// - `zeroed`: A boolean indicating whether the allocated frame should be zero-initialized. +/// - `zeroed`: If `true`, the allocated frame memory is zeroed out. +/// - `align`: The alignment requirement (in pages) for the allocation. /// /// # Returns -/// -/// Returns an `Option>`: -/// - `Some(Arc)`: Allocation succeeded; the frame is wrapped in a reference-counted pointer. -/// - `None`: Allocation failed (e.g., out of memory). -pub fn alloc_frame(zeroed: bool, page_size: usize) -> Option> { - let page_num = page_size / PAGE_SIZE_4K; - GlobalPage::alloc_contiguous(page_num, page_size) - .ok() - .map(|mut page| { - if zeroed { - page.zero(); - } +/// Returns `Some(PhysAddr)` of the allocated frame on success, or `None` if allocation fails. +pub fn alloc_frame(zeroed: bool, align: usize) -> Option { + let vaddr = VirtAddr::from(global_allocator().alloc_pages(1, align).ok()?); + if zeroed { + unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, align) }; + } + let paddr = virt_to_phys(vaddr); + add_frame_ref(paddr); + Some(paddr) +} - Arc::new(Frame::new(page)) - }) +/// Deallocates a previously allocated physical frame. +/// +/// This function decreases the reference count associated with the frame. +/// When the reference count reaches 1, it actually frees the frame memory. +/// +/// # Parameters +/// - `frame`: The physical address of the frame to deallocate. 
+pub fn dealloc_frame(frame: PhysAddr) { + let vaddr = phys_to_virt(frame); + match dec_frame_ref(frame) { + 0 => unreachable!(), + 1 => global_allocator().dealloc_pages(vaddr.as_usize(), 1), + _ => (), + } } impl Backend { /// Creates a new allocation mapping backend. pub fn new_alloc(populate: bool) -> Self { - Self::Alloc { - populate, - tracker: Arc::new(FrameTracker::new()), - } + Self::Alloc { populate } } pub(crate) fn map_alloc( @@ -118,7 +55,6 @@ impl Backend { flags: MappingFlags, pt: &mut PageTable, populate: bool, - trakcer: Arc, ) -> bool { debug!( "map_alloc: [{:#x}, {:#x}) {:?} (populate={})", @@ -130,9 +66,8 @@ impl Backend { if populate { // allocate all possible physical frames for populated mapping. for addr in PageIter4K::new(start, start + size).unwrap() { - if let Some(page) = alloc_frame(true, PAGE_SIZE_4K) { - if let Ok(tlb) = pt.map(addr, page.start_paddr(), PageSize::Size4K, flags) { - trakcer.insert(page); + if let Some(frame) = alloc_frame(true, PAGE_SIZE_4K) { + if let Ok(tlb) = pt.map(addr, frame, PageSize::Size4K, flags) { tlb.ignore(); // TLB flush on map is unnecessary, as there are no outdated mappings. } else { return false; @@ -150,7 +85,6 @@ impl Backend { size: usize, pt: &mut PageTable, _populate: bool, - tracker: Arc, ) -> bool { debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size); for addr in PageIter4K::new(start, start + size).unwrap() { @@ -161,7 +95,8 @@ impl Backend { return false; } tlb.flush(); - tracker.remove(frame); + + dealloc_frame(frame); } else { // Deallocation is needn't if the page is not mapped. } @@ -174,19 +109,15 @@ impl Backend { orig_flags: MappingFlags, pt: &mut PageTable, populate: bool, - tracker: Arc, ) -> bool { if populate { false // Populated mappings should not trigger page faults. - } else if let Some(page) = alloc_frame(true, PAGE_SIZE_4K) { + } else if let Some(frame) = alloc_frame(true, PAGE_SIZE_4K) { // Allocate a physical frame lazily and map it to the fault address. // `vaddr` does not need to be aligned. It will be automatically // aligned during `pt.map` regardless of the page size. - pt.map(vaddr, page.start_paddr(), PageSize::Size4K, orig_flags) - .map(|tlb| { - tracker.insert(page); - tlb.flush() - }) + pt.map(vaddr, frame, PageSize::Size4K, orig_flags) + .map(|tlb| tlb.flush()) .is_ok() } else { false diff --git a/modules/axmm/src/backend/mod.rs b/modules/axmm/src/backend/mod.rs index 16f6f04eac..02b7d4c70d 100644 --- a/modules/axmm/src/backend/mod.rs +++ b/modules/axmm/src/backend/mod.rs @@ -1,7 +1,5 @@ //! Memory mapping backends. -use ::alloc::sync::Arc; -use alloc::FrameTracker; use axhal::paging::{MappingFlags, PageTable}; use memory_addr::VirtAddr; use memory_set::MappingBackend; @@ -9,7 +7,7 @@ use memory_set::MappingBackend; mod alloc; mod linear; -pub use alloc::alloc_frame; +pub use alloc::{alloc_frame, dealloc_frame}; /// A unified enum type for different memory mapping backends. /// @@ -19,6 +17,7 @@ pub use alloc::alloc_frame; /// contiguous and their addresses should be known when creating the mapping. /// - **Allocation**: used in general, or for lazy mappings. The target physical /// frames are obtained from the global allocator. +#[derive(Clone)] pub enum Backend { /// Linear mapping backend. /// @@ -38,9 +37,6 @@ pub enum Backend { Alloc { /// Whether to populate the physical frames when creating the mapping. populate: bool, - /// Track of the mapped physical frames. - /// The physical frame is wrapped through `Arc` and is released when there is no reference. 
- tracker: Arc, }, } @@ -49,22 +45,16 @@ impl MappingBackend for Backend { type Flags = MappingFlags; type PageTable = PageTable; fn map(&self, start: VirtAddr, size: usize, flags: MappingFlags, pt: &mut PageTable) -> bool { - match self { - Self::Linear { pa_va_offset } => { - Self::map_linear(start, size, flags, pt, *pa_va_offset) - } - Self::Alloc { populate, tracker } => { - Self::map_alloc(start, size, flags, pt, *populate, tracker.clone()) - } + match *self { + Self::Linear { pa_va_offset } => Self::map_linear(start, size, flags, pt, pa_va_offset), + Self::Alloc { populate } => Self::map_alloc(start, size, flags, pt, populate), } } fn unmap(&self, start: VirtAddr, size: usize, pt: &mut PageTable) -> bool { - match self { - Self::Linear { pa_va_offset } => Self::unmap_linear(start, size, pt, *pa_va_offset), - Self::Alloc { populate, tracker } => { - Self::unmap_alloc(start, size, pt, *populate, tracker.clone()) - } + match *self { + Self::Linear { pa_va_offset } => Self::unmap_linear(start, size, pt, pa_va_offset), + Self::Alloc { populate } => Self::unmap_alloc(start, size, pt, populate), } } @@ -82,32 +72,6 @@ impl MappingBackend for Backend { } } -impl Clone for Backend { - /// The `Backend` enum implements the `Clone` trait to properly duplicate its internal state - /// when cloning the backend. - /// - /// For the `Alloc` variant, create a new allocator backend, clone its `tracker`, - /// and insert every `frame` from the original `tracker` into the new `tracker`. - fn clone(&self) -> Self { - match self { - Backend::Alloc { populate, tracker } => { - let backend = Self::new_alloc(*populate); - let new_tracker = match &backend { - Backend::Alloc { tracker, .. } => tracker.clone(), - _ => unreachable!(), - }; - - tracker.for_each(|frame| { - new_tracker.insert(frame.clone()); - }); - - backend - } - Backend::Linear { pa_va_offset } => Self::new_linear(*pa_va_offset), - } - } -} - impl Backend { pub(crate) fn handle_page_fault( &self, @@ -115,15 +79,11 @@ impl Backend { orig_flags: MappingFlags, page_table: &mut PageTable, ) -> bool { - match self { + match *self { Self::Linear { .. } => false, // Linear mappings should not trigger page faults. - Self::Alloc { populate, tracker } => Self::handle_page_fault_alloc( - vaddr, - orig_flags, - page_table, - *populate, - tracker.clone(), - ), + Self::Alloc { populate } => { + Self::handle_page_fault_alloc(vaddr, orig_flags, page_table, populate) + } } } } diff --git a/modules/axmm/src/frameinfo.rs b/modules/axmm/src/frameinfo.rs new file mode 100644 index 0000000000..72b3e0213d --- /dev/null +++ b/modules/axmm/src/frameinfo.rs @@ -0,0 +1,88 @@ +//! FrameInfo +//! +//! A simple physical FrameInfo manager is provided to track and manage +//! the reference count for every 4KB memory page frame in the system. +//! +//! There is a [' FrameInfo '] struct for each physical page frame +//! that keeps track of its reference count. +//! NOTE: If the page is huge page, its [`FrameInfo`] is placed at the +//! starting physical address. 
+use core::sync::atomic::{AtomicUsize, Ordering}; + +use alloc::vec::Vec; +use lazyinit::LazyInit; +use memory_addr::PhysAddr; + +// 4 kb page +const FRAME_SHIFT: usize = 12; + +pub const MAX_FRAME_NUM: usize = axconfig::plat::PHYS_MEMORY_SIZE >> FRAME_SHIFT; + +static FRAME_INFO_TABLE: LazyInit> = LazyInit::new(); + +pub fn init_frame_info_table() { + let _ = + FRAME_INFO_TABLE.init_once((0..MAX_FRAME_NUM).map(|_| FrameInfo::new_empty()).collect()); +} + +/// Returns the `FrameInfo` structure associated with a given physical address. +/// +/// # Parameters +/// - `paddr`: It must be an aligned physical address; if it's a huge page, +/// it must be the starting physical address. +/// +/// # Returns +/// A reference to the `FrameInfo` associated with the given physical address. +pub fn get_frame_info(paddr: PhysAddr) -> &'static FrameInfo { + &FRAME_INFO_TABLE[phys_to_pfn(paddr)] +} + +/// Increases the reference count of the frame associated with a physical address. +/// +/// # Parameters +/// - `paddr`: It must be an aligned physical address; if it's a huge page, +/// it must be the starting physical address. +pub fn add_frame_ref(paddr: PhysAddr) { + let frame = get_frame_info(paddr); + frame.inc_ref(); +} + +/// Decreases the reference count of the frame associated with a physical address. +/// +/// - `paddr`: It must be an aligned physical address; if it's a huge page, +/// it must be the starting physical address. +/// +/// # Returns +/// The updated reference count after decrementing. +pub fn dec_frame_ref(paddr: PhysAddr) -> usize { + let frame = get_frame_info(paddr); + frame.dec_ref() +} + +pub struct FrameInfo { + ref_count: AtomicUsize, +} + +impl FrameInfo { + fn new_empty() -> Self { + Self { + ref_count: AtomicUsize::new(0), + } + } + + fn inc_ref(&self) -> usize { + self.ref_count.fetch_add(1, Ordering::SeqCst) + } + + fn dec_ref(&self) -> usize { + self.ref_count.fetch_sub(1, Ordering::SeqCst) + } + + pub fn ref_count(&self) -> usize { + self.ref_count.load(Ordering::SeqCst) + } +} + +fn phys_to_pfn(paddr: PhysAddr) -> usize { + paddr.as_usize() >> FRAME_SHIFT +} diff --git a/modules/axmm/src/lib.rs b/modules/axmm/src/lib.rs index 1be9043498..73dc2c4749 100644 --- a/modules/axmm/src/lib.rs +++ b/modules/axmm/src/lib.rs @@ -8,12 +8,14 @@ extern crate alloc; mod aspace; mod backend; +mod frameinfo; pub use self::aspace::AddrSpace; pub use self::backend::Backend; use axerrno::{AxError, AxResult}; use axhal::mem::phys_to_virt; +use frameinfo::init_frame_info_table; use kspin::SpinNoIrq; use lazyinit::LazyInit; use memory_addr::{PhysAddr, va}; @@ -63,6 +65,8 @@ pub fn init_memory_management() { debug!("kernel address space init OK: {:#x?}", kernel_aspace); KERNEL_ASPACE.init_once(SpinNoIrq::new(kernel_aspace)); axhal::paging::set_kernel_page_table_root(kernel_page_table_root()); + + init_frame_info_table(); } /// Initializes kernel paging for secondary CPUs. 
From c479c347f4f0e940b91448398cabc66017526b67 Mon Sep 17 00:00:00 2001 From: mingzi Date: Sat, 7 Jun 2025 16:41:59 +0800 Subject: [PATCH 04/15] refactor: simplify address space handling logic - Remove redundant backend matching in fork - Reorganize COW fault handling comments - Clean up page table update flow --- modules/axmm/src/aspace.rs | 86 ++++++++++++++++++-------------------- 1 file changed, 41 insertions(+), 45 deletions(-) diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs index 38ac0ec6fd..63b5761208 100644 --- a/modules/axmm/src/aspace.rs +++ b/modules/axmm/src/aspace.rs @@ -375,21 +375,18 @@ impl AddrSpace { if let Some(area) = self.areas.find(vaddr) { let orig_flags = area.flags(); if orig_flags.contains(access_flags) { + // Two cases enter the branch: + // - shared pages (If there is a shared page in the vma) + // - cow if let Ok((paddr, _, page_size)) = self.pt.query(vaddr) { - // TODO: skip Shared if !access_flags.contains(MappingFlags::WRITE) { return false; } + let off = page_size.align_offset(vaddr.into()); // 1. page fault caused by write // 2. pte exists // 3. Not shared memory - let off = page_size.align_offset(vaddr.into()); - return self.handle_cow_fault( - vaddr, - paddr.sub(off), - orig_flags, - page_size, - ); + return self.handle_cow_fault(vaddr, paddr.sub(off), orig_flags, page_size); } else { return area .backend() @@ -405,10 +402,7 @@ impl AddrSpace { let mut new_aspace = Self::new_empty(self.base(), self.size())?; for area in self.areas.iter() { - let backend = match area.backend() { - Backend::Alloc { populate, .. } => Backend::new_alloc(*populate), - Backend::Linear { .. } => area.backend().clone(), - }; + let backend = area.backend(); // Remap the memory area in the new address space. let new_area = @@ -467,47 +461,48 @@ impl AddrSpace { for area in self.areas.iter() { let mut backend = area.backend().clone(); - // TODO: Shared mem area - match &mut backend { + if let Backend::Alloc { populate, .. } = &mut backend { // Forcing `populate = false` is to prevent the subsequent `new_aspace.areas.map` // from mapping page table entries for the virtual addresses. - Backend::Alloc { populate, .. } => { - *populate = false; - - let mut flags = area.flags(); - flags.remove(MappingFlags::WRITE); - - //If the page is mapped in the old page table: - // - Update its permissions in the old page table using `flags`. - // - Map the same physical page into the new page table at the same - // virtual address, with the same page size and `flags`. - // TODO: huge page - for vaddr in PageIter4K::new(area.start(), area.end()) - .expect("Failed to create page iterator") - { - if let Ok((paddr, _, page_size)) = old_pt.query(vaddr) { - add_frame_ref(paddr); - - old_pt - .protect(vaddr, flags) - .map(|(_, tlb)| tlb.flush()) - .ok(); - new_pt - .map(vaddr, paddr, page_size, flags) - .map(|tlb| tlb.flush()) - .ok(); - } - } - } - // Linear-backed regions are usually allocated by the kernel and are shared - Backend::Linear { .. } => (), + *populate = false } - let new_area = MemoryArea::new(area.start(), area.size(), area.flags(), backend); + let new_area = + MemoryArea::new(area.start(), area.size(), area.flags(), backend.clone()); new_aspace .areas .map(new_area, new_pt, false) .map_err(mapping_err_to_ax_err)?; + + // Linear-backed regions are usually allocated by the kernel and are shared + if matches!(backend, Backend::Linear { .. 
}) { + continue; + } + + let mut flags = area.flags(); + flags.remove(MappingFlags::WRITE); + //If the page is mapped in the old page table: + // - Update its permissions in the old page table using `flags`. + // - Map the same physical page into the new page table at the same + // virtual address, with the same page size and `flags`. + // TODO: huge page iter + for vaddr in + PageIter4K::new(area.start(), area.end()).expect("Failed to create page iterator") + { + if let Ok((paddr, _, page_size)) = old_pt.query(vaddr) { + // FIXME: need use huge page iter + add_frame_ref(paddr); + + old_pt + .protect(vaddr, flags) + .map(|(_, tlb)| tlb.flush()) + .ok(); + new_pt + .map(vaddr, paddr, page_size, flags) + .map(|tlb| tlb.flush()) + .ok(); + } + } } Ok(new_aspace) @@ -519,6 +514,7 @@ impl AddrSpace { /// - `vaddr`: The virtual address that triggered the fault. /// - `paddr`: It must be an aligned physical address; if it's a huge page, /// it must be the starting physical address. + /// - `flags`: vma flags. /// - `page_size`: The size of the page on which the current physical address is located /// /// # Returns From 21994881be203cdb2aa615c547b3f530390fb001 Mon Sep 17 00:00:00 2001 From: mingzi Date: Sat, 7 Jun 2025 23:27:34 +0800 Subject: [PATCH 05/15] refactor: improve memory management safety - Replace ok() with expect() for protect/map operations - Simplify page copy with copy_nonoverlapping - Add physical address validation in phys_to_pfn --- modules/axmm/src/aspace.rs | 14 +++++--------- modules/axmm/src/frameinfo.rs | 3 ++- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs index 63b5761208..ee96f655f7 100644 --- a/modules/axmm/src/aspace.rs +++ b/modules/axmm/src/aspace.rs @@ -496,11 +496,11 @@ impl AddrSpace { old_pt .protect(vaddr, flags) .map(|(_, tlb)| tlb.flush()) - .ok(); + .expect("protect failed"); new_pt .map(vaddr, paddr, page_size, flags) .map(|tlb| tlb.flush()) - .ok(); + .expect("map failed"); } } } @@ -539,20 +539,16 @@ impl AddrSpace { .is_ok(), // Allocates the new page and copies the contents of the original page, // remapping the virtual address to the physical address of the new page. - // NOTE: Reduce the page's reference count 2.. 
=> match alloc_frame(false, page_size.into()) { Some(new_frame) => { - let new_slice = unsafe { - core::slice::from_raw_parts_mut( + unsafe { + core::ptr::copy_nonoverlapping( + phys_to_virt(paddr).as_ptr(), phys_to_virt(new_frame).as_mut_ptr(), page_size.into(), ) }; - let old_slice = unsafe { - core::slice::from_raw_parts(phys_to_virt(paddr).as_ptr(), page_size.into()) - }; - new_slice.copy_from_slice(old_slice); dealloc_frame(paddr); self.pt diff --git a/modules/axmm/src/frameinfo.rs b/modules/axmm/src/frameinfo.rs index 72b3e0213d..5491e4b813 100644 --- a/modules/axmm/src/frameinfo.rs +++ b/modules/axmm/src/frameinfo.rs @@ -84,5 +84,6 @@ impl FrameInfo { } fn phys_to_pfn(paddr: PhysAddr) -> usize { - paddr.as_usize() >> FRAME_SHIFT + assert!(paddr.as_usize() >= axconfig::plat::PHYS_MEMORY_BASE); + (paddr.as_usize() - axconfig::plat::PHYS_MEMORY_BASE) >> FRAME_SHIFT } From d497a2183e89bb620c8473c984f1e144bc5008d4 Mon Sep 17 00:00:00 2001 From: mingzi Date: Mon, 9 Jun 2025 17:08:17 +0800 Subject: [PATCH 06/15] chore: clean up --- modules/axmm/src/frameinfo.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/axmm/src/frameinfo.rs b/modules/axmm/src/frameinfo.rs index 5491e4b813..0c77ae9f21 100644 --- a/modules/axmm/src/frameinfo.rs +++ b/modules/axmm/src/frameinfo.rs @@ -84,6 +84,5 @@ impl FrameInfo { } fn phys_to_pfn(paddr: PhysAddr) -> usize { - assert!(paddr.as_usize() >= axconfig::plat::PHYS_MEMORY_BASE); (paddr.as_usize() - axconfig::plat::PHYS_MEMORY_BASE) >> FRAME_SHIFT } From 1262090cb09f681de92f58a9fe1617ce8f9eceb6 Mon Sep 17 00:00:00 2001 From: mingzi Date: Wed, 11 Jun 2025 20:01:44 +0800 Subject: [PATCH 07/15] refactor: Rename `populate_area` to `ensure_region_mapped` with COW support - Renamed `populate_area` to `ensure_region_mapped` - Refactored `handle_cow_fault` to take a mutable page table reference --- modules/axmm/src/aspace.rs | 89 ++++++++++++++++++++++++++++---------- 1 file changed, 67 insertions(+), 22 deletions(-) diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs index ee96f655f7..214f471465 100644 --- a/modules/axmm/src/aspace.rs +++ b/modules/axmm/src/aspace.rs @@ -163,28 +163,71 @@ impl AddrSpace { Ok(()) } - /// Populates the area with physical frames, returning false if the area - /// contains unmapped area. - pub fn populate_area(&mut self, mut start: VirtAddr, size: usize) -> AxResult { + /// Ensures that the specified virtual memory region is fully mapped. + /// + /// This function walks through the given virtual address range and attempts to ensure + /// that every page is mapped. If a page is not mapped and the corresponding area allows + /// on-demand population (`populate == false`), it will trigger a page fault to map it. + /// If `cow_on_write` is true, it will handle copy-on-write (COW) logic for already + /// mapped pages that may require COW due to write intentions. + /// + /// # Parameters + /// + /// - `start`: The starting virtual address of the region to map. + /// - `size`: The size (in bytes) of the region. + /// - `cow_on_write`: Whether to trigger copy-on-write handling for write-intended mappings. + /// + /// # Returns + /// + /// Returns `Ok(())` if the entire region is successfully mapped, or an appropriate + /// `AxError` variant (`NoMemory`, `BadAddress`) on failure. + /// + /// # Errors + /// + /// - `AxError::NoMemory`: Failed to allocate. + /// - `AxError::BadAddress`: An invalid mapping state was detected. 
+ pub fn ensure_region_mapped( + &mut self, + mut start: VirtAddr, + size: usize, + cow_on_write: bool, + ) -> AxResult { self.validate_region(start, size)?; let end = start + size; while let Some(area) = self.areas.find(start) { let backend = area.backend(); if let Backend::Alloc { populate, .. } = backend { - if !*populate { - for addr in PageIter4K::new(start, area.end().min(end)).unwrap() { - match self.pt.query(addr) { - Ok(_) => {} - // If the page is not mapped, try map it. - Err(PagingError::NotMapped) => { + for addr in PageIter4K::new(start, area.end().min(end)).unwrap() { + match self.pt.query(addr) { + // if the page is already mapped and write intentions, try cow. + Ok((paddr, flags, page_size)) => { + if cow_on_write { + if !area.flags().contains(MappingFlags::WRITE) { + return Err(AxError::BadAddress) + } + + if !Self::handle_cow_fault( + addr, + paddr, + flags, + page_size, + &mut self.pt, + ) { + return Err(AxError::NoMemory); + } + } + } + // If the page is not mapped, try map it. + Err(PagingError::NotMapped) => { + if !*populate { if !backend.handle_page_fault(addr, area.flags(), &mut self.pt) { return Err(AxError::NoMemory); } } - Err(_) => return Err(AxError::BadAddress), - }; - } + } + Err(_) => return Err(AxError::BadAddress), + }; } } start = area.end(); @@ -317,7 +360,7 @@ impl AddrSpace { /// aligned. pub fn protect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) -> AxResult { // Populate the area first, which also checks the address range for us. - self.populate_area(start, size)?; + self.ensure_region_mapped(start, size, false)?; self.areas .protect(start, size, |_| Some(flags), &mut self.pt) @@ -386,7 +429,13 @@ impl AddrSpace { // 1. page fault caused by write // 2. pte exists // 3. Not shared memory - return self.handle_cow_fault(vaddr, paddr.sub(off), orig_flags, page_size); + return Self::handle_cow_fault( + vaddr, + paddr.sub(off), + orig_flags, + page_size, + &mut self.pt, + ); } else { return area .backend() @@ -516,27 +565,24 @@ impl AddrSpace { /// it must be the starting physical address. /// - `flags`: vma flags. /// - `page_size`: The size of the page on which the current physical address is located + /// - `pt`: A mutable reference to the page table that should be updated. /// /// # Returns /// - `true` if the page fault was handled successfully. /// - `false` if the fault handling failed (e.g., allocation failed or invalid ref count). fn handle_cow_fault( - &mut self, vaddr: VirtAddr, paddr: PhysAddr, flags: MappingFlags, page_size: PageSize, + pt: &mut PageTable, ) -> bool { let frame_info = get_frame_info(paddr); match frame_info.ref_count() { 0 => unreachable!(), // There is only one AddrSpace reference to the page, // so there is no need to copy it. - 1 => self - .pt - .protect(vaddr, flags) - .map(|(_, tlb)| tlb.flush()) - .is_ok(), + 1 => pt.protect(vaddr, flags).map(|(_, tlb)| tlb.flush()).is_ok(), // Allocates the new page and copies the contents of the original page, // remapping the virtual address to the physical address of the new page. 2.. 
=> match alloc_frame(false, page_size.into()) { @@ -551,8 +597,7 @@ impl AddrSpace { dealloc_frame(paddr); - self.pt - .remap(vaddr, new_frame, flags) + pt.remap(vaddr, new_frame, flags) .map(|(_, tlb)| { tlb.flush(); }) From 44d596ebcb7f4f7f53a41b829264b55e5ecc220b Mon Sep 17 00:00:00 2001 From: mingzi Date: Wed, 11 Jun 2025 21:59:31 +0800 Subject: [PATCH 08/15] chore: Modify according to comment --- modules/axmm/src/aspace.rs | 5 +++-- modules/axmm/src/backend/alloc.rs | 11 +++++------ modules/axmm/src/frameinfo.rs | 20 ++++++++++---------- modules/axmm/src/lib.rs | 3 +-- 4 files changed, 19 insertions(+), 20 deletions(-) diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs index 214f471465..eb2a7ddead 100644 --- a/modules/axmm/src/aspace.rs +++ b/modules/axmm/src/aspace.rs @@ -204,7 +204,7 @@ impl AddrSpace { Ok((paddr, flags, page_size)) => { if cow_on_write { if !area.flags().contains(MappingFlags::WRITE) { - return Err(AxError::BadAddress) + return Err(AxError::BadAddress); } if !Self::handle_cow_fault( @@ -530,6 +530,7 @@ impl AddrSpace { let mut flags = area.flags(); flags.remove(MappingFlags::WRITE); + //If the page is mapped in the old page table: // - Update its permissions in the old page table using `flags`. // - Map the same physical page into the new page table at the same @@ -585,7 +586,7 @@ impl AddrSpace { 1 => pt.protect(vaddr, flags).map(|(_, tlb)| tlb.flush()).is_ok(), // Allocates the new page and copies the contents of the original page, // remapping the virtual address to the physical address of the new page. - 2.. => match alloc_frame(false, page_size.into()) { + 2.. => match alloc_frame(false) { Some(new_frame) => { unsafe { core::ptr::copy_nonoverlapping( diff --git a/modules/axmm/src/backend/alloc.rs b/modules/axmm/src/backend/alloc.rs index 7b0aac8b22..c6a42c1a03 100644 --- a/modules/axmm/src/backend/alloc.rs +++ b/modules/axmm/src/backend/alloc.rs @@ -13,14 +13,13 @@ use super::Backend; /// /// # Parameters /// - `zeroed`: If `true`, the allocated frame memory is zeroed out. -/// - `align`: The alignment requirement (in pages) for the allocation. /// /// # Returns /// Returns `Some(PhysAddr)` of the allocated frame on success, or `None` if allocation fails. -pub fn alloc_frame(zeroed: bool, align: usize) -> Option { - let vaddr = VirtAddr::from(global_allocator().alloc_pages(1, align).ok()?); +pub fn alloc_frame(zeroed: bool) -> Option { + let vaddr = VirtAddr::from(global_allocator().alloc_pages(1, PAGE_SIZE_4K).ok()?); if zeroed { - unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, align) }; + unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, PAGE_SIZE_4K) }; } let paddr = virt_to_phys(vaddr); add_frame_ref(paddr); @@ -66,7 +65,7 @@ impl Backend { if populate { // allocate all possible physical frames for populated mapping. for addr in PageIter4K::new(start, start + size).unwrap() { - if let Some(frame) = alloc_frame(true, PAGE_SIZE_4K) { + if let Some(frame) = alloc_frame(true) { if let Ok(tlb) = pt.map(addr, frame, PageSize::Size4K, flags) { tlb.ignore(); // TLB flush on map is unnecessary, as there are no outdated mappings. } else { @@ -112,7 +111,7 @@ impl Backend { ) -> bool { if populate { false // Populated mappings should not trigger page faults. - } else if let Some(frame) = alloc_frame(true, PAGE_SIZE_4K) { + } else if let Some(frame) = alloc_frame(true) { // Allocate a physical frame lazily and map it to the fault address. // `vaddr` does not need to be aligned. 
It will be automatically // aligned during `pt.map` regardless of the page size. diff --git a/modules/axmm/src/frameinfo.rs b/modules/axmm/src/frameinfo.rs index 0c77ae9f21..9794e74594 100644 --- a/modules/axmm/src/frameinfo.rs +++ b/modules/axmm/src/frameinfo.rs @@ -7,9 +7,12 @@ //! that keeps track of its reference count. //! NOTE: If the page is huge page, its [`FrameInfo`] is placed at the //! starting physical address. -use core::sync::atomic::{AtomicUsize, Ordering}; +use core::{ + array, + sync::atomic::{AtomicUsize, Ordering}, +}; -use alloc::vec::Vec; +use alloc::boxed::Box; use lazyinit::LazyInit; use memory_addr::PhysAddr; @@ -18,11 +21,10 @@ const FRAME_SHIFT: usize = 12; pub const MAX_FRAME_NUM: usize = axconfig::plat::PHYS_MEMORY_SIZE >> FRAME_SHIFT; -static FRAME_INFO_TABLE: LazyInit> = LazyInit::new(); +static FRAME_INFO_TABLE: LazyInit> = LazyInit::new(); -pub fn init_frame_info_table() { - let _ = - FRAME_INFO_TABLE.init_once((0..MAX_FRAME_NUM).map(|_| FrameInfo::new_empty()).collect()); +pub fn init_frames() { + let _ = FRAME_INFO_TABLE.init_once(Box::new(array::from_fn(|_| FrameInfo::new_empty()))); } /// Returns the `FrameInfo` structure associated with a given physical address. @@ -43,8 +45,7 @@ pub fn get_frame_info(paddr: PhysAddr) -> &'static FrameInfo { /// - `paddr`: It must be an aligned physical address; if it's a huge page, /// it must be the starting physical address. pub fn add_frame_ref(paddr: PhysAddr) { - let frame = get_frame_info(paddr); - frame.inc_ref(); + get_frame_info(paddr).inc_ref(); } /// Decreases the reference count of the frame associated with a physical address. @@ -55,8 +56,7 @@ pub fn add_frame_ref(paddr: PhysAddr) { /// # Returns /// The updated reference count after decrementing. pub fn dec_frame_ref(paddr: PhysAddr) -> usize { - let frame = get_frame_info(paddr); - frame.dec_ref() + get_frame_info(paddr).dec_ref() } pub struct FrameInfo { diff --git a/modules/axmm/src/lib.rs b/modules/axmm/src/lib.rs index 73dc2c4749..6101560d7c 100644 --- a/modules/axmm/src/lib.rs +++ b/modules/axmm/src/lib.rs @@ -15,7 +15,6 @@ pub use self::backend::Backend; use axerrno::{AxError, AxResult}; use axhal::mem::phys_to_virt; -use frameinfo::init_frame_info_table; use kspin::SpinNoIrq; use lazyinit::LazyInit; use memory_addr::{PhysAddr, va}; @@ -66,7 +65,7 @@ pub fn init_memory_management() { KERNEL_ASPACE.init_once(SpinNoIrq::new(kernel_aspace)); axhal::paging::set_kernel_page_table_root(kernel_page_table_root()); - init_frame_info_table(); + frameinfo::init_frames(); } /// Initializes kernel paging for secondary CPUs. From 74519d3303cfd32c1417ca587cbffede54d409f7 Mon Sep 17 00:00:00 2001 From: mingzi Date: Thu, 12 Jun 2025 16:51:50 +0800 Subject: [PATCH 09/15] refactor: replace PageIter4K with PageIterWrapper in AddrSpace - Use align from backend for page iteration - Remove TODO and FIXME comments - Simplify page iteration logic --- modules/axmm/src/aspace.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs index 93d5f9afb0..a179568757 100644 --- a/modules/axmm/src/aspace.rs +++ b/modules/axmm/src/aspace.rs @@ -612,16 +612,20 @@ impl AddrSpace { let mut flags = area.flags(); flags.remove(MappingFlags::WRITE); + let align = if let Backend::Alloc { align, .. } = backend { + align + } else { + unreachable!() + }; + //If the page is mapped in the old page table: // - Update its permissions in the old page table using `flags`. 
// - Map the same physical page into the new page table at the same // virtual address, with the same page size and `flags`. - // TODO: huge page iter - for vaddr in - PageIter4K::new(area.start(), area.end()).expect("Failed to create page iterator") + for vaddr in PageIterWrapper::new(area.start(), area.end(), align) + .expect("Failed to create page iterator") { if let Ok((paddr, _, page_size)) = old_pt.query(vaddr) { - // FIXME: need use huge page iter add_frame_ref(paddr); old_pt From 05a17bee0db1538ddbbaded4f8a5f35048d1af76 Mon Sep 17 00:00:00 2001 From: mingzi Date: Fri, 13 Jun 2025 23:09:18 +0800 Subject: [PATCH 10/15] refactor: simplify frame reference counting - Replace direct frame info access with FrameRefTable API - Remove redundant align parameter from protect() - Rename ensure_region_mapped to populate_area - Simplify COW handling logic - Consolidate frame info operations into FrameRefTable struct --- modules/axdma/src/dma.rs | 2 +- modules/axmm/src/aspace.rs | 49 +++++++---------- modules/axmm/src/backend/alloc.rs | 6 +-- modules/axmm/src/frameinfo.rs | 90 +++++++++++++++---------------- 4 files changed, 69 insertions(+), 78 deletions(-) diff --git a/modules/axdma/src/dma.rs b/modules/axdma/src/dma.rs index 81ae42c97f..d3136f3b48 100644 --- a/modules/axdma/src/dma.rs +++ b/modules/axdma/src/dma.rs @@ -97,7 +97,7 @@ impl DmaAllocator { let expand_size = num_pages * PAGE_SIZE_4K; axmm::kernel_aspace() .lock() - .protect(vaddr, expand_size, flags, PageSize::Size4K) + .protect(vaddr, expand_size, flags) .map_err(|e| { error!("change table flag fail: {e:?}"); AllocError::NoMemory diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs index a179568757..d9bedd6c5c 100644 --- a/modules/axmm/src/aspace.rs +++ b/modules/axmm/src/aspace.rs @@ -10,7 +10,7 @@ use memory_set::{MemoryArea, MemorySet}; use page_table_multiarch::PageSize; use crate::backend::{Backend, PageIterWrapper, alloc_frame, dealloc_frame}; -use crate::frameinfo::{add_frame_ref, get_frame_info}; +use crate::frameinfo::frame_table; use crate::mapping_err_to_ax_err; /// The virtual memory address space. @@ -227,15 +227,14 @@ impl AddrSpace { /// This function walks through the given virtual address range and attempts to ensure /// that every page is mapped. If a page is not mapped and the corresponding area allows /// on-demand population (`populate == false`), it will trigger a page fault to map it. - /// If `cow_on_write` is true, it will handle copy-on-write (COW) logic for already + /// If `access_flags` contains `WRITE`, it will handle copy-on-write (COW) logic for already /// mapped pages that may require COW due to write intentions. /// /// # Parameters /// /// - `start`: The starting virtual address of the region to map. /// - `size`: The size (in bytes) of the region. - /// - `align`: Alignment requirement for the allocated memory, must be a multiple of 4KiB. - /// - `cow_on_write`: Whether to trigger copy-on-write handling for write-intended mappings. + /// - `access_flags` indicates the access type /// /// # Returns /// @@ -246,14 +245,13 @@ impl AddrSpace { /// /// - `AxError::NoMemory`: Failed to allocate. /// - `AxError::BadAddress`: An invalid mapping state was detected. 
- pub fn ensure_region_mapped( + pub fn populate_area( &mut self, mut start: VirtAddr, size: usize, - align: PageSize, - cow_on_write: bool, + access_flags: MappingFlags, ) -> AxResult { - self.validate_region(start, size, align)?; + self.validate_region(start, size, PageSize::Size4K)?; let end = start + size; while let Some(area) = self.areas.find(start) { @@ -263,11 +261,9 @@ impl AddrSpace { match self.pt.query(addr) { // if the page is already mapped and write intentions, try cow. Ok((paddr, flags, page_size)) => { - if cow_on_write { - if !area.flags().contains(MappingFlags::WRITE) { - return Err(AxError::BadAddress); - } - + if flags.contains(MappingFlags::WRITE) { + continue; + } else if access_flags.contains(MappingFlags::WRITE) { if !Self::handle_cow_fault( addr, paddr, @@ -285,6 +281,8 @@ impl AddrSpace { if !backend.handle_page_fault(addr, area.flags(), &mut self.pt) { return Err(AxError::NoMemory); } + } else { + return Err(AxError::BadAddress); } } Err(_) => return Err(AxError::BadAddress), @@ -292,7 +290,7 @@ impl AddrSpace { } } start = area.end(); - assert!(start.is_aligned(align)); + assert!(start.is_aligned(PageSize::Size4K)); if start >= end { break; } @@ -433,15 +431,9 @@ impl AddrSpace { /// /// Returns an error if the address range is out of the address space or not /// aligned. - pub fn protect( - &mut self, - start: VirtAddr, - size: usize, - flags: MappingFlags, - align: PageSize, - ) -> AxResult { + pub fn protect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) -> AxResult { // Populate the area first, which also checks the address range for us. - self.ensure_region_mapped(start, size, align, false)?; + self.populate_area(start, size, flags)?; self.areas .protect(start, size, |_| Some(flags), &mut self.pt) @@ -506,13 +498,12 @@ impl AddrSpace { if !access_flags.contains(MappingFlags::WRITE) { return false; } - let off = page_size.align_offset(vaddr.into()); // 1. page fault caused by write // 2. pte exists // 3. Not shared memory return Self::handle_cow_fault( vaddr, - paddr.sub(off), + paddr, orig_flags, page_size, &mut self.pt, @@ -626,7 +617,7 @@ impl AddrSpace { .expect("Failed to create page iterator") { if let Ok((paddr, _, page_size)) = old_pt.query(vaddr) { - add_frame_ref(paddr); + frame_table().inc_ref(paddr); old_pt .protect(vaddr, flags) @@ -647,8 +638,7 @@ impl AddrSpace { /// /// # Arguments /// - `vaddr`: The virtual address that triggered the fault. - /// - `paddr`: It must be an aligned physical address; if it's a huge page, - /// it must be the starting physical address. + /// - `paddr`: The physical address that triggered the fault. /// - `flags`: vma flags. /// - `align`: Alignment requirement for the allocated memory, must be a multiple of 4KiB. /// - `pt`: A mutable reference to the page table that should be updated. @@ -663,8 +653,9 @@ impl AddrSpace { align: PageSize, pt: &mut PageTable, ) -> bool { - let frame_info = get_frame_info(paddr); - match frame_info.ref_count() { + let paddr = paddr.align_down(align); + + match frame_table().ref_count(paddr) { 0 => unreachable!(), // There is only one AddrSpace reference to the page, // so there is no need to copy it. 
diff --git a/modules/axmm/src/backend/alloc.rs b/modules/axmm/src/backend/alloc.rs index e9a4f1ba5b..ec32d8397e 100644 --- a/modules/axmm/src/backend/alloc.rs +++ b/modules/axmm/src/backend/alloc.rs @@ -4,7 +4,7 @@ use axhal::mem::{phys_to_virt, virt_to_phys}; use axhal::paging::{MappingFlags, PageSize, PageTable}; use memory_addr::{PAGE_SIZE_4K, PhysAddr, VirtAddr}; -use crate::frameinfo::{add_frame_ref, dec_frame_ref}; +use crate::frameinfo::frame_table; use super::Backend; @@ -35,7 +35,7 @@ pub fn alloc_frame(zeroed: bool, align: PageSize) -> Option { unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, page_size) }; } let paddr = virt_to_phys(vaddr); - add_frame_ref(paddr); + frame_table().inc_ref(paddr); Some(paddr) } @@ -60,7 +60,7 @@ pub fn alloc_frame(zeroed: bool, align: PageSize) -> Option { /// the failure can be obtained from the global memory allocator’s error messages. pub fn dealloc_frame(frame: PhysAddr, align: PageSize) { let vaddr = phys_to_virt(frame); - match dec_frame_ref(frame) { + match frame_table().dec_ref(frame) { 0 => unreachable!(), 1 => { let page_size: usize = align.into(); diff --git a/modules/axmm/src/frameinfo.rs b/modules/axmm/src/frameinfo.rs index 9794e74594..d3b9ed1092 100644 --- a/modules/axmm/src/frameinfo.rs +++ b/modules/axmm/src/frameinfo.rs @@ -21,68 +21,68 @@ const FRAME_SHIFT: usize = 12; pub const MAX_FRAME_NUM: usize = axconfig::plat::PHYS_MEMORY_SIZE >> FRAME_SHIFT; -static FRAME_INFO_TABLE: LazyInit> = LazyInit::new(); +static FRAME_INFO_TABLE: LazyInit = LazyInit::new(); pub fn init_frames() { - let _ = FRAME_INFO_TABLE.init_once(Box::new(array::from_fn(|_| FrameInfo::new_empty()))); + let _ = FRAME_INFO_TABLE.init_once(FrameRefTable::default()); } -/// Returns the `FrameInfo` structure associated with a given physical address. -/// -/// # Parameters -/// - `paddr`: It must be an aligned physical address; if it's a huge page, -/// it must be the starting physical address. -/// -/// # Returns -/// A reference to the `FrameInfo` associated with the given physical address. -pub fn get_frame_info(paddr: PhysAddr) -> &'static FrameInfo { - &FRAME_INFO_TABLE[phys_to_pfn(paddr)] +pub(crate) fn frame_table() -> &'static FrameRefTable { + &FRAME_INFO_TABLE } -/// Increases the reference count of the frame associated with a physical address. -/// -/// # Parameters -/// - `paddr`: It must be an aligned physical address; if it's a huge page, -/// it must be the starting physical address. -pub fn add_frame_ref(paddr: PhysAddr) { - get_frame_info(paddr).inc_ref(); +pub(crate) struct FrameRefTable { + data: Box<[FrameInfo; MAX_FRAME_NUM]>, } -/// Decreases the reference count of the frame associated with a physical address. -/// -/// - `paddr`: It must be an aligned physical address; if it's a huge page, -/// it must be the starting physical address. -/// -/// # Returns -/// The updated reference count after decrementing. 
-pub fn dec_frame_ref(paddr: PhysAddr) -> usize { - get_frame_info(paddr).dec_ref() -} - -pub struct FrameInfo { - ref_count: AtomicUsize, +impl Default for FrameRefTable { + fn default() -> Self { + FrameRefTable { + data: Box::new(array::from_fn(|_| FrameInfo::default())), + } + } } -impl FrameInfo { - fn new_empty() -> Self { - Self { - ref_count: AtomicUsize::new(0), - } +impl FrameRefTable { + fn info(&self, paddr: PhysAddr) -> &FrameInfo { + let index = (paddr.as_usize() - axconfig::plat::PHYS_MEMORY_BASE) >> FRAME_SHIFT; + &self.data[index] } - fn inc_ref(&self) -> usize { - self.ref_count.fetch_add(1, Ordering::SeqCst) + /// Increases the reference count of the frame associated with a physical address. + /// + /// # Parameters + /// - `paddr`: It must be an aligned physical address; if it's a huge page, + /// it must be the starting physical address. + pub fn inc_ref(&self, paddr: PhysAddr) { + self.info(paddr).ref_count.fetch_add(1, Ordering::SeqCst); } - fn dec_ref(&self) -> usize { - self.ref_count.fetch_sub(1, Ordering::SeqCst) + /// Decreases the reference count of the frame associated with a physical address. + /// + /// - `paddr`: It must be an aligned physical address; if it's a huge page, + /// it must be the starting physical address. + /// + /// # Returns + /// The updated reference count after decrementing. + pub fn dec_ref(&self, paddr: PhysAddr) -> usize { + self.info(paddr).ref_count.fetch_sub(1, Ordering::SeqCst) } - pub fn ref_count(&self) -> usize { - self.ref_count.load(Ordering::SeqCst) + /// Returns the `FrameInfo` structure associated with a given physical address. + /// + /// # Parameters + /// - `paddr`: It must be an aligned physical address; if it's a huge page, + /// it must be the starting physical address. + /// + /// # Returns + /// A reference to the `FrameInfo` associated with the given physical address. + pub fn ref_count(&self, paddr: PhysAddr) -> usize { + self.info(paddr).ref_count.load(Ordering::SeqCst) } } -fn phys_to_pfn(paddr: PhysAddr) -> usize { - (paddr.as_usize() - axconfig::plat::PHYS_MEMORY_BASE) >> FRAME_SHIFT +#[derive(Default)] +pub(crate) struct FrameInfo { + ref_count: AtomicUsize, } From 172657fbb329e847e3bca2080600483a1230d99a Mon Sep 17 00:00:00 2001 From: mingzi Date: Sat, 14 Jun 2025 13:27:32 +0800 Subject: [PATCH 11/15] refactor: (populate_area) use a iterator to walk through the area covered by the specified access interval --- modules/axmm/src/aspace.rs | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs index d9bedd6c5c..4066299b90 100644 --- a/modules/axmm/src/aspace.rs +++ b/modules/axmm/src/aspace.rs @@ -254,7 +254,16 @@ impl AddrSpace { self.validate_region(start, size, PageSize::Size4K)?; let end = start + size; - while let Some(area) = self.areas.find(start) { + for area in self.areas.iter() { + if start >= area.end() { + continue; + } + + if start < area.start() { + // If the area is not fully mapped, we return ENOMEM. + return ax_err!(NoMemory); + } + let backend = area.backend(); if let Backend::Alloc { populate, align } = *backend { for addr in PageIterWrapper::new(start, area.end().min(end), align).unwrap() { @@ -292,16 +301,13 @@ impl AddrSpace { start = area.end(); assert!(start.is_aligned(PageSize::Size4K)); if start >= end { - break; + return Ok(()); } } - if start < end { - // If the area is not fully mapped, we return ENOMEM. 
- return ax_err!(NoMemory); - } - - Ok(()) + // start < end + // If the area is not fully mapped, we return ENOMEM. + ax_err!(NoMemory) } /// Removes mappings within the specified virtual address range. From e9a3c037766e6e31ea1dc69797f2a146df6c8762 Mon Sep 17 00:00:00 2001 From: mingzi Date: Sat, 14 Jun 2025 14:57:03 +0800 Subject: [PATCH 12/15] feat: add COW feature for memory management - Add conditional compilation for COW feature - Refactor clone_or_err into try_clone with COW support - Update frame allocation/deallocation logic - Move frameinfo module behind feature flag - Add new cow feature to Cargo.toml --- modules/axmm/Cargo.toml | 4 + modules/axmm/src/aspace.rs | 224 ++++++++++++++++-------------- modules/axmm/src/backend/alloc.rs | 23 +-- modules/axmm/src/lib.rs | 2 + 4 files changed, 138 insertions(+), 115 deletions(-) diff --git a/modules/axmm/Cargo.toml b/modules/axmm/Cargo.toml index 46a7d5d828..8df1605bc0 100644 --- a/modules/axmm/Cargo.toml +++ b/modules/axmm/Cargo.toml @@ -9,6 +9,10 @@ homepage.workspace = true repository = "https://github.com/arceos-org/arceos/tree/main/modules/axmm" documentation = "https://arceos-org.github.io/arceos/axmm/index.html" +[features] +default = [] +cow = [] + [dependencies] axhal = { workspace = true, features = ["paging"] } axalloc = { workspace = true } diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs index 4066299b90..15d347b9a8 100644 --- a/modules/axmm/src/aspace.rs +++ b/modules/axmm/src/aspace.rs @@ -3,16 +3,18 @@ use core::fmt; use axerrno::{AxError, AxResult, ax_err}; use axhal::mem::phys_to_virt; use axhal::paging::{MappingFlags, PageTable, PagingError}; -use memory_addr::{ - MemoryAddr, PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned, -}; +use memory_addr::{MemoryAddr, PAGE_SIZE_4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned}; use memory_set::{MemoryArea, MemorySet}; use page_table_multiarch::PageSize; -use crate::backend::{Backend, PageIterWrapper, alloc_frame, dealloc_frame}; -use crate::frameinfo::frame_table; +use crate::backend::{Backend, PageIterWrapper}; use crate::mapping_err_to_ax_err; +#[cfg(feature = "cow")] +use crate::backend::{alloc_frame, dealloc_frame}; +#[cfg(feature = "cow")] +use crate::frameinfo::frame_table; + /// The virtual memory address space. pub struct AddrSpace { va_range: VirtAddrRange, @@ -249,13 +251,13 @@ impl AddrSpace { &mut self, mut start: VirtAddr, size: usize, - access_flags: MappingFlags, + _access_flags: MappingFlags, ) -> AxResult { self.validate_region(start, size, PageSize::Size4K)?; let end = start + size; for area in self.areas.iter() { - if start >= area.end() { + if start >= area.end() { continue; } @@ -268,11 +270,14 @@ impl AddrSpace { if let Backend::Alloc { populate, align } = *backend { for addr in PageIterWrapper::new(start, area.end().min(end), align).unwrap() { match self.pt.query(addr) { - // if the page is already mapped and write intentions, try cow. + #[cfg(not(feature = "cow"))] + Ok(_) => (), + #[cfg(feature = "cow")] Ok((paddr, flags, page_size)) => { + // if the page is already mapped and write intentions, try cow. 
if flags.contains(MappingFlags::WRITE) { continue; - } else if access_flags.contains(MappingFlags::WRITE) { + } else if _access_flags.contains(MappingFlags::WRITE) { if !Self::handle_cow_fault( addr, paddr, @@ -500,6 +505,7 @@ impl AddrSpace { // Two cases enter the branch: // - shared pages (If there is a shared page in the vma) // - cow + #[cfg(feature = "cow")] if let Ok((paddr, _, page_size)) = self.pt.query(vaddr) { if !access_flags.contains(MappingFlags::WRITE) { return false; @@ -514,22 +520,52 @@ impl AddrSpace { page_size, &mut self.pt, ); - } else { - return area - .backend() - .handle_page_fault(vaddr, orig_flags, &mut self.pt); } + + return area + .backend() + .handle_page_fault(vaddr, orig_flags, &mut self.pt); } } false } - /// Clone a [`AddrSpace`] by re-mapping all [`MemoryArea`]s in a new page table and copying data in user space. - pub fn clone_or_err(&mut self) -> AxResult { + /// Attempts to clone the current address space into a new one. + /// + /// This method creates a new empty address space with the same base and size, + /// then iterates over all memory areas in the original address space to copy or + /// share their mappings into the new one. + /// + /// ### Behavior with `cow` Feature Enabled + /// - For memory areas backed by `Backend::Alloc`, the `populate` flag is forced + /// to `false` to avoid preemptive physical allocation in the new space. + /// - All writable mappings have their `WRITE` flag removed, enforcing + /// Copy-On-Write (COW) semantics. + /// - Shared pages increase their reference count via `frame_table().inc_ref()`, + /// and both the original and the cloned page tables are updated: + /// - The original page's protection flags are modified to remove write access. + /// - The new address space maps the same physical page with the new flags. + /// + /// ### Behavior without `cow` Feature + /// - Each mapped page in the original address space is copied into the + /// corresponding address in the new address space. + /// - If the target address in the new space is not mapped, a page fault is + /// handled via `backend.handle_page_fault`, and memory is allocated before copying. + /// - The actual copying is done using `core::ptr::copy_nonoverlapping` at the + /// physical address level. + pub fn try_clone(&mut self) -> AxResult { let mut new_aspace = Self::new_empty(self.base(), self.size())?; for area in self.areas.iter() { - let backend = area.backend(); + let backend = match area.backend() { + #[cfg(feature = "cow")] + Backend::Alloc { populate: _, align } => { + // Forcing `populate = false` is to prevent the subsequent `new_aspace.areas.map` + // from mapping page table entries for the virtual addresses. + &Backend::new_alloc(false, *align) + } + other => other, + }; // Remap the memory area in the new address space. let new_area = @@ -539,104 +575,81 @@ impl AddrSpace { .map(new_area, &mut new_aspace.pt, false) .map_err(mapping_err_to_ax_err)?; - if matches!(backend, Backend::Linear { .. }) { - continue; - } - // Copy data from old memory area to new memory area. - for vaddr in - PageIter4K::new(area.start(), area.end()).expect("Failed to create page iterator") - { - let addr = match self.pt.query(vaddr) { - Ok((paddr, _, _)) => paddr, - // If the page is not mapped, skip it. - Err(PagingError::NotMapped) => continue, - Err(_) => return Err(AxError::BadAddress), - }; - let new_addr = match new_aspace.pt.query(vaddr) { - Ok((paddr, _, _)) => paddr, - // If the page is not mapped, try map it. 
- Err(PagingError::NotMapped) => { - if !backend.handle_page_fault(vaddr, area.flags(), &mut new_aspace.pt) { - return Err(AxError::NoMemory); - } - match new_aspace.pt.query(vaddr) { - Ok((paddr, _, _)) => paddr, - Err(_) => return Err(AxError::BadAddress), - } - } - Err(_) => return Err(AxError::BadAddress), - }; - unsafe { - core::ptr::copy_nonoverlapping( - phys_to_virt(addr).as_ptr(), - phys_to_virt(new_addr).as_mut_ptr(), - PAGE_SIZE_4K, - ) - }; - } - } - Ok(new_aspace) - } - - /// Creates a copy of the current [`AddrSpace`] with copy-on-write (COW) - /// - /// For pages that require COW, remove `write` flags. - pub fn copy_with_cow(&mut self) -> AxResult { - let mut new_aspace = Self::new_empty(self.base(), self.size())?; - let new_pt = &mut new_aspace.pt; - let old_pt = &mut self.pt; - - for area in self.areas.iter() { - let mut backend = area.backend().clone(); - if let Backend::Alloc { populate, .. } = &mut backend { - // Forcing `populate = false` is to prevent the subsequent `new_aspace.areas.map` - // from mapping page table entries for the virtual addresses. - *populate = false - } - - let new_area = - MemoryArea::new(area.start(), area.size(), area.flags(), backend.clone()); - new_aspace - .areas - .map(new_area, new_pt, false) - .map_err(mapping_err_to_ax_err)?; - - // Linear-backed regions are usually allocated by the kernel and are shared - if matches!(backend, Backend::Linear { .. }) { - continue; - } - - let mut flags = area.flags(); - flags.remove(MappingFlags::WRITE); + let align = match *backend { + Backend::Alloc { align, .. } => align, + // Linear-backed regions are usually allocated by the kernel and are shared + Backend::Linear { .. } => continue, + }; - let align = if let Backend::Alloc { align, .. } = backend { - align - } else { - unreachable!() + #[cfg(feature = "cow")] + let flags = { + let mut f = area.flags(); + f.remove(MappingFlags::WRITE); + f }; - //If the page is mapped in the old page table: - // - Update its permissions in the old page table using `flags`. - // - Map the same physical page into the new page table at the same - // virtual address, with the same page size and `flags`. for vaddr in PageIterWrapper::new(area.start(), area.end(), align) .expect("Failed to create page iterator") { - if let Ok((paddr, _, page_size)) = old_pt.query(vaddr) { - frame_table().inc_ref(paddr); - - old_pt - .protect(vaddr, flags) - .map(|(_, tlb)| tlb.flush()) - .expect("protect failed"); - new_pt - .map(vaddr, paddr, page_size, flags) - .map(|tlb| tlb.flush()) - .expect("map failed"); + //If the page is mapped in the old page table: + // - Update its permissions in the old page table using `flags`. + // - Map the same physical page into the new page table at the same + // virtual address, with the same page size and `flags`. + #[cfg(feature = "cow")] + { + match self.pt.query(vaddr) { + Ok((paddr, _, page_size)) => { + frame_table().inc_ref(paddr); + + self.pt + .protect(vaddr, flags) + .map(|(_, tlb)| tlb.flush()) + .expect("protect failed"); + new_aspace + .pt + .map(vaddr, paddr, page_size, flags) + .map(|tlb| tlb.flush()) + .expect("map failed"); + } + // If the page is not mapped, skip it. + Err(PagingError::NotMapped) => continue, + Err(_) => return Err(AxError::BadAddress), + } + } + + // Copy data from old memory area to new memory area. + #[cfg(not(feature = "cow"))] + { + let addr = match self.pt.query(vaddr) { + Ok((paddr, _, _)) => paddr, + // If the page is not mapped, skip it. 
+ Err(PagingError::NotMapped) => continue, + Err(_) => return Err(AxError::BadAddress), + }; + let new_addr = match new_aspace.pt.query(vaddr) { + Ok((paddr, _, _)) => paddr, + // If the page is not mapped, try map it. + Err(PagingError::NotMapped) => { + if !backend.handle_page_fault(vaddr, area.flags(), &mut new_aspace.pt) { + return Err(AxError::NoMemory); + } + match new_aspace.pt.query(vaddr) { + Ok((paddr, _, _)) => paddr, + Err(_) => return Err(AxError::BadAddress), + } + } + Err(_) => return Err(AxError::BadAddress), + }; + unsafe { + core::ptr::copy_nonoverlapping( + phys_to_virt(addr).as_ptr(), + phys_to_virt(new_addr).as_mut_ptr(), + PAGE_SIZE_4K, + ) + }; } } } - Ok(new_aspace) } @@ -652,6 +665,7 @@ impl AddrSpace { /// # Returns /// - `true` if the page fault was handled successfully. /// - `false` if the fault handling failed (e.g., allocation failed or invalid ref count). + #[cfg(feature = "cow")] fn handle_cow_fault( vaddr: VirtAddr, paddr: PhysAddr, diff --git a/modules/axmm/src/backend/alloc.rs b/modules/axmm/src/backend/alloc.rs index ec32d8397e..aee8792557 100644 --- a/modules/axmm/src/backend/alloc.rs +++ b/modules/axmm/src/backend/alloc.rs @@ -4,6 +4,7 @@ use axhal::mem::{phys_to_virt, virt_to_phys}; use axhal::paging::{MappingFlags, PageSize, PageTable}; use memory_addr::{PAGE_SIZE_4K, PhysAddr, VirtAddr}; +#[cfg(feature = "cow")] use crate::frameinfo::frame_table; use super::Backend; @@ -35,7 +36,10 @@ pub fn alloc_frame(zeroed: bool, align: PageSize) -> Option { unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, page_size) }; } let paddr = virt_to_phys(vaddr); + + #[cfg(feature = "cow")] frame_table().inc_ref(paddr); + Some(paddr) } @@ -46,7 +50,7 @@ pub fn alloc_frame(zeroed: bool, align: PageSize) -> Option { /// The size of the memory to be freed is determined by the `align` parameter, /// which must be a multiple of 4KiB. /// -/// This function decreases the reference count associated with the frame. +/// If `cow` feature is enabled, this function decreases the reference count associated with the frame. /// When the reference count reaches 1, it actually frees the frame memory. /// /// # Parameters @@ -59,16 +63,15 @@ pub fn alloc_frame(zeroed: bool, align: PageSize) -> Option { /// - If the deallocation fails, the function will call `panic!`. Details about /// the failure can be obtained from the global memory allocator’s error messages. 
pub fn dealloc_frame(frame: PhysAddr, align: PageSize) { - let vaddr = phys_to_virt(frame); - match frame_table().dec_ref(frame) { - 0 => unreachable!(), - 1 => { - let page_size: usize = align.into(); - let num_pages = page_size / PAGE_SIZE_4K; - global_allocator().dealloc_pages(vaddr.as_usize(), num_pages); - } - _ => (), + #[cfg(feature = "cow")] + if frame_table().dec_ref(frame) > 1 { + return; } + + let vaddr = phys_to_virt(frame); + let page_size: usize = align.into(); + let num_pages = page_size / PAGE_SIZE_4K; + global_allocator().dealloc_pages(vaddr.as_usize(), num_pages); } impl Backend { diff --git a/modules/axmm/src/lib.rs b/modules/axmm/src/lib.rs index 018f34803a..e2e28b3e2a 100644 --- a/modules/axmm/src/lib.rs +++ b/modules/axmm/src/lib.rs @@ -8,6 +8,7 @@ extern crate alloc; mod aspace; mod backend; +#[cfg(feature = "cow")] mod frameinfo; pub use self::aspace::AddrSpace; @@ -72,6 +73,7 @@ pub fn init_memory_management() { KERNEL_ASPACE.init_once(SpinNoIrq::new(kernel_aspace)); axhal::paging::set_kernel_page_table_root(kernel_page_table_root()); + #[cfg(feature = "cow")] frameinfo::init_frames(); } From a4a46582fb630700f4ca1b803a19814faf92559e Mon Sep 17 00:00:00 2001 From: mingzi Date: Sun, 15 Jun 2025 19:00:19 +0800 Subject: [PATCH 13/15] refactor: simplify COW handling and frame table initialization - Consolidate COW fault handling logic - Replace LazyInit with lazy_static for frame table - Remove redundant frame table initialization - Clean up memory area copying logic --- modules/axmm/Cargo.toml | 1 + modules/axmm/src/aspace.rs | 134 ++++++++++++++++------------------ modules/axmm/src/frameinfo.rs | 8 +- modules/axmm/src/lib.rs | 3 - 4 files changed, 68 insertions(+), 78 deletions(-) diff --git a/modules/axmm/Cargo.toml b/modules/axmm/Cargo.toml index 8df1605bc0..4a84a0282e 100644 --- a/modules/axmm/Cargo.toml +++ b/modules/axmm/Cargo.toml @@ -18,6 +18,7 @@ axhal = { workspace = true, features = ["paging"] } axalloc = { workspace = true } axconfig = { workspace = true } +lazy_static = { version = "1.5", features = ["spin_no_std"] } log = "=0.4.21" axerrno = "0.1" lazyinit = "0.2" diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs index 15d347b9a8..b1b55ea9e5 100644 --- a/modules/axmm/src/aspace.rs +++ b/modules/axmm/src/aspace.rs @@ -506,20 +506,19 @@ impl AddrSpace { // - shared pages (If there is a shared page in the vma) // - cow #[cfg(feature = "cow")] - if let Ok((paddr, _, page_size)) = self.pt.query(vaddr) { - if !access_flags.contains(MappingFlags::WRITE) { - return false; + if access_flags.contains(MappingFlags::WRITE) { + if let Ok((paddr, _, page_size)) = self.pt.query(vaddr) { + // 1. page fault caused by write + // 2. pte exists + // 3. Not shared memory + return Self::handle_cow_fault( + vaddr, + paddr, + orig_flags, + page_size, + &mut self.pt, + ); } - // 1. page fault caused by write - // 2. pte exists - // 3. Not shared memory - return Self::handle_cow_fault( - vaddr, - paddr, - orig_flags, - page_size, - &mut self.pt, - ); } return area @@ -562,92 +561,87 @@ impl AddrSpace { Backend::Alloc { populate: _, align } => { // Forcing `populate = false` is to prevent the subsequent `new_aspace.areas.map` // from mapping page table entries for the virtual addresses. - &Backend::new_alloc(false, *align) + Backend::new_alloc(false, *align) } - other => other, + other => other.clone(), }; // Remap the memory area in the new address space. 
- let new_area = - MemoryArea::new(area.start(), area.size(), area.flags(), backend.clone()); + let new_area = MemoryArea::new(area.start(), area.size(), area.flags(), backend); new_aspace .areas .map(new_area, &mut new_aspace.pt, false) .map_err(mapping_err_to_ax_err)?; - let align = match *backend { - Backend::Alloc { align, .. } => align, + let align = match area.backend() { + Backend::Alloc { align, .. } => *align, // Linear-backed regions are usually allocated by the kernel and are shared Backend::Linear { .. } => continue, }; #[cfg(feature = "cow")] - let flags = { - let mut f = area.flags(); - f.remove(MappingFlags::WRITE); - f - }; + let cow_flags = area.flags().clone() - MappingFlags::WRITE; for vaddr in PageIterWrapper::new(area.start(), area.end(), align) .expect("Failed to create page iterator") { - //If the page is mapped in the old page table: - // - Update its permissions in the old page table using `flags`. - // - Map the same physical page into the new page table at the same - // virtual address, with the same page size and `flags`. - #[cfg(feature = "cow")] - { - match self.pt.query(vaddr) { - Ok((paddr, _, page_size)) => { + // Copy data from old memory area to new memory area. + match self.pt.query(vaddr) { + Ok((paddr, _, page_size)) => { + #[cfg(not(feature = "cow"))] + { + let new_addr = match new_aspace.pt.query(vaddr) { + Ok((paddr, _, _)) => paddr, + // If the page is not mapped, try map it. + Err(PagingError::NotMapped) => { + if !area.backend().handle_page_fault( + vaddr, + area.flags(), + &mut new_aspace.pt, + ) { + return Err(AxError::NoMemory); + } + match new_aspace.pt.query(vaddr) { + Ok((paddr, _, _)) => paddr, + Err(_) => return Err(AxError::BadAddress), + } + } + Err(_) => return Err(AxError::BadAddress), + }; + unsafe { + core::ptr::copy_nonoverlapping( + phys_to_virt(paddr).as_ptr(), + phys_to_virt(new_addr).as_mut_ptr(), + page_size.into(), + ) + }; + } + + //If the page is mapped in the old page table: + // - Update its permissions in the old page table using `flags`. + // - Map the same physical page into the new page table at the same + // virtual address, with the same page size and `flags`. + #[cfg(feature = "cow")] + { frame_table().inc_ref(paddr); self.pt - .protect(vaddr, flags) + .protect(vaddr, cow_flags) .map(|(_, tlb)| tlb.flush()) .expect("protect failed"); new_aspace .pt - .map(vaddr, paddr, page_size, flags) + .map(vaddr, paddr, page_size, cow_flags) .map(|tlb| tlb.flush()) .expect("map failed"); - } - // If the page is not mapped, skip it. - Err(PagingError::NotMapped) => continue, - Err(_) => return Err(AxError::BadAddress), - } - } - // Copy data from old memory area to new memory area. - #[cfg(not(feature = "cow"))] - { - let addr = match self.pt.query(vaddr) { - Ok((paddr, _, _)) => paddr, - // If the page is not mapped, skip it. - Err(PagingError::NotMapped) => continue, - Err(_) => return Err(AxError::BadAddress), - }; - let new_addr = match new_aspace.pt.query(vaddr) { - Ok((paddr, _, _)) => paddr, - // If the page is not mapped, try map it. 
- Err(PagingError::NotMapped) => { - if !backend.handle_page_fault(vaddr, area.flags(), &mut new_aspace.pt) { - return Err(AxError::NoMemory); - } - match new_aspace.pt.query(vaddr) { - Ok((paddr, _, _)) => paddr, - Err(_) => return Err(AxError::BadAddress), - } + continue; } - Err(_) => return Err(AxError::BadAddress), - }; - unsafe { - core::ptr::copy_nonoverlapping( - phys_to_virt(addr).as_ptr(), - phys_to_virt(new_addr).as_mut_ptr(), - PAGE_SIZE_4K, - ) - }; - } + } + // If the page is not mapped, skip it. + Err(PagingError::NotMapped) => continue, + Err(_) => return Err(AxError::BadAddress), + }; } } Ok(new_aspace) diff --git a/modules/axmm/src/frameinfo.rs b/modules/axmm/src/frameinfo.rs index d3b9ed1092..63fde0b64d 100644 --- a/modules/axmm/src/frameinfo.rs +++ b/modules/axmm/src/frameinfo.rs @@ -13,18 +13,16 @@ use core::{ }; use alloc::boxed::Box; +use lazy_static::lazy_static; use lazyinit::LazyInit; use memory_addr::PhysAddr; - // 4 kb page const FRAME_SHIFT: usize = 12; pub const MAX_FRAME_NUM: usize = axconfig::plat::PHYS_MEMORY_SIZE >> FRAME_SHIFT; -static FRAME_INFO_TABLE: LazyInit = LazyInit::new(); - -pub fn init_frames() { - let _ = FRAME_INFO_TABLE.init_once(FrameRefTable::default()); +lazy_static! { + static ref FRAME_INFO_TABLE: FrameRefTable = FrameRefTable::default(); } pub(crate) fn frame_table() -> &'static FrameRefTable { diff --git a/modules/axmm/src/lib.rs b/modules/axmm/src/lib.rs index e2e28b3e2a..de74ed4e74 100644 --- a/modules/axmm/src/lib.rs +++ b/modules/axmm/src/lib.rs @@ -72,9 +72,6 @@ pub fn init_memory_management() { debug!("kernel address space init OK: {:#x?}", kernel_aspace); KERNEL_ASPACE.init_once(SpinNoIrq::new(kernel_aspace)); axhal::paging::set_kernel_page_table_root(kernel_page_table_root()); - - #[cfg(feature = "cow")] - frameinfo::init_frames(); } /// Initializes kernel paging for secondary CPUs. 
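A rough usage sketch of the COW path as it stands after this patch (illustrative only, not code taken from any patch in the series): `parent` is assumed to be an already-populated user `AddrSpace` with a writable area containing the hypothetical page-aligned address `user_va`, and the `cow` feature is assumed to be enabled.

    // Fork: the child maps the same physical frames; WRITE is stripped in both
    // the parent's and the child's page tables, and frame_table().inc_ref() is
    // called once per shared frame.
    let mut child = parent.try_clone().expect("address space clone failed");

    // The first write to a shared page raises a page fault. With `cow` enabled,
    // handle_page_fault() queries the PTE and dispatches to handle_cow_fault():
    //   ref_count == 1 -> restore the area's original flags (WRITE back) in place;
    //   ref_count >= 2 -> allocate a fresh frame, copy the page contents, drop one
    //                     reference on the old frame, and remap the faulting
    //                     address to the new frame.
    assert!(child.handle_page_fault(user_va, MappingFlags::WRITE));
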
From 459349f8358dec0e62f6cc2fe0d383abd0499b79 Mon Sep 17 00:00:00 2001 From: mingzi Date: Sun, 15 Jun 2025 20:38:26 +0800 Subject: [PATCH 14/15] refactor: reorganize memory management modules - Move PageIterWrapper to root module - Clean up unused imports - Simplify cow feature logic - Fix docstring alignment - Remove redundant page size handling --- modules/axdma/src/dma.rs | 5 +- modules/axmm/src/aspace.rs | 60 ++++++++++--------- modules/axmm/src/backend/alloc.rs | 2 +- modules/axmm/src/backend/mod.rs | 2 - modules/axmm/src/frameinfo.rs | 7 +-- modules/axmm/src/lib.rs | 1 + .../src/{backend => }/page_iter_wrapper.rs | 1 - 7 files changed, 37 insertions(+), 41 deletions(-) rename modules/axmm/src/{backend => }/page_iter_wrapper.rs (99%) diff --git a/modules/axdma/src/dma.rs b/modules/axdma/src/dma.rs index d3136f3b48..a1eae85f76 100644 --- a/modules/axdma/src/dma.rs +++ b/modules/axdma/src/dma.rs @@ -2,10 +2,7 @@ use core::{alloc::Layout, ptr::NonNull}; use allocator::{AllocError, AllocResult, BaseAllocator, ByteAllocator}; use axalloc::{DefaultByteAllocator, global_allocator}; -use axhal::{ - mem::virt_to_phys, - paging::{MappingFlags, PageSize}, -}; +use axhal::{mem::virt_to_phys, paging::MappingFlags}; use kspin::SpinNoIrq; use log::{debug, error}; use memory_addr::{PAGE_SIZE_4K, VirtAddr, va}; diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs index b1b55ea9e5..8e80f84c0a 100644 --- a/modules/axmm/src/aspace.rs +++ b/modules/axmm/src/aspace.rs @@ -7,8 +7,9 @@ use memory_addr::{MemoryAddr, PAGE_SIZE_4K, PhysAddr, VirtAddr, VirtAddrRange, i use memory_set::{MemoryArea, MemorySet}; use page_table_multiarch::PageSize; -use crate::backend::{Backend, PageIterWrapper}; +use crate::backend::Backend; use crate::mapping_err_to_ax_err; +use crate::page_iter_wrapper::PageIterWrapper; #[cfg(feature = "cow")] use crate::backend::{alloc_frame, dealloc_frame}; @@ -270,21 +271,22 @@ impl AddrSpace { if let Backend::Alloc { populate, align } = *backend { for addr in PageIterWrapper::new(start, area.end().min(end), align).unwrap() { match self.pt.query(addr) { - #[cfg(not(feature = "cow"))] - Ok(_) => (), - #[cfg(feature = "cow")] + #[allow(unused_variables)] Ok((paddr, flags, page_size)) => { - // if the page is already mapped and write intentions, try cow. - if flags.contains(MappingFlags::WRITE) { - continue; - } else if _access_flags.contains(MappingFlags::WRITE) { - if !Self::handle_cow_fault( - addr, - paddr, - flags, - page_size, - &mut self.pt, - ) { + #[cfg(feature = "cow")] + { + // if the page is already mapped and write intentions, try cow. + if flags.contains(MappingFlags::WRITE) { + continue; + } else if _access_flags.contains(MappingFlags::WRITE) + && !Self::handle_cow_fault( + addr, + paddr, + flags, + page_size, + &mut self.pt, + ) + { return Err(AxError::NoMemory); } } @@ -506,19 +508,19 @@ impl AddrSpace { // - shared pages (If there is a shared page in the vma) // - cow #[cfg(feature = "cow")] - if access_flags.contains(MappingFlags::WRITE) { - if let Ok((paddr, _, page_size)) = self.pt.query(vaddr) { - // 1. page fault caused by write - // 2. pte exists - // 3. Not shared memory - return Self::handle_cow_fault( - vaddr, - paddr, - orig_flags, - page_size, - &mut self.pt, - ); - } + if access_flags.contains(MappingFlags::WRITE) + && let Ok((paddr, _, page_size)) = self.pt.query(vaddr) + { + // 1. page fault caused by write + // 2. pte exists + // 3. 
Not shared memory + return Self::handle_cow_fault( + vaddr, + paddr, + orig_flags, + page_size, + &mut self.pt, + ); } return area @@ -580,7 +582,7 @@ impl AddrSpace { }; #[cfg(feature = "cow")] - let cow_flags = area.flags().clone() - MappingFlags::WRITE; + let cow_flags = area.flags() - MappingFlags::WRITE; for vaddr in PageIterWrapper::new(area.start(), area.end(), align) .expect("Failed to create page iterator") diff --git a/modules/axmm/src/backend/alloc.rs b/modules/axmm/src/backend/alloc.rs index aee8792557..7cc40bab19 100644 --- a/modules/axmm/src/backend/alloc.rs +++ b/modules/axmm/src/backend/alloc.rs @@ -1,4 +1,4 @@ -use crate::backend::page_iter_wrapper::PageIterWrapper; +use crate::page_iter_wrapper::PageIterWrapper; use axalloc::global_allocator; use axhal::mem::{phys_to_virt, virt_to_phys}; use axhal::paging::{MappingFlags, PageSize, PageTable}; diff --git a/modules/axmm/src/backend/mod.rs b/modules/axmm/src/backend/mod.rs index 188ade31c9..59c5a45c70 100644 --- a/modules/axmm/src/backend/mod.rs +++ b/modules/axmm/src/backend/mod.rs @@ -3,12 +3,10 @@ use axhal::paging::{MappingFlags, PageTable}; use memory_addr::VirtAddr; use memory_set::MappingBackend; -pub use page_iter_wrapper::PageIterWrapper; use page_table_multiarch::PageSize; mod alloc; mod linear; -mod page_iter_wrapper; pub use alloc::{alloc_frame, dealloc_frame}; diff --git a/modules/axmm/src/frameinfo.rs b/modules/axmm/src/frameinfo.rs index 63fde0b64d..87c1858426 100644 --- a/modules/axmm/src/frameinfo.rs +++ b/modules/axmm/src/frameinfo.rs @@ -14,7 +14,6 @@ use core::{ use alloc::boxed::Box; use lazy_static::lazy_static; -use lazyinit::LazyInit; use memory_addr::PhysAddr; // 4 kb page const FRAME_SHIFT: usize = 12; @@ -51,7 +50,7 @@ impl FrameRefTable { /// /// # Parameters /// - `paddr`: It must be an aligned physical address; if it's a huge page, - /// it must be the starting physical address. + /// it must be the starting physical address. pub fn inc_ref(&self, paddr: PhysAddr) { self.info(paddr).ref_count.fetch_add(1, Ordering::SeqCst); } @@ -59,7 +58,7 @@ impl FrameRefTable { /// Decreases the reference count of the frame associated with a physical address. /// /// - `paddr`: It must be an aligned physical address; if it's a huge page, - /// it must be the starting physical address. + /// it must be the starting physical address. /// /// # Returns /// The updated reference count after decrementing. @@ -71,7 +70,7 @@ impl FrameRefTable { /// /// # Parameters /// - `paddr`: It must be an aligned physical address; if it's a huge page, - /// it must be the starting physical address. + /// it must be the starting physical address. /// /// # Returns /// A reference to the `FrameInfo` associated with the given physical address. 
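As a side note to the table above (illustrative only, with a made-up base address), the lookup in `FrameRefTable::info()` reduces to an offset-and-shift:

    // Assuming, purely for illustration, PHYS_MEMORY_BASE = 0x8000_0000 and
    // FRAME_SHIFT = 12 (4 KiB frames):
    //   paddr = 0x8020_3000
    //   index = (0x8020_3000 - 0x8000_0000) >> 12 = 0x203
    // MAX_FRAME_NUM = PHYS_MEMORY_SIZE >> FRAME_SHIFT bounds the table, so only
    // addresses inside [PHYS_MEMORY_BASE, PHYS_MEMORY_BASE + PHYS_MEMORY_SIZE)
    // have a FrameInfo slot.
    let index = (paddr.as_usize() - axconfig::plat::PHYS_MEMORY_BASE) >> FRAME_SHIFT;
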
diff --git a/modules/axmm/src/lib.rs b/modules/axmm/src/lib.rs index de74ed4e74..92cf8d086d 100644 --- a/modules/axmm/src/lib.rs +++ b/modules/axmm/src/lib.rs @@ -10,6 +10,7 @@ mod aspace; mod backend; #[cfg(feature = "cow")] mod frameinfo; +mod page_iter_wrapper; pub use self::aspace::AddrSpace; pub use self::backend::Backend; diff --git a/modules/axmm/src/backend/page_iter_wrapper.rs b/modules/axmm/src/page_iter_wrapper.rs similarity index 99% rename from modules/axmm/src/backend/page_iter_wrapper.rs rename to modules/axmm/src/page_iter_wrapper.rs index 98b5ac33fb..08f56b530e 100644 --- a/modules/axmm/src/backend/page_iter_wrapper.rs +++ b/modules/axmm/src/page_iter_wrapper.rs @@ -56,7 +56,6 @@ impl PageIterWrapper { PageSize::Size4K => PageIter4K::::new(start, end).map(Self::Size4K), PageSize::Size2M => PageIter2M::::new(start, end).map(Self::Size2M), PageSize::Size1G => PageIter1G::::new(start, end).map(Self::Size1G), - _ => None, } } } From 639278dc96bb9e3560c410ff77a04311cd392b9e Mon Sep 17 00:00:00 2001 From: mingzi Date: Sun, 15 Jun 2025 21:46:14 +0800 Subject: [PATCH 15/15] doc: fix doc link --- modules/axmm/src/aspace.rs | 6 +++--- modules/axmm/src/frameinfo.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/axmm/src/aspace.rs b/modules/axmm/src/aspace.rs index 8e80f84c0a..c839eab5b8 100644 --- a/modules/axmm/src/aspace.rs +++ b/modules/axmm/src/aspace.rs @@ -538,7 +538,7 @@ impl AddrSpace { /// share their mappings into the new one. /// /// ### Behavior with `cow` Feature Enabled - /// - For memory areas backed by `Backend::Alloc`, the `populate` flag is forced + /// - For memory areas backed by [`Backend::Alloc`], the `populate` flag is forced /// to `false` to avoid preemptive physical allocation in the new space. /// - All writable mappings have their `WRITE` flag removed, enforcing /// Copy-On-Write (COW) semantics. @@ -551,8 +551,8 @@ impl AddrSpace { /// - Each mapped page in the original address space is copied into the /// corresponding address in the new address space. /// - If the target address in the new space is not mapped, a page fault is - /// handled via `backend.handle_page_fault`, and memory is allocated before copying. - /// - The actual copying is done using `core::ptr::copy_nonoverlapping` at the + /// handled via [`Backend::handle_page_fault`], and memory is allocated before copying. + /// - The actual copying is done using [`core::ptr::copy_nonoverlapping`] at the /// physical address level. pub fn try_clone(&mut self) -> AxResult { let mut new_aspace = Self::new_empty(self.base(), self.size())?; diff --git a/modules/axmm/src/frameinfo.rs b/modules/axmm/src/frameinfo.rs index 87c1858426..b6add837de 100644 --- a/modules/axmm/src/frameinfo.rs +++ b/modules/axmm/src/frameinfo.rs @@ -3,7 +3,7 @@ //! A simple physical FrameInfo manager is provided to track and manage //! the reference count for every 4KB memory page frame in the system. //! -//! There is a [' FrameInfo '] struct for each physical page frame +//! There is a [`FrameInfo`] struct for each physical page frame //! that keeps track of its reference count. //! NOTE: If the page is huge page, its [`FrameInfo`] is placed at the //! starting physical address.
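
Taking the series as a whole, per-frame reference counting with the `cow` feature behaves as sketched below. This is a rough illustration only, not code from the tree; it assumes the final `alloc_frame(zeroed, align)` / `dealloc_frame(frame, align)` signatures and the crate-internal `frame_table()` accessor, so it is not literally callable from outside `axmm`.

    // A fresh 4 KiB frame starts with ref_count == 1 (alloc_frame bumps it).
    let pa = alloc_frame(true, PageSize::Size4K).expect("out of memory");

    // Sharing the frame with a second address space (as try_clone() does)
    // raises the count to 2.
    frame_table().inc_ref(pa);

    // Unmapping in one address space: dec_ref() returns the pre-decrement
    // count (fetch_sub), so 2 > 1 and the frame stays allocated.
    dealloc_frame(pa, PageSize::Size4K);

    // Unmapping the last user: dec_ref() returns 1, and the pages are handed
    // back to the global allocator.
    dealloc_frame(pa, PageSize::Size4K);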