Skip to content
Merged
Show file tree
Hide file tree
Changes from 9 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions modules/axhal/src/arch/x86_64/trap.rs
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,7 @@ fn vec_to_str(vec: u64) -> &'static str {
fn err_code_to_flags(err_code: u64) -> Result<MappingFlags, u64> {
let code = PageFaultErrorCode::from_bits_truncate(err_code);
let reserved_bits = (PageFaultErrorCode::CAUSED_BY_WRITE
| PageFaultErrorCode::PROTECTION_VIOLATION
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@mingzi47 Sorry to bother you, I was wondering if this change is necessary? Because I'm porting these to axcpu.

| PageFaultErrorCode::USER_MODE
| PageFaultErrorCode::INSTRUCTION_FETCH)
.complement();
Expand Down
139 changes: 134 additions & 5 deletions modules/axmm/src/aspace.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,10 @@ use memory_addr::{
MemoryAddr, PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned_4k,
};
use memory_set::{MemoryArea, MemorySet};
use page_table_multiarch::PageSize;

use crate::backend::Backend;
use crate::backend::{Backend, alloc_frame, dealloc_frame};
use crate::frameinfo::{add_frame_ref, get_frame_info};
use crate::mapping_err_to_ax_err;

/// The virtual memory address space.
Expand Down Expand Up @@ -169,7 +171,7 @@ impl AddrSpace {

while let Some(area) = self.areas.find(start) {
let backend = area.backend();
if let Backend::Alloc { populate } = backend {
if let Backend::Alloc { populate, .. } = backend {
if !*populate {
for addr in PageIter4K::new(start, area.end().min(end)).unwrap() {
match self.pt.query(addr) {
Expand Down Expand Up @@ -373,9 +375,23 @@ impl AddrSpace {
if let Some(area) = self.areas.find(vaddr) {
let orig_flags = area.flags();
if orig_flags.contains(access_flags) {
return area
.backend()
.handle_page_fault(vaddr, orig_flags, &mut self.pt);
// Two cases enter the branch:
// - shared pages (If there is a shared page in the vma)
// - cow
if let Ok((paddr, _, page_size)) = self.pt.query(vaddr) {
if !access_flags.contains(MappingFlags::WRITE) {
return false;
}
let off = page_size.align_offset(vaddr.into());
// 1. page fault caused by write
// 2. pte exists
// 3. Not shared memory
return self.handle_cow_fault(vaddr, paddr.sub(off), orig_flags, page_size);
} else {
return area
.backend()
.handle_page_fault(vaddr, orig_flags, &mut self.pt);
}
}
}
false
Expand All @@ -387,6 +403,7 @@ impl AddrSpace {

for area in self.areas.iter() {
let backend = area.backend();

// Remap the memory area in the new address space.
let new_area =
MemoryArea::new(area.start(), area.size(), area.flags(), backend.clone());
Expand Down Expand Up @@ -433,6 +450,118 @@ impl AddrSpace {
}
Ok(new_aspace)
}

/// Creates a copy of the current [`AddrSpace`] with copy-on-write (COW).
///
/// For pages that require COW, remove `write` flags.
///
/// Every page currently mapped in the old page table is downgraded to
/// read-only in *both* address spaces and its frame reference count is
/// bumped; the first write access in either space then faults into
/// `handle_cow_fault`, which copies the frame on demand.
pub fn copy_with_cow(&mut self) -> AxResult<Self> {
    let mut new_aspace = Self::new_empty(self.base(), self.size())?;
    let new_pt = &mut new_aspace.pt;
    let old_pt = &mut self.pt;

    for area in self.areas.iter() {
        let mut backend = area.backend().clone();
        if let Backend::Alloc { populate, .. } = &mut backend {
            // Forcing `populate = false` is to prevent the subsequent `new_aspace.areas.map`
            // from mapping page table entries for the virtual addresses.
            *populate = false
        }

        // Register the area in the new space; with `populate` forced off and
        // the third argument `false`, no PTEs are created here — the shared
        // mappings are installed manually below.
        let new_area =
            MemoryArea::new(area.start(), area.size(), area.flags(), backend.clone());
        new_aspace
            .areas
            .map(new_area, new_pt, false)
            .map_err(mapping_err_to_ax_err)?;

        // Linear-backed regions are usually allocated by the kernel and are shared,
        // so they are not subjected to COW.
        if matches!(backend, Backend::Linear { .. }) {
            continue;
        }

        // Strip WRITE so the next write access faults into the COW path.
        let mut flags = area.flags();
        flags.remove(MappingFlags::WRITE);
        // If the page is mapped in the old page table:
        // - Update its permissions in the old page table using `flags`.
        // - Map the same physical page into the new page table at the same
        //   virtual address, with the same page size and `flags`.
        // TODO: huge page iter
        for vaddr in
            PageIter4K::new(area.start(), area.end()).expect("Failed to create page iterator")
        {
            if let Ok((paddr, _, page_size)) = old_pt.query(vaddr) {
                // FIXME: need use huge page iter
                // NOTE(review): for a huge page, every 4K step queries the
                // same mapping: `paddr` is offset into the huge frame (so
                // `add_frame_ref` would hit a FrameInfo that is not the
                // frame's start), and the repeated `new_pt.map` of the same
                // huge page looks like it would fail — confirm this path is
                // only exercised with 4K pages until the iterator is fixed.
                add_frame_ref(paddr);

                old_pt
                    .protect(vaddr, flags)
                    .map(|(_, tlb)| tlb.flush())
                    .expect("protect failed");
                new_pt
                    .map(vaddr, paddr, page_size, flags)
                    .map(|tlb| tlb.flush())
                    .expect("map failed");
            }
        }
    }

    Ok(new_aspace)
}

/// Handles a Copy-On-Write (COW) page fault.
///
/// # Arguments
/// - `vaddr`: The virtual address that triggered the fault.
/// - `paddr`: It must be an aligned physical address; if it's a huge page,
///   it must be the starting physical address.
/// - `flags`: vma flags (the area's original flags, including WRITE).
/// - `page_size`: The size of the page on which the current physical address is located
///
/// # Returns
/// - `true` if the page fault was handled successfully.
/// - `false` if the fault handling failed (e.g., allocation failed or invalid ref count).
fn handle_cow_fault(
    &mut self,
    vaddr: VirtAddr,
    paddr: PhysAddr,
    flags: MappingFlags,
    page_size: PageSize,
) -> bool {
    let frame_info = get_frame_info(paddr);
    // NOTE(review): the ref_count() read and the action taken on it are not
    // atomic as a pair; if another address space can drop its reference
    // concurrently, this check-then-act races — confirm the caller holds a
    // lock that serializes faults against aspace teardown/fork.
    match frame_info.ref_count() {
        // A mapped page must hold at least one reference; 0 is a bookkeeping bug.
        0 => unreachable!(),
        // There is only one AddrSpace reference to the page,
        // so there is no need to copy it: just restore the WRITE permission.
        1 => self
            .pt
            .protect(vaddr, flags)
            .map(|(_, tlb)| tlb.flush())
            .is_ok(),
        // Allocates the new page and copies the contents of the original page,
        // remapping the virtual address to the physical address of the new page.
        2.. => match alloc_frame(false, page_size.into()) {
            Some(new_frame) => {
                // SAFETY: both frames are direct-mapped via phys_to_virt.
                // NOTE(review): this copies `page_size` bytes — assumes
                // `alloc_frame` returns a region at least `page_size` bytes
                // long for huge pages, TODO confirm.
                unsafe {
                    core::ptr::copy_nonoverlapping(
                        phys_to_virt(paddr).as_ptr(),
                        phys_to_virt(new_frame).as_mut_ptr(),
                        page_size.into(),
                    )
                };

                // Drop this space's reference to the shared frame (frees it
                // if this was the last reference).
                dealloc_frame(paddr);

                self.pt
                    .remap(vaddr, new_frame, flags)
                    .map(|(_, tlb)| {
                        tlb.flush();
                    })
                    .is_ok()
            }
            // Out of memory: report failure to the fault handler.
            None => false,
        },
    }
}
}

impl fmt::Debug for AddrSpace {
Expand Down
45 changes: 35 additions & 10 deletions modules/axmm/src/backend/alloc.rs
Original file line number Diff line number Diff line change
@@ -1,27 +1,51 @@
use axalloc::global_allocator;
use axhal::mem::{phys_to_virt, virt_to_phys};
use axhal::paging::{MappingFlags, PageSize, PageTable};
use axhal::{
mem::{phys_to_virt, virt_to_phys},
paging::{MappingFlags, PageSize, PageTable},
};
use memory_addr::{PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr};

use crate::frameinfo::{add_frame_ref, dec_frame_ref};

use super::Backend;

fn alloc_frame(zeroed: bool) -> Option<PhysAddr> {
let vaddr = VirtAddr::from(global_allocator().alloc_pages(1, PAGE_SIZE_4K).ok()?);
/// Allocates a single physical frame with optional zero-initialization and alignment.
///
/// # Parameters
/// - `zeroed`: If `true`, the allocated frame memory is zeroed out.
/// - `align`: The alignment requirement (in pages) for the allocation.
///
/// # Returns
/// Returns `Some(PhysAddr)` of the allocated frame on success, or `None` if allocation fails.
pub fn alloc_frame(zeroed: bool, align: usize) -> Option<PhysAddr> {
let vaddr = VirtAddr::from(global_allocator().alloc_pages(1, align).ok()?);
if zeroed {
unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, PAGE_SIZE_4K) };
unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, align) };
}
let paddr = virt_to_phys(vaddr);
add_frame_ref(paddr);
Some(paddr)
}

fn dealloc_frame(frame: PhysAddr) {
/// Deallocates a previously allocated physical frame.
///
/// This function decreases the reference count associated with the frame.
/// When the reference count reaches 1, it actually frees the frame memory.
///
/// # Parameters
/// - `frame`: The physical address of the frame to deallocate.
pub fn dealloc_frame(frame: PhysAddr) {
let vaddr = phys_to_virt(frame);
global_allocator().dealloc_pages(vaddr.as_usize(), 1);
match dec_frame_ref(frame) {
0 => unreachable!(),
1 => global_allocator().dealloc_pages(vaddr.as_usize(), 1),
_ => (),
}
}

impl Backend {
/// Creates a new allocation mapping backend.
pub const fn new_alloc(populate: bool) -> Self {
pub fn new_alloc(populate: bool) -> Self {
    // NOTE(review): `const` was dropped from this constructor in this change;
    // if the `Alloc` variant still only carries `populate`, it could remain a
    // `const fn` — confirm against the variant's current fields.
    Self::Alloc { populate }
}

Expand All @@ -42,7 +66,7 @@ impl Backend {
if populate {
// allocate all possible physical frames for populated mapping.
for addr in PageIter4K::new(start, start + size).unwrap() {
if let Some(frame) = alloc_frame(true) {
if let Some(frame) = alloc_frame(true, PAGE_SIZE_4K) {
if let Ok(tlb) = pt.map(addr, frame, PageSize::Size4K, flags) {
tlb.ignore(); // TLB flush on map is unnecessary, as there are no outdated mappings.
} else {
Expand Down Expand Up @@ -71,6 +95,7 @@ impl Backend {
return false;
}
tlb.flush();

dealloc_frame(frame);
} else {
// Deallocation is needn't if the page is not mapped.
Expand All @@ -87,7 +112,7 @@ impl Backend {
) -> bool {
if populate {
false // Populated mappings should not trigger page faults.
} else if let Some(frame) = alloc_frame(true) {
} else if let Some(frame) = alloc_frame(true, PAGE_SIZE_4K) {
// Allocate a physical frame lazily and map it to the fault address.
// `vaddr` does not need to be aligned. It will be automatically
// aligned during `pt.map` regardless of the page size.
Expand Down
2 changes: 2 additions & 0 deletions modules/axmm/src/backend/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,8 @@ use memory_set::MappingBackend;
mod alloc;
mod linear;

pub use alloc::{alloc_frame, dealloc_frame};

/// A unified enum type for different memory mapping backends.
///
/// Currently, two backends are implemented:
Expand Down
88 changes: 88 additions & 0 deletions modules/axmm/src/frameinfo.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
//! FrameInfo
//!
//! A simple physical FrameInfo manager is provided to track and manage
//! the reference count for every 4KB memory page frame in the system.
//!
//! There is a [`FrameInfo`] struct for each physical page frame
//! that keeps track of its reference count.
//! NOTE: If the page is huge page, its [`FrameInfo`] is placed at the
//! starting physical address.
use core::sync::atomic::{AtomicUsize, Ordering};

use alloc::vec::Vec;
use lazyinit::LazyInit;
use memory_addr::PhysAddr;

// Frames are tracked at 4 KiB granularity: 2^12 bytes per frame.
const FRAME_SHIFT: usize = 12;

/// Total number of trackable 4 KiB frames in physical memory.
pub const MAX_FRAME_NUM: usize = axconfig::plat::PHYS_MEMORY_SIZE >> FRAME_SHIFT;

// One `FrameInfo` entry per physical frame; populated by
// `init_frame_info_table` during memory-management init.
static FRAME_INFO_TABLE: LazyInit<Vec<FrameInfo>> = LazyInit::new();

pub fn init_frame_info_table() {
let _ =
FRAME_INFO_TABLE.init_once((0..MAX_FRAME_NUM).map(|_| FrameInfo::new_empty()).collect());
}

/// Returns the `FrameInfo` structure associated with a given physical address.
///
/// # Parameters
/// - `paddr`: It must be an aligned physical address; if it's a huge page,
///   it must be the starting physical address.
///
/// # Returns
/// A reference to the `FrameInfo` associated with the given physical address.
///
/// # Panics
/// Panics if `paddr` maps to a frame number outside the table (the `Vec`
/// indexing is unchecked), or — presumably, per `LazyInit` deref semantics —
/// if the table has not been initialized yet (confirm).
pub fn get_frame_info(paddr: PhysAddr) -> &'static FrameInfo {
    &FRAME_INFO_TABLE[phys_to_pfn(paddr)]
}

/// Increases the reference count of the frame associated with a physical address.
///
/// # Parameters
/// - `paddr`: It must be an aligned physical address; if it's a huge page,
///   it must be the starting physical address.
pub fn add_frame_ref(paddr: PhysAddr) {
    // The pre-increment count returned by `inc_ref` is not needed here.
    get_frame_info(paddr).inc_ref();
}

/// Decreases the reference count of the frame associated with a physical address.
///
/// - `paddr`: It must be an aligned physical address; if it's a huge page,
///   it must be the starting physical address.
///
/// # Returns
/// The reference count *before* the decrement (`fetch_sub` semantics): a
/// return value of 1 means this call released the last reference. (The
/// previous doc claimed the post-decrement value, which contradicts how
/// `dealloc_frame` matches on the result.)
pub fn dec_frame_ref(paddr: PhysAddr) -> usize {
    let frame = get_frame_info(paddr);
    frame.dec_ref()
}

/// Per-frame metadata: an atomic reference count recording how many address
/// spaces currently map the frame.
pub struct FrameInfo {
    // Number of outstanding references to this physical frame.
    ref_count: AtomicUsize,
}

impl FrameInfo {
    // Creates an entry with a zero reference count (frame unreferenced).
    fn new_empty() -> Self {
        Self {
            ref_count: AtomicUsize::new(0),
        }
    }

    // Increments the count; returns the value *before* the increment
    // (`fetch_add` semantics).
    fn inc_ref(&self) -> usize {
        self.ref_count.fetch_add(1, Ordering::SeqCst)
    }

    // Decrements the count; returns the value *before* the decrement
    // (`fetch_sub` semantics). Decrementing an already-zero count wraps
    // around; callers treat a returned 0 as unreachable.
    fn dec_ref(&self) -> usize {
        self.ref_count.fetch_sub(1, Ordering::SeqCst)
    }

    /// Returns the current reference count.
    pub fn ref_count(&self) -> usize {
        self.ref_count.load(Ordering::SeqCst)
    }
}

/// Converts a physical address into its frame number (index into
/// `FRAME_INFO_TABLE`), relative to the start of physical memory.
fn phys_to_pfn(paddr: PhysAddr) -> usize {
    let offset = paddr.as_usize() - axconfig::plat::PHYS_MEMORY_BASE;
    offset >> FRAME_SHIFT
}
4 changes: 4 additions & 0 deletions modules/axmm/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,14 @@ extern crate alloc;

mod aspace;
mod backend;
mod frameinfo;

pub use self::aspace::AddrSpace;
pub use self::backend::Backend;

use axerrno::{AxError, AxResult};
use axhal::mem::phys_to_virt;
use frameinfo::init_frame_info_table;
use kspin::SpinNoIrq;
use lazyinit::LazyInit;
use memory_addr::{PhysAddr, va};
Expand Down Expand Up @@ -63,6 +65,8 @@ pub fn init_memory_management() {
debug!("kernel address space init OK: {:#x?}", kernel_aspace);
KERNEL_ASPACE.init_once(SpinNoIrq::new(kernel_aspace));
axhal::paging::set_kernel_page_table_root(kernel_page_table_root());

init_frame_info_table();
}

/// Initializes kernel paging for secondary CPUs.
Expand Down