1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

3 changes: 2 additions & 1 deletion Cargo.toml
@@ -18,6 +18,7 @@ xmas-elf = "0.9"
spin = "0.9"
crate_interface = "0.1"
bitflags = "2.6"
percpu = "0.2.0"

kernel-elf-parser = "0.3"
num_enum = { version = "0.7", default-features = false }
@@ -44,4 +45,4 @@ page_table_entry = { git = "https://github.com/yfblock/page_table_multiarch.git"
x86 = "0.52"

[build-dependencies]
toml_edit = "0.22"
toml_edit = "0.22"
50 changes: 32 additions & 18 deletions src/mm.rs
@@ -59,7 +59,7 @@ fn map_elf(
&interp_elf,
axconfig::plat::USER_INTERP_BASE,
Some(uspace_base as isize),
uspace_base,
// uspace_base,
)
.map_err(|_| AxError::InvalidData)?;
// Set the first argument to the path of the user app.
@@ -171,28 +171,42 @@ pub fn load_user_app(
Ok((entry, user_sp))
}

#[percpu::def_percpu]
static mut ACCESSING_USER_MEM: bool = false;

/// Enables scoped access into user memory, allowing page faults to occur inside
/// the kernel.
pub fn access_user_memory<R>(f: impl FnOnce() -> R) -> R {
ACCESSING_USER_MEM.with_current(|v| {
*v = true;
let result = f();
*v = false;
result
})
}
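
For context, a minimal usage sketch (not part of this diff; `copy_from_user` and its call site are hypothetical): raw reads of user memory from kernel code would be wrapped in `access_user_memory` so that a fault on a not-yet-populated user page is resolved by the handler below instead of being rejected as a kernel fault.

fn copy_from_user(src: *const u8, len: usize) -> alloc::vec::Vec<u8> {
    // Hypothetical helper built on `access_user_memory` above. The per-CPU
    // ACCESSING_USER_MEM flag stays set for the duration of the closure, so
    // kernel-mode page faults on `src..src + len` are handled as user faults.
    access_user_memory(|| {
        let mut buf = alloc::vec::Vec::with_capacity(len);
        // SAFETY: the caller is assumed to have validated the region first
        // (e.g. via `check_region` in src/ptr.rs); faults taken here are
        // demand-paging faults, not access violations.
        unsafe {
            core::ptr::copy_nonoverlapping(src, buf.as_mut_ptr(), len);
            buf.set_len(len);
        }
        buf
    })
}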

#[register_trap_handler(PAGE_FAULT)]
fn handle_page_fault(vaddr: VirtAddr, access_flags: MappingFlags, is_user: bool) -> bool {
warn!(
"Page fault at {:#x}, access_flags: {:#x?}",
vaddr, access_flags
);
if is_user {
if !axtask::current()
.task_ext()
.aspace
.lock()
.handle_page_fault(vaddr, access_flags)
{
warn!(
"{}: segmentation fault at {:#x}, exit!",
axtask::current().id_name(),
vaddr
);
axtask::exit(-1);
}
true
} else {
false
if !is_user && !ACCESSING_USER_MEM.read_current() {
return false;
}

if !axtask::current()
.task_ext()
.aspace
.lock()
.handle_page_fault(vaddr, access_flags)
{
warn!(
"{}: segmentation fault at {:#x}, exit!",
axtask::current().id_name(),
vaddr
);
axtask::exit(-1);
}
true
}
88 changes: 50 additions & 38 deletions src/ptr.rs
@@ -1,68 +1,80 @@
use axerrno::{LinuxError, LinuxResult};
use axhal::paging::{MappingFlags, PageTable};
use axhal::paging::MappingFlags;
use axtask::{TaskExtRef, current};
use memory_addr::{MemoryAddr, PAGE_SIZE_4K, PageIter4K, VirtAddr};
use memory_addr::{MemoryAddr, PAGE_SIZE_4K, PageIter4K, VirtAddr, VirtAddrRange};

use core::{alloc::Layout, ffi::CStr, slice};

fn check_page(pt: &PageTable, page: VirtAddr, access_flags: MappingFlags) -> LinuxResult<()> {
let Ok((_, flags, _)) = pt.query(page) else {
return Err(LinuxError::EFAULT);
};
if !flags.contains(access_flags) {
return Err(LinuxError::EFAULT);
}
Ok(())
}
use crate::mm::access_user_memory;

fn check_region(start: VirtAddr, layout: Layout, access_flags: MappingFlags) -> LinuxResult<()> {
let align = layout.align();
if start.as_usize() & (align - 1) != 0 {
return Err(LinuxError::EFAULT);
}

// TODO: currently we're doing a very basic and inefficient check, due to
// the fact that AddrSpace does not expose necessary API.
let task = current();
let aspace = task.task_ext().aspace.lock();
let pt = aspace.page_table();

let page_start = start.align_down_4k();
let page_end = (start + layout.size()).align_up_4k();
for page in PageIter4K::new(page_start, page_end).unwrap() {
check_page(pt, page, access_flags)?;
if !aspace.check_region_access(
VirtAddrRange::from_start_size(start, layout.size()),
access_flags,
) {
return Err(LinuxError::EFAULT);
}

// Now force each page to be loaded into memory.
access_user_memory(|| {
let page_start = start.align_down_4k();
let page_end = (start + layout.size()).align_up_4k();
for page in PageIter4K::new(page_start, page_end).unwrap() {
// SAFETY: The page is valid and we've checked the access flags.
unsafe { page.as_ptr_of::<u8>().read_volatile() };
}
});

Ok(())
}
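
For reference, a hypothetical caller (not part of this diff) showing how the rewritten check is meant to be used by syscall code: `check_region` now both verifies the mapping flags through `check_region_access` and touches every page up front, so later accesses do not fault while unrelated locks are held.

fn validate_user_read(ptr: *const u8, len: usize) -> LinuxResult<&'static [u8]> {
    // Hypothetical wrapper; an alignment of 1 suits a plain byte buffer.
    let layout = Layout::from_size_align(len, 1).map_err(|_| LinuxError::EINVAL)?;
    check_region(VirtAddr::from(ptr as usize), layout, MappingFlags::READ)?;
    // SAFETY: the region was checked and populated by `check_region` above.
    Ok(unsafe { slice::from_raw_parts(ptr, len) })
}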

fn check_cstr(start: VirtAddr, access_flags: MappingFlags) -> LinuxResult<&'static CStr> {
// TODO: see check_region
let task = current();
let aspace = task.task_ext().aspace.lock();
let pt = aspace.page_table();

let mut page = start.align_down_4k();
check_page(pt, page, access_flags)?;
page += PAGE_SIZE_4K;

let start: *const u8 = start.as_ptr();
let mut len = 0;

loop {
// SAFETY: Outer caller has provided a pointer to a valid C string.
let ptr = unsafe { start.add(len) };
if ptr == page.as_ptr() {
check_page(pt, page, access_flags)?;
page += PAGE_SIZE_4K;
access_user_memory(|| {
loop {
// SAFETY: Outer caller has provided a pointer to a valid C string.
let ptr = unsafe { start.add(len) };
if ptr >= page.as_ptr() {
// We cannot prepare `aspace` outside of the loop: holding it
// means holding a mutex that the page fault handler also needs,
// and page faults can trigger inside the loop.

// TODO: this is inefficient, but we have to do this instead of
// querying the page table, since the page might not have been
// allocated yet.
let task = current();
let aspace = task.task_ext().aspace.lock();
if !aspace.check_region_access(
VirtAddrRange::from_start_size(page, PAGE_SIZE_4K),
access_flags,
) {
return Err(LinuxError::EFAULT);
}

page += PAGE_SIZE_4K;
}

// This might trigger a page fault.
// SAFETY: The pointer is valid and points to a valid memory region.
if unsafe { *ptr } == 0 {
break;
}
len += 1;
}

// SAFETY: The pointer is valid and points to a valid memory region.
if unsafe { *ptr } == 0 {
break;
}
len += 1;
}
Ok(())
})?;

// SAFETY: We've checked that the memory region contains a valid C string.
Ok(unsafe { CStr::from_bytes_with_nul_unchecked(slice::from_raw_parts(start, len + 1)) })
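
A hypothetical usage example (not part of this diff): resolving a user-supplied path argument with the rewritten `check_cstr`, which re-checks access at each page boundary and performs the byte reads inside `access_user_memory` so the string can be demand-paged in as it is scanned.

fn user_path(path: *const u8) -> LinuxResult<&'static str> {
    // Hypothetical wrapper; name and signature are illustrative only.
    let cstr = check_cstr(VirtAddr::from(path as usize), MappingFlags::READ)?;
    cstr.to_str().map_err(|_| LinuxError::EINVAL)
}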