@@ -3,10 +3,12 @@ use axerrno::{LinuxError, LinuxResult};
 use axhal::paging::MappingFlags;
 use axtask::{TaskExtRef, current};
 use linux_raw_sys::general::{
-    MAP_ANONYMOUS, MAP_FIXED, MAP_NORESERVE, MAP_PRIVATE, MAP_SHARED, MAP_STACK, PROT_EXEC,
-    PROT_GROWSDOWN, PROT_GROWSUP, PROT_READ, PROT_WRITE,
+    MAP_ANONYMOUS, MAP_FIXED, MAP_HUGE_1GB, MAP_HUGE_MASK, MAP_HUGE_SHIFT, MAP_HUGETLB,
+    MAP_NORESERVE, MAP_PRIVATE, MAP_SHARED, MAP_STACK, PROT_EXEC, PROT_GROWSDOWN, PROT_GROWSUP,
+    PROT_READ, PROT_WRITE,
 };
-use memory_addr::{VirtAddr, VirtAddrRange};
+use memory_addr::{MemoryAddr, VirtAddr, VirtAddrRange};
+use page_table_multiarch::PageSize;
 
 use crate::file::{File, FileLike};
 
@@ -82,13 +84,25 @@ pub fn sys_mmap(
     // An example is the flags contained none of MAP_PRIVATE, MAP_SHARED, or MAP_SHARED_VALIDATE.
     let map_flags = MmapFlags::from_bits_truncate(flags);
 
+    // The check uses bitwise operations to
+    // verify that exactly one of the two mutually exclusive mapping flags is set
+    if (map_flags.bits() & (MAP_PRIVATE | MAP_SHARED)).count_ones() != 1 {
+        return Err(LinuxError::EINVAL);
+    }
+
     info!(
         "sys_mmap: addr: {:x?}, length: {:x?}, prot: {:?}, flags: {:?}, fd: {:?}, offset: {:?}",
         addr, length, permission_flags, map_flags, fd, offset
     );
 
-    let start = memory_addr::align_down_4k(addr);
-    let end = memory_addr::align_up_4k(addr + length);
+    let page_size = match (flags & MAP_HUGETLB, flags & MAP_HUGE_MASK << MAP_HUGE_SHIFT) {
+        (0, _) => PageSize::Size4K,
+        (_, MAP_HUGE_1GB) => PageSize::Size1G,
+        (_, _) => PageSize::Size2M,
+    };
+
+    let start = addr.align_down(page_size);
+    let end = (addr + length).align_up(page_size);
     let aligned_length = end - start;
     debug!(
         "start: {:x?}, end: {:x?}, aligned_length: {:x?}",
@@ -108,13 +122,15 @@ pub fn sys_mmap(
                 VirtAddr::from(start),
                 aligned_length,
                 VirtAddrRange::new(aspace.base(), aspace.end()),
+                page_size,
             )
             .or(aspace.find_free_area(
                 aspace.base(),
                 aligned_length,
                 VirtAddrRange::new(aspace.base(), aspace.end()),
+                page_size,
             ))
-            .ok_or(LinuxError::ENOMEM)?
+            .ok_or(LinuxError::ENOMEM)?
     };
 
     let populate = if fd == -1 {
@@ -128,6 +144,7 @@ pub fn sys_mmap(
         aligned_length,
         permission_flags.into(),
         populate,
+        page_size,
     )?;
 
     if populate {
@@ -141,7 +158,7 @@ pub fn sys_mmap(
         let length = core::cmp::min(length, file_size - offset);
         let mut buf = vec![0u8; length];
         file.read_at(offset as u64, &mut buf)?;
-        aspace.write(start_addr, &buf)?;
+        aspace.write(start_addr, page_size, &buf)?;
     }
     Ok(start_addr.as_usize() as _)
 }
@@ -150,7 +167,6 @@ pub fn sys_munmap(addr: usize, length: usize) -> LinuxResult<isize> {
     let curr = current();
     let process_data = curr.task_ext().process_data();
     let mut aspace = process_data.aspace.lock();
-    let length = memory_addr::align_up_4k(length);
     let start_addr = VirtAddr::from(addr);
     aspace.unmap(start_addr, length)?;
     axhal::arch::flush_tlb(None);
@@ -171,7 +187,7 @@ pub fn sys_mprotect(addr: usize, length: usize, prot: u32) -> LinuxResult<isize> {
     let mut aspace = process_data.aspace.lock();
     let length = memory_addr::align_up_4k(length);
     let start_addr = VirtAddr::from(addr);
-    aspace.protect(start_addr, length, permission_flags.into())?;
+    aspace.protect(start_addr, length, permission_flags.into(), PageSize::Size4K)?;
 
     Ok(0)
 }
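The new MAP_SHARED/MAP_PRIVATE check in the first added block of `sys_mmap` can be verified in isolation. The sketch below is not the PR's code; it assumes the asm-generic flag values (`MAP_SHARED = 0x01`, `MAP_PRIVATE = 0x02`, re-exported by `linux_raw_sys` in the real code). Masking with the union of the two flags leaves at most two bits, so `count_ones()` distinguishes "neither", "exactly one", and "both"; note that `MAP_SHARED_VALIDATE` (`0x03`) sets both bits and would therefore also be rejected by this check.

```rust
// Minimal sketch of the exclusivity check added to sys_mmap; the flag values are
// the asm-generic ones (assumed here).
const MAP_SHARED: u32 = 0x01;
const MAP_PRIVATE: u32 = 0x02;

/// Returns true iff exactly one of MAP_SHARED / MAP_PRIVATE is set.
fn sharing_mode_is_valid(flags: u32) -> bool {
    (flags & (MAP_PRIVATE | MAP_SHARED)).count_ones() == 1
}

fn main() {
    assert!(!sharing_mode_is_valid(0)); // neither set -> EINVAL
    assert!(sharing_mode_is_valid(MAP_PRIVATE));
    assert!(sharing_mode_is_valid(MAP_SHARED));
    assert!(!sharing_mode_is_valid(MAP_PRIVATE | MAP_SHARED)); // both set -> EINVAL
}
```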
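The page-size selection and alignment in the second hunk can likewise be sketched stand-alone. Everything below is a simplified stand-in, not the PR's code: the constants are the asm-generic Linux values, the local `PageSize` enum mimics `page_table_multiarch::PageSize`, and `align_down`/`align_up` reproduce what the `memory_addr::MemoryAddr` methods do for power-of-two alignments. One precedence note: `<<` binds tighter than `&` in Rust, so the diff's unparenthesized `flags & MAP_HUGE_MASK << MAP_HUGE_SHIFT` already groups as written here.

```rust
// Assumed asm-generic values for the MAP_HUGETLB flag family.
const MAP_HUGETLB: u32 = 0x40000;
const MAP_HUGE_SHIFT: u32 = 26;
const MAP_HUGE_MASK: u32 = 0x3f;
const MAP_HUGE_2MB: u32 = 21 << MAP_HUGE_SHIFT;
const MAP_HUGE_1GB: u32 = 30 << MAP_HUGE_SHIFT;

/// Local stand-in for page_table_multiarch::PageSize.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum PageSize {
    Size4K = 0x1000,
    Size2M = 0x20_0000,
    Size1G = 0x4000_0000,
}

// No MAP_HUGETLB -> 4 KiB pages; MAP_HUGETLB with the 1 GiB encoding -> 1 GiB;
// any other MAP_HUGETLB request falls back to 2 MiB, mirroring the match in the diff.
fn page_size_for(flags: u32) -> PageSize {
    match (flags & MAP_HUGETLB, flags & (MAP_HUGE_MASK << MAP_HUGE_SHIFT)) {
        (0, _) => PageSize::Size4K,
        (_, MAP_HUGE_1GB) => PageSize::Size1G,
        (_, _) => PageSize::Size2M,
    }
}

// Power-of-two alignment helpers equivalent to addr.align_down(page_size) and
// (addr + length).align_up(page_size) in the diff.
fn align_down(addr: usize, align: usize) -> usize {
    addr & !(align - 1)
}

fn align_up(addr: usize, align: usize) -> usize {
    (addr + align - 1) & !(align - 1)
}

fn main() {
    assert_eq!(page_size_for(0), PageSize::Size4K);
    assert_eq!(page_size_for(MAP_HUGETLB | MAP_HUGE_2MB), PageSize::Size2M);
    assert_eq!(page_size_for(MAP_HUGETLB | MAP_HUGE_1GB), PageSize::Size1G);

    // A small request with MAP_HUGE_2MB gets rounded out to 2 MiB boundaries.
    let page_size = page_size_for(MAP_HUGETLB | MAP_HUGE_2MB) as usize;
    let (addr, length) = (0x1234_5678usize, 0x1000usize);
    let start = align_down(addr, page_size);
    let end = align_up(addr + length, page_size);
    assert_eq!(start, 0x1220_0000);
    assert_eq!(end, 0x1240_0000);
    assert_eq!(end - start, page_size); // aligned_length covers one huge page
}
```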