feature: MapMemory and UnmapMemory system calls

Mark Poliakov 2021-11-29 16:57:21 +02:00
parent 3ed41501cb
commit 4c3374de36
10 changed files with 420 additions and 20 deletions

Cargo.lock

@ -69,7 +69,7 @@ checksum = "99a40cabc11c8258822a593f5c51f2d9f4923e715ca9e2a0630cf77ae15f390b"
dependencies = [
"endian-type-rs",
"fallible-iterator",
"memoffset",
"memoffset 0.5.6",
"num-derive",
"num-traits",
"rustc_version",
@ -131,6 +131,7 @@ version = "0.1.0"
dependencies = [
"lazy_static",
"libsys",
"memoffset 0.6.4",
]
[[package]]
@ -151,6 +152,15 @@ dependencies = [
"autocfg",
]
[[package]]
name = "memoffset"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9"
dependencies = [
"autocfg",
]
[[package]]
name = "num-derive"
version = "0.3.3"


@ -264,6 +264,66 @@ impl Space {
Ok(())
}
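/// Allocates `len` pages of physical memory and maps them at the first
/// fully-free virtual range found in `start..end` (first-fit scan),
/// returning the chosen virtual base address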
pub fn allocate(
&mut self,
start: usize,
end: usize,
len: usize,
flags: MapAttributes,
usage: PageUsage,
) -> Result<usize, Errno> {
'l0: for page in (start..end).step_by(0x1000) {
for i in 0..len {
if self.translate(page + i * 0x1000).is_ok() {
continue 'l0;
}
}
for i in 0..len {
let phys = phys::alloc_page(usage).unwrap();
self.map(page + i * 0x1000, phys, flags).unwrap();
}
return Ok(page);
}
Err(Errno::OutOfMemory)
}
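/// Releases the physical page backing virtual `page`, clears its level-2
/// table entry and invalidates the corresponding TLB line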
pub fn unmap_single(&mut self, page: usize) -> Result<(), Errno> {
let l0i = page >> 30;
let l1i = (page >> 21) & 0x1FF;
let l2i = (page >> 12) & 0x1FF;
let l1_table = self.0.next_level_table(l0i).ok_or(Errno::DoesNotExist)?;
let l2_table = l1_table.next_level_table(l1i).ok_or(Errno::DoesNotExist)?;
let entry = l2_table[l2i];
if !entry.is_present() {
return Err(Errno::DoesNotExist);
}
let phys = unsafe { entry.address_unchecked() };
unsafe {
phys::free_page(phys);
}
l2_table[l2i] = Entry::invalid();
unsafe {
// TLBI VAAE1 takes the page number (VA[55:12]) in Xt[43:0], not the full VA
asm!("tlbi vaae1, {}", in(reg) (page >> 12));
}
// TODO release paging structure memory
Ok(())
}
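/// Unmaps and frees `len` consecutive pages starting at virtual `start`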
pub fn free(&mut self, start: usize, len: usize) -> Result<(), Errno> {
for i in 0..len {
self.unmap_single(start + i * 0x1000)?;
}
Ok(())
}
/// Performs a copy of the address space, cloning data owned by it
pub fn fork(&mut self) -> Result<&'static mut Self, Errno> {
let res = Self::alloc_empty()?;
@ -288,10 +348,12 @@ impl Space {
todo!();
// res.map(virt_addr, dst_phys, flags)?;
} else {
let writable = flags & MapAttributes::AP_BOTH_READONLY == MapAttributes::AP_BOTH_READWRITE;
if writable {
flags |= MapAttributes::AP_BOTH_READONLY | MapAttributes::EX_COW;
l2_table[l2i].set_cow();
unsafe {


@ -92,9 +92,9 @@ impl Process {
}
#[inline]
pub fn manipulate_space<R, F>(&self, f: F) -> R
where
F: FnOnce(&mut Space) -> R,
{
f(self.inner.lock().space.as_mut().unwrap())
}
@ -250,6 +250,8 @@ impl Process {
}
}
// TODO exiting from a signal handler that interrupted an I/O operation
// results in a deadlock
self.io.lock().handle_exit();
drop(lock);


@ -1,6 +1,7 @@
//! System call implementation
use crate::arch::{machine, platform::exception::ExceptionFrame};
use crate::mem::{phys::PageUsage, virt::MapAttributes};
use crate::debug::Level;
use crate::dev::timer::TimestampSource;
use crate::fs::create_filesystem;
@ -13,7 +14,7 @@ use libsys::{
debug::TraceLevel,
error::Errno,
ioctl::IoctlCmd,
proc::{ExitCode, Pid},
proc::{ExitCode, MemoryAccess, MemoryMap, Pid},
signal::{Signal, SignalDestination},
stat::{
AccessMode, DirectoryEntry, FdSet, FileDescriptor, FileMode, GroupId, MountOptions,
@ -198,6 +199,56 @@ pub fn syscall(num: SystemCall, args: &[usize]) -> Result<usize, Errno> {
SystemCall::GetCurrentDirectory => {
todo!()
}
SystemCall::Seek => {
todo!()
}
SystemCall::MapMemory => {
let len = args[1];
if len == 0 || (len & 0xFFF) != 0 {
return Err(Errno::InvalidArgument);
}
let acc = MemoryAccess::from_bits(args[2] as u32).ok_or(Errno::InvalidArgument)?;
let flags = MemoryMap::from_bits(args[3] as u32).ok_or(Errno::InvalidArgument)?;
let mut attrs = MapAttributes::NOT_GLOBAL | MapAttributes::SH_OUTER | MapAttributes::PXN;
if !acc.contains(MemoryAccess::READ) {
return Err(Errno::NotImplemented);
}
if acc.contains(MemoryAccess::WRITE) {
if acc.contains(MemoryAccess::EXEC) {
return Err(Errno::PermissionDenied);
}
attrs |= MapAttributes::AP_BOTH_READWRITE;
} else {
attrs |= MapAttributes::AP_BOTH_READONLY;
}
if !acc.contains(MemoryAccess::EXEC) {
attrs |= MapAttributes::UXN;
}
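// Net effect of the checks above (PXN is always set: the kernel never
// executes user pages):
//   R--  -> AP_BOTH_READONLY  | UXN | PXN
//   RW-  -> AP_BOTH_READWRITE | UXN | PXN
//   R-X  -> AP_BOTH_READONLY  | PXN
//   W+X is refused (PermissionDenied); no-READ is refused (NotImplemented)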
// TODO don't ignore flags
let usage = PageUsage::UserPrivate;
let proc = Process::current();
proc.manipulate_space(move |space| {
space.allocate(0x100000000, 0xF00000000, len / 4096, attrs, usage)
})
}
SystemCall::UnmapMemory => {
let addr = args[0];
let len = args[1];
if addr == 0 || len == 0 || addr & 0xFFF != 0 || len & 0xFFF != 0 {
return Err(Errno::InvalidArgument);
}
let proc = Process::current();
proc.manipulate_space(move |space| {
space.free(addr, len / 4096)
})?;
Ok(0)
}
// Process
SystemCall::Clone => {


@ -20,6 +20,10 @@ pub enum SystemCall {
SetGroupId = 14,
SetCurrentDirectory = 15,
GetCurrentDirectory = 16,
Seek = 17,
MapMemory = 18,
UnmapMemory = 19,
// Process manipulation
Fork = 32,
Clone = 33,


@ -3,7 +3,7 @@ use crate::{
debug::TraceLevel,
error::Errno,
ioctl::IoctlCmd,
proc::{ExitCode, Pid},
proc::{ExitCode, MemoryAccess, MemoryMap, Pid},
signal::{Signal, SignalDestination},
stat::{
AccessMode, DirectoryEntry, FdSet, FileDescriptor, FileMode, GroupId, MountOptions,
@ -451,3 +451,26 @@ pub fn sys_chdir(path: &str) -> Result<(), Errno> {
)
})
}
#[inline(always)]
pub fn sys_mmap(
hint: usize,
len: usize,
acc: MemoryAccess,
flags: MemoryMap,
) -> Result<usize, Errno> {
Errno::from_syscall(unsafe {
syscall!(
SystemCall::MapMemory,
argn!(hint),
argn!(len),
argn!(acc.bits()),
argn!(flags.bits())
)
})
}
#[inline(always)]
pub unsafe fn sys_munmap(addr: usize, len: usize) -> Result<(), Errno> {
Errno::from_syscall_unit(unsafe { syscall!(SystemCall::UnmapMemory, argn!(addr), argn!(len)) })
}
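For illustration, a minimal caller sketch (a hypothetical `demo` function, not part of this commit) that maps one anonymous read-write page, zeroes it, and unmaps it:

    fn demo() -> Result<(), Errno> {
        let page = sys_mmap(
            0, // hint, currently ignored by the kernel
            4096,
            MemoryAccess::READ | MemoryAccess::WRITE,
            MemoryMap::ANONYMOUS | MemoryMap::PRIVATE,
        )?;
        unsafe {
            core::ptr::write_bytes(page as *mut u8, 0, 4096);
            sys_munmap(page, 4096)?;
        }
        Ok(())
    }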


@ -16,6 +16,24 @@ pub struct Pid(u32);
#[repr(transparent)]
pub struct Pgid(u32);
bitflags! {
pub struct MemoryAccess: u32 {
const READ = 1 << 0;
const WRITE = 1 << 1;
const EXEC = 1 << 2;
}
}
bitflags! {
pub struct MemoryMap: u32 {
const BACKEND = 0x3 << 0;
const ANONYMOUS = 1 << 0;
const SHARING = 0x3 << 2;
const PRIVATE = 1 << 2;
}
}
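BACKEND and SHARING are two-bit field masks rather than independent flags, so a consumer compares the masked value against a variant. A decoding sketch (hypothetical helper, not part of this commit):

    fn is_private_anonymous(flags: MemoryMap) -> bool {
        flags & MemoryMap::BACKEND == MemoryMap::ANONYMOUS
            && flags & MemoryMap::SHARING == MemoryMap::PRIVATE
    }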
impl From<i32> for ExitCode {
fn from(f: i32) -> Self {
Self(f)


@ -8,3 +8,4 @@ edition = "2021"
[dependencies]
libsys = { path = "../libsys", features = ["user"] }
lazy_static = { version = "^1.4.0", features = ["spin_no_std"] }
memoffset = "^0.6.4"


@ -1,29 +1,251 @@
use core::alloc::{GlobalAlloc, Layout};
use core::mem::{size_of, MaybeUninit};
use core::ptr::null_mut;
use libsys::{
calls::{sys_mmap, sys_munmap},
debug::TraceLevel,
error::Errno,
mem::memset,
proc::{MemoryAccess, MemoryMap},
};
use memoffset::offset_of;
use crate::trace_debug;
struct Allocator;
const BLOCK_MAGIC: u32 = 0xBADB10C0;
const BLOCK_MAGIC_MASK: u32 = 0xFFFFFFF0;
const BLOCK_ALLOC: u32 = 1 << 0;
const SMALL_ZONE_ELEM: usize = 256;
const SMALL_ZONE_SIZE: usize = 6 * 0x1000;
const MID_ZONE_ELEM: usize = 2048;
const MID_ZONE_SIZE: usize = 24 * 0x1000;
const LARGE_ZONE_ELEM: usize = 8192;
const LARGE_ZONE_SIZE: usize = 48 * 0x1000;
struct ZoneList {
prev: *mut ZoneList,
next: *mut ZoneList,
}
#[repr(C)]
struct Zone {
size: usize,
list: ZoneList,
}
#[repr(C)]
struct Block {
prev: *mut Block,
next: *mut Block,
flags: u32,
size: u32,
}
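// In-memory layout of a zone (one mmap'ed region), as set up by Zone::alloc
// below:
//   [Zone header][Block header][payload ...][Block header][payload ...] ...
// Blocks form an address-ordered doubly-linked list; `size` counts payload
// bytes only, excluding the Block header itself.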
static mut SMALL_ZONE_LIST: MaybeUninit<ZoneList> = MaybeUninit::uninit();
static mut MID_ZONE_LIST: MaybeUninit<ZoneList> = MaybeUninit::uninit();
static mut LARGE_ZONE_LIST: MaybeUninit<ZoneList> = MaybeUninit::uninit();
impl ZoneList {
fn init(&mut self) {
self.prev = self;
self.next = self;
}
unsafe fn init_uninit(list: &mut MaybeUninit<Self>) {
list.assume_init_mut().init()
}
fn add(&mut self, new: *mut ZoneList) {
let new = unsafe { &mut *new };
let next = unsafe { &mut *self.next };
next.prev = new;
new.next = next;
new.prev = self;
self.next = new;
}
fn del(&mut self) {
let prev = unsafe { &mut *self.prev };
let next = unsafe { &mut *self.next };
next.prev = prev;
prev.next = next;
}
}
impl Zone {
fn alloc(size: usize) -> Result<*mut Self, Errno> {
let pages = sys_mmap(
0,
size,
MemoryAccess::READ | MemoryAccess::WRITE,
MemoryMap::ANONYMOUS | MemoryMap::PRIVATE,
)?;
trace_debug!("Zone::alloc({}) => {:#x}", size, pages);
let zone_ptr = pages as *mut Zone;
let head_ptr = (pages + size_of::<Zone>()) as *mut Block;
let zone = unsafe { &mut *zone_ptr };
let head = unsafe { &mut *head_ptr };
zone.list.init();
zone.size = size - size_of::<Zone>();
head.size = (size - (size_of::<Zone>() + size_of::<Block>())) as u32;
head.flags = BLOCK_MAGIC;
head.prev = null_mut();
head.next = null_mut();
Ok(zone)
}
unsafe fn free(zone: *mut Self) {
trace_debug!("Zone::free({:p})", zone);
sys_munmap(zone as usize, (&*zone).size + size_of::<Zone>())
.expect("Failed to unmap heap pages");
}
fn get(item: *mut ZoneList) -> *mut Zone {
((item as usize) - offset_of!(Zone, list)) as *mut Zone
}
}
unsafe fn zone_alloc(zone: &mut Zone, size: usize) -> *mut u8 {
assert_eq!(size & 15, 0);
let mut block = ((zone as *mut _ as usize) + size_of::<Zone>()) as *mut Block;
while !block.is_null() {
let block_ref = &mut *block;
if block_ref.flags & BLOCK_ALLOC != 0 {
block = block_ref.next;
continue;
}
if size == block_ref.size as usize {
block_ref.flags |= BLOCK_ALLOC;
let ptr = block.add(1) as *mut u8;
// TODO fill with zeros
return ptr;
} else if block_ref.size as usize >= size + size_of::<Block>() {
let cur_next = block_ref.next;
let cur_next_ref = &mut *cur_next;
let new_block = ((block as usize) + size_of::<Block>() + size) as *mut Block;
let new_block_ref = &mut *new_block;
if !cur_next.is_null() {
cur_next_ref.prev = new_block;
}
new_block_ref.next = cur_next;
new_block_ref.prev = block;
new_block_ref.size = ((block_ref.size as usize) - size_of::<Block>() - size) as u32;
new_block_ref.flags = BLOCK_MAGIC;
block_ref.next = new_block;
block_ref.size = size as u32;
block_ref.flags |= BLOCK_ALLOC;
let ptr = block.add(1) as *mut u8;
// TODO fill with zeros
return ptr;
}
block = block_ref.next;
}
null_mut()
}
unsafe fn alloc_from(list: &mut ZoneList, zone_size: usize, size: usize) -> *mut u8 {
let head = list as *mut ZoneList;
loop {
let mut zone = list.next;
while zone != head {
let ptr = zone_alloc(&mut *Zone::get(zone), size);
if !ptr.is_null() {
return ptr;
}
// Advance to the next zone, otherwise a zone without room would spin forever
zone = (*zone).next;
}
// No existing zone can hold the block: map a new one and retry
let zone = match Zone::alloc(zone_size) {
Ok(zone) => zone,
Err(e) => {
trace_debug!("Zone alloc failed: {:?}", e);
return null_mut();
}
};
list.add(&mut (&mut *zone).list);
}
}
unsafe impl GlobalAlloc for Allocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
assert!(layout.align() < 16);
let size = (layout.size() + 15) & !15;
trace_debug!("alloc({:?})", layout);
if size <= SMALL_ZONE_ELEM {
alloc_from(SMALL_ZONE_LIST.assume_init_mut(), SMALL_ZONE_SIZE, size)
} else if size <= MID_ZONE_ELEM {
alloc_from(MID_ZONE_LIST.assume_init_mut(), MID_ZONE_SIZE, size)
} else if size <= LARGE_ZONE_ELEM {
alloc_from(LARGE_ZONE_LIST.assume_init_mut(), LARGE_ZONE_SIZE, size)
} else {
todo!();
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
trace_debug!("free({:p}, {:?})", ptr, layout);
assert!(!ptr.is_null());
let mut block = ptr.sub(size_of::<Block>()) as *mut Block;
let mut block_ref = &mut *block;
if block_ref.flags & BLOCK_MAGIC_MASK != BLOCK_MAGIC {
panic!("Heap block is malformed: block={:p}, ptr={:p}", block, ptr);
}
if block_ref.flags & BLOCK_ALLOC == 0 {
panic!(
"Double free error in heap: block={:p}, ptr={:p}",
block, ptr
);
}
block_ref.flags &= !BLOCK_ALLOC;
let prev = block_ref.prev;
let next = block_ref.next;
// Dereference prev/next only after the null checks: forming a reference
// from a null pointer is undefined behavior
if !prev.is_null() && (*prev).flags & BLOCK_ALLOC == 0 {
let prev_ref = &mut *prev;
block_ref.flags = 0;
prev_ref.next = next;
if !next.is_null() {
(*next).prev = prev;
}
prev_ref.size += (block_ref.size as usize + size_of::<Block>()) as u32;
block = prev;
block_ref = &mut *block;
}
if !next.is_null() && (*next).flags & BLOCK_ALLOC == 0 {
let next_ref = &mut *next;
next_ref.flags = 0;
if !next_ref.next.is_null() {
(&mut *(next_ref.next)).prev = block;
}
block_ref.next = next_ref.next;
block_ref.size += (next_ref.size as usize + size_of::<Block>()) as u32;
}
if block_ref.prev.is_null() && block_ref.next.is_null() {
let zone = (block as usize - size_of::<Zone>()) as *mut Zone;
assert_eq!((zone as usize) & 0xFFF, 0);
(&mut *zone).list.del();
Zone::free(zone);
}
}
}
@ -34,3 +256,9 @@ fn alloc_error_handler(_layout: Layout) -> ! {
#[global_allocator]
static ALLOC: Allocator = Allocator;
pub unsafe fn init() {
ZoneList::init_uninit(&mut SMALL_ZONE_LIST);
ZoneList::init_uninit(&mut MID_ZONE_LIST);
ZoneList::init_uninit(&mut LARGE_ZONE_LIST);
}
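Once init() has set up the three zone lists, ordinary heap allocations are routed through them by size class. A usage sketch (assuming the alloc crate is linked against this global allocator):

    let v = alloc::vec![0u8; 64]; // 64 <= SMALL_ZONE_ELEM, served from a small zone
    drop(v); // dealloc() coalesces the freed block; an empty zone is unmapped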


@ -27,6 +27,7 @@ extern "C" fn _start(arg: &'static ProgramArgs) -> ! {
}
unsafe {
allocator::init();
thread::init_main();
env::setup_env(arg);
}