mem/heap: use libyalloc instead of linked_list_allocator in kernel

Mark Poliakov 2024-06-09 13:43:56 +03:00
parent 3383d0350c
commit 8eb5d2ecf1
18 changed files with 403 additions and 451 deletions
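
In short: the kernel heap switches from linked_list_allocator's Heap, which required a fixed 16x2MiB heap mapping set up at boot, to libyalloc's BucketAllocator, which grows and shrinks on demand through a PageProvider trait backed by the physical-memory manager.

For context, a minimal hosted sketch of the new interface, assuming only the names visible in the diff below. The LeakyProvider here is invented for illustration and simply leaks a Vec, mirroring the crate's own test-only DummyPageProvider; the kernel's real provider, KernelPageProvider in heap.rs, maps and frees physical pages via phys::alloc_pages_contiguous and phys::free_page.

use core::{alloc::Layout, ptr::NonNull};

use libyalloc::{
    allocator::BucketAllocator,
    sys::{PageProvider, PAGE_SIZE},
};

// Illustrative stand-in: "maps" pages by leaking a zeroed Vec.
struct LeakyProvider;

impl PageProvider for LeakyProvider {
    fn map_pages(count: usize) -> Option<NonNull<u8>> {
        let pages = vec![0u8; count * PAGE_SIZE];
        NonNull::new(pages.leak().as_mut_ptr())
    }

    fn unmap_pages(_address: NonNull<u8>, _count: usize) {
        // Deliberately leaked in this sketch.
    }
}

fn main() {
    // Unlike Heap::init(), no up-front memory region is needed:
    // pages are pulled from the provider lazily.
    let mut heap: BucketAllocator<LeakyProvider> = BucketAllocator::new();
    let layout = Layout::from_size_align(96, 8).unwrap();
    let ptr = heap.allocate(layout).expect("out of memory");
    heap.free(ptr, layout);
}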

Cargo.lock (generated, 409 lines changed)

File diff suppressed because it is too large

@@ -47,8 +47,6 @@ const_assert_eq!(KERNEL_L1_INDEX, 1);
// 2MiB max
const EARLY_MAPPING_L2I: usize = KERNEL_END_L2_INDEX + 1;
// 1GiB max
const HEAP_MAPPING_L1I: usize = KERNEL_L1_INDEX + 1;
// 1GiB max
const DEVICE_MAPPING_L1I: usize = KERNEL_L1_INDEX + 2;
const DEVICE_MAPPING_L3_COUNT: usize = 4;
// 16GiB max
@@ -59,9 +57,6 @@ pub const RAM_MAPPING_L1_COUNT: usize = 16;
const EARLY_MAPPING_OFFSET: usize =
MAPPING_OFFSET | (KERNEL_L1_INDEX * L1::SIZE) | (EARLY_MAPPING_L2I * L2::SIZE);
static mut EARLY_MAPPING_L3: PageTable<L3> = PageTable::zeroed();
// 1GiB for heap mapping
pub const HEAP_MAPPING_OFFSET: usize = MAPPING_OFFSET | (HEAP_MAPPING_L1I * L1::SIZE);
pub static mut HEAP_MAPPING_L2: PageTable<L2> = PageTable::zeroed();
// 1GiB for device MMIO mapping
const DEVICE_MAPPING_OFFSET: usize = MAPPING_OFFSET | (DEVICE_MAPPING_L1I * L1::SIZE);
static mut DEVICE_MAPPING_L2: PageTable<L2> = PageTable::zeroed();
@@ -232,18 +227,6 @@ pub unsafe fn map_ram_l1(index: usize) {
((index * L1::SIZE) as u64) | ram_block_flags().bits();
}
/// # Safety
///
/// Only meant to be used by the architecture initialization functions.
pub unsafe fn map_heap_l2(index: usize, page: PhysicalAddress) {
if index >= 512 {
todo!()
}
assert!(!HEAP_MAPPING_L2[index].is_present());
// TODO UXN, PXN
HEAP_MAPPING_L2[index] = PageEntry::normal_block(page, PageAttributes::empty());
}
// Device mappings
unsafe fn map_device_memory_l3(
base: PhysicalAddress,
@@ -394,7 +377,6 @@ pub unsafe fn init_fixed_tables() {
// TODO this could be built in compile-time too?
let early_mapping_l3_phys = addr_of!(EARLY_MAPPING_L3) as usize - KERNEL_VIRT_OFFSET;
let device_mapping_l2_phys = addr_of!(DEVICE_MAPPING_L2) as usize - KERNEL_VIRT_OFFSET;
let heap_mapping_l2_phys = addr_of!(HEAP_MAPPING_L2) as usize - KERNEL_VIRT_OFFSET;
for i in 0..DEVICE_MAPPING_L3_COUNT {
let device_mapping_l3_phys = PhysicalAddress::from_raw(
@@ -407,10 +389,6 @@
KERNEL_TABLES.l2.data[EARLY_MAPPING_L2I] =
(early_mapping_l3_phys as u64) | kernel_table_flags().bits();
assert_eq!(KERNEL_TABLES.l1.data[HEAP_MAPPING_L1I], 0);
KERNEL_TABLES.l1.data[HEAP_MAPPING_L1I] =
(heap_mapping_l2_phys as u64) | kernel_table_flags().bits();
assert_eq!(KERNEL_TABLES.l1.data[DEVICE_MAPPING_L1I], 0);
KERNEL_TABLES.l1.data[DEVICE_MAPPING_L1I] =
(device_mapping_l2_phys as u64) | kernel_table_flags().bits();


@@ -44,7 +44,6 @@ const_assert_eq!(KERNEL_L1_INDEX, 0);
// Mapped at boot
const EARLY_MAPPING_L2I: usize = KERNEL_START_L2_INDEX - 1;
const HEAP_MAPPING_L1I: usize = KERNEL_L1_INDEX + 1;
const DEVICE_MAPPING_L1I: usize = KERNEL_L1_INDEX + 2;
const RAM_MAPPING_L0I: usize = KERNEL_L0_INDEX - 1;
@@ -60,10 +59,6 @@ const EARLY_MAPPING_OFFSET: usize = CANONICAL_ADDRESS_MASK
| (KERNEL_L1_INDEX * L1::SIZE)
| (EARLY_MAPPING_L2I * L2::SIZE);
static mut EARLY_MAPPING_L3: PageTable<L3> = PageTable::zeroed();
// 1GiB for heap mapping
pub const HEAP_MAPPING_OFFSET: usize =
CANONICAL_ADDRESS_MASK | (KERNEL_L0_INDEX * L0::SIZE) | (HEAP_MAPPING_L1I * L1::SIZE);
pub(super) static mut HEAP_MAPPING_L2: PageTable<L2> = PageTable::zeroed();
// 1GiB for device MMIO mapping
const DEVICE_MAPPING_OFFSET: usize =
CANONICAL_ADDRESS_MASK | (KERNEL_L0_INDEX * L0::SIZE) | (DEVICE_MAPPING_L1I * L1::SIZE);
@@ -269,23 +264,6 @@ unsafe fn unmap_device_memory(map: &RawDeviceMemoryMapping<KernelTableManagerImpl
}
}
/// # Safety
///
/// Only meant to be called from memory initialization routines.
pub unsafe fn map_heap_block(index: usize, page: PhysicalAddress) {
if !page.is_page_aligned_for::<L2>() {
panic!("Attempted to map a misaligned 2MiB page");
}
assert!(index < 512);
if HEAP_MAPPING_L2[index].is_present() {
panic!("Page is already mapped: {:#x}", page);
}
// TODO NX
HEAP_MAPPING_L2[index] = PageEntry::<L2>::block(page, PageAttributes::WRITABLE);
}
/// Memory mapping which may be used for performing early kernel initialization
pub struct EarlyMapping<'a, T: ?Sized> {
value: &'a mut T,
@@ -371,7 +349,7 @@ pub fn clone_kernel_tables(dst: &mut PageTable<L0>) {
/// * 0xFFFFFF8000000000 .. 0xFFFFFF8000200000 : ---
/// * 0xFFFFFF8000200000 .. 0xFFFFFF8000400000 : EARLY_MAPPING_L3
/// * 0xFFFFFF8000400000 .. ... : KERNEL_TABLES.kernel_l3s
/// * 0xFFFFFF8040000000 .. 0xFFFFFF8080000000 : HEAP_MAPPING_L2
/// * 0xFFFFFF8040000000 .. 0xFFFFFF8080000000 : ---
/// * 0xFFFFFF8080000000 .. 0xFFFFFF8100000000 : DEVICE_MAPPING_L2
/// * 0xFFFFFF8080000000 .. 0xFFFFFF8080800000 : DEVICE_MAPPING_L3S
/// * 0xFFFFFF8080800000 .. 0xFFFFFF8100000000 : ...
@@ -383,7 +361,6 @@ pub unsafe fn init_fixed_tables() {
// TODO this could be built in compile-time too?
let early_mapping_l3_phys = addr_of!(EARLY_MAPPING_L3) as usize - KERNEL_VIRT_OFFSET;
let device_mapping_l2_phys = addr_of!(DEVICE_MAPPING_L2) as usize - KERNEL_VIRT_OFFSET;
let heap_mapping_l2_phys = addr_of!(HEAP_MAPPING_L2) as usize - KERNEL_VIRT_OFFSET;
let ram_mapping_l1_phys = addr_of!(RAM_MAPPING_L1) as usize - KERNEL_VIRT_OFFSET;
for i in 0..DEVICE_MAPPING_L3_COUNT {
@@ -397,9 +374,6 @@
KERNEL_TABLES.kernel_l2.data[EARLY_MAPPING_L2I] = (early_mapping_l3_phys as u64)
| (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();
assert_eq!(KERNEL_TABLES.kernel_l1.data[HEAP_MAPPING_L1I], 0);
KERNEL_TABLES.kernel_l1.data[HEAP_MAPPING_L1I] =
(heap_mapping_l2_phys as u64) | (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();
assert_eq!(KERNEL_TABLES.kernel_l1.data[DEVICE_MAPPING_L1I], 0);
KERNEL_TABLES.kernel_l1.data[DEVICE_MAPPING_L1I] = (device_mapping_l2_phys as u64)
| (PageAttributes::WRITABLE | PageAttributes::PRESENT).bits();


@@ -12,5 +12,6 @@ libk-util = { path = "../libk-util" }
libk-mm-interface = { path = "interface" }
vmalloc = { path = "../../lib/vmalloc" }
linked_list_allocator = "0.10.5"
libyalloc = { path = "../../../lib/libyalloc", default-features = false, features = ["dep-of-kernel"] }
log = "0.4.20"


@@ -1,41 +1,58 @@
//! Kernel's global heap allocator
use core::{
alloc::{GlobalAlloc, Layout},
ops::Range,
ptr::{null_mut, NonNull},
};
use kernel_arch::KernelTableManagerImpl;
use libk_mm_interface::address::PhysicalAddress;
use libk_util::sync::IrqSafeSpinlock;
use linked_list_allocator::Heap;
use libyalloc::{allocator::BucketAllocator, sys::PageProvider};
use crate::{address::Virtualize, phys, L3_PAGE_SIZE};
// TODO limits?
struct KernelPageProvider;
impl PageProvider for KernelPageProvider {
fn unmap_pages(address: NonNull<u8>, count: usize) {
log::trace!("Release {}K of heap", count * 4);
let phys = PhysicalAddress::from_virtualized(address.as_ptr().addr());
for i in 0..count {
unsafe {
phys::free_page(phys.add(i * L3_PAGE_SIZE));
}
}
}
fn map_pages(count: usize) -> Option<NonNull<u8>> {
log::trace!("Grow heap by {}K", count * 4);
let phys = phys::alloc_pages_contiguous(count).ok()?;
let virt = phys.raw_virtualize::<KernelTableManagerImpl>();
Some(unsafe { NonNull::new_unchecked(virt as *mut u8) })
}
}
/// Kernel heap manager
pub struct KernelAllocator {
inner: IrqSafeSpinlock<Heap>,
inner: IrqSafeSpinlock<BucketAllocator<KernelPageProvider>>,
}
impl KernelAllocator {
const fn empty() -> Self {
Self {
inner: IrqSafeSpinlock::new(Heap::empty()),
inner: IrqSafeSpinlock::new(BucketAllocator::new()),
}
}
unsafe fn init(&self, base: usize, size: usize) {
self.inner.lock().init(base as _, size);
}
fn range(&self) -> Range<usize> {
let lock = self.inner.lock();
lock.bottom() as usize..lock.top() as usize
}
}
unsafe impl GlobalAlloc for KernelAllocator {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
match self.inner.lock().allocate_first_fit(layout) {
Ok(v) => v.as_ptr(),
Err(e) => {
log::error!("Failed to allocate {:?}: {:?}", layout, e);
match self.inner.lock().allocate(layout) {
Some(p) => p.as_ptr(),
None => {
log::error!("Failed to allocate {:?}", layout);
null_mut()
}
}
@@ -43,24 +60,10 @@ unsafe impl GlobalAlloc for KernelAllocator {
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
let ptr = NonNull::new(ptr).unwrap();
self.inner.lock().deallocate(ptr, layout)
self.inner.lock().free(ptr, layout);
}
}
/// Kernel's global allocator
#[global_allocator]
pub static GLOBAL_HEAP: KernelAllocator = KernelAllocator::empty();
/// Sets up kernel's global heap with given memory range.
///
/// # Safety
///
/// The caller must ensure the range is valid and mapped virtual memory.
pub unsafe fn init_heap(heap_base: usize, heap_size: usize) {
GLOBAL_HEAP.init(heap_base, heap_size);
}
/// Returns the heap address range
pub fn heap_range() -> Range<usize> {
GLOBAL_HEAP.range()
}


@@ -48,12 +48,6 @@ dependencies = [
"syn 1.0.109",
]
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
version = "2.5.0"
@@ -90,6 +84,12 @@ dependencies = [
"syn 2.0.53",
]
[[package]]
name = "cc"
version = "1.0.99"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96c51067fd44124faa7f870b4b1c969379ad32b2ba805aa959430ceaa384f695"
[[package]]
name = "cfg-if"
version = "1.0.0"
@@ -255,6 +255,12 @@ dependencies = [
"yggdrasil-abi",
]
[[package]]
name = "libc"
version = "0.2.155"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
[[package]]
name = "libk"
version = "0.1.0"
@@ -294,7 +300,7 @@ dependencies = [
"kernel-arch",
"libk-mm-interface",
"libk-util",
"linked_list_allocator",
"libyalloc",
"log",
"vmalloc",
"yggdrasil-abi",
@@ -323,22 +329,11 @@ dependencies = [
]
[[package]]
name = "linked_list_allocator"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286"
name = "libyalloc"
version = "0.1.0"
dependencies = [
"spinning_top",
]
[[package]]
name = "lock_api"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45"
dependencies = [
"autocfg",
"scopeguard",
"libc",
"yggdrasil-rt",
]
[[package]]
@@ -416,12 +411,6 @@ version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1"
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
version = "1.0.197"
@@ -453,15 +442,6 @@ dependencies = [
"serde",
]
[[package]]
name = "spinning_top"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b9eb1a2f4c41445a3a0ff9abc5221c5fcd28e1f13cd7c0397706f9ac938ddb0"
dependencies = [
"lock_api",
]
[[package]]
name = "static_assertions"
version = "1.1.0"
@@ -554,6 +534,17 @@ dependencies = [
"serde",
]
[[package]]
name = "yggdrasil-rt"
version = "0.1.0"
dependencies = [
"abi-generator",
"abi-lib",
"cc",
"prettyplease",
"yggdrasil-abi",
]
[[package]]
name = "zerocopy"
version = "0.7.32"


@@ -12,19 +12,18 @@ use device_tree::dt::{DevTreeIndexPropExt, DevTreeNodeInfo, DeviceTree, FdtMemor
use git_version::git_version;
use kernel_arch_aarch64::{
mem::{
table::{L1, L2, L3},
EarlyMapping, HEAP_MAPPING_OFFSET, MEMORY_LIMIT, RAM_MAPPING_L1_COUNT,
table::{L1, L3},
EarlyMapping, MEMORY_LIMIT, RAM_MAPPING_L1_COUNT,
},
ArchitectureImpl, PerCpuData,
};
use libk::{arch::Cpu, device::external_interrupt_controller};
use libk_mm::{
address::{FromRaw, IntoRaw, PhysicalAddress},
heap,
phys::PhysicalMemoryRegion,
phys::{self, reserved::reserve_region},
pointer::PhysicalRef,
table::{EntryLevel, EntryLevelExt},
table::EntryLevelExt,
};
use libk_util::OneTimeInit;
use tock_registers::interfaces::Writeable;
@@ -151,9 +150,6 @@ impl AArch64 {
}
unsafe fn init_memory_management(&'static self, dtb: PhysicalAddress) -> Result<(), Error> {
// 16x2MiB
const HEAP_PAGES: usize = 16;
// Initialize the runtime mappings
kernel_arch_aarch64::mem::init_fixed_tables();
@@ -197,14 +193,6 @@ impl AArch64 {
phys::init_from_iter(regions, Self::map_physical_memory)?;
// Setup the heap
for i in 0..HEAP_PAGES {
let l2_page = phys::alloc_2m_page()?;
kernel_arch_aarch64::mem::map_heap_l2(i, l2_page);
}
heap::init_heap(HEAP_MAPPING_OFFSET, HEAP_PAGES * L2::SIZE);
// EarlyMapping for DTB no longer needed, it lives in physical memory and can be obtained
// through PhysicalRef
let dtb_slice: PhysicalRef<'static, [u8]> = PhysicalRef::map_slice(dtb, dtb_size);


@@ -8,9 +8,9 @@ use device_api::{interrupt::Irq, Device};
use git_version::git_version;
use kernel_arch_x86_64::{
mem::{
init_fixed_tables, map_heap_block,
init_fixed_tables,
table::{PageAttributes, PageEntry, PageTable, L1, L2, L3},
EarlyMapping, HEAP_MAPPING_OFFSET, MEMORY_LIMIT, RAM_MAPPING_L1,
EarlyMapping, MEMORY_LIMIT, RAM_MAPPING_L1,
},
PerCpuData,
};
@@ -19,7 +19,6 @@ use libk::{arch::Cpu, device::register_external_interrupt_controller};
use libk_device::register_monotonic_timestamp_provider;
use libk_mm::{
address::{FromRaw, IntoRaw, PhysicalAddress, Virtualize},
heap,
phys::{self, reserved::reserve_region, PhysicalMemoryRegion},
table::{EntryLevel, EntryLevelExt},
};
@@ -241,8 +240,6 @@ impl X86_64 {
}
unsafe fn init_memory_management(&self) -> Result<(), Error> {
const HEAP_PAGES: usize = 16;
init_fixed_tables();
// Reserve lower 4MiB just in case
@@ -258,16 +255,6 @@
&BootData::YBoot(data) => Self::init_physical_memory_from_yboot(data)?,
}
// Setup heap
for i in 0..HEAP_PAGES {
// Allocate in 2MiB chunks
let l2_page = phys::alloc_2m_page()?;
map_heap_block(i, l2_page);
}
heap::init_heap(HEAP_MAPPING_OFFSET, HEAP_PAGES * L2::SIZE);
Ok(())
}


@@ -41,7 +41,6 @@
use arch::Platform;
use kernel_arch::{Architecture, ArchitectureImpl};
use libk::arch::Cpu;
use libk_mm::heap;
use libk_util::sync::SpinFence;
use crate::{arch::PLATFORM, fs::sysfs, task::spawn_kernel_closure};
@@ -93,8 +92,6 @@ pub fn kernel_secondary_main() -> ! {
pub fn kernel_main() -> ! {
libk::panic::set_handler(panic::panic_handler);
debugln!("Heap: {:#x?}", heap::heap_range());
// Setup the sysfs
sysfs::init();
fs::add_pseudo_devices().unwrap();


@@ -17,7 +17,9 @@ libc = { version = "0.2.140", default-features = false }
yggdrasil-rt = { path = "../runtime", default-features = false }
[features]
default = []
default = ["global"]
global = []
dep-of-kernel = []
rustc-dep-of-std = [
"core",
"compiler_builtins",


@@ -2,33 +2,34 @@ use core::{alloc::Layout, ops::Index, ptr::NonNull};
use crate::{
bucket::Bucket,
util::{self, Assert, IsTrue},
sys::{self, PageProvider},
util::{Assert, IsTrue},
};
struct BucketList<const N: usize, const M: usize>
struct BucketList<P: PageProvider, const N: usize, const M: usize>
where
[u64; M / 64]: Sized,
Assert<{ M % 64 == 0 }>: IsTrue,
{
head: Option<NonNull<Bucket<N, M>>>,
head: Option<NonNull<Bucket<P, N, M>>>,
}
pub struct BucketAllocator {
pub struct BucketAllocator<P: PageProvider> {
// 1024x64 = 16 pages
buckets_1024: BucketList<1024, 64>,
buckets_1024: BucketList<P, 1024, 64>,
// 512x64 = 8 pages
buckets_512: BucketList<512, 64>,
buckets_512: BucketList<P, 512, 64>,
// 256x128 = 8 pages
buckets_256: BucketList<256, 128>,
buckets_256: BucketList<P, 256, 128>,
// 128x128 = 4 pages
buckets_128: BucketList<128, 128>,
buckets_128: BucketList<P, 128, 128>,
// 64x128 = 2 pages
buckets_64: BucketList<128, 128>,
buckets_64: BucketList<P, 128, 128>,
// 32x256 = 2 pages
buckets_32: BucketList<128, 128>,
buckets_32: BucketList<P, 128, 128>,
}
impl<const N: usize, const M: usize> BucketList<N, M>
impl<P: PageProvider, const N: usize, const M: usize> BucketList<P, N, M>
where
[u64; M / 64]: Sized,
Assert<{ M % 64 == 0 }>: IsTrue,
@@ -75,12 +76,12 @@ where
}
}
impl<const N: usize, const M: usize> Index<usize> for BucketList<N, M>
impl<P: PageProvider, const N: usize, const M: usize> Index<usize> for BucketList<P, N, M>
where
[u64; M / 64]: Sized,
Assert<{ M % 64 == 0 }>: IsTrue,
{
type Output = Bucket<N, M>;
type Output = Bucket<P, N, M>;
fn index(&self, index: usize) -> &Self::Output {
let mut current = 0;
@@ -100,7 +101,7 @@ where
}
}
impl BucketAllocator {
impl<P: PageProvider> BucketAllocator<P> {
pub const fn new() -> Self {
Self {
buckets_1024: BucketList::new(),
@@ -116,14 +117,13 @@ impl BucketAllocator {
let aligned = layout.pad_to_align();
match aligned.size() {
0 => todo!(),
..=32 => self.buckets_32.allocate(),
..=64 => self.buckets_64.allocate(),
..=128 => self.buckets_128.allocate(),
..=256 => self.buckets_256.allocate(),
..=512 => self.buckets_512.allocate(),
..=1024 => self.buckets_1024.allocate(),
size => util::map_pages((size + util::PAGE_SIZE - 1) / util::PAGE_SIZE),
size => P::map_pages((size + sys::PAGE_SIZE - 1) / sys::PAGE_SIZE),
}
}
@@ -131,7 +131,6 @@ impl BucketAllocator {
let aligned = layout.pad_to_align();
match aligned.size() {
0 => todo!(),
..=32 => self.buckets_32.free(ptr),
..=64 => self.buckets_64.free(ptr),
..=128 => self.buckets_128.free(ptr),
@ -139,8 +138,8 @@ impl BucketAllocator {
..=512 => self.buckets_512.free(ptr),
..=1024 => self.buckets_1024.free(ptr),
size => {
assert_eq!(usize::from(ptr.addr()) % util::PAGE_SIZE, 0);
util::unmap_pages(ptr, (size + util::PAGE_SIZE - 1) / util::PAGE_SIZE);
assert_eq!(usize::from(ptr.addr()) % sys::PAGE_SIZE, 0);
P::unmap_pages(ptr, (size + sys::PAGE_SIZE - 1) / sys::PAGE_SIZE);
}
}
}
@@ -151,10 +150,11 @@ mod tests {
use core::{alloc::Layout, ptr::NonNull};
use super::{BucketAllocator, BucketList};
use crate::sys::OsPageProvider;
#[test]
fn single_list_allocation() {
let mut list = BucketList::<32, 64>::new();
let mut list = BucketList::<OsPageProvider, 32, 64>::new();
let mut vec = vec![];
for _ in 0..4 * 64 + 3 {
@@ -173,7 +173,7 @@
fn multi_list_allocation() {
const SIZES: &[usize] = &[1, 3, 7, 15, 16, 24, 33, 65, 126, 255, 500, 1000];
let mut allocator = BucketAllocator::new();
let mut allocator = BucketAllocator::<OsPageProvider>::new();
let mut vec = vec![];
for _ in 0..65 {
@@ -199,7 +199,7 @@
#[test]
#[should_panic]
fn double_free() {
let mut allocator = BucketAllocator::new();
let mut allocator = BucketAllocator::<OsPageProvider>::new();
let layout = Layout::from_size_align(63, 32).unwrap();
let ptr = allocator.allocate(layout).unwrap();
@@ -213,7 +213,7 @@
fn large_alloc() {
const SIZES: &[usize] = &[2000, 2048, 4000, 4096, 8192];
let mut allocator = BucketAllocator::new();
let mut allocator = BucketAllocator::<OsPageProvider>::new();
let mut vec = vec![];
for &size in SIZES {


@@ -3,9 +3,12 @@ use core::{
ptr::NonNull,
};
use crate::util::{self, Assert, IsTrue, NonNullExt};
use crate::{
sys::PageProvider,
util::{Assert, IsTrue, NonNullExt},
};
pub struct Bucket<const N: usize, const M: usize>
pub struct Bucket<P: PageProvider, const N: usize, const M: usize>
where
[u64; M / 64]: Sized,
Assert<{ M % 64 == 0 }>: IsTrue,
@@ -13,10 +16,10 @@ where
pub(crate) data: NonNull<u8>,
bitmap: [u64; M / 64],
allocated_count: usize,
pub(crate) next: Option<NonNull<Bucket<N, M>>>,
pub(crate) next: Option<NonNull<Bucket<P, N, M>>>,
}
impl<const N: usize, const M: usize> Bucket<N, M>
impl<P: PageProvider, const N: usize, const M: usize> Bucket<P, N, M>
where
[u64; M / 64]: Sized,
Assert<{ M % 64 == 0 }>: IsTrue,
@@ -25,8 +28,8 @@ where
let data_page_count = (M * N + 0xFFF) / 0x1000;
let info_page_count = (size_of::<Self>() + 0xFFF) / 0x1000;
let data = util::map_pages(data_page_count)?;
let info = util::map_pages(info_page_count)?;
let data = P::map_pages(data_page_count)?;
let info = P::map_pages(info_page_count)?;
let bucket = unsafe { info.cast::<MaybeUninit<Self>>().as_mut() };
let bucket = bucket.write(Self {
@@ -85,11 +88,27 @@
mod tests {
use core::ptr::NonNull;
use crate::{bucket::Bucket, util::NonNullExt};
use crate::{
bucket::Bucket,
sys::{self, PageProvider},
util::NonNullExt,
};
pub struct DummyPageProvider;
impl PageProvider for DummyPageProvider {
fn map_pages(count: usize) -> Option<NonNull<u8>> {
let v = vec![0u8; count * sys::PAGE_SIZE];
let p = NonNull::from(&mut v.leak()[0]);
Some(p)
}
fn unmap_pages(_address: NonNull<u8>, _count: usize) {}
}
#[test]
fn bucket_creation() {
let mut bucket = Bucket::<32, 64>::new().unwrap();
let mut bucket = Bucket::<DummyPageProvider, 32, 64>::new().unwrap();
let bucket = unsafe { bucket.as_mut() };
assert_eq!(bucket.allocated_count, 0);
assert_eq!(bucket.next, None);
@@ -97,7 +116,7 @@
#[test]
fn bucket_allocation() {
let mut bucket = Bucket::<32, 64>::new().unwrap();
let mut bucket = Bucket::<DummyPageProvider, 32, 64>::new().unwrap();
let bucket = unsafe { bucket.as_mut() };
let mut vec = vec![];
@@ -132,8 +151,8 @@
#[test]
fn free_outside_of_bucket() {
let mut bucket0 = Bucket::<32, 64>::new().unwrap();
let mut bucket1 = Bucket::<64, 64>::new().unwrap();
let mut bucket0 = Bucket::<DummyPageProvider, 32, 64>::new().unwrap();
let mut bucket1 = Bucket::<DummyPageProvider, 64, 64>::new().unwrap();
let bucket0 = unsafe { bucket0.as_mut() };
let bucket1 = unsafe { bucket1.as_mut() };
@@ -150,7 +169,7 @@
#[test]
#[should_panic]
fn double_free() {
let mut bucket0 = Bucket::<32, 64>::new().unwrap();
let mut bucket0 = Bucket::<DummyPageProvider, 32, 64>::new().unwrap();
let bucket0 = unsafe { bucket0.as_mut() };
let ptr = bucket0.allocate().unwrap();


@@ -3,7 +3,7 @@ use core::{
ptr::{null_mut, NonNull},
};
use crate::{allocator::BucketAllocator, util::Spinlock};
use crate::{allocator::BucketAllocator, sys::OsPageProvider, util::Spinlock};
pub struct GlobalAllocator;
@@ -37,4 +37,5 @@
}
}
static GLOBAL_ALLOCATOR: Spinlock<BucketAllocator> = Spinlock::new(BucketAllocator::new());
static GLOBAL_ALLOCATOR: Spinlock<BucketAllocator<OsPageProvider>> =
Spinlock::new(BucketAllocator::new());


@@ -8,7 +8,7 @@
allocator_api
)]
#![cfg_attr(not(test), no_std)]
#![allow(incomplete_features)]
#![allow(incomplete_features, unexpected_cfgs)]
#![deny(fuzzy_provenance_casts, lossy_provenance_casts)]
#[cfg(test)]
@@ -16,5 +16,8 @@ extern crate test;
pub mod allocator;
mod bucket;
pub mod global;
pub mod sys;
mod util;
#[cfg(any(feature = "global", rust_analyzer))]
pub mod global;

lib/libyalloc/src/sys.rs (new file, 57 lines)

@@ -0,0 +1,57 @@
use core::ptr::NonNull;
pub const PAGE_SIZE: usize = 0x1000;
pub trait PageProvider {
fn map_pages(count: usize) -> Option<NonNull<u8>>;
fn unmap_pages(address: NonNull<u8>, count: usize);
}
#[cfg(not(feature = "dep-of-kernel"))]
pub struct OsPageProvider;
#[cfg(any(all(unix, not(feature = "dep-of-kernel")), rust_analyzer))]
impl PageProvider for OsPageProvider {
fn map_pages(count: usize) -> Option<NonNull<u8>> {
use core::ptr::null_mut;
let address = unsafe {
libc::mmap(
null_mut(),
count * PAGE_SIZE,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
-1,
0,
)
};
NonNull::new(address as *mut u8)
}
fn unmap_pages(address: NonNull<u8>, count: usize) {
unsafe {
libc::munmap(address.as_ptr() as _, count * PAGE_SIZE);
}
}
}
#[cfg(any(all(not(unix), not(feature = "dep-of-kernel")), rust_analyzer))]
impl PageProvider for OsPageProvider {
fn map_pages(count: usize) -> Option<NonNull<u8>> {
use yggdrasil_rt::mem::MappingSource;
let address = unsafe {
yggdrasil_rt::sys::map_memory(None, count * PAGE_SIZE, &MappingSource::Anonymous)
}
.ok()?;
NonNull::new(core::ptr::from_exposed_addr_mut(address))
}
fn unmap_pages(address: NonNull<u8>, count: usize) {
unsafe {
yggdrasil_rt::sys::unmap_memory(address.addr().into(), count * PAGE_SIZE).unwrap();
}
}
}


@@ -1,12 +1,12 @@
use core::ptr::NonNull;
#[cfg(any(not(feature = "dep-of-kernel"), rust_analyzer))]
use core::{
cell::UnsafeCell,
ops::{Deref, DerefMut},
ptr::NonNull,
sync::atomic::{AtomicBool, Ordering},
};
pub const PAGE_SIZE: usize = 0x1000;
pub enum Assert<const T: bool> {}
pub trait IsTrue {}
impl IsTrue for Assert<true> {}
@@ -21,60 +21,18 @@ impl<T> NonNullExt<T> for NonNull<T> {
}
}
#[cfg(any(not(feature = "dep-of-kernel"), rust_analyzer))]
pub struct Spinlock<T: ?Sized> {
state: AtomicBool,
data: UnsafeCell<T>,
}
#[cfg(any(not(feature = "dep-of-kernel"), rust_analyzer))]
pub struct SpinlockGuard<'a, T: ?Sized> {
lock: &'a Spinlock<T>,
}
pub fn map_pages(count: usize) -> Option<NonNull<u8>> {
#[cfg(unix)]
{
use core::ptr::null_mut;
let address = unsafe {
libc::mmap(
null_mut(),
count * PAGE_SIZE,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
-1,
0,
)
};
NonNull::new(address as *mut u8)
}
#[cfg(not(unix))]
{
use yggdrasil_rt::mem::MappingSource;
let address = unsafe {
yggdrasil_rt::sys::map_memory(None, count * PAGE_SIZE, &MappingSource::Anonymous)
}
.ok()?;
NonNull::new(core::ptr::from_exposed_addr_mut(address))
}
}
pub fn unmap_pages(address: NonNull<u8>, count: usize) {
#[cfg(unix)]
unsafe {
libc::munmap(address.as_ptr() as _, count * PAGE_SIZE);
}
#[cfg(not(unix))]
{
unsafe {
yggdrasil_rt::sys::unmap_memory(address.addr().into(), count * PAGE_SIZE).unwrap();
}
}
}
#[cfg(any(not(feature = "dep-of-kernel"), rust_analyzer))]
impl<T: ?Sized> Spinlock<T> {
pub const fn new(value: T) -> Self
where
@@ -100,6 +58,7 @@ impl<T: ?Sized> Spinlock<T> {
}
}
#[cfg(any(not(feature = "dep-of-kernel"), rust_analyzer))]
impl<'a, T: ?Sized> Deref for SpinlockGuard<'a, T> {
type Target = T;
@@ -108,16 +67,19 @@ impl<'a, T: ?Sized> Deref for SpinlockGuard<'a, T> {
}
}
#[cfg(any(not(feature = "dep-of-kernel"), rust_analyzer))]
impl<'a, T: ?Sized> DerefMut for SpinlockGuard<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.lock.data.get() }
}
}
#[cfg(any(not(feature = "dep-of-kernel"), rust_analyzer))]
impl<'a, T: ?Sized> Drop for SpinlockGuard<'a, T> {
fn drop(&mut self) {
self.lock.state.store(false, Ordering::Release);
}
}
#[cfg(any(not(feature = "dep-of-kernel"), rust_analyzer))]
unsafe impl<T: ?Sized> Sync for Spinlock<T> {}


@@ -57,7 +57,7 @@ fn build_uefi_image(
"if=/dev/zero",
&format!("of={}", image_path.display()),
"bs=1M",
"count=256",
"count=512",
],
false,
)?;