mman: rewrite physical memory manager

Better protection of reserved memory regions; merged "pool" allocation functions into the main memory manager.

parent 76ee873cb4 · commit e0f0087ccc
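The practical effect of the rewrite is that usage is now declared at allocation time: callers pass an enum page_usage (PU_KERNEL, PU_PRIVATE, PU_SHARED, PU_PAGING, PU_CACHE, ...) to mm_phys_alloc_page()/mm_phys_alloc_contiguous(), and mm_map_single() loses its separate usage argument. Below is a minimal userspace model of the per-usage accounting this enables; it mirrors the counter names from the diff but is not the kernel code.

// Toy model of per-usage physical page accounting; names mirror the diff,
// but this is an illustration only, not the kernel implementation.
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

enum page_usage {
    PU_UNKNOWN,
    PU_PRIVATE,
    PU_SHARED,
    PU_DEVICE,
    PU_KERNEL,
    PU_PAGING,
    PU_CACHE,
    _PU_COUNT
};

static size_t alloc_pages[_PU_COUNT];
static size_t pages_free = 4;               // pretend we manage four frames

static int toy_alloc(enum page_usage pu) {
    assert(pu != PU_UNKNOWN && pu < _PU_COUNT);
    if (!pages_free) {
        return -1;
    }
    --pages_free;
    ++alloc_pages[pu];                      // usage recorded at allocation time
    return 0;
}

static void toy_free(enum page_usage pu) {
    assert(alloc_pages[pu]);
    --alloc_pages[pu];
    ++pages_free;
}

int main(void) {
    toy_alloc(PU_KERNEL);
    toy_alloc(PU_PRIVATE);
    printf("kernel=%zu user=%zu free=%zu\n",
           alloc_pages[PU_KERNEL], alloc_pages[PU_PRIVATE], pages_free);
    toy_free(PU_PRIVATE);
    return 0;
}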
@ -27,9 +27,9 @@ static int elf_map_section(mm_space_t space, uintptr_t vma_dst, size_t size) {
// TODO: access flags (e.g. is section writable?)
if ((page_phys = mm_map_get(space, page_aligned + i * MM_PAGE_SIZE, NULL)) == MM_NADDR) {
// Allocation needed
assert((page_phys = mm_phys_alloc_page()) != MM_NADDR,
assert((page_phys = mm_phys_alloc_page(PU_PRIVATE)) != MM_NADDR,
"Failed to allocate memory\n");
assert(mm_map_single(space, page_aligned + i * MM_PAGE_SIZE, page_phys, MM_PAGE_USER | MM_PAGE_WRITE, PU_PRIVATE) == 0,
assert(mm_map_single(space, page_aligned + i * MM_PAGE_SIZE, page_phys, MM_PAGE_USER | MM_PAGE_WRITE) == 0,
"Failed to map memory\n");
}
}
@ -81,15 +81,15 @@ int do_pfault(struct amd64_exception_frame *frame, uintptr_t cr2, uintptr_t cr3)

if (page->refcount == 2) {
//kdebug("[%d] Cloning page @ %p\n", proc->pid, cr2 & MM_PAGE_MASK);
uintptr_t new_phys = mm_phys_alloc_page();
uintptr_t new_phys = mm_phys_alloc_page(PU_PRIVATE);
_assert(new_phys != MM_NADDR);
memcpy((void *) MM_VIRTUALIZE(new_phys), (const void *) MM_VIRTUALIZE(phys), MM_PAGE_SIZE);
_assert(mm_umap_single(space, cr2 & MM_PAGE_MASK, 1) == phys);
_assert(mm_map_single(space, cr2 & MM_PAGE_MASK, new_phys, MM_PAGE_USER | MM_PAGE_WRITE, PU_PRIVATE) == 0);
_assert(mm_map_single(space, cr2 & MM_PAGE_MASK, new_phys, MM_PAGE_USER | MM_PAGE_WRITE) == 0);
} else if (page->refcount == 1) {
//kdebug("[%d] Only one referring to %p now, claiming ownership\n", proc->pid, cr2 & MM_PAGE_MASK);
_assert(mm_umap_single(space, cr2 & MM_PAGE_MASK, 1) == phys);
_assert(mm_map_single(space, cr2 & MM_PAGE_MASK, phys, MM_PAGE_USER | MM_PAGE_WRITE, PU_PRIVATE) == 0);
_assert(mm_map_single(space, cr2 & MM_PAGE_MASK, phys, MM_PAGE_USER | MM_PAGE_WRITE) == 0);
} else {
//kdebug("Page refcount == %d\n", page->refcount);
panic("???\n");
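The copy-on-write path in do_pfault() keys off the page's reference count: at refcount == 2 the faulting address space gets a private copy of the frame, at refcount == 1 the last owner simply remaps the existing frame writable, and anything else is treated as a refcounting bug. A self-contained model of that decision follows; plain heap buffers stand in for physical frames, so this is an illustration rather than the kernel's handler.

// Simplified copy-on-write decision, modeled on the refcount checks in do_pfault().
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct page { size_t refcount; unsigned char *data; };

// Returns the buffer this owner should write to after the fault.
static unsigned char *cow_write_fault(struct page *pg) {
    if (pg->refcount == 2) {
        // Still shared with one other owner: copy, then drop our reference.
        unsigned char *copy = malloc(PAGE_SIZE);
        assert(copy);
        memcpy(copy, pg->data, PAGE_SIZE);
        --pg->refcount;
        return copy;
    } else if (pg->refcount == 1) {
        // We are the last owner: take the existing frame writable as-is.
        return pg->data;
    }
    // The kernel panics here; any other refcount means the accounting broke.
    abort();
}

int main(void) {
    struct page pg = { .refcount = 2, .data = calloc(1, PAGE_SIZE) };
    unsigned char *mine = cow_write_fault(&pg);   // gets a private copy
    mine[0] = 42;
    printf("shared=%d private=%d\n", pg.data[0], mine[0]);
    free(mine);
    free(pg.data);
    return 0;
}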
@ -14,6 +14,7 @@
#include "arch/amd64/mm/mm.h"
#include "arch/amd64/fpu.h"
#include "sys/block/ram.h"
#include "sys/mem/phys.h"
#include "sys/console.h"
#include "sys/config.h"
#include "sys/kernel.h"
@ -26,12 +27,27 @@
#include "sys/elf.h"
#include "sys/mm.h"

extern char _kernel_start, _kernel_end;

static uintptr_t multiboot_info_addr;
static struct multiboot_tag_mmap *multiboot_tag_mmap;
static struct multiboot_tag_module *multiboot_tag_initrd_module;
static struct multiboot_tag_elf_sections *multiboot_tag_sections;
static struct multiboot_tag_string *multiboot_tag_cmdline;
static struct multiboot_tag_framebuffer *multiboot_tag_framebuffer;
static struct multiboot_tag_mmap *multiboot_tag_mmap;
static struct multiboot_tag_module *multiboot_tag_initrd_module;
static struct multiboot_tag_elf_sections *multiboot_tag_sections;
static struct multiboot_tag_string *multiboot_tag_cmdline;
static struct multiboot_tag_framebuffer *multiboot_tag_framebuffer;

// Descriptors for reserved physical memory regions
static struct mm_phys_reserved phys_reserve_initrd;
static struct mm_phys_reserved phys_reserve_kernel = {
// TODO: use _kernel_start instead of this
// I was kinda lazy to add an additional reserved region for
// multiboot stuff, so I simplified things a bit:
// multiboot is known (don't know if it's a standard) to place
// its structures below the kernel, so if I reserve pages below the
// kernel, nothing should be overwritten
.begin = 0,
.end = MM_PHYS(&_kernel_end)
};

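Descriptors such as phys_reserve_kernel above are registered with mm_phys_reserve() and consulted while the multiboot memory map is scanned, so reserved frames never enter the allocator. The sketch below mirrors the is_reserved() check from the new phys.c, with a plain array standing in for the kernel's linked list and made-up region bounds.

// Array-based sketch of the reserved-region check; the kernel keeps these
// descriptors on a linked list instead.
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mm_phys_reserved { uintptr_t begin, end; };

static const struct mm_phys_reserved reserved[] = {
    { 0x00000000, 0x00400000 },   // e.g. everything below the kernel image
    { 0x01000000, 0x01200000 },   // e.g. an initrd module
};

static int is_reserved(uintptr_t addr) {
    for (size_t i = 0; i < sizeof(reserved) / sizeof(reserved[0]); ++i) {
        if (addr >= reserved[i].begin && addr < reserved[i].end) {
            return 1;
        }
    }
    return 0;
}

int main(void) {
    printf("%d %d\n", is_reserved(0x00100000), is_reserved(0x02000000));   // 1 0
    return 0;
}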
extern struct {
uint32_t eax, ebx;
@ -109,6 +125,17 @@ void kernel_early_init(void) {
rs232_init(RS232_COM1);
ps2_init();

// Before anything is allocated, reserve:
// 1. initrd pages
// 2. multiboot tag pages
mm_phys_reserve(&phys_reserve_kernel);

if (multiboot_tag_initrd_module) {
phys_reserve_initrd.begin = multiboot_tag_initrd_module->mod_start & ~0xFFF;
phys_reserve_initrd.end = (multiboot_tag_initrd_module->mod_end + 0xFFF) & ~0xFFF;
mm_phys_reserve(&phys_reserve_initrd);
}

amd64_phys_memory_map(multiboot_tag_mmap);

amd64_gdt_init();
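The initrd reservation rounds mod_start down and mod_end up to 4 KiB boundaries so the whole module is covered even when it is not page-aligned. A quick sketch of that rounding follows; the masks match the diff, the sample addresses are invented.

// Page-boundary rounding used when reserving the initrd region.
#include <stdint.h>
#include <stdio.h>

#define PAGE_FLOOR(x) ((x) & ~(uintptr_t) 0xFFF)
#define PAGE_CEIL(x)  (((x) + 0xFFF) & ~(uintptr_t) 0xFFF)

int main(void) {
    uintptr_t mod_start = 0x00A12345;   // made-up module bounds
    uintptr_t mod_end   = 0x00A2BEEF;
    printf("reserve %#lx .. %#lx\n",
           (unsigned long) PAGE_FLOOR(mod_start),
           (unsigned long) PAGE_CEIL(mod_end));   // 0xa12000 .. 0xa2c000
    return 0;
}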
@ -13,6 +13,7 @@ _kernel_base_phys = 0x400000;

SECTIONS {
. = _kernel_base_phys + _kernel_base;
_kernel_start = .;

.text ALIGN(4K) : AT(ADDR(.text) - _kernel_base)
{
@ -129,7 +129,7 @@ uintptr_t mm_umap_single(mm_space_t pml4, uintptr_t vaddr, uint32_t size) {
return old;
}

int mm_map_single(mm_space_t pml4, uintptr_t virt_addr, uintptr_t phys, uint64_t flags, int usage) {
int mm_map_single(mm_space_t pml4, uintptr_t virt_addr, uintptr_t phys, uint64_t flags) {
virt_addr = AMD64_MM_STRIPSX(virt_addr);
// TODO: support page sizes other than 4KiB
// (Though I can't think of any reason to use it)
@ -199,7 +199,6 @@ int mm_map_single(mm_space_t pml4, uintptr_t virt_addr, uintptr_t phys, uint64_t
struct page *pg = PHYS2PAGE(phys);
_assert(pg);
++pg->refcount;
pg->usage = usage;

pt[pti] = (phys & MM_PAGE_MASK) |
(flags & MM_PTE_FLAGS_MASK) |
@ -6,7 +6,6 @@
#include "arch/amd64/mm/mm.h"
#include "sys/panic.h"
#include "sys/heap.h"
#include "arch/amd64/mm/pool.h"
#include "arch/amd64/mm/phys.h"
#include "sys/mem/phys.h"
#include "sys/mm.h"
@ -30,10 +29,8 @@ void amd64_mm_init(void) {
kdebug("Memory manager init\n");

mm_kernel = &kernel_pd_res[5 * 512];
// Create a pool located right after kernel image
amd64_mm_pool_init((uintptr_t) &_kernel_end, MM_POOL_SIZE);

uintptr_t heap_base_phys = mm_phys_alloc_contiguous(KERNEL_HEAP >> 12);
uintptr_t heap_base_phys = mm_phys_alloc_contiguous(KERNEL_HEAP >> 12, PU_KERNEL);
assert(heap_base_phys != MM_NADDR, "Could not allocate %S of memory for kernel heap\n", KERNEL_HEAP);
kdebug("Setting up kernel heap of %S @ %p\n", KERNEL_HEAP, heap_base_phys);
amd64_heap_init(heap_global, heap_base_phys, KERNEL_HEAP);
@ -8,103 +8,226 @@
|
||||
#include "sys/mem/phys.h"
|
||||
#include "sys/mm.h"
|
||||
|
||||
extern int _kernel_end_phys;
|
||||
// Roughly 36MiB of lower memory is occupied by kernel so far:
|
||||
// The rest is available for both kernel and user allocation
|
||||
#define PHYS_ALLOWED_BEGIN ((((uintptr_t) &_kernel_end_phys + MM_POOL_SIZE) + 0xFFF) & ~0xFFF)
|
||||
#define PHYS_ALLOC_START_INDEX (PHYS_ALLOWED_BEGIN / MM_PAGE_SIZE)
|
||||
#define PHYS_MAX_PAGES ((1U << 30) / 0x1000)
|
||||
|
||||
static struct page _pages[PHYS_MAX_PAGES];
|
||||
static size_t _total_pages = 0, _alloc_pages = 0;
|
||||
struct page *mm_pages = _pages;
|
||||
// Reserve 1MiB at bottom
|
||||
#define LOW_BOUND 0x100000
|
||||
|
||||
void amd64_phys_stat(struct amd64_phys_stat *st) {
|
||||
st->limit = _total_pages * MM_PAGE_SIZE;
|
||||
st->pages_free = _total_pages - _alloc_pages;
|
||||
st->pages_used = _alloc_pages;
|
||||
struct page *mm_pages = NULL;
|
||||
static size_t _total_pages, _pages_free;
|
||||
static size_t _alloc_pages[_PU_COUNT];
|
||||
static spin_t phys_spin = 0;
|
||||
static struct mm_phys_reserved phys_reserve_mm_pages;
|
||||
static LIST_HEAD(reserved_regions);
|
||||
|
||||
static int is_reserved(uintptr_t addr) {
|
||||
struct mm_phys_reserved *res;
|
||||
list_for_each_entry(res, &reserved_regions, link) {
|
||||
if (addr >= res->begin && addr < res->end) {
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
uintptr_t mm_phys_alloc_page(void) {
|
||||
for (size_t i = PHYS_ALLOC_START_INDEX; i < PHYS_MAX_PAGES; ++i) {
|
||||
if (!(_pages[i].flags & PG_ALLOC)) {
|
||||
_assert(!_pages[i].refcount);
|
||||
_pages[i].flags |= PG_ALLOC;
|
||||
++_alloc_pages;
|
||||
void mm_phys_reserve(struct mm_phys_reserved *res) {
|
||||
list_head_init(&res->link);
|
||||
list_add(&res->link, &reserved_regions);
|
||||
kdebug("#### Reserve region: %p .. %p\n", res->begin, res->end);
|
||||
}
|
||||
|
||||
void mm_phys_stat(struct mm_phys_stat *st) {
|
||||
st->pages_total = _total_pages;
|
||||
st->pages_free = _pages_free;
|
||||
st->pages_used_kernel = _alloc_pages[PU_KERNEL];
|
||||
st->pages_used_user = _alloc_pages[PU_PRIVATE];
|
||||
st->pages_used_shared = _alloc_pages[PU_SHARED];
|
||||
st->pages_used_paging = _alloc_pages[PU_PAGING];
|
||||
st->pages_used_cache = _alloc_pages[PU_CACHE];
|
||||
}
|
||||
|
||||
uint64_t *amd64_mm_pool_alloc(void) {
|
||||
uint64_t *table;
|
||||
uintptr_t ptr;
|
||||
|
||||
ptr = mm_phys_alloc_page(PU_PAGING);
|
||||
_assert(ptr != MM_NADDR);
|
||||
|
||||
table = (uint64_t *) MM_VIRTUALIZE(ptr);
|
||||
memset(table, 0, MM_PAGE_SIZE);
|
||||
return table;
|
||||
}
|
||||
|
||||
void amd64_mm_pool_free(uint64_t *p) {
|
||||
memset(p, 0xFF, MM_PAGE_SIZE);
|
||||
mm_phys_free_page(MM_PHYS(p));
|
||||
}
|
||||
|
||||
uintptr_t mm_phys_alloc_page(enum page_usage pu) {
_assert(pu < _PU_COUNT && pu != PU_UNKNOWN);

uintptr_t irq;
spin_lock_irqsave(&phys_spin, &irq);

for (size_t i = LOW_BOUND >> 12; i < PHYS_MAX_PAGES; ++i) {
struct page *pg = &mm_pages[i];

if (!(pg->flags & PG_ALLOC)) {
_assert(pg->usage == PU_UNKNOWN);
_assert(pg->refcount == 0);
pg->usage = pu;
pg->flags |= PG_ALLOC;
++_alloc_pages[pu];
_assert(_pages_free);
--_pages_free;

spin_release_irqrestore(&phys_spin, &irq);
return i * MM_PAGE_SIZE;
}
}

spin_release_irqrestore(&phys_spin, &irq);
return MM_NADDR;
}

void mm_phys_free_page(uintptr_t addr) {
|
||||
struct page *page = PHYS2PAGE(addr);
|
||||
_assert(page);
|
||||
_assert(!page->refcount);
|
||||
_assert(page->flags & PG_ALLOC);
|
||||
--_alloc_pages;
|
||||
page->flags &= ~PG_ALLOC;
|
||||
uintptr_t irq;
|
||||
spin_lock_irqsave(&phys_spin, &irq);
|
||||
|
||||
struct page *pg = PHYS2PAGE(addr);
|
||||
_assert(!pg->refcount);
|
||||
_assert(pg->flags & PG_ALLOC);
|
||||
|
||||
_assert(_alloc_pages[pg->usage]);
|
||||
--_alloc_pages[pg->usage];
|
||||
++_pages_free;
|
||||
|
||||
pg->flags &= ~PG_ALLOC;
|
||||
pg->usage = PU_UNKNOWN;
|
||||
|
||||
spin_release_irqrestore(&phys_spin, &irq);
|
||||
}
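mm_phys_alloc_page() is now a first-fit scan over the global mm_pages array under a spinlock, starting at the 1 MiB mark, and mm_phys_free_page() undoes it by clearing PG_ALLOC and resetting the usage to PU_UNKNOWN. The toy below models only the first-fit part over a flags array (no locking, no usage counters), so it is an illustration of the scan, not the kernel implementation.

// First-fit page allocator sketch over a flags array, modeled on the new
// mm_phys_alloc_page()/mm_phys_free_page() pair.
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define NPAGES    64
#define PG_ALLOC  (1u << 0)
#define NADDR     ((uintptr_t) -1)

static uint32_t page_flags[NPAGES];

static uintptr_t alloc_page(void) {
    for (size_t i = 0; i < NPAGES; ++i) {
        if (!(page_flags[i] & PG_ALLOC)) {
            page_flags[i] |= PG_ALLOC;
            return (uintptr_t) i * PAGE_SIZE;   // "physical" address of the frame
        }
    }
    return NADDR;
}

static void free_page(uintptr_t addr) {
    page_flags[addr / PAGE_SIZE] &= ~PG_ALLOC;
}

int main(void) {
    uintptr_t a = alloc_page();
    uintptr_t b = alloc_page();
    free_page(a);
    printf("a=%#lx b=%#lx next=%#lx\n", (unsigned long) a,
           (unsigned long) b, (unsigned long) alloc_page());   // reuses slot 0
    return 0;
}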
|
||||
|
||||
uintptr_t mm_phys_alloc_contiguous(size_t count) {
|
||||
for (size_t i = PHYS_ALLOC_START_INDEX; i < PHYS_MAX_PAGES - count; ++i) {
|
||||
uintptr_t mm_phys_alloc_contiguous(size_t count, enum page_usage pu) {
|
||||
uintptr_t irq;
|
||||
spin_lock_irqsave(&phys_spin, &irq);
|
||||
|
||||
for (size_t i = LOW_BOUND >> 12; i < PHYS_MAX_PAGES - count; ++i) {
|
||||
for (size_t j = 0; j < count; ++j) {
|
||||
if (_pages[i + j].flags & PG_ALLOC) {
|
||||
if (mm_pages[i + j].flags & PG_ALLOC) {
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t j = 0; j < count; ++j) {
|
||||
_assert(!_pages[i + j].refcount);
|
||||
_pages[i + j].flags |= PG_ALLOC;
|
||||
_assert(!mm_pages[i + j].refcount);
|
||||
mm_pages[i + j].flags |= PG_ALLOC;
|
||||
mm_pages[i + j].usage = pu;
|
||||
|
||||
++_alloc_pages[pu];
|
||||
_assert(_pages_free);
|
||||
--_pages_free;
|
||||
}
|
||||
|
||||
spin_release_irqrestore(&phys_spin, &irq);
|
||||
return i * MM_PAGE_SIZE;
|
||||
fail:
|
||||
continue;
|
||||
}
|
||||
spin_release_irqrestore(&phys_spin, &irq);
|
||||
return MM_NADDR;
|
||||
}
|
||||
|
||||
static void phys_add_page(size_t index) {
|
||||
_pages[index].flags &= ~PG_ALLOC;
|
||||
_pages[index].refcount = 0;
|
||||
}
|
||||
|
||||
void amd64_phys_memory_map(const struct multiboot_tag_mmap *mmap) {
|
||||
kdebug("Kernel table pool ends @ %p\n", PHYS_ALLOWED_BEGIN);
|
||||
kdebug("Memory map @ %p\n", mmap);
|
||||
|
||||
static uintptr_t place_mm_pages(const struct multiboot_tag_mmap *mmap, size_t req_count) {
|
||||
size_t item_offset = offsetof(struct multiboot_tag_mmap, entries);
|
||||
|
||||
memset(_pages, 0xFF, sizeof(_pages));
|
||||
_total_pages = 0;
|
||||
_alloc_pages = 0;
|
||||
|
||||
// Collect usable physical memory information
|
||||
while (item_offset < mmap->size) {
|
||||
//const multiboot_memory_map_t *entry = (const multiboot_memory_map_t *) (curr_item);
|
||||
const struct multiboot_mmap_entry *entry =
|
||||
(const struct multiboot_mmap_entry *) ((uintptr_t) mmap + item_offset);
|
||||
uintptr_t page_aligned_begin = MAX((entry->addr + 0xFFF) & ~0xFFF, PHYS_ALLOWED_BEGIN);
|
||||
uintptr_t page_aligned_begin = (entry->addr + 0xFFF) & ~0xFFF;
|
||||
uintptr_t page_aligned_end = (entry->addr + entry->len) & ~0xFFF;
|
||||
|
||||
if (entry->type == 1 && page_aligned_end > page_aligned_begin) {
|
||||
kdebug("+++ %S @ %p\n", page_aligned_end - page_aligned_begin, page_aligned_begin);
|
||||
// Something like mm_phys_alloc_contiguous does, but
|
||||
// we don't yet have it obviously
|
||||
size_t collected = 0;
|
||||
uintptr_t base_addr = MM_NADDR;
|
||||
|
||||
for (uintptr_t addr = page_aligned_begin - PHYS_ALLOWED_BEGIN;
|
||||
addr < (page_aligned_end - PHYS_ALLOWED_BEGIN); addr += 0x1000) {
|
||||
size_t index = addr / MM_PAGE_SIZE;
|
||||
if (index >= PHYS_MAX_PAGES) {
|
||||
break;
|
||||
for (uintptr_t addr = page_aligned_begin; addr < page_aligned_end; addr += 0x1000) {
|
||||
if (is_reserved(addr)) {
|
||||
collected = 0;
|
||||
base_addr = MM_NADDR;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (base_addr == MM_NADDR) {
|
||||
base_addr = addr;
|
||||
}
|
||||
++collected;
|
||||
if (collected == req_count) {
|
||||
return base_addr;
|
||||
}
|
||||
phys_add_page(index);
|
||||
++_total_pages;
|
||||
}
|
||||
}
|
||||
|
||||
item_offset += mmap->entry_size;
|
||||
}
|
||||
|
||||
return MM_NADDR;
|
||||
}
|
||||
|
||||
void amd64_phys_memory_map(const struct multiboot_tag_mmap *mmap) {
|
||||
// Allocate space for mm_pages array
|
||||
size_t mm_pages_req_count = (PHYS_MAX_PAGES * sizeof(struct page) + 0xFFF) >> 12;
|
||||
uintptr_t mm_pages_addr = place_mm_pages(mmap, mm_pages_req_count);
|
||||
_assert(mm_pages_addr != MM_NADDR);
|
||||
|
||||
kdebug("Placing mm_pages (%u) at %p\n", mm_pages_req_count, mm_pages_addr);
|
||||
phys_reserve_mm_pages.begin = mm_pages_addr;
|
||||
phys_reserve_mm_pages.end = mm_pages_addr + mm_pages_req_count * MM_PAGE_SIZE;
|
||||
mm_phys_reserve(&phys_reserve_mm_pages);
|
||||
|
||||
mm_pages = (struct page *) MM_VIRTUALIZE(mm_pages_addr);
|
||||
for (size_t i = 0; i < PHYS_MAX_PAGES; ++i) {
|
||||
mm_pages[i].flags = PG_ALLOC;
|
||||
mm_pages[i].refcount = (size_t) -1L;
|
||||
}
|
||||
|
||||
kdebug("Memory map @ %p\n", mmap);
|
||||
|
||||
size_t item_offset = offsetof(struct multiboot_tag_mmap, entries);
|
||||
|
||||
_total_pages = 0;
|
||||
|
||||
// Collect usable physical memory information
|
||||
while (item_offset < mmap->size) {
|
||||
//const multiboot_memory_map_t *entry = (const multiboot_memory_map_t *) (curr_item);
|
||||
const struct multiboot_mmap_entry *entry =
|
||||
(const struct multiboot_mmap_entry *) ((uintptr_t) mmap + item_offset);
|
||||
uintptr_t page_aligned_begin = (entry->addr + 0xFFF) & ~0xFFF;
|
||||
uintptr_t page_aligned_end = (entry->addr + entry->len) & ~0xFFF;
|
||||
|
||||
if (entry->type == 1 && page_aligned_end > page_aligned_begin) {
|
||||
kdebug("+++ %S @ %p\n", page_aligned_end - page_aligned_begin, page_aligned_begin);
|
||||
|
||||
for (uintptr_t addr = page_aligned_begin; addr < page_aligned_end; addr += 0x1000) {
|
||||
extern char _kernel_end;
|
||||
|
||||
if (!is_reserved(addr) && addr >= MM_PHYS(&_kernel_end)) {
|
||||
struct page *pg = PHYS2PAGE(addr);
|
||||
pg->flags &= ~PG_ALLOC;
|
||||
pg->usage = PU_UNKNOWN;
|
||||
pg->refcount = 0;
|
||||
++_total_pages;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
item_offset += mmap->entry_size;
|
||||
}
|
||||
|
||||
_pages_free = _total_pages;
|
||||
|
||||
kdebug("%S available\n", _total_pages << 12);
|
||||
}
|
||||
|
@ -1,97 +0,0 @@
|
||||
#include "arch/amd64/mm/pool.h"
|
||||
#include "arch/amd64/mm/mm.h"
|
||||
#include "sys/assert.h"
|
||||
#include "sys/debug.h"
|
||||
#include "sys/panic.h"
|
||||
#include "sys/string.h"
|
||||
#include "sys/spin.h"
|
||||
|
||||
static struct {
|
||||
uint64_t track[512];
|
||||
uintptr_t start;
|
||||
uintptr_t index_last;
|
||||
size_t size;
|
||||
} amd64_mm_pool;
|
||||
|
||||
static spin_t pool_lock = 0;
|
||||
|
||||
void amd64_mm_pool_stat(struct amd64_pool_stat *st) {
|
||||
uintptr_t irq;
|
||||
spin_lock_irqsave(&pool_lock, &irq);
|
||||
|
||||
st->pages_free = 0;
|
||||
st->pages_used = 0;
|
||||
|
||||
for (size_t i = 0; i < amd64_mm_pool.size >> 18; ++i) {
|
||||
for (size_t j = 0; j < 64; ++j) {
|
||||
if (amd64_mm_pool.track[i] & (1ULL << j)) {
|
||||
++st->pages_used;
|
||||
} else {
|
||||
++st->pages_free;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
spin_release_irqrestore(&pool_lock, &irq);
|
||||
}
|
||||
|
||||
uint64_t *amd64_mm_pool_alloc(void) {
|
||||
uintptr_t irq;
|
||||
spin_lock_irqsave(&pool_lock, &irq);
|
||||
uint64_t *r = NULL;
|
||||
|
||||
for (size_t i = amd64_mm_pool.index_last; i < amd64_mm_pool.size >> 18; ++i) {
|
||||
for (size_t j = 0; j < 64; ++j) {
|
||||
if (!(amd64_mm_pool.track[i] & (1ULL << j))) {
|
||||
r = (uint64_t *) (amd64_mm_pool.start + ((i << 18) + (j << 12)));
|
||||
amd64_mm_pool.track[i] |= (1ULL << j);
|
||||
amd64_mm_pool.index_last = i;
|
||||
memset(r, 0, 4096);
|
||||
spin_release_irqrestore(&pool_lock, &irq);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < amd64_mm_pool.index_last; ++i) {
|
||||
for (size_t j = 0; j < 64; ++j) {
|
||||
if (!(amd64_mm_pool.track[i] & (1ULL << j))) {
|
||||
r = (uint64_t *) (amd64_mm_pool.start + ((i << 18) + (j << 12)));
|
||||
amd64_mm_pool.track[i] |= (1ULL << j);
|
||||
amd64_mm_pool.index_last = i;
|
||||
memset(r, 0, 4096);
|
||||
spin_release_irqrestore(&pool_lock, &irq);
|
||||
return r;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
spin_release_irqrestore(&pool_lock, &irq);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void amd64_mm_pool_free(uint64_t *page) {
|
||||
uintptr_t irq;
|
||||
spin_lock_irqsave(&pool_lock, &irq);
|
||||
uintptr_t a = (uintptr_t) page;
|
||||
|
||||
if (a < amd64_mm_pool.start || a >= (amd64_mm_pool.start + amd64_mm_pool.size)) {
|
||||
panic("The page does not belong to the pool: %p\n", a);
|
||||
}
|
||||
|
||||
a -= amd64_mm_pool.start;
|
||||
|
||||
size_t i = (a >> 18) & 0x1FF;
|
||||
size_t j = (a >> 12) & 0x3F;
|
||||
|
||||
assert(amd64_mm_pool.track[i] & (1ULL << j), "Double free error (pool): %p\n", page);
|
||||
|
||||
amd64_mm_pool.track[i] &= ~(1ULL << j);
|
||||
spin_release_irqrestore(&pool_lock, &irq);
|
||||
}
|
||||
|
||||
void amd64_mm_pool_init(uintptr_t begin, size_t size) {
|
||||
amd64_mm_pool.start = begin;
|
||||
amd64_mm_pool.size = size;
|
||||
memset(amd64_mm_pool.track, 0, 512 * sizeof(uint64_t));
|
||||
}
|
@ -26,42 +26,6 @@ no_match:
|
||||
return MM_NADDR;
|
||||
}
|
||||
|
||||
uintptr_t vmalloc(mm_space_t pml4, uintptr_t from, uintptr_t to, size_t npages, uint64_t flags, int usage) {
|
||||
uintptr_t addr = vmfind(pml4, from, to, npages);
|
||||
uintptr_t virt_page, phys_page;
|
||||
uint64_t rflags = flags & (MM_PAGE_USER | MM_PAGE_WRITE | MM_PAGE_NOEXEC);
|
||||
|
||||
if (addr == MM_NADDR) {
|
||||
return MM_NADDR;
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < npages; ++i) {
|
||||
virt_page = addr + i * MM_PAGE_SIZE;
|
||||
phys_page = mm_phys_alloc_page();
|
||||
|
||||
// Allocation of physical page failed, clean up
|
||||
if (phys_page == MM_NADDR) {
|
||||
// Unmap previously allocated pages
|
||||
for (size_t j = 0; j < i; ++j) {
|
||||
virt_page = addr + j * MM_PAGE_SIZE;
|
||||
// Deallocate physical pages that've already been mapped
|
||||
// We've mapped only 4KiB pages, so expect to unmap only
|
||||
// 4KiB pages
|
||||
assert((phys_page = mm_umap_single(pml4, virt_page, 1)) != MM_NADDR,
|
||||
"Failed to deallocate page when cleaning up botched alloc: %p\n", virt_page);
|
||||
|
||||
mm_phys_free_page(phys_page);
|
||||
}
|
||||
return MM_NADDR;
|
||||
}
|
||||
|
||||
// Succeeded, map the page
|
||||
assert(mm_map_single(pml4, virt_page, phys_page, rflags, usage) == 0, "Failed to map page: %p\n", virt_page);
|
||||
}
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
void vmfree(mm_space_t pml4, uintptr_t addr, size_t npages) {
|
||||
uintptr_t phys;
|
||||
for (size_t i = 0; i < npages; ++i) {
|
||||
|
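vmalloc() keeps its rollback behaviour: if a physical page allocation fails partway through, every page mapped so far is unmapped and freed before MM_NADDR is returned, leaving the address space unchanged. The snippet below shows the same unwind pattern with malloc() standing in for page allocation; it is an illustration of the pattern only.

// Rollback-on-partial-failure pattern, as in vmalloc(): release everything
// already acquired before reporting the failure.
#include <stdio.h>
#include <stdlib.h>

static int alloc_all(void **slots, size_t n) {
    for (size_t i = 0; i < n; ++i) {
        slots[i] = malloc(4096);
        if (!slots[i]) {
            // Unwind the allocations that already succeeded.
            for (size_t j = 0; j < i; ++j) {
                free(slots[j]);
                slots[j] = NULL;
            }
            return -1;
        }
    }
    return 0;
}

int main(void) {
    void *slots[8] = { 0 };
    if (alloc_all(slots, 8) == 0) {
        puts("all pages allocated");
        for (size_t i = 0; i < 8; ++i) {
            free(slots[i]);
        }
    }
    return 0;
}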
@ -157,14 +157,14 @@ static int ahci_port_alloc(struct ahci_port *port) {
|
||||
// 12288 bytes (page-aligned), 3 pages
|
||||
|
||||
// Command list and FIS buffer
|
||||
uintptr_t page0 = mm_phys_alloc_page(); //amd64_phys_alloc_page();
|
||||
uintptr_t page0 = mm_phys_alloc_page(PU_KERNEL); //amd64_phys_alloc_page();
|
||||
if (page0 == MM_NADDR) {
|
||||
kerror("Failed to allocate a page\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
// Command table
|
||||
uintptr_t page1 = mm_phys_alloc_contiguous(2); //amd64_phys_alloc_contiguous(2);
|
||||
uintptr_t page1 = mm_phys_alloc_contiguous(2, PU_KERNEL); //amd64_phys_alloc_contiguous(2);
|
||||
if (page1 == MM_NADDR) {
|
||||
kerror("Failed to allocate 2 pages\n");
|
||||
mm_phys_free_page(page0);
|
||||
|
@ -120,9 +120,9 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci *hc) {
|
||||
}
|
||||
|
||||
static void uhci_data_init(struct uhci *data, uint32_t bar4) {
|
||||
uintptr_t frame_list_page = mm_phys_alloc_contiguous(2);
|
||||
uintptr_t frame_list_page = mm_phys_alloc_contiguous(2, PU_KERNEL);
|
||||
_assert(frame_list_page != MM_NADDR && frame_list_page < 0x100000000);
|
||||
uintptr_t pool_page = mm_phys_alloc_page();
|
||||
uintptr_t pool_page = mm_phys_alloc_page(PU_KERNEL);
|
||||
_assert(pool_page != MM_NADDR && pool_page < 0x100000000);
|
||||
|
||||
data->iobase = bar4 & ~3;
|
||||
|
@ -38,7 +38,6 @@ KERNEL_OBJ=$(O)/arch/amd64/entry.o \
|
||||
$(O)/arch/amd64/mm/map.o \
|
||||
$(O)/arch/amd64/mm/phys.o \
|
||||
$(O)/arch/amd64/mm/vmalloc.o \
|
||||
$(O)/arch/amd64/mm/pool.o \
|
||||
$(O)/arch/amd64/hw/ps2.o \
|
||||
$(O)/arch/amd64/hw/irq.o \
|
||||
$(O)/arch/amd64/hw/rtc.o \
|
||||
|
fs/ram.c
@ -140,7 +140,7 @@ int ram_vnode_bset_resize(struct vnode *vn, size_t size) {
|
||||
|
||||
// Allocate L1 block, if it hasn't been yet
|
||||
if (!priv->bpa_l1[l1_index]) {
|
||||
blk = mm_phys_alloc_page();
|
||||
blk = mm_phys_alloc_page(PU_KERNEL);
|
||||
_assert(blk != MM_NADDR);
|
||||
priv->bpa_l1[l1_index] = (uintptr_t *) MM_VIRTUALIZE(blk);
|
||||
memset(priv->bpa_l1[l1_index], 0, RAM_BLOCKS_PER_Ln * sizeof(uintptr_t));
|
||||
@ -400,7 +400,7 @@ static ssize_t ramfs_vnode_write(struct ofile *of, const void *buf, size_t count
|
||||
|
||||
for (size_t i = block_offset; i < block_index; ++i) {
|
||||
// TODO: this is suboptimal - 3.5K get wasted
|
||||
blk = mm_phys_alloc_page();
|
||||
blk = mm_phys_alloc_page(PU_KERNEL);
|
||||
_assert(blk != MM_NADDR);
|
||||
blk = MM_VIRTUALIZE(blk) | RAM_BSET_ALLOC;
|
||||
_assert(ram_vnode_bset_set(vn, i, blk) == 0);
|
||||
@ -433,6 +433,7 @@ static ssize_t ramfs_vnode_write(struct ofile *of, const void *buf, size_t count
|
||||
static off_t ramfs_vnode_lseek(struct ofile *of, off_t off, int whence) {
|
||||
struct ram_vnode_private *priv;
|
||||
struct vnode *vn;
|
||||
int res;
|
||||
|
||||
vn = of->file.vnode;
|
||||
_assert(vn->fs_data);
|
||||
@ -442,7 +443,9 @@ static off_t ramfs_vnode_lseek(struct ofile *of, off_t off, int whence) {
|
||||
switch (whence) {
|
||||
case SEEK_SET:
|
||||
if ((size_t) off > priv->size) {
|
||||
return (off_t) -ESPIPE;
|
||||
if ((res = ramfs_vnode_truncate(of->file.vnode, off)) < 0) {
|
||||
return res;
|
||||
}
|
||||
}
|
||||
of->file.pos = off;
|
||||
break;
|
||||
@ -506,9 +509,9 @@ static int ramfs_vnode_unlink(struct vnode *node) {
|
||||
|
||||
if (node->type == VN_REG) {
|
||||
// Free the blocks by truncating the file to zero
|
||||
res = ramfs_vnode_truncate(node, 0);
|
||||
//res = ramfs_vnode_truncate(node, 0);
|
||||
if (res != 0) {
|
||||
return res;
|
||||
//return res;
|
||||
}
|
||||
}
|
||||
|
||||
@ -525,11 +528,34 @@ static int ramfs_vnode_unlink(struct vnode *node) {
|
||||
static int ramfs_vnode_truncate(struct vnode *at, size_t size) {
|
||||
struct ram_vnode_private *priv;
|
||||
_assert(priv = at->fs_data);
|
||||
|
||||
// TODO: allocate blocks on upward truncation
|
||||
if (size > priv->size) {
|
||||
panic("NYI\n");
|
||||
kdebug("Truncate up: %s\n", at->name);
|
||||
uintptr_t blk;
|
||||
size_t block_index, block_offset;
|
||||
struct vnode *vn = at;
|
||||
|
||||
// Number of last block
|
||||
block_index = (size + 511) / 512;
|
||||
// Old capacity
|
||||
block_offset = priv->bpa_cap;
|
||||
|
||||
_assert(ram_vnode_bset_resize(vn, size) == 0);
|
||||
|
||||
for (size_t i = block_offset; i < block_index; ++i) {
|
||||
// TODO: this is suboptimal - 3.5K get wasted
|
||||
blk = mm_phys_alloc_page(PU_KERNEL);
|
||||
_assert(blk != MM_NADDR);
|
||||
blk = MM_VIRTUALIZE(blk) | RAM_BSET_ALLOC;
|
||||
_assert(ram_vnode_bset_set(vn, i, blk) == 0);
|
||||
}
|
||||
|
||||
return 0;
|
||||
} else if (size < priv->size) {
|
||||
kdebug("Truncate down: %s\n", at->name);
|
||||
_assert(ram_vnode_bset_resize(at, size) == 0);
|
||||
}
|
||||
_assert(ram_vnode_bset_resize(at, size) == 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -207,7 +207,7 @@ int tar_init(struct fs *ramfs, void *mem_base) {
|
||||
|
||||
if (type == VN_LNK) {
|
||||
file_length = strlen(hdr->linkname);
|
||||
uintptr_t blk = mm_phys_alloc_page();
|
||||
uintptr_t blk = mm_phys_alloc_page(PU_KERNEL);
|
||||
_assert(blk != MM_NADDR);
|
||||
if ((res = ram_vnode_bset_resize(node, file_length)) != 0) {
|
||||
panic("Failed to resize file\n");
|
||||
|
fs/sysfs.c
@ -1,6 +1,5 @@
|
||||
#include "user/fcntl.h"
|
||||
#include "user/errno.h"
|
||||
#include "arch/amd64/mm/phys.h"
|
||||
#include "arch/amd64/mm/pool.h"
|
||||
#include "fs/sysfs.h"
|
||||
#include "sys/snprintf.h"
|
||||
@ -10,6 +9,7 @@
|
||||
#include "sys/string.h"
|
||||
#include "sys/thread.h"
|
||||
#include "sys/mem/slab.h"
|
||||
#include "sys/mem/phys.h"
|
||||
#include "fs/fs.h"
|
||||
#include "sys/mod.h"
|
||||
#include "sys/debug.h"
|
||||
@ -308,23 +308,21 @@ static int system_uptime_getter(void *ctx, char *buf, size_t lim) {
|
||||
}
|
||||
|
||||
static int system_mem_getter(void *ctx, char *buf, size_t lim) {
|
||||
struct amd64_phys_stat phys_st;
|
||||
struct mm_phys_stat phys_st;
|
||||
struct heap_stat heap_st;
|
||||
struct amd64_pool_stat pool_st;
|
||||
struct slab_stat slab_st;
|
||||
|
||||
slab_stat(&slab_st);
|
||||
amd64_phys_stat(&phys_st);
|
||||
amd64_mm_pool_stat(&pool_st);
|
||||
mm_phys_stat(&phys_st);
|
||||
heap_stat(heap_global, &heap_st);
|
||||
|
||||
sysfs_buf_printf(buf, lim, "PhysTotal: %u kB\n", phys_st.limit / 1024);
|
||||
sysfs_buf_printf(buf, lim, "PhysTotal: %u kB\n", phys_st.pages_total * 4);
|
||||
sysfs_buf_printf(buf, lim, "PhysFree: %u kB\n", phys_st.pages_free * 4);
|
||||
sysfs_buf_printf(buf, lim, "PhysUsed: %u kB\n", phys_st.pages_used * 4);
|
||||
|
||||
sysfs_buf_printf(buf, lim, "PoolTotal: %u kB\n", MM_POOL_SIZE / 1024);
|
||||
sysfs_buf_printf(buf, lim, "PoolFree: %u kB\n", pool_st.pages_free * 4);
|
||||
sysfs_buf_printf(buf, lim, "PoolUsed: %u kB\n", pool_st.pages_used * 4);
|
||||
sysfs_buf_printf(buf, lim, "UsedKernel: %u kB\n", phys_st.pages_used_kernel * 4);
|
||||
sysfs_buf_printf(buf, lim, "UsedUser: %u kB\n", phys_st.pages_used_user * 4);
|
||||
sysfs_buf_printf(buf, lim, "UsedShared: %u kB\n", phys_st.pages_used_shared * 4);
|
||||
sysfs_buf_printf(buf, lim, "UsedPaging: %u kB\n", phys_st.pages_used_paging * 4);
|
||||
sysfs_buf_printf(buf, lim, "UsedCache: %u kB\n", phys_st.pages_used_cache * 4);
|
||||
|
||||
sysfs_buf_printf(buf, lim, "HeapTotal: %u kB\n", heap_st.total_size / 1024);
|
||||
sysfs_buf_printf(buf, lim, "HeapFree: %u kB\n", heap_st.free_size / 1024);
|
||||
|
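The sysfs getter now reports the per-usage counters from mm_phys_stat(); with 4 KiB pages a page count converts to kilobytes by multiplying by 4, which is what the * 4 factors above do. A small sketch of that formatting follows, with snprintf() standing in for sysfs_buf_printf() and made-up numbers.

// Formatting a mm_phys_stat-like structure the way the sysfs getter does
// (pages to kB with 4 KiB pages); snprintf stands in for sysfs_buf_printf.
#include <stddef.h>
#include <stdio.h>

struct mm_phys_stat {
    size_t pages_total, pages_free;
    size_t pages_used_kernel, pages_used_user;
};

int main(void) {
    struct mm_phys_stat st = { .pages_total = 262144, .pages_free = 200000,
                               .pages_used_kernel = 30000, .pages_used_user = 32144 };
    char buf[256];
    snprintf(buf, sizeof(buf),
             "PhysTotal: %zu kB\nPhysFree: %zu kB\nUsedKernel: %zu kB\nUsedUser: %zu kB\n",
             st.pages_total * 4, st.pages_free * 4,
             st.pages_used_kernel * 4, st.pages_used_user * 4);
    fputs(buf, stdout);
    return 0;
}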
@ -3,17 +3,4 @@
|
||||
#include "sys/types.h"
|
||||
#include <stdint.h>
|
||||
|
||||
struct amd64_phys_stat {
|
||||
size_t pages_free;
|
||||
size_t pages_used;
|
||||
size_t limit;
|
||||
};
|
||||
|
||||
void amd64_phys_memory_map(const struct multiboot_tag_mmap *mmap);
|
||||
//void amd64_phys_memory_map(const multiboot_memory_map_t *mmap, size_t length);
|
||||
|
||||
//void amd64_phys_free(uintptr_t page);
|
||||
//uintptr_t amd64_phys_alloc_page(void);
|
||||
//uintptr_t amd64_phys_alloc_contiguous(size_t count);
|
||||
|
||||
void amd64_phys_stat(struct amd64_phys_stat *st);
|
||||
|
@ -4,6 +4,7 @@
|
||||
*/
|
||||
#pragma once
|
||||
#include "sys/types.h"
|
||||
#include "sys/list.h"
|
||||
|
||||
extern struct page *mm_pages;
|
||||
|
||||
@ -23,22 +24,43 @@ struct page {
|
||||
PU_SHARED, // Shared memory mapping
|
||||
PU_DEVICE, // Possibly shared device mapping
|
||||
PU_KERNEL, // Not a userspace page
|
||||
PU_PAGING, // Paging structures
|
||||
PU_CACHE,
|
||||
_PU_COUNT
|
||||
} usage;
|
||||
size_t refcount;
|
||||
};
|
||||
|
||||
struct mm_phys_reserved {
|
||||
uintptr_t begin, end;
|
||||
struct list_head link;
|
||||
};
|
||||
|
||||
struct mm_phys_stat {
|
||||
size_t pages_total;
|
||||
size_t pages_free;
|
||||
size_t pages_used_kernel;
|
||||
size_t pages_used_user;
|
||||
size_t pages_used_shared;
|
||||
size_t pages_used_paging;
|
||||
size_t pages_used_cache;
|
||||
};
|
||||
|
||||
void mm_phys_reserve(struct mm_phys_reserved *res);
|
||||
void mm_phys_stat(struct mm_phys_stat *st);
|
||||
|
||||
/**
|
||||
* @brief Allocate a single physical memory region of MM_PAGE_SIZE bytes
|
||||
* @return MM_NADDR on failure, a page-aligned physical address otherwise
|
||||
*/
|
||||
uintptr_t mm_phys_alloc_page(void);
|
||||
uintptr_t mm_phys_alloc_page(enum page_usage pu);
|
||||
|
||||
/**
|
||||
* @brief Allocates a contiguous physical memory region of MM_PAGE_SIZE * \p count bytes
|
||||
* @param count Size of the region in pages
|
||||
* @return MM_NADDR on failure, a page-aligned physical address otherwise
|
||||
*/
|
||||
uintptr_t mm_phys_alloc_contiguous(size_t count);
|
||||
uintptr_t mm_phys_alloc_contiguous(size_t count, enum page_usage pu);
|
||||
|
||||
/**
|
||||
* @brief Free a physical page
|
||||
|
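mm_phys_alloc_contiguous() applies the same first-fit idea to runs of pages: an inner loop verifies that count consecutive frames are free before any of them is committed. The sketch below models that run search over a toy flags array (no locking, no usage accounting); it is not the kernel code.

// Contiguous-run search modeled on mm_phys_alloc_contiguous(): only commit
// the run once every page in it is known to be free.
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NPAGES   32
#define PG_ALLOC (1u << 0)

static uint32_t page_flags[NPAGES];

static long alloc_contiguous(size_t count) {
    for (size_t i = 0; i + count <= NPAGES; ++i) {
        size_t j;
        for (j = 0; j < count; ++j) {
            if (page_flags[i + j] & PG_ALLOC) {
                break;              // run broken, try the next start index
            }
        }
        if (j == count) {
            for (j = 0; j < count; ++j) {
                page_flags[i + j] |= PG_ALLOC;
            }
            return (long) i;        // index of the first page in the run
        }
    }
    return -1;
}

int main(void) {
    page_flags[1] |= PG_ALLOC;      // poke a hole so the first full run starts at 2
    printf("run of 3 starts at page %ld\n", alloc_contiguous(3));   // 2
    return 0;
}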
@ -9,5 +9,5 @@
|
||||
#define VM_ALLOC_USER (MM_PAGE_USER)
|
||||
|
||||
uintptr_t vmfind(const mm_space_t pd, uintptr_t from, uintptr_t to, size_t npages);
|
||||
uintptr_t vmalloc(mm_space_t pd, uintptr_t from, uintptr_t to, size_t npages, uint64_t flags, int usage);
|
||||
//uintptr_t vmalloc(mm_space_t pd, uintptr_t from, uintptr_t to, size_t npages, uint64_t flags, int usage);
|
||||
void vmfree(mm_space_t pd, uintptr_t addr, size_t npages);
|
||||
|
@ -28,7 +28,7 @@ void mm_space_free(struct process *proc);
|
||||
|
||||
void mm_describe(const mm_space_t pd);
|
||||
|
||||
int mm_map_single(mm_space_t pd, uintptr_t virt_page, uintptr_t phys_page, uint64_t flags, int usage);
|
||||
int mm_map_single(mm_space_t pd, uintptr_t virt_page, uintptr_t phys_page, uint64_t flags);
|
||||
uintptr_t mm_umap_single(mm_space_t pd, uintptr_t virt_page, uint32_t size);
|
||||
uintptr_t mm_map_get(mm_space_t pd, uintptr_t virt, uint64_t *rflags);
|
||||
|
||||
|
@ -11,6 +11,11 @@
|
||||
|
||||
#define THREAD_MAX_FDS 16
|
||||
|
||||
#define THREAD_KSTACK_PAGES 4
|
||||
#define THREAD_USTACK_PAGES 8
|
||||
#define THREAD_USTACK_BEGIN 0x10000000
|
||||
#define THREAD_USTACK_END 0xF0000000
|
||||
|
||||
struct ofile;
|
||||
|
||||
enum thread_state {
|
||||
|
@ -167,8 +167,8 @@ static void block_cache_queue_erase(struct block_cache *cache, struct lru_node *
|
||||
}
|
||||
|
||||
static void block_cache_page_release(struct block_cache *cache, uintptr_t address, uintptr_t page) {
|
||||
kdebug("Block cache: release page %p\n", page & LRU_PAGE_MASK);
|
||||
if (page & LRU_PAGE_DIRTY) {
|
||||
kdebug("Block cache: write page %p\n", page & LRU_PAGE_MASK);
|
||||
_assert(blk_page_sync(cache->blk, address * cache->page_size, page & LRU_PAGE_MASK) == 0);
|
||||
}
|
||||
mm_phys_free_page(page & LRU_PAGE_MASK);
|
||||
@ -177,8 +177,7 @@ static void block_cache_page_release(struct block_cache *cache, uintptr_t addres
|
||||
static uintptr_t block_cache_page_alloc(struct block_cache *cache) {
|
||||
// Other sizes are not supported
|
||||
_assert(cache->page_size == MM_PAGE_SIZE);
|
||||
kdebug("Block cache: allocate page\n");
|
||||
return mm_phys_alloc_page();
|
||||
return mm_phys_alloc_page(PU_CACHE);
|
||||
}
|
||||
|
||||
void block_cache_mark_dirty(struct block_cache *cache, uintptr_t address) {
|
||||
|
@ -80,7 +80,7 @@ static int display_blk_mmap(struct blkdev *blk, uintptr_t base, size_t page_coun
|
||||
mm_map_single(proc->space,
|
||||
base + i * MM_PAGE_SIZE,
|
||||
phys + i * MM_PAGE_SIZE,
|
||||
pf | MM_PAGE_USER, PU_DEVICE);
|
||||
pf | MM_PAGE_USER);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
sys/execve.c
@ -92,7 +92,7 @@ static int procv_setup(const char *const argv[],
|
||||
|
||||
*procv_page_count = page_count;
|
||||
for (size_t i = 0; i < page_count; ++i) {
|
||||
phys_pages[i] = mm_phys_alloc_page();
|
||||
phys_pages[i] = mm_phys_alloc_page(PU_PRIVATE);
|
||||
_assert(phys_pages[i] != MM_NADDR);
|
||||
}
|
||||
|
||||
@ -289,8 +289,7 @@ int sys_execve(const char *path, const char **argv, const char **envp) {
|
||||
_assert(mm_map_single(proc->space,
|
||||
procv_virt + i * MM_PAGE_SIZE,
|
||||
procv_phys_pages[i],
|
||||
MM_PAGE_USER | MM_PAGE_WRITE,
|
||||
PU_PRIVATE) == 0);
|
||||
MM_PAGE_USER | MM_PAGE_WRITE) == 0);
|
||||
}
|
||||
uintptr_t *argv_fixup = (uintptr_t *) procv_virt;
|
||||
uintptr_t *envp_fixup = (uintptr_t *) procv_virt + procv_vecp[0] + 1;
|
||||
@ -312,9 +311,16 @@ int sys_execve(const char *path, const char **argv, const char **envp) {
|
||||
thr->data.rsp0 = thr->data.rsp0_top;
|
||||
|
||||
// Allocate a new user stack
|
||||
uintptr_t ustack = vmalloc(proc->space, 0x100000, 0xF0000000, 4, MM_PAGE_USER | MM_PAGE_WRITE /* | MM_PAGE_NOEXEC */, PU_PRIVATE);
|
||||
uintptr_t ustack = vmfind(proc->space, THREAD_USTACK_BEGIN, THREAD_USTACK_END, THREAD_USTACK_PAGES);
|
||||
_assert(ustack != MM_NADDR);
|
||||
for (size_t i = 0; i < THREAD_USTACK_PAGES; ++i) {
|
||||
uintptr_t phys = mm_phys_alloc_page(PU_PRIVATE);
|
||||
_assert(phys != MM_NADDR);
|
||||
mm_map_single(proc->space, ustack + i * MM_PAGE_SIZE, phys, MM_PAGE_WRITE | MM_PAGE_USER);
|
||||
}
|
||||
|
||||
thr->data.rsp3_base = ustack;
|
||||
thr->data.rsp3_size = 4 * MM_PAGE_SIZE;
|
||||
thr->data.rsp3_size = MM_PAGE_SIZE * THREAD_USTACK_PAGES;
|
||||
|
||||
if (was_kernel) {
|
||||
proc_add_entry(proc);
|
||||
|
@ -37,13 +37,13 @@ static int sys_mmap_anon(mm_space_t space, uintptr_t base, size_t page_count, in
|
||||
|
||||
// Map pages
|
||||
for (size_t i = 0; i < page_count; ++i) {
|
||||
uintptr_t phys = mm_phys_alloc_page();
|
||||
uintptr_t phys = mm_phys_alloc_page(map_usage);
|
||||
_assert(phys != MM_NADDR);
|
||||
struct page *page = PHYS2PAGE(phys);
|
||||
_assert(page);
|
||||
|
||||
page->flags |= PG_MMAPED;
|
||||
_assert(mm_map_single(space, base + i * MM_PAGE_SIZE, phys, map_flags, map_usage) == 0);
|
||||
_assert(mm_map_single(space, base + i * MM_PAGE_SIZE, phys, map_flags) == 0);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -214,7 +214,7 @@ int sys_shmget(size_t size, int flags) {
|
||||
list_head_init(&chunk->link);
|
||||
|
||||
for (size_t i = 0; i < size; ++i) {
|
||||
chunk->pages[i] = mm_phys_alloc_page();
|
||||
chunk->pages[i] = mm_phys_alloc_page(PU_SHARED);
|
||||
_assert(chunk->pages[i] != MM_NADDR);
|
||||
}
|
||||
|
||||
@ -246,11 +246,11 @@ void *sys_shmat(int id, const void *hint, int flags) {
|
||||
_assert(virt_base != MM_NADDR);
|
||||
|
||||
for (size_t i = 0; i < chunk->page_count; ++i) {
|
||||
_assert(PHYS2PAGE(chunk->pages[i])->usage == PU_SHARED);
|
||||
mm_map_single(space,
|
||||
virt_base + i * MM_PAGE_SIZE,
|
||||
chunk->pages[i],
|
||||
MM_PAGE_WRITE | MM_PAGE_USER,
|
||||
PU_SHARED);
|
||||
MM_PAGE_WRITE | MM_PAGE_USER);
|
||||
}
|
||||
|
||||
return (void *) virt_base;
|
||||
|
@ -81,7 +81,7 @@ struct slab_cache *slab_cache_get(size_t size) {
|
||||
////
|
||||
|
||||
static struct slab *slab_create(struct slab_cache *cp) {
|
||||
uintptr_t page_phys = mm_phys_alloc_page();
|
||||
uintptr_t page_phys = mm_phys_alloc_page(PU_KERNEL);
|
||||
|
||||
if (page_phys == MM_NADDR) {
|
||||
return NULL;
|
||||
|
@ -150,9 +150,9 @@ static int object_section_load(struct object *obj, Elf64_Shdr *shdr) {
|
||||
// Get or map physical page
|
||||
if (mm_map_get(mm_kernel, page, &page_phys) == MM_NADDR) {
|
||||
kdebug("MAP OBJECT PAGE %p\n", page);
|
||||
page_phys = mm_phys_alloc_page();
|
||||
page_phys = mm_phys_alloc_page(PU_KERNEL);
|
||||
_assert(page_phys != MM_NADDR);
|
||||
_assert(mm_map_single(mm_kernel, page, page_phys, MM_PAGE_WRITE, 0) == 0);
|
||||
_assert(mm_map_single(mm_kernel, page, page_phys, MM_PAGE_WRITE) == 0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -189,9 +189,9 @@ static int object_reloc(struct object *obj) {
|
||||
uint8_t *plt = obj->gotplt;
|
||||
_assert(!(((uintptr_t) plt) & 0xFFF));
|
||||
for (size_t i = 0; i < obj->gotplt_size; i += MM_PAGE_SIZE) {
|
||||
uintptr_t page_phys = mm_phys_alloc_page();
|
||||
uintptr_t page_phys = mm_phys_alloc_page(PU_KERNEL);
|
||||
_assert(mm_map_get(mm_kernel, (uintptr_t) plt + i, NULL) == MM_NADDR);
|
||||
_assert(mm_map_single(mm_kernel, (uintptr_t) plt + i, page_phys, MM_PAGE_WRITE, 0) == 0);
|
||||
_assert(mm_map_single(mm_kernel, (uintptr_t) plt + i, page_phys, MM_PAGE_WRITE) == 0);
|
||||
}
|
||||
|
||||
// Export global symbols
|
||||
|
sys/thread.c
@ -380,7 +380,7 @@ int sys_clone(int (*fn) (void *), void *stack, int flags, void *arg) {
|
||||
}
|
||||
|
||||
int thread_init(struct thread *thr, uintptr_t entry, void *arg, int flags) {
|
||||
uintptr_t stack_pages = mm_phys_alloc_contiguous(2); //amd64_phys_alloc_contiguous(2);
|
||||
uintptr_t stack_pages = mm_phys_alloc_contiguous(THREAD_KSTACK_PAGES, PU_KERNEL);
|
||||
_assert(stack_pages != MM_NADDR);
|
||||
|
||||
thr->signal_entry = MM_NADDR;
|
||||
@ -390,7 +390,7 @@ int thread_init(struct thread *thr, uintptr_t entry, void *arg, int flags) {
|
||||
thr->sched_next = NULL;
|
||||
|
||||
thr->data.rsp0_base = MM_VIRTUALIZE(stack_pages);
|
||||
thr->data.rsp0_size = MM_PAGE_SIZE * 2;
|
||||
thr->data.rsp0_size = MM_PAGE_SIZE * THREAD_KSTACK_PAGES;
|
||||
thr->data.rsp0_top = thr->data.rsp0_base + thr->data.rsp0_size;
|
||||
thr->flags = (flags & THR_INIT_USER) ? 0 : THREAD_KERNEL;
|
||||
thr->sigq = 0;
|
||||
@ -421,9 +421,15 @@ int thread_init(struct thread *thr, uintptr_t entry, void *arg, int flags) {
|
||||
uintptr_t ustack_base;
|
||||
if (!(flags & THR_INIT_STACK_SET)) {
|
||||
// Allocate thread user stack
|
||||
ustack_base = vmalloc(space, 0x1000000, 0xF0000000, 4, MM_PAGE_WRITE | MM_PAGE_USER, PU_PRIVATE);
|
||||
ustack_base = vmfind(space, THREAD_USTACK_BEGIN, THREAD_USTACK_END, THREAD_USTACK_PAGES);
|
||||
_assert(ustack_base != MM_NADDR);
|
||||
for (size_t i = 0; i < THREAD_USTACK_PAGES; ++i) {
|
||||
uintptr_t phys = mm_phys_alloc_page(PU_PRIVATE);
|
||||
_assert(phys != MM_NADDR);
|
||||
mm_map_single(space, ustack_base + i * MM_PAGE_SIZE, phys, MM_PAGE_WRITE | MM_PAGE_USER);
|
||||
}
|
||||
thr->data.rsp3_base = ustack_base;
|
||||
thr->data.rsp3_size = MM_PAGE_SIZE * 4;
|
||||
thr->data.rsp3_size = MM_PAGE_SIZE * THREAD_USTACK_PAGES;
|
||||
}
|
||||
}
|
||||
|
||||
@ -587,7 +593,7 @@ int sys_fork(struct sys_fork_frame *frame) {
|
||||
kdebug("New process #%d with main thread <%p>\n", dst->pid, dst_thread);
|
||||
|
||||
// Initialize dst thread
|
||||
uintptr_t stack_pages = mm_phys_alloc_contiguous(2); //amd64_phys_alloc_contiguous(2);
|
||||
uintptr_t stack_pages = mm_phys_alloc_contiguous(THREAD_KSTACK_PAGES, PU_KERNEL);
|
||||
_assert(stack_pages != MM_NADDR);
|
||||
list_head_init(&dst_thread->wait_head);
|
||||
thread_wait_io_init(&dst_thread->sleep_notify);
|
||||
@ -596,7 +602,7 @@ int sys_fork(struct sys_fork_frame *frame) {
|
||||
dst_thread->sched_next = NULL;
|
||||
|
||||
dst_thread->data.rsp0_base = MM_VIRTUALIZE(stack_pages);
|
||||
dst_thread->data.rsp0_size = MM_PAGE_SIZE * 2;
|
||||
dst_thread->data.rsp0_size = MM_PAGE_SIZE * THREAD_KSTACK_PAGES;
|
||||
dst_thread->data.rsp0_top = dst_thread->data.rsp0_base + dst_thread->data.rsp0_size;
|
||||
dst_thread->flags = 0;
|
||||
dst_thread->sigq = 0;
|
||||
|