@@ -6,6 +6,13 @@
  * If the bit is set, the page is in use. kmalloc() also just always hands
  * out entire pages, so let's just hope we never need more than PAGE_SIZE bytes
  * of contiguous memory lmao
+ *
+ * To manipulate the page directory once paging is enabled, we abuse the
+ * structural similarity between page directory and page table by mapping the
+ * last entry in the page directory to itself. This makes the MMU interpret the
+ * page directory as if it were a page table, giving us access to the individual
+ * page tables at `0xffc00000-0xffffffff` virtual. The last page, at
+ * address `0xfffff000-0xffffffff`, then points to the page directory itself.
  */
 
 #include <arch/page.h>
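/*
 * A rough sketch of the trick described above (illustration only, not part of
 * this patch): with the recursive mapping, the page table that covers a given
 * virtual address is always reachable through the 4 MiB window at 0xffc00000.
 * The helper name pte_of() is made up; the index math is the same as in
 * map_page() and virt_to_phys() further down.
 */
static inline struct x86_page_table_entry *pte_of(void *virt)
{
	size_t pd_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) / 1024;
	size_t pt_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) % 1024;
	struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[pd_index];
	return &pt->entries[pt_index];
}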
@@ -23,22 +30,33 @@
 extern void _image_start_phys;
 extern void _image_end_phys;
 
-/* 0 = free, 1 = allocated */
+/**
+ * @brief Page allocation bitmap.
+ * 0 = free, 1 = allocated.
+ *
+ * The pagemap manipulation code below is specifically kept agnostic to
+ * the type of the page map (u8/u16/u32 etc) so we can easily change it later
+ * if it has performance benefits (which it almost certainly has)
+ */
 static u8 *pagemap;
 static size_t pagemap_len;
-static void *page_start;
-static void *page_end;
+/* first and last dynamic page address (watch out, these are physical) */
+static void *dynpage_start;
+static void *dynpage_end;
 
 /**
  * @brief First page table for low memory (0 - 4 M).
  * This is initialized by the early boot routine in assembly so that paging
- * can be enabled (the kernel itself is mapped to `0xc0000000` by default).
+ * can be enabled (the kernel itself is mapped to `0xc0100000` by default).
  */
 struct x86_page_table pt0;
 /** @brief First page directory for low memory. */
 struct x86_page_directory pd0;
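/*
 * How the bitmap is addressed (illustration only, the helper names are made
 * up): page n of the dynamic area lives in byte n / 8, bit n % 8. get_page()
 * and put_page() below do exactly this computation inline.
 */
static inline int pagemap_test(size_t n)
{
	return (pagemap[n / 8] & (1 << (n % 8))) != 0;
}

static inline void pagemap_mark(size_t n, int allocated)
{
	if (allocated)
		pagemap[n / 8] |= (1 << (n % 8));
	else
		pagemap[n / 8] &= ~(1 << (n % 8));
}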
 
-int kmalloc_init(void *start, void *end)
+static void setup_pagemap(void);
+
+int kmalloc_init(void *start_phys, void *end_phys)
 {
 	/*
 	 * if the kernel image is loaded within the paging region (which is
@@ -46,46 +64,74 @@ int kmalloc_init(void *start, void *end)
 	 * to the end of the kernel image so we won't hand out pages that
 	 * actually store kernel data
 	 */
-	if (&_image_start_phys >= start && &_image_start_phys <= end)
-		start = &_image_end_phys;
+	if (&_image_start_phys >= start_phys && &_image_start_phys <= end_phys)
+		start_phys = &_image_end_phys;
 
-	page_start = ptr_align(start, PAGE_SIZE_LOG2);
-	page_end = ptr_align(end, -PAGE_SIZE_LOG2);
+	dynpage_start = ptr_align(start_phys, PAGE_SIZE_LOG2);
+	dynpage_end = ptr_align(end_phys, -PAGE_SIZE_LOG2);
 
-	if (page_end - page_start < 1024 * PAGE_SIZE) {
+	if (dynpage_end - dynpage_start < 1024 * PAGE_SIZE) {
 		kprintf("We have < 1024 pages for kmalloc(), this wouldn't go well\n");
 		return -1;
 	}
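	/*
	 * (Sizing note, not part of this patch: with the usual 4 KiB pages,
	 * 1024 * PAGE_SIZE is exactly 4 MiB, so the check above refuses to set
	 * up the allocator with less than 4 MiB of dynamic memory.)
	 */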
+	/*
+	 * Add an arbitrary offset to where dynpages actually start.
+	 * I have no idea if this is necessary, but i think it might be possible
+	 * that grub stores its info tags right after the kernel image which
+	 * would blow up _boot(). Until this is resolved, we just throw away
+	 * 128 KiB of RAM to be on the safe side. Techbros cope.
+	 */
+	dynpage_start += 32 * PAGE_SIZE;
 
-	pagemap = start;
-	pagemap_len = ((page_end - page_start) / PAGE_SIZE) / 8;
-	while (page_start - (void *)pagemap < pagemap_len) {
-		page_start += 8 * PAGE_SIZE;
-		pagemap_len--;
-	}
+	setup_pagemap();
 
 	kprintf("Kernel image: %p - %p\n", &_image_start_phys, &_image_end_phys);
 	kprintf("Page bitmap: %p - %p\n", pagemap, pagemap + pagemap_len);
-	kprintf("Paging area: %p - %p\n", page_start, page_end);
+	kprintf("Paging area: %p - %p\n", dynpage_start, dynpage_end);
 	kprintf("Available memory: %u bytes (%u pages)\n",
-		page_end - page_start, (page_end - page_start) / PAGE_SIZE);
+		dynpage_end - dynpage_start, (dynpage_end - dynpage_start) / PAGE_SIZE);
 
 	return 0;
 }
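/*
 * Example call (illustration only, the address range is made up): whoever
 * boots the kernel is expected to hand kmalloc_init() the physical bounds of
 * usable RAM, e.g. everything between 1 MiB and 64 MiB:
 *
 *	kmalloc_init((void *)0x100000, (void *)0x4000000);
 */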
 
-// int mem_init(void)
-// {
-// 	struct x86_page_directory *map = get_page();
-// 	if (map == NULL)
-// 		return -ENOMEM;
-// 	memset(map, 0, sizeof(*map));
-// }
-
-void map_page(vm_info_t *map, void *physical, void *virtual, enum mm_flags flags)
-{
+int map_page(void *phys, void *virt, enum mm_page_flags flags)
+{
+# ifdef DEBUG
+	if (phys != PAGE_ALIGN(phys))
+		kprintf("map_page(): unaligned physical address %p!\n", phys);
+	if (virt != PAGE_ALIGN(virt))
+		kprintf("map_page(): unaligned virtual address %p!\n", virt);
+# endif
+
+	struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;
+
+	size_t pd_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) / 1024;
+	size_t pt_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) % 1024;
+
+	struct x86_page_directory_entry *pd_entry = &pd->entries[pd_index];
+	if (!pd_entry->present) {
+		void *page = get_page();
+		if (page == NULL)
+			return -ENOMEM;
+		memset(page, 0, PAGE_SIZE);
+		pd_entry->shifted_address = (uintptr_t)page >> X86_PAGE_DIRECTORY_ADDRESS_SHIFT;
+		pd_entry->rw = 1;
+		pd_entry->present = 1;
+		vm_flush();
+	}
+
+	struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[pd_index];
+	struct x86_page_table_entry *pt_entry = &pt->entries[pt_index];
+	pt_entry->rw = (flags & MM_PAGE_RW) != 0;
+	pt_entry->user = (flags & MM_PAGE_USER) != 0;
+	pt_entry->write_through = 0;
+	pt_entry->cache_disabled = 0;
+	pt_entry->accessed = 0;
+	pt_entry->dirty = 0;
+	pt_entry->global = 0;
+	pt_entry->shifted_address = (uintptr_t)phys >> X86_PAGE_TABLE_ADDRESS_SHIFT;
+
+	pt_entry->present = 1;
+	vm_flush();
+
+	return 0;
+}
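/*
 * Example use of the two allocator halves above (illustration only, the
 * virtual address and helper name are made up): grab a free physical page
 * and make it visible somewhere in kernel space.
 */
static int example_map_scratch_page(void)
{
	void *phys = get_page();
	if (phys == NULL)
		return -ENOMEM;
	return map_page(phys, (void *)0xd0000000, MM_PAGE_RW);
}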
 
 static inline int find_zero_bit(u8 bitfield)
@@ -107,13 +153,14 @@ void *get_page(void)
 	for (size_t i = 0; i < pagemap_len; i++) {
 		if (pagemap[i] != 0xff) {
 			int bit = find_zero_bit(pagemap[i]);
-			if (bit == 8) {
+			if (bit < 8) {
+				page = dynpage_start + (i * 8 + bit) * PAGE_SIZE;
+				pagemap[i] |= (1 << bit);
+			} else {
 				kprintf("Throw your computer in the garbage\n");
-				break;
 			}
 
-			page = page_start + (i * 8 + bit) * PAGE_SIZE;
-			pagemap[i] |= (1 << bit);
 			break;
 		}
 	}
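/*
 * The scan above relies on find_zero_bit() returning the index of the first
 * cleared bit, or 8 when every bit is set (which can't happen here because of
 * the != 0xff guard). A possible implementation, for illustration only since
 * the real one is not shown in this diff:
 */
static inline int find_zero_bit_example(u8 bitfield)
{
	int bit;
	for (bit = 0; bit < 8; bit++) {
		if ((bitfield & (1 << bit)) == 0)
			break;
	}
	return bit;
}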
@@ -132,7 +179,7 @@ void put_page(void *page)
 		kprintf("Unaligned ptr %p passed to put_page()!\n", page);
 		return;
 	}
-	if (page < page_start || page >= page_end) {
+	if (page < dynpage_start || page >= dynpage_end) {
 		kprintf("Page %p passed to put_page() is not in the dynamic area!\n", page);
 		return;
 	}
@@ -142,7 +189,7 @@ void put_page(void *page)
 	memset(page, 'A', PAGE_SIZE);
 # endif
 
-	size_t page_number = (page - page_start) / PAGE_SIZE;
+	size_t page_number = (page - dynpage_start) / PAGE_SIZE;
 	size_t index = page_number / 8;
 	int bit = page_number % 8;
 	if ((pagemap[index] & (1 << bit)) == 0)
@@ -151,6 +198,109 @@ void put_page(void *page)
 	pagemap[index] &= ~(1 << bit);
 }
+
+void *virt_to_phys(void *virt)
+{
+	size_t pd_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) / 1024;
+	size_t pt_index = ((uintptr_t)virt >> PAGE_SIZE_LOG2) % 1024;
+
+	struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;
+	if (!pd->entries[pd_index].present)
+		return NULL;
+
+	struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[pd_index];
+	if (!pt->entries[pt_index].present)
+		return NULL;
+
+	uintptr_t address = pt->entries[pt_index].shifted_address << X86_PAGE_TABLE_ADDRESS_SHIFT;
+	return (void *)(address + ((uintptr_t)virt & ~PAGE_MASK));
+}
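/*
 * Worked example for virt_to_phys() (all numbers made up): for
 * virt = 0xc01234ab we get pd_index = 768 and pt_index = 0x123. If that page
 * table entry holds physical frame 0x00042000, the function returns
 * 0x00042000 + (0xc01234ab & 0xfff) = 0x000424ab.
 */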
+
+void vm_flush(void)
+{
+	__asm__ volatile(
+		" mov %%cr3, %%eax \n"
+		" mov %%eax, %%cr3 \n"
+		::: "eax"
+	);
+}
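/*
 * Writing CR3 back to itself throws away every non-global TLB entry, which is
 * the simplest way to make updated tables visible. A lighter-weight
 * alternative (not used here) would be to invalidate a single page, roughly:
 *
 *	__asm__ volatile("invlpg (%0)" : : "r"(virt) : "memory");
 *
 * The full flush is slower but always correct, which fits this early code.
 */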
+
+/**
+ * So, this is going to be a little awkward. Pretty much the entire mm code
+ * depends on the page bitmap, so we can't use any of it before the bitmap is
+ * actually present. This means we have to do *everything* by hand here.
+ */
+static void setup_pagemap(void)
+{
+	/*
+	 * If we blow up the pagemap we blow up the entire system, so we give
+	 * it its very own page table and map it somewhere far, far away from
+	 * anything else. A page table takes up exactly one page, so we cut
+	 * that away from the usable dynamic page area. So these two lines are
+	 * basically a replacement for a call to get_page().
+	 */
+	void *pt_phys = dynpage_start;
+	dynpage_start += PAGE_SIZE;
+
+	/*
+	 * As described in multiboot.S, the last entry in the page directory points
+	 * to the page directory itself so we can still manipulate it while we
+	 * are in virtual address space. The second-last entry in the page
+	 * directory is still free, so we put the page table for the bitmap there.
+	 * If you do the math, the page table therefore maps addresses
+	 * 0xff800000-0xffbfffff, which is where we start off with the bitmap.
+	 */
+	pagemap = (u8 *)0xff800000;
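	/*
	 * (The math spelled out, not part of this patch: one page directory
	 * entry covers 1024 * PAGE_SIZE = 4 MiB, and entry 1022 starts at
	 * 1022 * 0x400000 = 0xff800000, right below the self-map window at
	 * 0xffc00000.)
	 */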
+
+	/*
+	 * Now that we have a physical page for the page table, we need to
+	 * map it to a virtual address so we can fill its entries.
+	 * So this is basically a replacement for a call to map_page().
+	 */
+	struct x86_page_directory *pd = (struct x86_page_directory *)0xfffff000;
+	pd->entries[1022].shifted_address = (uintptr_t)pt_phys >> X86_PAGE_DIRECTORY_ADDRESS_SHIFT;
+	pd->entries[1022].rw = 1;
+	pd->entries[1022].present = 1;
+	vm_flush();
+
+	struct x86_page_table *pt = &((struct x86_page_table *)0xffc00000)[1022];
+	memset(pt, 0, sizeof(*pt));
+
+	/*
+	 * Alright, now we can actually fill the page table with entries for
+	 * the bitmap. Again, we just take away pages from the dynpage area,
+	 * until there is enough space. We also need to map those pages to the
+	 * virtual address, of course.
+	 */
+	void *pagemap_phys = dynpage_start;
+	size_t pt_index = 0;
+	do {
+		/*
+		 * take one page away from the dynamic area and reserve it for
+		 * the bitmap, and recalculate the required bitmap size in bytes
+		 */
+		dynpage_start += PAGE_SIZE;
+		pagemap_len = ((dynpage_end - dynpage_start) / PAGE_SIZE) / 8;
+
+		/* now add a page table entry for that page */
+		struct x86_page_table_entry *pt_entry = &pt->entries[pt_index];
+		uintptr_t address = (uintptr_t)pagemap_phys + pt_index * PAGE_SIZE;
+		pt_entry->shifted_address = address >> X86_PAGE_TABLE_ADDRESS_SHIFT;
+		pt_entry->present = 1;
+		pt_entry->rw = 1;
+
+		pt_index++;
+	} while (pagemap_len > (dynpage_start - pagemap_phys) / 8);
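	/*
	 * (Rough sizes, for illustration only: with 4 KiB pages, 128 MiB of
	 * dynamic memory is 32768 pages, so the bitmap itself needs
	 * 32768 / 8 = 4096 bytes, i.e. a single page is already enough to
	 * track all of it.)
	 */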
+
+	/*
+	 * Great! We have enough space for the bitmap, and it is mapped
+	 * correctly (at least i hope so). Now all that's left is to flush
+	 * the TLB once again to make the updated entries take effect, and
+	 * clear the bitmap.
+	 */
+	vm_flush();
+	memset(pagemap, 0, pagemap_len);
+}
 
 /*
  * This file is part of GayBSD.
  * Copyright (c) 2021 fef <owo@fef.moe>.