@ -27,20 +27,11 @@
# include <gay/util.h>
# include <string.h>
# include <strings.h>
/* from linker script */
extern void _image_start_phys ;
extern void _image_end_phys ;
/**
* @ brief Page allocation bitmap .
* 0 = free , 1 = allocated .
*/
static unsigned long * pagemap ;
/** @brief Pagemap length as in number of `unsigned long`s, *not* bytes! */
static usize pagemap_len ;
/* first and last dynamic page address (watch out, these are physical) */
static uintptr_t dynpage_start ;
static uintptr_t dynpage_end ;
@ -50,11 +41,9 @@ static uintptr_t dynpage_end;
* This is initialized by the early boot routine in assembly so that paging
* can be enabled ( the kernel itself is mapped to ` 0xc0100000 ` by default ) .
*/
struct x86_page_table pt0 ;
__asmlink struct x86_page_table pt0 ;
/** @brief First page directory for low memory. */
struct x86_page_directory pd0 ;
static void setup_pagemap ( void ) ;
__asmlink struct x86_page_directory pd0 ;
int mem_init ( uintptr_t start_phys , uintptr_t end_phys )
{
@ -83,8 +72,6 @@ int mem_init(uintptr_t start_phys, uintptr_t end_phys)
*/
dynpage_start + = 32 * PAGE_SIZE ;
setup_pagemap ( ) ;
kprintf ( " Available memory: %zu bytes (%lu pages) \n " ,
dynpage_end - dynpage_start ,
( unsigned long ) ( dynpage_end - dynpage_start ) / PAGE_SIZE ) ;
@ -104,6 +91,30 @@ int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags)
usize pd_index = ( ( uintptr_t ) virt > > PAGE_SHIFT ) / 1024 ;
usize pt_index = ( ( uintptr_t ) virt > > PAGE_SHIFT ) % 1024 ;
struct x86_page_directory_entry * pde = & X86_CURRENT_PD - > entries [ pd_index ] ;
if ( flags & MM_PAGE_HUGE ) {
# ifdef DEBUG
if ( phys ! = HUGEPAGE_ALIGN ( phys ) ) {
kprintf ( " map_page(): unaligned physical address %p! \n " ,
( void * ) phys ) ;
phys = HUGEPAGE_ALIGN ( phys ) ;
}
if ( virt ! = HUGEPAGE_ALIGN ( virt ) ) {
kprintf ( " map_page(): unaligned virtual address %p! \n " ,
virt ) ;
}
# endif
* ( unsigned long * ) pde = 0 ;
pde - > present = 1 ;
pde - > huge = 1 ;
pde - > rw = ( flags & MM_PAGE_RW ) ! = 0 ;
pde - > user = ( flags & MM_PAGE_USER ) ! = 0 ;
pde - > accessed = ( flags & MM_PAGE_ACCESSED ) ! = 0 ;
pde - > cache_disabled = ( flags & MM_PAGE_NOCACHE ) ! = 0 ;
return 0 ;
}
/*
* warning : pt might not be present yet before the if block below ,
* we only define it here already so we can easily call memset ( ) in
@ -111,27 +122,26 @@ int map_page(uintptr_t phys, void *virt, enum mm_page_flags flags)
*/
struct x86_page_table * pt = X86_CURRENT_PT ( pd_index ) ;
struct x86_page_directory_entry * pd_entry = & X86_CURRENT_PD - > entries [ pd_index ] ;
if ( ! pd_entry - > present ) {
if ( ! pde - > present ) {
uintptr_t pt_phys = get_page ( ) ;
if ( ! pt_phys )
return - ENOMEM ;
* ( unsigned long * ) pd _ entry = 0 ;
pd _ entry - > shifted_address = pt_phys > > PAGE_SHIFT ;
pd _ entry - > rw = 1 ;
pd _ entry - > present = 1 ;
* ( unsigned long * ) pd e = 0 ;
pd e- > shifted_address = pt_phys > > PAGE_SHIFT ;
pd e- > rw = 1 ;
pd e- > present = 1 ;
vm_flush ( ) ;
memset ( pt , 0 , sizeof ( * pt ) ) ;
}
struct x86_page_table_entry * pt _ entry = & pt - > entries [ pt_index ] ;
* ( unsigned long * ) pt _ entry = 0 ; /* zero out the entire entry first */
pt _ entry - > rw = ( flags & MM_PAGE_RW ) ! = 0 ;
pt _ entry - > user = ( flags & MM_PAGE_USER ) ! = 0 ;
pt _ entry - > cache_disabled = ( flags & MM_PAGE_NOCACHE ) ! = 0 ;
pt _ entry - > shifted_address = phys > > PAGE_SHIFT ;
pt _ entry - > present = 1 ;
struct x86_page_table_entry * pt e = & pt - > entries [ pt_index ] ;
* ( unsigned long * ) pt e = 0 ; /* zero out the entire entry first */
pt e- > rw = ( flags & MM_PAGE_RW ) ! = 0 ;
pt e- > user = ( flags & MM_PAGE_USER ) ! = 0 ;
pt e- > cache_disabled = ( flags & MM_PAGE_NOCACHE ) ! = 0 ;
pt e- > shifted_address = phys > > PAGE_SHIFT ;
pt e- > present = 1 ;
return 0 ;
}
@ -148,76 +158,33 @@ uintptr_t unmap_page(void *virt)
usize pd_index = ( ( uintptr_t ) virt > > PAGE_SHIFT ) / 1024 ;
usize pt_index = ( ( uintptr_t ) virt > > PAGE_SHIFT ) % 1024 ;
struct x86_page_directory_entry * pd _ entry = & pd - > entries [ pd_index ] ;
if ( ! pd _ entry - > present )
struct x86_page_directory_entry * pd e = & pd - > entries [ pd_index ] ;
if ( ! pd e- > present )
return 0 ;
struct x86_page_table * pt = X86_CURRENT_PT ( pd_index ) ;
struct x86_page_table_entry * pt_entry = & pt - > entries [ pt_index ] ;
if ( ! pt_entry - > present )
return 0 ;
uintptr_t phys_shifted = pt_entry - > shifted_address ;
* ( unsigned long * ) pt_entry = 0 ;
return phys_shifted < < PAGE_SHIFT ;
}
/**
 * @brief Allocate one physical page from the dynamic area.
 *
 * Scans the allocation bitmap for the first clear bit, marks it as used,
 * and returns the corresponding physical address.  Returns 0 when no free
 * page is available (0 can never be a valid dynamic page because the
 * dynamic area starts above the kernel image).
 */
uintptr_t get_page(void)
{
	uintptr_t page = 0;

	for (usize i = 0; i < pagemap_len; i++) {
		if (~pagemap[i] != 0) {
			/*
			 * for some stupid reason, the bit index returned by
			 * ffsl() starts at 1 rather than 0
			 * (and is 0 if there is no bit set)
			 */
			int bit = ffsl((long)~pagemap[i]) - 1;
			if (bit >= 0) {
				unsigned long page_number = i * sizeof(*pagemap) * 8 + bit;
				page = dynpage_start + page_number * PAGE_SIZE;
				pagemap[i] |= (1lu << bit);
			} else {
				/* ~pagemap[i] != 0 guarantees a set bit; unreachable */
				kprintf("Throw your computer in the garbage\n");
			}
			break;
		}
	}

	return page;
}
/**
 * @brief Release a physical page back to the dynamic area.
 *
 * Clears the page's bit in the allocation bitmap.  A double free is
 * reported but not fatal.  BUGFIX(review): the original text ended with a
 * stray `return phys;` inside this void function (merge residue from
 * unmap_page); it has been removed.
 */
void put_page(uintptr_t phys)
{
#ifdef DEBUG
	if (phys % PAGE_SIZE != 0) {
		kprintf("Unaligned ptr %p passed to put_page()!\n", (void *)phys);
		return;
	}
	if (phys < dynpage_start || phys >= dynpage_end) {
		kprintf("Page %p passed to put_page() is not in the dynamic area!\n",
			(void *)phys);
		return;
	}
#endif

	usize page_number = (phys - dynpage_start) >> PAGE_SHIFT;
	usize index = page_number / (sizeof(*pagemap) * 8);
	int bit = page_number % (sizeof(*pagemap) * 8);
	if ((pagemap[index] & (1lu << bit)) == 0)
		kprintf("Double free of page %p!\n", (void *)phys);
	pagemap[index] &= ~(1lu << bit);
}
/**
 * @ brief Page fault interrupt service routine .
 * NOTE ( review ) : the middle of this handler is elided by a diff hunk header
 * below ; the duplicated asm lines are old / new whitespace pairs from the
 * stripped patch and should collapse to one copy each when the file is
 * restored .
 */
void x86_isr_page_fault ( struct x86_trap_frame * frame , u32 error_code )
{
void * address ;
/* CR2 holds the linear address that caused the fault */
__asm__ volatile (
" mov %%cr2, %0 \n "
" mov %%cr2, %0 \n "
: " =r " ( address )
:
) ;
@ -247,9 +214,9 @@ void x86_isr_page_fault(struct x86_trap_frame *frame, u32 error_code)
x86_print_regs ( frame ) ;
kprintf ( " system halted " ) ;
/* unrecoverable : mask interrupts and halt forever */
__asm__ volatile (
" cli \n "
" 1: hlt \n "
" jmp 1b \n "
" cli \n "
" 1: hlt \n "
" jmp 1b \n "
) ;
}
@ -258,17 +225,26 @@ uintptr_t virt_to_phys(void *virt)
usize pd_index = ( ( uintptr_t ) virt > > PAGE_SHIFT ) / 1024 ;
usize pt_index = ( ( uintptr_t ) virt > > PAGE_SHIFT ) % 1024 ;
struct x86_page_directory * pd = X86_CURRENT_PD ;
if ( ! pd - > entries [ pd_index ] . present )
struct x86_page_directory _entry * pd e = & X86_CURRENT_PD - > entries [ pd_index ] ;
if ( ! pd e - > present )
return 0 ;
struct x86_page_table * pt = X86_CURRENT_PT ( pd_index ) ;
if ( ! pt - > entries [ pt_index ] . present )
return 0 ;
uintptr_t phys = 0 ;
if ( pde - > huge ) {
phys = pde - > shifted_address ;
phys < < = PAGE_SHIFT ; /* attention, this is not HUGEPAGE_SHIFT */
phys | = ( uintptr_t ) virt & ~ HUGEPAGE_MASK ;
} else {
struct x86_page_table * pt = X86_CURRENT_PT ( pd_index ) ;
struct x86_page_table_entry * pte = & pt - > entries [ pt_index ] ;
if ( pte - > present ) {
phys = pte - > shifted_address ;
phys < < = PAGE_SHIFT ;
phys | = ( uintptr_t ) virt & ~ PAGE_MASK ;
}
}
uintptr_t phys = pt - > entries [ pt_index ] . shifted_address < < PAGE_SHIFT ;
/* if the virtual address wasn't page aligned, add the offset into the page */
return phys | ( ( uintptr_t ) virt & ~ PAGE_MASK ) ;
return phys ;
}
void vm_flush ( void )
@ -280,85 +256,6 @@ void vm_flush(void)
) ;
}
/**
* So , this is going to be a little awkward . Pretty much the entire mm code
* depends on the page bitmap , so we can ' t use any of it before the bitmap is
* actually present . This means we have to do * everything * by hand here .
*/
static void setup_pagemap ( void )
{
/*
* If we blow up the pagemap we blow up the entire system , so we give
* it its very own page table and map it somewhere far , far away from
* anything else . A page table takes up exactly one page , so we cut
* that away from the usable dynamic page area . So these two lines are
* basically a replacement for a call to get_page ( ) .
*/
uintptr_t pt_phys = dynpage_start ;
dynpage_start + = PAGE_SIZE ;
/*
* As described in multiboot . S , the last entry in the page directory points
* to the page directory itself so we can still manipulate it while we
* are in virtual address space . The second - last entry in the page
* directory is still free , so we put the page table for the bitmap there .
* If you do the math , that page table therefore maps addresses
* 0xff800000 - 0xffbfffff , which is where we start off with the bitmap .
*/
pagemap = ( unsigned long * ) 0xff800000 ;
/*
* Now that we have a physical page for the page table , we need to
* map it to a virtual address so we can fill its entries .
* So this is basically a replacement for a call to map_page ( ) .
*/
struct x86_page_directory_entry * pd_entry = & X86_CURRENT_PD - > entries [ 1022 ] ;
* ( unsigned long * ) pd_entry = 0 ;
pd_entry - > shifted_address = pt_phys > > PAGE_SHIFT ;
pd_entry - > rw = 1 ;
pd_entry - > present = 1 ;
vm_flush ( ) ;
struct x86_page_table * pt = X86_CURRENT_PT ( 1022 ) ;
memset ( pt , 0 , sizeof ( * pt ) ) ;
/*
* Alright , now we can actually fill the page table with entries for
* the bitmap . Again , we just take away pages from the dynpage area ,
* until there is enough space . We also need to map those pages to the
* virtual address , of course .
*/
uintptr_t pagemap_phys = dynpage_start ;
usize pt_index = 0 ;
do {
/*
* take one page away from the dynamic area and reserve it for
* the bitmap , and recalculate the required bitmap length
*/
dynpage_start + = PAGE_SIZE ;
pagemap_len = ( dynpage_end - dynpage_start ) / ( PAGE_SIZE * sizeof ( * pagemap ) * 8 ) ;
/* now add a page table entry for that page */
struct x86_page_table_entry * pt_entry = & pt - > entries [ pt_index ] ;
* ( unsigned long * ) pt_entry = 0 ;
uintptr_t address = pagemap_phys + pt_index * PAGE_SIZE ;
pt_entry - > shifted_address = address > > PAGE_SHIFT ;
pt_entry - > present = 1 ;
pt_entry - > rw = 1 ;
pt_index + + ;
} while ( pagemap_len * sizeof ( * pagemap ) * 8 > ( dynpage_start - pagemap_phys ) ) ;
/*
* Great ! We have enough space for the bitmap , and it is mapped
* correctly ( at least i hope so ) . Now all that ' s left is to flush
* the TLB once again to make the updated entries take effect , and
* clear the bitmap .
*/
vm_flush ( ) ;
memset ( pagemap , 0 , pagemap_len * sizeof ( * pagemap ) ) ;
}
/*
* This file is part of GayBSD .
* Copyright ( c ) 2021 fef < owo @ fef . moe > .