@@ -96,10 +96,6 @@ static inline void claim_bmem_area(struct mm_zone *zone, const struct _bmem_area
     /* only the first page in the order group is inserted into
      * the freelist, but all of them need to be initialized */
     for (u_int i = 0; i < (1u << order); i++) {
-        if (pos >= end)
-            panic("page %p out of range", pos);
-        if (atom_read(&pos->count) != 420)
-            panic("page %p double initialized\n", pos);
         atom_init(&pos->count, 0);
         atom_init(&pos->attr, 0);
@@ -158,7 +154,8 @@ void paging_init(vm_paddr_t phys_end)
     vm_paddr_t bitmap_start_phys = __boot_pmalloc(bitmap_size_log2, MM_ZONE_NORMAL);
     panic_if(bitmap_start_phys == BOOT_PMALLOC_ERR,
              "cannot allocate memory for the page bitmaps");
-    memset(__v(bitmap_start_phys), 0, bitmap_total_size);
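+    /* clear the bitmap one page at a time */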
+    for (int i = 0; i < (1 << bitmap_size_log2); i++)
+        __boot_clear_page(bitmap_start_phys + (i * PAGE_SIZE));
 
     /*
      * initialize the pools
@@ -192,7 +189,7 @@ void paging_init(vm_paddr_t phys_end)
         /* This is merely an optimization to simplify checking whether
          * two buddies can be coalesced into one.  In reality, the
          * reference count is invalid because the page is reserved. */
-        atom_init(&vm_page_array[pfn].count, 420);
+        atom_init(&vm_page_array[pfn].count, INT_MIN);
         atom_init(&vm_page_array[pfn].attr, _PGA_RSVD_MASK);
         vm_page_array[pfn].pfn = pfn;
     }
@@ -207,12 +204,12 @@ void paging_init(vm_paddr_t phys_end)
             /* make sure the boot memory allocator cannot under any circumstances hand
              * out pages from this area anymore, even though that should be unnecessary */
             clist_del(&area->link);
             claim_bmem_area(zone, area);
-            zone->thrsh.emerg = latom_read(&zone->free_count) / CFG_PAGE_EMERG_DENOM;
-            if (zone->thrsh.emerg > CFG_PAGE_EMERG_MAX)
-                zone->thrsh.emerg = CFG_PAGE_EMERG_MAX;
         }
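+        /* compute the emergency reserve once per zone, after all of its areas have been claimed */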
+        zone->thrsh.emerg = latom_read(&zone->free_count) / CFG_PAGE_EMERG_DENOM;
+        if (zone->thrsh.emerg > CFG_PAGE_EMERG_MAX)
+            zone->thrsh.emerg = CFG_PAGE_EMERG_MAX;
     }
 }
@@ -227,9 +224,18 @@ vm_page_t page_alloc(u_int order, enum mflags flags)
 {
     if (order > MM_MAX_ORDER) {
         page_debug("get_pages(%d, %#08x): Order too high!\n", order, flags);
-        return nil;
+        return INVALID_PAGE;
     }
 
+    /*
+     * See if the requested zone has enough free pages for the allocation.
+     * If not, fall back to lower physical memory (i.e. use a zone with a
+     * smaller index).  Repeat until we either find a zone that has enough
+     * free pages, or we've run out of zones (in which case the allocation
+     * failed).  Just because we found a zone doesn't mean we've succeeded,
+     * since the pages in that zone might not be contiguous.  If they're
+     * not, we have to try again (see further down below).
+     */
     struct mm_zone *zone = &mm_zones[_M_ZONE_INDEX(flags)];
     long count_after;
 try_next_zone:
@@ -242,7 +248,7 @@ try_next_zone:
                 zone--;
                 goto try_next_zone;
             } else {
-                return nil;
+                return INVALID_PAGE;
             }
         }
     }
@@ -254,9 +260,9 @@ try_next_zone:
      * requested order, and if it's empty, go over to the next higher order.
      * Repeat until we find a page, or we've reached the highest order.
      */
-    vm_page_t page = nil;
+    vm_page_t page = INVALID_PAGE;
     u_int page_order = order;
-    while (page == nil && page_order < MM_NR_ORDERS) {
+    while (!page && page_order < MM_NR_ORDERS) {
         struct mm_pool *pool = &zone->pools[page_order];
         disable_intr();
@@ -276,7 +282,7 @@ try_next_zone:
         intr_restore(cpuflags);
     }
 
-    if (page == nil) {
+    if (!page) {
         if (zone > &mm_zones[0]) {
             /*
              * If we reach this, the current zone technically had enough free
@@ -288,7 +294,7 @@ try_next_zone:
             zone--;
             goto try_next_zone;
         } else {
-            return nil;
+            return INVALID_PAGE;
         }
     }
@@ -312,7 +318,7 @@ try_next_zone:
         disable_intr();
         spin_lock(&pool->lock);
-        clist_add_first(&pool->freelist, &buddy->link);
+        clist_add(&pool->freelist, &buddy->link);
         pool->free_entries++;
         spin_unlock(&pool->lock);
         intr_restore(cpuflags);
@@ -320,7 +326,14 @@ try_next_zone:
     for (u_int i = 0; i < (1 << order); i++)
         pga_set_order(&page[i], order);
-    page_clear(page);
+
+    /* future versions will have a background thread that
+     * clears pages in the freelist while the CPU is idle */
+    if ((flags & _M_ZERO) && !pga_zero(page))
+        page_clear(page);
+    /* XXX only clear the zero flag when the page actually becomes dirty */
+    pga_set_zero(page, false);
+
     return page;
 }
@@ -378,21 +391,14 @@ void page_free(vm_page_t page)
     PAGE_ASSERT((uintptr_t)ptr % ORDER_SIZE(order) == 0);
     u_long pfn = pg2pfn(page);
 
-    PAGE_DEBUG_BLOCK {
-        int old_count = atom_sub(&page->count, 1);
-        if (old_count != 1) {
-            if (old_count == 0)
-                page_debug("double free of %p", ptr);
-            else
-                page_debug("attempted to free %p with references", ptr);
-            return;
-        }
-    } else {
-        atom_dec(&page->count);
-    }
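+    /* a regular free drops the count from 1 to 0; anything else is reported as a double free */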
+    if (atom_dec(&page->count)) {
+        page_debug("Double free of %p", page);
+        return;
+    }
 
     struct mm_zone *zone = &mm_zones[pga_zone(page)];
     latom_add(&zone->free_count, (1 << order));
+    struct mm_pool *pool = &zone->pools[order];
 
     /* try to coalesce free buddy blocks until we've reached the highest order */
     while (order < MM_MAX_ORDER) {
@@ -405,30 +411,30 @@ void page_free(vm_page_t page)
          * to avoid blocking other CPUs for longer than necessary */
         vm_page_t buddy = &vm_page_array[pfn ^ (1ul << order)];
         vm_page_t low = &vm_page_array[pfn & ~(1ul << order)];
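+        /* flipping the order bit of the pfn gives the buddy, clearing it gives the lower of the two blocks */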
-        struct mm_pool *current_order_pool = &zone->pools[order];
-        struct mm_pool *next_order_pool = &zone->pools[order + 1];
 
         disable_intr();
-        spin_lock(&zone->pools[order].lock);
+        spin_lock(&pool->lock);
         if (can_merge(page, buddy)) {
             /* remove buddy from the low order freelist */
             clist_del(&buddy->link);
-            current_order_pool->free_entries--;
+            pool->free_entries--;
+            spin_unlock(&pool->lock);
             pga_set_order(buddy, order + 1);
             pga_set_order(page, order + 1);
-            clist_add(&next_order_pool->freelist, &low->link);
-            next_order_pool->free_entries++;
         } else {
-            order = MM_MAX_ORDER; /* break out of the loop */
+            spin_unlock(&pool->lock);
+            intr_restore(cpuflags);
+            break;
         }
-        spin_unlock(&zone->pools[order].lock);
         intr_restore(cpuflags);
 
         page = low;
         pfn = pg2pfn(page);
         order++;
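+        /* keep pool in sync with order for the final freelist insertion below */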
+        pool++;
     }
 
     /* finally, insert the page into its freelist */
-    struct mm_pool *pool = &zone->pools[order];
     disable_intr();
     spin_lock(&pool->lock);
     clist_add(&pool->freelist, &page->link);