@@ -1,9 +1,14 @@
/* Copyright (C) 2021,2022 fef <owo@fef.moe>. All rights reserved. */
/*
* slabbing slabs onto the slab for slabs slab slab slahsdf ashklfghdsla
*/
#include <arch/atom.h>
#include <arch/cpufunc.h>
#include <arch/page.h>
#include <gay/bits.h>
#include <gay/cdefs.h>
#include <gay/clist.h>
#include <gay/config.h>
@@ -15,9 +20,7 @@
#include <gay/types.h>
#include <gay/vm/page.h>
/*
* XXX this implementation is still missing object caches
*/
#include <strings.h>
#if CFG_POISON_SLABS
struct slab_poison {
@@ -29,8 +32,8 @@ struct slab_poison {
        u_long high_poison[1];
};
static void poison_after_alloc(struct slab_poison *poison, u_int exact_size, void *alloc_source);
static void poison_after_free(struct slab_poison *poison);
static void poison_on_alloc(struct slab_poison *poison, u_long exact_size, void *alloc_source);
static void poison_on_free(struct slab_poison *poison);
#endif
#if CFG_DEBUG_SLAB_ALLOCS
@@ -49,126 +52,323 @@ static void poison_after_free(struct slab_poison *poison);
#define slab_debug_noisy(msg, ...) ({})
#endif
struct slab_pool {
        const u_int entry_size;         /**< @brief Size of one entry in bytes */
        const u_int entries_per_slab;   /**< @brief Max number of entries per slab */
        atom_t total_used;              /**< @brief Total allocated entries */
        const u_int page_order;         /**< @brief Order passed to `get_pages()` */
        struct clist empty_list;        /* -> struct vm_page::link */
        struct clist partial_list;      /* -> struct vm_page::link */
        struct clist full_list;         /* -> struct vm_page::link */
        spin_t empty_lock;              /**< @brief Lock for `empty_list` */
        spin_t partial_lock;            /**< @brief Lock for `partial_list` */
        spin_t full_lock;               /**< @brief Lock for `full_list` */
        atom_t empty_count;             /**< @brief Number of empty slabs */
        atom_t partial_count;           /**< @brief Number of partially empty slabs */
        atom_t full_count;              /**< @brief Number of full slabs */
};
/**
 * @brief Single node in the object cache system.
 * Each node owns a page.
 */
struct kmem_cache_node {
        struct clist link;      /* -> struct kmem_cache_pool::list */
        void **freelist;        /**< @brief Stack of free objects */
        struct kmem_cache *cache;       /**< @brief Object cache this node belongs to */
        spin_t lock;            /**< @brief Lock for `freelist` */
        u_int free_count;
        vm_page_t page;         /**< @brief Physical page this node manages */
};
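/*
 * Illustrative sketch (not part of this change): the freelist is intrusive,
 * i.e. the first machine word of every free object stores the address of the
 * next free one.  Popping an entry therefore boils down to roughly
 *
 *      void *obj = node->freelist;             // take the top object
 *      node->freelist = *node->freelist;       // its first word is the next pointer
 *
 * which is what pop_freelist_and_insert() below does under node->lock.
 */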
/*
 * Fun size calculations because the slab header takes up some overhead at the
 * beginning of each page.  We should ideally try to cram all the info we need
 * into struct vm_page, because the individual slab entry sizes could be even
 * powers of two and perfectly aligned then.
 */
struct kmem_cache_pool {
        struct clist list;      /* -> struct kmem_cache_node::link */
        spin_t lock;
        atom_t count;
};
/**
 * @brief Cache for one particular object type.
 * A pool holds multiple nodes, each of which holds the same number of slabs.
 */
struct kmem_cache {
        u_int object_size;      /**< @brief Object size in bytes */
        u_int page_order;       /**< @brief Order passed to `get_pages()` */
        enum slab_flags flags;  /**< @brief Flags for how to allocate */
        u_int slabs_per_node;   /**< @brief Max number of slabs per cache node */
        latom_t total_used;     /**< @brief Total allocated entries */
        const char *name;       /**< @brief Unique name for this object type */
        void (*ctor)(void *ptr, kmem_cache_t cache);
        void (*dtor)(void *ptr, kmem_cache_t cache);
        struct kmem_cache_pool empty;
        struct kmem_cache_pool partial;
        struct kmem_cache_pool full;
        struct clist link;      /**< @brief List of all kmem caches */
};
/* values for struct kmem_cache::flags */
/** @brief Zone to request pages from (using `page_alloc()`) */
#define SLAB_ZONE(flags) ((flags) & 3)
/** @brief List of all currently registered `struct kmem_cache`s. */
static CLIST(kmem_cache_list);
#define _MIN1(x) ((x) < 1 ? 1 : (x))
#define POOL_ENTRIES_PER_TABLE(sz) _MIN1(PAGE_SIZE / (sz))
#define SLABS_PER_NODE(sz) _MIN1(PAGE_SIZE / (sz))
#define POOL_DEFINE(sz) { \
        .entry_size = (sz), \
        .entries_per_slab = POOL_ENTRIES_PER_TABLE(sz), \
        .total_used = ATOM_DEFINE(0), \
#define CACHE_DEFINE(sz, _name, _flags) { \
        .object_size = (sz), \
        .page_order = ((sz) - 1) / PAGE_SIZE, \
        .empty_lock = SPIN_DEFINE, \
        .partial_lock = SPIN_DEFINE, \
        .full_lock = SPIN_DEFINE, \
        .empty_count = ATOM_DEFINE(0), \
        .partial_count = ATOM_DEFINE(0), \
        .full_count = ATOM_DEFINE(0), \
        .flags = (_flags), \
        .slabs_per_node = SLABS_PER_NODE(sz), \
        .total_used = ATOM_DEFINE(0), \
        .name = (_name), \
}
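/*
 * For reference, an illustrative expansion (assuming a 4096-byte PAGE_SIZE):
 * CACHE_DEFINE(32, "kmem_32", _M_ZONE_NORMAL | SLAB_POISON) produces an
 * initializer with object_size = 32, page_order = (32 - 1) / PAGE_SIZE = 0
 * and slabs_per_node = _MIN1(PAGE_SIZE / 32) = 128, i.e. 128 slabs backed
 * by a single page per node.
 */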
static struct slab_pool slab_pools_normal[] = {
        POOL_DEFINE(32),
        POOL_DEFINE(64),
        POOL_DEFINE(128),
        POOL_DEFINE(256),
        POOL_DEFINE(512),
        POOL_DEFINE(1024),
        POOL_DEFINE(2048),
        POOL_DEFINE(4096),
        POOL_DEFINE(8192),
        POOL_DEFINE(16384),
        POOL_DEFINE(32768),
static struct kmem_cache kmem_caches[] = {
        CACHE_DEFINE(32, "kmem_32", _M_ZONE_NORMAL | SLAB_POISON),
        CACHE_DEFINE(64, "kmem_64", _M_ZONE_NORMAL | SLAB_POISON),
        CACHE_DEFINE(128, "kmem_128", _M_ZONE_NORMAL | SLAB_POISON),
        CACHE_DEFINE(256, "kmem_256", _M_ZONE_NORMAL | SLAB_POISON),
        CACHE_DEFINE(512, "kmem_512", _M_ZONE_NORMAL | SLAB_POISON),
        CACHE_DEFINE(1024, "kmem_1024", _M_ZONE_NORMAL | SLAB_POISON),
        CACHE_DEFINE(2048, "kmem_2048", _M_ZONE_NORMAL | SLAB_POISON),
        CACHE_DEFINE(4096, "kmem_4096", _M_ZONE_NORMAL | SLAB_POISON),
        CACHE_DEFINE(8192, "kmem_8192", _M_ZONE_NORMAL | SLAB_POISON),
        CACHE_DEFINE(16384, "kmem_16384", _M_ZONE_NORMAL | SLAB_POISON),
        CACHE_DEFINE(32768, "kmem_32768", _M_ZONE_NORMAL | SLAB_POISON),
        { /* terminator */ }
};
static struct slab_pool slab_pools_dma[] = {
        POOL_DEFINE(32),
        POOL_DEFINE(64),
        POOL_DEFINE(128),
        POOL_DEFINE(256),
        POOL_DEFINE(512),
        POOL_DEFINE(1024),
static struct kmem_cache kmem_dma_caches[] = {
        CACHE_DEFINE(32, "kmem_dma_32", _M_ZONE_DMA | SLAB_POISON),
        CACHE_DEFINE(64, "kmem_dma_64", _M_ZONE_DMA | SLAB_POISON),
        CACHE_DEFINE(128, "kmem_dma_128", _M_ZONE_DMA | SLAB_POISON),
        CACHE_DEFINE(256, "kmem_dma_256", _M_ZONE_DMA | SLAB_POISON),
        CACHE_DEFINE(512, "kmem_dma_512", _M_ZONE_DMA | SLAB_POISON),
        CACHE_DEFINE(1024, "kmem_dma_1024", _M_ZONE_DMA | SLAB_POISON),
        { /* terminator */ }
};
/**
 * This is a little fucked.
 *
 * So, every `vm_page_t` in use by the slab allocator gets a corresponding
 * `struct kmem_cache_node` that keeps track of everything we need to know to
 * make allocations.  However, the memory for those structs themselves doesn't
 * magically grow on trees.  In other words, we need to allocate memory in
 * order to be able to allocate memory.
 *
 * So what we have here is a separate object cache for `struct kmem_cache_node`
 * that works slightly differently than all the other ones: Instead of making
 * an extra allocation for the cache node, that node sits at the beginning of
 * the page that we allocate from itself.  Other caches don't do this because
 * it destroys the perfect page alignment of the allocated area itself, but
 * that doesn't matter here.
 */
static struct kmem_cache kmem_cache_node_caches =
        CACHE_DEFINE(sizeof(struct kmem_cache_node), "kmem_cache_node", _M_ZONE_NORMAL | SLAB_DMAP);
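/*
 * Rough page layout for this self-hosting cache (illustrative only):
 *
 *      +-----------+-----------+-----------+-----+-----------+
 *      | mgmt node |  node 1   |  node 2   | ... |  node n   |
 *      +-----------+-----------+-----------+-----+-----------+
 *      ^ page start: the kmem_cache_node that manages this very page
 *
 * This is also why kmalloc_init() decrements slabs_per_node for this cache
 * by one: the first slot is eaten by the management node itself.
 */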
#undef _MIN1 /* we don't wanna end up using this in actual code, do we? */
static struct slab_pool *slab_zone_pools[MM_NR_ZONES] = {
        [_M_ZONE_DMA]    = slab_pools_dma,
        [_M_ZONE_NORMAL] = slab_pools_normal,
static struct kmem_cache *kmem_cache_zones[MM_NR_ZONES] = {
        [_M_ZONE_DMA]    = kmem_dma_caches,
        [_M_ZONE_NORMAL] = kmem_caches,
};
static vm_page_t slab_create(struct slab_pool *pool, enum mflags flags);
static void cache_pool_init(struct kmem_cache_pool *pool)
{
        clist_init(&pool->list);
        atom_init(&pool->count, 0);
        spin_init(&pool->lock);
}
void kmalloc_init(void)
{
        for (int i = 0; i < MM_NR_ZONES; i++) {
                struct slab_pool *pool = slab_zone_pools[i];
        cache_pool_init(&kmem_cache_node_caches.empty);
        cache_pool_init(&kmem_cache_node_caches.partial);
        cache_pool_init(&kmem_cache_node_caches.full);
        /* for the management node at the beginning of the page */
        kmem_cache_node_caches.slabs_per_node--;
        clist_add(&kmem_cache_list, &kmem_cache_node_caches.link);
                while (pool->entry_size != 0) {
                        clist_init(&pool->empty_list);
                        clist_init(&pool->partial_list);
                        clist_init(&pool->full_list);
                        pool++;
        for (int i = 0; i < MM_NR_ZONES; i++) {
                struct kmem_cache *cache = kmem_cache_zones[i];
                while (cache->object_size != 0) {
                        clist_init(&cache->empty.list);
                        clist_init(&cache->partial.list);
                        clist_init(&cache->full.list);
                        clist_add(&kmem_cache_list, &cache->link);
                        cache++;
                }
        }
}
void *kmalloc(usize size, enum mflags flags)
kmem_cache_t kmem_cache_register(const char *name, u_int obj_size, enum slab_flags flags,
                                 void (*ctor)(void *ptr, kmem_cache_t cache),
                                 void (*dtor)(void *ptr, kmem_cache_t cache))
{
        if (size == 0)
        obj_size = align_ceil(obj_size, sizeof(long));
        /* we only support objects up to PAGE_SIZE for now */
        if (obj_size > PAGE_SIZE || obj_size == 0)
                return nil;
#if CFG_POISON_SLABS
        size += sizeof(struct slab_poison);
#endif
        struct kmem_cache *cache = kmalloc(sizeof(*cache), M_KERN);
        SLAB_DEBUG_BLOCK {
                if (!(flags & _M_NOWAIT) && in_irq()) {
                        slab_debug("kmalloc() called from irq without M_NOWAIT "
                                   "(caller: %p)\n", ktrace_return_addr());
                        flags |= _M_NOWAIT;
        if (cache) {
                cache->name = name;
                cache->object_size = obj_size;
                cache->flags = flags;
                cache->ctor = ctor;
                cache->dtor = dtor;
                cache_pool_init(&cache->empty);
                cache_pool_init(&cache->partial);
                cache_pool_init(&cache->full);
                /* XXX this is pretty wasteful for larger obj_sizes */
                cache->slabs_per_node = PAGE_SIZE / obj_size;
                cache->page_order = 0;
                clist_add(&kmem_cache_list, &cache->link);
        }
        return cache;
}
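/*
 * Hypothetical usage sketch (the struct and names below are made up, not part
 * of this change): a subsystem that frequently allocates fixed-size objects
 * would register a cache once and then allocate from it:
 *
 *      struct foo { int x; };
 *      kmem_cache_t foo_cache = kmem_cache_register("foo", sizeof(struct foo),
 *                                                   _M_ZONE_NORMAL, nil, nil);
 *      struct foo *f = kmem_cache_alloc(foo_cache, M_KERN);
 *
 * Objects are returned with kfree(), which finds the owning node through the
 * vm_page slab attributes.
 */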
static inline void **freelist_init(vm_page_t page, struct kmem_cache *cache)
{
        void *prev = nil;
        void *start = __v(pg2paddr(page));
        void *end = start + align_floor(1 << (cache->page_order + PAGE_SHIFT), cache->object_size);
        void *pos = end;
        do {
                pos -= cache->object_size;
                if (cache->ctor)
                        cache->ctor(pos, cache);
                *(void **)pos = prev;
                prev = pos;
        } while (pos >= start + cache->object_size);
        return (void **)pos;
}
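/*
 * Worked example (illustrative): with object_size = 32 on an order-0
 * 4096-byte page, the loop walks backwards from the page end and chains all
 * 128 objects together, lowest address first:
 *
 *      freelist -> obj[0] -> obj[1] -> ... -> obj[127] -> nil
 *
 * The constructor, if one was registered, runs once per object here instead
 * of on every allocation.
 */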
/** Attempt to remove a cache node from the partial/empty list of a cache pool and return it */
/* call with interrupts disabled */
static inline struct kmem_cache_node *pool_del_first_node(struct kmem_cache_pool *pool)
{
        struct kmem_cache_node *node = nil;
        spin_lock(&pool->lock);
        if (!clist_is_empty(&pool->list)) {
                atom_dec(&pool->count);
                node = clist_del_first_entry(&pool->list, typeof(*node), link);
        }
        spin_unlock(&pool->lock);
        return node;
}
/* call with interrupts disabled */
static inline void pool_del_node(struct kmem_cache_pool *pool, struct kmem_cache_node *node)
{
        atom_dec(&pool->count);
        spin_lock(&pool->lock);
        clist_del(&node->link);
        spin_unlock(&pool->lock);
}
/* call with interrupts disabled */
static inline void pool_add_node(struct kmem_cache_pool *pool, struct kmem_cache_node *node)
{
        spin_lock(&pool->lock);
        clist_add(&pool->list, &node->link);
        spin_unlock(&pool->lock);
        atom_inc(&pool->count);
}
/* call with interrupts disabled */
static inline void *pop_freelist_and_insert(struct kmem_cache *cache, struct kmem_cache_node *node)
{
        spin_lock(&node->lock);
        void *ret = node->freelist;
        node->freelist = *node->freelist;
        u_int free_count = --node->free_count;
        spin_unlock(&node->lock);
        latom_inc(&cache->total_used);
        if (free_count == 0)
                pool_add_node(&cache->full, node);
        else
                pool_add_node(&cache->partial, node);
        return ret;
}
/* call with interrupts disabled */
static struct kmem_cache_node *node_alloc(void)
{
        /*
         * This is really the same basic procedure as kmem_cache_alloc(),
         * except that we allocate everything manually if we run out of caches
         * and interrupts are disabled.
         * It definitely needs a cleanup at some point, most of the stuff here
         * can probably be eliminated if kmem_cache_alloc() is split up.
         */
        struct kmem_cache_node *mgmt_node = pool_del_first_node(&kmem_cache_node_caches.partial);
        if (!mgmt_node) {
                mgmt_node = pool_del_first_node(&kmem_cache_node_caches.empty);
                if (!mgmt_node) {
                        vm_page_t page = page_alloc(0, M_ATOMIC);
                        if (!page)
                                return nil;
                        void **freelist = freelist_init(page, &kmem_cache_node_caches);
                        mgmt_node = (struct kmem_cache_node *)freelist;
                        mgmt_node->freelist = *freelist;
                        mgmt_node = __v(pg2paddr(page));
                        spin_init(&mgmt_node->lock);
                        mgmt_node->free_count = kmem_cache_node_caches.slabs_per_node;
                        mgmt_node->cache = &kmem_cache_node_caches;
                        mgmt_node->page = page;
                }
        }
        SLAB_ASSERT(_M_ZONE_INDEX(flags) < ARRAY_SIZE(slab_zone_pools));
        struct slab_pool *pool = slab_zone_pools[_M_ZONE_INDEX(flags)];
        while (pool->entry_size != 0) {
                if (pool->entry_size >= size)
                        break;
                pool++;
        struct kmem_cache_node *new_node = pop_freelist_and_insert(&kmem_cache_node_caches,
                                                                   mgmt_node);
        return new_node;
}
/* call with interrupts disabled */
static inline struct kmem_cache_node *node_create(struct kmem_cache *cache, enum mflags flags,
                                                  register_t cpuflags)
{
        struct kmem_cache_node *node = node_alloc();
        if (node) {
                intr_restore(cpuflags);
                vm_page_t page = page_alloc(cache->page_order, flags | M_ZERO);
                if (page) {
                        pga_set_slab(page, true);
                        page->slab = node;
                        node->freelist = freelist_init(page, cache);
                        spin_init(&node->lock);
                        node->free_count = cache->slabs_per_node;
                        node->cache = cache;
                        node->page = page;
                } else {
                        kfree(node);
                        node = nil;
                }
                intr_disable();
        }
        if (pool->entry_size == 0) {
                slab_debug("Refusing to allocate %zu bytes in zone %d (limit is %u)\n",
                           size, _M_ZONE_INDEX(flags), pool[-1].entry_size);
                return nil;
        return node;
}
void *kmem_cache_alloc(kmem_cache_t cache, enum mflags flags)
{
        SLAB_DEBUG_BLOCK {
                if (!(flags & _M_NOWAIT) && in_irq()) {
                        slab_debug("kmem_cache_alloc() called from irq %p w/o M_NOWAIT\n",
                                   ktrace_return_addr());
                        flags |= _M_NOWAIT;
                }
        }
        slab_debug_noisy("alloc %zu bytes from zone %d, pool size %u\n",
                         size, _M_ZONE_INDEX(flags), pool->entry_size);
        SLAB_ASSERT(_M_ZONE_INDEX(flags) < ARRAY_SIZE(slab_zone_pools));
        slab_debug_noisy("alloc %zu bytes from zone %d, cache %s\n",
                         size, _M_ZONE_INDEX(flags), cache->name);
        /*
         * Before locking a slab, we always remove it from its pool.
         * Before locking a node, we always remove it from its cache pool.
         * This is far from optimal, because if multiple CPUs allocate from the
         * same pool at the same time, we could end up creating several slabs
         * with one used entry each (not to mention the overhead of the mostly
@@ -178,62 +378,63 @@ void *kmalloc(usize size, enum mflags flags)
         * it can't possibly be used for allocations anymore.
         * This is probably not worth the overhead, though.
         */
        vm_page_t page = INVALID_PAGE;
        struct kmem_cache_node *node = nil;
        /* try to use a slab that is already partially used first */
        register_t cpuflags = intr_disable();
        spin_lock(&pool->partial_lock);
        if (!clist_is_empty(&pool->partial_list)) {
                atom_dec(&pool->partial_count);
                page = clist_del_first_entry(&pool->partial_list, typeof(*page), link);
        }
        spin_unlock(&pool->partial_lock);
        if (!page) {
                /* no partially used slab available, see if we have a completely free one */
                spin_lock(&pool->empty_lock);
                if (!clist_is_empty(&pool->empty_list)) {
                        atom_dec(&pool->empty_count);
                        page = clist_del_first_entry(&pool->empty_list, typeof(*page), link);
                }
                spin_unlock(&pool->empty_lock);
                if (!page) {
                        /* we're completely out of usable slabs, allocate a new one */
                        intr_restore(cpuflags);
                        page = slab_create(pool, flags);
                        if (!page) {
        node = pool_del_first_node(&cache->partial);
        if (!node) {
                /* no partially used node available, see if we have a completely free one */
                node = pool_del_first_node(&cache->empty);
                if (!node) {
                        /* we're completely out of usable nodes, allocate a new one */
                        node = node_create(cache, flags, cpuflags);
                        if (!node) {
                                slab_debug("kernel OOM\n");
                                return nil;
                        }
                        intr_disable();
                }
        }
        /* if we've made it to here, we have a slab and interrupts are disabled */
        page_lock(page);
        void *ret = page->slab.freelist;
        SLAB(page)->freelist = *SLAB(page)->freelist;
        if (--page->slab.free_count == 0) {
                spin_lock(&pool->full_lock);
                clist_add(&pool->full_list, &page->link);
                spin_unlock(&pool->full_lock);
                atom_inc(&pool->full_count);
        } else {
                spin_lock(&pool->partial_lock);
                clist_add(&pool->partial_list, &page->link);
                spin_unlock(&pool->partial_lock);
                atom_inc(&pool->partial_count);
        }
        page_unlock(page);
        /* if we've made it to here, we have a cache node and interrupts are disabled */
        void *ret = pop_freelist_and_insert(cache, node);
        intr_restore(cpuflags);
        atom_inc(&pool->total_used);
        return ret;
}
void *kmalloc(usize size, enum mflags flags)
{
        if (size == 0)
                return nil;
#if CFG_POISON_SLABS
        struct slab_poison *poison = ret;
        poison_after_alloc(poison, size - sizeof(*poison), ktrace_return_addr());
        ret = poison->data;
        size += sizeof(struct slab_poison);
#endif
        SLAB_ASSERT(_M_ZONE_INDEX(flags) < ARRAY_SIZE(slab_zone_pools));
        struct kmem_cache *cache = kmem_cache_zones[_M_ZONE_INDEX(flags)];
        while (cache->object_size != 0) {
                if (cache->object_size >= size)
                        break;
                cache++;
        }
        if (cache->object_size == 0) {
                slab_debug("Refusing to allocate %zu bytes in zone %d (limit is %u)\n",
                           size, _M_ZONE_INDEX(flags), cache[-1].object_size);
                return nil;
        }
        void *ret = kmem_cache_alloc(cache, flags);
#if CFG_POISON_SLABS
        if (ret) {
                struct slab_poison *poison = ret;
                poison_on_alloc(poison, size - sizeof(*poison), ktrace_return_addr());
                ret = poison->data;
        }
#endif
        return ret;
}
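/*
 * Illustrative size-class walk (assuming CFG_POISON_SLABS is enabled):
 * a kmalloc(100, M_KERN) request first grows by sizeof(struct slab_poison)
 * and is then served from the smallest cache whose object_size still fits,
 * e.g. "kmem_256" once the poison header pushes the total past 128 bytes.
 * The caller gets poison->data back, so the extra bookkeeping stays
 * invisible to it.
 */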
@@ -247,64 +448,39 @@ void kfree(void *ptr)
        vm_page_t page = vaddr2pg(ptr);
        SLAB_ASSERT(pga_slab(page));
        struct slab_pool *pool = SLAB(page)->pool;
        struct kmem_cache_node *node = page_slab(page);
        struct kmem_cache *cache = node->cache;
#if CFG_POISON_SLABS
        struct slab_poison *poison = container_of(ptr, typeof(*poison), data);
        poison_after_free(poison);
        ptr = poison;
        if (cache->flags & SLAB_POISON) {
                struct slab_poison *poison = container_of(ptr, typeof(*poison), data);
                poison_on_free(poison);
                ptr = poison;
        }
#endif
        register_t cpuflags = intr_disable();
        page_lock(page);
        *(void **)ptr = SLAB(page)->freelist;
        spin_lock(&node->lock);
        *(void **)ptr = node->freelist;
        SLAB(page)->freelist = (void **)ptr;
        if (++SLAB(page)->free_count == pool->entries_per_slab) {
                spin_lock(&pool->partial_lock);
                clist_del(&page->link);
                spin_unlock(&pool->partial_lock);
                atom_dec(&pool->partial_count);
                spin_lock(&pool->empty_lock);
                clist_add(&pool->empty_list, &page->link);
                spin_unlock(&pool->empty_lock);
                atom_inc(&pool->empty_count);
        }
        page_unlock(page);
        atom_dec(&pool->total_used);
        intr_restore(cpuflags);
}
        u_int free_count = ++node->free_count;
        spin_unlock(&node->lock);
static vm_page_t slab_create(struct slab_pool *pool, enum mflags flags)
{
        slab_debug_noisy("Creating new cache for entry_size %u\n", pool->entry_size);
        vm_page_t page = page_alloc(pool->page_order, flags);
        if (page) {
                pga_set_slab(page, true);
                SLAB(page)->pool = pool;
                SLAB(page)->free_count = pool->entries_per_slab;
                void *prev = nil;
                /* XXX this should not rely on a direct map */
                void *start = pfn2vaddr(pg2pfn(page));
                void *end = start + (1 << (pool->page_order + PAGE_SHIFT));
                void *pos = end;
                do {
                        pos -= pool->entry_size;
                        *(void **)pos = prev;
                        prev = pos;
                } while (pos > start);
                SLAB(page)->freelist = pos;
        if (free_count == cache->slabs_per_node) {
                pool_del_node(&cache->partial, node);
                pool_add_node(&cache->empty, node);
        }
        return page;
        latom_dec(&cache->total_used);
        intr_restore(cpuflags);
}
#if CFG_POISON_SLABS
static inline void poison_after_alloc(struct slab_poison *poison, u_int exact_size,
                                      void *alloc_source)
static inline void poison_on_alloc(struct slab_poison *poison, u_long exact_size,
                                   void *alloc_source)
{
        u_int offset = align_ceil(poison->exact_size, sizeof(long)) / sizeof(long);
        u_long offset = align_ceil(poison->exact_size, sizeof(long)) / sizeof(long);
        u_long *poison_start = &poison->low_poison;
        /*
@@ -331,9 +507,9 @@ static inline void poison_after_alloc(struct slab_poison *poison, u_int exact_size
                *pos = SLAB_POISON_ALLOC;
}
static inline void poison_after_free(struct slab_poison *poison)
static inline void poison_on_free(struct slab_poison *poison)
{
        u_int offset = align_ceil(poison->exact_size, sizeof(long)) / sizeof(long);
        u_long offset = align_ceil(poison->exact_size, sizeof(long)) / sizeof(long);
        if (poison->low_poison != SLAB_POISON_ALLOC) {
                kprintf("Low out-of-bounds write to %p (alloc by %p)\n",