mm: add flags parameter to kmalloc()
parent c767d551d3
commit 0342739ee1
11 changed files with 45 additions and 36 deletions
@@ -11,6 +11,13 @@
  * @{
  */
 
+enum memflags {
+	MEM_KERNEL = (1 << 0),
+	MEM_USER = (1 << 1),
+	MEM_ATOMIC = (1 << 2),
+	MEM_STACK = (1 << 3),
+};
+
 /**
  * @brief Allocate `size` bytes of memory *w/out initializing it*.
  *
@@ -21,20 +28,7 @@
  * @return A pointer to the beginning of the memory area, or `NULL` if
  *         `size` was 0 or there is not enough free memory left.
  */
-__malloc(kfree, 1) void *kmalloc(size_t size);
-
-/**
- * @brief Allocate `size` bytes of memory *w/out initializing it*.
- *
- * Unlike `kmalloc()`, this method is guaranteed not to sleep. It does this by
- * using a completely separate, smaller heap. Only use this if you already are
- * in atomic context, like when in an irq.
- *
- * @param size Amount of bytes to allocate
- * @return A pointer to the beginning of the memory area, or `NULL` if
- *         `size` was 0 or there is not enough free memory left.
- */
-__malloc(kfree, 1) void *atomic_kmalloc(size_t size);
+__malloc(kfree, 1) void *kmalloc(size_t size, enum memflags flags);
 
 /**
  * @brief Free a previously allocated memory region.
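With the new signature, the separate atomic_kmalloc() entry point goes away and callers state the allocation context through flags instead. A rough usage sketch under the API above; the struct name foo and the variable names are made up for illustration, and the header include is omitted because the file path is not shown in this diff:

	/* regular kernel-side allocation, may sleep */
	struct foo *foo = kmalloc(sizeof(*foo), MEM_KERNEL);

	/* from irq/atomic context: served from the separate atomic heap, never sleeps */
	struct foo *f2 = kmalloc(sizeof(*f2), MEM_KERNEL | MEM_ATOMIC);

	/* memory handed to userspace, e.g. a task stack */
	void *stack = kmalloc(CONFIG_STACK_SIZE, MEM_USER | MEM_STACK);

	kfree(stack);
	kfree(f2);
	kfree(foo);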
@@ -23,7 +23,7 @@ int devices_init(void)
 	if (devices_kent != NULL)
 		return -EEXIST;
 
-	devices_kent = kmalloc(sizeof(*devices_kent));
+	devices_kent = kmalloc(sizeof(*devices_kent), MEM_KERNEL);
 	if (devices_kent == NULL)
 		return -ENOMEM;
 
@@ -59,7 +59,7 @@ static void device_kevent_destroy(struct kent *kent)
 
 struct device_kevent *device_kevent_create(struct device *device, enum device_kevent_flags flags)
 {
-	struct device_kevent *event = atomic_kmalloc(sizeof(*event));
+	struct device_kevent *event = kmalloc(sizeof(*event), MEM_KERNEL | MEM_ATOMIC);
 	if (event == NULL)
 		return NULL;
 
@@ -22,7 +22,7 @@ struct dmabuf *dmabuf_create(struct device *dev, size_t len)
 	 * allocation needs to be atomic because the buffer might be
 	 * free()d from within an irq handler which cannot sleep
 	 */
-	struct dmabuf *buf = atomic_kmalloc(sizeof(*buf) + len);
+	struct dmabuf *buf = kmalloc(sizeof(*buf) + len, MEM_KERNEL | MEM_ATOMIC);
 	if (buf == NULL)
 		return NULL;
 
@@ -41,7 +41,7 @@ struct file *file_create(struct device *device, enum file_type type, int *err)
 		return NULL;
 	}
 
-	f = kmalloc(sizeof(*f));
+	f = kmalloc(sizeof(*f), MEM_KERNEL);
 	if (f == NULL) {
 		*err = -ENOMEM;
 		mutex_unlock(&fdtab_lock);
@@ -114,7 +114,7 @@ static int iowait_device(struct file *file, enum device_kevent_flags flags)
 	kent_get(&current->kent);
 
 	/* this must be atomic because event listeners can't sleep but need to call free() */
-	struct io_device_kevent_extra *extra = atomic_kmalloc(sizeof(*extra));
+	struct io_device_kevent_extra *extra = kmalloc(sizeof(*extra), MEM_KERNEL | MEM_ATOMIC);
 	if (extra == NULL)
 		return -ENOMEM;
 
@@ -206,7 +206,7 @@ static void file_kevent_destroy(struct kent *kent)
 
 struct file_kevent *file_kevent_create(struct file *f, enum file_kevent_flags flags)
 {
-	struct file_kevent *event = atomic_kmalloc(sizeof(*event));
+	struct file_kevent *event = kmalloc(sizeof(*event), MEM_KERNEL | MEM_ATOMIC);
 	if (event == NULL)
 		return NULL;
 
@@ -18,7 +18,7 @@ long sys_read(int fd, __user void *buf, size_t len)
 	if (f == NULL)
 		return -EBADF;
 
-	copy = kmalloc(len);
+	copy = kmalloc(len, MEM_KERNEL);
 	if (copy == NULL)
 		return -ENOMEM;
 
@@ -18,7 +18,7 @@ long sys_write(int fd, __user const void *buf, size_t len)
 	if (f == NULL)
 		return -EBADF;
 
-	copy = kmalloc(len);
+	copy = kmalloc(len, MEM_KERNEL);
 	if (copy == NULL) {
 		file_put(f);
 		return -ENOMEM;
@@ -137,7 +137,7 @@ struct kevent_listener *kevent_listener_add(enum kevent_kind kind,
 					    int (*cb)(struct kevent *, void *),
 					    void *extra)
 {
-	struct kevent_listener *listener = kmalloc(sizeof(*listener));
+	struct kevent_listener *listener = kmalloc(sizeof(*listener), MEM_KERNEL);
 
 	if (listener != NULL) {
 		listener->cb = cb;
kernel/mm.c: 27 lines changed
@@ -166,7 +166,7 @@ static struct memblk *blk_slice(struct list_head *heap, struct memblk *bottom, s
 
 long sys_malloc(size_t size)
 {
-	void *ptr = kmalloc(size);
+	void *ptr = kmalloc(size, MEM_USER);
 	return *(long *)&ptr;
 }
 
@@ -202,11 +202,29 @@ void kmalloc_init(void *heap, size_t size)
 	atomic_heap_free = blk_get_size(atomic_block);
 }
 
-void *kmalloc(size_t size)
+static void *atomic_kmalloc(size_t);
+
+/*
+ * this is still the old algorithm and all flags except atomic are ignored,
+ * so that at least the code still compiles to do some testing
+ */
+void *kmalloc(size_t size, enum memflags flags)
 {
+# ifdef DEBUG
+	if ((flags & MEM_KERNEL) && (flags & MEM_USER))
+		__breakpoint;
+	if ((flags & (MEM_USER | MEM_KERNEL)) == 0)
+		__breakpoint;
+	if ((flags & MEM_USER) && (flags & MEM_ATOMIC))
+		__breakpoint;
+# endif
+
 	if (size == 0)
 		return NULL; /* as per POSIX */
 
+	if (flags & MEM_ATOMIC)
+		return atomic_kmalloc(size);
+
 	if (size > generic_heap_free)
 		return NULL;
 
@@ -241,11 +259,8 @@ void *kmalloc(size_t size)
 	return ptr;
 }
 
-void *atomic_kmalloc(size_t size)
+static void *atomic_kmalloc(size_t size)
 {
-	if (size == 0)
-		return NULL;
-
 	if (size > atomic_heap_free)
 		return NULL;
 
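The # ifdef DEBUG block above encodes the intended flag rules: exactly one of MEM_KERNEL or MEM_USER must be set, and MEM_ATOMIC is only meaningful for kernel allocations. A small standalone sketch of the same rule as a predicate; memflags_valid() is a hypothetical helper written here for illustration only, it is not part of this commit:

	#include <stdbool.h>

	enum memflags {
		MEM_KERNEL = (1 << 0),
		MEM_USER = (1 << 1),
		MEM_ATOMIC = (1 << 2),
		MEM_STACK = (1 << 3),
	};

	/* returns true iff the combination would pass the DEBUG checks in kmalloc() */
	static bool memflags_valid(enum memflags flags)
	{
		bool kernel = (flags & MEM_KERNEL) != 0;
		bool user = (flags & MEM_USER) != 0;

		if (kernel == user)			/* both set, or neither set */
			return false;
		if (user && (flags & MEM_ATOMIC))	/* the atomic heap is kernel-only */
			return false;
		return true;
	}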
@@ -10,7 +10,7 @@
 
 struct ringbuf *ringbuf_create(size_t size)
 {
-	struct ringbuf *buf = kmalloc(sizeof(*buf) + size);
+	struct ringbuf *buf = kmalloc(sizeof(*buf) + size, MEM_KERNEL);
 	if (buf == NULL)
 		return NULL;
 
@@ -96,7 +96,7 @@ int sched_init(void)
 	if (err != 0)
 		goto out;
 
-	idle_task.stack = kmalloc(CONFIG_STACK_SIZE);
+	idle_task.stack = kmalloc(CONFIG_STACK_SIZE, MEM_USER | MEM_STACK);
 	if (idle_task.stack == NULL)
 		goto out;
 	idle_task.bottom = idle_task.stack + CONFIG_STACK_SIZE;
@@ -222,14 +222,14 @@ long sys_exec(int (*entry)(void))
 		goto out;
 	}
 
-	child = kmalloc(sizeof(*child));
+	child = kmalloc(sizeof(*child), MEM_KERNEL);
 	if (child == NULL) {
 		pid = -ENOMEM;
 		goto out;
 	}
 
 	child->pid = pid;
-	child->stack = kmalloc(CONFIG_STACK_SIZE);
+	child->stack = kmalloc(CONFIG_STACK_SIZE, MEM_USER | MEM_STACK);
 	if (child->stack == NULL) {
 		pid = -ENOMEM;
 		goto err_stack_malloc;
@@ -26,7 +26,7 @@ static void task_kevent_destroy(struct kent *kent)
 
 void task_kevent_create_and_dispatch(struct task *task, int status)
 {
-	struct task_kevent *event = kmalloc(sizeof(*event));
+	struct task_kevent *event = kmalloc(sizeof(*event), MEM_KERNEL);
 	if (event == NULL)
 		return; /* TODO: we're fucked here */
 
@@ -54,12 +54,12 @@ __noreturn void sys_exit(int status)
 
 	if (parent->state != TASK_WAITPID) {
 		/*
-		 * atomic_kmalloc wouldn't actually be needed here, but we use
+		 * the atomic flag wouldn't actually be needed here, but we use
 		 * it anyway because it has a separate heap which is more likely
 		 * to have an emergency reserve of memory. A failing allocation
 		 * would *really* be inconvenient here.
 		 */
-		struct dead_child *entry = atomic_kmalloc(sizeof(*entry));
+		struct dead_child *entry = kmalloc(sizeof(*entry), MEM_KERNEL | MEM_ATOMIC);
 		if (entry == NULL) {
 			schedule(); /* TODO: we're severely fucked here */
 		}
 