/* Copyright (C) 2021,2022 fef . All rights reserved. */

#pragma once

/*
 * Poison values for detecting use-after-free and uninitialized use.
 *
 * NOTE(review): the two original #include directives were garbled (their
 * angle-bracket contents were stripped).  <limits.h> is definitely required
 * for ULONG_MAX/LONG_BIT below; the second include was presumably an arch
 * header providing POISON_BASE -- confirm against upstream.
 */
#include <limits.h>

/*
 * LONG_BIT is an XSI extension, not guaranteed by strict ISO C.
 * Derive it from ULONG_MAX when libc does not provide it; anything
 * other than 32/64 falls through to the #error in the chain below.
 */
#ifndef LONG_BIT
#if ULONG_MAX == 0xfffffffful
#define LONG_BIT 32
#elif ULONG_MAX == 0xfffffffffffffffful
#define LONG_BIT 64
#endif
#endif

/*
 * If possible, arch should define this value as an unmappable base address.
 * For example, on the amd64, this is set to 0xdead000000000000 because the
 * highest 17 bits of linear addresses must always be identical there.
 * As a result, it will *always* page fault when dereferenced, no matter
 * the current state of the page maps.
 */
#ifndef POISON_BASE
#define POISON_BASE 0ul
#endif

/*
 * You should only use values that `sus_nil()` finds sus (i.e. values < 0x1000),
 * and whose LSB is zero.  The latter is a little safety measure because most
 * architectures use bit 0 of the page maps as a present flag.  So, even if the
 * impossible happens and this *somehow* ends up in a page map, it is marked as
 * non-present and therefore page faults rather than exposing anything.
 */
#define PAGE_POISON_ALLOC (POISON_BASE + 0x00000010ul)
#define PAGE_POISON_FREE  (POISON_BASE + 0x00000020ul)

/* ASCII fill patterns ('a...`' / 'A...@'), last byte chosen so the LSB is 0. */
#if LONG_BIT == 32
#define SLAB_POISON_ALLOC 0x61616160ul
#define SLAB_POISON_FREE  0x41414140ul
#elif LONG_BIT == 64
/*
 * Fixed: these previously ended in ...61/...41 (LSB set), violating the
 * LSB-zero rule documented above and disagreeing with the 32- and 128-bit
 * variants, which both end in ...60/...40.
 */
#define SLAB_POISON_ALLOC 0x6161616161616160ul
#define SLAB_POISON_FREE  0x4141414141414140ul
#elif LONG_BIT == 128
#define SLAB_POISON_ALLOC 0x61616161616161616161616161616160ul
#define SLAB_POISON_FREE  0x41414141414141414141414141414140ul
#else
#error "Unsupported long size"
#endif

/* Stored in the prev/next fields of a clist node when it is deleted. */
#define CLIST_POISON_PREV (POISON_BASE + 0x000000c4ul)
#define CLIST_POISON_NEXT (POISON_BASE + 0x000000c8ul)