/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;
+/* The lock must be held when performing pool or freelist modifications. */
+static DEFINE_RAW_SPINLOCK(pool_lock);
/* Array of memory regions that store stack records. */
-static void **stack_pools;
+static void **stack_pools __pt_guarded_by(&pool_lock);
/* Newly allocated pool that is not yet added to stack_pools. */
static void *new_pool;
/* Number of pools in stack_pools. */
static int pools_num;
/* Offset to the unused space in the currently used pool. */
-static size_t pool_offset = DEPOT_POOL_SIZE;
+static size_t pool_offset __guarded_by(&pool_lock) = DEPOT_POOL_SIZE;
/* Freelist of stack records within stack_pools. */
-static LIST_HEAD(free_stacks);
-/* The lock must be held when performing pool or freelist modifications. */
-static DEFINE_RAW_SPINLOCK(pool_lock);
+static __guarded_by(&pool_lock) LIST_HEAD(free_stacks);
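/*
 * Aside: a minimal sketch of what the two annotation kinds above mean,
 * written directly against Clang's -Wthread-safety attributes (check
 * with: clang -fsyntax-only -Wthread-safety). The macro spellings are
 * assumed to mirror the kernel's; this is illustrative, not the
 * kernel's implementation. Two details of the diff worth noting: the
 * DEFINE_RAW_SPINLOCK(pool_lock) definition moves above the variables
 * because the annotations must be able to name the lock, and for
 * free_stacks the attribute precedes LIST_HEAD() because that macro
 * expands to a declaration with an initializer, leaving no slot for an
 * attribute after the declarator.
 */
#define __guarded_by(l)		__attribute__((guarded_by(l)))
#define __pt_guarded_by(l)	__attribute__((pt_guarded_by(l)))

struct __attribute__((capability("mutex"))) toy_lock { int unused; };
static struct toy_lock pool_lock;

/* Only the pointed-to data is guarded; the pointer itself is not. */
static void **stack_pools __pt_guarded_by(&pool_lock);
/* The variable itself is guarded: every access needs pool_lock. */
static size_t pool_offset __guarded_by(&pool_lock);

static void example(void)
{
	void **p = stack_pools;		/* OK: loading the pointer is allowed. */
	void *e = stack_pools[0];	/* Warns: dereference needs pool_lock. */
	pool_offset = 0;		/* Warns: write needs pool_lock. */
	(void)p;
	(void)e;
}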
/* Statistics counters for debugfs. */
enum depot_counter_id {
* Initializes a new stack pool and updates the list of pools.
*/
static bool depot_init_pool(void **prealloc)
+ __must_hold(&pool_lock)
{
lockdep_assert_held(&pool_lock);
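/*
 * Aside: what __must_hold buys on top of the runtime
 * lockdep_assert_held() it sits next to: lockdep catches a missing lock
 * only when the code path actually runs, while the annotation lets the
 * compiler flag the offending call site outright. A hedged sketch with
 * assumed macro spellings over Clang's attributes, not the kernel's
 * definitions; toy_init_pool() is a hypothetical stand-in for
 * depot_init_pool().
 */
#include <stdbool.h>

#define __must_hold(l)	__attribute__((requires_capability(l)))

struct __attribute__((capability("mutex"))) toy_lock { int unused; };
static struct toy_lock pool_lock;

void toy_lock_acquire(struct toy_lock *l) __attribute__((acquire_capability(l)));
void toy_lock_release(struct toy_lock *l) __attribute__((release_capability(l)));

bool toy_init_pool(void **prealloc) __must_hold(&pool_lock);

static void good_caller(void **prealloc)
{
	toy_lock_acquire(&pool_lock);
	toy_init_pool(prealloc);	/* OK: pool_lock is held here. */
	toy_lock_release(&pool_lock);
}

static void bad_caller(void **prealloc)
{
	toy_init_pool(prealloc);	/* Warns: requires pool_lock. */
}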
/* Keeps the preallocated memory to be used for a new stack depot pool. */
static void depot_keep_new_pool(void **prealloc)
+ __must_hold(&pool_lock)
{
lockdep_assert_held(&pool_lock);
* the current pre-allocation.
*/
static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)
+ __must_hold(&pool_lock)
{
struct stack_record *stack;
void *current_pool;
/* Try to find next free usable entry from the freelist. */
static struct stack_record *depot_pop_free(void)
+ __must_hold(&pool_lock)
{
struct stack_record *stack;
/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)
+ __must_hold(&pool_lock)
{
struct stack_record *stack = NULL;
size_t record_size;
}
static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
+ __must_not_hold(&pool_lock)
{
const int pools_num_cached = READ_ONCE(pools_num);
union handle_parts parts = { .handle = handle };
return NULL;
}
- pool = stack_pools[pool_index];
+	/* @pool_index is either valid, or the user passed in a corrupted value. */
+ pool = context_unsafe(stack_pools[pool_index]);
if (WARN_ON(!pool))
return NULL;
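/*
 * Aside: context_unsafe() is the escape hatch for accesses the analysis
 * cannot prove safe but the code guarantees by other means: here the
 * pool index was unpacked from the handle and validated against a
 * READ_ONCE() snapshot of pools_num, and published pools are never
 * freed. One plausible shape for such an opt-out is a helper marked
 * with Clang's no_thread_safety_analysis attribute, sketched below; the
 * kernel's actual macro definition may differ.
 */
struct __attribute__((capability("mutex"))) toy_lock { int unused; };
static struct toy_lock pool_lock;
static void **stack_pools __attribute__((pt_guarded_by(&pool_lock)));

/* Analysis is suppressed for this helper's body only, nowhere else. */
static void *fetch_pool_unchecked(int idx)
	__attribute__((no_thread_safety_analysis))
{
	/* Caller is responsible for having bounds-checked @idx. */
	return stack_pools[idx];
}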
/* Links stack into the freelist. */
static void depot_free_stack(struct stack_record *stack)
+ __must_not_hold(&pool_lock)
{
unsigned long flags;
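/*
 * Aside: __must_not_hold is the inverse contract. depot_free_stack()
 * takes pool_lock itself, so entering with the lock already held would
 * self-deadlock; the annotation turns that mistake into a compile-time
 * diagnostic. A sketch assuming a mapping onto Clang's locks_excluded
 * attribute; toy_free_stack() is a hypothetical stand-in for
 * depot_free_stack().
 */
#define __must_not_hold(l)	__attribute__((locks_excluded(l)))

struct __attribute__((capability("mutex"))) toy_lock { int unused; };
static struct toy_lock pool_lock;

void toy_lock_acquire(struct toy_lock *l) __attribute__((acquire_capability(l)));
void toy_lock_release(struct toy_lock *l) __attribute__((release_capability(l)));

static void toy_free_stack(void) __must_not_hold(&pool_lock)
{
	toy_lock_acquire(&pool_lock);
	/* ... unlink the record and update counters ... */
	toy_lock_release(&pool_lock);
}

static void caller(void)
{
	toy_lock_acquire(&pool_lock);
	toy_free_stack();		/* Warns: pool_lock must not be held. */
	toy_lock_release(&pool_lock);
}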