// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain events which can
 * be later presented to the user. For example, KASAN needs to save alloc and
 * free stacks for each object, but storing two stack traces per object
 * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
 * that).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since
 * alloc and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot; they are stored one after another
 * in contiguously allocated chunks of memory.
 *
 * Author: Alexander Potapenko
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#define pr_fmt(fmt) "stackdepot: " fmt

#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* Pool size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - \
		STACK_ALLOC_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
#define STACK_ALLOC_POOLS_CAP 8192
#define STACK_ALLOC_MAX_POOLS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_POOLS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_POOLS_CAP)

/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 pool_index	: STACK_ALLOC_INDEX_BITS;
		u32 offset	: STACK_ALLOC_OFFSET_BITS;
		u32 valid	: STACK_ALLOC_NULL_PROTECTION_BITS;
		u32 extra	: STACK_DEPOT_EXTRA_BITS;
	};
};

struct stack_record {
	struct stack_record *next;	/* Link in the hash table */
	u32 hash;			/* Hash in the hash table */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[];	/* Variable-sized array of entries */
};

static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata =
	IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;

/* Use one hash table bucket per 16 KB of memory. */
#define STACK_HASH_TABLE_SCALE 14
/* Limit the number of buckets between 4K and 1M. */
#define STACK_BUCKET_NUMBER_ORDER_MIN 12
#define STACK_BUCKET_NUMBER_ORDER_MAX 20
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c

/* Hash table of pointers to stored stack traces. */
static struct stack_record **stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;

/* Array of memory regions that store stack traces. */
static void *stack_pools[STACK_ALLOC_MAX_POOLS];
/* Currently used pool in stack_pools. */
static int pool_index;
/* Offset to the unused space in the currently used pool. */
static size_t pool_offset;
/* Lock that protects the variables above. */
static DEFINE_RAW_SPINLOCK(pool_lock);
/* Whether the next pool is initialized. */
static int next_pool_inited;
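
/*
 * Layout note (illustrative, assuming 4 KB pages and the default
 * STACK_DEPOT_EXTRA_BITS value of 5): the handle_parts union above splits a
 * 32-bit handle into 16 pool-index bits, 10 offset bits (counted in 16-byte
 * units, see STACK_ALLOC_ALIGN), 1 valid bit and 5 extra bits.
 * stack_depot_fetch() below reverses this packing.
 */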

static int __init disable_stack_depot(char *str)
{
	int ret;

	ret = kstrtobool(str, &stack_depot_disabled);
	if (!ret && stack_depot_disabled) {
		pr_info("disabled\n");
		stack_table = NULL;
	}
	return 0;
}
early_param("stack_depot_disable", disable_stack_depot);

void __init stack_depot_request_early_init(void)
{
	/* Too late to request early init now. */
	WARN_ON(__stack_depot_early_init_passed);

	__stack_depot_early_init_requested = true;
}

/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
	unsigned long entries = 0;

	/* This function must be called only once, from mm_init(). */
	if (WARN_ON(__stack_depot_early_init_passed))
		return 0;
	__stack_depot_early_init_passed = true;

	/*
	 * If KASAN is enabled, use the maximum order: KASAN is frequently
	 * used in fuzzing scenarios, which leads to a large number of
	 * different stack traces being stored in stack depot.
	 */
	if (kasan_enabled() && !stack_bucket_number_order)
		stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;

	if (!__stack_depot_early_init_requested || stack_depot_disabled)
		return 0;

	/*
	 * If stack_bucket_number_order is not set, leave entries as 0 to rely
	 * on the automatic calculations performed by alloc_large_system_hash.
	 */
	if (stack_bucket_number_order)
		entries = 1UL << stack_bucket_number_order;
	pr_info("allocating hash table via alloc_large_system_hash\n");
	stack_table = alloc_large_system_hash("stackdepot",
						sizeof(struct stack_record *),
						entries,
						STACK_HASH_TABLE_SCALE,
						HASH_EARLY | HASH_ZERO,
						NULL,
						&stack_hash_mask,
						1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
						1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disabled = true;
		return -ENOMEM;
	}

	return 0;
}

/* Allocates a hash table via kvcalloc. Can be used after boot. */
int stack_depot_init(void)
{
	static DEFINE_MUTEX(stack_depot_init_mutex);
	unsigned long entries;
	int ret = 0;

	mutex_lock(&stack_depot_init_mutex);

	if (stack_depot_disabled || stack_table)
		goto out_unlock;

	/*
	 * Similarly to stack_depot_early_init, use stack_bucket_number_order
	 * if assigned, and rely on automatic scaling otherwise.
	 */
	if (stack_bucket_number_order) {
		entries = 1UL << stack_bucket_number_order;
	} else {
		int scale = STACK_HASH_TABLE_SCALE;

		entries = nr_free_buffer_pages();
		entries = roundup_pow_of_two(entries);

		if (scale > PAGE_SHIFT)
			entries >>= (scale - PAGE_SHIFT);
		else
			entries <<= (PAGE_SHIFT - scale);
	}

	if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
	if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;

	pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
	stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
	if (!stack_table) {
		pr_err("hash table allocation failed, disabling\n");
		stack_depot_disabled = true;
		ret = -ENOMEM;
		goto out_unlock;
	}
	stack_hash_mask = entries - 1;

out_unlock:
	mutex_unlock(&stack_depot_init_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
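
/*
 * Illustrative usage sketch (not part of this file): a hypothetical built-in
 * user makes sure the hash table exists before its first stack_depot_save().
 * my_tool_init() is a made-up name; users that know at early boot time that
 * they will need stack depot would instead call
 * stack_depot_request_early_init() before mm_init().
 */
static int __init __maybe_unused my_tool_init(void)
{
	/* Allocates the hash table via kvcalloc() unless it already exists. */
	int ret = stack_depot_init();

	if (ret)
		return ret;
	/* It is now safe to call stack_depot_save() and stack_depot_fetch(). */
	return 0;
}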

static bool init_stack_pool(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_pool_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_pool_inited))
		return true;
	if (stack_pools[pool_index] == NULL) {
		stack_pools[pool_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot pool, do not touch the next one. */
		if (pool_index + 1 < STACK_ALLOC_MAX_POOLS) {
			stack_pools[pool_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release pairs with smp_load_acquire() from
		 * |next_pool_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_pool_inited, 1);
	}
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(pool_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(pool_index + 1 >= STACK_ALLOC_MAX_POOLS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		pool_index++;
		pool_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_pool_inited| in stack_depot_save() and
		 * init_stack_pool().
		 */
		if (pool_index + 1 < STACK_ALLOC_MAX_POOLS)
			smp_store_release(&next_pool_inited, 0);
	}
	init_stack_pool(prealloc);
	if (stack_pools[pool_index] == NULL)
		return NULL;

	stack = stack_pools[pool_index] + pool_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.pool_index = pool_index;
	stack->handle.offset = pool_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	stack->handle.extra = 0;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	pool_offset += required_size;

	return stack;
}

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}

/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline int stackdepot_memcmp(const unsigned long *u1,
				    const unsigned long *u2, unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					      unsigned long *entries, int size,
					      u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * __stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @extra_bits:		Flags to store in unused bits of depot_stack_handle_t
 * @alloc_flags:	Allocation gfp flags
 * @can_alloc:		Allocate stack pools (increased chance of failure if false)
 *
 * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc
 * is %true, stack depot is allowed to replenish the stack pools in case no
 * space is left (allocates using GFP flags of @alloc_flags). If @can_alloc is
 * %false, avoids any allocations and will fail if no space is left to store
 * the stack trace.
 *
 * If the stack trace in @entries is from an interrupt, only the portion up to
 * interrupt entry is saved.
 *
 * Additional opaque flags can be passed in @extra_bits, stored in the unused
 * bits of the stack handle, and retrieved using stack_depot_get_extra_bits()
 * without calling stack_depot_fetch().
 *
 * Context: Any context, but setting @can_alloc to %false is required if
 *          alloc_pages() cannot be used from the current context. Currently
 *          this is the case from contexts where neither %GFP_ATOMIC nor
 *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
					unsigned int nr_entries,
					unsigned int extra_bits,
					gfp_t alloc_flags, bool can_alloc)
{
	struct stack_record *found = NULL, **bucket;
	union handle_parts retval = { .handle = 0 };
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	/*
	 * If this stack trace is from an interrupt, including anything before
	 * interrupt entry usually leads to unbounded stackdepot growth.
	 *
	 * Because use of filter_irq_stacks() is a requirement to ensure
	 * stackdepot can efficiently deduplicate interrupt stacks, always
	 * call filter_irq_stacks() here to simplify all callers' use of
	 * stackdepot.
	 */
	nr_entries = filter_irq_stacks(entries, nr_entries);

	if (unlikely(nr_entries == 0) || stack_depot_disabled)
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & stack_hash_mask];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack pool needs to be
	 * initialized. If so, allocate the memory - we won't be able to do
	 * that under the lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_pool_inited| in depot_alloc_stack() and init_stack_pool().
	 */
	if (unlikely(can_alloc && !smp_load_acquire(&next_pool_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in
		 * atomic contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries, hash, &prealloc);

		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_pool(&prealloc));
	}

	raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval.handle = found->handle.handle;
fast_exit:
	retval.extra = extra_bits;

	return retval.handle;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Context: Contexts where allocations via alloc_pages() are allowed.
 *          See __stack_depot_save() for more details.
 *
 * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
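
/*
 * Illustrative usage sketch (not part of this file): a hypothetical debugging
 * tool captures the current call chain and keeps only the 32-bit handle.
 * record_current_stack() and the 16-entry buffer size are made-up examples.
 */
static depot_stack_handle_t __maybe_unused record_current_stack(gfp_t flags)
{
	unsigned long entries[16];
	unsigned int nr_entries;

	/* Capture the current stack trace, skipping this frame. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* Deduplicate and store it; 0 is returned on failure. */
	return stack_depot_save(entries, nr_entries, flags);
}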

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:		Stack depot handle which was returned from
 *			stack_depot_save().
 * @entries:		Pointer to store the entries address
 *
 * Return: The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *pool;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (!handle)
		return 0;

	if (parts.pool_index > pool_index) {
		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
			parts.pool_index, pool_index, handle);
		return 0;
	}
	pool = stack_pools[parts.pool_index];
	if (!pool)
		return 0;
	stack = pool + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

/**
 * stack_depot_print - print stack entries from a depot
 *
 * @stack:		Stack depot handle which was returned from
 *			stack_depot_save().
 *
 */
void stack_depot_print(depot_stack_handle_t stack)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(stack, &entries);
	if (nr_entries > 0)
		stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);

/**
 * stack_depot_snprint - print stack entries from a depot into a buffer
 *
 * @handle:	Stack depot handle which was returned from
 *		stack_depot_save().
 * @buf:	Pointer to the print buffer
 *
 * @size:	Size of the print buffer
 *
 * @spaces:	Number of leading spaces to print
 *
 * Return:	Number of bytes printed.
 */
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
			int spaces)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
						spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);

unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };

	return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);
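
/*
 * Illustrative usage sketch (not part of this file): a hypothetical caller
 * walks the frames of a previously saved trace one by one instead of using
 * stack_depot_print(). dump_saved_frames() is a made-up example name.
 */
static void __maybe_unused dump_saved_frames(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr_entries, i;

	/* nr_entries is 0 and entries is NULL for an invalid handle. */
	nr_entries = stack_depot_fetch(handle, &entries);
	for (i = 0; i < nr_entries; i++)
		pr_info(" %pS\n", (void *)entries[i]);
}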