author    Andrey Konovalov <andreyknvl@google.com>    2023-02-10 22:16:05 +0100
committer Andrew Morton <akpm@linux-foundation.org>   2023-02-17 05:43:52 +0100
commit b232b9995a6dbaafe19d07d81acc039bc84bd569 (patch)
tree   e047fb07167413d14509a7444e46fbd5f20f5342 /lib
parent lib/stackdepot: annotate racy pool_index accesses (diff)
lib/stackdepot: various comments clean-ups
Clean up comments in include/linux/stackdepot.h and lib/stackdepot.c:

1. Rework the initialization comment in stackdepot.h.
2. Rework the header comment in stackdepot.c.
3. Various clean-ups for other comments.

Also adjust whitespaces for find_stack and depot_alloc_stack call sites.

No functional changes.

Link: https://lkml.kernel.org/r/5836231b7954355e2311fc9b5870f697ea8e1f7d.1676063693.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
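For orientation, here is a minimal, hypothetical sketch of how a subsystem uses the interfaces whose kernel-doc this patch reworks. The function names record_current_stack/print_recorded_stack and the 64-entry buffer are illustrative and not part of the patch; the stack depot and stacktrace calls match the signatures documented below.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t record_current_stack(void)
{
	unsigned long entries[64];
	unsigned int nr_entries;

	/* Capture the current task's stack trace into a local array. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	/*
	 * Store the trace in stack depot; identical traces share one
	 * record. Returns 0 on failure.
	 */
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static void print_recorded_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr_entries;

	/* Fetch returns a pointer into the depot; nothing is copied. */
	nr_entries = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr_entries, 0);
}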
Diffstat (limited to 'lib')
-rw-r--r--  lib/stackdepot.c | 120
1 file changed, 59 insertions(+), 61 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index b625090890e1..aaa5d2f1abc2 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -1,22 +1,26 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Generic stack depot for storing stack traces.
+ * Stack depot - a stack trace storage that avoids duplication.
*
- * Some debugging tools need to save stack traces of certain events which can
- * be later presented to the user. For example, KASAN needs to safe alloc and
- * free stacks for each object, but storing two stack traces per object
- * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
- * that).
+ * Stack depot is intended to be used by subsystems that need to store and
+ * later retrieve many potentially duplicated stack traces without wasting
+ * memory.
*
- * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
- * and free stacks repeat a lot, we save about 100x space.
- * Stacks are never removed from depot, so we store them contiguously one after
- * another in a contiguous memory allocation.
+ * For example, KASAN needs to save allocation and free stack traces for each
+ * object. Storing two stack traces per object requires a lot of memory (e.g.
+ * SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
+ * stack traces often repeat, using stack depot allows to save about 100x space.
+ *
+ * Internally, stack depot maintains a hash table of unique stacktraces. The
+ * stack traces themselves are stored contiguously one after another in a set
+ * of separate page allocations.
+ *
+ * Stack traces are never removed from stack depot.
*
* Author: Alexander Potapenko <glider@google.com>
* Copyright (C) 2016 Google, Inc.
*
- * Based on code by Dmitry Chernenkov.
+ * Based on the code by Dmitry Chernenkov.
*/
#define pr_fmt(fmt) "stackdepot: " fmt
@@ -50,7 +54,7 @@
(((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
(1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
-/* The compact structure to store the reference to stacks. */
+/* Compact structure that stores a reference to a stack. */
union handle_parts {
depot_stack_handle_t handle;
struct {
@@ -62,11 +66,11 @@ union handle_parts {
};
struct stack_record {
- struct stack_record *next; /* Link in the hashtable */
- u32 hash; /* Hash in the hastable */
- u32 size; /* Number of frames in the stack */
+ struct stack_record *next; /* Link in the hash table */
+ u32 hash; /* Hash in the hash table */
+ u32 size; /* Number of stored frames */
union handle_parts handle;
- unsigned long entries[]; /* Variable-sized array of entries. */
+ unsigned long entries[]; /* Variable-sized array of frames */
};
static bool stack_depot_disabled;
@@ -317,7 +321,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
return stack;
}
-/* Calculate hash for a stack */
+/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
return jhash2((u32 *)entries,
@@ -325,9 +329,9 @@ static inline u32 hash_stack(unsigned long *entries, unsigned int size)
STACK_HASH_SEED);
}
-/* Use our own, non-instrumented version of memcmp().
- *
- * We actually don't care about the order, just the equality.
+/*
+ * Non-instrumented version of memcmp().
+ * Does not check the lexicographical order, only the equality.
*/
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
@@ -340,7 +344,7 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
return 0;
}
-/* Find a stack that is equal to the one stored in entries in the hash */
+/* Finds a stack in a bucket of the hash table. */
static inline struct stack_record *find_stack(struct stack_record *bucket,
unsigned long *entries, int size,
u32 hash)
@@ -357,27 +361,27 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
}
/**
- * __stack_depot_save - Save a stack trace from an array
+ * __stack_depot_save - Save a stack trace to stack depot
*
- * @entries: Pointer to storage array
- * @nr_entries: Size of the storage array
- * @alloc_flags: Allocation gfp flags
+ * @entries: Pointer to the stack trace
+ * @nr_entries: Number of frames in the stack
+ * @alloc_flags: Allocation GFP flags
* @can_alloc: Allocate stack pools (increased chance of failure if false)
*
* Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
- * %true, is allowed to replenish the stack pool in case no space is left
+ * %true, stack depot can replenish the stack pools in case no space is left
* (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
- * any allocations and will fail if no space is left to store the stack trace.
+ * any allocations and fails if no space is left to store the stack trace.
*
- * If the stack trace in @entries is from an interrupt, only the portion up to
- * interrupt entry is saved.
+ * If the provided stack trace comes from the interrupt context, only the part
+ * up to the interrupt entry is saved.
*
* Context: Any context, but setting @can_alloc to %false is required if
* alloc_pages() cannot be used from the current context. Currently
- * this is the case from contexts where neither %GFP_ATOMIC nor
+ * this is the case for contexts where neither %GFP_ATOMIC nor
* %GFP_NOWAIT can be used (NMI, raw_spin_lock).
*
- * Return: The handle of the stack struct stored in depot, 0 on failure.
+ * Return: Handle of the stack struct stored in depot, 0 on failure
*/
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
@@ -392,11 +396,11 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
/*
* If this stack trace is from an interrupt, including anything before
- * interrupt entry usually leads to unbounded stackdepot growth.
+ * interrupt entry usually leads to unbounded stack depot growth.
*
- * Because use of filter_irq_stacks() is a requirement to ensure
- * stackdepot can efficiently deduplicate interrupt stacks, always
- * filter_irq_stacks() to simplify all callers' use of stackdepot.
+ * Since use of filter_irq_stacks() is a requirement to ensure stack
+ * depot can efficiently deduplicate interrupt stacks, always
+ * filter_irq_stacks() to simplify all callers' use of stack depot.
*/
nr_entries = filter_irq_stacks(entries, nr_entries);
@@ -411,8 +415,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
* The smp_load_acquire() here pairs with smp_store_release() to
* |bucket| below.
*/
- found = find_stack(smp_load_acquire(bucket), entries,
- nr_entries, hash);
+ found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
if (found)
goto exit;
@@ -441,7 +444,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
found = find_stack(*bucket, entries, nr_entries, hash);
if (!found) {
- struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);
+ struct stack_record *new =
+ depot_alloc_stack(entries, nr_entries, hash, &prealloc);
if (new) {
new->next = *bucket;
@@ -454,8 +458,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
}
} else if (prealloc) {
/*
- * We didn't need to store this stack trace, but let's keep
- * the preallocated memory for the future.
+ * Stack depot already contains this stack trace, but let's
+ * keep the preallocated memory for the future.
*/
depot_init_pool(&prealloc);
}
@@ -463,7 +467,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
if (prealloc) {
- /* Nobody used this memory, ok to free it. */
+ /* Stack depot didn't use this memory, free it. */
free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
}
if (found)
@@ -474,16 +478,16 @@ fast_exit:
EXPORT_SYMBOL_GPL(__stack_depot_save);
/**
- * stack_depot_save - Save a stack trace from an array
+ * stack_depot_save - Save a stack trace to stack depot
*
- * @entries: Pointer to storage array
- * @nr_entries: Size of the storage array
- * @alloc_flags: Allocation gfp flags
+ * @entries: Pointer to the stack trace
+ * @nr_entries: Number of frames in the stack
+ * @alloc_flags: Allocation GFP flags
*
* Context: Contexts where allocations via alloc_pages() are allowed.
* See __stack_depot_save() for more details.
*
- * Return: The handle of the stack struct stored in depot, 0 on failure.
+ * Return: Handle of the stack trace stored in depot, 0 on failure
*/
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
@@ -494,13 +498,12 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
EXPORT_SYMBOL_GPL(stack_depot_save);
/**
- * stack_depot_fetch - Fetch stack entries from a depot
+ * stack_depot_fetch - Fetch a stack trace from stack depot
*
- * @handle: Stack depot handle which was returned from
- * stack_depot_save().
- * @entries: Pointer to store the entries address
+ * @handle: Stack depot handle returned from stack_depot_save()
+ * @entries: Pointer to store the address of the stack trace
*
- * Return: The number of trace entries for this depot.
+ * Return: Number of frames for the fetched stack
*/
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
@@ -535,11 +538,9 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
EXPORT_SYMBOL_GPL(stack_depot_fetch);
/**
- * stack_depot_print - print stack entries from a depot
- *
- * @stack: Stack depot handle which was returned from
- * stack_depot_save().
+ * stack_depot_print - Print a stack trace from stack depot
*
+ * @stack: Stack depot handle returned from stack_depot_save()
*/
void stack_depot_print(depot_stack_handle_t stack)
{
@@ -553,17 +554,14 @@ void stack_depot_print(depot_stack_handle_t stack)
EXPORT_SYMBOL_GPL(stack_depot_print);
/**
- * stack_depot_snprint - print stack entries from a depot into a buffer
+ * stack_depot_snprint - Print a stack trace from stack depot into a buffer
*
- * @handle: Stack depot handle which was returned from
- * stack_depot_save().
+ * @handle: Stack depot handle returned from stack_depot_save()
* @buf: Pointer to the print buffer
- *
* @size: Size of the print buffer
- *
* @spaces: Number of leading spaces to print
*
- * Return: Number of bytes printed.
+ * Return: Number of bytes printed
*/
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces)
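For reference, a hedged usage sketch of stack_depot_snprint() as documented above; the handle is assumed to come from an earlier stack_depot_save() call, and the buffer size is illustrative:

	char buf[256];
	int len;

	/* Render the stored trace into buf, indenting each frame by 4 spaces. */
	len = stack_depot_snprint(handle, buf, sizeof(buf), 4);
	if (len > 0)
		pr_info("%s", buf);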