author		Andrey Konovalov <andreyknvl@google.com>	2023-02-10 22:15:49 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2023-02-17 05:43:48 +0100
commit		15ef6a982f40a2b53b057dad24f00c3fb43e7e70 (patch)
tree		326dd55f846bdf21a8637a2264ff71d849e4dc6b	/lib/stackdepot.c
parent		mm: fix typo in __vm_enough_memory warning (diff)
download	linux-15ef6a982f40a2b53b057dad24f00c3fb43e7e70.tar.xz
		linux-15ef6a982f40a2b53b057dad24f00c3fb43e7e70.zip
lib/stackdepot: put functions in logical order
Patch series "lib/stackdepot: fixes and clean-ups", v2.

A set of fixes, comments, and clean-ups I came up with while reading the
stack depot code.

This patch (of 18):

Put stack depot functions' declarations and definitions in a more logical
order:

1. Functions that save stack traces into stack depot.
2. Functions that fetch and print stack traces.
3. stack_depot_get_extra_bits that operates on stack depot handles and
   does not interact with the stack depot storage.

No functional changes.

Link: https://lkml.kernel.org/r/cover.1676063693.git.andreyknvl@google.com
Link: https://lkml.kernel.org/r/daca1319b665d826b94c596b992a8d8117846147.1676063693.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
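
[Editor's note] The three groups above correspond to the depot's public entry points, all of which appear in the diff below. The following sketch is a hypothetical caller, not part of this patch; it only illustrates the save -> fetch/print order the reordered file now follows, using the function names moved in this diff.

	/*
	 * Hypothetical caller of the stack depot API; illustrative only.
	 */
	#include <linux/gfp.h>
	#include <linux/kernel.h>
	#include <linux/stackdepot.h>
	#include <linux/stacktrace.h>

	static depot_stack_handle_t record_stack(gfp_t flags)
	{
		unsigned long entries[16];
		unsigned int nr_entries;

		/* Group 1: save the current stack trace, getting a compact handle. */
		nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
		return stack_depot_save(entries, nr_entries, flags);
	}

	static void report_stack(depot_stack_handle_t handle)
	{
		unsigned long *entries;
		unsigned int nr_entries;

		/* Group 2: fetch the entries back from the handle and print them. */
		nr_entries = stack_depot_fetch(handle, &entries);
		if (nr_entries)
			stack_trace_print(entries, nr_entries, 0);

		/* Equivalently: stack_depot_print(handle); */
	}
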
Diffstat
-rw-r--r--	lib/stackdepot.c	314
1 file changed, 157 insertions(+), 157 deletions(-)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 79e894cf8406..4bfaf3bce619 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -79,84 +79,6 @@ static int next_slab_inited;
static size_t depot_offset;
static DEFINE_RAW_SPINLOCK(depot_lock);
-unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
-{
- union handle_parts parts = { .handle = handle };
-
- return parts.extra;
-}
-EXPORT_SYMBOL(stack_depot_get_extra_bits);
-
-static bool init_stack_slab(void **prealloc)
-{
- if (!*prealloc)
- return false;
- /*
- * This smp_load_acquire() pairs with smp_store_release() to
- * |next_slab_inited| below and in depot_alloc_stack().
- */
- if (smp_load_acquire(&next_slab_inited))
- return true;
- if (stack_slabs[depot_index] == NULL) {
- stack_slabs[depot_index] = *prealloc;
- *prealloc = NULL;
- } else {
- /* If this is the last depot slab, do not touch the next one. */
- if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
- stack_slabs[depot_index + 1] = *prealloc;
- *prealloc = NULL;
- }
- /*
- * This smp_store_release pairs with smp_load_acquire() from
- * |next_slab_inited| above and in stack_depot_save().
- */
- smp_store_release(&next_slab_inited, 1);
- }
- return true;
-}
-
-/* Allocation of a new stack in raw storage */
-static struct stack_record *
-depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
-{
- struct stack_record *stack;
- size_t required_size = struct_size(stack, entries, size);
-
- required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
-
- if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
- if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
- WARN_ONCE(1, "Stack depot reached limit capacity");
- return NULL;
- }
- depot_index++;
- depot_offset = 0;
- /*
- * smp_store_release() here pairs with smp_load_acquire() from
- * |next_slab_inited| in stack_depot_save() and
- * init_stack_slab().
- */
- if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
- smp_store_release(&next_slab_inited, 0);
- }
- init_stack_slab(prealloc);
- if (stack_slabs[depot_index] == NULL)
- return NULL;
-
- stack = stack_slabs[depot_index] + depot_offset;
-
- stack->hash = hash;
- stack->size = size;
- stack->handle.slabindex = depot_index;
- stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
- stack->handle.valid = 1;
- stack->handle.extra = 0;
- memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
- depot_offset += required_size;
-
- return stack;
-}
-
/* one hash table bucket entry per 16kB of memory */
#define STACK_HASH_SCALE 14
/* limited between 4k and 1M buckets */
@@ -270,6 +192,76 @@ int stack_depot_init(void)
}
EXPORT_SYMBOL_GPL(stack_depot_init);
+static bool init_stack_slab(void **prealloc)
+{
+ if (!*prealloc)
+ return false;
+ /*
+ * This smp_load_acquire() pairs with smp_store_release() to
+ * |next_slab_inited| below and in depot_alloc_stack().
+ */
+ if (smp_load_acquire(&next_slab_inited))
+ return true;
+ if (stack_slabs[depot_index] == NULL) {
+ stack_slabs[depot_index] = *prealloc;
+ *prealloc = NULL;
+ } else {
+ /* If this is the last depot slab, do not touch the next one. */
+ if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
+ stack_slabs[depot_index + 1] = *prealloc;
+ *prealloc = NULL;
+ }
+ /*
+ * This smp_store_release pairs with smp_load_acquire() from
+ * |next_slab_inited| above and in stack_depot_save().
+ */
+ smp_store_release(&next_slab_inited, 1);
+ }
+ return true;
+}
+
+/* Allocation of a new stack in raw storage */
+static struct stack_record *
+depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
+{
+ struct stack_record *stack;
+ size_t required_size = struct_size(stack, entries, size);
+
+ required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
+
+ if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
+ if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
+ WARN_ONCE(1, "Stack depot reached limit capacity");
+ return NULL;
+ }
+ depot_index++;
+ depot_offset = 0;
+ /*
+ * smp_store_release() here pairs with smp_load_acquire() from
+ * |next_slab_inited| in stack_depot_save() and
+ * init_stack_slab().
+ */
+ if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
+ smp_store_release(&next_slab_inited, 0);
+ }
+ init_stack_slab(prealloc);
+ if (stack_slabs[depot_index] == NULL)
+ return NULL;
+
+ stack = stack_slabs[depot_index] + depot_offset;
+
+ stack->hash = hash;
+ stack->size = size;
+ stack->handle.slabindex = depot_index;
+ stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
+ stack->handle.valid = 1;
+ stack->handle.extra = 0;
+ memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
+ depot_offset += required_size;
+
+ return stack;
+}
+
/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
@@ -310,85 +302,6 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
}
/**
- * stack_depot_snprint - print stack entries from a depot into a buffer
- *
- * @handle: Stack depot handle which was returned from
- * stack_depot_save().
- * @buf: Pointer to the print buffer
- *
- * @size: Size of the print buffer
- *
- * @spaces: Number of leading spaces to print
- *
- * Return: Number of bytes printed.
- */
-int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
- int spaces)
-{
- unsigned long *entries;
- unsigned int nr_entries;
-
- nr_entries = stack_depot_fetch(handle, &entries);
- return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
- spaces) : 0;
-}
-EXPORT_SYMBOL_GPL(stack_depot_snprint);
-
-/**
- * stack_depot_print - print stack entries from a depot
- *
- * @stack: Stack depot handle which was returned from
- * stack_depot_save().
- *
- */
-void stack_depot_print(depot_stack_handle_t stack)
-{
- unsigned long *entries;
- unsigned int nr_entries;
-
- nr_entries = stack_depot_fetch(stack, &entries);
- if (nr_entries > 0)
- stack_trace_print(entries, nr_entries, 0);
-}
-EXPORT_SYMBOL_GPL(stack_depot_print);
-
-/**
- * stack_depot_fetch - Fetch stack entries from a depot
- *
- * @handle: Stack depot handle which was returned from
- * stack_depot_save().
- * @entries: Pointer to store the entries address
- *
- * Return: The number of trace entries for this depot.
- */
-unsigned int stack_depot_fetch(depot_stack_handle_t handle,
- unsigned long **entries)
-{
- union handle_parts parts = { .handle = handle };
- void *slab;
- size_t offset = parts.offset << STACK_ALLOC_ALIGN;
- struct stack_record *stack;
-
- *entries = NULL;
- if (!handle)
- return 0;
-
- if (parts.slabindex > depot_index) {
- WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
- parts.slabindex, depot_index, handle);
- return 0;
- }
- slab = stack_slabs[parts.slabindex];
- if (!slab)
- return 0;
- stack = slab + offset;
-
- *entries = stack->entries;
- return stack->size;
-}
-EXPORT_SYMBOL_GPL(stack_depot_fetch);
-
-/**
* __stack_depot_save - Save a stack trace from an array
*
* @entries: Pointer to storage array
@@ -533,3 +446,90 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
+
+/**
+ * stack_depot_fetch - Fetch stack entries from a depot
+ *
+ * @handle: Stack depot handle which was returned from
+ * stack_depot_save().
+ * @entries: Pointer to store the entries address
+ *
+ * Return: The number of trace entries for this depot.
+ */
+unsigned int stack_depot_fetch(depot_stack_handle_t handle,
+ unsigned long **entries)
+{
+ union handle_parts parts = { .handle = handle };
+ void *slab;
+ size_t offset = parts.offset << STACK_ALLOC_ALIGN;
+ struct stack_record *stack;
+
+ *entries = NULL;
+ if (!handle)
+ return 0;
+
+ if (parts.slabindex > depot_index) {
+ WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
+ parts.slabindex, depot_index, handle);
+ return 0;
+ }
+ slab = stack_slabs[parts.slabindex];
+ if (!slab)
+ return 0;
+ stack = slab + offset;
+
+ *entries = stack->entries;
+ return stack->size;
+}
+EXPORT_SYMBOL_GPL(stack_depot_fetch);
+
+/**
+ * stack_depot_print - print stack entries from a depot
+ *
+ * @stack: Stack depot handle which was returned from
+ * stack_depot_save().
+ *
+ */
+void stack_depot_print(depot_stack_handle_t stack)
+{
+ unsigned long *entries;
+ unsigned int nr_entries;
+
+ nr_entries = stack_depot_fetch(stack, &entries);
+ if (nr_entries > 0)
+ stack_trace_print(entries, nr_entries, 0);
+}
+EXPORT_SYMBOL_GPL(stack_depot_print);
+
+/**
+ * stack_depot_snprint - print stack entries from a depot into a buffer
+ *
+ * @handle: Stack depot handle which was returned from
+ * stack_depot_save().
+ * @buf: Pointer to the print buffer
+ *
+ * @size: Size of the print buffer
+ *
+ * @spaces: Number of leading spaces to print
+ *
+ * Return: Number of bytes printed.
+ */
+int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
+ int spaces)
+{
+ unsigned long *entries;
+ unsigned int nr_entries;
+
+ nr_entries = stack_depot_fetch(handle, &entries);
+ return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
+ spaces) : 0;
+}
+EXPORT_SYMBOL_GPL(stack_depot_snprint);
+
+unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
+{
+ union handle_parts parts = { .handle = handle };
+
+ return parts.extra;
+}
+EXPORT_SYMBOL(stack_depot_get_extra_bits);
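
[Editor's note] The file now ends with stack_depot_get_extra_bits(), group 3 in the commit message: it only unpacks bits that a caller previously stored in the handle via the extra_bits argument of __stack_depot_save() (passed as 0 by stack_depot_save()), and never reads depot storage. A minimal, hypothetical sketch of that round trip, assuming the tag fits in the handle's extra-bits field:

	/* Hypothetical illustration of the handle's extra bits; not part of this patch. */
	static depot_stack_handle_t save_tagged_stack(unsigned long *entries,
						      unsigned int nr_entries,
						      unsigned int tag)
	{
		/* The tag is packed into the returned handle, not into depot storage. */
		return __stack_depot_save(entries, nr_entries, tag, GFP_KERNEL, true);
	}

	/* Later, stack_depot_get_extra_bits(handle) recovers the same tag. */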