author     Mark Rutland <mark.rutland@arm.com>   2022-04-27 19:31:27 +0200
committer  Kees Cook <keescook@chromium.org>     2022-05-08 10:33:09 +0200
commit     8111e67dee9ff774712cff8e34fba465c8361960 (patch)
tree       f7939bb13d63ab03d22651a35e73b17a703d212a  /kernel/stackleak.c
parent     lkdtm/stackleak: check stack boundaries (diff)
download   linux-8111e67dee9ff774712cff8e34fba465c8361960.tar.xz
           linux-8111e67dee9ff774712cff8e34fba465c8361960.zip
stackleak: add on/off stack variants
The stackleak_erase() code dynamically handles being on a task stack or another stack. In most cases, this is a fixed property of the caller, which the caller is aware of: an architecture might always return using the task stack, or might always return using a trampoline stack.

This patch adds stackleak_erase_on_task_stack() and stackleak_erase_off_task_stack() functions which callers can use to avoid the on_thread_stack() check and the associated redundant work when the calling stack is known. The existing stackleak_erase() is retained as a safe default.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Popov <alex.popov@linux.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220427173128.2603085-13-mark.rutland@arm.com
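For illustration, a minimal sketch of how callers might choose between the variants. The exit-path functions below are hypothetical stand-ins for an architecture's return-to-userspace code (which in practice usually invokes these helpers from assembly); only the stackleak_erase*() prototypes correspond to the definitions added by this patch.

/*
 * Illustrative sketch only -- not part of this patch. The three
 * hypothetical_exit_*() functions are invented for this example.
 */
#include <linux/linkage.h>

/* Prototypes matching the definitions added in kernel/stackleak.c. */
asmlinkage void stackleak_erase(void);
asmlinkage void stackleak_erase_on_task_stack(void);
asmlinkage void stackleak_erase_off_task_stack(void);

/* An arch that always returns to userspace from the task stack can skip
 * the on_thread_stack() check entirely. */
static void hypothetical_exit_from_task_stack(void)
{
        stackleak_erase_on_task_stack();
}

/* An arch that returns via a separate trampoline/entry stack knows it is
 * off the task stack and can erase the whole used region up front. */
static void hypothetical_exit_from_trampoline_stack(void)
{
        stackleak_erase_off_task_stack();
}

/* When the calling stack is not known at build time, the original
 * stackleak_erase() remains the safe default. */
static void hypothetical_exit_generic(void)
{
        stackleak_erase();
}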
Diffstat (limited to 'kernel/stackleak.c')
-rw-r--r--  kernel/stackleak.c  |  35
1 file changed, 32 insertions(+), 3 deletions(-)
diff --git a/kernel/stackleak.c b/kernel/stackleak.c
index afd54b8e10b8..c2c33d2202e9 100644
--- a/kernel/stackleak.c
+++ b/kernel/stackleak.c
@@ -70,7 +70,7 @@ late_initcall(stackleak_sysctls_init);
 #define skip_erasing()  false
 #endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
 
-static __always_inline void __stackleak_erase(void)
+static __always_inline void __stackleak_erase(bool on_task_stack)
 {
         const unsigned long task_stack_low = stackleak_task_low_bound(current);
         const unsigned long task_stack_high = stackleak_task_high_bound(current);
@@ -96,7 +96,7 @@ static __always_inline void __stackleak_erase(void)
          * function has a fixed-size stack frame, and the current stack pointer
          * doesn't change while we write poison.
          */
-        if (on_thread_stack())
+        if (on_task_stack)
                 erase_high = current_stack_pointer;
         else
                 erase_high = task_stack_high;
@@ -110,12 +110,41 @@ static __always_inline void __stackleak_erase(void)
         current->lowest_stack = task_stack_high;
 }
 
+/*
+ * Erase and poison the portion of the task stack used since the last erase.
+ * Can be called from the task stack or an entry stack when the task stack is
+ * no longer in use.
+ */
 asmlinkage void noinstr stackleak_erase(void)
 {
         if (skip_erasing())
                 return;
 
-        __stackleak_erase();
+        __stackleak_erase(on_thread_stack());
+}
+
+/*
+ * Erase and poison the portion of the task stack used since the last erase.
+ * Can only be called from the task stack.
+ */
+asmlinkage void noinstr stackleak_erase_on_task_stack(void)
+{
+        if (skip_erasing())
+                return;
+
+        __stackleak_erase(true);
+}
+
+/*
+ * Erase and poison the portion of the task stack used since the last erase.
+ * Can only be called from a stack other than the task stack.
+ */
+asmlinkage void noinstr stackleak_erase_off_task_stack(void)
+{
+        if (skip_erasing())
+                return;
+
+        __stackleak_erase(false);
 }
 
 void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
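The fixed-argument wrappers avoid redundant work because __stackleak_erase() is __always_inline: a compile-time-constant on_task_stack lets the compiler drop the untaken branch. A standalone toy illustration of that pattern follows (userspace C, names invented for this example, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for __stackleak_erase(): once inlined into a wrapper that
 * passes a compile-time constant, the branch below folds away and only
 * one arm is emitted. */
static inline __attribute__((always_inline)) void erase(bool on_task_stack)
{
        if (on_task_stack)
                puts("erase up to the current stack pointer");
        else
                puts("erase the whole used region of the task stack");
}

/* Analogous to stackleak_erase_on_task_stack(): no runtime check remains. */
static void erase_on_task_stack(void)
{
        erase(true);
}

/* Analogous to stackleak_erase_off_task_stack(). */
static void erase_off_task_stack(void)
{
        erase(false);
}

int main(void)
{
        erase_on_task_stack();
        erase_off_task_stack();
        return 0;
}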