author    Linus Torvalds <torvalds@linux-foundation.org>  2024-11-26 01:09:48 +0100
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-11-26 01:09:48 +0100
commit    f5f4745a7f057b58c9728ee4e2c5d6d79f382fe7 (patch)
tree      7e5ddce694baa3a0d6c8d7a5b2f59e8778315ee9 /kernel
parent    Merge tag 'trace-rust-v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git... (diff)
parent    gdb: lx-symbols: do not error out on monolithic build (diff)
Merge tag 'mm-nonmm-stable-2024-11-24-02-05' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull non-MM updates from Andrew Morton:

 - The series "resource: A couple of cleanups" from Andy Shevchenko
   performs some cleanups in the resource management code

 - The series "Improve the copy of task comm" from Yafang Shao addresses
   possible race-induced overflows in the management of task_struct.comm[]

 - The series "Remove unnecessary header includes from
   {tools/}lib/list_sort.c" from Kuan-Wei Chiu adds some cleanups and a
   small fix to the list_sort library code and to its selftest

 - The series "Enhance min heap API with non-inline functions and
   optimizations", also from Kuan-Wei Chiu, optimizes and cleans up the
   min_heap library code

 - The series "nilfs2: Finish folio conversion" from Ryusuke Konishi
   finishes off nilfs2's folio conversion

 - The series "add detect count for hung tasks" from Lance Yang adds
   more userspace visibility into the hung-task detector's activity

 - Apart from that, singleton patches in many places - please see the
   individual changelogs for details

* tag 'mm-nonmm-stable-2024-11-24-02-05' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (71 commits)
  gdb: lx-symbols: do not error out on monolithic build
  kernel/reboot: replace sprintf() with sysfs_emit()
  lib: util_macros_kunit: add kunit test for util_macros.h
  util_macros.h: fix/rework find_closest() macros
  Improve consistency of '#error' directive messages
  ocfs2: fix uninitialized value in ocfs2_file_read_iter()
  hung_task: add docs for hung_task_detect_count
  hung_task: add detect count for hung tasks
  dma-buf: use atomic64_inc_return() in dma_buf_getfile()
  fs/proc/kcore.c: fix coccinelle reported ERROR instances
  resource: avoid unnecessary resource tree walking in __region_intersects()
  ocfs2: remove unused errmsg function and table
  ocfs2: cluster: fix a typo
  lib/scatterlist: use sg_phys() helper
  checkpatch: always parse orig_commit in fixes tag
  nilfs2: convert metadata aops from writepage to writepages
  nilfs2: convert nilfs_recovery_copy_block() to take a folio
  nilfs2: convert nilfs_page_count_clean_buffers() to take a folio
  nilfs2: remove nilfs_writepage
  nilfs2: convert checkpoint file to be folio-based
  ...
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditsc.c               |  6
-rw-r--r--  kernel/crash_core.c            |  6
-rw-r--r--  kernel/events/core.c           | 15
-rw-r--r--  kernel/events/hw_breakpoint.c  |  4
-rw-r--r--  kernel/hung_task.c             | 18
-rw-r--r--  kernel/kthread.c               |  2
-rw-r--r--  kernel/notifier.c              |  8
-rw-r--r--  kernel/reboot.c                | 15
-rw-r--r--  kernel/resource.c              | 66
-rw-r--r--  kernel/watchdog.c              |  3
10 files changed, 84 insertions, 59 deletions
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 91afdd0d036e..279ba5c420a4 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -2729,7 +2729,7 @@ void __audit_ptrace(struct task_struct *t)
context->target_uid = task_uid(t);
context->target_sessionid = audit_get_sessionid(t);
security_task_getlsmprop_obj(t, &context->target_ref);
- memcpy(context->target_comm, t->comm, TASK_COMM_LEN);
+ strscpy(context->target_comm, t->comm);
}
/**
@@ -2756,7 +2756,7 @@ int audit_signal_info_syscall(struct task_struct *t)
ctx->target_uid = t_uid;
ctx->target_sessionid = audit_get_sessionid(t);
security_task_getlsmprop_obj(t, &ctx->target_ref);
- memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN);
+ strscpy(ctx->target_comm, t->comm);
return 0;
}
@@ -2777,7 +2777,7 @@ int audit_signal_info_syscall(struct task_struct *t)
axp->target_uid[axp->pid_count] = t_uid;
axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t);
security_task_getlsmprop_obj(t, &axp->target_ref[axp->pid_count]);
- memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN);
+ strscpy(axp->target_comm[axp->pid_count], t->comm);
axp->pid_count++;
return 0;
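
[Editor's note] The switch from memcpy() to strscpy() copies at most the destination's size (inferred from the array type when the length argument is omitted) and always NUL-terminates, so a comm[] that changes concurrently can no longer leave an unterminated buffer behind. A rough, runnable userspace sketch of that bounded-copy contract (not the kernel's strscpy() implementation):

#include <stdio.h>
#include <string.h>

#define TASK_COMM_LEN 16

/* Bounded copy that always NUL-terminates; returns the copied length
 * or -1 on truncation -- roughly the contract strscpy() provides. */
static long bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (len == size) {		/* source would not fit with its NUL */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -1;
	}
	memcpy(dst, src, len + 1);	/* includes the terminating NUL */
	return (long)len;
}

int main(void)
{
	char comm[TASK_COMM_LEN];

	bounded_copy(comm, "very_long_task_name_here", sizeof(comm));
	printf("%s\n", comm);		/* always terminated, possibly truncated */
	return 0;
}
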
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index c1048893f4b6..078fe5bc5a74 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -505,7 +505,8 @@ int crash_check_hotplug_support(void)
crash_hotplug_lock();
/* Obtain lock while reading crash information */
if (!kexec_trylock()) {
- pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
+ if (!kexec_in_progress)
+ pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
crash_hotplug_unlock();
return 0;
}
@@ -547,7 +548,8 @@ static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu,
crash_hotplug_lock();
/* Obtain lock while changing crash information */
if (!kexec_trylock()) {
- pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
+ if (!kexec_in_progress)
+ pr_info("kexec_trylock() failed, kdump image may be inaccurate\n");
crash_hotplug_unlock();
return;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5d4a54f50826..065f9188b44a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3778,18 +3778,11 @@ static bool perf_less_group_idx(const void *l, const void *r, void __always_unus
return le->group_index < re->group_index;
}
-static void swap_ptr(void *l, void *r, void __always_unused *args)
-{
- void **lp = l, **rp = r;
-
- swap(*lp, *rp);
-}
-
DEFINE_MIN_HEAP(struct perf_event *, perf_event_min_heap);
static const struct min_heap_callbacks perf_min_heap = {
.less = perf_less_group_idx,
- .swp = swap_ptr,
+ .swp = NULL,
};
static void __heap_add(struct perf_event_min_heap *heap, struct perf_event *event)
@@ -3870,7 +3863,7 @@ static noinline int visit_groups_merge(struct perf_event_context *ctx,
perf_assert_pmu_disabled((*evt)->pmu_ctx->pmu);
}
- min_heapify_all(&event_heap, &perf_min_heap, NULL);
+ min_heapify_all_inline(&event_heap, &perf_min_heap, NULL);
while (event_heap.nr) {
ret = func(*evt, data);
@@ -3879,9 +3872,9 @@ static noinline int visit_groups_merge(struct perf_event_context *ctx,
*evt = perf_event_groups_next(*evt, pmu);
if (*evt)
- min_heap_sift_down(&event_heap, 0, &perf_min_heap, NULL);
+ min_heap_sift_down_inline(&event_heap, 0, &perf_min_heap, NULL);
else
- min_heap_pop(&event_heap, &perf_min_heap, NULL);
+ min_heap_pop_inline(&event_heap, &perf_min_heap, NULL);
}
return 0;
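
[Editor's note] The min_heap rework lets a user leave .swp as NULL to fall back to a built-in default swap, which is why perf's swap_ptr() helper can go away, and the *_inline variants keep the comparison callback inlinable on this hot path. A self-contained userspace sketch of the underlying pattern -- a sift-down driven by a less() callback plus a generic element swap -- offered as an illustration of the idea, not the include/linux/min_heap.h code:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Generic byte-wise swap -- the role a built-in default plays when the
 * heap user does not supply its own .swp callback. */
static void default_swp(void *a, void *b, size_t size)
{
	unsigned char tmp[64];		/* assume small elements for this sketch */

	memcpy(tmp, a, size);
	memcpy(a, b, size);
	memcpy(b, tmp, size);
}

static int less_int(const void *l, const void *r)
{
	return *(const int *)l < *(const int *)r;
}

/* Restore the min-heap property from index pos downward. */
static void sift_down(void *base, size_t nr, size_t size, size_t pos,
		      int (*less)(const void *, const void *))
{
	char *data = base;

	for (;;) {
		size_t smallest = pos, left = 2 * pos + 1, right = 2 * pos + 2;

		if (left < nr && less(data + left * size, data + smallest * size))
			smallest = left;
		if (right < nr && less(data + right * size, data + smallest * size))
			smallest = right;
		if (smallest == pos)
			break;
		default_swp(data + pos * size, data + smallest * size, size);
		pos = smallest;
	}
}

int main(void)
{
	int v[] = { 9, 4, 7, 1, 8 };
	size_t nr = sizeof(v) / sizeof(v[0]);

	/* heapify: sift down every non-leaf node */
	for (size_t i = nr / 2; i-- > 0; )
		sift_down(v, nr, sizeof(int), i, less_int);
	printf("min = %d\n", v[0]);	/* prints 1 */
	return 0;
}
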
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 6c2cb4e4f48d..bc4a61029b6d 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -849,7 +849,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
cpu_events = alloc_percpu(typeof(*cpu_events));
if (!cpu_events)
- return (void __percpu __force *)ERR_PTR(-ENOMEM);
+ return ERR_PTR_PCPU(-ENOMEM);
cpus_read_lock();
for_each_online_cpu(cpu) {
@@ -868,7 +868,7 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr,
return cpu_events;
unregister_wide_hw_breakpoint(cpu_events);
- return (void __percpu __force *)ERR_PTR(err);
+ return ERR_PTR_PCPU(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
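
[Editor's note] The ERR_PTR_PCPU()/IS_ERR_PCPU()/PTR_ERR_PCPU() helpers in include/linux/err.h presumably just wrap the usual ERR_PTR() encoding in the __percpu address-space cast, so callers and callees no longer need open-coded "(void __percpu __force *)" casts to keep sparse happy. A runnable userspace illustration of the underlying encoding those helpers build on -- small negative errno values stuffed into the top of the pointer range -- with names invented for the sketch:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Userspace stand-ins for ERR_PTR()/PTR_ERR()/IS_ERR(): one return
 * value carries either a valid pointer or an encoded errno. */
static inline void *err_ptr(long error)      { return (void *)error; }
static inline long  ptr_err(const void *ptr) { return (long)ptr; }
static inline int   is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *p = err_ptr(-ENOMEM);

	if (is_err(p))
		printf("error: %ld\n", ptr_err(p));	/* -12 on most systems */
	return 0;
}
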
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 959d99583d1c..c18717189f32 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -31,6 +31,11 @@
static int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
/*
+ * Total number of tasks detected as hung since boot:
+ */
+static unsigned long __read_mostly sysctl_hung_task_detect_count;
+
+/*
* Limit number of tasks checked in a batch.
*
* This value controls the preemptibility of khungtaskd since preemption
@@ -115,6 +120,12 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))
return;
+ /*
+ * This counter tracks the total number of tasks detected as hung
+ * since boot.
+ */
+ sysctl_hung_task_detect_count++;
+
trace_sched_process_hang(t);
if (sysctl_hung_task_panic) {
@@ -314,6 +325,13 @@ static struct ctl_table hung_task_sysctls[] = {
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_NEG_ONE,
},
+ {
+ .procname = "hung_task_detect_count",
+ .data = &sysctl_hung_task_detect_count,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0444,
+ .proc_handler = proc_doulongvec_minmax,
+ },
};
static void __init hung_task_sysctl_init(void)
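
[Editor's note] The new counter is registered read-only (mode 0444) under the "kernel" sysctl directory, so it should appear as /proc/sys/kernel/hung_task_detect_count and accumulate monotonically since boot. A small userspace reader, as a sketch built on that path:

#include <stdio.h>

/* Print the cumulative hung-task count exposed by the new sysctl. */
int main(void)
{
	unsigned long count;
	FILE *f = fopen("/proc/sys/kernel/hung_task_detect_count", "r");

	if (!f || fscanf(f, "%lu", &count) != 1) {
		perror("hung_task_detect_count");
		return 1;
	}
	fclose(f);
	printf("tasks detected as hung since boot: %lu\n", count);
	return 0;
}
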
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9bb36897b6c6..a5ac612b1609 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -101,7 +101,7 @@ void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
struct kthread *kthread = to_kthread(tsk);
if (!kthread || !kthread->full_name) {
- __get_task_comm(buf, buf_size, tsk);
+ strscpy(buf, tsk->comm, buf_size);
return;
}
diff --git a/kernel/notifier.c b/kernel/notifier.c
index b3ce28f39eb6..2f9fe7c30287 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -5,19 +5,11 @@
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/vmalloc.h>
-#include <linux/reboot.h>
#define CREATE_TRACE_POINTS
#include <trace/events/notifier.h>
/*
- * Notifier list for kernel code which wants to be called
- * at shutdown. This is used to stop any idling DMA operations
- * and the like.
- */
-BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
-
-/*
* Notifier chain core routines. The exported routines below
* are layered on top of these, with appropriate locking added.
*/
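
[Editor's note] With reboot_notifier_list moved into kernel/reboot.c and made static, outside code can only reach it through the existing register_reboot_notifier()/unregister_reboot_notifier() wrappers. A minimal sketch of a module-side user of that API; the callback name and message are invented for illustration:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Hypothetical notifier that runs on the shutdown path. */
static int example_reboot_cb(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	pr_info("example: reboot notifier called, action=%lu\n", action);
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call = example_reboot_cb,
};

static int __init example_init(void)
{
	return register_reboot_notifier(&example_reboot_nb);
}

static void __exit example_exit(void)
{
	unregister_reboot_notifier(&example_reboot_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
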
diff --git a/kernel/reboot.c b/kernel/reboot.c
index f05dbde2c93f..a701000bab34 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -72,6 +72,13 @@ static bool poweroff_fallback_to_halt;
*/
void __weak (*pm_power_off)(void);
+/*
+ * Notifier list for kernel code which wants to be called
+ * at shutdown. This is used to stop any idling DMA operations
+ * and the like.
+ */
+static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
+
/**
* emergency_restart - reboot the system
*
@@ -1130,7 +1137,7 @@ static ssize_t mode_show(struct kobject *kobj, struct kobj_attribute *attr, char
val = REBOOT_UNDEFINED_STR;
}
- return sprintf(buf, "%s\n", val);
+ return sysfs_emit(buf, "%s\n", val);
}
static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
@@ -1160,7 +1167,7 @@ static struct kobj_attribute reboot_mode_attr = __ATTR_RW(mode);
#ifdef CONFIG_X86
static ssize_t force_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", reboot_force);
+ return sysfs_emit(buf, "%d\n", reboot_force);
}
static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
@@ -1207,7 +1214,7 @@ static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char
val = REBOOT_UNDEFINED_STR;
}
- return sprintf(buf, "%s\n", val);
+ return sysfs_emit(buf, "%s\n", val);
}
static ssize_t type_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
@@ -1240,7 +1247,7 @@ static struct kobj_attribute reboot_type_attr = __ATTR_RW(type);
#ifdef CONFIG_SMP
static ssize_t cpu_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", reboot_cpu);
+ return sysfs_emit(buf, "%d\n", reboot_cpu);
}
static ssize_t cpu_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
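
[Editor's note] sysfs_emit() is the preferred replacement for sprintf() in sysfs show callbacks: it checks that buf is the page-aligned buffer sysfs hands in and bounds the output to PAGE_SIZE, so a show method cannot overrun the page. A short sketch of the same pattern in a hypothetical read-only attribute (the attribute and backing variable are made up):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static int example_setting;

/* Hypothetical show callback: format into the one page sysfs provides. */
static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "%d\n", example_setting);
}

static struct kobj_attribute example_attr = __ATTR_RO(example);
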
diff --git a/kernel/resource.c b/kernel/resource.c
index d2c8143ae4ff..c9fd26c06345 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -50,17 +50,35 @@ EXPORT_SYMBOL(iomem_resource);
static DEFINE_RWLOCK(resource_lock);
-static struct resource *next_resource(struct resource *p, bool skip_children)
+/*
+ * Return the next node of @p in pre-order tree traversal. If
+ * @skip_children is true, skip the descendant nodes of @p in
+ * traversal. If @p is a descendant of @subtree_root, only traverse
+ * the subtree under @subtree_root.
+ */
+static struct resource *next_resource(struct resource *p, bool skip_children,
+ struct resource *subtree_root)
{
if (!skip_children && p->child)
return p->child;
- while (!p->sibling && p->parent)
+ while (!p->sibling && p->parent) {
p = p->parent;
+ if (p == subtree_root)
+ return NULL;
+ }
return p->sibling;
}
+/*
+ * Traverse the resource subtree under @_root in pre-order, excluding
+ * @_root itself.
+ *
+ * NOTE: '__p' is introduced to avoid shadowing '_p' outside of loop.
+ * And it is referenced to avoid unused variable warning.
+ */
#define for_each_resource(_root, _p, _skip_children) \
- for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children))
+ for (typeof(_root) __root = (_root), __p = _p = __root->child; \
+ __p && _p; _p = next_resource(_p, _skip_children, __root))
#ifdef CONFIG_PROC_FS
@@ -88,7 +106,7 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
(*pos)++;
- return (void *)next_resource(p, false);
+ return (void *)next_resource(p, false, NULL);
}
static void r_stop(struct seq_file *m, void *v)
@@ -297,6 +315,11 @@ int release_resource(struct resource *old)
EXPORT_SYMBOL(release_resource);
+static bool is_type_match(struct resource *p, unsigned long flags, unsigned long desc)
+{
+ return (p->flags & flags) == flags && (desc == IORES_DESC_NONE || desc == p->desc);
+}
+
/**
* find_next_iomem_res - Finds the lowest iomem resource that covers part of
* [@start..@end].
@@ -339,13 +362,9 @@ static int find_next_iomem_res(resource_size_t start, resource_size_t end,
if (p->end < start)
continue;
- if ((p->flags & flags) != flags)
- continue;
- if ((desc != IORES_DESC_NONE) && (desc != p->desc))
- continue;
-
/* Found a match, break */
- break;
+ if (is_type_match(p, flags, desc))
+ break;
}
if (p) {
@@ -537,21 +556,18 @@ static int __region_intersects(struct resource *parent, resource_size_t start,
size_t size, unsigned long flags,
unsigned long desc)
{
- resource_size_t ostart, oend;
int type = 0; int other = 0;
struct resource *p, *dp;
- bool is_type, covered;
- struct resource res;
+ struct resource res, o;
+ bool covered;
res.start = start;
res.end = start + size - 1;
for (p = parent->child; p ; p = p->sibling) {
- if (!resource_overlaps(p, &res))
+ if (!resource_intersection(p, &res, &o))
continue;
- is_type = (p->flags & flags) == flags &&
- (desc == IORES_DESC_NONE || desc == p->desc);
- if (is_type) {
+ if (is_type_match(p, flags, desc)) {
type++;
continue;
}
@@ -568,27 +584,23 @@ static int __region_intersects(struct resource *parent, resource_size_t start,
* |-- "System RAM" --||-- "CXL Window 0a" --|
*/
covered = false;
- ostart = max(res.start, p->start);
- oend = min(res.end, p->end);
for_each_resource(p, dp, false) {
if (!resource_overlaps(dp, &res))
continue;
- is_type = (dp->flags & flags) == flags &&
- (desc == IORES_DESC_NONE || desc == dp->desc);
- if (is_type) {
+ if (is_type_match(dp, flags, desc)) {
type++;
/*
- * Range from 'ostart' to 'dp->start'
+ * Range from 'o.start' to 'dp->start'
* isn't covered by matched resource.
*/
- if (dp->start > ostart)
+ if (dp->start > o.start)
break;
- if (dp->end >= oend) {
+ if (dp->end >= o.end) {
covered = true;
break;
}
/* Remove covered range */
- ostart = max(ostart, dp->end + 1);
+ o.start = max(o.start, dp->end + 1);
}
}
if (!covered)
@@ -744,7 +756,7 @@ EXPORT_SYMBOL_GPL(find_resource_space);
* @root: root resource descriptor
* @old: resource descriptor desired by caller
* @newsize: new size of the resource descriptor
- * @constraint: the size and alignment constraints to be met.
+ * @constraint: the memory range and alignment constraints to be met.
*/
static int reallocate_resource(struct resource *root, struct resource *old,
resource_size_t newsize,
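
[Editor's note] The reworked next_resource() is a pre-order walk over a child/sibling/parent tree, with the extra subtree_root bound so that for_each_resource(p, dp, false) in __region_intersects() stops once it climbs back up to the subtree it started from instead of wandering into p's siblings. A self-contained userspace sketch of that traversal logic, with struct node standing in for struct resource:

#include <stdio.h>
#include <stddef.h>

struct node {
	const char *name;
	struct node *parent, *sibling, *child;
};

/* Pre-order successor; skip_children skips the subtree below n, and the
 * walk never escapes above subtree_root -- mirroring next_resource(). */
static struct node *next_node(struct node *n, int skip_children,
			      struct node *subtree_root)
{
	if (!skip_children && n->child)
		return n->child;
	while (!n->sibling && n->parent) {
		n = n->parent;
		if (n == subtree_root)
			return NULL;
	}
	return n->sibling;
}

int main(void)
{
	/* root -> a (-> a1, a2), b */
	struct node a1 = { "a1" }, a2 = { "a2" };
	struct node a = { "a", NULL, NULL, &a1 };
	struct node b = { "b" };
	struct node root = { "root", NULL, NULL, &a };

	a.parent = &root; a.sibling = &b; b.parent = &root;
	a1.parent = &a; a1.sibling = &a2; a2.parent = &a;

	/* walk only the subtree under 'a', excluding 'a' itself */
	for (struct node *p = a.child; p; p = next_node(p, 0, &a))
		printf("%s\n", p->name);	/* prints a1, a2 -- never b */
	return 0;
}
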
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 5a93d4c446b8..41e0f7e9fa35 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -998,6 +998,7 @@ static int proc_watchdog_common(int which, const struct ctl_table *table, int wr
mutex_lock(&watchdog_mutex);
+ old = *param;
if (!write) {
/*
* On read synchronize the userspace interface. This is a
@@ -1005,8 +1006,8 @@ static int proc_watchdog_common(int which, const struct ctl_table *table, int wr
*/
*param = (watchdog_enabled & which) != 0;
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ *param = old;
} else {
- old = READ_ONCE(*param);
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (!err && old != READ_ONCE(*param))
proc_watchdog_update();
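
[Editor's note] The watchdog change makes the read path side-effect free: the shadow sysctl variable is set to reflect the live watchdog_enabled bits only for the duration of proc_dointvec_minmax(), then restored, while the write path compares against the value saved before the handler ran. A tiny userspace sketch of that "present a derived value on read without persisting it" pattern, with all names invented for illustration:

#include <stdio.h>

static int shadow_param = 1;	/* backing store the tunable lives in */
static int live_enabled = 0;	/* live state a read must report */

/* Read handler: report the live state, then restore the shadow value so
 * reading never clobbers it -- mirroring the *param save/restore above. */
static int read_param(void)
{
	int old = shadow_param;
	int reported;

	shadow_param = live_enabled;	/* synthesize the visible value */
	reported = shadow_param;	/* stands in for proc_dointvec_minmax() */
	shadow_param = old;		/* restore: reads are side-effect free */
	return reported;
}

int main(void)
{
	int reported = read_param();

	printf("reported=%d shadow=%d\n", reported, shadow_param);
	return 0;
}
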