author		SeongJae Park <sj@kernel.org>	2022-01-14 23:10:50 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-01-15 15:30:33 +0100
commit		76fd0285b447991267e838842c0be7395eb454bb (patch)
tree		3237bb3b04ecac3eda048f6467afeb8f3bcd8a30
parent		mm/damon/vaddr: hide kernel pointer from damon_va_three_regions() failure log (diff)
mm/damon: hide kernel pointer from tracepoint event
DAMON's virtual address spaces monitoring primitive uses the 'struct pid *' of the target process as its monitoring target id.  The kernel address is exposed as-is to user space via the DAMON tracepoint, 'damon_aggregated'.  Though only privileged users are allowed to access it by default, it would be better to avoid exposing kernel pointers unnecessarily.  Because the trace result only needs to distinguish each target, we don't need to use the pointer as-is.

Make the tracepoint use the index of the target in the context's targets list as its id in the tracepoint, to hide the kernel space address.

Link: https://lkml.kernel.org/r/20211229131016.23641-5-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
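For illustration only (the values below are hypothetical and the exact record format may differ by kernel version), an aggregated trace record roughly changes from carrying a pointer-derived id to carrying a small per-context index:

	before: damon_aggregated: target_id=18446619772521157184 nr_regions=10 94827938652160-94827940700160: 3
	after:  damon_aggregated: target_id=0 nr_regions=10 94827938652160-94827940700160: 3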
-rw-r--r--	include/trace/events/damon.h	8
-rw-r--r--	mm/damon/core.c	4
2 files changed, 7 insertions, 5 deletions
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
index 99ffa601e351..c79f1d4c39af 100644
--- a/include/trace/events/damon.h
+++ b/include/trace/events/damon.h
@@ -11,10 +11,10 @@
TRACE_EVENT(damon_aggregated,
- TP_PROTO(struct damon_target *t, struct damon_region *r,
- unsigned int nr_regions),
+ TP_PROTO(struct damon_target *t, unsigned int target_id,
+ struct damon_region *r, unsigned int nr_regions),
- TP_ARGS(t, r, nr_regions),
+ TP_ARGS(t, target_id, r, nr_regions),
TP_STRUCT__entry(
__field(unsigned long, target_id)
@@ -26,7 +26,7 @@ TRACE_EVENT(damon_aggregated,
),
TP_fast_assign(
- __entry->target_id = t->id;
+ __entry->target_id = target_id;
__entry->nr_regions = nr_regions;
__entry->start = r->ar.start;
__entry->end = r->ar.end;
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 6482d510dcbe..1dd153c31c9e 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -514,15 +514,17 @@ static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
static void kdamond_reset_aggregated(struct damon_ctx *c)
{
struct damon_target *t;
+ unsigned int ti = 0; /* target's index */
damon_for_each_target(t, c) {
struct damon_region *r;
damon_for_each_region(r, t) {
- trace_damon_aggregated(t, r, damon_nr_regions(t));
+ trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
r->last_nr_accesses = r->nr_accesses;
r->nr_accesses = 0;
}
+ ti++;
}
}
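As a minimal userspace sketch (assuming tracefs is mounted at /sys/kernel/tracing and the damon_aggregated event has been enabled, e.g. by writing 1 to events/damon/damon_aggregated/enable), one could stream the aggregated records and observe the index-based target_id; the file and field names here follow the common trace event conventions rather than anything added by this patch:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* trace_pipe blocks until new events arrive and consumes them as they are read */
	FILE *fp = fopen("/sys/kernel/tracing/trace_pipe", "r");
	char line[512];

	if (!fp) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), fp)) {
		/* keep only damon_aggregated records; target_id is now the
		 * target's index in the context's targets list, not a pointer */
		if (strstr(line, "damon_aggregated:"))
			fputs(line, stdout);
	}
	fclose(fp);
	return 0;
}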