author    Minchan Kim <minchan@kernel.org>    2021-05-05 03:37:31 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>    2021-05-05 20:27:24 +0200
commit    3aab8ae7aace3388da319a233edf48f0f5d26a44 (patch)
tree      d22956812439bbd25b4a74d952abb56fa5fade85
parent    mm: cma: support sysfs (diff)
mm: cma: add the CMA instance name to cma trace events
There were missing places to add the cma instance name. To identify each CMA
instance, let's add the name to every cma trace event. This patch also renames
the existing trace_cma_alloc to trace_cma_alloc_finish, since we already have
cma_alloc_start [1].

[1] https://lore.kernel.org/linux-mm/20210324160740.15901-1-georgi.djakov@linaro.org

Link: https://lkml.kernel.org/r/20210330220237.748899-1-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Liam Mark <lmark@codeaurora.org>
Cc: Georgi Djakov <georgi.djakov@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
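As a rough illustration (not part of the patch), the sketch below shows a
hypothetical caller of the existing cma_alloc()/cma_release() API: the calls
themselves are unchanged, but the cma:cma_alloc_finish, cma:cma_alloc_busy_retry
and cma:cma_release events they emit now carry the CMA instance name. The
demo_* wrappers and their parameters are made up for illustration.

	/* Hypothetical caller sketch; only the emitted trace events change. */
	#include <linux/cma.h>
	#include <linux/mm.h>

	static struct page *demo_grab(struct cma *area, size_t nr_pages)
	{
		/*
		 * Emits cma:cma_alloc_start, then cma:cma_alloc_finish (and
		 * cma:cma_alloc_busy_retry on contention), each tagged with
		 * the CMA instance name after this patch.
		 */
		return cma_alloc(area, nr_pages, 0, false);
	}

	static void demo_put(struct cma *area, struct page *pages, size_t nr_pages)
	{
		/* Emits cma:cma_release, now also tagged with the instance name. */
		cma_release(area, pages, nr_pages);
	}

With the name field present, a trace consumer can attribute allocations and
releases to a specific CMA area, which was not possible from the pfn/page
fields alone when several areas are in use.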
-rw-r--r--  include/trace/events/cma.h | 28
-rw-r--r--  mm/cma.c                   |  7
2 files changed, 21 insertions(+), 14 deletions(-)
diff --git a/include/trace/events/cma.h b/include/trace/events/cma.h
index be1525a10457..5cf385ae7c08 100644
--- a/include/trace/events/cma.h
+++ b/include/trace/events/cma.h
@@ -10,12 +10,13 @@
DECLARE_EVENT_CLASS(cma_alloc_class,
- TP_PROTO(unsigned long pfn, const struct page *page,
+ TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
unsigned int count, unsigned int align),
- TP_ARGS(pfn, page, count, align),
+ TP_ARGS(name, pfn, page, count, align),
TP_STRUCT__entry(
+ __string(name, name)
__field(unsigned long, pfn)
__field(const struct page *, page)
__field(unsigned int, count)
@@ -23,13 +24,15 @@ DECLARE_EVENT_CLASS(cma_alloc_class,
),
TP_fast_assign(
+ __assign_str(name, name);
__entry->pfn = pfn;
__entry->page = page;
__entry->count = count;
__entry->align = align;
),
- TP_printk("pfn=%lx page=%p count=%u align=%u",
+ TP_printk("name=%s pfn=%lx page=%p count=%u align=%u",
+ __get_str(name),
__entry->pfn,
__entry->page,
__entry->count,
@@ -38,24 +41,27 @@ DECLARE_EVENT_CLASS(cma_alloc_class,
TRACE_EVENT(cma_release,
- TP_PROTO(unsigned long pfn, const struct page *page,
+ TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
unsigned int count),
- TP_ARGS(pfn, page, count),
+ TP_ARGS(name, pfn, page, count),
TP_STRUCT__entry(
+ __string(name, name)
__field(unsigned long, pfn)
__field(const struct page *, page)
__field(unsigned int, count)
),
TP_fast_assign(
+ __assign_str(name, name);
__entry->pfn = pfn;
__entry->page = page;
__entry->count = count;
),
- TP_printk("pfn=%lx page=%p count=%u",
+ TP_printk("name=%s pfn=%lx page=%p count=%u",
+ __get_str(name),
__entry->pfn,
__entry->page,
__entry->count)
@@ -85,20 +91,20 @@ TRACE_EVENT(cma_alloc_start,
__entry->align)
);
-DEFINE_EVENT(cma_alloc_class, cma_alloc,
+DEFINE_EVENT(cma_alloc_class, cma_alloc_finish,
- TP_PROTO(unsigned long pfn, const struct page *page,
+ TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
unsigned int count, unsigned int align),
- TP_ARGS(pfn, page, count, align)
+ TP_ARGS(name, pfn, page, count, align)
);
DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
- TP_PROTO(unsigned long pfn, const struct page *page,
+ TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
unsigned int count, unsigned int align),
- TP_ARGS(pfn, page, count, align)
+ TP_ARGS(name, pfn, page, count, align)
);
#endif /* _TRACE_CMA_H */
diff --git a/mm/cma.c b/mm/cma.c
index 2380f2571eb5..cdad8c4de921 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -486,12 +486,13 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
pr_debug("%s(): memory range at %p is busy, retrying\n",
__func__, pfn_to_page(pfn));
- trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
+ trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
+ count, align);
/* try again with a bit different memory target */
start = bitmap_no + mask + 1;
}
- trace_cma_alloc(pfn, page, count, align);
+ trace_cma_alloc_finish(cma->name, pfn, page, count, align);
/*
* CMA can allocate multiple page blocks, which results in different
@@ -551,7 +552,7 @@ bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
free_contig_range(pfn, count);
cma_clear_bitmap(cma, pfn, count);
- trace_cma_release(pfn, pages, count);
+ trace_cma_release(cma->name, pfn, pages, count);
return true;
}