author    | James Clark <james.clark@arm.com>         | 2020-11-26 15:13:18 +0100
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2020-12-24 14:03:54 +0100
commit    | 91585846f105ef2e3f479a5124a264ebb770f6ab
tree      | f4cab0658779b531343f87cb1a6d47b46c8d724b /tools/perf/util/cpumap.c
parent    | perf tests: Improve topology test to check all aggregation types
perf cpumap: Use existing allocator to avoid using malloc
Use the existing allocator for perf_cpu_map to avoid the use of raw malloc.
Allocating with raw malloc could cause an issue in later commits, where the
size of perf_cpu_map is changed.

No functional changes.
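
For context, the "existing allocator" referred to above is perf_cpu_map__empty_new().
A simplified sketch of what it does (based on tools/lib/perf/cpumap.c around this
time; exact details may differ between trees):

struct perf_cpu_map *perf_cpu_map__empty_new(int nr)
{
	/* One allocation covers the struct plus its trailing map[] array. */
	struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + nr * sizeof(int));

	if (cpus != NULL) {
		int i;

		/* Mark every entry as unset and take the initial reference. */
		cpus->nr = nr;
		for (i = 0; i < nr; i++)
			cpus->map[i] = -1;

		refcount_set(&cpus->refcnt, 1);
	}

	return cpus;
}

Because the allocator already sets nr and takes the initial reference, the patch
below resets c->nr to 0 before the map is filled and drops the explicit
refcount_set() call.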
Signed-off-by: James Clark <james.clark@arm.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Tested-by: John Garry <john.garry@huawei.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Link: https://lore.kernel.org/r/20201126141328.6509-3-james.clark@arm.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat
-rw-r--r-- | tools/perf/util/cpumap.c | 8
1 file changed, 4 insertions, 4 deletions
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index dc5c5e6fc502..20e3a75953fc 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -132,15 +132,16 @@ int cpu_map__build_map(struct perf_cpu_map *cpus, struct perf_cpu_map **res,
 		       int (*f)(struct perf_cpu_map *map, int cpu, void *data),
 		       void *data)
 {
-	struct perf_cpu_map *c;
 	int nr = cpus->nr;
+	struct perf_cpu_map *c = perf_cpu_map__empty_new(nr);
 	int cpu, s1, s2;
 
-	/* allocate as much as possible */
-	c = calloc(1, sizeof(*c) + nr * sizeof(int));
 	if (!c)
 		return -1;
 
+	/* Reset size as it may only be partially filled */
+	c->nr = 0;
+
 	for (cpu = 0; cpu < nr; cpu++) {
 		s1 = f(cpus, cpu, data);
 		for (s2 = 0; s2 < c->nr; s2++) {
@@ -155,7 +156,6 @@ int cpu_map__build_map(struct perf_cpu_map *cpus, struct perf_cpu_map **res,
 	/* ensure we process id in increasing order */
 	qsort(c->map, c->nr, sizeof(int), cmp_ids);
 
-	refcount_set(&c->refcnt, 1);
 	*res = c;
 	return 0;
 }
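
For reference, cpu_map__build_map() builds an aggregated map by mapping each CPU
index to an id via the callback f and keeping the sorted, de-duplicated ids. A
hypothetical caller could look like the sketch below; the callback and wrapper
names are illustrative and not part of this patch:

/* Hypothetical aggregation callback: group each CPU by itself, so the result
 * is the sorted, de-duplicated list of CPU numbers. perf's real callbacks
 * (socket, die, core, ...) live in tools/perf/util/cpumap.c. */
static int aggr_by_cpu(struct perf_cpu_map *map, int idx, void *data __maybe_unused)
{
	return perf_cpu_map__cpu(map, idx);	/* CPU number at index 'idx' */
}

static int build_cpu_aggr_map(struct perf_cpu_map *cpus, struct perf_cpu_map **res)
{
	return cpu_map__build_map(cpus, res, aggr_by_cpu, NULL);
}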