commit     efe951d3de9141626a494bcb1efb0650eaef6491
tree       700b1669ac70b2fb6ddc854ac931e2cc6c8b0105 /arch
parent     perf/core: Fix ctx::mutex deadlock
author     Peter Zijlstra <peterz@infradead.org>  2018-01-10 19:23:08 +0100
committer  Ingo Molnar <mingo@kernel.org>         2018-01-25 14:48:30 +0100
perf/x86: Fix perf,x86,cpuhp deadlock
More lockdep gifts, a 5-way lockup race:

        perf_event_create_kernel_counter()
          perf_event_alloc()
            perf_try_init_event()
              x86_pmu_event_init()
                __x86_pmu_event_init()
                  x86_reserve_hardware()
 #0                 mutex_lock(&pmc_reserve_mutex);
                    reserve_ds_buffer()
 #1                   get_online_cpus()

        perf_event_release_kernel()
          _free_event()
            hw_perf_event_destroy()
              x86_release_hardware()
 #0             mutex_lock(&pmc_reserve_mutex)
                release_ds_buffer()
 #1               get_online_cpus()

 #1     do_cpu_up()
          perf_event_init_cpu()
 #2         mutex_lock(&pmus_lock)
 #3         mutex_lock(&ctx->mutex)

        sys_perf_event_open()
          mutex_lock_double()
 #3         mutex_lock(ctx->mutex)
 #4         mutex_lock_nested(ctx->mutex, 1);
          perf_try_init_event()
 #4         mutex_lock_nested(ctx->mutex, 1)
              x86_pmu_event_init()
                intel_pmu_hw_config()
                  x86_add_exclusive()
 #0                 mutex_lock(&pmc_reserve_mutex)
Fix it by using ordering constructs instead of locking.
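The shape of that fix, reduced to a stand-alone sketch (user-space C11; every name here is illustrative, not one of the kernel symbols this patch touches): instead of nesting the hotplug lock inside pmc_reserve_mutex, fully initialize the shared state and publish it with a release store, and let a late-arriving CPU observe it with an acquire load in its own bring-up path. No second lock is taken, so no new lock-order edge can form.

```c
/*
 * Minimal user-space sketch of "ordering constructs instead of locking".
 * All names (ds_state, reserve_hw, cpu_starting) are hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>

static int ds_storage;                  /* the shared DS-like state      */
static _Atomic(int *) ds_state;         /* published pointer to it       */

/* Writer: initialize fully, then publish with a release store. No
 * hotplug lock is taken, so no ordering against other mutexes arises. */
static void reserve_hw(void)
{
        ds_storage = 42;
        atomic_store_explicit(&ds_state, &ds_storage, memory_order_release);
}

/* A CPU coming online: an acquire load either sees the fully
 * initialized state and programs itself, or sees NULL and does
 * nothing -- mirroring how offline CPUs are simply skipped. */
static void cpu_starting(void)
{
        int *st = atomic_load_explicit(&ds_state, memory_order_acquire);

        if (st)
                printf("program DS area with %d\n", *st);
        else
                printf("no DS state published, nothing to do\n");
}

int main(void)
{
        cpu_starting();         /* before publication: no-op       */
        reserve_hw();
        cpu_starting();         /* after publication: sees 42      */
        return 0;
}
```

In the patch itself the ordering is supplied by the CPU-hotplug state machine: by the time intel_pmu_cpu_starting() runs on a CPU coming up, cpu_hw_events.ds has already been set up (or torn down), so the per-CPU walks below no longer need get_online_cpus().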
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/events/intel/ds.c | 33
1 file changed, 18 insertions(+), 15 deletions(-)
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 8156e47da7ba..18c25ab28557 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -372,10 +372,9 @@ static int alloc_pebs_buffer(int cpu)
 static void release_pebs_buffer(int cpu)
 {
         struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
-        struct debug_store *ds = hwev->ds;
         void *cea;
 
-        if (!ds || !x86_pmu.pebs)
+        if (!x86_pmu.pebs)
                 return;
 
         kfree(per_cpu(insn_buffer, cpu));
@@ -384,7 +383,6 @@ static void release_pebs_buffer(int cpu)
         /* Clear the fixmap */
         cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
         ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
-        ds->pebs_buffer_base = 0;
         dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
         hwev->ds_pebs_vaddr = NULL;
 }
@@ -419,16 +417,14 @@ static int alloc_bts_buffer(int cpu)
 static void release_bts_buffer(int cpu)
 {
         struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
-        struct debug_store *ds = hwev->ds;
         void *cea;
 
-        if (!ds || !x86_pmu.bts)
+        if (!x86_pmu.bts)
                 return;
 
         /* Clear the fixmap */
         cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
         ds_clear_cea(cea, BTS_BUFFER_SIZE);
-        ds->bts_buffer_base = 0;
         dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
         hwev->ds_bts_vaddr = NULL;
 }
@@ -454,16 +450,22 @@ void release_ds_buffers(void)
         if (!x86_pmu.bts && !x86_pmu.pebs)
                 return;
 
-        get_online_cpus();
-        for_each_online_cpu(cpu)
+        for_each_possible_cpu(cpu)
+                release_ds_buffer(cpu);
+
+        for_each_possible_cpu(cpu) {
+                /*
+                 * Again, ignore errors from offline CPUs, they will no longer
+                 * observe cpu_hw_events.ds and not program the DS_AREA when
+                 * they come up.
+                 */
                 fini_debug_store_on_cpu(cpu);
+        }
 
         for_each_possible_cpu(cpu) {
                 release_pebs_buffer(cpu);
                 release_bts_buffer(cpu);
-                release_ds_buffer(cpu);
         }
-        put_online_cpus();
 }
 
 void reserve_ds_buffers(void)
@@ -483,8 +485,6 @@ void reserve_ds_buffers(void)
         if (!x86_pmu.pebs)
                 pebs_err = 1;
 
-        get_online_cpus();
-
         for_each_possible_cpu(cpu) {
                 if (alloc_ds_buffer(cpu)) {
                         bts_err = 1;
@@ -521,11 +521,14 @@ void reserve_ds_buffers(void)
                 if (x86_pmu.pebs && !pebs_err)
                         x86_pmu.pebs_active = 1;
 
-                for_each_online_cpu(cpu)
+                for_each_possible_cpu(cpu) {
+                        /*
+                         * Ignores wrmsr_on_cpu() errors for offline CPUs they
+                         * will get this call through intel_pmu_cpu_starting().
+                         */
                         init_debug_store_on_cpu(cpu);
+                }
         }
-
-        put_online_cpus();
 }
 
 /*
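A compact way to see why the for_each_possible_cpu() walks above are safe without the hotplug lock -- again a user-space sketch with invented names (wrmsr_on_cpu_sketch, cpu_starting_sketch), not the kernel code: the cross-call to an offline CPU fails harmlessly and is deliberately ignored, and a CPU that comes online later programs itself from the shared state in its bring-up callback, the intel_pmu_cpu_starting() analogue.

```c
/* Hypothetical-name sketch of the patch's possible-CPU walk. */
#include <stdbool.h>
#include <stdio.h>

#define NR_POSSIBLE_CPUS 4

static bool online[NR_POSSIBLE_CPUS] = { true, true, false, false };
static int  ds_area[NR_POSSIBLE_CPUS];  /* per-CPU DS_AREA "MSR"   */
static int  ds_value = 7;               /* state to program        */

/* Analogue of wrmsr_on_cpu(): fails harmlessly for an offline CPU;
 * the caller ignores the error, as the patch comments describe. */
static int wrmsr_on_cpu_sketch(int cpu, int val)
{
        if (!online[cpu])
                return -1;
        ds_area[cpu] = val;
        return 0;
}

/* Analogue of the hotplug-starting callback: a CPU coming online
 * programs itself from the shared state, so the failed cross-call
 * above never mattered. */
static void cpu_starting_sketch(int cpu)
{
        online[cpu] = true;
        ds_area[cpu] = ds_value;
}

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)    /* "possible" walk */
                wrmsr_on_cpu_sketch(cpu, ds_value);

        cpu_starting_sketch(2);                         /* CPU 2 comes up  */

        for (cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
                printf("cpu%d: DS_AREA=%d online=%d\n",
                       cpu, ds_area[cpu], online[cpu]);
        return 0;
}
```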