author     Ard Biesheuvel <ardb@kernel.org>    2022-01-10 09:54:22 +0100
committer  Ard Biesheuvel <ardb@kernel.org>    2022-01-25 09:53:52 +0100
commit     d31e23aff011d96278f4dbc22f2ec5db433eabaf
tree       7da6315ac0b3016db684d3be4caac44f45ebd802 /arch/arm/mm
parent     ARM: entry: avoid clobbering R9 in IRQ handler
ARM: mm: make vmalloc_seq handling SMP safe
Rework the vmalloc_seq handling so it can be used safely under SMP. We now rely on it
to ensure that vmap'ed stacks are guaranteed to be mapped by the active mm before
switching to a task, which means that changes to the page tables must be visible to
other CPUs by the time they observe a change in the sequence count.
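The ordering requirement in that last sentence is the usual release/acquire publication pattern: write the data, then publish a new sequence count with a store-release; an observer that reads the new count with an acquire load is guaranteed to also see the data. A minimal userspace sketch of that pattern follows (C11 atomics; `page_table_copy` and the local `vmalloc_seq` counter are illustrative stand-ins, not kernel code):

```c
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

static int page_table_copy;     /* stand-in for the copied PGD entries */
static atomic_int vmalloc_seq;  /* stand-in for the sequence counter   */

static void *publisher(void *arg)
{
	(void)arg;
	/* The "memcpy" of page table entries. */
	page_table_copy = 42;
	/* Store-release: orders the data write above before the counter bump. */
	atomic_store_explicit(&vmalloc_seq, 1, memory_order_release);
	return NULL;
}

static void *observer(void *arg)
{
	(void)arg;
	/* Spin until the new counter value becomes visible (acquire load). */
	while (atomic_load_explicit(&vmalloc_seq, memory_order_acquire) != 1)
		;
	/* Having observed the bump, the data write must be visible as well. */
	assert(page_table_copy == 42);
	return NULL;
}

int main(void)
{
	pthread_t p, o;

	pthread_create(&o, NULL, observer, NULL);
	pthread_create(&p, NULL, publisher, NULL);
	pthread_join(p, NULL);
	pthread_join(o, NULL);
	return 0;
}
```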
Since LPAE needs none of this, break the vmalloc_seq counter check out into a
separate static inline helper and fold a check for LPAE into it.
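The helper itself lives outside arch/arm/mm (presumably in arch/arm/include/asm/mmu_context.h), so it does not appear in the diff below; based on the description above it can be expected to look roughly like this sketch:

```c
/*
 * Sketch only, reconstructed from the commit message: skip the check
 * entirely on LPAE, and fall through to __check_vmalloc_seq() only when
 * the mm's counter has fallen behind init_mm's.
 */
static inline void check_vmalloc_seq(struct mm_struct *mm)
{
	if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
	    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
		     atomic_read(&init_mm.context.vmalloc_seq)))
		__check_vmalloc_seq(mm);
}
```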
Given that vmap'ed stacks are now also supported on !SMP configurations,
let's drop the WARN() that could potentially now fire spuriously.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/context.c |  3
-rw-r--r--   arch/arm/mm/ioremap.c | 18
2 files changed, 12 insertions, 9 deletions
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 48091870db89..4204ffa2d104 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -240,8 +240,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	unsigned int cpu = smp_processor_id();
 	u64 asid;
 
-	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
-		__check_vmalloc_seq(mm);
+	check_vmalloc_seq(mm);
 
 	/*
 	 * We cannot update the pgd and the ASID atomicly with classic
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 6e830b9418c9..8963c8c63471 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -117,16 +117,21 @@ EXPORT_SYMBOL(ioremap_page);
 
 void __check_vmalloc_seq(struct mm_struct *mm)
 {
-	unsigned int seq;
+	int seq;
 
 	do {
-		seq = init_mm.context.vmalloc_seq;
+		seq = atomic_read(&init_mm.context.vmalloc_seq);
 		memcpy(pgd_offset(mm, VMALLOC_START),
 		       pgd_offset_k(VMALLOC_START),
 		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
-		mm->context.vmalloc_seq = seq;
-	} while (seq != init_mm.context.vmalloc_seq);
+		/*
+		 * Use a store-release so that other CPUs that observe the
+		 * counter's new value are guaranteed to see the results of the
+		 * memcpy as well.
+		 */
+		atomic_set_release(&mm->context.vmalloc_seq, seq);
+	} while (seq != atomic_read(&init_mm.context.vmalloc_seq));
 }
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
@@ -157,7 +162,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 			 * Note: this is still racy on SMP machines.
 			 */
 			pmd_clear(pmdp);
-			init_mm.context.vmalloc_seq++;
+			atomic_inc_return_release(&init_mm.context.vmalloc_seq);
 
 			/*
 			 * Free the page table, if there was one.
@@ -174,8 +179,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 	 * Ensure that the active_mm is up to date - we want to
 	 * catch any use-after-iounmap cases.
 	 */
-	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
-		__check_vmalloc_seq(current->active_mm);
+	check_vmalloc_seq(current->active_mm);
 
 	flush_tlb_kernel_range(virt, end);
 }
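For completeness: the atomic_read()/atomic_set_release()/atomic_inc_return_release() calls above imply that the vmalloc_seq member of mm_context_t is converted from a plain unsigned int to an atomic_t. That definition lives in arch/arm/include/asm/mmu.h and is therefore outside the arch/arm/mm diffstat shown here; a sketch of the relevant field:

```c
/* Sketch of the converted counter in mm_context_t (other members elided). */
typedef struct {
	/* ... ASID and other members ... */
	atomic_t	vmalloc_seq;
} mm_context_t;
```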