author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2016-05-25 09:45:26 +0200 |
---|---|---|
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2016-06-13 15:58:22 +0200 |
commit | 64f31d5802af11fd87872b4bae07b35cf0acb358 (patch) | |
tree | 73f514c73762092fd6710efa6863a5a6124d86a3 /arch/s390/include/asm/mmu_context.h | |
parent | bitmap: bitmap_equal memcmp optimization (diff) | |
download | linux-64f31d5802af11fd87872b4bae07b35cf0acb358.tar.xz linux-64f31d5802af11fd87872b4bae07b35cf0acb358.zip |
s390/mm: simplify the TLB flushing code
ptep_flush_lazy and pmdp_flush_lazy use mm->context.attach_count to
decide between a lazy TLB flush and an immediate TLB flush. The field
contains two 16-bit counters: the number of CPUs that have the mm
attached and can create TLB entries for it, and the number of CPUs in
the middle of a page table update.
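For illustration, a minimal standalone sketch (plain C, not kernel code) of how the two 16-bit values could be unpacked from the single atomic counter; the split of low bits for the attach count and high bits for the update count is inferred from the `>> 16` test visible in the patch below.

```c
#include <stdint.h>

/* Assumed layout of the old packed counter (inferred, not authoritative):
 * low 16 bits  - CPUs that have the mm attached and may create TLB entries
 * high 16 bits - CPUs currently in the middle of a page table update
 */
static inline uint16_t attached_cpus(uint32_t attach_count)
{
	return attach_count & 0xffff;
}

static inline uint16_t updating_cpus(uint32_t attach_count)
{
	return attach_count >> 16;
}
```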
The __tlb_flush_asce, ptep_flush_direct and pmdp_flush_direct functions
use the attach counter and a mask check with mm_cpumask(mm) to decide
between a local flush of the current CPU and a global flush.
For all these functions the decision between lazy vs immediate and
local vs global TLB flush can be based on CPU masks. There are two
masks: the mm->context.cpu_attach_mask with the CPUs that are actively
using the mm, and the mm_cpumask(mm) with the CPUs that have used the
mm since the last full flush. The decision between lazy vs immediate
flush is based on mm->context.cpu_attach_mask; the decision between
local vs global flush is based on mm_cpumask(mm).
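A standalone model (plain C with CPU masks reduced to 64-bit bitmaps, not the kernel implementation) of the two decisions described above; the structure and field names merely mirror the commit message.

```c
#include <stdint.h>
#include <stdbool.h>

struct mm_model {
	uint64_t cpu_attach_mask;  /* CPUs actively using the mm */
	uint64_t cpu_mask;         /* CPUs that used the mm since the last full flush */
	bool     flush_mm;         /* a flush was requested but deferred */
};

/* Lazy vs immediate: the flush may be deferred only if no CPU other than
 * the current one is actively attached and able to create TLB entries. */
static bool flush_can_be_lazy(const struct mm_model *mm, int cpu)
{
	return (mm->cpu_attach_mask & ~(UINT64_C(1) << cpu)) == 0;
}

/* Local vs global: a local flush is enough only if no CPU other than the
 * current one has used the mm since the last full flush. */
static bool flush_can_be_local(const struct mm_model *mm, int cpu)
{
	return (mm->cpu_mask & ~(UINT64_C(1) << cpu)) == 0;
}
```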
With this patch all checks use the CPU masks. The old counter
mm->context.attach_count with its two 16-bit values is turned into a
single counter mm->context.flush_count that keeps track of the number
of CPUs with incomplete page table updates. The sole user of this
counter is finish_arch_post_lock_switch(), which waits for the end of
all page table updates.
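A standalone sketch (C11 atomics, not kernel code) of how flush_count is meant to pair with that wait: updaters bracket a page table update with an increment/decrement, and the scheduler-side wait spins until no update is in flight. Only the counter's role comes from the commit message; the helper names are made up for the example.

```c
#include <stdatomic.h>

static atomic_int flush_count;	/* models mm->context.flush_count */

static void page_table_update_begin(void)
{
	atomic_fetch_add(&flush_count, 1);	/* entering a page table update */
}

static void page_table_update_end(void)
{
	atomic_fetch_sub(&flush_count, 1);	/* update complete */
}

/* What finish_arch_post_lock_switch() waits for before trusting the
 * page tables of the mm it is switching to. */
static void wait_for_page_table_updates(void)
{
	while (atomic_load(&flush_count))
		;	/* the kernel uses cpu_relax() here */
}
```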
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/include/asm/mmu_context.h')
-rw-r--r-- | arch/s390/include/asm/mmu_context.h | 15 |
1 files changed, 5 insertions, 10 deletions
```diff
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c837b79b455d..f77c638bf397 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -19,7 +19,7 @@ static inline int init_new_context(struct task_struct *tsk,
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
 	INIT_LIST_HEAD(&mm->context.gmap_list);
 	cpumask_clear(&mm->context.cpu_attach_mask);
-	atomic_set(&mm->context.attach_count, 0);
+	atomic_set(&mm->context.flush_count, 0);
 	mm->context.flush_mm = 0;
 #ifdef CONFIG_PGSTE
 	mm->context.alloc_pgste = page_table_allocate_pgste;
@@ -90,15 +90,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	S390_lowcore.user_asce = next->context.asce;
 	if (prev == next)
 		return;
-	if (MACHINE_HAS_TLB_LC)
-		cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
+	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
+	cpumask_set_cpu(cpu, mm_cpumask(next));
 	/* Clear old ASCE by loading the kernel ASCE. */
 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
-	atomic_inc(&next->context.attach_count);
-	atomic_dec(&prev->context.attach_count);
-	if (MACHINE_HAS_TLB_LC)
-		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+	cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -110,10 +107,9 @@ static inline void finish_arch_post_lock_switch(void)
 	load_kernel_asce();
 	if (mm) {
 		preempt_disable();
-		while (atomic_read(&mm->context.attach_count) >> 16)
+		while (atomic_read(&mm->context.flush_count))
 			cpu_relax();
-		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 		if (mm->context.flush_mm)
 			__tlb_flush_mm(mm);
 		preempt_enable();
 	}
@@ -128,7 +124,6 @@ static inline void activate_mm(struct mm_struct *prev,
 				struct mm_struct *next)
 {
 	switch_mm(prev, next, current);
-	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
 	set_user_asce(next);
 }
```