author	James Hogan <james.hogan@imgtec.com>	2016-07-13 15:12:51 +0200
committer	Ralf Baechle <ralf@linux-mips.org>	2016-07-29 10:19:29 +0200
commit	6d758bfc7b05b11a4a853c3052cb815f40b82afe (patch)
tree	d532a1f2a7dbcec7c35695304dfce135e3298d12 /arch
parent	MIPS: c-r4k: Add r4k_on_each_cpu cache op type arg (diff)
MIPS: c-r4k: Fix valid ASID optimisation
Several cache operations are optimised to return early from the SMP call handler if the memory map in question has no valid ASID on the current CPU, or any online CPU in the case of MIPS_MT_SMP. The idea is that if a memory map has never been used on a CPU it shouldn't have cache lines in need of flushing.

However this doesn't cover all cases when ASIDs for other CPUs need to be checked:

- Offline VPEs may have recently been online and brought lines into the (shared) cache, so they should also be checked, rather than only online CPUs.

- SMP systems with a Coherence Manager (CM), but with MT disabled still have globalized hit cache ops, but don't use SMP calls, so all present CPUs should be taken into account.

- R6 systems have a different multithreading implementation, so MIPS_MT_SMP won't be set, but as above may still have a CM which globalizes hit cache ops.

Additionally for non-globalized cache operations where an SMP call to a single VPE in each foreign core is used, it is not necessary to check every CPU in the system, only sibling CPUs sharing the same first level cache.

Fix this by making has_valid_asid() take a cache op type argument like r4k_on_each_cpu(), so it can determine whether r4k_on_each_cpu() will have done SMP calls to other cores. It can then determine which set of CPUs to check the ASIDs of based on that, excluding foreign CPUs if an SMP call will have been performed.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Leonid Yegoshin <leonid.yegoshin@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/13804/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
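
[Editorial note, not part of the patch: the early-return optimisation hinges on cpu_context(), which reads the ASID that the mm was last assigned on a given CPU. A rough sketch, paraphrased from arch/mips/include/asm/mmu_context.h of this era; the exact field layout may differ between kernel versions:]

/*
 * Sketch of the MIPS cpu_context() accessor (paraphrased, not from this
 * patch). Each mm records, per CPU, the ASID it was assigned the last
 * time it ran there; zero means the mm has not run on that CPU since the
 * last ASID generation rollover, so it cannot have lines in that CPU's
 * caches -- which is exactly what has_valid_asid() exploits below.
 */
#define cpu_context(cpu, mm)	((mm)->context.asid[cpu])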
Diffstat (limited to 'arch')
-rw-r--r--	arch/mips/mm/c-r4k.c | 48 +++++++++++++++++++++++++++++++++++-------------
1 file changed, 35 insertions(+), 13 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 412052321472..2a4bb5057ebc 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -488,19 +488,41 @@ static void r4k___flush_cache_all(void)
 	r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL);
 }
 
-static inline int has_valid_asid(const struct mm_struct *mm)
+/**
+ * has_valid_asid() - Determine if an mm already has an ASID.
+ * @mm:		Memory map.
+ * @type:	R4K_HIT or R4K_INDEX, type of cache op.
+ *
+ * Determines whether @mm already has an ASID on any of the CPUs which cache ops
+ * of type @type within an r4k_on_each_cpu() call will affect. If
+ * r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the
+ * scope of the operation is confined to sibling CPUs, otherwise all online CPUs
+ * will need to be checked.
+ *
+ * Must be called in non-preemptive context.
+ *
+ * Returns:	1 if the CPUs affected by @type cache ops have an ASID for @mm.
+ *		0 otherwise.
+ */
+static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
 {
-#ifdef CONFIG_MIPS_MT_SMP
-	int i;
+	unsigned int i;
+	const cpumask_t *mask = cpu_present_mask;
 
-	for_each_online_cpu(i)
+	/* cpu_sibling_map[] undeclared when !CONFIG_SMP */
+#ifdef CONFIG_SMP
+	/*
+	 * If r4k_on_each_cpu does SMP calls, it does them to a single VPE in
+	 * each foreign core, so we only need to worry about siblings.
+	 * Otherwise we need to worry about all present CPUs.
+	 */
+	if (r4k_op_needs_ipi(type))
+		mask = &cpu_sibling_map[smp_processor_id()];
+#endif
+	for_each_cpu(i, mask)
 		if (cpu_context(i, mm))
 			return 1;
-
 	return 0;
-#else
-	return cpu_context(smp_processor_id(), mm);
-#endif
 }
 
 static void r4k__flush_cache_vmap(void)
@@ -522,7 +544,7 @@ static inline void local_r4k_flush_cache_range(void * args)
 	struct vm_area_struct *vma = args;
 	int exec = vma->vm_flags & VM_EXEC;
 
-	if (!(has_valid_asid(vma->vm_mm)))
+	if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
 		return;
 
 	/*
@@ -550,7 +572,7 @@ static inline void local_r4k_flush_cache_mm(void * args)
 {
 	struct mm_struct *mm = args;
 
-	if (!has_valid_asid(mm))
+	if (!has_valid_asid(mm, R4K_INDEX))
 		return;
 
 	/*
@@ -600,10 +622,10 @@ static inline void local_r4k_flush_cache_page(void *args)
 	void *vaddr;
 
 	/*
-	 * If ownes no valid ASID yet, cannot possibly have gotten
+	 * If owns no valid ASID yet, cannot possibly have gotten
 	 * this page into the cache.
 	 */
-	if (!has_valid_asid(mm))
+	if (!has_valid_asid(mm, R4K_HIT))
 		return;
 
 	addr &= PAGE_MASK;
@@ -851,7 +873,7 @@ static void local_r4k_flush_cache_sigtramp(void *args)
 	 * If owns no valid ASID yet, cannot possibly have gotten
 	 * this page into the cache.
 	 */
-	if (!has_valid_asid(mm))
+	if (!has_valid_asid(mm, R4K_HIT))
 		return;
 
 	if (mm == current->active_mm) {
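
[Editorial note: the sibling/present distinction above relies on r4k_op_needs_ipi(), introduced by the parent patch ("MIPS: c-r4k: Add r4k_on_each_cpu cache op type arg"). A simplified sketch of the decision it encodes; this is a paraphrase, not a verbatim copy of the in-tree helper:]

/*
 * Simplified sketch of r4k_op_needs_ipi() (paraphrased, not verbatim).
 */
static inline bool r4k_op_needs_ipi(unsigned int type)
{
	/*
	 * A Coherence Manager globalizes hit (address-based) cache ops,
	 * so they reach every core's caches without any SMP call.
	 */
	if (type == R4K_HIT && mips_cm_present())
		return false;

	/*
	 * Index ops, and hit ops without a CM, only affect the caches of
	 * the local core, so foreign cores must be reached by SMP calls.
	 * (The in-tree helper also returns false when there are no
	 * foreign CPUs to call, e.g. on non-SMP configurations.)
	 */
	return true;
}

[When r4k_op_needs_ipi() returns true, each foreign core runs the cache op locally via its own SMP call, so has_valid_asid() only needs to check the ASIDs of sibling CPUs; when it returns false, a single CPU performs the globalized (or purely local) op, so the ASIDs of all present CPUs must be checked.]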