| author | Akira Takeuchi <takeuchi.akr@jp.panasonic.com> | 2010-10-27 18:28:51 +0200 |
|---|---|---|
| committer | David Howells <dhowells@redhat.com> | 2010-10-27 18:28:51 +0200 |
| commit | 965ea4bbb9ae926358273368144ba838c561bc38 | |
| tree | c6ee08f2f2970bb500ccc7cf58de6beea870e4f8 /arch/mn10300/mm | |
| parent | MN10300: Use the [ID]PTEL2 registers rather than [ID]PTEL for TLB control | |
MN10300: SMP TLB flushing
Implement global TLB flushing for MN10300. This will be used by the AM34,
which is SMP-capable.
Signed-off-by: Akira Takeuchi <takeuchi.akr@jp.panasonic.com>
Signed-off-by: Kiyoshi Owada <owada.kiyoshi@jp.panasonic.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Diffstat (limited to 'arch/mn10300/mm')
-rw-r--r-- | arch/mn10300/mm/Makefile | 2
-rw-r--r-- | arch/mn10300/mm/tlb-smp.c | 214
2 files changed, 216 insertions, 0 deletions
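
To make the interface concrete before the diff, here is a minimal caller sketch. None of this is in the patch: example_update_pte() is an invented name, used only to show where the new entry points sit. After generic MM code rewrites a PTE, any CPU that has run the address space may still hold the stale translation in its TLB, so flush_tlb_page() must flush locally and then push the invalidation to the other CPUs recorded in mm->cpu_vm_mask.

```c
#include <linux/mm.h>
#include <asm/tlbflush.h>

/* Hypothetical caller, for illustration only: not part of this patch. */
static void example_update_pte(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep,
                               pte_t newpte)
{
        /* Rewrite the PTE in the page tables. */
        set_pte_at(vma->vm_mm, addr, ptep, newpte);

        /* Drop the stale translation everywhere: a local flush, then
         * (on SMP) an IPI-driven flush of the other CPUs that have
         * used vma->vm_mm, via the flush_tlb_others() added below.
         */
        flush_tlb_page(vma, addr);
}
```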
```diff
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
index 184745f94c32..203fee23f7d7 100644
--- a/arch/mn10300/mm/Makefile
+++ b/arch/mn10300/mm/Makefile
@@ -18,3 +18,5 @@ cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
 obj-y := \
 	init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
 	misalignment.o dma-alloc.o $(cacheflush-y)
+
+obj-$(CONFIG_SMP) += tlb-smp.o
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
new file mode 100644
index 000000000000..0b6a5ad1960e
--- /dev/null
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -0,0 +1,214 @@
+/* SMP TLB support routines.
+ *
+ * Copyright (C) 2006-2008 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+#include <asm/exceptions.h>
+#include <asm/hardirq.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
+
+/*
+ * For flush TLB
+ */
+#define FLUSH_ALL	0xffffffff
+
+static cpumask_t flush_cpumask;
+static struct mm_struct *flush_mm;
+static unsigned long flush_va;
+static DEFINE_SPINLOCK(tlbstate_lock);
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+	&init_mm, 0
+};
+
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+			     unsigned long va);
+static void do_flush_tlb_all(void *info);
+
+/**
+ * smp_flush_tlb - Callback to invalidate the TLB.
+ * @unused: Callback context (ignored).
+ */
+void smp_flush_tlb(void *unused)
+{
+	unsigned long cpu_id;
+
+	cpu_id = get_cpu();
+
+	if (!cpu_isset(cpu_id, flush_cpumask))
+		/* This was a BUG() but until someone can quote me the line
+		 * from the intel manual that guarantees an IPI to multiple
+		 * CPUs is retried _only_ on the erroring CPUs its staying as a
+		 * return
+		 *
+		 * BUG();
+		 */
+		goto out;
+
+	if (flush_va == FLUSH_ALL)
+		local_flush_tlb();
+	else
+		local_flush_tlb_page(flush_mm, flush_va);
+
+	smp_mb__before_clear_bit();
+	cpu_clear(cpu_id, flush_cpumask);
+	smp_mb__after_clear_bit();
+out:
+	put_cpu();
+}
+
+/**
+ * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
+ * @cpumask: The list of CPUs to target.
+ * @mm: The VM context to flush from (if va!=FLUSH_ALL).
+ * @va: Virtual address to flush or FLUSH_ALL to flush everything.
+ */
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+			     unsigned long va)
+{
+	cpumask_t tmp;
+
+	/* A couple of sanity checks (to be removed):
+	 * - mask must not be empty
+	 * - current CPU must not be in mask
+	 * - we do not send IPIs to as-yet unbooted CPUs.
+	 */
+	BUG_ON(!mm);
+	BUG_ON(cpus_empty(cpumask));
+	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+
+	cpus_and(tmp, cpumask, cpu_online_map);
+	BUG_ON(!cpus_equal(cpumask, tmp));
+
+	/* I'm not happy about this global shared spinlock in the MM hot path,
+	 * but we'll see how contended it is.
+	 *
+	 * Temporarily this turns IRQs off, so that lockups are detected by the
+	 * NMI watchdog.
+	 */
+	spin_lock(&tlbstate_lock);
+
+	flush_mm = mm;
+	flush_va = va;
+#if NR_CPUS <= BITS_PER_LONG
+	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+#else
+#error Not supported.
+#endif
+
+	/* FIXME: if NR_CPUS>=3, change send_IPI_mask */
+	smp_call_function(smp_flush_tlb, NULL, 1);
+
+	while (!cpus_empty(flush_cpumask))
+		/* Lockup detection does not belong here */
+		smp_mb();
+
+	flush_mm = NULL;
+	flush_va = 0;
+	spin_unlock(&tlbstate_lock);
+}
+
+/**
+ * flush_tlb_mm - Invalidate TLB of specified VM context
+ * @mm: The VM context to invalidate.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	cpumask_t cpu_mask;
+
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+
+	local_flush_tlb();
+	if (!cpus_empty(cpu_mask))
+		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+	preempt_enable();
+}
+
+/**
+ * flush_tlb_current_task - Invalidate TLB of current task
+ */
+void flush_tlb_current_task(void)
+{
+	struct mm_struct *mm = current->mm;
+	cpumask_t cpu_mask;
+
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+
+	local_flush_tlb();
+	if (!cpus_empty(cpu_mask))
+		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+	preempt_enable();
+}
+
+/**
+ * flush_tlb_page - Invalidate TLB of page
+ * @vma: The VM context to invalidate the page for.
+ * @va: The virtual address of the page to invalidate.
+ */
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	cpumask_t cpu_mask;
+
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask;
+	cpu_clear(smp_processor_id(), cpu_mask);
+
+	local_flush_tlb_page(mm, va);
+	if (!cpus_empty(cpu_mask))
+		flush_tlb_others(cpu_mask, mm, va);
+
+	preempt_enable();
+}
+
+/**
+ * do_flush_tlb_all - Callback to completely invalidate a TLB
+ * @unused: Callback context (ignored).
+ */
+static void do_flush_tlb_all(void *unused)
+{
+	local_flush_tlb_all();
+}
+
+/**
+ * flush_tlb_all - Completely invalidate TLBs on all CPUs
+ */
+void flush_tlb_all(void)
+{
+	on_each_cpu(do_flush_tlb_all, 0, 1);
+}
```
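
The heart of the new file is the shootdown handshake in flush_tlb_others(): publish flush_mm/flush_va under tlbstate_lock, set one bit per target CPU in flush_cpumask, send the IPI, and spin until every target's smp_flush_tlb() handler has flushed and cleared its own bit. The same protocol can be sketched in user space with threads standing in for CPUs; all names below are invented for the demo, and sequentially consistent C11 atomics play the role of the kernel's bitops and memory barriers.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTARGETS 3

static atomic_ulong pending;    /* plays the role of flush_cpumask */
static atomic_int stop;

/* Each target waits for its bit, "flushes", then acknowledges by
 * clearing the bit, as smp_flush_tlb() does for each CPU. */
static void *target(void *arg)
{
        unsigned long bit = 1UL << (long)arg;

        while (!atomic_load(&stop)) {
                if (atomic_load(&pending) & bit) {
                        /* ... flush our "TLB" here ... */
                        atomic_fetch_and(&pending, ~bit);
                }
        }
        return NULL;
}

int main(void)
{
        pthread_t th[NTARGETS];
        long i;

        for (i = 0; i < NTARGETS; i++)
                pthread_create(&th[i], NULL, target, (void *)i);

        /* Initiator: request a flush on every target, then wait for all
         * acknowledgements, like the smp_mb() spin in flush_tlb_others(). */
        atomic_store(&pending, (1UL << NTARGETS) - 1);
        while (atomic_load(&pending))
                ;
        printf("all targets acknowledged\n");

        atomic_store(&stop, 1);
        for (i = 0; i < NTARGETS; i++)
                pthread_join(th[i], NULL);
        return 0;
}
```

The ordering matters in both versions: the flush must be complete before the acknowledging bit-clear becomes visible to the initiator. The kernel code enforces this with smp_mb__before_clear_bit()/smp_mb__after_clear_bit(); the sketch gets it for free from sequentially consistent atomics.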