author | Paul Burton <paul.burton@imgtec.com> | 2015-09-22 20:12:16 +0200
---|---|---
committer | Ralf Baechle <ralf@linux-mips.org> | 2015-11-11 08:35:14 +0100
commit | 23d5de8efb9aed48074a72bf3d43841e1556ca42 (patch) |
tree | f7f9b6fa12c31bd197fc2151c06ccd7cc39c8a6a /arch/mips/kernel |
parent | MIPS: CM: Fix GCR_Cx_CONFIG PVPE mask (diff) |
MIPS: CM: Introduce core-other locking functions
Introduce mips_cm_lock_other & mips_cm_unlock_other, mirroring the
existing CPC equivalents, in order to lock access from the current core
to another via the core-other GCR region. This hasn't been required in
the past, but with CM3 the CPC starts using GCR_CL_OTHER rather than
CPC_CL_OTHER, so this locking will be required for safety.
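
As a usage illustration (not part of this patch), the pair is meant to
bracket any access to the core-other GCR region. A minimal caller sketch
follows; the read_gcr_co_config() accessor name is assumed here from the
existing gcr_co_* accessor pattern and is not added by this change.

/*
 * Hypothetical caller sketch: read another core's GCR_Cx_CONFIG via
 * the core-other region.
 */
static u32 read_other_core_config(unsigned int core)
{
	u32 cfg;

	/* Point this core's GCR_CL_OTHER window at the target core (VP 0) */
	mips_cm_lock_other(core, 0);

	/* gcr_co_* accesses now refer to the selected core */
	cfg = read_gcr_co_config();

	/* Drop the per-core lock, restoring IRQ flags & preemption */
	mips_cm_unlock_other();

	return cfg;
}

Note that the lock is taken per current core rather than per target core,
since the GCR_CL_OTHER register being protected is local to the core
issuing the accesses.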
[ralf@linux-mips.org: Fix merge conflict.]
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Markos Chandras <markos.chandras@imgtec.com>
Patchwork: https://patchwork.linux-mips.org/patch/11207/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r-- | arch/mips/kernel/mips-cm.c | 39 |
1 file changed, 39 insertions, 0 deletions
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 02f7d7a41133..01908dbdf677 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -9,6 +9,8 @@
  */
 
 #include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
 
 #include <asm/mips-cm.h>
 #include <asm/mipsregs.h>
@@ -136,6 +138,9 @@ static char *cm3_causes[32] = {
 	"0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f"
 };
 
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock);
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags);
+
 phys_addr_t __mips_cm_phys_base(void)
 {
 	u32 config3 = read_c0_config3();
@@ -200,6 +205,7 @@ int mips_cm_probe(void)
 {
 	phys_addr_t addr;
 	u32 base_reg;
+	unsigned cpu;
 
 	/*
 	 * No need to probe again if we have already been
@@ -247,9 +253,42 @@ int mips_cm_probe(void)
 	/* determine register width for this CM */
 	mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
 
+	for_each_possible_cpu(cpu)
+		spin_lock_init(&per_cpu(cm_core_lock, cpu));
+
 	return 0;
 }
 
+void mips_cm_lock_other(unsigned int core, unsigned int vp)
+{
+	unsigned curr_core;
+	u32 val;
+
+	preempt_disable();
+	curr_core = current_cpu_data.core;
+	spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
+			  per_cpu(cm_core_lock_flags, curr_core));
+
+	if (mips_cm_revision() >= CM_REV_CM3) {
+		val = core << CM3_GCR_Cx_OTHER_CORE_SHF;
+		val |= vp << CM3_GCR_Cx_OTHER_VP_SHF;
+	} else {
+		BUG_ON(vp != 0);
+		val = core << CM_GCR_Cx_OTHER_CORENUM_SHF;
+	}
+
+	write_gcr_cl_other(val);
+}
+
+void mips_cm_unlock_other(void)
+{
+	unsigned curr_core = current_cpu_data.core;
+
+	spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
+			       per_cpu(cm_core_lock_flags, curr_core));
+	preempt_enable();
+}
+
 void mips_cm_error_report(void)
 {
 	u64 cm_error, cm_addr, cm_other;