author    Chris Metcalf <cmetcalf@tilera.com>  2013-08-07 17:36:54 +0200
committer Chris Metcalf <cmetcalf@tilera.com>  2013-08-13 22:26:01 +0200
commit    bc1a298f4e04833db4c430df59b90039f0170515
tree      802da739309efeab62317f62ec4f1989f3f7d8dd
parent    tile: remove calls to arch_flush_lazy_mmu_mode()
tile: support CONFIG_PREEMPT
This change adds support for CONFIG_PREEMPT (full kernel preemption). In addition to the core support, this change includes a number of places where we fix up uses of smp_processor_id() and per-cpu variables.

I also eliminate the PAGE_HOME_HERE and PAGE_HOME_UNKNOWN values for page homing, as it turns out they weren't being used.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
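Most of the fixups below follow one of two idioms. With CONFIG_PREEMPT (and CONFIG_DEBUG_PREEMPT) enabled, smp_processor_id() warns when called from preemptible context, since the task may migrate to another cpu immediately after the read. Call sites that only need a best-effort cpu number for diagnostics switch to raw_smp_processor_id(); call sites that need the cpu to stay stable disable preemption around the access. A minimal sketch of the two idioms (hypothetical code, not part of this patch):

	/* Diagnostic only: a stale cpu number is acceptable. */
	pr_debug("running near cpu %d\n", raw_smp_processor_id());

	/* Need a stable cpu: pin the task first. */
	cpu = get_cpu();		/* disables preemption */
	/* ... this cpu's per-cpu state is now safe to touch ... */
	put_cpu();			/* re-enables preemption */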
Diffstat (limited to 'arch/tile/kernel')
-rw-r--r--  arch/tile/kernel/asm-offsets.c |  2
-rw-r--r--  arch/tile/kernel/hardwall.c    | 18
-rw-r--r--  arch/tile/kernel/intvec_32.S   | 27
-rw-r--r--  arch/tile/kernel/intvec_64.S   | 30
-rw-r--r--  arch/tile/kernel/irq.c         |  1
-rw-r--r--  arch/tile/kernel/smp.c         |  2
-rw-r--r--  arch/tile/kernel/smpboot.c     |  8
-rw-r--r--  arch/tile/kernel/stack.c       |  4
-rw-r--r--  arch/tile/kernel/sys.c         |  4
9 files changed, 67 insertions(+), 29 deletions(-)
diff --git a/arch/tile/kernel/asm-offsets.c b/arch/tile/kernel/asm-offsets.c
index 8652b0be4685..97ea6ac0a47b 100644
--- a/arch/tile/kernel/asm-offsets.c
+++ b/arch/tile/kernel/asm-offsets.c
@@ -58,6 +58,8 @@ void foo(void)
offsetof(struct thread_info, status));
DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET,
offsetof(struct thread_info, homecache_cpu));
+ DEFINE(THREAD_INFO_PREEMPT_COUNT_OFFSET,
+ offsetof(struct thread_info, preempt_count));
DEFINE(THREAD_INFO_STEP_STATE_OFFSET,
offsetof(struct thread_info, step_state));
#ifdef __tilegx__
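The new offset lets the interrupt-return code in intvec_32.S and intvec_64.S (below) load thread_info->preempt_count directly. asm-offsets.c is the kernel's standard mechanism for exporting C struct layouts to assembly: the file is compiled to assembly and its DEFINE() markers are scraped into a generated header of integer constants. Roughly (the offset value here is hypothetical):

	/* include/generated/asm-offsets.h (generated at build time) */
	#define THREAD_INFO_PREEMPT_COUNT_OFFSET 36 /* offsetof(struct thread_info, preempt_count) */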
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 7db8893d4fc5..df27a1fd94a3 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -272,9 +272,9 @@ static void hardwall_setup_func(void *info)
struct hardwall_info *r = info;
struct hardwall_type *hwt = r->type;
- int cpu = smp_processor_id();
- int x = cpu % smp_width;
- int y = cpu / smp_width;
+ int cpu = smp_processor_id(); /* on_each_cpu disables preemption */
+ int x = cpu_x(cpu);
+ int y = cpu_y(cpu);
int bits = 0;
if (x == r->ulhc_x)
bits |= W_PROTECT;
@@ -317,6 +317,7 @@ static void hardwall_protect_rectangle(struct hardwall_info *r)
on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
}
+/* Entered from INT_xDN_FIREWALL interrupt vector with irqs disabled. */
void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
{
struct hardwall_info *rect;
@@ -325,7 +326,6 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
struct siginfo info;
int cpu = smp_processor_id();
int found_processes;
- unsigned long flags;
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
@@ -346,7 +346,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
BUG_ON(hwt->disabled);
/* This tile trapped a network access; find the rectangle. */
- spin_lock_irqsave(&hwt->lock, flags);
+ spin_lock(&hwt->lock);
list_for_each_entry(rect, &hwt->list, list) {
if (cpumask_test_cpu(cpu, &rect->cpumask))
break;
@@ -401,7 +401,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
pr_notice("hardwall: no associated processes!\n");
done:
- spin_unlock_irqrestore(&hwt->lock, flags);
+ spin_unlock(&hwt->lock);
/*
* We have to disable firewall interrupts now, or else when we
@@ -661,7 +661,7 @@ static int hardwall_deactivate(struct hardwall_type *hwt,
return -EINVAL;
printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
- task->pid, task->comm, hwt->name, smp_processor_id());
+ task->pid, task->comm, hwt->name, raw_smp_processor_id());
return 0;
}
@@ -803,8 +803,8 @@ static void reset_xdn_network_state(struct hardwall_type *hwt)
/* Reset UDN coordinates to their standard value */
{
unsigned int cpu = smp_processor_id();
- unsigned int x = cpu % smp_width;
- unsigned int y = cpu / smp_width;
+ unsigned int x = cpu_x(cpu);
+ unsigned int y = cpu_y(cpu);
__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
}
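The locking change in do_hardwall_trap() above is safe because the handler is only entered from the firewall interrupt vector with interrupts already disabled (per the new comment), so saving and restoring the irq flags around the lock is redundant work. A minimal sketch of the distinction, with hypothetical names:

	static DEFINE_SPINLOCK(example_lock);

	static void from_unknown_context(void)
	{
		unsigned long flags;

		/* Caller may have irqs enabled: save, disable, restore. */
		spin_lock_irqsave(&example_lock, flags);
		/* ... critical section ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}

	static void from_hard_irq_handler(void)
	{
		/* Irqs are known to be off: the plain lock suffices. */
		spin_lock(&example_lock);
		/* ... critical section ... */
		spin_unlock(&example_lock);
	}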
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 388061319c4c..10767655689e 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -28,10 +28,6 @@
#include <arch/interrupts.h>
#include <arch/spr_def.h>
-#ifdef CONFIG_PREEMPT
-# error "No support for kernel preemption currently"
-#endif
-
#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
@@ -812,17 +808,34 @@ STD_ENTRY(interrupt_return)
}
lw r29, r29
andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
+ bzt r29, .Lresume_userspace
+
+#ifdef CONFIG_PREEMPT
+ /* Returning to kernel space. Check if we need preemption. */
+ GET_THREAD_INFO(r29)
+ addli r28, r29, THREAD_INFO_FLAGS_OFFSET
{
- bzt r29, .Lresume_userspace
- PTREGS_PTR(r29, PTREGS_OFFSET_PC)
+ lw r28, r28
+ addli r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
+ }
+ {
+ andi r28, r28, _TIF_NEED_RESCHED
+ lw r29, r29
}
+ bzt r28, 1f
+ bnz r29, 1f
+ jal preempt_schedule_irq
+ FEEDBACK_REENTER(interrupt_return)
+1:
+#endif
/* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
{
- lw r28, r29
+ PTREGS_PTR(r29, PTREGS_OFFSET_PC)
moveli r27, lo16(_cpu_idle_nap)
}
{
+ lw r28, r29
auli r27, r27, ha16(_cpu_idle_nap)
}
{
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 884af9ea5bed..38a60f27707c 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -30,10 +30,6 @@
#include <arch/interrupts.h>
#include <arch/spr_def.h>
-#ifdef CONFIG_PREEMPT
-# error "No support for kernel preemption currently"
-#endif
-
#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
@@ -820,11 +816,33 @@ STD_ENTRY(interrupt_return)
andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
{
beqzt r29, .Lresume_userspace
- PTREGS_PTR(r29, PTREGS_OFFSET_PC)
+ move r29, sp
+ }
+
+#ifdef CONFIG_PREEMPT
+ /* Returning to kernel space. Check if we need preemption. */
+ EXTRACT_THREAD_INFO(r29)
+ addli r28, r29, THREAD_INFO_FLAGS_OFFSET
+ {
+ ld r28, r28
+ addli r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
}
+ {
+ andi r28, r28, _TIF_NEED_RESCHED
+ ld4s r29, r29
+ }
+ beqzt r28, 1f
+ bnez r29, 1f
+ jal preempt_schedule_irq
+ FEEDBACK_REENTER(interrupt_return)
+1:
+#endif
/* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
- moveli r27, hw2_last(_cpu_idle_nap)
+ {
+ moveli r27, hw2_last(_cpu_idle_nap)
+ PTREGS_PTR(r29, PTREGS_OFFSET_PC)
+ }
{
ld r28, r29
shl16insli r27, r27, hw1(_cpu_idle_nap)
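Both interrupt-return paths now perform the standard kernel-preemption check: when returning to kernel space, reschedule if the thread is flagged TIF_NEED_RESCHED and its preempt_count is zero. In C, the logic the new assembly encodes is roughly the following sketch, where returning_to_kernel stands in for the PL test on SPR_EX_CONTEXT_1_1:

	if (returning_to_kernel) {
		struct thread_info *ti = current_thread_info();

		if ((ti->flags & _TIF_NEED_RESCHED) && ti->preempt_count == 0)
			preempt_schedule_irq();	/* irqs are still disabled here */
	}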
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 02e628065012..c90de6c3cb7f 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -74,6 +74,7 @@ static DEFINE_SPINLOCK(available_irqs_lock);
/*
* The interrupt handling path, implemented in terms of HV interrupt
* emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx.
+ * Entered with interrupts disabled.
*/
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index cbc73a8b8fe1..6cc520d71d2b 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -100,8 +100,8 @@ static void smp_start_cpu_interrupt(void)
/* Handler to stop the current cpu. */
static void smp_stop_cpu_interrupt(void)
{
- set_cpu_online(smp_processor_id(), 0);
arch_local_irq_disable_all();
+ set_cpu_online(smp_processor_id(), 0);
for (;;)
asm("nap; nop");
}
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 44bab29bf2f3..dee7f13c5854 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -142,13 +142,15 @@ static struct cpumask cpu_started __cpuinitdata;
*/
static void __cpuinit start_secondary(void)
{
- int cpuid = smp_processor_id();
+ int cpuid;
+
+ preempt_disable();
+
+ cpuid = smp_processor_id();
/* Set our thread pointer appropriately. */
set_my_cpu_offset(__per_cpu_offset[cpuid]);
- preempt_disable();
-
/*
* In large machines even this will slow us down, since we
* will be contending for the printk spinlock.
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index c972689231ef..176ffe48eee9 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -194,7 +194,7 @@ static int KBacktraceIterator_next_item_inclusive(
*/
static void validate_stack(struct pt_regs *regs)
{
- int cpu = smp_processor_id();
+ int cpu = raw_smp_processor_id();
unsigned long ksp0 = get_current_ksp0();
unsigned long ksp0_base = ksp0 - THREAD_SIZE;
unsigned long sp = stack_pointer;
@@ -392,7 +392,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
pr_err("Starting stack dump of tid %d, pid %d (%s)"
" on cpu %d at cycle %lld\n",
kbt->task->pid, kbt->task->tgid, kbt->task->comm,
- smp_processor_id(), get_cycles());
+ raw_smp_processor_id(), get_cycles());
}
kbt->verbose = 1;
i = 0;
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index b881a7be24bd..38debe706061 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -38,8 +38,10 @@
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
unsigned long, flags)
{
+ /* DCACHE is not particularly effective if not bound to one cpu. */
if (flags & DCACHE)
- homecache_evict(cpumask_of(smp_processor_id()));
+ homecache_evict(cpumask_of(raw_smp_processor_id()));
+
if (flags & ICACHE)
flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(current->mm),
0, 0, 0, NULL, NULL, 0);