Diffstat (limited to 'arch/powerpc/kernel/misc_32.S')
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 32
1 file changed, 25 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 8533de50347d..8b642ab26d37 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -288,7 +288,16 @@ _GLOBAL(_tlbia)
*/
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
+ /* We run the search with interrupts disabled because we have to
+ * change the PID, and we must not be preempted while the wrong
+ * PID is in place.
+ */
+ mfmsr r5
+ mfspr r6,SPRN_PID
+ wrteei 0
+ mtspr SPRN_PID,r4
tlbsx. r3, 0, r3
+ mtspr SPRN_PID,r6
+ wrtee r5
bne 10f
sync
/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
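In the 40x hunk above, the tlbsx. search is bracketed by a save/disable/swap/restore sequence: the MSR and current PID are saved, external interrupts are dropped with wrteei 0, the PID of the context being invalidated is installed, and both are restored once the search is done. A rough C model of that sequence, where the msr/pid variables and the tlb_search()/tlb_search_as() helpers are illustrative stand-ins rather than kernel APIs:

#include <stdint.h>
#include <stdio.h>

#define MSR_EE 0x00008000u          /* external-interrupt enable bit */

/* Stand-ins for machine state the asm touches directly. */
static uint32_t msr = MSR_EE;       /* mfmsr/wrtee target            */
static uint32_t pid = 1;            /* SPRN_PID: current context ID  */

static int tlb_search(uint32_t ea)  /* models tlbsx.: EA -> entry    */
{
    (void)ea;
    return -1;                      /* pretend the EA is not mapped  */
}

/* Same shape as the 40x _tlbie path: the PID must match the context
 * of the address being searched, so it is only swapped while external
 * interrupts are off, then restored together with the saved MSR. */
static int tlb_search_as(uint32_t ea, uint32_t context_pid)
{
    uint32_t saved_msr = msr;       /* mfmsr  r5          */
    uint32_t saved_pid = pid;       /* mfspr  r6,SPRN_PID */
    int entry;

    msr &= ~MSR_EE;                 /* wrteei 0           */
    pid = context_pid;              /* mtspr  SPRN_PID,r4 */
    entry = tlb_search(ea);         /* tlbsx. r3,0,r3     */
    pid = saved_pid;                /* mtspr  SPRN_PID,r6 */
    msr = saved_msr;                /* wrtee  r5          */
    return entry;
}

int main(void)
{
    printf("entry = %d\n", tlb_search_as(0x10000000u, 2));
    return 0;
}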
@@ -297,23 +306,23 @@ _GLOBAL(_tlbie)
tlbwe r3, r3, TLB_TAG
isync
10:
+
#elif defined(CONFIG_44x)
- mfspr r4,SPRN_MMUCR
- mfspr r5,SPRN_PID /* Get PID */
- rlwimi r4,r5,0,24,31 /* Set TID */
+ mfspr r5,SPRN_MMUCR
+ rlwimi r5,r4,0,24,31 /* Set TID */
/* We have to run the search with interrupts disabled, even critical
* and debug interrupts (in fact the only critical exceptions we have
* are debug and machine check). Otherwise an interrupt which causes
* a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
- mfmsr r5
+ mfmsr r4
lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
- andc r6,r5,r6
+ andc r6,r4,r6
mtmsr r6
- mtspr SPRN_MMUCR,r4
+ mtspr SPRN_MMUCR,r5
tlbsx. r3, 0, r3
- mtmsr r5
+ mtmsr r4
bne 10f
sync
/* There are only 64 TLB entries, so r3 < 64,
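On 44x, wrteei is not enough: an interrupt causing a TLB miss between the mtspr to MMUCR and the tlbsx. would clobber MMUCR, so the hunk above masks EE, CE, ME and DE together. The combined mask (0x29200 with the usual 32-bit MSR bit values) does not fit in a 16-bit signed immediate, hence the lis/addi @ha/@l pair, and andc strips it from the saved MSR in one instruction. Below is a small C sketch of the mask arithmetic; the MSR_* values are the standard 32-bit PowerPC definitions, assumed here rather than quoted from asm/reg.h.

#include <stdint.h>
#include <stdio.h>

/* 32-bit PowerPC MSR bits (assumed, mirroring asm/reg.h). */
#define MSR_EE 0x00008000u          /* external interrupts */
#define MSR_CE 0x00020000u          /* critical interrupts */
#define MSR_ME 0x00001000u          /* machine check       */
#define MSR_DE 0x00000200u          /* debug interrupts    */

int main(void)
{
    uint32_t msr  = MSR_EE | MSR_CE | MSR_ME | MSR_DE | 0x30u;
    /* lis r6,MASK@ha ; addi r6,r6,MASK@l builds this 32-bit constant */
    uint32_t mask = MSR_EE | MSR_CE | MSR_ME | MSR_DE;   /* 0x29200  */
    uint32_t off  = msr & ~mask;                         /* andc     */

    /* With EE, CE and DE all cleared, no interrupt that could cause
     * a TLB miss can fire and reload MMUCR between the
     * mtspr SPRN_MMUCR and the tlbsx. */
    printf("msr=%08x mask=%08x quiesced=%08x\n",
           (unsigned)msr, (unsigned)mask, (unsigned)off);
    return 0;
}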
@@ -534,12 +543,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
addi r3,r3,L1_CACHE_BYTES
bdnz 0b
sync
+#ifndef CONFIG_44x
+ /* We don't flush the icache on 44x. Those have a virtual icache,
+ * and we don't have access to the virtual address here (it's
+ * not the page vaddr but where the page is mapped in user
+ * space). The icache flush for these parts is instead handled
+ * elsewhere, when the address space changes, before returning
+ * to user space.
+ */
mtctr r4
1: icbi 0,r6
addi r6,r6,L1_CACHE_BYTES
bdnz 1b
sync
isync
+#endif /* CONFIG_44x */
blr
/*
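The last hunk keeps the dcbst loop but compiles the icbi loop out on 44x, whose icache is virtually tagged: the address available here is not the user-space virtual address the icache was filled through, so an icbi on it would hit the wrong lines. A C sketch of the routine's resulting shape, with illustrative names throughout; dcbst()/icbi() are printf stand-ins for the cache instructions and the 32-byte line size is assumed:

#include <stdio.h>

#define L1_CACHE_BYTES 32u          /* assumed line size */

static void dcbst(const void *p) { printf("dcbst %p\n", p); }
static void icbi(const void *p)  { printf("icbi  %p\n", p); }

/* Shape of the flush routine after the hunk above: dcache lines are
 * always pushed to memory, but building with -DCONFIG_44x drops the
 * icbi loop, since flushing the virtually tagged 44x icache is
 * deferred until the address space changes on return to user space. */
static void flush_dcache_icache_page(const char *page, unsigned lines)
{
    for (unsigned i = 0; i < lines; i++)    /* dcbst loop, then sync  */
        dcbst(page + i * L1_CACHE_BYTES);
#ifndef CONFIG_44x
    for (unsigned i = 0; i < lines; i++)    /* icbi loop, sync, isync */
        icbi(page + i * L1_CACHE_BYTES);
#endif
}

int main(void)
{
    static char page[4 * L1_CACHE_BYTES];
    flush_dcache_icache_page(page, sizeof(page) / L1_CACHE_BYTES);
    return 0;
}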