Diffstat (limited to 'arch/mips/kernel/cps-vec.S')
-rw-r--r-- | arch/mips/kernel/cps-vec.S | 296 |
1 file changed, 190 insertions(+), 106 deletions(-)
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index ac81edd44563..51b98dc371b3 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -18,9 +18,12 @@
 #include <asm/mipsmtregs.h>
 #include <asm/pm.h>
 
+#define GCR_CPC_BASE_OFS	0x0088
 #define GCR_CL_COHERENCE_OFS	0x2008
 #define GCR_CL_ID_OFS		0x2028
 
+#define CPC_CL_VC_RUN_OFS	0x2028
+
 .extern mips_cm_base
 
 .set noreorder
@@ -60,6 +63,37 @@
 	 nop
 	.endm
 
+	/*
+	 * Set dest to non-zero if the core supports MIPSr6 multithreading
+	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
+	 * branch to nomt.
+	 */
+	.macro	has_vp	dest, nomt
+	mfc0	\dest, CP0_CONFIG, 1
+	bgez	\dest, \nomt
+	 mfc0	\dest, CP0_CONFIG, 2
+	bgez	\dest, \nomt
+	 mfc0	\dest, CP0_CONFIG, 3
+	bgez	\dest, \nomt
+	 mfc0	\dest, CP0_CONFIG, 4
+	bgez	\dest, \nomt
+	 mfc0	\dest, CP0_CONFIG, 5
+	andi	\dest, \dest, MIPS_CONF5_VP
+	beqz	\dest, \nomt
+	 nop
+	.endm
+
+	/* Calculate an uncached address for the CM GCRs */
+	.macro	cmgcrb	dest
+	.set	push
+	.set	noat
+	MFC0	$1, CP0_CMGCRBASE
+	PTR_SLL	$1, $1, 4
+	PTR_LI	\dest, UNCAC_BASE
+	PTR_ADDU \dest, \dest, $1
+	.set	pop
+	.endm
+
 .section .text.cps-vec
 .balign 0x1000
 
@@ -90,120 +124,64 @@ not_nmi:
 	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
 	mtc0	t0, CP0_STATUS
 
-	/*
-	 * Clear the bits used to index the caches. Note that the architecture
-	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
-	 * be valid for all MIPS32 CPUs, even those for which said writes are
-	 * unnecessary.
-	 */
-	mtc0	zero, CP0_TAGLO, 0
-	mtc0	zero, CP0_TAGHI, 0
-	mtc0	zero, CP0_TAGLO, 2
-	mtc0	zero, CP0_TAGHI, 2
-	ehb
-
-	/* Primary cache configuration is indicated by Config1 */
-	mfc0	v0, CP0_CONFIG, 1
-
-	/* Detect I-cache line size */
-	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
-	beqz	t0, icache_done
-	 li	t1, 2
-	sllv	t0, t1, t0
-
-	/* Detect I-cache size */
-	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
-	xori	t2, t1, 0x7
-	beqz	t2, 1f
-	 li	t3, 32
-	addiu	t1, t1, 1
-	sllv	t1, t3, t1
-1:	/* At this point t1 == I-cache sets per way */
-	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
-	addiu	t2, t2, 1
-	mul	t1, t1, t0
-	mul	t1, t1, t2
-
-	li	a0, CKSEG0
-	PTR_ADD	a1, a0, t1
-1:	cache	Index_Store_Tag_I, 0(a0)
-	PTR_ADD	a0, a0, t0
-	bne	a0, a1, 1b
+	/* Skip cache & coherence setup if we're already coherent */
+	cmgcrb	v1
+	lw	s7, GCR_CL_COHERENCE_OFS(v1)
+	bnez	s7, 1f
 	 nop
-icache_done:
 
-	/* Detect D-cache line size */
-	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
-	beqz	t0, dcache_done
-	 li	t1, 2
-	sllv	t0, t1, t0
-
-	/* Detect D-cache size */
-	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
-	xori	t2, t1, 0x7
-	beqz	t2, 1f
-	 li	t3, 32
-	addiu	t1, t1, 1
-	sllv	t1, t3, t1
-1:	/* At this point t1 == D-cache sets per way */
-	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
-	addiu	t2, t2, 1
-	mul	t1, t1, t0
-	mul	t1, t1, t2
+	/* Initialize the L1 caches */
+	jal	mips_cps_cache_init
+	 nop
 
-	li	a0, CKSEG0
-	PTR_ADDU	a1, a0, t1
-	PTR_SUBU	a1, a1, t0
-1:	cache	Index_Store_Tag_D, 0(a0)
-	bne	a0, a1, 1b
-	 PTR_ADD	a0, a0, t0
-dcache_done:
+	/* Enter the coherent domain */
+	li	t0, 0xff
+	sw	t0, GCR_CL_COHERENCE_OFS(v1)
+	ehb
 
 	/* Set Kseg0 CCA to that in s0 */
-	mfc0	t0, CP0_CONFIG
+1:	mfc0	t0, CP0_CONFIG
 	ori	t0, 0x7
 	xori	t0, 0x7
 	or	t0, t0, s0
 	mtc0	t0, CP0_CONFIG
 	ehb
 
-	/* Calculate an uncached address for the CM GCRs */
-	MFC0	v1, CP0_CMGCRBASE
-	PTR_SLL	v1, v1, 4
-	PTR_LI	t0, UNCAC_BASE
-	PTR_ADDU v1, v1, t0
-
-	/* Enter the coherent domain */
-	li	t0, 0xff
-	sw	t0, GCR_CL_COHERENCE_OFS(v1)
-	ehb
-
 	/* Jump to kseg0 */
 	PTR_LA	t0, 1f
 	jr	t0
 	 nop
 
 	/*
-	 * We're up, cached & coherent. Perform any further required core-level
-	 * initialisation.
+	 * We're up, cached & coherent. Perform any EVA initialization necessary
+	 * before we access memory.
 	 */
-1:	jal	mips_cps_core_init
+1:	eva_init
+
+	/* Retrieve boot configuration pointers */
+	jal	mips_cps_get_bootcfg
 	 nop
 
-	/* Do any EVA initialization if necessary */
-	eva_init
+	/* Skip core-level init if we started up coherent */
+	bnez	s7, 1f
+	 nop
+
+	/* Perform any further required core-level initialisation */
+	jal	mips_cps_core_init
+	 nop
 
 	/*
 	 * Boot any other VPEs within this core that should be online, and
 	 * deactivate this VPE if it should be offline.
 	 */
+	move	a1, t9
 	jal	mips_cps_boot_vpes
-	 nop
+	 move	a0, v0
 
 	/* Off we go! */
-	PTR_L	t1, VPEBOOTCFG_PC(v0)
-	PTR_L	gp, VPEBOOTCFG_GP(v0)
-	PTR_L	sp, VPEBOOTCFG_SP(v0)
+1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
+	PTR_L	gp, VPEBOOTCFG_GP(v1)
+	PTR_L	sp, VPEBOOTCFG_SP(v1)
 	jr	t1
 	 nop
 	END(mips_cps_core_entry)
@@ -245,7 +223,6 @@ LEAF(excep_intex)
 
 	.org 0x480
 LEAF(excep_ejtag)
-	DUMP_EXCEP("EJTAG")
 	PTR_LA	k0, ejtag_debug_handler
 	jr	k0
 	 nop
@@ -323,22 +300,35 @@ LEAF(mips_cps_core_init)
 	 nop
 	END(mips_cps_core_init)
 
-LEAF(mips_cps_boot_vpes)
-	/* Retrieve CM base address */
-	PTR_LA	t0, mips_cm_base
-	PTR_L	t0, 0(t0)
-
+/**
+ * mips_cps_get_bootcfg() - retrieve boot configuration pointers
+ *
+ * Returns: pointer to struct core_boot_config in v0, pointer to
+ *          struct vpe_boot_config in v1, VPE ID in t9
+ */
+LEAF(mips_cps_get_bootcfg)
 	/* Calculate a pointer to this cores struct core_boot_config */
+	cmgcrb	t0
 	lw	t0, GCR_CL_ID_OFS(t0)
 	li	t1, COREBOOTCFG_SIZE
 	mul	t0, t0, t1
 	PTR_LA	t1, mips_cps_core_bootcfg
 	PTR_L	t1, 0(t1)
-	PTR_ADDU t0, t0, t1
+	PTR_ADDU v0, t0, t1
 
 	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
 	li	t9, 0
-#ifdef CONFIG_MIPS_MT_SMP
+#if defined(CONFIG_CPU_MIPSR6)
+	has_vp	ta2, 1f
+
+	/*
+	 * Assume non-contiguous numbering. Perhaps some day we'll need
+	 * to handle contiguous VP numbering, but no such systems yet
+	 * exist.
+	 */
+	mfc0	t9, $3, 1
+	andi	t9, t9, 0xff
+#elif defined(CONFIG_MIPS_MT_SMP)
 	has_mt	ta2, 1f
 
 	/* Find the number of VPEs present in the core */
@@ -362,22 +352,43 @@ LEAF(mips_cps_boot_vpes)
 
 1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
 	li	t1, VPEBOOTCFG_SIZE
-	mul	v0, t9, t1
-	PTR_L	ta3, COREBOOTCFG_VPECONFIG(t0)
-	PTR_ADDU v0, v0, ta3
-
-#ifdef CONFIG_MIPS_MT_SMP
+	mul	v1, t9, t1
+	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
+	PTR_ADDU v1, v1, ta3
 
-	/* If the core doesn't support MT then return */
-	bnez	ta2, 1f
-	 nop
 	jr	ra
 	 nop
+	END(mips_cps_get_bootcfg)
+
+LEAF(mips_cps_boot_vpes)
+	PTR_L	ta2, COREBOOTCFG_VPEMASK(a0)
+	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)
+
+#if defined(CONFIG_CPU_MIPSR6)
+
+	has_vp	t0, 5f
+
+	/* Find base address of CPC */
+	cmgcrb	t3
+	PTR_L	t1, GCR_CPC_BASE_OFS(t3)
+	PTR_LI	t2, ~0x7fff
+	and	t1, t1, t2
+	PTR_LI	t2, UNCAC_BASE
+	PTR_ADD	t1, t1, t2
+
+	/* Set VC_RUN to the VPE mask */
+	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)
+	ehb
+
+#elif defined(CONFIG_MIPS_MT)
 
 	.set	push
 	.set	mt
 
-1:	/* Enter VPE configuration state */
+	/* If the core doesn't support MT then return */
+	has_mt	t0, 5f
+
+	/* Enter VPE configuration state */
 	dvpe
 	PTR_LA	t1, 1f
 	jr.hb	t1
@@ -388,7 +399,6 @@ LEAF(mips_cps_boot_vpes)
 	ehb
 
 	/* Loop through each VPE */
-	PTR_L	ta2, COREBOOTCFG_VPEMASK(t0)
 	move	t8, ta2
 	li	ta1, 0
 
@@ -465,7 +475,7 @@ LEAF(mips_cps_boot_vpes)
 
 	/* Check whether this VPE is meant to be running */
 	li	t0, 1
-	sll	t0, t0, t9
+	sll	t0, t0, a1
 	and	t0, t0, t8
 	bnez	t0, 2f
 	 nop
@@ -482,10 +492,84 @@ LEAF(mips_cps_boot_vpes)
 #endif /* CONFIG_MIPS_MT_SMP */
 
 	/* Return */
-	jr	ra
+5:	jr	ra
 	 nop
 	END(mips_cps_boot_vpes)
 
+LEAF(mips_cps_cache_init)
+	/*
+	 * Clear the bits used to index the caches. Note that the architecture
+	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
+	 * be valid for all MIPS32 CPUs, even those for which said writes are
+	 * unnecessary.
+	 */
+	mtc0	zero, CP0_TAGLO, 0
+	mtc0	zero, CP0_TAGHI, 0
+	mtc0	zero, CP0_TAGLO, 2
+	mtc0	zero, CP0_TAGHI, 2
+	ehb
+
+	/* Primary cache configuration is indicated by Config1 */
+	mfc0	v0, CP0_CONFIG, 1
+
+	/* Detect I-cache line size */
+	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
+	beqz	t0, icache_done
+	 li	t1, 2
+	sllv	t0, t1, t0
+
+	/* Detect I-cache size */
+	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
+	xori	t2, t1, 0x7
+	beqz	t2, 1f
+	 li	t3, 32
+	addiu	t1, t1, 1
+	sllv	t1, t3, t1
+1:	/* At this point t1 == I-cache sets per way */
+	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
+	addiu	t2, t2, 1
+	mul	t1, t1, t0
+	mul	t1, t1, t2
+
+	li	a0, CKSEG0
+	PTR_ADD	a1, a0, t1
+1:	cache	Index_Store_Tag_I, 0(a0)
+	PTR_ADD	a0, a0, t0
+	bne	a0, a1, 1b
+	 nop
+icache_done:
+
+	/* Detect D-cache line size */
+	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
+	beqz	t0, dcache_done
+	 li	t1, 2
+	sllv	t0, t1, t0
+
+	/* Detect D-cache size */
+	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
+	xori	t2, t1, 0x7
+	beqz	t2, 1f
+	 li	t3, 32
+	addiu	t1, t1, 1
+	sllv	t1, t3, t1
+1:	/* At this point t1 == D-cache sets per way */
+	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
+	addiu	t2, t2, 1
+	mul	t1, t1, t0
+	mul	t1, t1, t2
+
+	li	a0, CKSEG0
+	PTR_ADDU	a1, a0, t1
+	PTR_SUBU	a1, a1, t0
+1:	cache	Index_Store_Tag_D, 0(a0)
+	bne	a0, a1, 1b
+	 PTR_ADD	a0, a0, t0
+dcache_done:
+
+	jr	ra
+	 nop
+	END(mips_cps_cache_init)
+
 #if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
 
 	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
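For reference, a rough C model of what the reworked boot path computes: mips_cps_get_bootcfg indexes this core's struct core_boot_config by the CM core ID (GCR_CL_ID) and this VPE's struct vpe_boot_config by the VPE/VP ID, and on MIPSr6 the new mips_cps_boot_vpes starts the sibling VPs by storing the core's VPE mask to the CPC VC_RUN register. This is an illustrative sketch only, not part of the commit: the struct layouts are simplified from asm/smp-cps.h, and get_bootcfg, boot_vps_mipsr6, all_cores and cpc_cl_vc_run are stand-in names rather than kernel symbols.

/* Illustrative sketch only -- simplified from asm/smp-cps.h, not kernel code. */
struct vpe_boot_config {
	unsigned long pc, sp, gp;	/* loaded into t1/sp/gp at "Off we go!" */
};

struct core_boot_config {
	unsigned long vpe_mask;		/* one bit per VPE/VP to be started */
	struct vpe_boot_config *vpe_config;
};

/*
 * Roughly what mips_cps_get_bootcfg leaves in v0 (core config) and
 * v1 (VPE config); all_cores stands in for mips_cps_core_bootcfg.
 */
static struct vpe_boot_config *
get_bootcfg(struct core_boot_config *all_cores, unsigned int core_id,
	    unsigned int vpe_id, struct core_boot_config **core_out)
{
	struct core_boot_config *core = &all_cores[core_id];

	*core_out = core;			/* returned in v0 */
	return &core->vpe_config[vpe_id];	/* returned in v1 */
}

/*
 * MIPSr6 path of mips_cps_boot_vpes: one uncached store of the VPE mask
 * to the CPC's VC_RUN register releases every selected VP at once; the
 * MT-ASE path instead loops over the VPEs under DVPE.
 */
static void boot_vps_mipsr6(const struct core_boot_config *core,
			    volatile unsigned long *cpc_cl_vc_run)
{
	*cpc_cl_vc_run = core->vpe_mask;
}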