Diffstat (limited to 'arch/arc/mm')
-rw-r--r--   arch/arc/mm/cache.c | 83
-rw-r--r--   arch/arc/mm/fault.c |  2
-rw-r--r--   arch/arc/mm/init.c  |  6
-rw-r--r--   arch/arc/mm/tlb.c   |  5
-rw-r--r--   arch/arc/mm/tlbex.S |  9
5 files changed, 87 insertions, 18 deletions
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 7db283b46ebd..eee924dfffa6 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -652,7 +652,7 @@ static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
 
 #endif /* CONFIG_ARC_HAS_ICACHE */
 
-noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
+noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
 {
 #ifdef CONFIG_ISA_ARCV2
 	/*
@@ -715,6 +715,58 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
 #endif
 }
 
+noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
+{
+#ifdef CONFIG_ISA_ARCV2
+	/*
+	 * SLC is shared between all cores and concurrent aux operations from
+	 * multiple cores need to be serialized using a spinlock
+	 * A concurrent operation can be silently ignored and/or the old/new
+	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
+	 * below)
+	 */
+	static DEFINE_SPINLOCK(lock);
+
+	const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1);
+	unsigned int ctrl, cmd;
+	unsigned long flags;
+	int num_lines;
+
+	spin_lock_irqsave(&lock, flags);
+
+	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);
+
+	/* Don't rely on default value of IM bit */
+	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
+		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
+	else
+		ctrl |= SLC_CTRL_IM;
+
+	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
+
+	cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;
+
+	sz += paddr & ~SLC_LINE_MASK;
+	paddr &= SLC_LINE_MASK;
+
+	num_lines = DIV_ROUND_UP(sz, l2_line_sz);
+
+	while (num_lines-- > 0) {
+		write_aux_reg(cmd, paddr);
+		paddr += l2_line_sz;
+	}
+
+	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
+	read_aux_reg(ARC_REG_SLC_CTRL);
+
+	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
+
+	spin_unlock_irqrestore(&lock, flags);
+#endif
+}
+
+#define slc_op(paddr, sz, op)	slc_op_rgn(paddr, sz, op)
+
 noinline static void slc_entire_op(const int op)
 {
 	unsigned int ctrl, r = ARC_REG_SLC_CTRL;
@@ -1095,7 +1147,7 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
  */
 noinline void __init arc_ioc_setup(void)
 {
-	unsigned int ap_sz;
+	unsigned int ioc_base, mem_sz;
 
 	/* Flush + invalidate + disable L1 dcache */
 	__dc_disable();
@@ -1104,18 +1156,29 @@ noinline void __init arc_ioc_setup(void)
 	if (read_aux_reg(ARC_REG_SLC_BCR))
 		slc_entire_op(OP_FLUSH_N_INV);
 
-	/* IOC Aperture start: TDB: handle non default CONFIG_LINUX_LINK_BASE */
-	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
-
 	/*
-	 * IOC Aperture size:
-	 *   decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
+	 * currently IOC Aperture covers entire DDR
 	 * TBD: fix for PGU + 1GB of low mem
 	 * TBD: fix for PAE
 	 */
-	ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
-	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);
+	mem_sz = arc_get_mem_sz();
+
+	if (!is_power_of_2(mem_sz) || mem_sz < 4096)
+		panic("IOC Aperture size must be power of 2 larger than 4KB");
+
+	/*
+	 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
+	 * so setting 0x11 implies 512MB, 0x12 implies 1GB...
+	 */
+	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2);
+
+	/* for now assume kernel base is start of IOC aperture */
+	ioc_base = CONFIG_LINUX_RAM_BASE;
+
+	if (ioc_base % mem_sz != 0)
+		panic("IOC Aperture start must be aligned to the size of the aperture");
+
+	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
 
 	write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
 	write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
@@ -1207,7 +1270,7 @@ void __ref arc_cache_init(void)
 	unsigned int __maybe_unused cpu = smp_processor_id();
 	char str[256];
 
-	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+	pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));
 
 	if (!cpu)
 		arc_cache_init_master();
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 162c97528872..a0b7bd6d030d 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -207,7 +207,7 @@ no_context:
 	/* Are we prepared to handle this kernel fault?
 	 *
 	 * (The kernel has valid exception-points in the source
-	 *  when it acesses user-memory. When it fails in one
+	 *  when it accesses user-memory. When it fails in one
 	 *  of those points, we find it in a table and do a jump
 	 *  to some fixup code that loads an appropriate error
 	 *  code)
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 8c9415ed6280..ba145065c579 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -26,7 +26,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
 char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
 EXPORT_SYMBOL(empty_zero_page);
 
-static const unsigned long low_mem_start = CONFIG_LINUX_LINK_BASE;
+static const unsigned long low_mem_start = CONFIG_LINUX_RAM_BASE;
 static unsigned long low_mem_sz;
 
 #ifdef CONFIG_HIGHMEM
@@ -63,7 +63,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 
 	if (!low_mem_sz) {
 		if (base != low_mem_start)
-			panic("CONFIG_LINUX_LINK_BASE != DT memory { }");
+			panic("CONFIG_LINUX_RAM_BASE != DT memory { }");
 
 		low_mem_sz = size;
 		in_use = 1;
@@ -161,7 +161,7 @@ void __init setup_arch_memory(void)
 	 * We can't use the helper free_area_init(zones[]) because it uses
 	 * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
 	 * when our kernel doesn't start at PAGE_OFFSET, i.e.
-	 * PAGE_OFFSET != CONFIG_LINUX_LINK_BASE
+	 * PAGE_OFFSET != CONFIG_LINUX_RAM_BASE
 	 */
 	free_area_init_node(0,			/* node-id */
 			    zones_size,		/* num pages per zone */
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index b181f3ee38aa..8ceefbf72fb0 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -821,7 +821,7 @@ void arc_mmu_init(void)
 	char str[256];
 	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
 
-	printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));
+	pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
 
 	/*
 	 * Can't be done in processor.h due to header include depenedencies
@@ -908,9 +908,6 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
 
 	local_irq_save(flags);
 
-	/* re-enable the MMU */
-	write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
-
 	/* loop thru all sets of TLB */
 	for (set = 0; set < mmu->sets; set++) {
 
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index b30e4e36bb00..0e1e47a67c73 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -274,6 +274,13 @@ ex_saved_reg1:
 .macro COMMIT_ENTRY_TO_MMU
 #if (CONFIG_ARC_MMU_VER < 4)
 
+#ifdef CONFIG_EZNPS_MTM_EXT
+	/* verify if entry for this vaddr+ASID already exists */
+	sr    TLBProbe, [ARC_REG_TLBCOMMAND]
+	lr    r0, [ARC_REG_TLBINDEX]
+	bbit0 r0, 31, 88f
+#endif
+
 	/* Get free TLB slot: Set = computed from vaddr, way = random */
 	sr  TLBGetIndex, [ARC_REG_TLBCOMMAND]
 
@@ -287,6 +294,8 @@ ex_saved_reg1:
 #else
 	sr TLBInsertEntry, [ARC_REG_TLBCOMMAND]
 #endif
+
+88:
 .endm
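
The arc_ioc_setup() rework above derives the aperture programming from the actual DDR size instead of hardcoding it. A minimal userspace sketch of the same arithmetic, where ilog2_u32() is a stand-in for the kernel's order_base_2() and the 512 MB size and 0x80000000 base are assumed example values: with those inputs AP0_SIZE comes out as 0x11 and AP0_BASE as 0x80000, the very constant the old code hardcoded.

#include <stdio.h>

/* userspace stand-in for the kernel's order_base_2() on power-of-2 input */
static unsigned int ilog2_u32(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long mem_sz   = 512UL << 20;	/* assumed: 512 MB of DDR */
	unsigned long ioc_base = 0x80000000UL;	/* assumed: CONFIG_LINUX_RAM_BASE */

	/* SIZE field is decoded by hardware as 2 ^ (SIZE + 2) KB */
	printf("AP0_SIZE = 0x%x\n", ilog2_u32(mem_sz >> 10) - 2);	/* 0x11 */

	/* BASE field is the physical base expressed in 4KB units */
	printf("AP0_BASE = 0x%lx\n", ioc_base >> 12);			/* 0x80000 */

	/* the alignment rule the new panic() enforces */
	printf("aligned: %s\n", (ioc_base % mem_sz) ? "no" : "yes");	/* yes */
	return 0;
}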
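Likewise, the new slc_op_line() reduces an arbitrary (paddr, sz) region to whole SLC lines before issuing one IVDL/FLDL aux command per line. A small sketch of just that rounding math, assuming a 64-byte line (the kernel probes l2_line_sz from the SLC build register at boot):

#include <stdio.h>

#define L2_LINE_SZ	64UL	/* assumed SLC line size */

int main(void)
{
	unsigned long mask  = ~(L2_LINE_SZ - 1);
	unsigned long paddr = 0x80000013UL;	/* misaligned start */
	unsigned long sz    = 100;

	sz += paddr & ~mask;	/* grow size by the misalignment (0x13) */
	paddr &= mask;		/* round start down to a line boundary */

	unsigned long num_lines = (sz + L2_LINE_SZ - 1) / L2_LINE_SZ; /* DIV_ROUND_UP */

	while (num_lines-- > 0) {
		/* in the kernel: write_aux_reg(cmd, paddr), cmd = IVDL or FLDL */
		printf("line op @ 0x%lx\n", paddr);
		paddr += L2_LINE_SZ;
	}
	return 0;
}

Rounding paddr down while growing sz by the same amount guarantees that the partially covered first and last lines are still operated on (here: two ops, at 0x80000000 and 0x80000040).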
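Finally, the tlbex.S change guards TLBGetIndex/TLBWrite with a TLBProbe on EZNPS_MTM_EXT, so a vaddr+ASID that already has a TLB entry is never inserted twice. In C terms the control flow looks roughly like the sketch below; the helpers are hypothetical stand-ins for the aux-register commands, and bit 31 of TLBINDEX is taken to be the probe-miss flag that the bbit0 instruction tests.

#include <stdint.h>
#include <stdio.h>

#define TLBINDEX_NO_MATCH	(1u << 31)	/* assumed: set when the probe missed */

/* hypothetical stand-ins for the ARC aux-register TLB commands */
static uint32_t tlb_probe(uint32_t vaddr_asid)
{
	(void)vaddr_asid;
	return TLBINDEX_NO_MATCH;	/* pretend the probe missed */
}

static void tlb_insert(uint32_t vaddr_asid)
{
	printf("allocating new TLB slot for 0x%x\n", vaddr_asid);
}

static void commit_entry_to_mmu(uint32_t vaddr_asid)
{
	/* mirrors: sr TLBProbe; lr r0, [TLBINDEX]; bbit0 r0, 31, 88f */
	if (!(tlb_probe(vaddr_asid) & TLBINDEX_NO_MATCH))
		return;			/* match found: skip, like branching to 88f */

	tlb_insert(vaddr_asid);		/* the TLBGetIndex + TLBWrite path */
}

int main(void)
{
	commit_entry_to_mmu(0x1000);
	return 0;
}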