From e83d01697583d8610d1d62279758c2a881e3396f Mon Sep 17 00:00:00 2001
From: Ian Munsie
Date: Wed, 8 Oct 2014 19:54:50 +1100
Subject: powerpc/cell: Move spu_handle_mm_fault() out of cell platform

Currently spu_handle_mm_fault() is in the cell platform.

This code is generically useful for other non-cell co-processors on
powerpc.

This patch moves this function out of the cell platform into
arch/powerpc/mm so that others may use it.

Signed-off-by: Ian Munsie
Signed-off-by: Michael Neuling
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/copro_fault.c | 92 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 92 insertions(+)
 create mode 100644 arch/powerpc/mm/copro_fault.c

(limited to 'arch/powerpc/mm/copro_fault.c')

diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
new file mode 100644
index 000000000000..ba7df14c6b82
--- /dev/null
+++ b/arch/powerpc/mm/copro_fault.c
@@ -0,0 +1,92 @@
+/*
+ * CoProcessor (SPU/AFU) mm fault handler
+ *
+ * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
+ *
+ * Author: Arnd Bergmann
+ * Author: Jeremy Kerr
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include
+#include
+#include
+#include
+
+/*
+ * This ought to be kept in sync with the powerpc specific do_page_fault
+ * function. Currently, there are a few corner cases that we haven't had
+ * to handle fortunately.
+ */
+int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
+		unsigned long dsisr, unsigned *flt)
+{
+	struct vm_area_struct *vma;
+	unsigned long is_write;
+	int ret;
+
+	if (mm == NULL)
+		return -EFAULT;
+
+	if (mm->pgd == NULL)
+		return -EFAULT;
+
+	down_read(&mm->mmap_sem);
+	ret = -EFAULT;
+	vma = find_vma(mm, ea);
+	if (!vma)
+		goto out_unlock;
+
+	if (ea < vma->vm_start) {
+		if (!(vma->vm_flags & VM_GROWSDOWN))
+			goto out_unlock;
+		if (expand_stack(vma, ea))
+			goto out_unlock;
+	}
+
+	is_write = dsisr & DSISR_ISSTORE;
+	if (is_write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto out_unlock;
+	} else {
+		if (dsisr & DSISR_PROTFAULT)
+			goto out_unlock;
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto out_unlock;
+	}
+
+	ret = 0;
+	*flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
+	if (unlikely(*flt & VM_FAULT_ERROR)) {
+		if (*flt & VM_FAULT_OOM) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		} else if (*flt & VM_FAULT_SIGBUS) {
+			ret = -EFAULT;
+			goto out_unlock;
+		}
+		BUG();
+	}
+
+	if (*flt & VM_FAULT_MAJOR)
+		current->maj_flt++;
+	else
+		current->min_flt++;
+
+out_unlock:
+	up_read(&mm->mmap_sem);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
--
cgit v1.2.3
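
A coprocessor driver is expected to call copro_handle_mm_fault() from its
translation-interrupt path: a return of 0 means the fault was resolved and
the unit can retry the access, while -EFAULT/-ENOMEM mean it cannot be. A
minimal sketch of such a caller follows; everything except
copro_handle_mm_fault() itself is a hypothetical name, not something this
series defines:

	unsigned int flt;
	int ret;

	/* ea and dsisr come from the unit's fault registers */
	ret = copro_handle_mm_fault(ctx->mm, ea, dsisr, &flt);
	if (ret) {
		/* Unresolvable fault: stop/signal the faulting context. */
		my_afu_stop_context(ctx);
		return;
	}
	/* Fault resolved: the unit may retry its translation. */
	my_afu_restart(ctx);
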
From 73d16a6e0e51990cbe13f8d8f43bd5329bbab30a Mon Sep 17 00:00:00 2001
From: Ian Munsie
Date: Wed, 8 Oct 2014 19:54:51 +1100
Subject: powerpc/cell: Move data segment faulting code out of cell platform

__spu_trap_data_seg() currently contains code to determine the VSID and
ESID required for a particular EA and mm struct.

This code is generically useful for other co-processors.

This moves the code out of the cell platform so it can be used by other
powerpc code. It also adds 1TB segment handling which Cell didn't
support. The new function is called copro_calculate_slb().

This also moves the internal struct spu_slb to a generic struct
copro_slb which is now used in the Cell and copro code. We use this new
struct instead of passing around esid and vsid parameters.

Signed-off-by: Ian Munsie
Signed-off-by: Michael Neuling
Signed-off-by: Michael Ellerman
---
 arch/powerpc/include/asm/copro.h       |  7 +++++
 arch/powerpc/include/asm/mmu-hash64.h  |  7 +++++
 arch/powerpc/mm/copro_fault.c          | 46 ++++++++++++++++++++++++++++
 arch/powerpc/mm/slb.c                  |  3 --
 arch/powerpc/platforms/cell/spu_base.c | 55 ++++++----------------------------
 5 files changed, 69 insertions(+), 49 deletions(-)

(limited to 'arch/powerpc/mm/copro_fault.c')

diff --git a/arch/powerpc/include/asm/copro.h b/arch/powerpc/include/asm/copro.h
index 51cae85a50b4..b0e6a183181f 100644
--- a/arch/powerpc/include/asm/copro.h
+++ b/arch/powerpc/include/asm/copro.h
@@ -10,7 +10,14 @@
 #ifndef _ASM_POWERPC_COPRO_H
 #define _ASM_POWERPC_COPRO_H
 
+struct copro_slb
+{
+	u64 esid, vsid;
+};
+
 int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 			  unsigned long dsisr, unsigned *flt);
 
+int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb);
+
 #endif /* _ASM_POWERPC_COPRO_H */

diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 92bc3a637923..bd813c00ede2 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -190,6 +190,13 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
 
 #ifndef __ASSEMBLY__
 
+static inline int slb_vsid_shift(int ssize)
+{
+	if (ssize == MMU_SEGSIZE_256M)
+		return SLB_VSID_SHIFT;
+	return SLB_VSID_SHIFT_1T;
+}
+
 static inline int segment_shift(int ssize)
 {
 	if (ssize == MMU_SEGSIZE_256M)

diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index ba7df14c6b82..a15a23efc0e2 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -24,6 +24,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * This ought to be kept in sync with the powerpc specific do_page_fault
@@ -90,3 +91,48 @@ out_unlock:
 	return ret;
 }
 EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
+
+int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
+{
+	u64 vsid;
+	int psize, ssize;
+
+	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
+
+	switch (REGION_ID(ea)) {
+	case USER_REGION_ID:
+		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
+		psize = get_slice_psize(mm, ea);
+		ssize = user_segment_size(ea);
+		vsid = get_vsid(mm->context.id, ea, ssize);
+		break;
+	case VMALLOC_REGION_ID:
+		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
+		if (ea < VMALLOC_END)
+			psize = mmu_vmalloc_psize;
+		else
+			psize = mmu_io_psize;
+		ssize = mmu_kernel_ssize;
+		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
+		break;
+	case KERNEL_REGION_ID:
+		pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea);
+		psize = mmu_linear_psize;
+		ssize = mmu_kernel_ssize;
+		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
+		break;
+	default:
+		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
+		return 1;
+	}
+
+	vsid = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_USER;
+
+	vsid |= mmu_psize_defs[psize].sllp |
+		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
+
+	slb->vsid = vsid;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(copro_calculate_slb);

diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 0399a6702958..6e450ca66526 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -46,9 +46,6 @@ static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
 	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
 }
 
-#define slb_vsid_shift(ssize)	\
-	((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)
-
 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 					 unsigned long flags)
 {

diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 2930d1e81a05..ffcbd242e669 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -76,10 +76,6 @@ static LIST_HEAD(spu_full_list);
 static DEFINE_SPINLOCK(spu_full_list_lock);
 static DEFINE_MUTEX(spu_full_list_mutex);
 
-struct spu_slb {
-	u64 esid, vsid;
-};
-
 void spu_invalidate_slbs(struct spu *spu)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
@@ -149,7 +145,7 @@ static void spu_restart_dma(struct spu *spu)
 	}
 }
 
-static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
+static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
 
@@ -167,45 +163,12 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
 
 static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 {
-	struct mm_struct *mm = spu->mm;
-	struct spu_slb slb;
-	int psize;
-
-	pr_debug("%s\n", __func__);
-
-	slb.esid = (ea & ESID_MASK) | SLB_ESID_V;
+	struct copro_slb slb;
+	int ret;
 
-	switch(REGION_ID(ea)) {
-	case USER_REGION_ID:
-#ifdef CONFIG_PPC_MM_SLICES
-		psize = get_slice_psize(mm, ea);
-#else
-		psize = mm->context.user_psize;
-#endif
-		slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M)
-				<< SLB_VSID_SHIFT) | SLB_VSID_USER;
-		break;
-	case VMALLOC_REGION_ID:
-		if (ea < VMALLOC_END)
-			psize = mmu_vmalloc_psize;
-		else
-			psize = mmu_io_psize;
-		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
-				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
-		break;
-	case KERNEL_REGION_ID:
-		psize = mmu_linear_psize;
-		slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M)
-				<< SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
-		break;
-	default:
-		/* Future: support kernel segments so that drivers
-		 * can use SPUs.
-		 */
-		pr_debug("invalid region access at %016lx\n", ea);
-		return 1;
-	}
-	slb.vsid |= mmu_psize_defs[psize].sllp;
+	ret = copro_calculate_slb(spu->mm, ea, &slb);
+	if (ret)
+		return ret;
 
 	spu_load_slb(spu, spu->slb_replace, &slb);
@@ -253,7 +216,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 	return 0;
 }
 
-static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
+static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
 {
 	unsigned long ea = (unsigned long)addr;
 	u64 llp;
@@ -272,7 +235,7 @@ static void __spu_kernel_slb(void *addr, struct spu_slb *slb)
  * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
  * address @new_addr is present.
  */
-static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
+static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
 		void *new_addr)
 {
 	unsigned long ea = (unsigned long)new_addr;
@@ -297,7 +260,7 @@ static inline int __slb_present(struct spu_slb *slbs, int nr_slbs,
 void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
 		void *code, int code_size)
 {
-	struct spu_slb slbs[4];
+	struct copro_slb slbs[4];
 	int i, nr_slbs = 0;
 
 	/* start and end addresses of both mappings */
 	void *addrs[] = {
--
cgit v1.2.3
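
The esid/vsid pair that copro_calculate_slb() produces is meant to be
written straight into a coprocessor's SLB, the way spu_load_slb() consumes
it above. A hedged sketch of such a consumer — only copro_calculate_slb()
and the out_be64() accessor are real here, the priv register names are
made up:

	struct copro_slb slb;
	int ret;

	ret = copro_calculate_slb(mm, ea, &slb);
	if (ret)
		return -EFAULT;	/* EA fell outside any supported region */

	/* Write the entry through the unit's MMIO window. */
	out_be64(&priv->slb_index, slot);
	out_be64(&priv->slb_vsid, slb.vsid);	/* flags + VSID first */
	out_be64(&priv->slb_esid, slb.esid);	/* ESID last: carries SLB_ESID_V */
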
pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea); + if (ea < VMALLOC_END) + psize = mmu_vmalloc_psize; + else + psize = mmu_io_psize; + ssize = mmu_kernel_ssize; + vsid = get_kernel_vsid(ea, mmu_kernel_ssize); + break; + case KERNEL_REGION_ID: + pr_devel("%s: 0x%llx -- KERNEL_REGION_ID\n", __func__, ea); + psize = mmu_linear_psize; + ssize = mmu_kernel_ssize; + vsid = get_kernel_vsid(ea, mmu_kernel_ssize); + break; + default: + pr_debug("%s: invalid region access at %016llx\n", __func__, ea); + return 1; + } + + vsid = (vsid << slb_vsid_shift(ssize)) | SLB_VSID_USER; + + vsid |= mmu_psize_defs[psize].sllp | + ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0); + + slb->vsid = vsid; + + return 0; +} +EXPORT_SYMBOL_GPL(copro_calculate_slb); diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 0399a6702958..6e450ca66526 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c @@ -46,9 +46,6 @@ static inline unsigned long mk_esid_data(unsigned long ea, int ssize, return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot; } -#define slb_vsid_shift(ssize) \ - ((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T) - static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, unsigned long flags) { diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 2930d1e81a05..ffcbd242e669 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c @@ -76,10 +76,6 @@ static LIST_HEAD(spu_full_list); static DEFINE_SPINLOCK(spu_full_list_lock); static DEFINE_MUTEX(spu_full_list_mutex); -struct spu_slb { - u64 esid, vsid; -}; - void spu_invalidate_slbs(struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; @@ -149,7 +145,7 @@ static void spu_restart_dma(struct spu *spu) } } -static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb) +static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb) { struct spu_priv2 __iomem *priv2 = spu->priv2; @@ -167,45 +163,12 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb) static int __spu_trap_data_seg(struct spu *spu, unsigned long ea) { - struct mm_struct *mm = spu->mm; - struct spu_slb slb; - int psize; - - pr_debug("%s\n", __func__); - - slb.esid = (ea & ESID_MASK) | SLB_ESID_V; + struct copro_slb slb; + int ret; - switch(REGION_ID(ea)) { - case USER_REGION_ID: -#ifdef CONFIG_PPC_MM_SLICES - psize = get_slice_psize(mm, ea); -#else - psize = mm->context.user_psize; -#endif - slb.vsid = (get_vsid(mm->context.id, ea, MMU_SEGSIZE_256M) - << SLB_VSID_SHIFT) | SLB_VSID_USER; - break; - case VMALLOC_REGION_ID: - if (ea < VMALLOC_END) - psize = mmu_vmalloc_psize; - else - psize = mmu_io_psize; - slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) - << SLB_VSID_SHIFT) | SLB_VSID_KERNEL; - break; - case KERNEL_REGION_ID: - psize = mmu_linear_psize; - slb.vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) - << SLB_VSID_SHIFT) | SLB_VSID_KERNEL; - break; - default: - /* Future: support kernel segments so that drivers - * can use SPUs. 
From 4c6d9acce1f4ca815881f0e3adca192795eca1c5 Mon Sep 17 00:00:00 2001
From: Ian Munsie
Date: Wed, 8 Oct 2014 19:55:00 +1100
Subject: powerpc/mm: Add hooks for cxl

This adds hooks into the core powerpc mm code for cxl.

The core powerpc code sometimes uses local tlbie. Unfortunately this
won't work with the current cxl driver as it relies on snooping tlbie
broadcasts.

The cxl hardware can have TLB entries invalidated via MMIO but this is
not currently supported by the driver. In future we can make local
tlbie smarter so that it invalidates cxl contexts via MMIO when it
needs to but for now we have this workaround.

This workaround checks for any active cxl contexts and if so, disables
local tlbie.

This also adds a hook for when SLBs are invalidated. This ensures any
corresponding SLBs in cxl are also invalidated at the same time. This
is required for segment demotion.

Signed-off-by: Ian Munsie
Signed-off-by: Michael Neuling
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/copro_fault.c    | 2 ++
 arch/powerpc/mm/hash_native_64.c | 6 +++++-
 2 files changed, 7 insertions(+), 1 deletion(-)

(limited to 'arch/powerpc/mm/copro_fault.c')

diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index f2aa5a81ad9c..0f9939e693df 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * This ought to be kept in sync with the powerpc specific do_page_fault
@@ -143,5 +144,6 @@ void copro_flush_all_slbs(struct mm_struct *mm)
 #ifdef CONFIG_SPU_BASE
 	spu_flush_all_slbs(mm);
 #endif
+	cxl_slbia(mm);
 }
 EXPORT_SYMBOL_GPL(copro_flush_all_slbs);

diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index afc0a8295f84..ae4962a06476 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -29,6 +29,8 @@
 #include
 #include
 
+#include
+
 #ifdef DEBUG_LOW
 #define DBG_LOW(fmt...) udbg_printf(fmt)
 #else
@@ -149,9 +151,11 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
 static inline void tlbie(unsigned long vpn, int psize, int apsize,
 			 int ssize, int local)
 {
-	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
+	unsigned int use_local;
 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 
+	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();
+
 	if (use_local)
 		use_local = mmu_psize_defs[psize].tlbiel;
 	if (lock_tlbie && !use_local)
--
cgit v1.2.3
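
Note what the tlbie() hunk actually requires of cxl_ctx_in_use(): it must
be cheap and err on the side of returning true, since a false negative
would let a local tlbiel slip past a snooping cxl unit. The implementation
is not part of these patches; one plausible sketch (an assumption, not the
driver's actual code) is a global count of attached contexts:

	#include <linux/atomic.h>

	static atomic_t cxl_use_count = ATOMIC_INIT(0);

	bool cxl_ctx_in_use(void)
	{
		/* Non-zero whenever any context is attached anywhere. */
		return atomic_read(&cxl_use_count) != 0;
	}

	void cxl_ctx_get(void)		/* called when a context attaches */
	{
		atomic_inc(&cxl_use_count);
	}

	void cxl_ctx_put(void)		/* called when a context detaches */
	{
		atomic_dec(&cxl_use_count);
	}

The trade-off is blunt but safe: while any cxl context exists, every tlbie
on the system is broadcast, giving up some invalidation performance for
correctness.
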
From 03f54397976581e71a3294ac0e6dfcf4aa36e539 Mon Sep 17 00:00:00 2001
From: Ian Munsie
Date: Tue, 28 Oct 2014 14:25:29 +1100
Subject: powerpc/mm: Use appropriate ESID mask in copro_calculate_slb()

This patch makes copro_calculate_slb() mask the ESID by the correct
mask for 1T vs 256M segments.

This has no effect by itself as the extra bits were ignored, but it
makes debugging the segment table entries easier and means that we can
directly compare the ESID values for duplicates without needing to
worry about masking in the comparison.

This will be used to simplify a comparison in the following patch.

Signed-off-by: Ian Munsie
Reviewed-by: Aneesh Kumar K.V
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/copro_fault.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'arch/powerpc/mm/copro_fault.c')

diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 0f9939e693df..5a236f082c78 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -99,8 +99,6 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
 	u64 vsid;
 	int psize, ssize;
 
-	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
-
 	switch (REGION_ID(ea)) {
 	case USER_REGION_ID:
 		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
@@ -133,6 +131,7 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
 	vsid |= mmu_psize_defs[psize].sllp |
 		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
 
+	slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
 	slb->vsid = vsid;
 
 	return 0;
--
cgit v1.2.3
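
To see what this last patch buys, consider the two masks: ESID_MASK keeps
the bits above the 256M (2^28) boundary, 0xfffffffff0000000, while
ESID_MASK_1T keeps the bits above the 1T (2^40) boundary,
0xffffff0000000000. Two EAs in the same 1T segment but different 256M
chunks previously produced different esid values for what is one segment;
with the size-appropriate mask they now come out identical, so a driver's
segment table can detect duplicates with a plain equality test. A
hypothetical sketch of the comparison the changelog alludes to — sste[]
and nr_sstes are made-up driver-side names:

	int i;

	/* slb was just filled in by copro_calculate_slb() */
	for (i = 0; i < nr_sstes; i++)
		if (sste[i].esid == slb.esid)
			return 0;	/* segment already mapped, nothing to do */
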