Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/dma-mapping.c | 76
-rw-r--r--  arch/arm/mm/fault.c       |  7
-rw-r--r--  arch/arm/mm/mmu.c         | 26
3 files changed, 20 insertions, 89 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 9414d72f664b..8a8949174b1c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -179,6 +179,23 @@ static void arm_dma_sync_single_for_device(struct device *dev,
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+static int arm_dma_supported(struct device *dev, u64 mask)
+{
+	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
+
+	/*
+	 * Translate the device's DMA mask to a PFN limit.  This
+	 * PFN number includes the page which we can DMA to.
+	 */
+	return dma_to_pfn(dev, mask) >= max_dma_pfn;
+}
+
 const struct dma_map_ops arm_dma_ops = {
 	.alloc			= arm_dma_alloc,
 	.free			= arm_dma_free,
@@ -219,49 +236,6 @@ const struct dma_map_ops arm_coherent_dma_ops = {
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
-static int __dma_supported(struct device *dev, u64 mask, bool warn)
-{
-	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
-
-	/*
-	 * Translate the device's DMA mask to a PFN limit.  This
-	 * PFN number includes the page which we can DMA to.
-	 */
-	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
-		if (warn)
-			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
-				 mask,
-				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
-				 max_dma_pfn + 1);
-		return 0;
-	}
-
-	return 1;
-}
-
-static u64 get_coherent_dma_mask(struct device *dev)
-{
-	u64 mask = (u64)DMA_BIT_MASK(32);
-
-	if (dev) {
-		mask = dev->coherent_dma_mask;
-
-		/*
-		 * Sanity check the DMA mask - it must be non-zero, and
-		 * must be able to be satisfied by a DMA allocation.
-		 */
-		if (mask == 0) {
-			dev_warn(dev, "coherent DMA mask is unset\n");
-			return 0;
-		}
-
-		if (!__dma_supported(dev, mask, true))
-			return 0;
-	}
-
-	return mask;
-}
-
 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
 {
 	/*
@@ -688,7 +662,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
 {
-	u64 mask = get_coherent_dma_mask(dev);
+	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 	struct page *page = NULL;
 	void *addr;
 	bool allowblock, cma;
@@ -712,9 +686,6 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	}
 #endif
 
-	if (!mask)
-		return NULL;
-
 	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
 	if (!buf)
@@ -1087,17 +1058,6 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			      dir);
 }
 
-/*
- * Return whether the given device DMA address mask can be supported
- * properly.  For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- */
-int arm_dma_supported(struct device *dev, u64 mask)
-{
-	return __dma_supported(dev, mask, false);
-}
-
 static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
 {
 	/*
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index bd0f4821f7e1..b598e6978b29 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -241,7 +241,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	struct mm_struct *mm;
 	int sig, code;
 	vm_fault_t fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	unsigned int flags = FAULT_FLAG_DEFAULT;
 
 	if (kprobe_page_fault(regs, fsr))
 		return 0;
@@ -295,7 +295,7 @@ retry:
	 * signal first. We do not need to release the mmap_sem because
	 * it would already be released in __lock_page_or_retry in
	 * mm/filemap.c. */
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
 			goto no_context;
 		return 0;
@@ -319,9 +319,6 @@ retry:
				regs, addr);
 	}
 	if (fault & VM_FAULT_RETRY) {
-		/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
-		 * of starvation. */
-		flags &= ~FAULT_FLAG_ALLOW_RETRY;
 		flags |= FAULT_FLAG_TRIED;
 		goto retry;
 	}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 5d0d0f86e790..69a337df619f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -63,9 +63,6 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
 static unsigned int ecc_mask __initdata = 0;
 pgprot_t pgprot_user;
 pgprot_t pgprot_kernel;
-pgprot_t pgprot_hyp_device;
-pgprot_t pgprot_s2;
-pgprot_t pgprot_s2_device;
 
 EXPORT_SYMBOL(pgprot_user);
 EXPORT_SYMBOL(pgprot_kernel);
@@ -75,15 +72,8 @@ struct cachepolicy {
 	unsigned int	cr_mask;
 	pmdval_t	pmd;
 	pteval_t	pte;
-	pteval_t	pte_s2;
 };
 
-#ifdef CONFIG_ARM_LPAE
-#define s2_policy(policy)	policy
-#else
-#define s2_policy(policy)	0
-#endif
-
 unsigned long kimage_voffset __ro_after_init;
 
 static struct cachepolicy cache_policies[] __initdata = {
@@ -92,31 +82,26 @@ static struct cachepolicy cache_policies[] __initdata = {
 		.cr_mask	= CR_W|CR_C,
 		.pmd		= PMD_SECT_UNCACHED,
 		.pte		= L_PTE_MT_UNCACHED,
-		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy		= "buffered",
 		.cr_mask	= CR_C,
 		.pmd		= PMD_SECT_BUFFERED,
 		.pte		= L_PTE_MT_BUFFERABLE,
-		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy		= "writethrough",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WT,
 		.pte		= L_PTE_MT_WRITETHROUGH,
-		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITETHROUGH),
 	}, {
 		.policy		= "writeback",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WB,
 		.pte		= L_PTE_MT_WRITEBACK,
-		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}, {
 		.policy		= "writealloc",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WBWA,
 		.pte		= L_PTE_MT_WRITEALLOC,
-		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}
 };
 
@@ -246,9 +231,6 @@ static struct mem_type mem_types[] __ro_after_init = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
-		.prot_pte_s2	= s2_policy(PROT_PTE_S2_DEVICE) |
-				  s2_policy(L_PTE_S2_MT_DEV_SHARED) |
-				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 		.domain		= DOMAIN_IO,
@@ -434,7 +416,6 @@ static void __init build_mem_type_table(void)
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
 	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
-	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -558,9 +539,6 @@ static void __init build_mem_type_table(void)
	 */
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
-	s2_pgprot = cp->pte_s2;
-	hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
-	s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
 
 #ifndef CONFIG_ARM_LPAE
 	/*
@@ -604,7 +582,6 @@ static void __init build_mem_type_table(void)
 			user_pgprot |= L_PTE_SHARED;
 			kern_pgprot |= L_PTE_SHARED;
 			vecs_pgprot |= L_PTE_SHARED;
-			s2_pgprot |= L_PTE_SHARED;
 			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
 			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -666,9 +643,6 @@ static void __init build_mem_type_table(void)
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);
-	pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
-	pgprot_s2_device  = __pgprot(s2_device_pgprot);
-	pgprot_hyp_device = __pgprot(hyp_device_pgprot);
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
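For illustration, here is a minimal userspace model of the check the new static arm_dma_supported() performs: a DMA mask is usable when the highest PFN the device can address covers the highest PFN the kernel may hand out for DMA. dma_to_pfn_model(), dma_mask_supported_model() and the 1 GiB memory size are hypothetical stand-ins for this sketch only; the kernel's real dma_to_pfn() also applies a per-device address offset that is omitted here.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Flat-mapped stand-in for dma_to_pfn(): drop the in-page offset bits. */
static unsigned long dma_to_pfn_model(uint64_t dma_addr)
{
	return (unsigned long)(dma_addr >> PAGE_SHIFT);
}

/*
 * Stand-in for the arm_dma_supported() test: the mask passes when the
 * last PFN it can reach is at least max_dma_pfn, the last PFN the
 * kernel may allocate for DMA.
 */
static int dma_mask_supported_model(uint64_t mask, unsigned long max_dma_pfn)
{
	return dma_to_pfn_model(mask) >= max_dma_pfn;
}

int main(void)
{
	/* Hypothetical machine with 1 GiB of DMA-able RAM: last PFN is 0x3ffff. */
	unsigned long max_dma_pfn = (1UL << (30 - PAGE_SHIFT)) - 1;

	/* The 24-bit mask from the comment reaches only PFN 0xfff: rejected. */
	printf("0x00ffffff: %s\n",
	       dma_mask_supported_model(0x00ffffffULL, max_dma_pfn) ? "ok" : "rejected");

	/* A full 32-bit mask reaches PFN 0xfffff: accepted. */
	printf("0xffffffff: %s\n",
	       dma_mask_supported_model(0xffffffffULL, max_dma_pfn) ? "ok" : "rejected");
	return 0;
}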