From d618382ba5f1a4905db63f4980bf7b0a5826de9d Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Wed, 4 Nov 2015 11:30:57 -0800
Subject: iommu-common: Fix error code used in iommu_tbl_range_{alloc,free}().

The value returned from iommu_tbl_range_alloc() (and the one passed in as
a fourth argument to iommu_tbl_range_free()) is not a DMA address; rather,
it is an index into the IOMMU page table. Therefore, using DMA_ERROR_CODE
is not appropriate.

Use a type-appropriate error code define, IOMMU_ERROR_CODE, and update all
users of this interface.

Reported-by: Andre Przywara
Signed-off-by: David S. Miller
---
 arch/sparc/kernel/iommu.c     | 12 ++++++------
 arch/sparc/kernel/ldc.c       |  2 +-
 arch/sparc/kernel/pci_sun4v.c | 18 +++++++++---------
 3 files changed, 16 insertions(+), 16 deletions(-)

(limited to 'arch/sparc/kernel')
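
For context, the pattern this patch enforces looks roughly like the sketch
below. It is illustrative only: example_map() and example_unmap() are
invented names, and struct iommu_map_table is assumed from the iommu-common
interface, while iommu_tbl_range_alloc(), iommu_tbl_range_free(),
IOMMU_ERROR_CODE, table_map_base and IO_PAGE_SHIFT are the identifiers the
diff actually touches. The allocator traffics in page-table indices, so its
error value is IOMMU_ERROR_CODE; DMA_ERROR_CODE only applies once an index
has been converted into a bus address.

	/* Sketch, not code from the patch; it mirrors the shape of
	 * dma_4v_alloc_coherent() and dma_4v_free_coherent() below. */
	static dma_addr_t example_map(struct device *dev,
				      struct iommu_map_table *tbl,
				      unsigned long npages)
	{
		unsigned long entry;

		/* Returns an index into the IOMMU page table, not a DMA address. */
		entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
					      (unsigned long)(-1), 0);
		if (unlikely(entry == IOMMU_ERROR_CODE))	/* index-space error */
			return DMA_ERROR_CODE;			/* DMA-space error */

		/* Only here does the index become a bus address. */
		return tbl->table_map_base + (entry << IO_PAGE_SHIFT);
	}

	static void example_unmap(struct iommu_map_table *tbl, dma_addr_t dvma,
				  unsigned long npages)
	{
		/* The fourth argument is a table index, not a DMA address;
		 * the callers in this patch pass IOMMU_ERROR_CODE, i.e. no
		 * precomputed entry. */
		iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
	}
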
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 5320689c06e9..37686828c3d9 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -161,7 +161,7 @@ static inline iopte_t *alloc_npages(struct device *dev,
 
 	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
-	if (unlikely(entry == DMA_ERROR_CODE))
+	if (unlikely(entry == IOMMU_ERROR_CODE))
 		return NULL;
 
 	return iommu->page_table + entry;
@@ -253,7 +253,7 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
 	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
 	iommu = dev->archdata.iommu;
 
-	iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
+	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
 
 	order = get_order(size);
 	if (order < 10)
@@ -426,7 +426,7 @@ static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
 	iommu_free_ctx(iommu, ctx);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
+	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 }
 
 static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -492,7 +492,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 					      &handle, (unsigned long)(-1), 0);
 
 		/* Handle failure */
-		if (unlikely(entry == DMA_ERROR_CODE)) {
+		if (unlikely(entry == IOMMU_ERROR_CODE)) {
 			if (printk_ratelimit())
 				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
 				       " npages %lx\n", iommu, paddr, npages);
@@ -571,7 +571,7 @@ iommu_map_failed:
 				iopte_make_dummy(iommu, base + j);
 
 			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
-					     DMA_ERROR_CODE);
+					     IOMMU_ERROR_CODE);
 
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -648,7 +648,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			iopte_make_dummy(iommu, base + i);
 
 		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
-				     DMA_ERROR_CODE);
+				     IOMMU_ERROR_CODE);
 
 		sg = sg_next(sg);
 	}
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 1ae5eb1bb045..59d503866431 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1953,7 +1953,7 @@ static struct ldc_mtable_entry *alloc_npages(struct ldc_iommu *iommu,
 
 	entry = iommu_tbl_range_alloc(NULL, &iommu->iommu_map_table,
 				      npages, NULL, (unsigned long)-1, 0);
-	if (unlikely(entry < 0))
+	if (unlikely(entry == IOMMU_ERROR_CODE))
 		return NULL;
 
 	return iommu->page_table + entry;
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index d2fe57dad433..836e8cef47e2 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -159,7 +159,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 
 	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
-	if (unlikely(entry == DMA_ERROR_CODE))
+	if (unlikely(entry == IOMMU_ERROR_CODE))
 		goto range_alloc_fail;
 
 	*dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
@@ -187,7 +187,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 
 iommu_map_fail:
-	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE);
+	iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
 
 range_alloc_fail:
 	free_pages(first_page, order);
@@ -226,7 +226,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 	devhandle = pbm->devhandle;
 	entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
 	dma_4v_iommu_demap(&devhandle, entry, npages);
-	iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
+	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
 	order = get_order(size);
 	if (order < 10)
 		free_pages((unsigned long)cpu, order);
@@ -256,7 +256,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 
 	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
-	if (unlikely(entry == DMA_ERROR_CODE))
+	if (unlikely(entry == IOMMU_ERROR_CODE))
 		goto bad;
 
 	bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
@@ -288,7 +288,7 @@ bad:
 	return DMA_ERROR_CODE;
 
 iommu_map_fail:
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
+	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 	return DMA_ERROR_CODE;
 }
 
@@ -317,7 +317,7 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
 	bus_addr &= IO_PAGE_MASK;
 	entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
 	dma_4v_iommu_demap(&devhandle, entry, npages);
-	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
+	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -376,7 +376,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 					      &handle, (unsigned long)(-1), 0);
 
 		/* Handle failure */
-		if (unlikely(entry == DMA_ERROR_CODE)) {
+		if (unlikely(entry == IOMMU_ERROR_CODE)) {
 			if (printk_ratelimit())
 				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
 				       " npages %lx\n", iommu, paddr, npages);
@@ -451,7 +451,7 @@ iommu_map_failed:
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
 						 IO_PAGE_SIZE);
 			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
-					     DMA_ERROR_CODE);
+					     IOMMU_ERROR_CODE);
 			/* XXX demap? XXX */
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -496,7 +496,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
 		entry = ((dma_handle - tbl->table_map_base) >> shift);
 		dma_4v_iommu_demap(&devhandle, entry, npages);
 		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
-				     DMA_ERROR_CODE);
+				     IOMMU_ERROR_CODE);
 		sg = sg_next(sg);
 	}
--
cgit v1.2.3

From cae9af6a820b47d3e5e0da4edf23cf6fa69b18d8 Mon Sep 17 00:00:00 2001
From: Rob Gardner
Date: Fri, 30 Oct 2015 22:36:23 -0600
Subject: sparc64: Don't restrict fp regs for no-fault loads

The function handle_ldf_stq() deals with no-fault ASI loads and stores,
but restricts fp registers to quad word regs (i.e., %f0, %f4, etc.). This
is valid for the STQ case, but unnecessarily restricts loads, which may
be single precision, double, or quad.

This results in SIGFPE being raised for this instruction when the source
address is invalid:

	ldda [%g1] ASI_PNF, %f2

but not for this one:

	ldda [%g1] ASI_PNF, %f4

The validation check for a quad register is moved to within the STQ block
so that loads are not affected by the check.

An additional problem is that the calculation of freg is incorrect when a
single precision load is being handled. This causes %f1 to be seen as
%f32, etc., and the incorrect register ends up being overwritten. This
code sequence demonstrates the problem:

	ldd [%g1], %f32			! g1 = valid address
	lda [%i3] ASI_PNF, %f1		! i3 = invalid address
	std %f32, [%g1]

This is corrected by basing the freg calculation on the load size.

Signed-off-by: Rob Gardner
Signed-off-by: David S. Miller
---
 arch/sparc/kernel/unaligned_64.c | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)

(limited to 'arch/sparc/kernel')
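
Before the diff, a sketch of the register decode this fix implements may
help. decode_freg() below is a hypothetical helper name; the bit
manipulation is taken from the added lines in the hunk that follows, and
'size' is the handler's decoded access size (1 for single precision).

	/* Hypothetical helper; the expressions match the hunk below. */
	static int decode_freg(u32 insn, int size)
	{
		if (size == 1)
			/* Single precision: use the rd field (insn bits 29:25)
			 * as-is, giving %f0..%f31. */
			return (insn >> 25) & 0x1f;

		/* Double/quad: even register number; the low rd bit encodes
		 * bit 5 of the register number (%f32 and above). */
		return ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
	}

With the single-precision path taken on size, the "lda [%i3] ASI_PNF, %f1"
sequence above decodes to register 1 rather than 32, so %f32 is no longer
clobbered.
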
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 62098a89bbbf..d89e97b374cf 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -436,24 +436,26 @@ extern void sun4v_data_access_exception(struct pt_regs *regs,
 int handle_ldf_stq(u32 insn, struct pt_regs *regs)
 {
 	unsigned long addr = compute_effective_address(regs, insn, 0);
-	int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
+	int freg;
 	struct fpustate *f = FPUSTATE;
 	int asi = decode_asi(insn, regs);
-	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
+	int flag;
 
 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 	save_and_clear_fpu();
 	current_thread_info()->xfsr[0] &= ~0x1c000;
-	if (freg & 3) {
-		current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
-		do_fpother(regs);
-		return 0;
-	}
 	if (insn & 0x200000) {
 		/* STQ */
 		u64 first = 0, second = 0;
 
+		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
+		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
+		if (freg & 3) {
+			current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
+			do_fpother(regs);
+			return 0;
+		}
 		if (current_thread_info()->fpsaved[0] & flag) {
 			first = *(u64 *)&f->regs[freg];
 			second = *(u64 *)&f->regs[freg+2];
@@ -513,6 +515,12 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
 		case 0x100000: size = 4; break;
 		default: size = 2; break;
 		}
+		if (size == 1)
+			freg = (insn >> 25) & 0x1f;
+		else
+			freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
+		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
+
 		for (i = 0; i < size; i++)
 			data[i] = 0;
--
cgit v1.2.3
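
As a closing note, the other half of the change can be summarized by two
hypothetical helpers that simply restate expressions visible in the diff
above: the quad-alignment restriction is a property of the STQ store path
only, and the FPRS dirty flag can only be chosen once freg has been
decoded correctly. FPRS_DL and FPRS_DU are the sparc64 "dirty lower/upper"
FPU state bits covering %f0-%f31 and %f32-%f62 respectively.

	/* Hypothetical helpers restating expressions from the diff above. */
	static int stq_operand_ok(int freg)
	{
		/* STQ requires a quad-aligned register: %f0, %f4, %f8, ... */
		return (freg & 3) == 0;
	}

	static int fprs_dirty_flag(int freg)
	{
		/* Lower half of the fp file (%f0..%f31) vs. upper (%f32..%f62). */
		return (freg < 32) ? FPRS_DL : FPRS_DU;
	}

With the check confined to STQ, a no-fault load such as
"ldda [%g1] ASI_PNF, %f2" no longer trips the quad-register validation and
instead reaches the zero-fill path (the data[i] = 0 loop) for the invalid
address, rather than raising SIGFPE.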