Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r--  arch/powerpc/kernel/iommu.c  |  93
1 file changed, 38 insertions(+), 55 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index a3c406aca664..8f1f4e539c4b 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -31,6 +31,7 @@
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
+#include <linux/iommu-helper.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
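
The new include brings in the generic IOMMU bitmap helpers that the rest of
this patch builds on. For orientation, lib/iommu-helper.c of this era exported
roughly the following interface (signatures reconstructed from memory; treat
them as a sketch, not a quote):

    /* Search 'map' for a clear run of 'nr' bits, starting the scan at
     * 'start' and refusing runs that cross a multiple of 'boundary_size'
     * bits; returns the first bit of the run, or -1 on failure. */
    unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
                                   unsigned long start, unsigned int nr,
                                   unsigned long shift,
                                   unsigned long boundary_size,
                                   unsigned long align_mask);

    /* Clear a previously allocated run of 'nr' bits at 'start'. */
    void iommu_area_free(unsigned long *map, unsigned long start,
                         unsigned int nr);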
@@ -81,17 +82,19 @@ static int __init setup_iommu(char *str)
__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);
-static unsigned long iommu_range_alloc(struct iommu_table *tbl,
+static unsigned long iommu_range_alloc(struct device *dev,
+ struct iommu_table *tbl,
unsigned long npages,
unsigned long *handle,
unsigned long mask,
unsigned int align_order)
{
- unsigned long n, end, i, start;
+ unsigned long n, end, start;
unsigned long limit;
int largealloc = npages > 15;
int pass = 0;
unsigned long align_mask;
+ unsigned long boundary_size;
align_mask = 0xffffffffffffffffl >> (64 - align_order);
@@ -136,14 +139,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
start &= mask;
}
- n = find_next_zero_bit(tbl->it_map, limit, start);
-
- /* Align allocation */
- n = (n + align_mask) & ~align_mask;
-
- end = n + npages;
+ if (dev)
+ boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+ 1 << IOMMU_PAGE_SHIFT);
+ else
+ boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
+ /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
- if (unlikely(end >= limit)) {
+ n = iommu_area_alloc(tbl->it_map, limit, start, npages,
+ tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
+ align_mask);
+ if (n == -1) {
if (likely(pass < 2)) {
/* First failure, just rescan the half of the table.
* Second failure, rescan the other half of the table.
@@ -158,14 +164,7 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
}
}
- for (i = n; i < end; i++)
- if (test_bit(i, tbl->it_map)) {
- start = i+1;
- goto again;
- }
-
- for (i = n; i < end; i++)
- __set_bit(i, tbl->it_map);
+ end = n + npages;
/* Bump the hint to a new block for small allocs. */
if (largealloc) {
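
The two hunks above are the heart of the patch: the open-coded
find_next_zero_bit()/test_bit() scan, with its manual alignment and retry
loop, collapses into one iommu_area_alloc() call that also enforces the
device's DMA segment boundary. The boundary arithmetic, worked through under
the assumption of 4 KiB IOMMU pages (IOMMU_PAGE_SHIFT == 12) and the default
32-bit segment boundary mask, on a 64-bit build:

    /* Sketch, not patch code: values assumed as stated above. */
    unsigned long mask = 0xffffffffUL;        /* dma_get_seg_boundary(dev) */
    unsigned long boundary_size;

    boundary_size = ALIGN(mask + 1, 1 << 12); /* = 1UL << 32            */
    boundary_size >>= 12;                     /* = 0x100000 TCE pages   */

    /* iommu_area_alloc() will never return a run of TCE entries that
     * straddles a multiple of 0x100000 pages, i.e. a 4GB DMA address
     * boundary -- the same guarantee the NULL-dev fallback hard-codes
     * for iseries_hv_alloc()/iseries_hv_map(). */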
@@ -184,16 +183,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
return n;
}
-static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
- unsigned int npages, enum dma_data_direction direction,
- unsigned long mask, unsigned int align_order)
+static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
+ void *page, unsigned int npages,
+ enum dma_data_direction direction,
+ unsigned long mask, unsigned int align_order)
{
unsigned long entry, flags;
dma_addr_t ret = DMA_ERROR_CODE;
spin_lock_irqsave(&(tbl->it_lock), flags);
- entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
+ entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
if (unlikely(entry == DMA_ERROR_CODE)) {
spin_unlock_irqrestore(&(tbl->it_lock), flags);
@@ -224,7 +224,6 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned int npages)
{
unsigned long entry, free_entry;
- unsigned long i;
entry = dma_addr >> IOMMU_PAGE_SHIFT;
free_entry = entry - tbl->it_offset;
@@ -246,9 +245,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
}
ppc_md.tce_free(tbl, entry, npages);
-
- for (i = 0; i < npages; i++)
- __clear_bit(free_entry+i, tbl->it_map);
+ iommu_area_free(tbl->it_map, free_entry, npages);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
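
The substitution in __iommu_free() is behavior-preserving: iommu_area_free()
in this era is essentially the same __clear_bit() loop, just centralized in
lib/iommu-helper.c. Roughly (paraphrased from memory):

    /* Approximately what iommu_area_free(map, start, nr) does: */
    while (nr--)
            __clear_bit(start++, map);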
@@ -270,16 +267,18 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
-int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, unsigned long mask,
enum dma_data_direction direction)
{
+ struct iommu_table *tbl = dev->archdata.dma_data;
dma_addr_t dma_next = 0, dma_addr;
unsigned long flags;
struct scatterlist *s, *outs, *segstart;
int outcount, incount, i;
unsigned int align;
unsigned long handle;
+ unsigned int max_seg_size;
BUG_ON(direction == DMA_NONE);
@@ -298,6 +297,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
spin_lock_irqsave(&(tbl->it_lock), flags);
+ max_seg_size = dma_get_max_seg_size(dev);
for_each_sg(sglist, s, nelems, i) {
unsigned long vaddr, npages, entry, slen;
@@ -314,7 +314,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
(vaddr & ~PAGE_MASK) == 0)
align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
- entry = iommu_range_alloc(tbl, npages, &handle,
+ entry = iommu_range_alloc(dev, tbl, npages, &handle,
mask >> IOMMU_PAGE_SHIFT, align);
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
@@ -344,7 +344,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
/* We cannot merge if:
* - allocated dma_addr isn't contiguous to previous allocation
*/
- if (novmerge || (dma_addr != dma_next)) {
+ if (novmerge || (dma_addr != dma_next) ||
+ (outs->dma_length + s->length > max_seg_size)) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
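
The new third merge condition keeps virtual merging from producing a segment
larger than the device's advertised limit, which drivers declare through the
generic DMA API. A minimal, hypothetical example of a driver imposing a
64 KiB cap (the function and device names are illustrative, not from this
patch):

    /* Hypothetical probe routine for a device limited to 64 KiB segments. */
    static int example_probe(struct pci_dev *pdev)
    {
            /* From now on, iommu_map_sg() on this device will start a new
             * segment rather than let dma_length grow past 64 KiB. */
            dma_set_max_seg_size(&pdev->dev, 65536);
            return 0;
    }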
@@ -452,9 +453,6 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
unsigned long sz;
- unsigned long start_index, end_index;
- unsigned long entries_per_4g;
- unsigned long index;
static int welcomed = 0;
struct page *page;
@@ -476,6 +474,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
#ifdef CONFIG_CRASH_DUMP
if (ppc_md.tce_get) {
+ unsigned long index;
unsigned long tceval;
unsigned long tcecount = 0;
@@ -506,23 +505,6 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif
- /*
- * DMA cannot cross 4 GB boundary. Mark last entry of each 4
- * GB chunk as reserved.
- */
- if (protect4gb) {
- entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;
-
- /* Mark the last bit before a 4GB boundary as used */
- start_index = tbl->it_offset | (entries_per_4g - 1);
- start_index -= tbl->it_offset;
-
- end_index = tbl->it_size;
-
- for (index = start_index; index < end_index - 1; index += entries_per_4g)
- __set_bit(index, tbl->it_map);
- }
-
if (!welcomed) {
printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
novmerge ? "disabled" : "enabled");
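
This block can go because the per-allocation boundary check subsumes it:
rather than permanently reserving the last TCE entry below every 4GB line
(which wasted entries and only approximated the constraint),
iommu_area_alloc() now rejects any candidate range that spans a boundary. The
invariant it maintains looks roughly like this (simplified sketch; the real
helper also folds in tbl->it_offset via its 'shift' argument):

    /* True when a run of npages entries starting at 'entry' would cross
     * a multiple of boundary_pages -- the case the allocator refuses. */
    static bool crosses_boundary(unsigned long entry, unsigned long npages,
                                 unsigned long boundary_pages)
    {
            return (entry / boundary_pages) !=
                   ((entry + npages - 1) / boundary_pages);
    }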
@@ -570,9 +552,9 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
* need not be page aligned, the dma_addr_t returned will point to the same
* byte within the page as vaddr.
*/
-dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
- size_t size, unsigned long mask,
- enum dma_data_direction direction)
+dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
+ void *vaddr, size_t size, unsigned long mask,
+ enum dma_data_direction direction)
{
dma_addr_t dma_handle = DMA_ERROR_CODE;
unsigned long uaddr;
@@ -589,7 +571,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
((unsigned long)vaddr & ~PAGE_MASK) == 0)
align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
- dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
+ dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
mask >> IOMMU_PAGE_SHIFT, align);
if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
@@ -621,8 +603,9 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (mapping) of the first page.
*/
-void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
- dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
+void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+ size_t size, dma_addr_t *dma_handle,
+ unsigned long mask, gfp_t flag, int node)
{
void *ret = NULL;
dma_addr_t mapping;
@@ -656,7 +639,7 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
/* Set up tces to cover the allocated range */
nio_pages = size >> IOMMU_PAGE_SHIFT;
io_order = get_iommu_order(size);
- mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+ mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> IOMMU_PAGE_SHIFT, io_order);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
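
With every exported entry point now taking a struct device *, the matching
caller updates elsewhere in the tree are mechanical. A hedged sketch of the
dma_ops glue after this patch, in the style of arch/powerpc/kernel/dma_64.c
of the period (device_to_mask() and the exact wrapper name are written from
memory and may differ):

    static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
                                           size_t size,
                                           enum dma_data_direction direction)
    {
            /* dev->archdata.dma_data carries the device's iommu_table,
             * exactly as iommu_map_sg() above now fetches it itself. */
            return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
                                    device_to_mask(dev), direction);
    }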