Diffstat (limited to 'drivers/xen')
-rw-r--r--   drivers/xen/biomerge.c                     |    5
-rw-r--r--   drivers/xen/events/events_base.c           |    1
-rw-r--r--   drivers/xen/evtchn.c                       |    2
-rw-r--r--   drivers/xen/privcmd-buf.c                  |    3
-rw-r--r--   drivers/xen/swiotlb-xen.c                  |  196
-rw-r--r--   drivers/xen/xenbus/xenbus_dev_frontend.c   |    4
6 files changed, 65 insertions, 146 deletions
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index f3fbb700f569..05a286d24f14 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -4,12 +4,13 @@
 #include <xen/xen.h>
 #include <xen/page.h>
 
+/* check if @page can be merged with 'vec1' */
 bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
-			       const struct bio_vec *vec2)
+			       const struct page *page)
 {
 #if XEN_PAGE_SIZE == PAGE_SIZE
 	unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
-	unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
+	unsigned long bfn2 = pfn_to_bfn(page_to_pfn(page));
 
 	return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
 #else
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 117e76b2f939..084e45882c73 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1687,7 +1687,6 @@ void __init xen_init_IRQ(void)
 
 #ifdef CONFIG_X86
 	if (xen_pv_domain()) {
-		irq_ctx_init(smp_processor_id());
 		if (xen_initial_domain())
 			pci_xen_initial_domain();
 	}
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 6d1a5e58968f..f341b016672f 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -664,7 +664,7 @@ static int evtchn_open(struct inode *inode, struct file *filp)
 
 	filp->private_data = u;
 
-	return nonseekable_open(inode, filp);
+	return stream_open(inode, filp);
 }
 
 static int evtchn_release(struct inode *inode, struct file *filp)
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index de01a6d0059d..a1c61e351d3f 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -140,8 +140,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
-	vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
-			   GFP_KERNEL);
+	vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL);
 	if (!vma_priv)
 		return -ENOMEM;
 
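Note: xen_biovec_phys_mergeable() now takes the candidate page directly instead of requiring callers to wrap it in a temporary bio_vec. The test itself is unchanged: the page may join the segment ending in vec1 only when its bus frame immediately follows vec1's last bus frame; under Xen, guest-physically contiguous pfns need not be machine-contiguous, hence the pfn_to_bfn() translations. A caller-side sketch of the new call shape (the helper name is hypothetical):

	/* grow the current segment only when the bus (machine) frames are
	 * contiguous, not merely the guest-physical page frames */
	static bool can_extend_segment(const struct bio_vec *vec1,
				       const struct page *page)
	{
		return xen_biovec_phys_mergeable(vec1, page);
	}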
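Note: stream_open() is the stream-device counterpart of nonseekable_open(): both strip FMODE_LSEEK/FMODE_PREAD/FMODE_PWRITE, but stream_open() additionally marks the file FMODE_STREAM, so read() and write() neither consult nor serialize on f_pos. A minimal sketch of the resulting open pattern, mirroring the evtchn change above (my_dev_open and my_state_alloc are hypothetical):

	static int my_dev_open(struct inode *inode, struct file *filp)
	{
		filp->private_data = my_state_alloc();	/* hypothetical */
		if (!filp->private_data)
			return -ENOMEM;

		/* no seeking; reads/writes ignore and never lock f_pos */
		return stream_open(inode, filp);
	}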
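Note: struct_size() from <linux/overflow.h> replaces the open-coded trailing-array allocation size in privcmd-buf. It evaluates to sizeof(*vma_priv) plus count times the size of one element of the pages[] flexible-array member, with the multiply and add checked for overflow; on overflow it saturates to SIZE_MAX, so kzalloc() fails cleanly instead of returning an undersized buffer:

	/* overflow-checked equivalent of
	 * sizeof(*vma_priv) + count * sizeof(vma_priv->pages[0]) */
	vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL);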
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 877baf2a94f4..5dcb06fe9667 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -391,13 +391,8 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	if (dma_capable(dev, dev_addr, size) &&
 	    !range_straddles_page_boundary(phys, size) &&
 	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
-	    (swiotlb_force != SWIOTLB_FORCE)) {
-		/* we are not interested in the dma_addr returned by
-		 * xen_dma_map_page, only in the potential cache flushes executed
-		 * by the function. */
-		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
-		return dev_addr;
-	}
+	    swiotlb_force != SWIOTLB_FORCE)
+		goto done;
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
@@ -410,19 +405,25 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 
 	dev_addr = xen_phys_to_bus(map);
-	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
-			 dev_addr, map & ~PAGE_MASK, size, dir, attrs);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (dma_capable(dev, dev_addr, size))
-		return dev_addr;
-
-	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+	if (unlikely(!dma_capable(dev, dev_addr, size))) {
+		swiotlb_tbl_unmap_single(dev, map, size, dir,
+				attrs | DMA_ATTR_SKIP_CPU_SYNC);
+		return DMA_MAPPING_ERROR;
+	}
 
-	return DMA_MAPPING_ERROR;
+	page = pfn_to_page(map >> PAGE_SHIFT);
+	offset = map & ~PAGE_MASK;
+done:
+	/*
+	 * we are not interested in the dma_addr returned by xen_dma_map_page,
+	 * only in the potential cache flushes executed by the function.
+	 */
+	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
+	return dev_addr;
 }
 
 /*
@@ -455,48 +456,28 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
 }
 
-/*
- * Make physical memory consistent for a single streaming mode DMA translation
- * after a transfer.
- *
- * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
- * using the cpu, yet do not wish to teardown the dma mapping, you must
- * call this function before doing so. At the next point you give the dma
- * address back to the card, you must first perform a
- * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer
- */
 static void
-xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-			size_t size, enum dma_data_direction dir,
-			enum dma_sync_target target)
+xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
+		size_t size, enum dma_data_direction dir)
 {
-	phys_addr_t paddr = xen_bus_to_phys(dev_addr);
-
-	BUG_ON(dir == DMA_NONE);
-
-	if (target == SYNC_FOR_CPU)
-		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);
+	phys_addr_t paddr = xen_bus_to_phys(dma_addr);
 
-	/* NOTE: We use dev_addr here, not paddr! */
-	if (is_xen_swiotlb_buffer(dev_addr))
-		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
+	xen_dma_sync_single_for_cpu(dev, dma_addr, size, dir);
 
-	if (target == SYNC_FOR_DEVICE)
-		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
+	if (is_xen_swiotlb_buffer(dma_addr))
+		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
 }
 
-void
-xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-				size_t size, enum dma_data_direction dir)
+static void
+xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
+		size_t size, enum dma_data_direction dir)
 {
-	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
-}
+	phys_addr_t paddr = xen_bus_to_phys(dma_addr);
 
-void
-xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
-				   size_t size, enum dma_data_direction dir)
-{
-	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
+	if (is_xen_swiotlb_buffer(dma_addr))
+		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+
+	xen_dma_sync_single_for_device(dev, dma_addr, size, dir);
 }
 
 /*
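Note: taken together, the two xen_swiotlb_map_page() hunks above collapse the fast path and the bounce-buffer path into a single exit point: both now reach the done: label, so xen_dma_map_page() is invoked exactly once, purely for its cache-maintenance side effects. Composed, the function comes out roughly as below; the declarations and the untouched middle lines (such as the swiotlb_tbl_map_single() call with its pre-existing start_dma_addr argument) are paraphrased from context, not quoted from the patch:

	static dma_addr_t xen_swiotlb_map_page(struct device *dev,
			struct page *page, unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs)
	{
		phys_addr_t map, phys = page_to_phys(page) + offset;
		dma_addr_t dev_addr = xen_phys_to_bus(phys);

		/* fast path: device can reach the buffer directly */
		if (dma_capable(dev, dev_addr, size) &&
		    !range_straddles_page_boundary(phys, size) &&
		    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
		    swiotlb_force != SWIOTLB_FORCE)
			goto done;

		/* slow path: bounce through the swiotlb pool */
		map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size,
					     dir, attrs);
		if (map == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;
		dev_addr = xen_phys_to_bus(map);

		if (unlikely(!dma_capable(dev, dev_addr, size))) {
			swiotlb_tbl_unmap_single(dev, map, size, dir,
					attrs | DMA_ATTR_SKIP_CPU_SYNC);
			return DMA_MAPPING_ERROR;
		}
		/* point the cache maintenance below at the bounce page */
		page = pfn_to_page(map >> PAGE_SHIFT);
		offset = map & ~PAGE_MASK;
	done:
		/* called only for its cache flushes; its result is ignored */
		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
		return dev_addr;
	}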
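Note: splitting xen_swiotlb_sync_single() into _for_cpu/_for_device variants drops the enum dma_sync_target plumbing and makes the direction-dependent ordering explicit rather than encoded in the position of two target checks: for_cpu performs the architecture cache maintenance first and then copies the bounce buffer back, while for_device copies into the bounce buffer first and flushes caches afterwards, so whoever gains ownership always sees freshly copied data. Driver-side, the pair wraps direct CPU access to a live streaming mapping, along these lines (dev, dma_handle, buf and len assumed set up by an earlier dma_map_single()):

	dma_sync_single_for_cpu(dev, dma_handle, len, DMA_FROM_DEVICE);
	examine(buf, len);	/* hypothetical CPU-side peek at the data */
	dma_sync_single_for_device(dev, dma_handle, len, DMA_FROM_DEVICE);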
@@ -504,9 +485,8 @@ xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
  * concerning calls here are the same as for swiotlb_unmap_page() above.
  */
 static void
-xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-			   int nelems, enum dma_data_direction dir,
-			   unsigned long attrs)
+xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -518,26 +498,9 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 
 }
 
-/*
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above xen_swiotlb_map_page
- * interface. Here the scatter gather list elements are each tagged with the
- * appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements.
- *       (for example via virtual mapping capabilities)
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
- * same here.
- */
 static int
-xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-			 int nelems, enum dma_data_direction dir,
-			 unsigned long attrs)
+xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	struct scatterlist *sg;
 	int i;
@@ -545,85 +508,44 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i) {
-		phys_addr_t paddr = sg_phys(sg);
-		dma_addr_t dev_addr = xen_phys_to_bus(paddr);
-
-		if (swiotlb_force == SWIOTLB_FORCE ||
-		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
-		    !dma_capable(hwdev, dev_addr, sg->length) ||
-		    range_straddles_page_boundary(paddr, sg->length)) {
-			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
-								 start_dma_addr,
-								 sg_phys(sg),
-								 sg->length,
-								 dir, attrs);
-			if (map == DMA_MAPPING_ERROR) {
-				dev_warn(hwdev, "swiotlb buffer is full\n");
-				/* Don't panic here, we expect map_sg users
-				   to do proper error handling. */
-				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
-							   attrs);
-				sg_dma_len(sgl) = 0;
-				return 0;
-			}
-			dev_addr = xen_phys_to_bus(map);
-			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
-						dev_addr,
-						map & ~PAGE_MASK,
-						sg->length,
-						dir,
-						attrs);
-			sg->dma_address = dev_addr;
-		} else {
-
-			/* we are not interested in the dma_addr returned by
-			 * xen_dma_map_page, only in the potential cache flushes executed
-			 * by the function. */
-			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
-						dev_addr,
-						paddr & ~PAGE_MASK,
-						sg->length,
-						dir,
-						attrs);
-			sg->dma_address = dev_addr;
-		}
+		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
+				sg->offset, sg->length, dir, attrs);
+		if (sg->dma_address == DMA_MAPPING_ERROR)
+			goto out_unmap;
 		sg_dma_len(sg) = sg->length;
 	}
+
 	return nelems;
+out_unmap:
+	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+	sg_dma_len(sgl) = 0;
+	return 0;
 }
 
-/*
- * Make physical memory consistent for a set of streaming mode DMA translations
- * after a transfer.
- *
- * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
- * and usage.
- */
 static void
-xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
-		    int nelems, enum dma_data_direction dir,
-		    enum dma_sync_target target)
+xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
+		int nelems, enum dma_data_direction dir)
 {
 	struct scatterlist *sg;
 	int i;
 
-	for_each_sg(sgl, sg, nelems, i)
-		xen_swiotlb_sync_single(hwdev, sg->dma_address,
-					sg_dma_len(sg), dir, target);
-}
-
-static void
-xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-			    int nelems, enum dma_data_direction dir)
-{
-	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
+	for_each_sg(sgl, sg, nelems, i) {
+		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
+				sg->length, dir);
+	}
 }
 
 static void
-xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
 			       int nelems, enum dma_data_direction dir)
 {
-	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nelems, i) {
+		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
+				sg->length, dir);
+	}
 }
 
 /*
@@ -690,8 +612,8 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
 	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
-	.map_sg = xen_swiotlb_map_sg_attrs,
-	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
+	.map_sg = xen_swiotlb_map_sg,
+	.unmap_sg = xen_swiotlb_unmap_sg,
 	.map_page = xen_swiotlb_map_page,
 	.unmap_page = xen_swiotlb_unmap_page,
 	.dma_supported = xen_swiotlb_dma_supported,
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index c3e201025ef0..0782ff3c2273 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
 	if (xen_store_evtchn == 0)
 		return -ENOENT;
 
-	nonseekable_open(inode, filp);
-
-	filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
+	stream_open(inode, filp);
 
 	u = kzalloc(sizeof(*u), GFP_KERNEL);
 	if (u == NULL)
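Note: xen_swiotlb_map_sg() no longer open-codes the bounce logic per entry; each scatterlist element is mapped through xen_swiotlb_map_page(), and on failure the already-mapped prefix (i entries) is unwound with DMA_ATTR_SKIP_CPU_SYNC, since no data was transferred. Returning 0 with sg_dma_len(sgl) cleared is the standard map_sg failure convention, so a caller checks for zero, roughly (hypothetical driver-side check):

	int nents = dma_map_sg(dev, sgl, nelems, DMA_TO_DEVICE);

	if (nents == 0)
		return -ENOMEM;	/* hypothetical choice of error code */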
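Note: unlike the evtchn conversion, the xenbus one also deletes a hand-rolled FMODE_ATOMIC_POS tweak. That line is subsumed rather than lost: stream_open() itself clears FMODE_ATOMIC_POS (along with FMODE_LSEEK, FMODE_PREAD and FMODE_PWRITE) and sets FMODE_STREAM, so stream files never take the f_pos lock in the first place.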