author     Thomas Zimmermann <tzimmermann@suse.de>   2020-11-03 10:30:10 +0100
committer  Thomas Zimmermann <tzimmermann@suse.de>   2020-11-09 09:17:36 +0100
commit     43676605f890b218e551f396a55dbaea7799acb4 (patch)
tree       b3ce6592acfafea411d364ed64b7e98e8666d864 /drivers
parent     drm/exynos: Remove empty exynos_drm_gem_prime_{vmap,vunmap}() (diff)
drm/ttm: Add vmap/vunmap to TTM and TTM GEM helpers
The new functions ttm_bo_{vmap,vunmap}() map and unmap a TTM BO in kernel
address space. The mapping's address is returned as struct dma_buf_map.
Each function is a simplified version of TTM's existing kmap code. Both
functions respect the memory's location and/or writecombine flags.
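A minimal caller sketch (not part of this patch) of how the returned
struct dma_buf_map might be consumed; the helper name
ttm_bo_peek_first_byte() is made up for illustration, and the sketch
assumes the ttm_bo_vmap()/ttm_bo_vunmap() declarations and
<linux/dma-buf-map.h> are available in the including file:

/* Illustrative sketch only: map a TTM BO, read its first byte, unmap again. */
static int ttm_bo_peek_first_byte(struct ttm_buffer_object *bo, u8 *out)
{
	struct dma_buf_map map;
	int ret;

	ret = ttm_bo_vmap(bo, &map);
	if (ret)
		return ret;

	/* The mapping may live in I/O memory or in system memory. */
	if (map.is_iomem)
		*out = readb(map.vaddr_iomem);
	else
		*out = *(u8 *)map.vaddr;

	ttm_bo_vunmap(bo, &map);
	return 0;
}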
On top of TTM's functions, GEM TTM helpers got drm_gem_ttm_{vmap,vunmap}(),
two helpers that convert a GEM object into the TTM BO and forward the call
to TTM's vmap/vunmap. These helpers can be dropped into the respective GEM
object callbacks.
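As a sketch of what dropping the helpers into the respective GEM object
callbacks can look like in a driver (the structure name
my_gem_object_funcs is invented for the example; drm_gem_ttm_mmap() is the
existing mmap helper from the same file):

/* Sketch: a TTM-based driver wiring the helpers into its GEM callbacks. */
static const struct drm_gem_object_funcs my_gem_object_funcs = {
	.vmap   = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap   = drm_gem_ttm_mmap,
};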
v5:
* use size_t for storing mapping size (Christian)
* ignore premapped memory areas correctly in ttm_bo_vunmap()
* rebase onto latest TTM interfaces (Christian)
* remove BUG() from ttm_bo_vmap() (Christian)
v4:
* drop ttm_kmap_obj_to_dma_buf() in favor of vmap helpers (Daniel,
Christian)
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Tested-by: Sam Ravnborg <sam@ravnborg.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20201103093015.1063-6-tzimmermann@suse.de
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/drm_gem_ttm_helper.c  37
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c     72
2 files changed, 109 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index 0e4fb9ba43ad..de28720757af 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -50,6 +50,43 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
 EXPORT_SYMBOL(drm_gem_ttm_print_info);
 
 /**
+ * drm_gem_ttm_vmap() - vmap &ttm_buffer_object
+ * @gem: GEM object.
+ * @map: [out] returns the dma-buf mapping.
+ *
+ * Maps a GEM object with ttm_bo_vmap(). This function can be used as
+ * &drm_gem_object_funcs.vmap callback.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_gem_ttm_vmap(struct drm_gem_object *gem,
+		     struct dma_buf_map *map)
+{
+	struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+
+	return ttm_bo_vmap(bo, map);
+}
+EXPORT_SYMBOL(drm_gem_ttm_vmap);
+
+/**
+ * drm_gem_ttm_vunmap() - vunmap &ttm_buffer_object
+ * @gem: GEM object.
+ * @map: dma-buf mapping.
+ *
+ * Unmaps a GEM object with ttm_bo_vunmap(). This function can be used as
+ * &drm_gem_object_funcs.vunmap callback.
+ */
+void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
+			struct dma_buf_map *map)
+{
+	struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+
+	ttm_bo_vunmap(bo, map);
+}
+EXPORT_SYMBOL(drm_gem_ttm_vunmap);
+
+/**
  * drm_gem_ttm_mmap() - mmap &ttm_buffer_object
  * @gem: GEM object.
  * @vma: vm area.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ecb54415d1ca..7ccb2295cac1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -32,6 +32,7 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/drm_vma_manager.h>
+#include <linux/dma-buf-map.h>
 #include <linux/io.h>
 #include <linux/highmem.h>
 #include <linux/wait.h>
@@ -471,6 +472,77 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
+int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
+{
+	struct ttm_resource *mem = &bo->mem;
+	int ret;
+
+	ret = ttm_mem_io_reserve(bo->bdev, mem);
+	if (ret)
+		return ret;
+
+	if (mem->bus.is_iomem) {
+		void __iomem *vaddr_iomem;
+		size_t size = bo->num_pages << PAGE_SHIFT;
+
+		if (mem->bus.addr)
+			vaddr_iomem = (void __iomem *)mem->bus.addr;
+		else if (mem->bus.caching == ttm_write_combined)
+			vaddr_iomem = ioremap_wc(mem->bus.offset, size);
+		else
+			vaddr_iomem = ioremap(mem->bus.offset, size);
+
+		if (!vaddr_iomem)
+			return -ENOMEM;
+
+		dma_buf_map_set_vaddr_iomem(map, vaddr_iomem);
+
+	} else {
+		struct ttm_operation_ctx ctx = {
+			.interruptible = false,
+			.no_wait_gpu = false
+		};
+		struct ttm_tt *ttm = bo->ttm;
+		pgprot_t prot;
+		void *vaddr;
+
+		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
+		if (ret)
+			return ret;
+
+		/*
+		 * We need to use vmap to get the desired page protection
+		 * or to make the buffer object look contiguous.
+		 */
+		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
+		vaddr = vmap(ttm->pages, bo->num_pages, 0, prot);
+		if (!vaddr)
+			return -ENOMEM;
+
+		dma_buf_map_set_vaddr(map, vaddr);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ttm_bo_vmap);
+
+void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
+{
+	struct ttm_resource *mem = &bo->mem;
+
+	if (dma_buf_map_is_null(map))
+		return;
+
+	if (!map->is_iomem)
+		vunmap(map->vaddr);
+	else if (!mem->bus.addr)
+		iounmap(map->vaddr_iomem);
+	dma_buf_map_clear(map);
+
+	ttm_mem_io_free(bo->bdev, &bo->mem);
+}
+EXPORT_SYMBOL(ttm_bo_vunmap);
+
 static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
 {