author     Dave Airlie <airlied@redhat.com>  2013-07-23 06:16:42 +0200
committer  Dave Airlie <airlied@redhat.com>  2013-07-24 03:58:10 +0200
commit     8002db6336dd361fc13214e9515fe5d52ff294ee
tree       67a6fad200b33eada21944d3e42911a32f523705 /drivers/gpu/drm/qxl/qxl_image.c
parent     qxl: allow creation of pre-pinned objects and use for releases.
qxl: convert qxl driver to proper use for reservations
The recent addition of lockdep support to reservations, and their subsequent
use by TTM, exposed a number of potential problems with the way qxl was using
TTM objects.
a) it was allocating objects and reserving them later, without validating
underneath the reservation, which meant that in extreme conditions the objects
could be evicted before the reservation ever used them.
b) it was reserving objects straight after allocating them, with no ability
to back off should a reservation fail. It now allocates all the necessary
objects first, then does a complete reservation pass on them to avoid deadlocks.
c) it had two lists per release tracking objects, unnecessarily complicating
the reservation process.
This patch removes the dual object tracking, adds reservation ticket support
to the release and fence object handling, and then ports the internal fb
drawing code and the userspace-facing ioctl to use the new interfaces properly,
along with cleaning up the error-path handling in some codepaths.
Signed-off-by: Dave Airlie <airlied@redhat.com>
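The back-off-and-retry reservation pass described above is built on the kernel's
wait/wound mutex (ww_mutex) machinery. The sketch below is illustrative only and
is not code from this patch: the names my_ww_class, struct my_buffer and
reserve_all() are invented, and qxl/TTM wrap this mechanism in their own helpers.
It shows the general pattern of allocating objects first and then reserving all
of them in one pass under a single acquire ticket, dropping everything and
retrying when a deadlock is signalled.

```c
/* Hypothetical sketch of the allocate-first, reserve-all-with-a-ticket pattern.
 * Assumes each buffer's lock was initialised with
 * ww_mutex_init(&buf->lock, &my_ww_class) when the buffer was created.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(my_ww_class);

struct my_buffer {
	struct ww_mutex lock;
	struct list_head entry;		/* link in the caller's list */
};

static int reserve_all(struct list_head *buffers, struct ww_acquire_ctx *ticket)
{
	struct my_buffer *buf, *b, *contended = NULL;
	int ret;

	ww_acquire_init(ticket, &my_ww_class);
retry:
	list_for_each_entry(buf, buffers, entry) {
		if (buf == contended) {
			/* Already locked via the slow path below. */
			contended = NULL;
			continue;
		}
		ret = ww_mutex_lock(&buf->lock, ticket);
		if (ret == -EDEADLK) {
			/* Back off: drop every lock taken so far in this pass. */
			list_for_each_entry(b, buffers, entry) {
				if (b == buf)
					break;
				if (b != contended)
					ww_mutex_unlock(&b->lock);
			}
			if (contended)
				ww_mutex_unlock(&contended->lock);
			/* Wait for the contended lock, then retry the whole pass. */
			ww_mutex_lock_slow(&buf->lock, ticket);
			contended = buf;
			goto retry;
		}
	}
	ww_acquire_done(ticket);	/* every buffer is now reserved */
	return 0;
}
```

Once the buffers have been used and unlocked again, the caller would finish the
ticket with ww_acquire_fini(); the point of the single ticket is that the retry
keeps a stable age, so one of two contending tasks always backs off and the
reservation pass cannot deadlock.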
Diffstat (limited to 'drivers/gpu/drm/qxl/qxl_image.c')
-rw-r--r--  drivers/gpu/drm/qxl/qxl_image.c | 111
1 file changed, 86 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index cf856206996b..7fbcc35e8ad3 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -30,31 +30,100 @@
 #include "qxl_object.h"
 
 static int
-qxl_image_create_helper(struct qxl_device *qdev,
+qxl_allocate_chunk(struct qxl_device *qdev,
+                   struct qxl_release *release,
+                   struct qxl_drm_image *image,
+                   unsigned int chunk_size)
+{
+        struct qxl_drm_chunk *chunk;
+        int ret;
+
+        chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
+        if (!chunk)
+                return -ENOMEM;
+
+        ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
+        if (ret) {
+                kfree(chunk);
+                return ret;
+        }
+
+        list_add_tail(&chunk->head, &image->chunk_list);
+        return 0;
+}
+
+int
+qxl_image_alloc_objects(struct qxl_device *qdev,
                         struct qxl_release *release,
-                        struct qxl_bo **image_bo,
-                        const uint8_t *data,
-                        int width, int height,
-                        int depth, unsigned int hash,
-                        int stride)
+                        struct qxl_drm_image **image_ptr,
+                        int height, int stride)
+{
+        struct qxl_drm_image *image;
+        int ret;
+
+        image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
+        if (!image)
+                return -ENOMEM;
+
+        INIT_LIST_HEAD(&image->chunk_list);
+
+        ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
+        if (ret) {
+                kfree(image);
+                return ret;
+        }
+
+        ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
+        if (ret) {
+                qxl_bo_unref(&image->bo);
+                kfree(image);
+                return ret;
+        }
+        *image_ptr = image;
+        return 0;
+}
+
+void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
 {
+        struct qxl_drm_chunk *chunk, *tmp;
+
+        list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
+                qxl_bo_unref(&chunk->bo);
+                kfree(chunk);
+        }
+
+        qxl_bo_unref(&dimage->bo);
+        kfree(dimage);
+}
+
+static int
+qxl_image_init_helper(struct qxl_device *qdev,
+                      struct qxl_release *release,
+                      struct qxl_drm_image *dimage,
+                      const uint8_t *data,
+                      int width, int height,
+                      int depth, unsigned int hash,
+                      int stride)
+{
+        struct qxl_drm_chunk *drv_chunk;
         struct qxl_image *image;
         struct qxl_data_chunk *chunk;
         int i;
         int chunk_stride;
         int linesize = width * depth / 8;
-        struct qxl_bo *chunk_bo;
-        int ret;
+        struct qxl_bo *chunk_bo, *image_bo;
         void *ptr;
         /* Chunk */
         /* FIXME: Check integer overflow */
         /* TODO: variable number of chunks */
+
+        drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);
+
+        chunk_bo = drv_chunk->bo;
         chunk_stride = stride; /* TODO: should use linesize, but it renders
                                   wrong (check the bitmaps are sent correctly
                                   first) */
-        ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
-                                    &chunk_bo);
-
+
         ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
         chunk = ptr;
         chunk->data_size = height * chunk_stride;
@@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev,
                 while (remain > 0) {
                         page_base = out_offset & PAGE_MASK;
                         page_offset = offset_in_page(out_offset);
-
                         size = min((int)(PAGE_SIZE - page_offset), remain);
 
                         ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
@@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev,
                         }
                 }
         }
-
-
         qxl_bo_kunmap(chunk_bo);
 
-        /* Image */
-        ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);
-
-        ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
+        image_bo = dimage->bo;
+        ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
         image = ptr;
 
         image->descriptor.id = 0;
@@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev,
         image->u.bitmap.stride = chunk_stride;
         image->u.bitmap.palette = 0;
         image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
-        qxl_release_add_res(qdev, release, chunk_bo);
-        qxl_bo_unreserve(chunk_bo);
-        qxl_bo_unref(&chunk_bo);
 
-        qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);
+        qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
 
         return 0;
 }
 
-int qxl_image_create(struct qxl_device *qdev,
+int qxl_image_init(struct qxl_device *qdev,
                      struct qxl_release *release,
-                     struct qxl_bo **image_bo,
+                     struct qxl_drm_image *dimage,
                      const uint8_t *data,
                      int x, int y, int width, int height,
                      int depth, int stride)
 {
         data += y * stride + x * (depth / 8);
-        return qxl_image_create_helper(qdev, release, image_bo, data,
+        return qxl_image_init_helper(qdev, release, dimage, data,
                                        width, height, depth, 0, stride);
 }
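To place the new entry points in context, here is a hypothetical caller flow,
not taken verbatim from this patch: draw_image_example() is an invented name,
and the reservation and fencing steps (qxl_release_reserve_list() and
qxl_release_fence_buffer_objects(), which live in the companion qxl_release.c
changes) are only indicated in comments. It shows the order the commit message
describes: allocate everything, reserve in one pass, only then write to the
objects, fence, and free the helper structures.

```c
/* Hypothetical caller sketch using the functions defined in qxl_image.c. */
#include "qxl_drv.h"
#include "qxl_object.h"

static int draw_image_example(struct qxl_device *qdev,
                              struct qxl_release *release,
                              const uint8_t *data,
                              int width, int height, int depth, int stride)
{
        struct qxl_drm_image *dimage;
        int ret;

        /* 1) Allocate the image BO and its data chunk, still unreserved. */
        ret = qxl_image_alloc_objects(qdev, release, &dimage, height, stride);
        if (ret)
                return ret;

        /* 2) Reserve every BO tracked by the release in one pass
         *    (qxl_release_reserve_list() in qxl_release.c), with the
         *    ability to back off and retry on contention.
         */

        /* 3) Only now touch the reserved, validated objects. */
        ret = qxl_image_init(qdev, release, dimage, data,
                             0, 0, width, height, depth, stride);

        /* 4) On success the caller queues the command and fences the
         *    reserved BOs (qxl_release_fence_buffer_objects()); on error
         *    it backs off the reservation instead.
         */

        /* 5) Drop the helper's references to the image objects. */
        qxl_image_free_objects(qdev, dimage);
        return ret;
}
```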