author      Dmitry Osipenko <dmitry.osipenko@collabora.com>    2023-03-24 00:07:55 +0100
committer   Dmitry Osipenko <dmitry.osipenko@collabora.com>    2023-08-01 00:41:04 +0200
commit      7cb8d1ab8cbda554341dac8b54fd135dedff4245 (patch)
tree        70796cde2abee0584f228fa2eea21735bba6b0e7 /drivers/gpu/drm/virtio
parent      fbdev: Align deferred I/O with naming of helpers (diff)
drm/virtio: Support sync objects
Add sync object DRM UAPI support to the VirtIO-GPU driver. Sync object
support is needed by native context VirtIO-GPU Mesa drivers; it will also
be used by the Venus and Virgl contexts.

Reviewed-by: Emil Velikov <emil.velikov@collabora.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Tested-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com> # amdgpu nctx
Tested-by: Rob Clark <robdclark@gmail.com> # freedreno nctx
Reviewed-by: Rob Clark <robdclark@gmail.com>
Acked-by: Gurchetan Singh <gurchetansingh@chromium.org>
Acked-by: Gerd Hoffmann <kraxel@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230416115237.798604-4-dmitry.osipenko@collabora.com
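The submit code in this patch consumes several new execbuffer UAPI fields that live outside the diffstat below (in include/uapi/drm/virtgpu_drm.h). For reference, here is a minimal sketch of that UAPI, reconstructed from the identifiers used in virtgpu_submit.c; exact field order and padding are assumptions:

/*
 * Sketch of the sync object execbuffer UAPI, reconstructed from the
 * driver code in this patch. The authoritative definitions live in
 * include/uapi/drm/virtgpu_drm.h.
 */
#include <linux/types.h>

struct drm_virtgpu_execbuffer_syncobj {
	__u32 handle;	/* DRM syncobj handle */
	__u32 flags;	/* VIRTGPU_EXECBUF_SYNCOBJ_* */
	__u64 point;	/* timeline point; 0 selects the binary-syncobj path */
};

/* Reset the input syncobj once its fence has been taken. */
#define VIRTGPU_EXECBUF_SYNCOBJ_RESET	0x01
#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS	(VIRTGPU_EXECBUF_SYNCOBJ_RESET)

struct drm_virtgpu_execbuffer {
	/* ... pre-existing fields elided ... */
	__u64 in_syncobjs;	/* user pointer to an array of descriptors */
	__u64 out_syncobjs;
	__u32 num_in_syncobjs;
	__u32 num_out_syncobjs;
	__u32 syncobj_stride;	/* size of one descriptor entry */
};

The stride lets future kernels grow the descriptor struct while still accepting arrays laid out against the older, smaller definition; note the min(syncobj_stride, sizeof(syncobj_desc)) bound on copy_from_user() in the parsing code below.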
Diffstat (limited to 'drivers/gpu/drm/virtio')
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_drv.c       3
-rw-r--r--   drivers/gpu/drm/virtio/virtgpu_submit.c    224
2 files changed, 226 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index a7ec5a3770da..644b8ee51009 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -176,7 +176,8 @@ static const struct drm_driver driver = {
* If KMS is disabled, DRIVER_MODESET and DRIVER_ATOMIC are masked
* out via drm_device::driver_features:
*/
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC |
+ DRIVER_SYNCOBJ | DRIVER_SYNCOBJ_TIMELINE,
.open = virtio_gpu_driver_open,
.postclose = virtio_gpu_driver_postclose,
diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
index 1d010c66910d..3c00135ead45 100644
--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
+++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
@@ -14,11 +14,24 @@
#include <linux/uaccess.h>
#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
#include <drm/virtgpu_drm.h>
#include "virtgpu_drv.h"
+struct virtio_gpu_submit_post_dep {
+ struct drm_syncobj *syncobj;
+ struct dma_fence_chain *chain;
+ u64 point;
+};
+
struct virtio_gpu_submit {
+ struct virtio_gpu_submit_post_dep *post_deps;
+ unsigned int num_out_syncobjs;
+
+ struct drm_syncobj **in_syncobjs;
+ unsigned int num_in_syncobjs;
+
struct virtio_gpu_object_array *buflist;
struct drm_virtgpu_execbuffer *exbuf;
struct virtio_gpu_fence *out_fence;
@@ -59,6 +72,203 @@ static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,
return 0;
}
+static void virtio_gpu_free_syncobjs(struct drm_syncobj **syncobjs,
+ u32 nr_syncobjs)
+{
+ u32 i = nr_syncobjs;
+
+ while (i--) {
+ if (syncobjs[i])
+ drm_syncobj_put(syncobjs[i]);
+ }
+
+ kvfree(syncobjs);
+}
+
+static int
+virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)
+{
+ struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+ struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
+ size_t syncobj_stride = exbuf->syncobj_stride;
+ u32 num_in_syncobjs = exbuf->num_in_syncobjs;
+ struct drm_syncobj **syncobjs;
+ int ret = 0, i;
+
+ if (!num_in_syncobjs)
+ return 0;
+
+ /*
+ * kvcalloc first tries to allocate memory using kmalloc and
+ * falls back to vmalloc only on failure. It also uses __GFP_NOWARN
+ * internally for allocations larger than a page size, preventing
+ * a storm of kmsg warnings.
+ */
+ syncobjs = kvcalloc(num_in_syncobjs, sizeof(*syncobjs), GFP_KERNEL);
+ if (!syncobjs)
+ return -ENOMEM;
+
+ for (i = 0; i < num_in_syncobjs; i++) {
+ u64 address = exbuf->in_syncobjs + i * syncobj_stride;
+ struct dma_fence *fence;
+
+ memset(&syncobj_desc, 0, sizeof(syncobj_desc));
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (syncobj_desc.flags & ~VIRTGPU_EXECBUF_SYNCOBJ_FLAGS) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,
+ syncobj_desc.point, 0, &fence);
+ if (ret)
+ break;
+
+ ret = virtio_gpu_dma_fence_wait(submit, fence);
+
+ dma_fence_put(fence);
+ if (ret)
+ break;
+
+ if (syncobj_desc.flags & VIRTGPU_EXECBUF_SYNCOBJ_RESET) {
+ syncobjs[i] = drm_syncobj_find(submit->file,
+ syncobj_desc.handle);
+ if (!syncobjs[i]) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ }
+
+ if (ret) {
+ virtio_gpu_free_syncobjs(syncobjs, i);
+ return ret;
+ }
+
+ submit->num_in_syncobjs = num_in_syncobjs;
+ submit->in_syncobjs = syncobjs;
+
+ return ret;
+}
+
+static void virtio_gpu_reset_syncobjs(struct drm_syncobj **syncobjs,
+ u32 nr_syncobjs)
+{
+ u32 i;
+
+ for (i = 0; i < nr_syncobjs; i++) {
+ if (syncobjs[i])
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+ }
+}
+
+static void
+virtio_gpu_free_post_deps(struct virtio_gpu_submit_post_dep *post_deps,
+ u32 nr_syncobjs)
+{
+ u32 i = nr_syncobjs;
+
+ while (i--) {
+ kfree(post_deps[i].chain);
+ drm_syncobj_put(post_deps[i].syncobj);
+ }
+
+ kvfree(post_deps);
+}
+
+static int virtio_gpu_parse_post_deps(struct virtio_gpu_submit *submit)
+{
+ struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
+ struct drm_virtgpu_execbuffer_syncobj syncobj_desc;
+ struct virtio_gpu_submit_post_dep *post_deps;
+ u32 num_out_syncobjs = exbuf->num_out_syncobjs;
+ size_t syncobj_stride = exbuf->syncobj_stride;
+ int ret = 0, i;
+
+ if (!num_out_syncobjs)
+ return 0;
+
+ post_deps = kvcalloc(num_out_syncobjs, sizeof(*post_deps), GFP_KERNEL);
+ if (!post_deps)
+ return -ENOMEM;
+
+ for (i = 0; i < num_out_syncobjs; i++) {
+ u64 address = exbuf->out_syncobjs + i * syncobj_stride;
+
+ memset(&syncobj_desc, 0, sizeof(syncobj_desc));
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ post_deps[i].point = syncobj_desc.point;
+
+ if (syncobj_desc.flags) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (syncobj_desc.point) {
+ post_deps[i].chain = dma_fence_chain_alloc();
+ if (!post_deps[i].chain) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ post_deps[i].syncobj = drm_syncobj_find(submit->file,
+ syncobj_desc.handle);
+ if (!post_deps[i].syncobj) {
+ kfree(post_deps[i].chain);
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret) {
+ virtio_gpu_free_post_deps(post_deps, i);
+ return ret;
+ }
+
+ submit->num_out_syncobjs = num_out_syncobjs;
+ submit->post_deps = post_deps;
+
+ return 0;
+}
+
+static void
+virtio_gpu_process_post_deps(struct virtio_gpu_submit *submit)
+{
+ struct virtio_gpu_submit_post_dep *post_deps = submit->post_deps;
+
+ if (post_deps) {
+ struct dma_fence *fence = &submit->out_fence->f;
+ u32 i;
+
+ for (i = 0; i < submit->num_out_syncobjs; i++) {
+ if (post_deps[i].chain) {
+ drm_syncobj_add_point(post_deps[i].syncobj,
+ post_deps[i].chain,
+ fence, post_deps[i].point);
+ post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(post_deps[i].syncobj,
+ fence);
+ }
+ }
+ }
+}
+
static int virtio_gpu_fence_event_create(struct drm_device *dev,
struct drm_file *file,
struct virtio_gpu_fence *fence,
@@ -118,6 +328,10 @@ static int virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit)
static void virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit)
{
+ virtio_gpu_reset_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
+ virtio_gpu_free_syncobjs(submit->in_syncobjs, submit->num_in_syncobjs);
+ virtio_gpu_free_post_deps(submit->post_deps, submit->num_out_syncobjs);
+
if (!IS_ERR(submit->buf))
kvfree(submit->buf);
@@ -172,6 +386,7 @@ static int virtio_gpu_init_submit(struct virtio_gpu_submit *submit,
drm_fence_event = false;
if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) ||
+ exbuf->num_out_syncobjs ||
exbuf->num_bo_handles ||
drm_fence_event)
out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
@@ -291,6 +506,14 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
if (ret)
goto cleanup;
+ ret = virtio_gpu_parse_post_deps(&submit);
+ if (ret)
+ goto cleanup;
+
+ ret = virtio_gpu_parse_deps(&submit);
+ if (ret)
+ goto cleanup;
+
/*
* Await in-fences at the end of the job submission path to
* optimize the path by proceeding directly to the submission
@@ -311,6 +534,7 @@ int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
* the job submission path.
*/
virtio_gpu_install_out_fence_fd(&submit);
+ virtio_gpu_process_post_deps(&submit);
virtio_gpu_complete_submit(&submit);
cleanup:
virtio_gpu_cleanup_submit(&submit);