 drivers/gpu/drm/vmwgfx/Makefile          |   3
 drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c   |  26
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c      |  28
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  16
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  | 112
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c    | 619
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.h    | 105
 drivers/gpu/drm/vmwgfx/vmwgfx_irq.c      |  35
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |   4
 include/drm/vmwgfx_drm.h                 | 149
 10 files changed, 1010 insertions(+), 87 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index f41e8b499978..7d8e9d5d498c 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,7 @@ ccflags-y := -Iinclude/drm
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
-	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o
+	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
+	    vmwgfx_fence.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 72d95617bc59..5d665ce8cbe4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -274,39 +274,39 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 
 static void *vmw_sync_obj_ref(void *sync_obj)
 {
-	return sync_obj;
+
+	return (void *)
+		vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
 }
 
 static void vmw_sync_obj_unref(void **sync_obj)
 {
-	*sync_obj = NULL;
+	vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
 }
 
 static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
 {
-	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
-
-	mutex_lock(&dev_priv->hw_mutex);
-	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
-	mutex_unlock(&dev_priv->hw_mutex);
+	vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
 	return 0;
 }
 
 static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
 {
-	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
-	uint32_t seqno = (unsigned long) sync_obj;
+	unsigned long flags = (unsigned long) sync_arg;
 
+	return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
+				      (uint32_t) flags);
-	return vmw_seqno_passed(dev_priv, seqno);
 }
 
 static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
			      bool lazy, bool interruptible)
 {
-	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
-	uint32_t seqno = (unsigned long) sync_obj;
+	unsigned long flags = (unsigned long) sync_arg;
 
-	return vmw_wait_seqno(dev_priv, false, seqno, false, 3*HZ);
+	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
+				  (uint32_t) flags,
+				  lazy, interruptible,
+				  VMW_FENCE_WAIT_TIMEOUT);
 }
 
 struct ttm_bo_driver vmw_bo_driver = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4f65f1e34b8f..d4829cbf326d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -82,12 +82,18 @@
 #define DRM_IOCTL_VMW_EXECBUF					\
 	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
 		struct drm_vmw_execbuf_arg)
+#define DRM_IOCTL_VMW_GET_3D_CAP				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
+		struct drm_vmw_get_3d_cap_arg)
 #define DRM_IOCTL_VMW_FENCE_WAIT				\
 	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
 		 struct drm_vmw_fence_wait_arg)
-#define DRM_IOCTL_VMW_GET_3D_CAP				\
-	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,		\
-		struct drm_vmw_get_3d_cap_arg)
+#define DRM_IOCTL_VMW_FENCE_SIGNALED				\
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,	\
+		 struct drm_vmw_fence_signaled_arg)
+#define DRM_IOCTL_VMW_FENCE_UNREF				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
+		struct drm_vmw_fence_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -131,7 +137,12 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_AUTH | DRM_UNLOCKED),
 	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
+	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
+		      vmw_fence_obj_signaled_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
 	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
@@ -198,12 +209,14 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 		DRM_ERROR("Unable to initialize FIFO.\n");
 		return ret;
 	}
+	vmw_fence_fifo_up(dev_priv->fman);
 
 	return 0;
 }
 
 static void vmw_release_device(struct vmw_private *dev_priv)
 {
+	vmw_fence_fifo_down(dev_priv->fman);
 	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 }
 
@@ -434,6 +447,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 			goto out_no_device;
 		}
 	}
+
+	dev_priv->fman = vmw_fence_manager_init(dev_priv);
+	if (unlikely(dev_priv->fman == NULL))
+		goto out_no_fman;
 	ret = vmw_kms_init(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_kms;
@@ -475,6 +492,8 @@ out_no_fifo:
 	vmw_overlay_close(dev_priv);
 	vmw_kms_close(dev_priv);
 out_no_kms:
+	vmw_fence_manager_takedown(dev_priv->fman);
+out_no_fman:
 	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
 	else
@@ -518,6 +537,7 @@ static int vmw_driver_unload(struct drm_device *dev)
 	}
 	vmw_kms_close(dev_priv);
 	vmw_overlay_close(dev_priv);
+	vmw_fence_manager_takedown(dev_priv->fman);
 	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
 	else
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 3018871aaaff..770f0636cee8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -38,6 +38,7 @@
 #include "ttm/ttm_lock.h"
 #include "ttm/ttm_execbuf_util.h"
 #include "ttm/ttm_module.h"
+#include "vmwgfx_fence.h"
 
 #define VMWGFX_DRIVER_DATE "20100927"
 #define VMWGFX_DRIVER_MAJOR 1
@@ -53,6 +54,11 @@
 #define VMW_PL_GMR TTM_PL_PRIV0
 #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
 
+#define VMW_RES_CONTEXT ttm_driver_type0
+#define VMW_RES_SURFACE ttm_driver_type1
+#define VMW_RES_STREAM ttm_driver_type2
+#define VMW_RES_FENCE ttm_driver_type3
+
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
@@ -245,6 +251,7 @@ struct vmw_private {
 	atomic_t fifo_queue_waiters;
 	uint32_t last_read_seqno;
 	spinlock_t irq_lock;
+	struct vmw_fence_manager *fman;
 
 	/*
 	 * Device state
@@ -456,8 +463,6 @@ extern int vmw_irq_postinstall(struct drm_device *dev);
 extern void vmw_irq_uninstall(struct drm_device *dev);
 extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
 			     uint32_t seqno);
-extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
 extern int vmw_fallback_wait(struct vmw_private *dev_priv,
 			     bool lazy,
 			     bool fifo_idle,
@@ -466,7 +471,8 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
 			     unsigned long timeout);
 extern void vmw_update_seqno(struct vmw_private *dev_priv,
 			     struct vmw_fifo_state *fifo_state);
-
+extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
+extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
 
 /**
  * Rudimentary fence-like objects currently used only for throttling -
@@ -572,4 +578,8 @@ static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer
 	return NULL;
 }
 
+static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
+{
+	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
+}
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index be41484735b1..d48ee89a5190 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -256,7 +256,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 		val_buf = &sw_context->val_bufs[cur_validate_node];
 		val_buf->bo = ttm_bo_reference(bo);
 		val_buf->usage = TTM_USAGE_READWRITE;
-		val_buf->new_sync_obj_arg = (void *) dev_priv;
+		val_buf->new_sync_obj_arg = (void *) DRM_VMW_FENCE_FLAG_EXEC;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		++sw_context->cur_val_buf;
 	}
@@ -321,7 +321,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 	return 0;
 }
 
-
 static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		       struct vmw_sw_context *sw_context,
 		       SVGA3dCmdHeader *header)
@@ -676,6 +675,50 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
 	return 0;
 }
 
+/**
+ * vmw_execbuf_fence_commands - create and submit a command stream fence
+ *
+ * Creates a fence object and submits a command stream marker.
+ * If this fails for some reason, we sync the fifo and return NULL.
+ * It is then safe to fence buffers with a NULL pointer.
+ */
+
+int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+			       struct vmw_private *dev_priv,
+			       struct vmw_fence_obj **p_fence,
+			       uint32_t *p_handle)
+{
+	uint32_t sequence;
+	int ret;
+	bool synced = false;
+
+
+	ret = vmw_fifo_send_fence(dev_priv, &sequence);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Fence submission error. Syncing.\n");
+		synced = true;
+	}
+
+	if (p_handle != NULL)
+		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
+					    sequence,
+					    DRM_VMW_FENCE_FLAG_EXEC,
+					    p_fence, p_handle);
+	else
+		ret = vmw_fence_create(dev_priv->fman, sequence,
+				       DRM_VMW_FENCE_FLAG_EXEC,
+				       p_fence);
+
+	if (unlikely(ret != 0 && !synced)) {
+		(void) vmw_fallback_wait(dev_priv, false, false,
+					 sequence, false,
+					 VMW_FENCE_WAIT_TIMEOUT);
+		*p_fence = NULL;
+	}
+
+	return 0;
+}
+
 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
@@ -686,9 +729,10 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	int ret;
 	void *user_cmd;
 	void *cmd;
-	uint32_t seqno;
 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	struct vmw_fence_obj *fence;
+	uint32_t handle;
 
 	ret = ttm_read_lock(&vmaster->lock, true);
 	if (unlikely(ret != 0))
@@ -755,34 +799,60 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	memcpy(cmd, sw_context->cmd_bounce, arg->command_size);
 	vmw_fifo_commit(dev_priv, arg->command_size);
 
-	ret = vmw_fifo_send_fence(dev_priv, &seqno);
-
-	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
-				    (void *)(unsigned long) seqno);
-	vmw_clear_validations(sw_context);
-	mutex_unlock(&dev_priv->cmdbuf_mutex);
-
+	user_fence_rep = (struct drm_vmw_fence_rep __user *)
+		(unsigned long)arg->fence_rep;
+	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+					 &fence,
+					 (user_fence_rep) ? &handle : NULL);
 	/*
 	 * This error is harmless, because if fence submission fails,
-	 * vmw_fifo_send_fence will sync.
+	 * vmw_fifo_send_fence will sync. The error will be propagated to
+	 * user-space in @fence_rep
 	 */
 
 	if (ret != 0)
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
-	fence_rep.error = ret;
-	fence_rep.fence_seq = (uint64_t) seqno;
-	fence_rep.pad64 = 0;
+	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
+				    (void *) fence);
 
-	user_fence_rep = (struct drm_vmw_fence_rep __user *)
-	    (unsigned long)arg->fence_rep;
+	vmw_clear_validations(sw_context);
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
-	/*
-	 * copy_to_user errors will be detected by user space not
-	 * seeing fence_rep::error filled in.
-	 */
+	if (user_fence_rep) {
+		fence_rep.error = ret;
+		fence_rep.handle = handle;
+		fence_rep.seqno = fence->seqno;
+		vmw_update_seqno(dev_priv, &dev_priv->fifo);
+		fence_rep.passed_seqno = dev_priv->last_read_seqno;
+
+		/*
+		 * copy_to_user errors will be detected by user space not
+		 * seeing fence_rep::error filled in. Typically
+		 * user-space would have pre-set that member to -EFAULT.
+		 */
+		ret = copy_to_user(user_fence_rep, &fence_rep,
+				   sizeof(fence_rep));
+
+		/*
+		 * User-space lost the fence object. We need to sync
+		 * and unreference the handle.
+		 */
+		if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+			BUG_ON(fence == NULL);
+
+			ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+						  handle, TTM_REF_USAGE);
+			DRM_ERROR("Fence copy error. Syncing.\n");
+			(void) vmw_fence_obj_wait(fence,
+						  fence->signal_mask,
+						  false, false,
+						  VMW_FENCE_WAIT_TIMEOUT);
+		}
+	}
 
-	ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
 
 	vmw_kms_cursor_post_execbuf(dev_priv);
 	ttm_read_unlock(&vmaster->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
new file mode 100644
index 000000000000..5065a140fdf8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -0,0 +1,619 @@
+/**************************************************************************
+ *
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+#define VMW_FENCE_WRAP (1 << 31)
+
+struct vmw_fence_manager {
+	int num_fence_objects;
+	struct vmw_private *dev_priv;
+	spinlock_t lock;
+	u32 next_seqno;
+	struct list_head fence_list;
+	struct work_struct work;
+	u32 user_fence_size;
+	u32 fence_size;
+	bool fifo_down;
+	struct list_head cleanup_list;
+};
+
+struct vmw_user_fence {
+	struct ttm_base_object base;
+	struct vmw_fence_obj fence;
+};
+
+/**
+ * vmw_fence_obj_destroy_locked
+ *
+ */
+
+static void vmw_fence_obj_destroy_locked(struct kref *kref)
+{
+	struct vmw_fence_obj *fence =
+		container_of(kref, struct vmw_fence_obj, kref);
+
+	struct vmw_fence_manager *fman = fence->fman;
+	unsigned int num_fences;
+
+	list_del_init(&fence->head);
+	num_fences = --fman->num_fence_objects;
+	spin_unlock_irq(&fman->lock);
+	if (fence->destroy)
+		fence->destroy(fence);
+	else
+		kfree(fence);
+
+	spin_lock_irq(&fman->lock);
+}
+
+
+/**
+ * Execute signal actions on fences recently signaled.
+ * This is done from a workqueue so we don't have to execute
+ * signal actions from atomic context.
+ */
+
+static void vmw_fence_work_func(struct work_struct *work)
+{
+	struct vmw_fence_manager *fman =
+		container_of(work, struct vmw_fence_manager, work);
+	struct list_head list;
+	struct vmw_fence_action *action, *next_action;
+
+	do {
+		INIT_LIST_HEAD(&list);
+		spin_lock_irq(&fman->lock);
+		list_splice_init(&fman->cleanup_list, &list);
+		spin_unlock_irq(&fman->lock);
+
+		if (list_empty(&list))
+			return;
+
+		/*
+		 * At this point, only we should be able to manipulate the
+		 * list heads of the actions we have on the private list.
+		 */
+
+		list_for_each_entry_safe(action, next_action, &list, head) {
+			list_del_init(&action->head);
+			action->cleanup(action);
+		}
+	} while (1);
+}
+
+struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
+{
+	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
+
+	if (unlikely(fman == NULL))
+		return NULL;
+
+	fman->dev_priv = dev_priv;
+	spin_lock_init(&fman->lock);
+	INIT_LIST_HEAD(&fman->fence_list);
+	INIT_LIST_HEAD(&fman->cleanup_list);
+	INIT_WORK(&fman->work, &vmw_fence_work_func);
+	fman->fifo_down = true;
+	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
+	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
+
+	return fman;
+}
+
+void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
+{
+	unsigned long irq_flags;
+	bool lists_empty;
+
+	(void) cancel_work_sync(&fman->work);
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	lists_empty = list_empty(&fman->fence_list) &&
+		      list_empty(&fman->cleanup_list);
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+	BUG_ON(!lists_empty);
+	kfree(fman);
+}
+
+static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
+			      struct vmw_fence_obj *fence,
+			      u32 seqno,
+			      uint32_t mask,
+			      void (*destroy) (struct vmw_fence_obj *fence))
+{
+	unsigned long irq_flags;
+	unsigned int num_fences;
+	int ret = 0;
+
+	fence->seqno = seqno;
+	INIT_LIST_HEAD(&fence->seq_passed_actions);
+	fence->fman = fman;
+	fence->signaled = 0;
+	fence->signal_mask = mask;
+	kref_init(&fence->kref);
+	fence->destroy = destroy;
+	init_waitqueue_head(&fence->queue);
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	if (unlikely(fman->fifo_down)) {
+		ret = -EBUSY;
+		goto out_unlock;
+	}
+	list_add_tail(&fence->head, &fman->fence_list);
+	num_fences = ++fman->num_fence_objects;
+
+out_unlock:
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+	return ret;
+
+}
+
+struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
+{
+	kref_get(&fence->kref);
+	return fence;
+}
+
+/**
+ * vmw_fence_obj_unreference
+ *
+ * Note that this function may not be entered with disabled irqs since
+ * it may re-enable them in the destroy function.
+ *
+ */
+void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
+{
+	struct vmw_fence_obj *fence = *fence_p;
+	struct vmw_fence_manager *fman = fence->fman;
+
+	*fence_p = NULL;
+	spin_lock_irq(&fman->lock);
+	BUG_ON(atomic_read(&fence->kref.refcount) == 0);
+	kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
+	spin_unlock_irq(&fman->lock);
+}
+
+void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
+				struct list_head *list)
+{
+	struct vmw_fence_action *action, *next_action;
+
+	list_for_each_entry_safe(action, next_action, list, head) {
+		list_del_init(&action->head);
+		if (action->seq_passed != NULL)
+			action->seq_passed(action);
+
+		/*
+		 * Add the cleanup action to the cleanup list so that
+		 * it will be performed by a worker task.
+		 */
+
+		if (action->cleanup != NULL)
+			list_add_tail(&action->head, &fman->cleanup_list);
+	}
+}
+
+void vmw_fences_update(struct vmw_fence_manager *fman, u32 seqno)
+{
+	unsigned long flags;
+	struct vmw_fence_obj *fence, *next_fence;
+	struct list_head action_list;
+
+	spin_lock_irqsave(&fman->lock, flags);
+	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
+		if (seqno - fence->seqno < VMW_FENCE_WRAP) {
+			list_del_init(&fence->head);
+			fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
+			INIT_LIST_HEAD(&action_list);
+			list_splice_init(&fence->seq_passed_actions,
+					 &action_list);
+			vmw_fences_perform_actions(fman, &action_list);
+			wake_up_all(&fence->queue);
+		}
+
+	}
+	if (!list_empty(&fman->cleanup_list))
+		(void) schedule_work(&fman->work);
+	spin_unlock_irqrestore(&fman->lock, flags);
+}
+
+
+bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
+			    uint32_t flags)
+{
+	struct vmw_fence_manager *fman = fence->fman;
+	unsigned long irq_flags;
+	uint32_t signaled;
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	signaled = fence->signaled;
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+	flags &= fence->signal_mask;
+	if ((signaled & flags) == flags)
+		return 1;
+
+	if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0) {
+		struct vmw_private *dev_priv = fman->dev_priv;
+		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		u32 seqno;
+
+		seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+		vmw_fences_update(fman, seqno);
+	}
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	signaled = fence->signaled;
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+
+	return ((signaled & flags) == flags);
+}
+
+int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
+		       uint32_t flags, bool lazy,
+		       bool interruptible, unsigned long timeout)
+{
+	struct vmw_private *dev_priv = fence->fman->dev_priv;
+	long ret;
+
+	if (likely(vmw_fence_obj_signaled(fence, flags)))
+		return 0;
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+	vmw_seqno_waiter_add(dev_priv);
+
+	if (interruptible)
+		ret = wait_event_interruptible_timeout
+			(fence->queue,
+			 vmw_fence_obj_signaled(fence, flags),
+			 timeout);
+	else
+		ret = wait_event_timeout
+			(fence->queue,
+			 vmw_fence_obj_signaled(fence, flags),
+			 timeout);
+
+	vmw_seqno_waiter_remove(dev_priv);
+
+	if (unlikely(ret == 0))
+		ret = -EBUSY;
+	else if (likely(ret > 0))
+		ret = 0;
+
+	return ret;
+}
+
+void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
+{
+	struct vmw_private *dev_priv = fence->fman->dev_priv;
+
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+}
+
+static void vmw_fence_destroy(struct vmw_fence_obj *fence)
+{
+	struct vmw_fence_manager *fman = fence->fman;
+
+	kfree(fence);
+	/*
+	 * Free kernel space accounting.
+	 */
+	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
+			    fman->fence_size);
+}
+
+int vmw_fence_create(struct vmw_fence_manager *fman,
+		     uint32_t seqno,
+		     uint32_t mask,
+		     struct vmw_fence_obj **p_fence)
+{
+	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
+	struct vmw_fence_obj *fence;
+	int ret;
+
+	ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
+				   false, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+	if (unlikely(fence == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_object;
+	}
+
+	ret = vmw_fence_obj_init(fman, fence, seqno, mask,
+				 vmw_fence_destroy);
+	if (unlikely(ret != 0))
+		goto out_err_init;
+
+	*p_fence = fence;
+	return 0;
+
+out_err_init:
+	kfree(fence);
+out_no_object:
+	ttm_mem_global_free(mem_glob, fman->fence_size);
+	return ret;
+}
+
+
+static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
+{
+	struct vmw_user_fence *ufence =
+		container_of(fence, struct vmw_user_fence, fence);
+	struct vmw_fence_manager *fman = fence->fman;
+
+	kfree(ufence);
+	/*
+	 * Free kernel space accounting.
+	 */
+	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
+			    fman->user_fence_size);
+}
+
+static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct vmw_user_fence *ufence =
+		container_of(base, struct vmw_user_fence, base);
+	struct vmw_fence_obj *fence = &ufence->fence;
+
+	*p_base = NULL;
+	vmw_fence_obj_unreference(&fence);
+}
+
+int vmw_user_fence_create(struct drm_file *file_priv,
+			  struct vmw_fence_manager *fman,
+			  uint32_t seqno,
+			  uint32_t mask,
+			  struct vmw_fence_obj **p_fence,
+			  uint32_t *p_handle)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_user_fence *ufence;
+	struct vmw_fence_obj *tmp;
+	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
+	int ret;
+
+	/*
+	 * Kernel memory space accounting, since this object may
+	 * be created by a user-space request.
+	 */
+
+	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
+				   false, false);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
+	if (unlikely(ufence == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_object;
+	}
+
+	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
+				 mask, vmw_user_fence_destroy);
+	if (unlikely(ret != 0)) {
+		kfree(ufence);
+		goto out_no_object;
+	}
+
+	/*
+	 * The base object holds a reference which is freed in
+	 * vmw_user_fence_base_release.
+	 */
+	tmp = vmw_fence_obj_reference(&ufence->fence);
+	ret = ttm_base_object_init(tfile, &ufence->base, false,
+				   VMW_RES_FENCE,
+				   &vmw_user_fence_base_release, NULL);
+
+
+	if (unlikely(ret != 0)) {
+		/*
+		 * Free the base object's reference
+		 */
+		vmw_fence_obj_unreference(&tmp);
+		goto out_err;
+	}
+
+	*p_fence = &ufence->fence;
+	*p_handle = ufence->base.hash.key;
+
+	return 0;
+out_err:
+	tmp = &ufence->fence;
+	vmw_fence_obj_unreference(&tmp);
+out_no_object:
+	ttm_mem_global_free(mem_glob, fman->user_fence_size);
+	return ret;
+}
+
+
+/**
+ * vmw_fence_fifo_down - signal all unsignaled fence objects.
+ */
+
+void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
+{
+	unsigned long irq_flags;
+	struct list_head action_list;
+	int ret;
+
+	/*
+	 * The list may be altered while we traverse it, so always
+	 * restart when we've released the fman->lock.
+	 */
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	fman->fifo_down = true;
+	while (!list_empty(&fman->fence_list)) {
+		struct vmw_fence_obj *fence =
+			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
+				   head);
+		kref_get(&fence->kref);
+		spin_unlock_irq(&fman->lock);
+
+		ret = vmw_fence_obj_wait(fence, fence->signal_mask,
+					 false, false,
+					 VMW_FENCE_WAIT_TIMEOUT);
+
+		if (unlikely(ret != 0)) {
+			list_del_init(&fence->head);
+			fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
+			INIT_LIST_HEAD(&action_list);
+			list_splice_init(&fence->seq_passed_actions,
+					 &action_list);
+			vmw_fences_perform_actions(fman, &action_list);
+			wake_up_all(&fence->queue);
+		}
+
+		spin_lock_irq(&fman->lock);
+
+		BUG_ON(!list_empty(&fence->head));
+		kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
+	}
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+}
+
+void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&fman->lock, irq_flags);
+	fman->fifo_down = false;
+	spin_unlock_irqrestore(&fman->lock, irq_flags);
+}
+
+
+int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv)
+{
+	struct drm_vmw_fence_wait_arg *arg =
+		(struct drm_vmw_fence_wait_arg *)data;
+	unsigned long timeout;
+	struct ttm_base_object *base;
+	struct vmw_fence_obj *fence;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	int ret;
+	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
+
+	/*
+	 * 64-bit division not present on 32-bit systems, so do an
+	 * approximation. (Divide by 1000000).
+	 */
+
+	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
+		(wait_timeout >> 26);
+
+	if (!arg->cookie_valid) {
+		arg->cookie_valid = 1;
+		arg->kernel_cookie = jiffies + wait_timeout;
+	}
+
+	base = ttm_base_object_lookup(tfile, arg->handle);
+	if (unlikely(base == NULL)) {
+		printk(KERN_ERR "Wait invalid fence object handle "
+		       "0x%08lx.\n",
+		       (unsigned long)arg->handle);
+		return -EINVAL;
+	}
+
+	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+
+	timeout = jiffies;
+	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
+		ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
+		       0 : -EBUSY);
+		goto out;
+	}
+
+	timeout = (unsigned long)arg->kernel_cookie - timeout;
+
+	ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);
+
+out:
+	ttm_base_object_unref(&base);
+
+	/*
+	 * Optionally unref the fence object.
+	 */
+
+	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
+		return ttm_ref_object_base_unref(tfile, arg->handle,
+						 TTM_REF_USAGE);
+	return ret;
+}
+
+int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv)
+{
+	struct drm_vmw_fence_signaled_arg *arg =
+		(struct drm_vmw_fence_signaled_arg *) data;
+	struct ttm_base_object *base;
+	struct vmw_fence_obj *fence;
+	struct vmw_fence_manager *fman;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	base = ttm_base_object_lookup(tfile, arg->handle);
+	if (unlikely(base == NULL)) {
+		printk(KERN_ERR "Fence signaled invalid fence object handle "
+		       "0x%08lx.\n",
+		       (unsigned long)arg->handle);
+		return -EINVAL;
+	}
+
+	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
+	fman = fence->fman;
+
+	arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
+	spin_lock_irq(&fman->lock);
+
+	arg->signaled_flags = fence->signaled;
+	arg->passed_seqno = dev_priv->last_read_seqno;
+	spin_unlock_irq(&fman->lock);
+
+	ttm_base_object_unref(&base);
+
+	return 0;
+}
+
+
+int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	struct drm_vmw_fence_arg *arg =
+		(struct drm_vmw_fence_arg *) data;
+
+	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+					 arg->handle,
+					 TTM_REF_USAGE);
+}
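The shift expression in vmw_fence_obj_wait_ioctl above approximates division by 10^6 as (x >> 20) + (x >> 24) - (x >> 26); since 2^-20 + 2^-24 - 2^-26 = 67/2^26 ≈ 1/1001626, the result stays within about 0.2% of x/1000000 (truncating toward zero for small inputs). A standalone user-space check (illustration only, not part of the patch):

    /* Verify the shift-based divide-by-1000000 used for the wait timeout. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t approx_div_1e6(uint64_t x)
    {
            return (x >> 20) + (x >> 24) - (x >> 26);
    }

    int main(void)
    {
            /* Sample microsecond timeouts: 1 s, one frame, 1000 s, 1 h. */
            uint64_t samples[] = { 1000000, 16666667, 1000000000, 3600000000ULL };
            unsigned i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("%llu us: exact %llu, approx %llu\n",
                           (unsigned long long)samples[i],
                           (unsigned long long)(samples[i] / 1000000),
                           (unsigned long long)approx_div_1e6(samples[i]));
            return 0;
    }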
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
new file mode 100644
index 000000000000..93074064aaf3
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -0,0 +1,105 @@
+/**************************************************************************
+ *
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _VMWGFX_FENCE_H_
+
+#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
+
+struct vmw_private;
+
+struct vmw_fence_manager;
+
+/**
+ *
+ *
+ */
+struct vmw_fence_action {
+	struct list_head head;
+	void (*seq_passed) (struct vmw_fence_action *action);
+	void (*cleanup) (struct vmw_fence_action *action);
+};
+
+struct vmw_fence_obj {
+	struct kref kref;
+	u32 seqno;
+
+	struct vmw_fence_manager *fman;
+	struct list_head head;
+	uint32_t signaled;
+	uint32_t signal_mask;
+	struct list_head seq_passed_actions;
+	void (*destroy)(struct vmw_fence_obj *fence);
+	wait_queue_head_t queue;
+};
+
+extern struct vmw_fence_manager *
+vmw_fence_manager_init(struct vmw_private *dev_priv);
+
+extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman);
+
+extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p);
+
+extern struct vmw_fence_obj *
+vmw_fence_obj_reference(struct vmw_fence_obj *fence);
+
+extern void vmw_fences_update(struct vmw_fence_manager *fman,
+			      u32 sequence);
+
+extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
+				   uint32_t flags);
+
+extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, uint32_t flags,
+			      bool lazy,
+			      bool interruptible, unsigned long timeout);
+
+extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);
+
+extern int vmw_fence_create(struct vmw_fence_manager *fman,
+			    uint32_t seqno,
+			    uint32_t mask,
+			    struct vmw_fence_obj **p_fence);
+
+extern int vmw_user_fence_create(struct drm_file *file_priv,
+				 struct vmw_fence_manager *fman,
+				 uint32_t sequence,
+				 uint32_t mask,
+				 struct vmw_fence_obj **p_fence,
+				 uint32_t *p_handle);
+
+extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
+
+extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);
+
+extern int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
+				    struct drm_file *file_priv);
+
+extern int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
+					struct drm_file *file_priv);
+
+extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
+				     struct drm_file *file_priv);
+#endif /* _VMWGFX_FENCE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 13dde06b60be..a005292a8908 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -40,8 +40,13 @@ irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	spin_unlock(&dev_priv->irq_lock);
 
-	if (status & SVGA_IRQFLAG_ANY_FENCE)
+	if (status & SVGA_IRQFLAG_ANY_FENCE) {
+		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+		vmw_fences_update(dev_priv->fman, seqno);
 		wake_up_all(&dev_priv->fence_queue);
+	}
 	if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
 		wake_up_all(&dev_priv->fifo_queue);
 
@@ -68,12 +73,12 @@ void vmw_update_seqno(struct vmw_private *dev_priv,
 			 struct vmw_fifo_state *fifo_state)
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-
 	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
 	if (dev_priv->last_read_seqno != seqno) {
 		dev_priv->last_read_seqno = seqno;
 		vmw_marker_pull(&fifo_state->marker_queue, seqno);
+		vmw_fences_update(dev_priv->fman, seqno);
 	}
 }
 
@@ -175,7 +180,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	return ret;
 }
 
-static void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
 {
 	mutex_lock(&dev_priv->hw_mutex);
 	if (dev_priv->fence_queue_waiters++ == 0) {
@@ -192,7 +197,7 @@ static void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
 	mutex_unlock(&dev_priv->hw_mutex);
 }
 
-static void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
 {
 	mutex_lock(&dev_priv->hw_mutex);
 	if (--dev_priv->fence_queue_waiters == 0) {
@@ -286,25 +291,3 @@ void vmw_irq_uninstall(struct drm_device *dev)
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 }
-
-#define VMW_FENCE_WAIT_TIMEOUT 3*HZ;
-
-int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	struct drm_vmw_fence_wait_arg *arg =
-	    (struct drm_vmw_fence_wait_arg *)data;
-	unsigned long timeout;
-
-	if (!arg->cookie_valid) {
-		arg->cookie_valid = 1;
-		arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
-	}
-
-	timeout = jiffies;
-	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
-		return -EBUSY;
-
-	timeout = (unsigned long)arg->kernel_cookie - timeout;
-	return vmw_wait_seqno(vmw_priv(dev), true, arg->seqno, true, timeout);
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 4b53803d0fa9..c1b6ffd4ce7b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -31,10 +31,6 @@
 #include "ttm/ttm_placement.h"
 #include "drmP.h"
 
-#define VMW_RES_CONTEXT ttm_driver_type0
-#define VMW_RES_SURFACE ttm_driver_type1
-#define VMW_RES_STREAM ttm_driver_type2
-
 struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
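vmw_fences_update, called from the IRQ handler above, decides whether a fence has passed with the unsigned test "seqno - fence->seqno < VMW_FENCE_WRAP", which stays correct across 32-bit wrap-around as long as the two values are less than 2^31 apart. A standalone illustration of that test (user-space, not part of the patch):

    /* The wrap-safe "has this seqno passed?" test used by vmw_fences_update.
     * Unsigned subtraction keeps the comparison valid across 32-bit wrap,
     * provided fences are retired within 2^31 sequence numbers.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define VMW_FENCE_WRAP (1u << 31)

    static int seqno_passed(uint32_t current_seqno, uint32_t fence_seqno)
    {
            return (current_seqno - fence_seqno) < VMW_FENCE_WRAP;
    }

    int main(void)
    {
            printf("%d\n", seqno_passed(100, 90));          /* 1: passed  */
            printf("%d\n", seqno_passed(90, 100));          /* 0: pending */
            /* Across the 32-bit wrap: 0xfffffff0 -> 0x10 is "later". */
            printf("%d\n", seqno_passed(0x10, 0xfffffff0)); /* 1: passed  */
            return 0;
    }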
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index c2b3909ac50a..763a7a3885a1 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -48,8 +48,12 @@
 #define DRM_VMW_UNREF_SURFACE        10
 #define DRM_VMW_REF_SURFACE          11
 #define DRM_VMW_EXECBUF              12
-#define DRM_VMW_FENCE_WAIT           13
-#define DRM_VMW_GET_3D_CAP           14
+#define DRM_VMW_GET_3D_CAP           13
+#define DRM_VMW_FENCE_WAIT           14
+#define DRM_VMW_FENCE_SIGNALED       15
+#define DRM_VMW_FENCE_UNREF          16
+#define DRM_VMW_FENCE_EVENT          17
+
 
 /*************************************************************************/
 /**
@@ -318,14 +322,23 @@ struct drm_vmw_execbuf_arg {
 	uint32_t command_size;
 	uint32_t throttle_us;
 	uint64_t fence_rep;
-	uint32_t version;
-	uint32_t flags;
+	uint32_t version;
+	uint32_t flags;
 };
 
 /**
  * struct drm_vmw_fence_rep
  *
- * @fence_seq: Fence seqno associated with a command submission.
+ * @handle: Fence object handle for fence associated with a command submission.
+ * @mask: Fence flags relevant for this fence object.
+ * @seqno: Fence sequence number in fifo. A fence object with a lower
+ * seqno will signal the EXEC flag before a fence object with a higher
+ * seqno. This can be used by user-space to avoid kernel calls to determine
+ * whether a fence has signaled the EXEC flag. Note that @seqno will
+ * wrap at 32 bits.
+ * @passed_seqno: The highest seqno number processed by the hardware
+ * so far. This can be used to mark user-space fence objects as signaled, and
+ * to determine whether a fence seqno might be stale.
  * @error: This member should've been set to -EFAULT on submission.
  * The following actions should be taken on completion:
  * error == -EFAULT: Fence communication failed. The host is synchronized.
@@ -339,9 +352,12 @@ struct drm_vmw_execbuf_arg {
  */
 
 struct drm_vmw_fence_rep {
-	uint64_t fence_seq;
-	int32_t error;
+	uint32_t handle;
+	uint32_t mask;
+	uint32_t seqno;
+	uint32_t passed_seqno;
 	uint32_t pad64;
+	int32_t error;
 };
 
 /*************************************************************************/
@@ -430,14 +446,6 @@ struct drm_vmw_unref_dmabuf_arg {
 	uint32_t pad64;
 };
 
-
-struct drm_vmw_fence_wait_arg {
-	uint64_t seqno;
-	uint64_t kernel_cookie;
-	int32_t cookie_valid;
-	int32_t pad64;
-};
-
 /*************************************************************************/
 /**
  * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
@@ -559,6 +567,7 @@ struct drm_vmw_stream_arg {
  * Return a single stream that was claimed by this process. Also makes
  * sure that the stream has been stopped.
  */
+
 /*************************************************************************/
 /**
  * DRM_VMW_GET_3D_CAP
@@ -607,4 +616,114 @@ struct drm_vmw_update_layout_arg {
 	uint64_t rects;
 };
 
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_WAIT
+ *
+ * Waits for a fence object to signal. The wait is interruptible, so that
+ * signals may be delivered during the interrupt. The wait may time out,
+ * in which case the call returns -EBUSY. If the wait is restarted,
+ * that is, restarting without resetting @cookie_valid to zero,
+ * the timeout is computed from the first call.
+ *
+ * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
+ * on:
+ * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
+ * stream have executed.
+ * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
+ * commands in the buffer given to the EXECBUF ioctl returning the fence
+ * object handle are available to user-space.
+ *
+ * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
+ * fence wait ioctl returns 0, the fence object has been unreferenced after
+ * the wait.
+ */
+
+#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
+#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)
+
+#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)
+
+/**
+ * struct drm_vmw_fence_wait_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
+ * @kernel_cookie: Set to 0 on first call. Left alone on restart.
+ * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
+ * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
+ * before returning.
+ * @flags: Fence flags to wait on.
+ * @wait_options: Options that control the behaviour of the wait ioctl.
+ *
+ * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
+ */
+
+struct drm_vmw_fence_wait_arg {
+	uint32_t handle;
+	int32_t  cookie_valid;
+	uint64_t kernel_cookie;
+	uint64_t timeout_us;
+	int32_t  lazy;
+	int32_t  flags;
+	int32_t  wait_options;
+	int32_t  pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_SIGNALED
+ *
+ * Checks if a fence object is signaled.
+ */
+
+/**
+ * struct drm_vmw_fence_signaled_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ * @flags: Fence object flags input to the DRM_VMW_FENCE_SIGNALED ioctl.
+ * @signaled: Out: Flags signaled.
+ * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
+ * EXEC flag of user-space fence objects.
+ *
+ * Input/Output argument to the DRM_VMW_FENCE_SIGNALED ioctl.
+ */
+
+struct drm_vmw_fence_signaled_arg {
+	uint32_t handle;
+	uint32_t flags;
+	int32_t signaled;
+	uint32_t passed_seqno;
+	uint32_t signaled_flags;
+	uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_UNREF
+ *
+ * Unreferences a fence object, and causes it to be destroyed if there are no
+ * other references to it.
+ *
+ */
+
+/**
+ * struct drm_vmw_fence_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ *
+ * Input argument to the DRM_VMW_FENCE_UNREF ioctl.
+ */
+
+struct drm_vmw_fence_arg {
+	uint32_t handle;
+	uint32_t pad64;
+};
+
+
 #endif
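A hedged user-space sketch of the DRM_VMW_FENCE_WAIT restart convention documented above: @cookie_valid starts at 0, and if the ioctl is interrupted by a signal the same argument block is resubmitted unchanged, so the kernel keeps timing against the cookie recorded on the first call. (Hypothetical fd and handle; libdrm's drmCommandWriteRead() assumed; not part of the patch.)

    /* Wait on a fence handle returned in drm_vmw_fence_rep. The arg block
     * is left untouched across restarts so the timeout keeps counting from
     * the first call.
     */
    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>            /* drmCommandWriteRead() from libdrm */
    #include "vmwgfx_drm.h"

    static int wait_fence(int fd, uint32_t handle, uint64_t timeout_us)
    {
            struct drm_vmw_fence_wait_arg arg;
            int ret;

            memset(&arg, 0, sizeof(arg));   /* cookie_valid = 0: first call */
            arg.handle = handle;
            arg.timeout_us = timeout_us;
            arg.flags = DRM_VMW_FENCE_FLAG_EXEC;
            arg.wait_options = DRM_VMW_WAIT_OPTION_UNREF; /* drop handle on success */

            do {
                    ret = drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT,
                                              &arg, sizeof(arg));
            } while (ret == -EINTR || ret == -EAGAIN);

            return ret;     /* 0 on signal, -EBUSY on timeout */
    }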