path: root/drivers/gpu/drm/i915/i915_vma.c
author	Dave Airlie <airlied@redhat.com>	2019-12-27 06:25:04 +0100
committer	Dave Airlie <airlied@redhat.com>	2019-12-27 06:25:04 +0100
commit	3ae3271443b337c1cd421a9b73d51c5c2de52977 (patch)
tree	2ce6c36149f45e50af5b3c321fa2b5222f9b81ae /drivers/gpu/drm/i915/i915_vma.c
parent	drm/amdgpu/display: use msleep rather than udelay for HDCP (diff)
parent	drm/i915: Update DRIVER_DATE to 20191223 (diff)
Merge tag 'drm-intel-next-2019-12-23' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
i915 features for v5.6:

- Separate hardware and uapi state (Maarten)
- Expose a number of sprite and plane formats (Ville)
- DDC symlink in HDMI connector sysfs directory (Andrzej Pietrasiewicz)
- Improve obj->mm.lock nesting lock annotation (Daniel)
  (Includes lockdep changes)
- Selftest improvements across the board (Chris)
- ICL/TGL VDSC support on DSI (Jani, Vandita)
- TGL DSB fixes (Animesh, Lucas, Tvrtko)
- VBT parsing improvements and fixes (Lucas, Matt, José, Jani, Dan Carpenter)
- Fix LPSS vs. PMIC PWM backlight use on BYT/CHT (Hans)
  (Includes ACPI+MFD changes)
- Display state, crtc, plane code refactoring (Ville)
- Set opregion chpd value to indicate the driver handles hotplug (Hans de Goede)
- DSI updates and fixes, TGL pipe D support, port mapping (José, Jani, Vandita)
- Make HDCP 2.2 support cover CFL (Juston Li)
- Fix CML PCI IDs and ULT (Shawn Lee)
- CMP-V PCH fix (Imre)
- TGL: Add another TGL PCH ID (James)
- EHL/JSL: Add new PCI IDs (James)
- Rename pipe update tracepoints (Ville)
- Fix FBC on GLK+ (Ville)
- GuC fixes and improvements (Daniele, Don Hiatt, Stuart Summers, Matthew Brost)
- Display debugfs improvements (Ville)
- Hotplug/irq fixes (Matt)
- PSR fixes and improvements (José)
- DRM_I915_GEM_MMAP_OFFSET ioctl (Abdiel)
- Static analysis fixes (Colin Ian King)
- Register sysctl path globally (Venkata Sandeep Dhanalakota)
- Introduce new macros for tracing (Venkata Sandeep Dhanalakota)
- Migrate gt towards intel_uncore_read/write (Andi)
- Add rps frequency translation helpers (Andi)
- Fix TGL transcoder clock off sequence (José)
- Fix TGL port A audio (Kai Vehmanen)
- TGL render decompression (DK)
- GEM/GT improvements and fixes across the board (Chris)
- Couple of backmerges (Jani)

Signed-off-by: Dave Airlie <airlied@redhat.com>
# gpg: Signature made Tue 24 Dec 2019 03:20:48 AM AEST
# gpg:                using RSA key D398079D26ABEE6F
# gpg: Good signature from "Jani Nikula <jani.nikula@intel.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 1565 A65B 77B0 632E 1124 E59C D398 079D 26AB EE6F
# Conflicts:
#	drivers/gpu/drm/i915/display/intel_fbc.c
#	drivers/gpu/drm/i915/gt/intel_lrc.c
#	drivers/gpu/drm/i915/i915_gem.c
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87lfr3rkry.fsf@intel.com
Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_vma.c	84
1 file changed, 70 insertions, 14 deletions
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e5512f26e20a..cbd783c31adb 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -28,7 +28,9 @@
#include "display/intel_frontbuffer.h"
#include "gt/intel_engine.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
#include "i915_globals.h"
@@ -112,6 +114,7 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
+ kref_init(&vma->ref);
mutex_init(&vma->pages_mutex);
vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
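Note on the hunk above: vma_create() now initialises a kref embedded in the vma, moving i915_vma to reference-counted lifetime management (the matching i915_vma_release() appears further down in this diff). A minimal sketch of the generic kernel kref pattern this relies on, with made-up names used purely for illustration:

/* Illustrative only: the generic kref lifetime pattern adopted here. */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_obj {
	struct kref ref;
	/* ... payload ... */
};

static void my_obj_release(struct kref *ref)
{
	struct my_obj *obj = container_of(ref, typeof(*obj), ref);

	kfree(obj);
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	kref_init(&obj->ref); /* refcount starts at 1, dropped via kref_put() */
	return obj;
}

/* Users then pair kref_get(&obj->ref) with kref_put(&obj->ref, my_obj_release). */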
@@ -290,6 +293,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_vma_work {
struct dma_fence_work base;
struct i915_vma *vma;
+ struct drm_i915_gem_object *pinned;
enum i915_cache_level cache_level;
unsigned int flags;
};
@@ -304,15 +308,21 @@ static int __vma_bind(struct dma_fence_work *work)
if (err)
atomic_or(I915_VMA_ERROR, &vma->flags);
- if (vma->obj)
- __i915_gem_object_unpin_pages(vma->obj);
-
return err;
}
+static void __vma_release(struct dma_fence_work *work)
+{
+ struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
+
+ if (vw->pinned)
+ __i915_gem_object_unpin_pages(vw->pinned);
+}
+
static const struct dma_fence_work_ops bind_ops = {
.name = "bind",
.work = __vma_bind,
+ .release = __vma_release,
};
struct i915_vma_work *i915_vma_work(void)
@@ -393,8 +403,10 @@ int i915_vma_bind(struct i915_vma *vma,
i915_active_set_exclusive(&vma->active, &work->base.dma);
work->base.dma.error = 0; /* enable the queue_work() */
- if (vma->obj)
+ if (vma->obj) {
__i915_gem_object_pin_pages(vma->obj);
+ work->pinned = vma->obj;
+ }
} else {
GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
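The hunks above work together: the object's pages are pinned when the async bind is queued and recorded in work->pinned, while the unpin moves from the .work function into a new .release callback on the work item. The pages therefore stay pinned for the whole lifetime of the bind work and are released even on error or cancellation paths. A hedged sketch of that shape, with invented names (this is not the dma_fence_work API itself):

/* Illustrative sketch: cleanup owned by the work item's release hook,
 * so it runs no matter how the work itself ends. */
struct async_work;

struct async_work_ops {
	const char *name;
	int (*work)(struct async_work *w);     /* the actual job, may fail */
	void (*release)(struct async_work *w); /* always runs at teardown */
};

struct async_work {
	const struct async_work_ops *ops;
	void *resource; /* e.g. pinned pages recorded when the work is queued */
};

static void async_work_complete(struct async_work *w)
{
	if (w->ops->work)
		w->ops->work(w);    /* an error is recorded, not fatal here */
	if (w->ops->release)
		w->ops->release(w); /* drop w->resource exactly once */
}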
@@ -939,6 +951,38 @@ err_pages:
return err;
}
+static void flush_idle_contexts(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ intel_engine_flush_barriers(engine);
+
+ intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+}
+
+int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
+{
+ struct i915_address_space *vm = vma->vm;
+ int err;
+
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+
+ do {
+ err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
+ if (err != -ENOSPC)
+ return err;
+
+ /* Unlike i915_vma_pin, we don't take no for an answer! */
+ flush_idle_contexts(vm->gt);
+ if (mutex_lock_interruptible(&vm->mutex) == 0) {
+ i915_gem_evict_vm(vm);
+ mutex_unlock(&vm->mutex);
+ }
+ } while (1);
+}
+
void i915_vma_close(struct i915_vma *vma)
{
struct intel_gt *gt = vma->vm->gt;
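The new i915_ggtt_pin() helper added above loops until the pin either succeeds or fails with something other than -ENOSPC, flushing idle contexts and evicting the GGTT between attempts. A hypothetical caller, only to illustrate the intended usage (the function below is invented for this note; PIN_MAPPABLE is an existing pin flag, and i915_ggtt_pin() adds PIN_GLOBAL internally):

/* Hypothetical caller sketch: pin a vma into the GGTT, use it, unpin. */
static int use_ggtt_vma(struct i915_vma *vma)
{
	int err;

	err = i915_ggtt_pin(vma, 0, PIN_MAPPABLE);
	if (err)
		return err; /* -ENOSPC is retried internally; other errors escape */

	/* ... access the mapping while the vma is pinned ... */

	i915_vma_unpin(vma);
	return 0;
}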
@@ -978,8 +1022,10 @@ void i915_vma_reopen(struct i915_vma *vma)
__i915_vma_remove_closed(vma);
}
-void i915_vma_destroy(struct i915_vma *vma)
+void i915_vma_release(struct kref *ref)
{
+ struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
+
if (drm_mm_node_allocated(&vma->node)) {
mutex_lock(&vma->vm->mutex);
atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
@@ -1019,7 +1065,9 @@ void i915_vma_parked(struct intel_gt *gt)
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
- if (!i915_vm_tryopen(vm)) {
+ if (i915_vm_tryopen(vm)) {
+ list_del_init(&vma->closed_link);
+ } else {
i915_gem_object_put(obj);
obj = NULL;
}
@@ -1027,7 +1075,7 @@ void i915_vma_parked(struct intel_gt *gt)
spin_unlock_irq(&gt->closed_lock);
if (obj) {
- i915_vma_destroy(vma);
+ __i915_vma_put(vma);
i915_gem_object_put(obj);
}
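In i915_vma_parked() above, the direct i915_vma_destroy() call becomes __i915_vma_put(), i.e. a drop of the new vma kref, and the existing kref_get_unless_zero() on the object is the matching "only take a reference if the object is still alive" side of the pattern. Reusing the illustrative my_obj names from the earlier sketch:

/* Illustrative: take a reference only if the refcount has not hit zero yet. */
static struct my_obj *my_obj_tryget(struct my_obj *obj)
{
	if (!obj || !kref_get_unless_zero(&obj->ref))
		return NULL; /* already being released; do not touch it */

	return obj;
}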
@@ -1054,17 +1102,16 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
- struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+ struct drm_vma_offset_node *node;
u64 vma_offset;
- lockdep_assert_held(&vma->vm->mutex);
-
if (!i915_vma_has_userfault(vma))
return;
GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
GEM_BUG_ON(!vma->obj->userfault_count);
+ node = &vma->mmo->vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
@@ -1104,8 +1151,14 @@ int i915_vma_move_to_active(struct i915_vma *vma,
return err;
if (flags & EXEC_OBJECT_WRITE) {
- if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
- i915_active_add_request(&obj->frontbuffer->write, rq);
+ struct intel_frontbuffer *front;
+
+ front = __intel_frontbuffer_get(obj);
+ if (unlikely(front)) {
+ if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
+ i915_active_add_request(&front->write, rq);
+ intel_frontbuffer_put(front);
+ }
dma_resv_add_excl_fence(vma->resv, &rq->fence);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
@@ -1146,7 +1199,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
- return -EBUSY;
+ return -EAGAIN;
}
GEM_BUG_ON(i915_vma_is_active(vma));
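The hunk above changes the "still pinned" result of __i915_vma_unbind() from -EBUSY to -EAGAIN, the conventional "retry later" error code. Purely as an illustration of how a caller might treat that distinction (this caller is invented, not part of the patch):

/* Hypothetical caller sketch: treat -EAGAIN as "leave it for a later pass". */
static int try_unbind(struct i915_vma *vma)
{
	int err = i915_vma_unbind(vma);

	if (err == -EAGAIN)
		return 0; /* vma is still pinned; a later attempt may succeed */

	return err; /* 0 on success, or a real error */
}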
@@ -1186,7 +1239,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
i915_vma_detach(vma);
vma_unbind_pages(vma);
- drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */
+ drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return 0;
}
@@ -1195,6 +1248,9 @@ int i915_vma_unbind(struct i915_vma *vma)
struct i915_address_space *vm = vma->vm;
int err;
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
+
err = mutex_lock_interruptible(&vm->mutex);
if (err)
return err;