author     Linus Torvalds <torvalds@linux-foundation.org>  2013-09-05 19:17:26 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-05 19:17:26 +0200
commit     a09e9a7a4b907f2dfa9bdb2b98a1828ab4b340b2 (patch)
tree       c7a2df4e887573648eeaf8f7939889046990d3f6 /drivers/gpu/drm/i915
parent     Merge tag 'fbdev-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tomba... (diff)
parent     Merge branch 'exynos-drm-next' of git://git.kernel.org/pub/scm/linux/kernel/g... (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm tree changes from Dave Airlie:
"This is the main drm pull request, I have some overlap with sound and
arm-soc, the sound patch is acked and may conflict based on -next
reports but should be a trivial fixup, which I'll leave to you!
Highlights:
- new drivers:
MSM driver from Rob Clark
- non-drm:
switcheroo and HDMI audio driver support for secondary GPU
poweroff, so drivers can use runtime PM to power off the GPUs. This
can save 5 or 6W on some Optimus laptops (a sketch of the driver-side
pattern follows this message).
- drm core:
combined GEM and TTM VMA manager
per-filp mmap permission tracking
initial rendernode support (via a runtime enable for now, until the API is stable),
remove old proc support,
lots of cleanups of legacy code
HDMI vendor infoframes and 4k modes (see the second sketch after this message)
lots of gem/prime locking and races fixes
async pageflip scaffolding
drm bridge objects
- i915:
Haswell PC8+ support and eLLC support, HDMI 4K support, initial
per-process VMA pieces, watermark reworks, convert to generic hdmi
infoframes, encoder reworking, fastboot support,
- radeon:
CIK PM support, remove 3d blit code in favour of DMA engines,
Berlin GPU support, HDMI audio fixes
- nouveau:
secondary GPU power down support for optimus laptops, lots of
fixes, use MSI, VP3 engine support
- exynos:
runtime PM support for g2d, DT support, removal of non-DT code,
- tda998x i2c driver:
lots of fixes for sync issues
- gma500:
lots of cleanups
- rcar:
add LVDS support, fbdev emulation,
- tegra:
just minor fixes"
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (684 commits)
drm/exynos: Fix build error with exynos_drm_connector.c
drm/exynos: Remove non-DT support in exynos_drm_fimd
drm/exynos: Remove non-DT support in exynos_hdmi
drm/exynos: Remove non-DT support in exynos_drm_g2d
drm/exynos: Remove non-DT support in exynos_hdmiphy
drm/exynos: Remove non-DT support in exynos_ddc
drm/exynos: Make Exynos DRM drivers depend on OF
drm/exynos: Consider fallback option to allocation fail
drm/exynos: fimd: move platform data parsing to separate function
drm/exynos: fimd: get signal polarities from device tree
drm/exynos: fimd: replace struct fb_videomode with videomode
drm/exynos: check a pixel format to a particular window layer
drm/exynos: fix fimd pixel format setting
drm/exynos: Add NULL pointer check
drm/exynos: Remove redundant error messages
drm/exynos: Add missing of.h header include
drm/exynos: Remove redundant NULL check in exynos_drm_buf
drm/exynos: add device tree support for rotator
drm/exynos: Add missing includes
drm/exynos: add runtime pm interfaces to g2d driver
...
Diffstat (limited to 'drivers/gpu/drm/i915')
40 files changed, 6871 insertions, 4488 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 40034ecefd3b..b8449a84a0dc 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -5,6 +5,7 @@ ccflags-y := -Iinclude/drm i915-y := i915_drv.o i915_dma.o i915_irq.o \ i915_debugfs.o \ + i915_gpu_error.o \ i915_suspend.o \ i915_gem.o \ i915_gem_context.o \ @@ -37,6 +38,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \ intel_sprite.o \ intel_opregion.o \ intel_sideband.o \ + intel_uncore.o \ dvo_ch7xxx.o \ dvo_ch7017.o \ dvo_ivch.o \ diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c index 757e0fa11043..af42e94f6846 100644 --- a/drivers/gpu/drm/i915/dvo_ch7xxx.c +++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c @@ -307,7 +307,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo, idf |= CH7xxx_IDF_HSP; if (mode->flags & DRM_MODE_FLAG_PVSYNC) - idf |= CH7xxx_IDF_HSP; + idf |= CH7xxx_IDF_VSP; ch7xxx_writeb(dvo, CH7xxx_IDF, idf); } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 47d6c748057e..55ab9246e1b9 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -30,7 +30,8 @@ #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/export.h> -#include <generated/utsrelease.h> +#include <linux/list_sort.h> +#include <asm/msr-index.h> #include <drm/drmP.h> #include "intel_drv.h" #include "intel_ringbuffer.h" @@ -90,41 +91,45 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj) } } -static const char *cache_level_str(int type) +static inline const char *get_global_flag(struct drm_i915_gem_object *obj) { - switch (type) { - case I915_CACHE_NONE: return " uncached"; - case I915_CACHE_LLC: return " snooped (LLC)"; - case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)"; - default: return ""; - } + return obj->has_global_gtt_mapping ? "g" : " "; } static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { - seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", + struct i915_vma *vma; + seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s", &obj->base, get_pin_flag(obj), get_tiling_flag(obj), + get_global_flag(obj), obj->base.size / 1024, obj->base.read_domains, obj->base.write_domain, obj->last_read_seqno, obj->last_write_seqno, obj->last_fenced_seqno, - cache_level_str(obj->cache_level), + i915_cache_level_str(obj->cache_level), obj->dirty ? " dirty" : "", obj->madv == I915_MADV_DONTNEED ? 
" purgeable" : ""); if (obj->base.name) seq_printf(m, " (name: %d)", obj->base.name); if (obj->pin_count) seq_printf(m, " (pinned x %d)", obj->pin_count); + if (obj->pin_display) + seq_printf(m, " (display)"); if (obj->fence_reg != I915_FENCE_REG_NONE) seq_printf(m, " (fence: %d)", obj->fence_reg); - if (obj->gtt_space != NULL) - seq_printf(m, " (gtt offset: %08x, size: %08x)", - obj->gtt_offset, (unsigned int)obj->gtt_space->size); + list_for_each_entry(vma, &obj->vma_list, vma_link) { + if (!i915_is_ggtt(vma->vm)) + seq_puts(m, " (pp"); + else + seq_puts(m, " (g"); + seq_printf(m, "gtt offset: %08lx, size: %08lx)", + vma->node.start, vma->node.size); + } if (obj->stolen) seq_printf(m, " (stolen: %08lx)", obj->stolen->start); if (obj->pin_mappable || obj->fault_mappable) { @@ -146,8 +151,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) uintptr_t list = (uintptr_t) node->info_ent->data; struct list_head *head; struct drm_device *dev = node->minor->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_address_space *vm = &dev_priv->gtt.base; + struct i915_vma *vma; size_t total_obj_size, total_gtt_size; int count, ret; @@ -155,14 +161,15 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) if (ret) return ret; + /* FIXME: the user of this interface might want more than just GGTT */ switch (list) { case ACTIVE_LIST: - seq_printf(m, "Active:\n"); - head = &dev_priv->mm.active_list; + seq_puts(m, "Active:\n"); + head = &vm->active_list; break; case INACTIVE_LIST: - seq_printf(m, "Inactive:\n"); - head = &dev_priv->mm.inactive_list; + seq_puts(m, "Inactive:\n"); + head = &vm->inactive_list; break; default: mutex_unlock(&dev->struct_mutex); @@ -170,14 +177,75 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) } total_obj_size = total_gtt_size = count = 0; - list_for_each_entry(obj, head, mm_list) { + list_for_each_entry(vma, head, mm_list) { seq_printf(m, " "); - describe_obj(m, obj); + describe_obj(m, vma->obj); seq_printf(m, "\n"); + total_obj_size += vma->obj->base.size; + total_gtt_size += vma->node.size; + count++; + } + mutex_unlock(&dev->struct_mutex); + + seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", + count, total_obj_size, total_gtt_size); + return 0; +} + +static int obj_rank_by_stolen(void *priv, + struct list_head *A, struct list_head *B) +{ + struct drm_i915_gem_object *a = + container_of(A, struct drm_i915_gem_object, obj_exec_link); + struct drm_i915_gem_object *b = + container_of(B, struct drm_i915_gem_object, obj_exec_link); + + return a->stolen->start - b->stolen->start; +} + +static int i915_gem_stolen_list_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj; + size_t total_obj_size, total_gtt_size; + LIST_HEAD(stolen); + int count, ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + total_obj_size = total_gtt_size = count = 0; + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + if (obj->stolen == NULL) + continue; + + list_add(&obj->obj_exec_link, &stolen); + + total_obj_size += obj->base.size; + total_gtt_size += i915_gem_obj_ggtt_size(obj); + count++; + } + list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { + if (obj->stolen == 
NULL) + continue; + + list_add(&obj->obj_exec_link, &stolen); + total_obj_size += obj->base.size; - total_gtt_size += obj->gtt_space->size; count++; } + list_sort(NULL, &stolen, obj_rank_by_stolen); + seq_puts(m, "Stolen:\n"); + while (!list_empty(&stolen)) { + obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link); + seq_puts(m, " "); + describe_obj(m, obj); + seq_putc(m, '\n'); + list_del_init(&obj->obj_exec_link); + } mutex_unlock(&dev->struct_mutex); seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n", @@ -187,10 +255,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) #define count_objects(list, member) do { \ list_for_each_entry(obj, list, member) { \ - size += obj->gtt_space->size; \ + size += i915_gem_obj_ggtt_size(obj); \ ++count; \ if (obj->map_and_fenceable) { \ - mappable_size += obj->gtt_space->size; \ + mappable_size += i915_gem_obj_ggtt_size(obj); \ ++mappable_count; \ } \ } \ @@ -209,7 +277,7 @@ static int per_file_stats(int id, void *ptr, void *data) stats->count++; stats->total += obj->base.size; - if (obj->gtt_space) { + if (i915_gem_obj_ggtt_bound(obj)) { if (!list_empty(&obj->ring_list)) stats->active += obj->base.size; else @@ -222,6 +290,17 @@ static int per_file_stats(int id, void *ptr, void *data) return 0; } +#define count_vmas(list, member) do { \ + list_for_each_entry(vma, list, member) { \ + size += i915_gem_obj_ggtt_size(vma->obj); \ + ++count; \ + if (vma->obj->map_and_fenceable) { \ + mappable_size += i915_gem_obj_ggtt_size(vma->obj); \ + ++mappable_count; \ + } \ + } \ +} while (0) + static int i915_gem_object_info(struct seq_file *m, void* data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -230,7 +309,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data) u32 count, mappable_count, purgeable_count; size_t size, mappable_size, purgeable_size; struct drm_i915_gem_object *obj; + struct i915_address_space *vm = &dev_priv->gtt.base; struct drm_file *file; + struct i915_vma *vma; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -247,12 +328,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data) count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; - count_objects(&dev_priv->mm.active_list, mm_list); + count_vmas(&vm->active_list, mm_list); seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; - count_objects(&dev_priv->mm.inactive_list, mm_list); + count_vmas(&vm->inactive_list, mm_list); seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", count, mappable_count, size, mappable_size); @@ -267,11 +348,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data) size = count = mappable_size = mappable_count = 0; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { if (obj->fault_mappable) { - size += obj->gtt_space->size; + size += i915_gem_obj_ggtt_size(obj); ++count; } if (obj->pin_mappable) { - mappable_size += obj->gtt_space->size; + mappable_size += i915_gem_obj_ggtt_size(obj); ++mappable_count; } if (obj->madv == I915_MADV_DONTNEED) { @@ -287,10 +368,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data) count, size); seq_printf(m, "%zu [%lu] gtt total\n", - dev_priv->gtt.total, - dev_priv->gtt.mappable_end - dev_priv->gtt.start); + dev_priv->gtt.base.total, + dev_priv->gtt.mappable_end - dev_priv->gtt.base.start); - seq_printf(m, "\n"); + seq_putc(m, '\n'); 
list_for_each_entry_reverse(file, &dev->filelist, lhead) { struct file_stats stats; @@ -310,7 +391,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) return 0; } -static int i915_gem_gtt_info(struct seq_file *m, void* data) +static int i915_gem_gtt_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; @@ -329,11 +410,11 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data) if (list == PINNED_LIST && obj->pin_count == 0) continue; - seq_printf(m, " "); + seq_puts(m, " "); describe_obj(m, obj); - seq_printf(m, "\n"); + seq_putc(m, '\n'); total_obj_size += obj->base.size; - total_gtt_size += obj->gtt_space->size; + total_gtt_size += i915_gem_obj_ggtt_size(obj); count++; } @@ -371,20 +452,22 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) pipe, plane); } if (work->enable_stall_check) - seq_printf(m, "Stall check enabled, "); + seq_puts(m, "Stall check enabled, "); else - seq_printf(m, "Stall check waiting for page flip ioctl, "); + seq_puts(m, "Stall check waiting for page flip ioctl, "); seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); if (work->old_fb_obj) { struct drm_i915_gem_object *obj = work->old_fb_obj; if (obj) - seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); + seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n", + i915_gem_obj_ggtt_offset(obj)); } if (work->pending_flip_obj) { struct drm_i915_gem_object *obj = work->pending_flip_obj; if (obj) - seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); + seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n", + i915_gem_obj_ggtt_offset(obj)); } } spin_unlock_irqrestore(&dev->event_lock, flags); @@ -424,7 +507,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data) mutex_unlock(&dev->struct_mutex); if (count == 0) - seq_printf(m, "No requests\n"); + seq_puts(m, "No requests\n"); return 0; } @@ -574,10 +657,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) seq_printf(m, "Fence %d, pin count = %d, object = ", i, dev_priv->fence_regs[i].pin_count); if (obj == NULL) - seq_printf(m, "unused"); + seq_puts(m, "unused"); else describe_obj(m, obj); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } mutex_unlock(&dev->struct_mutex); @@ -606,361 +689,6 @@ static int i915_hws_info(struct seq_file *m, void *data) return 0; } -static const char *ring_str(int ring) -{ - switch (ring) { - case RCS: return "render"; - case VCS: return "bsd"; - case BCS: return "blt"; - case VECS: return "vebox"; - default: return ""; - } -} - -static const char *pin_flag(int pinned) -{ - if (pinned > 0) - return " P"; - else if (pinned < 0) - return " p"; - else - return ""; -} - -static const char *tiling_flag(int tiling) -{ - switch (tiling) { - default: - case I915_TILING_NONE: return ""; - case I915_TILING_X: return " X"; - case I915_TILING_Y: return " Y"; - } -} - -static const char *dirty_flag(int dirty) -{ - return dirty ? " dirty" : ""; -} - -static const char *purgeable_flag(int purgeable) -{ - return purgeable ? 
" purgeable" : ""; -} - -static bool __i915_error_ok(struct drm_i915_error_state_buf *e) -{ - - if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) { - e->err = -ENOSPC; - return false; - } - - if (e->bytes == e->size - 1 || e->err) - return false; - - return true; -} - -static bool __i915_error_seek(struct drm_i915_error_state_buf *e, - unsigned len) -{ - if (e->pos + len <= e->start) { - e->pos += len; - return false; - } - - /* First vsnprintf needs to fit in its entirety for memmove */ - if (len >= e->size) { - e->err = -EIO; - return false; - } - - return true; -} - -static void __i915_error_advance(struct drm_i915_error_state_buf *e, - unsigned len) -{ - /* If this is first printf in this window, adjust it so that - * start position matches start of the buffer - */ - - if (e->pos < e->start) { - const size_t off = e->start - e->pos; - - /* Should not happen but be paranoid */ - if (off > len || e->bytes) { - e->err = -EIO; - return; - } - - memmove(e->buf, e->buf + off, len - off); - e->bytes = len - off; - e->pos = e->start; - return; - } - - e->bytes += len; - e->pos += len; -} - -static void i915_error_vprintf(struct drm_i915_error_state_buf *e, - const char *f, va_list args) -{ - unsigned len; - - if (!__i915_error_ok(e)) - return; - - /* Seek the first printf which is hits start position */ - if (e->pos < e->start) { - len = vsnprintf(NULL, 0, f, args); - if (!__i915_error_seek(e, len)) - return; - } - - len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args); - if (len >= e->size - e->bytes) - len = e->size - e->bytes - 1; - - __i915_error_advance(e, len); -} - -static void i915_error_puts(struct drm_i915_error_state_buf *e, - const char *str) -{ - unsigned len; - - if (!__i915_error_ok(e)) - return; - - len = strlen(str); - - /* Seek the first printf which is hits start position */ - if (e->pos < e->start) { - if (!__i915_error_seek(e, len)) - return; - } - - if (len >= e->size - e->bytes) - len = e->size - e->bytes - 1; - memcpy(e->buf + e->bytes, str, len); - - __i915_error_advance(e, len); -} - -void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) -{ - va_list args; - - va_start(args, f); - i915_error_vprintf(e, f, args); - va_end(args); -} - -#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) -#define err_puts(e, s) i915_error_puts(e, s) - -static void print_error_buffers(struct drm_i915_error_state_buf *m, - const char *name, - struct drm_i915_error_buffer *err, - int count) -{ - err_printf(m, "%s [%d]:\n", name, count); - - while (count--) { - err_printf(m, " %08x %8u %02x %02x %x %x", - err->gtt_offset, - err->size, - err->read_domains, - err->write_domain, - err->rseqno, err->wseqno); - err_puts(m, pin_flag(err->pinned)); - err_puts(m, tiling_flag(err->tiling)); - err_puts(m, dirty_flag(err->dirty)); - err_puts(m, purgeable_flag(err->purgeable)); - err_puts(m, err->ring != -1 ? 
" " : ""); - err_puts(m, ring_str(err->ring)); - err_puts(m, cache_level_str(err->cache_level)); - - if (err->name) - err_printf(m, " (name: %d)", err->name); - if (err->fence_reg != I915_FENCE_REG_NONE) - err_printf(m, " (fence: %d)", err->fence_reg); - - err_puts(m, "\n"); - err++; - } -} - -static void i915_ring_error_state(struct drm_i915_error_state_buf *m, - struct drm_device *dev, - struct drm_i915_error_state *error, - unsigned ring) -{ - BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ - err_printf(m, "%s command stream:\n", ring_str(ring)); - err_printf(m, " HEAD: 0x%08x\n", error->head[ring]); - err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); - err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); - err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); - err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); - err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); - err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); - if (ring == RCS && INTEL_INFO(dev)->gen >= 4) - err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); - - if (INTEL_INFO(dev)->gen >= 4) - err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); - err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); - err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); - if (INTEL_INFO(dev)->gen >= 6) { - err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); - err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); - err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", - error->semaphore_mboxes[ring][0], - error->semaphore_seqno[ring][0]); - err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", - error->semaphore_mboxes[ring][1], - error->semaphore_seqno[ring][1]); - } - err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); - err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); - err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); - err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); -} - -struct i915_error_state_file_priv { - struct drm_device *dev; - struct drm_i915_error_state *error; -}; - - -static int i915_error_state(struct i915_error_state_file_priv *error_priv, - struct drm_i915_error_state_buf *m) - -{ - struct drm_device *dev = error_priv->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_error_state *error = error_priv->error; - struct intel_ring_buffer *ring; - int i, j, page, offset, elt; - - if (!error) { - err_printf(m, "no error state collected\n"); - return 0; - } - - err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, - error->time.tv_usec); - err_printf(m, "Kernel: " UTS_RELEASE "\n"); - err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); - err_printf(m, "EIR: 0x%08x\n", error->eir); - err_printf(m, "IER: 0x%08x\n", error->ier); - err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); - err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); - err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); - err_printf(m, "CCID: 0x%08x\n", error->ccid); - - for (i = 0; i < dev_priv->num_fence_regs; i++) - err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); - - for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) - err_printf(m, " INSTDONE_%d: 0x%08x\n", i, - error->extra_instdone[i]); - - if (INTEL_INFO(dev)->gen >= 6) { - err_printf(m, "ERROR: 0x%08x\n", error->error); - err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); - } - - if (INTEL_INFO(dev)->gen == 7) - err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); - - for_each_ring(ring, dev_priv, i) - i915_ring_error_state(m, dev, error, i); - - 
if (error->active_bo) - print_error_buffers(m, "Active", - error->active_bo, - error->active_bo_count); - - if (error->pinned_bo) - print_error_buffers(m, "Pinned", - error->pinned_bo, - error->pinned_bo_count); - - for (i = 0; i < ARRAY_SIZE(error->ring); i++) { - struct drm_i915_error_object *obj; - - if ((obj = error->ring[i].batchbuffer)) { - err_printf(m, "%s --- gtt_offset = 0x%08x\n", - dev_priv->ring[i].name, - obj->gtt_offset); - offset = 0; - for (page = 0; page < obj->page_count; page++) { - for (elt = 0; elt < PAGE_SIZE/4; elt++) { - err_printf(m, "%08x : %08x\n", offset, - obj->pages[page][elt]); - offset += 4; - } - } - } - - if (error->ring[i].num_requests) { - err_printf(m, "%s --- %d requests\n", - dev_priv->ring[i].name, - error->ring[i].num_requests); - for (j = 0; j < error->ring[i].num_requests; j++) { - err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", - error->ring[i].requests[j].seqno, - error->ring[i].requests[j].jiffies, - error->ring[i].requests[j].tail); - } - } - - if ((obj = error->ring[i].ringbuffer)) { - err_printf(m, "%s --- ringbuffer = 0x%08x\n", - dev_priv->ring[i].name, - obj->gtt_offset); - offset = 0; - for (page = 0; page < obj->page_count; page++) { - for (elt = 0; elt < PAGE_SIZE/4; elt++) { - err_printf(m, "%08x : %08x\n", - offset, - obj->pages[page][elt]); - offset += 4; - } - } - } - - obj = error->ring[i].ctx; - if (obj) { - err_printf(m, "%s --- HW Context = 0x%08x\n", - dev_priv->ring[i].name, - obj->gtt_offset); - offset = 0; - for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { - err_printf(m, "[%04x] %08x %08x %08x %08x\n", - offset, - obj->pages[0][elt], - obj->pages[0][elt+1], - obj->pages[0][elt+2], - obj->pages[0][elt+3]); - offset += 16; - } - } - } - - if (error->overlay) - intel_overlay_print_error_state(m, error->overlay); - - if (error->display) - intel_display_print_error_state(m, dev, error->display); - - return 0; -} - static ssize_t i915_error_state_write(struct file *filp, const char __user *ubuf, @@ -986,9 +714,7 @@ i915_error_state_write(struct file *filp, static int i915_error_state_open(struct inode *inode, struct file *file) { struct drm_device *dev = inode->i_private; - drm_i915_private_t *dev_priv = dev->dev_private; struct i915_error_state_file_priv *error_priv; - unsigned long flags; error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL); if (!error_priv) @@ -996,11 +722,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file) error_priv->dev = dev; - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - error_priv->error = dev_priv->gpu_error.first_error; - if (error_priv->error) - kref_get(&error_priv->error->ref); - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + i915_error_state_get(dev, error_priv); file->private_data = error_priv; @@ -1011,8 +733,7 @@ static int i915_error_state_release(struct inode *inode, struct file *file) { struct i915_error_state_file_priv *error_priv = file->private_data; - if (error_priv->error) - kref_put(&error_priv->error->ref, i915_error_state_free); + i915_error_state_put(error_priv); kfree(error_priv); return 0; @@ -1025,40 +746,15 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, struct drm_i915_error_state_buf error_str; loff_t tmp_pos = 0; ssize_t ret_count = 0; - int ret = 0; - - memset(&error_str, 0, sizeof(error_str)); - - /* We need to have enough room to store any i915_error_state printf - * so that we can move it to start position. - */ - error_str.size = count + 1 > PAGE_SIZE ? 
count + 1 : PAGE_SIZE; - error_str.buf = kmalloc(error_str.size, - GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN); - - if (error_str.buf == NULL) { - error_str.size = PAGE_SIZE; - error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY); - } - - if (error_str.buf == NULL) { - error_str.size = 128; - error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY); - } - - if (error_str.buf == NULL) - return -ENOMEM; - - error_str.start = *pos; + int ret; - ret = i915_error_state(error_priv, &error_str); + ret = i915_error_state_buf_init(&error_str, count, *pos); if (ret) - goto out; + return ret; - if (error_str.bytes == 0 && error_str.err) { - ret = error_str.err; + ret = i915_error_state_to_str(&error_str, error_priv); + if (ret) goto out; - } ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos, error_str.buf, @@ -1069,7 +765,7 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf, else *pos = error_str.start + ret_count; out: - kfree(error_str.buf); + i915_error_state_buf_release(&error_str); return ret ?: ret_count; } @@ -1246,7 +942,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) (freq_sts >> 8) & 0xff)); mutex_unlock(&dev_priv->rps.hw_lock); } else { - seq_printf(m, "no P-state info available\n"); + seq_puts(m, "no P-state info available\n"); } return 0; @@ -1341,28 +1037,28 @@ static int ironlake_drpc_info(struct seq_file *m) seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); seq_printf(m, "Render standby enabled: %s\n", (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes"); - seq_printf(m, "Current RS state: "); + seq_puts(m, "Current RS state: "); switch (rstdbyctl & RSX_STATUS_MASK) { case RSX_STATUS_ON: - seq_printf(m, "on\n"); + seq_puts(m, "on\n"); break; case RSX_STATUS_RC1: - seq_printf(m, "RC1\n"); + seq_puts(m, "RC1\n"); break; case RSX_STATUS_RC1E: - seq_printf(m, "RC1E\n"); + seq_puts(m, "RC1E\n"); break; case RSX_STATUS_RS1: - seq_printf(m, "RS1\n"); + seq_puts(m, "RS1\n"); break; case RSX_STATUS_RS2: - seq_printf(m, "RS2 (RC6)\n"); + seq_puts(m, "RS2 (RC6)\n"); break; case RSX_STATUS_RS3: - seq_printf(m, "RC3 (RC6+)\n"); + seq_puts(m, "RC3 (RC6+)\n"); break; default: - seq_printf(m, "unknown\n"); + seq_puts(m, "unknown\n"); break; } @@ -1377,20 +1073,19 @@ static int gen6_drpc_info(struct seq_file *m) struct drm_i915_private *dev_priv = dev->dev_private; u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; unsigned forcewake_count; - int count=0, ret; - + int count = 0, ret; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - spin_lock_irq(&dev_priv->gt_lock); - forcewake_count = dev_priv->forcewake_count; - spin_unlock_irq(&dev_priv->gt_lock); + spin_lock_irq(&dev_priv->uncore.lock); + forcewake_count = dev_priv->uncore.forcewake_count; + spin_unlock_irq(&dev_priv->uncore.lock); if (forcewake_count) { - seq_printf(m, "RC information inaccurate because somebody " - "holds a forcewake reference \n"); + seq_puts(m, "RC information inaccurate because somebody " + "holds a forcewake reference \n"); } else { /* NB: we cannot use forcewake, else we read the wrong values */ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) @@ -1399,7 +1094,7 @@ static int gen6_drpc_info(struct seq_file *m) } gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS); - trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4); + trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); rpmodectl1 = I915_READ(GEN6_RP_CONTROL); rcctl1 = I915_READ(GEN6_RC_CONTROL); @@ -1423,25 +1118,25 @@ static 
int gen6_drpc_info(struct seq_file *m) yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); seq_printf(m, "Deepest RC6 Enabled: %s\n", yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); - seq_printf(m, "Current RC state: "); + seq_puts(m, "Current RC state: "); switch (gt_core_status & GEN6_RCn_MASK) { case GEN6_RC0: if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) - seq_printf(m, "Core Power Down\n"); + seq_puts(m, "Core Power Down\n"); else - seq_printf(m, "on\n"); + seq_puts(m, "on\n"); break; case GEN6_RC3: - seq_printf(m, "RC3\n"); + seq_puts(m, "RC3\n"); break; case GEN6_RC6: - seq_printf(m, "RC6\n"); + seq_puts(m, "RC6\n"); break; case GEN6_RC7: - seq_printf(m, "RC7\n"); + seq_puts(m, "RC7\n"); break; default: - seq_printf(m, "Unknown\n"); + seq_puts(m, "Unknown\n"); break; } @@ -1485,43 +1180,52 @@ static int i915_fbc_status(struct seq_file *m, void *unused) drm_i915_private_t *dev_priv = dev->dev_private; if (!I915_HAS_FBC(dev)) { - seq_printf(m, "FBC unsupported on this chipset\n"); + seq_puts(m, "FBC unsupported on this chipset\n"); return 0; } if (intel_fbc_enabled(dev)) { - seq_printf(m, "FBC enabled\n"); + seq_puts(m, "FBC enabled\n"); } else { - seq_printf(m, "FBC disabled: "); - switch (dev_priv->no_fbc_reason) { + seq_puts(m, "FBC disabled: "); + switch (dev_priv->fbc.no_fbc_reason) { + case FBC_OK: + seq_puts(m, "FBC actived, but currently disabled in hardware"); + break; + case FBC_UNSUPPORTED: + seq_puts(m, "unsupported by this chipset"); + break; case FBC_NO_OUTPUT: - seq_printf(m, "no outputs"); + seq_puts(m, "no outputs"); break; case FBC_STOLEN_TOO_SMALL: - seq_printf(m, "not enough stolen memory"); + seq_puts(m, "not enough stolen memory"); break; case FBC_UNSUPPORTED_MODE: - seq_printf(m, "mode not supported"); + seq_puts(m, "mode not supported"); break; case FBC_MODE_TOO_LARGE: - seq_printf(m, "mode too large"); + seq_puts(m, "mode too large"); break; case FBC_BAD_PLANE: - seq_printf(m, "FBC unsupported on plane"); + seq_puts(m, "FBC unsupported on plane"); break; case FBC_NOT_TILED: - seq_printf(m, "scanout buffer not tiled"); + seq_puts(m, "scanout buffer not tiled"); break; case FBC_MULTIPLE_PIPES: - seq_printf(m, "multiple pipes are enabled"); + seq_puts(m, "multiple pipes are enabled"); break; case FBC_MODULE_PARAM: - seq_printf(m, "disabled per module param (default off)"); + seq_puts(m, "disabled per module param (default off)"); + break; + case FBC_CHIP_DEFAULT: + seq_puts(m, "disabled per chip default"); break; default: - seq_printf(m, "unknown reason"); + seq_puts(m, "unknown reason"); } - seq_printf(m, "\n"); + seq_putc(m, '\n'); } return 0; } @@ -1604,7 +1308,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) int gpu_freq, ia_freq; if (!(IS_GEN6(dev) || IS_GEN7(dev))) { - seq_printf(m, "unsupported on this chipset\n"); + seq_puts(m, "unsupported on this chipset\n"); return 0; } @@ -1612,7 +1316,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) if (ret) return ret; - seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); + seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); for (gpu_freq = dev_priv->rps.min_delay; gpu_freq <= dev_priv->rps.max_delay; @@ -1701,7 +1405,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fb->base.bits_per_pixel, atomic_read(&fb->base.refcount.refcount)); describe_obj(m, fb->obj); - seq_printf(m, "\n"); + seq_putc(m, '\n'); mutex_unlock(&dev->mode_config.mutex); mutex_lock(&dev->mode_config.fb_lock); @@ -1716,7 
+1420,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fb->base.bits_per_pixel, atomic_read(&fb->base.refcount.refcount)); describe_obj(m, fb->obj); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } mutex_unlock(&dev->mode_config.fb_lock); @@ -1736,22 +1440,22 @@ static int i915_context_status(struct seq_file *m, void *unused) return ret; if (dev_priv->ips.pwrctx) { - seq_printf(m, "power context "); + seq_puts(m, "power context "); describe_obj(m, dev_priv->ips.pwrctx); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } if (dev_priv->ips.renderctx) { - seq_printf(m, "render context "); + seq_puts(m, "render context "); describe_obj(m, dev_priv->ips.renderctx); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } for_each_ring(ring, dev_priv, i) { if (ring->default_context) { seq_printf(m, "HW default context %s ring ", ring->name); describe_obj(m, ring->default_context->obj); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } } @@ -1767,9 +1471,9 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = dev->dev_private; unsigned forcewake_count; - spin_lock_irq(&dev_priv->gt_lock); - forcewake_count = dev_priv->forcewake_count; - spin_unlock_irq(&dev_priv->gt_lock); + spin_lock_irq(&dev_priv->uncore.lock); + forcewake_count = dev_priv->uncore.forcewake_count; + spin_unlock_irq(&dev_priv->uncore.lock); seq_printf(m, "forcewake count = %u\n", forcewake_count); @@ -1778,7 +1482,7 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) static const char *swizzle_string(unsigned swizzle) { - switch(swizzle) { + switch (swizzle) { case I915_BIT_6_SWIZZLE_NONE: return "none"; case I915_BIT_6_SWIZZLE_9: @@ -1868,7 +1572,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data) if (dev_priv->mm.aliasing_ppgtt) { struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - seq_printf(m, "aliasing PPGTT:\n"); + seq_puts(m, "aliasing PPGTT:\n"); seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset); } seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK)); @@ -1886,7 +1590,7 @@ static int i915_dpio_info(struct seq_file *m, void *data) if (!IS_VALLEYVIEW(dev)) { - seq_printf(m, "unsupported\n"); + seq_puts(m, "unsupported\n"); return 0; } @@ -1924,6 +1628,194 @@ static int i915_dpio_info(struct seq_file *m, void *data) return 0; } +static int i915_llc(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Size calculation for LLC is a bit of a pain. Ignore for now. 
*/ + seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); + seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size); + + return 0; +} + +static int i915_edp_psr_status(struct seq_file *m, void *data) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 psrstat, psrperf; + + if (!IS_HASWELL(dev)) { + seq_puts(m, "PSR not supported on this platform\n"); + } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) { + seq_puts(m, "PSR enabled\n"); + } else { + seq_puts(m, "PSR disabled: "); + switch (dev_priv->no_psr_reason) { + case PSR_NO_SOURCE: + seq_puts(m, "not supported on this platform"); + break; + case PSR_NO_SINK: + seq_puts(m, "not supported by panel"); + break; + case PSR_MODULE_PARAM: + seq_puts(m, "disabled by flag"); + break; + case PSR_CRTC_NOT_ACTIVE: + seq_puts(m, "crtc not active"); + break; + case PSR_PWR_WELL_ENABLED: + seq_puts(m, "power well enabled"); + break; + case PSR_NOT_TILED: + seq_puts(m, "not tiled"); + break; + case PSR_SPRITE_ENABLED: + seq_puts(m, "sprite enabled"); + break; + case PSR_S3D_ENABLED: + seq_puts(m, "stereo 3d enabled"); + break; + case PSR_INTERLACED_ENABLED: + seq_puts(m, "interlaced enabled"); + break; + case PSR_HSW_NOT_DDIA: + seq_puts(m, "HSW ties PSR to DDI A (eDP)"); + break; + default: + seq_puts(m, "unknown reason"); + } + seq_puts(m, "\n"); + return 0; + } + + psrstat = I915_READ(EDP_PSR_STATUS_CTL); + + seq_puts(m, "PSR Current State: "); + switch (psrstat & EDP_PSR_STATUS_STATE_MASK) { + case EDP_PSR_STATUS_STATE_IDLE: + seq_puts(m, "Reset state\n"); + break; + case EDP_PSR_STATUS_STATE_SRDONACK: + seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n"); + break; + case EDP_PSR_STATUS_STATE_SRDENT: + seq_puts(m, "SRD entry\n"); + break; + case EDP_PSR_STATUS_STATE_BUFOFF: + seq_puts(m, "Wait for buffer turn off\n"); + break; + case EDP_PSR_STATUS_STATE_BUFON: + seq_puts(m, "Wait for buffer turn on\n"); + break; + case EDP_PSR_STATUS_STATE_AUXACK: + seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n"); + break; + case EDP_PSR_STATUS_STATE_SRDOFFACK: + seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n"); + break; + default: + seq_puts(m, "Unknown\n"); + break; + } + + seq_puts(m, "Link Status: "); + switch (psrstat & EDP_PSR_STATUS_LINK_MASK) { + case EDP_PSR_STATUS_LINK_FULL_OFF: + seq_puts(m, "Link is fully off\n"); + break; + case EDP_PSR_STATUS_LINK_FULL_ON: + seq_puts(m, "Link is fully on\n"); + break; + case EDP_PSR_STATUS_LINK_STANDBY: + seq_puts(m, "Link is in standby\n"); + break; + default: + seq_puts(m, "Unknown\n"); + break; + } + + seq_printf(m, "PSR Entry Count: %u\n", + psrstat >> EDP_PSR_STATUS_COUNT_SHIFT & + EDP_PSR_STATUS_COUNT_MASK); + + seq_printf(m, "Max Sleep Timer Counter: %u\n", + psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT & + EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK); + + seq_printf(m, "Had AUX error: %s\n", + yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR)); + + seq_printf(m, "Sending AUX: %s\n", + yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING)); + + seq_printf(m, "Sending Idle: %s\n", + yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE)); + + seq_printf(m, "Sending TP2 TP3: %s\n", + yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3)); + + seq_printf(m, "Sending TP1: %s\n", + yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1)); + + seq_printf(m, "Idle Count: %u\n", + psrstat & EDP_PSR_STATUS_IDLE_MASK); + + psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK; + 
seq_printf(m, "Performance Counter: %u\n", psrperf); + + return 0; +} + +static int i915_energy_uJ(struct seq_file *m, void *data) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u64 power; + u32 units; + + if (INTEL_INFO(dev)->gen < 6) + return -ENODEV; + + rdmsrl(MSR_RAPL_POWER_UNIT, power); + power = (power & 0x1f00) >> 8; + units = 1000000 / (1 << power); /* convert to uJ */ + power = I915_READ(MCH_SECP_NRG_STTS); + power *= units; + + seq_printf(m, "%llu", (long long unsigned)power); + + return 0; +} + +static int i915_pc8_status(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!IS_HASWELL(dev)) { + seq_puts(m, "not supported\n"); + return 0; + } + + mutex_lock(&dev_priv->pc8.lock); + seq_printf(m, "Requirements met: %s\n", + yesno(dev_priv->pc8.requirements_met)); + seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle)); + seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count); + seq_printf(m, "IRQs disabled: %s\n", + yesno(dev_priv->pc8.irqs_disabled)); + seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled)); + mutex_unlock(&dev_priv->pc8.lock); + + return 0; +} + static int i915_wedged_get(void *data, u64 *val) { @@ -2006,6 +1898,8 @@ i915_drop_caches_set(void *data, u64 val) struct drm_device *dev = data; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj, *next; + struct i915_address_space *vm; + struct i915_vma *vma, *x; int ret; DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); @@ -2026,12 +1920,17 @@ i915_drop_caches_set(void *data, u64 val) i915_gem_retire_requests(dev); if (val & DROP_BOUND) { - list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list) - if (obj->pin_count == 0) { - ret = i915_gem_object_unbind(obj); + list_for_each_entry(vm, &dev_priv->vm_list, global_link) { + list_for_each_entry_safe(vma, x, &vm->inactive_list, + mm_list) { + if (vma->obj->pin_count) + continue; + + ret = i915_vma_unbind(vma); if (ret) goto unlock; } + } } if (val & DROP_UNBOUND) { @@ -2326,6 +2225,7 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST}, {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, + {"i915_gem_stolen", i915_gem_stolen_list_info }, {"i915_gem_pageflip", i915_gem_pageflip_info, 0}, {"i915_gem_request", i915_gem_request_info, 0}, {"i915_gem_seqno", i915_gem_seqno_info, 0}, @@ -2353,64 +2253,42 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_swizzle_info", i915_swizzle_info, 0}, {"i915_ppgtt_info", i915_ppgtt_info, 0}, {"i915_dpio", i915_dpio_info, 0}, + {"i915_llc", i915_llc, 0}, + {"i915_edp_psr_status", i915_edp_psr_status, 0}, + {"i915_energy_uJ", i915_energy_uJ, 0}, + {"i915_pc8_status", i915_pc8_status, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) +static struct i915_debugfs_files { + const char *name; + const struct file_operations *fops; +} i915_debugfs_files[] = { + {"i915_wedged", &i915_wedged_fops}, + {"i915_max_freq", &i915_max_freq_fops}, + {"i915_min_freq", &i915_min_freq_fops}, + {"i915_cache_sharing", &i915_cache_sharing_fops}, + {"i915_ring_stop", &i915_ring_stop_fops}, + {"i915_gem_drop_caches", 
&i915_drop_caches_fops}, + {"i915_error_state", &i915_error_state_fops}, + {"i915_next_seqno", &i915_next_seqno_fops}, +}; + int i915_debugfs_init(struct drm_minor *minor) { - int ret; - - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_wedged", - &i915_wedged_fops); - if (ret) - return ret; + int ret, i; ret = i915_forcewake_create(minor->debugfs_root, minor); if (ret) return ret; - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_max_freq", - &i915_max_freq_fops); - if (ret) - return ret; - - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_min_freq", - &i915_min_freq_fops); - if (ret) - return ret; - - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_cache_sharing", - &i915_cache_sharing_fops); - if (ret) - return ret; - - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_ring_stop", - &i915_ring_stop_fops); - if (ret) - return ret; - - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_gem_drop_caches", - &i915_drop_caches_fops); - if (ret) - return ret; - - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_error_state", - &i915_error_state_fops); - if (ret) - return ret; - - ret = i915_debugfs_create(minor->debugfs_root, minor, - "i915_next_seqno", - &i915_next_seqno_fops); - if (ret) - return ret; + for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { + ret = i915_debugfs_create(minor->debugfs_root, minor, + i915_debugfs_files[i].name, + i915_debugfs_files[i].fops); + if (ret) + return ret; + } return drm_debugfs_create_files(i915_debugfs_list, I915_DEBUGFS_ENTRIES, @@ -2419,26 +2297,18 @@ int i915_debugfs_init(struct drm_minor *minor) void i915_debugfs_cleanup(struct drm_minor *minor) { + int i; + drm_debugfs_remove_files(i915_debugfs_list, I915_DEBUGFS_ENTRIES, minor); drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops, 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops, - 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops, - 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops, - 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, - 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops, - 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops, - 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops, - 1, minor); - drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops, - 1, minor); + for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { + struct drm_info_list *info_list = + (struct drm_info_list *) i915_debugfs_files[i].fops; + + drm_debugfs_remove_files(info_list, 1, minor); + } } #endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f4669802a0fb..fdaa0915ce56 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -976,6 +976,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_LLC: value = HAS_LLC(dev); break; + case I915_PARAM_HAS_WT: + value = HAS_WT(dev); + break; case I915_PARAM_HAS_ALIASING_PPGTT: value = dev_priv->mm.aliasing_ppgtt ? 
1 : 0; break; @@ -1293,7 +1296,7 @@ static int i915_load_modeset_init(struct drm_device *dev) intel_register_dsm_handler(); - ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops); + ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false); if (ret) goto cleanup_vga_client; @@ -1323,10 +1326,8 @@ static int i915_load_modeset_init(struct drm_device *dev) /* Always safe in the mode setting case. */ /* FIXME: do pre/post-mode set stuff in core KMS code */ dev->vblank_disable_allowed = 1; - if (INTEL_INFO(dev)->num_pipes == 0) { - dev_priv->mm.suspended = 0; + if (INTEL_INFO(dev)->num_pipes == 0) return 0; - } ret = intel_fbdev_init(dev); if (ret) @@ -1352,9 +1353,6 @@ static int i915_load_modeset_init(struct drm_device *dev) drm_kms_helper_poll_init(dev); - /* We're off and running w/KMS */ - dev_priv->mm.suspended = 0; - return 0; cleanup_gem: @@ -1363,7 +1361,7 @@ cleanup_gem: i915_gem_context_fini(dev); mutex_unlock(&dev->struct_mutex); i915_gem_cleanup_aliasing_ppgtt(dev); - drm_mm_takedown(&dev_priv->mm.gtt_space); + drm_mm_takedown(&dev_priv->gtt.base.mm); cleanup_irq: drm_irq_uninstall(dev); cleanup_gem_stolen: @@ -1441,22 +1439,6 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv) } /** - * intel_early_sanitize_regs - clean up BIOS state - * @dev: DRM device - * - * This function must be called before we do any I915_READ or I915_WRITE. Its - * purpose is to clean up any state left by the BIOS that may affect us when - * reading and/or writing registers. - */ -static void intel_early_sanitize_regs(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (HAS_FPGA_DBG_UNCLAIMED(dev)) - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); -} - -/** * i915_driver_load - setup chip and create an initial config * @dev: DRM device * @flags: startup flags @@ -1497,15 +1479,31 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->gpu_error.lock); - spin_lock_init(&dev_priv->rps.lock); - spin_lock_init(&dev_priv->gt_lock); spin_lock_init(&dev_priv->backlight.lock); + spin_lock_init(&dev_priv->uncore.lock); + spin_lock_init(&dev_priv->mm.object_stat_lock); mutex_init(&dev_priv->dpio_lock); mutex_init(&dev_priv->rps.hw_lock); mutex_init(&dev_priv->modeset_restore_lock); + mutex_init(&dev_priv->pc8.lock); + dev_priv->pc8.requirements_met = false; + dev_priv->pc8.gpu_idle = false; + dev_priv->pc8.irqs_disabled = false; + dev_priv->pc8.enabled = false; + dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */ + INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work); + i915_dump_device_info(dev_priv); + /* Not all pre-production machines fall into this category, only the + * very first ones. Almost everything should work, except for maybe + * suspend/resume. And we don't implement workarounds that affect only + * pre-production machines. */ + if (IS_HSW_EARLY_SDV(dev)) + DRM_INFO("This is an early pre-production Haswell machine. " + "It may not be fully functional.\n"); + if (i915_get_bridge_dev(dev)) { ret = -EIO; goto free_priv; @@ -1531,7 +1529,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto put_bridge; } - intel_early_sanitize_regs(dev); + intel_uncore_early_sanitize(dev); + + if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) { + /* The docs do not explain exactly how the calculation can be + * made. It is somewhat guessable, but for now, it's always + * 128MB. 
+ * NB: We can't write IDICR yet because we do not have gt funcs + * set up */ + dev_priv->ellc_size = 128; + DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size); + } ret = i915_gem_gtt_init(dev); if (ret) @@ -1567,8 +1575,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_rmmap; } - dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, - aperture_size); + dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, + aperture_size); /* The i915 workqueue is primarily used for batched retirement of * requests (and thus managing bo) once the task has been completed @@ -1595,8 +1603,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_irq_init(dev); intel_pm_init(dev); - intel_gt_sanitize(dev); - intel_gt_init(dev); + intel_uncore_sanitize(dev); + intel_uncore_init(dev); /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev); @@ -1631,9 +1639,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_gem_unload; } - /* Start out suspended */ - dev_priv->mm.suspended = 1; - if (HAS_POWER_WELL(dev)) i915_init_power_well(dev); @@ -1643,6 +1648,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) DRM_ERROR("failed to init modeset\n"); goto out_gem_unload; } + } else { + /* Start out suspended in ums mode. */ + dev_priv->ums.mm_suspended = 1; } i915_setup_sysfs(dev); @@ -1669,9 +1677,9 @@ out_gem_unload: intel_teardown_mchbar(dev); destroy_workqueue(dev_priv->wq); out_mtrrfree: - arch_phys_wc_del(dev_priv->mm.gtt_mtrr); + arch_phys_wc_del(dev_priv->gtt.mtrr); io_mapping_free(dev_priv->gtt.mappable); - dev_priv->gtt.gtt_remove(dev); + dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); out_rmmap: pci_iounmap(dev->pdev, dev_priv->regs); put_bridge: @@ -1688,8 +1696,13 @@ int i915_driver_unload(struct drm_device *dev) intel_gpu_ips_teardown(); - if (HAS_POWER_WELL(dev)) + if (HAS_POWER_WELL(dev)) { + /* The i915.ko module is still not prepared to be loaded when + * the power well is not enabled, so just enable it in case + * we're going to unload/reload. 
*/ + intel_set_power_well(dev, true); i915_remove_power_well(dev); + } i915_teardown_sysfs(dev); @@ -1707,7 +1720,7 @@ int i915_driver_unload(struct drm_device *dev) cancel_delayed_work_sync(&dev_priv->mm.retire_work); io_mapping_free(dev_priv->gtt.mappable); - arch_phys_wc_del(dev_priv->mm.gtt_mtrr); + arch_phys_wc_del(dev_priv->gtt.mtrr); acpi_video_unregister(); @@ -1735,6 +1748,8 @@ int i915_driver_unload(struct drm_device *dev) cancel_work_sync(&dev_priv->gpu_error.work); i915_destroy_error_state(dev); + cancel_delayed_work_sync(&dev_priv->pc8.enable_work); + if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); @@ -1756,7 +1771,9 @@ int i915_driver_unload(struct drm_device *dev) i915_free_hws(dev); } - drm_mm_takedown(&dev_priv->mm.gtt_space); + list_del(&dev_priv->gtt.base.global_link); + WARN_ON(!list_empty(&dev_priv->vm_list)); + drm_mm_takedown(&dev_priv->gtt.base.mm); if (dev_priv->regs != NULL) pci_iounmap(dev->pdev, dev_priv->regs); @@ -1766,7 +1783,7 @@ int i915_driver_unload(struct drm_device *dev) destroy_workqueue(dev_priv->wq); pm_qos_remove_request(&dev_priv->pm_qos); - dev_priv->gtt.gtt_remove(dev); + dev_priv->gtt.base.cleanup(&dev_priv->gtt.base); if (dev_priv->slab) kmem_cache_destroy(dev_priv->slab); @@ -1842,14 +1859,14 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) kfree(file_priv); } -struct drm_ioctl_desc i915_ioctls[] = { +const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), - DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH), + DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), @@ -1862,35 +1879,35 @@ struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, 
i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), }; int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); diff --git a/drivers/gpu/drm/i915/i915_drv.c 
b/drivers/gpu/drm/i915/i915_drv.c index 45b3c030f483..ccb28ead3501 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -118,10 +118,14 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600); MODULE_PARM_DESC(i915_enable_ppgtt, "Enable PPGTT (default: true)"); -unsigned int i915_preliminary_hw_support __read_mostly = 0; +int i915_enable_psr __read_mostly = 0; +module_param_named(enable_psr, i915_enable_psr, int, 0600); +MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); + +unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT); module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600); MODULE_PARM_DESC(preliminary_hw_support, - "Enable preliminary hardware support. (default: false)"); + "Enable preliminary hardware support."); int i915_disable_power_well __read_mostly = 1; module_param_named(disable_power_well, i915_disable_power_well, int, 0600); @@ -132,6 +136,24 @@ int i915_enable_ips __read_mostly = 1; module_param_named(enable_ips, i915_enable_ips, int, 0600); MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); +bool i915_fastboot __read_mostly = 0; +module_param_named(fastboot, i915_fastboot, bool, 0600); +MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time " + "(default: false)"); + +int i915_enable_pc8 __read_mostly = 1; +module_param_named(enable_pc8, i915_enable_pc8, int, 0600); +MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)"); + +int i915_pc8_timeout __read_mostly = 5000; +module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600); +MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)"); + +bool i915_prefault_disable __read_mostly; +module_param_named(prefault_disable, i915_prefault_disable, bool, 0600); +MODULE_PARM_DESC(prefault_disable, + "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only."); + static struct drm_driver driver; extern int intel_agp_enabled; @@ -543,6 +565,9 @@ static int i915_drm_freeze(struct drm_device *dev) dev_priv->modeset_restore = MODESET_SUSPENDED; mutex_unlock(&dev_priv->modeset_restore_lock); + /* We do a lot of poking in a lot of registers, make sure they work + * properly. */ + hsw_disable_package_c8(dev_priv); intel_set_power_well(dev, true); drm_kms_helper_poll_disable(dev); @@ -551,7 +576,11 @@ static int i915_drm_freeze(struct drm_device *dev) /* If KMS is active, we do the leavevt stuff here */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { - int error = i915_gem_idle(dev); + int error; + + mutex_lock(&dev->struct_mutex); + error = i915_gem_idle(dev); + mutex_unlock(&dev->struct_mutex); if (error) { dev_err(&dev->pdev->dev, "GEM idle failed, resume might fail\n"); @@ -656,7 +685,6 @@ static int __i915_drm_thaw(struct drm_device *dev) intel_init_pch_refclk(dev); mutex_lock(&dev->struct_mutex); - dev_priv->mm.suspended = 0; error = i915_gem_init_hw(dev); mutex_unlock(&dev->struct_mutex); @@ -696,6 +724,10 @@ static int __i915_drm_thaw(struct drm_device *dev) schedule_work(&dev_priv->console_resume_work); } + /* Undo what we did at i915_drm_freeze so the refcount goes back to the + * expected level. 
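The freeze path above takes a package C8 disable reference (hsw_disable_package_c8) before poking registers, and the matching hsw_enable_package_c8 in the thaw path drops it. A minimal sketch of that counting discipline, written against the i915_package_c8 fields this series introduces later in i915_drv.h; the helper names are hypothetical, and the real enable/disable sequences also save and restore interrupt and clock state:

    /* Illustrative only: how a disable_count of this shape is driven. */
    static void example_disable_package_c8(struct i915_package_c8 *pc8)
    {
            mutex_lock(&pc8->lock);
            if (pc8->disable_count++ == 0 && pc8->enabled) {
                    /* actually leave PC8+: restore IRQs, clocks, LCPLL refclk */
                    pc8->enabled = false;
            }
            mutex_unlock(&pc8->lock);
    }

    static void example_enable_package_c8(struct i915_package_c8 *pc8)
    {
            mutex_lock(&pc8->lock);
            WARN_ON(pc8->disable_count == 0);
            if (--pc8->disable_count == 0)
                    /* do not enter PC8+ at once; arm the delayed work */
                    schedule_delayed_work(&pc8->enable_work,
                                          msecs_to_jiffies(i915_pc8_timeout));
            mutex_unlock(&pc8->lock);
    }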
*/ + hsw_enable_package_c8(dev_priv); + mutex_lock(&dev_priv->modeset_restore_lock); dev_priv->modeset_restore = MODESET_DONE; mutex_unlock(&dev_priv->modeset_restore_lock); @@ -706,7 +738,7 @@ static int i915_drm_thaw(struct drm_device *dev) { int error = 0; - intel_gt_sanitize(dev); + intel_uncore_sanitize(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { mutex_lock(&dev->struct_mutex); @@ -732,7 +764,7 @@ int i915_resume(struct drm_device *dev) pci_set_master(dev->pdev); - intel_gt_sanitize(dev); + intel_uncore_sanitize(dev); /* * Platforms with opregion should have sane BIOS, older ones (gen3 and @@ -753,139 +785,6 @@ int i915_resume(struct drm_device *dev) return 0; } -static int i8xx_do_reset(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_I85X(dev)) - return -ENODEV; - - I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830); - POSTING_READ(D_STATE); - - if (IS_I830(dev) || IS_845G(dev)) { - I915_WRITE(DEBUG_RESET_I830, - DEBUG_RESET_DISPLAY | - DEBUG_RESET_RENDER | - DEBUG_RESET_FULL); - POSTING_READ(DEBUG_RESET_I830); - msleep(1); - - I915_WRITE(DEBUG_RESET_I830, 0); - POSTING_READ(DEBUG_RESET_I830); - } - - msleep(1); - - I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830); - POSTING_READ(D_STATE); - - return 0; -} - -static int i965_reset_complete(struct drm_device *dev) -{ - u8 gdrst; - pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); - return (gdrst & GRDOM_RESET_ENABLE) == 0; -} - -static int i965_do_reset(struct drm_device *dev) -{ - int ret; - u8 gdrst; - - /* - * Set the domains we want to reset (GRDOM/bits 2 and 3) as - * well as the reset bit (GR/bit 0). Setting the GR bit - * triggers the reset; when done, the hardware will clear it. - */ - pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); - pci_write_config_byte(dev->pdev, I965_GDRST, - gdrst | GRDOM_RENDER | - GRDOM_RESET_ENABLE); - ret = wait_for(i965_reset_complete(dev), 500); - if (ret) - return ret; - - /* We can't reset render&media without also resetting display ... */ - pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); - pci_write_config_byte(dev->pdev, I965_GDRST, - gdrst | GRDOM_MEDIA | - GRDOM_RESET_ENABLE); - - return wait_for(i965_reset_complete(dev), 500); -} - -static int ironlake_do_reset(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 gdrst; - int ret; - - gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); - gdrst &= ~GRDOM_MASK; - I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, - gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE); - ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); - if (ret) - return ret; - - /* We can't reset render&media without also resetting display ... 
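The pre-gen6 reset helpers being deleted here (they reappear in the new intel_uncore.c added by this series) all share one request/ack idiom: set the reset-domain bits plus GRDOM_RESET_ENABLE, then poll until the hardware clears the enable bit. Condensed into one hypothetical helper:

    /* Illustrative sketch of the idiom; wait_for() is the driver's
     * polling macro (condition, timeout in ms), and i965_reset_complete()
     * is the completion test shown above. */
    static int example_gdrst_reset_domain(struct drm_device *dev, u8 domain)
    {
            u8 gdrst;

            pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
            pci_write_config_byte(dev->pdev, I965_GDRST,
                                  gdrst | domain | GRDOM_RESET_ENABLE);

            /* hardware clears GRDOM_RESET_ENABLE when the reset is done */
            return wait_for(i965_reset_complete(dev), 500);
    }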
*/ - gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR); - gdrst &= ~GRDOM_MASK; - I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, - gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE); - return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); -} - -static int gen6_do_reset(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - unsigned long irqflags; - - /* Hold gt_lock across reset to prevent any register access - * with forcewake not set correctly - */ - spin_lock_irqsave(&dev_priv->gt_lock, irqflags); - - /* Reset the chip */ - - /* GEN6_GDRST is not in the gt power well, no need to check - * for fifo space for the write or forcewake the chip for - * the read - */ - I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL); - - /* Spin waiting for the device to ack the reset request */ - ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); - - /* If reset with a user forcewake, try to restore, otherwise turn it off */ - if (dev_priv->forcewake_count) - dev_priv->gt.force_wake_get(dev_priv); - else - dev_priv->gt.force_wake_put(dev_priv); - - /* Restore fifo count */ - dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); - - spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); - return ret; -} - -int intel_gpu_reset(struct drm_device *dev) -{ - switch (INTEL_INFO(dev)->gen) { - case 7: - case 6: return gen6_do_reset(dev); - case 5: return ironlake_do_reset(dev); - case 4: return i965_do_reset(dev); - case 2: return i8xx_do_reset(dev); - default: return -ENODEV; - } -} - /** * i915_reset - reset chip after a hang * @dev: drm device to reset @@ -955,11 +854,11 @@ int i915_reset(struct drm_device *dev) * switched away). */ if (drm_core_check_feature(dev, DRIVER_MODESET) || - !dev_priv->mm.suspended) { + !dev_priv->ums.mm_suspended) { struct intel_ring_buffer *ring; int i; - dev_priv->mm.suspended = 0; + dev_priv->ums.mm_suspended = 0; i915_gem_init_swizzling(dev); @@ -1110,7 +1009,6 @@ static const struct file_operations i915_driver_fops = { .unlocked_ioctl = drm_ioctl, .mmap = drm_gem_mmap, .poll = drm_poll, - .fasync = drm_fasync, .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = i915_compat_ioctl, @@ -1123,8 +1021,9 @@ static struct drm_driver driver = { * deal with them for Intel hardware. */ .driver_features = - DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ - DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME, + DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | + DRIVER_RENDER, .load = i915_driver_load, .unload = i915_driver_unload, .open = i915_driver_open, @@ -1154,7 +1053,7 @@ static struct drm_driver driver = { .dumb_create = i915_gem_dumb_create, .dumb_map_offset = i915_gem_mmap_gtt, - .dumb_destroy = i915_gem_dumb_destroy, + .dumb_destroy = drm_gem_dumb_destroy, .ioctls = i915_ioctls, .fops = &i915_driver_fops, .name = DRIVER_NAME, @@ -1215,136 +1114,3 @@ module_exit(i915_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights"); - -/* We give fast paths for the really cool registers */ -#define NEEDS_FORCE_WAKE(dev_priv, reg) \ - ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ - ((reg) < 0x40000) && \ - ((reg) != FORCEWAKE)) -static void -ilk_dummy_write(struct drm_i915_private *dev_priv) -{ - /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up - * the chip from rc6 before touching it for real. MI_MODE is masked, - * hence harmless to write 0 into. 
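DRIVER_RENDER above, combined with the DRM_RENDER_ALLOW flags added to the ioctl table earlier in this patch, is what exposes the experimental render node. A hypothetical userspace sketch (the device path is system-dependent, and render nodes are still behind a runtime drm enable at this point):

    /* Userspace illustration only: a render node needs no DRM master or
     * auth handshake; only DRM_RENDER_ALLOW ioctls are accepted on it. */
    #include <fcntl.h>

    int open_render_node(void)
    {
            return open("/dev/dri/renderD128", O_RDWR);
    }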
*/ - I915_WRITE_NOTRACE(MI_MODE, 0); -} - -static void -hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg) -{ - if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && - (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { - DRM_ERROR("Unknown unclaimed register before writing to %x\n", - reg); - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); - } -} - -static void -hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg) -{ - if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) && - (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { - DRM_ERROR("Unclaimed write to %x\n", reg); - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); - } -} - -#define __i915_read(x, y) \ -u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ - unsigned long irqflags; \ - u##x val = 0; \ - spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ - if (IS_GEN5(dev_priv->dev)) \ - ilk_dummy_write(dev_priv); \ - if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ - if (dev_priv->forcewake_count == 0) \ - dev_priv->gt.force_wake_get(dev_priv); \ - val = read##y(dev_priv->regs + reg); \ - if (dev_priv->forcewake_count == 0) \ - dev_priv->gt.force_wake_put(dev_priv); \ - } else { \ - val = read##y(dev_priv->regs + reg); \ - } \ - spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ - trace_i915_reg_rw(false, reg, val, sizeof(val)); \ - return val; \ -} - -__i915_read(8, b) -__i915_read(16, w) -__i915_read(32, l) -__i915_read(64, q) -#undef __i915_read - -#define __i915_write(x, y) \ -void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ - unsigned long irqflags; \ - u32 __fifo_ret = 0; \ - trace_i915_reg_rw(true, reg, val, sizeof(val)); \ - spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ - if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ - __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ - } \ - if (IS_GEN5(dev_priv->dev)) \ - ilk_dummy_write(dev_priv); \ - hsw_unclaimed_reg_clear(dev_priv, reg); \ - write##y(val, dev_priv->regs + reg); \ - if (unlikely(__fifo_ret)) { \ - gen6_gt_check_fifodbg(dev_priv); \ - } \ - hsw_unclaimed_reg_check(dev_priv, reg); \ - spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ -} -__i915_write(8, b) -__i915_write(16, w) -__i915_write(32, l) -__i915_write(64, q) -#undef __i915_write - -static const struct register_whitelist { - uint64_t offset; - uint32_t size; - uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. 
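The gen_bitmask convention in the whitelist comment is terse; spelled out, gen N sets bit N, so the 0xF0 mask on the RING_TIMESTAMP entry covers gens 4 through 7. As a one-function illustration:

    /* Mirrors the check in i915_reg_read_ioctl() below:
     * gen 6 -> 1 << 6 = 0x40, which matches a mask of 0xF0. */
    static bool example_gen_in_mask(int gen, u32 gen_bitmask)
    {
            return (1u << gen) & gen_bitmask;
    }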
*/ -} whitelist[] = { - { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 }, -}; - -int i915_reg_read_ioctl(struct drm_device *dev, - void *data, struct drm_file *file) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_reg_read *reg = data; - struct register_whitelist const *entry = whitelist; - int i; - - for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { - if (entry->offset == reg->offset && - (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) - break; - } - - if (i == ARRAY_SIZE(whitelist)) - return -EINVAL; - - switch (entry->size) { - case 8: - reg->val = I915_READ64(reg->offset); - break; - case 4: - reg->val = I915_READ(reg->offset); - break; - case 2: - reg->val = I915_READ16(reg->offset); - break; - case 1: - reg->val = I915_READ8(reg->offset); - break; - default: - WARN_ON(1); - return -EINVAL; - } - - return 0; -} diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1929bffc1c77..52a3785a3fdf 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -144,6 +144,7 @@ enum intel_dpll_id { struct intel_dpll_hw_state { uint32_t dpll; + uint32_t dpll_md; uint32_t fp0; uint32_t fp1; }; @@ -156,6 +157,8 @@ struct intel_shared_dpll { /* should match the index in the dev_priv->shared_dplls array */ enum intel_dpll_id id; struct intel_dpll_hw_state hw_state; + void (*mode_set)(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll); void (*enable)(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll); void (*disable)(struct drm_i915_private *dev_priv, @@ -198,7 +201,6 @@ struct intel_ddi_plls { #define DRIVER_MINOR 6 #define DRIVER_PATCHLEVEL 0 -#define WATCH_COHERENCY 0 #define WATCH_LISTS 0 #define WATCH_GTT 0 @@ -320,8 +322,8 @@ struct drm_i915_error_state { u32 purgeable:1; s32 ring:4; u32 cache_level:2; - } *active_bo, *pinned_bo; - u32 active_bo_count, pinned_bo_count; + } **active_bo, **pinned_bo; + u32 *active_bo_count, *pinned_bo_count; struct intel_overlay_error_state *overlay; struct intel_display_error_state *display; }; @@ -356,14 +358,16 @@ struct drm_i915_display_funcs { struct dpll *match_clock, struct dpll *best_clock); void (*update_wm)(struct drm_device *dev); - void (*update_sprite_wm)(struct drm_device *dev, int pipe, + void (*update_sprite_wm)(struct drm_plane *plane, + struct drm_crtc *crtc, uint32_t sprite_width, int pixel_size, - bool enable); + bool enable, bool scaled); void (*modeset_global_resources)(struct drm_device *dev); /* Returns the active state of the crtc, and if the crtc is active, * fills out the pipe-config with the hw state. 
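The readout contract described in that comment (return whether the crtc is active; fill the pipe config only from hardware state) looks like this at a call site; a sketch with a hypothetical wrapper:

    /* Illustrative only: real callers also cross-check the returned hw
     * state against the sw state during modeset verification. */
    static bool example_readout_pipe(struct drm_i915_private *dev_priv,
                                     struct intel_crtc *crtc,
                                     struct intel_crtc_config *config)
    {
            return dev_priv->display.get_pipe_config(crtc, config);
    }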
*/ bool (*get_pipe_config)(struct intel_crtc *, struct intel_crtc_config *); + void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *); int (*crtc_mode_set)(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); @@ -376,7 +380,8 @@ struct drm_i915_display_funcs { void (*init_clock_gating)(struct drm_device *dev); int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj); + struct drm_i915_gem_object *obj, + uint32_t flags); int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y); void (*hpd_irq_setup)(struct drm_device *dev); @@ -387,11 +392,20 @@ struct drm_i915_display_funcs { /* pll clock increase/decrease */ }; -struct drm_i915_gt_funcs { +struct intel_uncore_funcs { void (*force_wake_get)(struct drm_i915_private *dev_priv); void (*force_wake_put)(struct drm_i915_private *dev_priv); }; +struct intel_uncore { + spinlock_t lock; /** lock is also taken in irq contexts. */ + + struct intel_uncore_funcs funcs; + + unsigned fifo_count; + unsigned forcewake_count; +}; + #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ func(is_mobile) sep \ func(is_i85x) sep \ @@ -436,12 +450,64 @@ struct intel_device_info { enum i915_cache_level { I915_CACHE_NONE = 0, - I915_CACHE_LLC, - I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ + I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */ + I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specifc + caches, eg sampler/render caches, and the + large Last-Level-Cache. LLC is coherent with + the CPU, but L3 is only visible to the GPU. */ + I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */ }; typedef uint32_t gen6_gtt_pte_t; +struct i915_address_space { + struct drm_mm mm; + struct drm_device *dev; + struct list_head global_link; + unsigned long start; /* Start offset always 0 for dri2 */ + size_t total; /* size addr space maps (ex. 2GB for ggtt) */ + + struct { + dma_addr_t addr; + struct page *page; + } scratch; + + /** + * List of objects currently involved in rendering. + * + * Includes buffers having the contents of their GPU caches + * flushed, not necessarily primitives. last_rendering_seqno + * represents when the rendering involved will be completed. + * + * A reference is held on the buffer while on this list. + */ + struct list_head active_list; + + /** + * LRU list of objects which are not in the ringbuffer and + * are ready to unbind, but are still in the GTT. + * + * last_rendering_seqno is 0 while an object is in this list. + * + * A reference is not held on the buffer while on this list, + * as merely being GTT-bound shouldn't prevent its being + * freed, and we'll pull it off the list in the free path. + */ + struct list_head inactive_list; + + /* FIXME: Need a more generic return type */ + gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, + enum i915_cache_level level); + void (*clear_range)(struct i915_address_space *vm, + unsigned int first_entry, + unsigned int num_entries); + void (*insert_entries)(struct i915_address_space *vm, + struct sg_table *st, + unsigned int first_entry, + enum i915_cache_level cache_level); + void (*cleanup)(struct i915_address_space *vm); +}; + /* The Graphics Translation Table is the way in which GEN hardware translates a * Graphics Virtual Address into a Physical Address. In addition to the normal * collateral associated with any va->pa translations GEN hardware also has a @@ -450,8 +516,7 @@ typedef uint32_t gen6_gtt_pte_t; * the spec. 
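The new i915_address_space ops give every VM, global GTT and PPGTT alike, the same PTE interface. A sketch of driving them to rebind an object's pages, assuming obj->pages (a populated sg_table) and using a hypothetical helper name:

    /* Illustrative only: clear the stale PTEs, then write fresh ones
     * with the requested cache attributes. Entries are in pages. */
    static void example_vm_rebind(struct i915_address_space *vm,
                                  struct drm_i915_gem_object *obj,
                                  unsigned int first_entry,
                                  enum i915_cache_level level)
    {
            unsigned int num_entries = obj->base.size >> PAGE_SHIFT;

            vm->clear_range(vm, first_entry, num_entries);
            vm->insert_entries(vm, obj->pages, first_entry, level);
    }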
*/ struct i915_gtt { - unsigned long start; /* Start offset of used GTT */ - size_t total; /* Total size GTT can map */ + struct i915_address_space base; size_t stolen_size; /* Total size of stolen memory */ unsigned long mappable_end; /* End offset that we can CPU map */ @@ -462,50 +527,47 @@ struct i915_gtt { void __iomem *gsm; bool do_idle_maps; - dma_addr_t scratch_page_dma; - struct page *scratch_page; + + int mtrr; /* global gtt ops */ int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, size_t *stolen, phys_addr_t *mappable_base, unsigned long *mappable_end); - void (*gtt_remove)(struct drm_device *dev); - void (*gtt_clear_range)(struct drm_device *dev, - unsigned int first_entry, - unsigned int num_entries); - void (*gtt_insert_entries)(struct drm_device *dev, - struct sg_table *st, - unsigned int pg_start, - enum i915_cache_level cache_level); - gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev, - dma_addr_t addr, - enum i915_cache_level level); }; -#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT) +#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) -#define I915_PPGTT_PD_ENTRIES 512 -#define I915_PPGTT_PT_ENTRIES 1024 struct i915_hw_ppgtt { - struct drm_device *dev; + struct i915_address_space base; unsigned num_pd_entries; struct page **pt_pages; uint32_t pd_offset; dma_addr_t *pt_dma_addr; - dma_addr_t scratch_page_dma_addr; - /* pte functions, mirroring the interface of the global gtt. */ - void (*clear_range)(struct i915_hw_ppgtt *ppgtt, - unsigned int first_entry, - unsigned int num_entries); - void (*insert_entries)(struct i915_hw_ppgtt *ppgtt, - struct sg_table *st, - unsigned int pg_start, - enum i915_cache_level cache_level); - gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev, - dma_addr_t addr, - enum i915_cache_level level); int (*enable)(struct drm_device *dev); - void (*cleanup)(struct i915_hw_ppgtt *ppgtt); +}; + +/** + * A VMA represents a GEM BO that is bound into an address space. Therefore, a + * VMA's presence cannot be guaranteed before binding, or after unbinding the + * object into/from the address space. + * + * To make things as simple as possible (ie. no refcounting), a VMA's lifetime + * will always be <= an objects lifetime. So object refcounting should cover us. 
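Since a VMA is exactly the (object, address space) pairing, lookups walk the object's vma_list for the entry whose vm matches. A minimal sketch matching the i915_gem_obj_to_vma() declaration added later in this header:

    /* Sketch of the obj/vm -> vma lookup the comment above implies. */
    static struct i915_vma *
    example_obj_to_vma(struct drm_i915_gem_object *obj,
                       struct i915_address_space *vm)
    {
            struct i915_vma *vma;

            list_for_each_entry(vma, &obj->vma_list, vma_link)
                    if (vma->vm == vm)
                            return vma;

            return NULL;
    }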
+ */ +struct i915_vma { + struct drm_mm_node node; + struct drm_i915_gem_object *obj; + struct i915_address_space *vm; + + /** This object's place on the active/inactive lists */ + struct list_head mm_list; + + struct list_head vma_link; /* Link in the object's VMA list */ + + /** This vma's place in the batchbuffer or on the eviction list */ + struct list_head exec_list; + }; struct i915_ctx_hang_stats { @@ -528,15 +590,48 @@ struct i915_hw_context { struct i915_ctx_hang_stats hang_stats; }; -enum no_fbc_reason { - FBC_NO_OUTPUT, /* no outputs enabled to compress */ - FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ - FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ - FBC_MODE_TOO_LARGE, /* mode too large for compression */ - FBC_BAD_PLANE, /* fbc not supported on plane */ - FBC_NOT_TILED, /* buffer not tiled */ - FBC_MULTIPLE_PIPES, /* more than one pipe active */ - FBC_MODULE_PARAM, +struct i915_fbc { + unsigned long size; + unsigned int fb_id; + enum plane plane; + int y; + + struct drm_mm_node *compressed_fb; + struct drm_mm_node *compressed_llb; + + struct intel_fbc_work { + struct delayed_work work; + struct drm_crtc *crtc; + struct drm_framebuffer *fb; + int interval; + } *fbc_work; + + enum no_fbc_reason { + FBC_OK, /* FBC is enabled */ + FBC_UNSUPPORTED, /* FBC is not supported by this chipset */ + FBC_NO_OUTPUT, /* no outputs enabled to compress */ + FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */ + FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ + FBC_MODE_TOO_LARGE, /* mode too large for compression */ + FBC_BAD_PLANE, /* fbc not supported on plane */ + FBC_NOT_TILED, /* buffer not tiled */ + FBC_MULTIPLE_PIPES, /* more than one pipe active */ + FBC_MODULE_PARAM, + FBC_CHIP_DEFAULT, /* disabled by default on this chip */ + } no_fbc_reason; +}; + +enum no_psr_reason { + PSR_NO_SOURCE, /* Not supported on platform */ + PSR_NO_SINK, /* Not supported by panel */ + PSR_MODULE_PARAM, + PSR_CRTC_NOT_ACTIVE, + PSR_PWR_WELL_ENABLED, + PSR_NOT_TILED, + PSR_SPRITE_ENABLED, + PSR_S3D_ENABLED, + PSR_INTERLACED_ENABLED, + PSR_HSW_NOT_DDIA, }; enum intel_pch { @@ -722,12 +817,12 @@ struct i915_suspend_saved_registers { }; struct intel_gen6_power_mgmt { + /* work and pm_iir are protected by dev_priv->irq_lock */ struct work_struct work; - struct delayed_work vlv_work; u32 pm_iir; - /* lock - irqsave spinlock that protectects the work_struct and - * pm_iir. */ - spinlock_t lock; + + /* On vlv we need to manually drop to Vmin with a delayed work. */ + struct delayed_work vlv_work; /* The below variables an all the rps hw state are protected by * dev->struct mutext. */ @@ -793,6 +888,18 @@ struct i915_dri1_state { uint32_t counter; }; +struct i915_ums_state { + /** + * Flag if the X Server, and thus DRM, is not currently in + * control of the device. + * + * This is set between LeaveVT and EnterVT. It needs to be + * replaced with a semaphore. It also needs to be + * transitioned away from for kernel modesetting. + */ + int mm_suspended; +}; + struct intel_l3_parity { u32 *remap_info; struct work_struct error_work; @@ -801,8 +908,6 @@ struct intel_l3_parity { struct i915_gem_mm { /** Memory allocator for GTT stolen memory */ struct drm_mm stolen; - /** Memory allocator for GTT */ - struct drm_mm gtt_space; /** List of all objects in gtt_space. 
Used to restore gtt * mappings on resume */ struct list_head bound_list; @@ -816,37 +921,12 @@ struct i915_gem_mm { /** Usable portion of the GTT for GEM */ unsigned long stolen_base; /* limited to low memory (32-bit) */ - int gtt_mtrr; - /** PPGTT used for aliasing the PPGTT with the GTT */ struct i915_hw_ppgtt *aliasing_ppgtt; struct shrinker inactive_shrinker; bool shrinker_no_lock_stealing; - /** - * List of objects currently involved in rendering. - * - * Includes buffers having the contents of their GPU caches - * flushed, not necessarily primitives. last_rendering_seqno - * represents when the rendering involved will be completed. - * - * A reference is held on the buffer while on this list. - */ - struct list_head active_list; - - /** - * LRU list of objects which are not in the ringbuffer and - * are ready to unbind, but are still in the GTT. - * - * last_rendering_seqno is 0 while an object is in this list. - * - * A reference is not held on the buffer while on this list, - * as merely being GTT-bound shouldn't prevent its being - * freed, and we'll pull it off the list in the free path. - */ - struct list_head inactive_list; - /** LRU list of objects with fence regs on them. */ struct list_head fence_list; @@ -865,16 +945,6 @@ struct i915_gem_mm { */ bool interruptible; - /** - * Flag if the X Server, and thus DRM, is not currently in - * control of the device. - * - * This is set between LeaveVT and EnterVT. It needs to be - * replaced with a semaphore. It also needs to be - * transitioned away from for kernel modesetting. - */ - int suspended; - /** Bit 6 swizzling required for X tiling */ uint32_t bit_6_swizzle_x; /** Bit 6 swizzling required for Y tiling */ @@ -884,6 +954,7 @@ struct i915_gem_mm { struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; /* accounting, useful for userland debugging */ + spinlock_t object_stat_lock; size_t object_memory; u32 object_count; }; @@ -897,6 +968,11 @@ struct drm_i915_error_state_buf { loff_t pos; }; +struct i915_error_state_file_priv { + struct drm_device *dev; + struct drm_i915_error_state *error; +}; + struct i915_gpu_error { /* For hangcheck timer */ #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ @@ -988,6 +1064,88 @@ struct intel_vbt_data { struct child_device_config *child_dev; }; +enum intel_ddb_partitioning { + INTEL_DDB_PART_1_2, + INTEL_DDB_PART_5_6, /* IVB+ */ +}; + +struct intel_wm_level { + bool enable; + uint32_t pri_val; + uint32_t spr_val; + uint32_t cur_val; + uint32_t fbc_val; +}; + +/* + * This struct tracks the state needed for the Package C8+ feature. + * + * Package states C8 and deeper are really deep PC states that can only be + * reached when all the devices on the system allow it, so even if the graphics + * device allows PC8+, it doesn't mean the system will actually get to these + * states. + * + * Our driver only allows PC8+ when all the outputs are disabled, the power well + * is disabled and the GPU is idle. When these conditions are met, we manually + * do the other conditions: disable the interrupts, clocks and switch LCPLL + * refclk to Fclk. + * + * When we really reach PC8 or deeper states (not just when we allow it) we lose + * the state of some registers, so when we come back from PC8+ we need to + * restore this state. We don't get into PC8+ if we're not in RC6, so we don't + * need to take care of the registers kept by RC6. + * + * The interrupt disabling is part of the requirements. We can only leave the + * PCH HPD interrupts enabled. 
If we're in PC8+ and we get another interrupt we + * can lock the machine. + * + * Ideally every piece of our code that needs PC8+ disabled would call + * hsw_disable_package_c8, which would increment disable_count and prevent the + * system from reaching PC8+. But we don't have a symmetric way to do this for + * everything, so we have the requirements_met and gpu_idle variables. When we + * switch requirements_met or gpu_idle to true we decrease disable_count, and + * increase it in the opposite case. The requirements_met variable is true when + * all the CRTCs, encoders and the power well are disabled. The gpu_idle + * variable is true when the GPU is idle. + * + * In addition to everything, we only actually enable PC8+ if disable_count + * stays at zero for at least some seconds. This is implemented with the + * enable_work variable. We do this so we don't enable/disable PC8 dozens of + * consecutive times when all screens are disabled and some background app + * queries the state of our connectors, or we have some application constantly + * waking up to use the GPU. Only after the enable_work function actually + * enables PC8+ the "enable" variable will become true, which means that it can + * be false even if disable_count is 0. + * + * The irqs_disabled variable becomes true exactly after we disable the IRQs and + * goes back to false exactly before we reenable the IRQs. We use this variable + * to check if someone is trying to enable/disable IRQs while they're supposed + * to be disabled. This shouldn't happen and we'll print some error messages in + * case it happens, but if it actually happens we'll also update the variables + * inside struct regsave so when we restore the IRQs they will contain the + * latest expected values. + * + * For more, read "Display Sequences for Package C8" on our documentation. + */ +struct i915_package_c8 { + bool requirements_met; + bool gpu_idle; + bool irqs_disabled; + /* Only true after the delayed work task actually enables it. */ + bool enabled; + int disable_count; + struct mutex lock; + struct delayed_work enable_work; + + struct { + uint32_t deimr; + uint32_t sdeimr; + uint32_t gtimr; + uint32_t gtier; + uint32_t gen6_pmimr; + } regsave; +}; + typedef struct drm_i915_private { struct drm_device *dev; struct kmem_cache *slab; @@ -998,14 +1156,7 @@ typedef struct drm_i915_private { void __iomem *regs; - struct drm_i915_gt_funcs gt; - /** gt_fifo_count and the subsequent register write are synchronized - * with dev->struct_mutex. */ - unsigned gt_fifo_count; - /** forcewake_count is protected by gt_lock */ - unsigned forcewake_count; - /** gt_lock is also taken in irq contexts. 
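The gt_lock, fifo_count and forcewake_count fields deleted from drm_i915_private here are folded into the struct intel_uncore defined earlier in this header diff; the reference-counting pattern itself is unchanged, roughly:

    /* Sketch: forcewake refcounting under the consolidated uncore.lock,
     * mirroring what the removed gt_lock code did. Illustrative only. */
    static void example_forcewake_get(struct drm_i915_private *dev_priv)
    {
            unsigned long irqflags;

            spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
            if (dev_priv->uncore.forcewake_count++ == 0)
                    dev_priv->uncore.funcs.force_wake_get(dev_priv);
            spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
    }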
*/ - spinlock_t gt_lock; + struct intel_uncore uncore; struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; @@ -1042,6 +1193,7 @@ typedef struct drm_i915_private { /** Cached value of IMR to avoid reads in updating the bitfield */ u32 irq_mask; u32 gt_irq_mask; + u32 pm_irq_mask; struct work_struct hotplug_work; bool enable_hotplug_processing; @@ -1059,12 +1211,7 @@ typedef struct drm_i915_private { int num_plane; - unsigned long cfb_size; - unsigned int cfb_fb; - enum plane cfb_plane; - int cfb_y; - struct intel_fbc_work *fbc_work; - + struct i915_fbc fbc; struct intel_opregion opregion; struct intel_vbt_data vbt; @@ -1081,8 +1228,6 @@ typedef struct drm_i915_private { } backlight; /* LVDS info */ - struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ - struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ bool no_aux_handshake; struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ @@ -1105,7 +1250,8 @@ typedef struct drm_i915_private { enum modeset_restore modeset_restore; struct mutex modeset_restore_lock; - struct i915_gtt gtt; + struct list_head vm_list; /* Global list of all address spaces */ + struct i915_gtt gtt; /* VMA representing the global address space */ struct i915_gem_mm mm; @@ -1132,6 +1278,9 @@ typedef struct drm_i915_private { struct intel_l3_parity l3_parity; + /* Cannot be determined by PCIID. You must always read a register. */ + size_t ellc_size; + /* gen6+ rps state */ struct intel_gen6_power_mgmt rps; @@ -1142,10 +1291,7 @@ typedef struct drm_i915_private { /* Haswell power well */ struct i915_power_well power_well; - enum no_fbc_reason no_fbc_reason; - - struct drm_mm_node *compressed_fb; - struct drm_mm_node *compressed_llb; + enum no_psr_reason no_psr_reason; struct i915_gpu_error gpu_error; @@ -1170,11 +1316,34 @@ typedef struct drm_i915_private { struct i915_suspend_saved_registers regfile; + struct { + /* + * Raw watermark latency values: + * in 0.1us units for WM0, + * in 0.5us units for WM1+. + */ + /* primary */ + uint16_t pri_latency[5]; + /* sprite */ + uint16_t spr_latency[5]; + /* cursor */ + uint16_t cur_latency[5]; + } wm; + + struct i915_package_c8 pc8; + /* Old dri1 support infrastructure, beware the dragons ya fools entering * here! */ struct i915_dri1_state dri1; + /* Old ums support infrastructure, same warning applies. */ + struct i915_ums_state ums; } drm_i915_private_t; +static inline struct drm_i915_private *to_i915(const struct drm_device *dev) +{ + return dev->dev_private; +} + /* Iterate over initialised rings */ #define for_each_ring(ring__, dev_priv__, i__) \ for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ @@ -1187,7 +1356,7 @@ enum hdmi_force_audio { HDMI_AUDIO_ON, /* force turn on HDMI audio */ }; -#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1) +#define I915_GTT_OFFSET_NONE ((u32)-1) struct drm_i915_gem_object_ops { /* Interface between the GEM object and its backing storage. @@ -1212,15 +1381,16 @@ struct drm_i915_gem_object { const struct drm_i915_gem_object_ops *ops; - /** Current space allocated to this object in the GTT, if any. */ - struct drm_mm_node *gtt_space; + /** List of VMAs backed by this object */ + struct list_head vma_list; + /** Stolen memory for this object, instead of being backed by shmem. 
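The raw watermark latencies above deliberately mix units, 0.1us steps for WM0 and 0.5us steps for WM1+, so consumers have to normalize. A tiny illustration using integer math in tenths of a microsecond:

    /* Illustration only: return any WM level's latency in 0.1us units. */
    static unsigned int example_wm_latency_tenths_us(const uint16_t lat[5],
                                                     int level)
    {
            return level == 0 ? lat[0] : lat[level] * 5;
    }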
*/ struct drm_mm_node *stolen; struct list_head global_list; - /** This object's place on the active/inactive lists */ struct list_head ring_list; - struct list_head mm_list; + /** Used in execbuf to temporarily hold a ref */ + struct list_head obj_exec_link; /** This object's place in the batchbuffer or on the eviction list */ struct list_head exec_list; @@ -1287,6 +1457,7 @@ struct drm_i915_gem_object { */ unsigned int fault_mappable:1; unsigned int pin_mappable:1; + unsigned int pin_display:1; /* * Is the GPU currently using a fence to access this buffer, @@ -1294,7 +1465,7 @@ struct drm_i915_gem_object { unsigned int pending_fenced_gpu_access:1; unsigned int fenced_gpu_access:1; - unsigned int cache_level:2; + unsigned int cache_level:3; unsigned int has_aliasing_ppgtt_mapping:1; unsigned int has_global_gtt_mapping:1; @@ -1314,13 +1485,6 @@ struct drm_i915_gem_object { unsigned long exec_handle; struct drm_i915_gem_exec_object2 *exec_entry; - /** - * Current offset of the object in GTT space. - * - * This is the same as gtt_space->start - */ - uint32_t gtt_offset; - struct intel_ring_buffer *ring; /** Breadcrumb of last rendering to the buffer. */ @@ -1396,7 +1560,7 @@ struct drm_i915_file_private { struct i915_ctx_hang_stats hang_stats; }; -#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) +#define INTEL_INFO(dev) (to_i915(dev)->info) #define IS_I830(dev) ((dev)->pci_device == 0x3577) #define IS_845G(dev) ((dev)->pci_device == 0x2562) @@ -1414,7 +1578,6 @@ struct drm_i915_file_private { #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) -#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) #define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ @@ -1426,6 +1589,8 @@ struct drm_i915_file_private { #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) +#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ + ((dev)->pci_device & 0xFF00) == 0x0C00) #define IS_ULT(dev) (IS_HASWELL(dev) && \ ((dev)->pci_device & 0xFF00) == 0x0A00) @@ -1446,6 +1611,7 @@ struct drm_i915_file_private { #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) #define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring) #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) +#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) @@ -1468,8 +1634,6 @@ struct drm_i915_file_private { #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) -/* dsparb controlled by hw only */ -#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) @@ -1477,8 +1641,6 @@ struct drm_i915_file_private { #define HAS_IPS(dev) (IS_ULT(dev)) -#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) - #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) #define HAS_POWER_WELL(dev) (IS_HASWELL(dev)) #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) @@ -1490,7 +1652,7 @@ struct drm_i915_file_private { #define 
INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 -#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) +#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type) #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) @@ -1526,7 +1688,7 @@ struct drm_i915_file_private { #define INTEL_RC6p_ENABLE (1<<1) #define INTEL_RC6pp_ENABLE (1<<2) -extern struct drm_ioctl_desc i915_ioctls[]; +extern const struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; extern unsigned int i915_fbpercrtc __always_unused; extern int i915_panel_ignore_lid __read_mostly; @@ -1540,9 +1702,14 @@ extern int i915_enable_rc6 __read_mostly; extern int i915_enable_fbc __read_mostly; extern bool i915_enable_hangcheck __read_mostly; extern int i915_enable_ppgtt __read_mostly; +extern int i915_enable_psr __read_mostly; extern unsigned int i915_preliminary_hw_support __read_mostly; extern int i915_disable_power_well __read_mostly; extern int i915_enable_ips __read_mostly; +extern bool i915_fastboot __read_mostly; +extern int i915_enable_pc8 __read_mostly; +extern int i915_pc8_timeout __read_mostly; +extern bool i915_prefault_disable __read_mostly; extern int i915_suspend(struct drm_device *dev, pm_message_t state); extern int i915_resume(struct drm_device *dev); @@ -1578,16 +1745,19 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); extern void intel_console_resume(struct work_struct *work); /* i915_irq.c */ -void i915_hangcheck_elapsed(unsigned long data); +void i915_queue_hangcheck(struct drm_device *dev); void i915_handle_error(struct drm_device *dev, bool wedged); extern void intel_irq_init(struct drm_device *dev); extern void intel_pm_init(struct drm_device *dev); extern void intel_hpd_init(struct drm_device *dev); -extern void intel_gt_init(struct drm_device *dev); -extern void intel_gt_sanitize(struct drm_device *dev); +extern void intel_pm_init(struct drm_device *dev); -void i915_error_state_free(struct kref *error_ref); +extern void intel_uncore_sanitize(struct drm_device *dev); +extern void intel_uncore_early_sanitize(struct drm_device *dev); +extern void intel_uncore_init(struct drm_device *dev); +extern void intel_uncore_clear_errors(struct drm_device *dev); +extern void intel_uncore_check_errors(struct drm_device *dev); void i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); @@ -1595,13 +1765,6 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); -#ifdef CONFIG_DEBUG_FS -extern void i915_destroy_error_state(struct drm_device *dev); -#else -#define i915_destroy_error_state(x) -#endif - - /* i915_gem.c */ int i915_gem_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -1658,13 +1821,18 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, size_t size); void i915_gem_free_object(struct drm_gem_object *obj); +struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm); +void i915_gem_vma_destroy(struct i915_vma *vma); int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, uint32_t alignment, bool map_and_fenceable, bool nonblocking); void i915_gem_object_unpin(struct drm_i915_gem_object *obj); 
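i915_gem_object_pin() now names its target address space explicitly; the GGTT convenience wrappers follow a few hunks below. A sketch of a direct call against an arbitrary VM, parameter choices illustrative:

    /* Illustrative only: 4KiB alignment, no mappable/fenceable
     * requirement, and the call is allowed to block (e.g. to evict). */
    static int example_pin_into_vm(struct drm_i915_gem_object *obj,
                                   struct i915_address_space *vm)
    {
            return i915_gem_object_pin(obj, vm, 4096,
                                       false,  /* map_and_fenceable */
                                       false); /* nonblocking */
    }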
-int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); +int __must_check i915_vma_unbind(struct i915_vma *vma); +int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj); int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); @@ -1701,8 +1869,6 @@ int i915_gem_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args); int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); -int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, - uint32_t handle); /** * Returns true if seq1 is later than seq2. */ @@ -1754,10 +1920,7 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error) } void i915_gem_reset(struct drm_device *dev); -void i915_gem_clflush_object(struct drm_i915_gem_object *obj); -int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, - uint32_t read_domains, - uint32_t write_domain); +bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); int __must_check i915_gem_init(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); @@ -1784,6 +1947,7 @@ int __must_check i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, struct intel_ring_buffer *pipelined); +void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); int i915_gem_attach_phys_object(struct drm_device *dev, struct drm_i915_gem_object *obj, int id, @@ -1810,6 +1974,56 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev, void i915_gem_restore_fences(struct drm_device *dev); +unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, + struct i915_address_space *vm); +bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); +bool i915_gem_obj_bound(struct drm_i915_gem_object *o, + struct i915_address_space *vm); +unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, + struct i915_address_space *vm); +struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm); +struct i915_vma * +i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm); +/* Some GGTT VM helpers */ +#define obj_to_ggtt(obj) \ + (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) +static inline bool i915_is_ggtt(struct i915_address_space *vm) +{ + struct i915_address_space *ggtt = + &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base; + return vm == ggtt; +} + +static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) +{ + return i915_gem_obj_bound(obj, obj_to_ggtt(obj)); +} + +static inline unsigned long +i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj) +{ + return i915_gem_obj_offset(obj, obj_to_ggtt(obj)); +} + +static inline unsigned long +i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) +{ + return i915_gem_obj_size(obj, obj_to_ggtt(obj)); +} + +static inline int __must_check +i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, + uint32_t alignment, + bool map_and_fenceable, + bool nonblocking) +{ + return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, + map_and_fenceable, nonblocking); +} +#undef obj_to_ggtt + /* i915_gem_context.c */ void i915_gem_context_init(struct drm_device *dev); void 
i915_gem_context_fini(struct drm_device *dev); @@ -1828,7 +2042,7 @@ static inline void i915_gem_context_unreference(struct i915_hw_context *ctx) } struct i915_ctx_hang_stats * __must_check -i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring, +i915_gem_context_get_hang_stats(struct drm_device *dev, struct drm_file *file, u32 id); int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, @@ -1862,7 +2076,9 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev) /* i915_gem_evict.c */ -int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, +int __must_check i915_gem_evict_something(struct drm_device *dev, + struct i915_address_space *vm, + int min_size, unsigned alignment, unsigned cache_level, bool mappable, @@ -1884,7 +2100,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); /* i915_gem_tiling.c */ -inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) +static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { drm_i915_private_t *dev_priv = obj->base.dev->dev_private; @@ -1897,23 +2113,36 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); /* i915_gem_debug.c */ -void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, - const char *where, uint32_t mark); #if WATCH_LISTS int i915_verify_lists(struct drm_device *dev); #else #define i915_verify_lists(dev) 0 #endif -void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, - int handle); -void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, - const char *where, uint32_t mark); /* i915_debugfs.c */ int i915_debugfs_init(struct drm_minor *minor); void i915_debugfs_cleanup(struct drm_minor *minor); + +/* i915_gpu_error.c */ __printf(2, 3) void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); +int i915_error_state_to_str(struct drm_i915_error_state_buf *estr, + const struct i915_error_state_file_priv *error); +int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb, + size_t count, loff_t pos); +static inline void i915_error_state_buf_release( + struct drm_i915_error_state_buf *eb) +{ + kfree(eb->buf); +} +void i915_capture_error_state(struct drm_device *dev); +void i915_error_state_get(struct drm_device *dev, + struct i915_error_state_file_priv *error_priv); +void i915_error_state_put(struct i915_error_state_file_priv *error_priv); +void i915_destroy_error_state(struct drm_device *dev); + +void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); +const char *i915_cache_level_str(int type); /* i915_suspend.c */ extern int i915_save_state(struct drm_device *dev); @@ -1993,7 +2222,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); /* overlay */ -#ifdef CONFIG_DEBUG_FS extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, struct intel_overlay_error_state *error); @@ -2002,7 +2230,6 @@ extern struct intel_display_error_state *intel_display_capture_error_state(struc extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, struct drm_device *dev, struct intel_display_error_state *error); -#endif /* On SNB platform, before reading ring registers forcewake bit * must 
be set to prevent GT core from power down and stale values being @@ -2010,7 +2237,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, */ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); -int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); @@ -2029,39 +2255,37 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, int vlv_gpu_freq(int ddr_freq, int val); int vlv_freq_opcode(int ddr_freq, int val); -#define __i915_read(x, y) \ - u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); - -__i915_read(8, b) -__i915_read(16, w) -__i915_read(32, l) -__i915_read(64, q) +#define __i915_read(x) \ + u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace); +__i915_read(8) +__i915_read(16) +__i915_read(32) +__i915_read(64) #undef __i915_read -#define __i915_write(x, y) \ - void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); - -__i915_write(8, b) -__i915_write(16, w) -__i915_write(32, l) -__i915_write(64, q) +#define __i915_write(x) \ + void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace); +__i915_write(8) +__i915_write(16) +__i915_write(32) +__i915_write(64) #undef __i915_write -#define I915_READ8(reg) i915_read8(dev_priv, (reg)) -#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) +#define I915_READ8(reg) i915_read8(dev_priv, (reg), true) +#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true) -#define I915_READ16(reg) i915_read16(dev_priv, (reg)) -#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) -#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) -#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) +#define I915_READ16(reg) i915_read16(dev_priv, (reg), true) +#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true) +#define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false) +#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false) -#define I915_READ(reg) i915_read32(dev_priv, (reg)) -#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) -#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) -#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) +#define I915_READ(reg) i915_read32(dev_priv, (reg), true) +#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true) +#define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false) +#define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false) -#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) -#define I915_READ64(reg) i915_read64(dev_priv, (reg)) +#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true) +#define I915_READ64(reg) i915_read64(dev_priv, (reg), true) #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d9e2208cfe98..2d1cb10d846f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -26,6 +26,7 @@ */ #include <drm/drmP.h> +#include <drm/drm_vma_manager.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_trace.h" @@ -37,11 +38,14 @@ 
#include <linux/dma-buf.h> static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); -static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); -static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, - unsigned alignment, - bool map_and_fenceable, - bool nonblocking); +static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, + bool force); +static __must_check int +i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + unsigned alignment, + bool map_and_fenceable, + bool nonblocking); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -59,6 +63,20 @@ static long i915_gem_purge(struct drm_i915_private *dev_priv, long target); static void i915_gem_shrink_all(struct drm_i915_private *dev_priv); static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); +static bool cpu_cache_is_coherent(struct drm_device *dev, + enum i915_cache_level level) +{ + return HAS_LLC(dev) || level != I915_CACHE_NONE; +} + +static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) +{ + if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) + return true; + + return obj->pin_display; +} + static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) { if (obj->tiling_mode) @@ -75,15 +93,19 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, size_t size) { + spin_lock(&dev_priv->mm.object_stat_lock); dev_priv->mm.object_count++; dev_priv->mm.object_memory += size; + spin_unlock(&dev_priv->mm.object_stat_lock); } static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, size_t size) { + spin_lock(&dev_priv->mm.object_stat_lock); dev_priv->mm.object_count--; dev_priv->mm.object_memory -= size; + spin_unlock(&dev_priv->mm.object_stat_lock); } static int @@ -135,7 +157,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) static inline bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) { - return obj->gtt_space && !obj->active; + return i915_gem_obj_bound_any(obj) && !obj->active; } int @@ -178,10 +200,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) if (obj->pin_count) - pinned += obj->gtt_space->size; + pinned += i915_gem_obj_ggtt_size(obj); mutex_unlock(&dev->struct_mutex); - args->aper_size = dev_priv->gtt.total; + args->aper_size = dev_priv->gtt.base.total; args->aper_available_size = args->aper_size - pinned; return 0; @@ -219,16 +241,10 @@ i915_gem_create(struct drm_file *file, return -ENOMEM; ret = drm_gem_handle_create(file, &obj->base, &handle); - if (ret) { - drm_gem_object_release(&obj->base); - i915_gem_info_remove_obj(dev->dev_private, obj->base.size); - i915_gem_object_free(obj); - return ret; - } - /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference(&obj->base); - trace_i915_gem_object_create(obj); + drm_gem_object_unreference_unlocked(&obj->base); + if (ret) + return ret; *handle_p = handle; return 0; @@ -246,13 +262,6 @@ i915_gem_dumb_create(struct drm_file *file, args->size, &args->handle); } -int i915_gem_dumb_destroy(struct drm_file *file, - struct drm_device *dev, - uint32_t handle) -{ - return drm_gem_handle_delete(file, handle); -} - /** * Creates a 
new mm object and returns a handle to it. */ @@ -420,9 +429,8 @@ i915_gem_shmem_pread(struct drm_device *dev, * read domain and manually flush cachelines (if required). This * optimizes for the case when the gpu will dirty the data * anyway again before the next pread happens. */ - if (obj->cache_level == I915_CACHE_NONE) - needs_clflush = 1; - if (obj->gtt_space) { + needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level); + if (i915_gem_obj_bound_any(obj)) { ret = i915_gem_object_set_to_gtt_domain(obj, false); if (ret) return ret; @@ -465,7 +473,7 @@ i915_gem_shmem_pread(struct drm_device *dev, mutex_unlock(&dev->struct_mutex); - if (!prefaulted) { + if (likely(!i915_prefault_disable) && !prefaulted) { ret = fault_in_multipages_writeable(user_data, remain); /* Userspace is tricking us, but we've already clobbered * its pages with the prefault and promised to write the @@ -594,7 +602,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, char __user *user_data; int page_offset, page_length, ret; - ret = i915_gem_object_pin(obj, 0, true, true); + ret = i915_gem_obj_ggtt_pin(obj, 0, true, true); if (ret) goto out; @@ -609,7 +617,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, user_data = to_user_ptr(args->data_ptr); remain = args->size; - offset = obj->gtt_offset + args->offset; + offset = i915_gem_obj_ggtt_offset(obj) + args->offset; while (remain > 0) { /* Operation in this page @@ -737,19 +745,18 @@ i915_gem_shmem_pwrite(struct drm_device *dev, * write domain and manually flush cachelines (if required). This * optimizes for the case when the gpu will use the data * right away and we therefore have to clflush anyway. */ - if (obj->cache_level == I915_CACHE_NONE) - needs_clflush_after = 1; - if (obj->gtt_space) { + needs_clflush_after = cpu_write_needs_clflush(obj); + if (i915_gem_obj_bound_any(obj)) { ret = i915_gem_object_set_to_gtt_domain(obj, true); if (ret) return ret; } } - /* Same trick applies for invalidate partially written cachelines before - * writing. */ - if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU) - && obj->cache_level == I915_CACHE_NONE) - needs_clflush_before = 1; + /* Same trick applies to invalidate partially written cachelines read + * before writing. 
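The pread/pwrite paths now take their clflush decisions from a single coherency predicate instead of testing I915_CACHE_NONE directly; condensed, the pre-write check in the hunk above reads:

    /* Sketch of the decision: cachelines need invalidating before the
     * write only if the CPU read domain is gone and the CPU cache is
     * not coherent for this object's cache level. */
    static bool example_needs_clflush_before(struct drm_device *dev,
                                             struct drm_i915_gem_object *obj)
    {
            if (obj->base.read_domains & I915_GEM_DOMAIN_CPU)
                    return false;

            return !cpu_cache_is_coherent(dev, obj->cache_level);
    }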
*/ + if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) + needs_clflush_before = + !cpu_cache_is_coherent(dev, obj->cache_level); ret = i915_gem_object_get_pages(obj); if (ret) @@ -828,8 +835,8 @@ out: */ if (!needs_clflush_after && obj->base.write_domain != I915_GEM_DOMAIN_CPU) { - i915_gem_clflush_object(obj); - i915_gem_chipset_flush(dev); + if (i915_gem_clflush_object(obj, obj->pin_display)) + i915_gem_chipset_flush(dev); } } @@ -860,10 +867,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, args->size)) return -EFAULT; - ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), - args->size); - if (ret) - return -EFAULT; + if (likely(!i915_prefault_disable)) { + ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr), + args->size); + if (ret) + return -EFAULT; + } ret = i915_mutex_lock_interruptible(dev); if (ret) @@ -904,9 +913,9 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, goto out; } - if (obj->cache_level == I915_CACHE_NONE && - obj->tiling_mode == I915_TILING_NONE && - obj->base.write_domain != I915_GEM_DOMAIN_CPU) { + if (obj->tiling_mode == I915_TILING_NONE && + obj->base.write_domain != I915_GEM_DOMAIN_CPU && + cpu_write_needs_clflush(obj)) { ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); /* Note that the gtt paths might fail with non-page-backed user * pointers (e.g. gtt mappings when moving data between @@ -990,6 +999,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, bool wait_forever = true; int ret; + WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n"); + if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) return 0; @@ -1255,8 +1266,8 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, } /* Pinned buffers may be scanout, so flush the cache */ - if (obj->pin_count) - i915_gem_object_flush_cpu_write_domain(obj); + if (obj->pin_display) + i915_gem_object_flush_cpu_write_domain(obj, true); drm_gem_object_unreference(&obj->base); unlock: @@ -1346,7 +1357,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } /* Now bind it into the GTT if needed */ - ret = i915_gem_object_pin(obj, 0, true, false); + ret = i915_gem_obj_ggtt_pin(obj, 0, true, false); if (ret) goto unlock; @@ -1360,8 +1371,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) obj->fault_mappable = true; - pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) + - page_offset; + pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj); + pfn >>= PAGE_SHIFT; + pfn += page_offset; /* Finally, remap it using the new GTT offset */ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); @@ -1425,11 +1437,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj) if (!obj->fault_mappable) return; - if (obj->base.dev->dev_mapping) - unmap_mapping_range(obj->base.dev->dev_mapping, - (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, - obj->base.size, 1); - + drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping); obj->fault_mappable = false; } @@ -1485,7 +1493,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) struct drm_i915_private *dev_priv = obj->base.dev->dev_private; int ret; - if (obj->base.map_list.map) + if (drm_vma_node_has_offset(&obj->base.vma_node)) return 0; dev_priv->mm.shrinker_no_lock_stealing = true; @@ -1516,9 +1524,6 @@ out: static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) { - if (!obj->base.map_list.map) - return; - drm_gem_free_mmap_offset(&obj->base); } @@ 
-1557,7 +1562,7 @@ i915_gem_mmap_gtt(struct drm_file *file, if (ret) goto out; - *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; + *offset = drm_vma_node_offset_addr(&obj->base.vma_node); out: drm_gem_object_unreference(&obj->base); @@ -1632,7 +1637,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) * hope for the best. */ WARN_ON(ret != -EIO); - i915_gem_clflush_object(obj); + i915_gem_clflush_object(obj, true); obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; } @@ -1667,11 +1672,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) if (obj->pages == NULL) return 0; - BUG_ON(obj->gtt_space); - if (obj->pages_pin_count) return -EBUSY; + BUG_ON(i915_gem_obj_bound_any(obj)); + /* ->put_pages might need to allocate memory for the bit17 swizzle * array, hence protect them from being reaped by removing them from gtt * lists early. */ @@ -1704,12 +1709,18 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, } } - list_for_each_entry_safe(obj, next, - &dev_priv->mm.inactive_list, - mm_list) { - if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && - i915_gem_object_unbind(obj) == 0 && - i915_gem_object_put_pages(obj) == 0) { + list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list, + global_list) { + struct i915_vma *vma, *v; + + if (!i915_gem_object_is_purgeable(obj) && purgeable_only) + continue; + + list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) + if (i915_vma_unbind(vma)) + break; + + if (!i915_gem_object_put_pages(obj)) { count += obj->base.size >> PAGE_SHIFT; if (count >= target) return count; @@ -1892,8 +1903,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, obj->active = 1; } - /* Move from whatever list we were on to the tail of execution. 
*/ - list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); list_move_tail(&obj->ring_list, &ring->active_list); obj->last_read_seqno = seqno; @@ -1915,13 +1924,14 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, static void i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; + struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); BUG_ON(!obj->active); - list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list); list_del_init(&obj->ring_list); obj->ring = NULL; @@ -2085,11 +2095,9 @@ int __i915_add_request(struct intel_ring_buffer *ring, trace_i915_gem_request_add(ring, request->seqno); ring->outstanding_lazy_request = 0; - if (!dev_priv->mm.suspended) { - if (i915_enable_hangcheck) { - mod_timer(&dev_priv->gpu_error.hangcheck_timer, - round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); - } + if (!dev_priv->ums.mm_suspended) { + i915_queue_hangcheck(ring->dev); + if (was_empty) { queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, @@ -2119,10 +2127,11 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) spin_unlock(&file_priv->mm.lock); } -static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj) +static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj, + struct i915_address_space *vm) { - if (acthd >= obj->gtt_offset && - acthd < obj->gtt_offset + obj->base.size) + if (acthd >= i915_gem_obj_offset(obj, vm) && + acthd < i915_gem_obj_offset(obj, vm) + obj->base.size) return true; return false; @@ -2145,6 +2154,17 @@ static bool i915_head_inside_request(const u32 acthd_unmasked, return false; } +static struct i915_address_space * +request_to_vm(struct drm_i915_gem_request *request) +{ + struct drm_i915_private *dev_priv = request->ring->dev->dev_private; + struct i915_address_space *vm; + + vm = &dev_priv->gtt.base; + + return vm; +} + static bool i915_request_guilty(struct drm_i915_gem_request *request, const u32 acthd, bool *inside) { @@ -2152,9 +2172,9 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request, * pointing inside the ring, matches the batch_obj address range. * However this is extremely unlikely. */ - if (request->batch_obj) { - if (i915_head_inside_object(acthd, request->batch_obj)) { + if (i915_head_inside_object(acthd, request->batch_obj, + request_to_vm(request))) { *inside = true; return true; } @@ -2174,17 +2194,21 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring, { struct i915_ctx_hang_stats *hs = NULL; bool inside, guilty; + unsigned long offset = 0; /* Innocent until proven guilty */ guilty = false; - if (ring->hangcheck.action != wait && + if (request->batch_obj) + offset = i915_gem_obj_offset(request->batch_obj, + request_to_vm(request)); + + if (ring->hangcheck.action != HANGCHECK_WAIT && i915_request_guilty(request, acthd, &inside)) { - DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n", + DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n", ring->name, inside ? "inside" : "flushing", - request->batch_obj ? - request->batch_obj->gtt_offset : 0, + offset, request->ctx ? 
request->ctx->id : 0, acthd); @@ -2275,23 +2299,12 @@ void i915_gem_restore_fences(struct drm_device *dev) void i915_gem_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj; struct intel_ring_buffer *ring; int i; for_each_ring(ring, dev_priv, i) i915_gem_reset_ring_lists(dev_priv, ring); - /* Move everything out of the GPU domains to ensure we do any - * necessary invalidation upon reuse. - */ - list_for_each_entry(obj, - &dev_priv->mm.inactive_list, - mm_list) - { - obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; - } - i915_gem_restore_fences(dev); } @@ -2400,7 +2413,7 @@ i915_gem_retire_work_handler(struct work_struct *work) idle &= list_empty(&ring->request_list); } - if (!dev_priv->mm.suspended && !idle) + if (!dev_priv->ums.mm_suspended && !idle) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, round_jiffies_up_relative(HZ)); if (idle) @@ -2586,18 +2599,18 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) old_write_domain); } -/** - * Unbinds an object from the GTT aperture. - */ -int -i915_gem_object_unbind(struct drm_i915_gem_object *obj) +int i915_vma_unbind(struct i915_vma *vma) { + struct drm_i915_gem_object *obj = vma->obj; drm_i915_private_t *dev_priv = obj->base.dev->dev_private; int ret; - if (obj->gtt_space == NULL) + if (list_empty(&vma->vma_link)) return 0; + if (!drm_mm_node_allocated(&vma->node)) + goto destroy; + if (obj->pin_count) return -EBUSY; @@ -2618,7 +2631,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) if (ret) return ret; - trace_i915_gem_object_unbind(obj); + trace_i915_vma_unbind(vma); if (obj->has_global_gtt_mapping) i915_gem_gtt_unbind_object(obj); @@ -2629,18 +2642,46 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) i915_gem_gtt_finish_object(obj); i915_gem_object_unpin_pages(obj); - list_del(&obj->mm_list); - list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); + list_del(&vma->mm_list); /* Avoid an unnecessary call to unbind on rebind. */ - obj->map_and_fenceable = true; + if (i915_is_ggtt(vma->vm)) + obj->map_and_fenceable = true; - drm_mm_put_block(obj->gtt_space); - obj->gtt_space = NULL; - obj->gtt_offset = 0; + drm_mm_remove_node(&vma->node); + +destroy: + i915_gem_vma_destroy(vma); + + /* Since the unbound list is global, only move to that list if + * no more VMAs exist. + * NB: Until we have real VMAs there will only ever be one */ + WARN_ON(!list_empty(&obj->vma_list)); + if (list_empty(&obj->vma_list)) + list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); return 0; } +/** + * Unbinds an object from the global GTT aperture. 
+ */ +int +i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + struct i915_address_space *ggtt = &dev_priv->gtt.base; + + if (!i915_gem_obj_ggtt_bound(obj)) + return 0; + + if (obj->pin_count) + return -EBUSY; + + BUG_ON(obj->pages == NULL); + + return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt)); +} + int i915_gpu_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -2691,12 +2732,12 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, POSTING_READ(fence_reg); if (obj) { - u32 size = obj->gtt_space->size; + u32 size = i915_gem_obj_ggtt_size(obj); uint64_t val; - val = (uint64_t)((obj->gtt_offset + size - 4096) & + val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & 0xfffff000) << 32; - val |= obj->gtt_offset & 0xfffff000; + val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I965_FENCE_TILING_Y_SHIFT; @@ -2720,15 +2761,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, u32 val; if (obj) { - u32 size = obj->gtt_space->size; + u32 size = i915_gem_obj_ggtt_size(obj); int pitch_val; int tile_width; - WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || + WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) || (size & -size) != size || - (obj->gtt_offset & (size - 1)), - "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", - obj->gtt_offset, obj->map_and_fenceable, size); + (i915_gem_obj_ggtt_offset(obj) & (size - 1)), + "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", + i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size); if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) tile_width = 128; @@ -2739,7 +2780,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, pitch_val = obj->stride / tile_width; pitch_val = ffs(pitch_val) - 1; - val = obj->gtt_offset; + val = i915_gem_obj_ggtt_offset(obj); if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; val |= I915_FENCE_SIZE_BITS(size); @@ -2764,19 +2805,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg, uint32_t val; if (obj) { - u32 size = obj->gtt_space->size; + u32 size = i915_gem_obj_ggtt_size(obj); uint32_t pitch_val; - WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || + WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) || (size & -size) != size || - (obj->gtt_offset & (size - 1)), - "object 0x%08x not 512K or pot-size 0x%08x aligned\n", - obj->gtt_offset, size); + (i915_gem_obj_ggtt_offset(obj) & (size - 1)), - "object 0x%08lx not 512K or pot-size 0x%08x aligned\n", + i915_gem_obj_ggtt_offset(obj), size); pitch_val = obj->stride / 128; pitch_val = ffs(pitch_val) - 1; - val = obj->gtt_offset; + val = i915_gem_obj_ggtt_offset(obj); if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; val |= I830_FENCE_SIZE_BITS(size); @@ -2997,7 +3038,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev, if (HAS_LLC(dev)) return true; - if (gtt_space == NULL) + if (!drm_mm_node_allocated(gtt_space)) return true; if (list_empty(&gtt_space->node_list)) @@ -3030,8 +3071,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev) if (obj->cache_level != obj->gtt_space->color) { printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", - obj->gtt_space->start, -
obj->gtt_space->start + obj->gtt_space->size, + i915_gem_obj_ggtt_offset(obj), + i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj), obj->cache_level, obj->gtt_space->color); err++; @@ -3042,8 +3083,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev) obj->gtt_space, obj->cache_level)) { printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", - obj->gtt_space->start, - obj->gtt_space->start + obj->gtt_space->size, + i915_gem_obj_ggtt_offset(obj), + i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj), obj->cache_level); err++; continue; @@ -3058,18 +3099,18 @@ static void i915_gem_verify_gtt(struct drm_device *dev) * Finds free space in the GTT aperture and binds the object there. */ static int -i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, - unsigned alignment, - bool map_and_fenceable, - bool nonblocking) +i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + unsigned alignment, + bool map_and_fenceable, + bool nonblocking) { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_mm_node *node; u32 size, fence_size, fence_alignment, unfenced_alignment; - bool mappable, fenceable; - size_t gtt_max = map_and_fenceable ? - dev_priv->gtt.mappable_end : dev_priv->gtt.total; + size_t gtt_max = + map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total; + struct i915_vma *vma; int ret; fence_size = i915_gem_get_gtt_size(dev, @@ -3110,77 +3151,89 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, i915_gem_object_pin_pages(obj); - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (node == NULL) { - i915_gem_object_unpin_pages(obj); - return -ENOMEM; + BUG_ON(!i915_is_ggtt(vm)); + + vma = i915_gem_obj_lookup_or_create_vma(obj, vm); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_unpin; } + /* For now we only ever use 1 vma per object */ + WARN_ON(!list_is_singular(&obj->vma_list)); + search_free: - ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, + ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, size, alignment, - obj->cache_level, 0, gtt_max); + obj->cache_level, 0, gtt_max, + DRM_MM_SEARCH_DEFAULT); if (ret) { - ret = i915_gem_evict_something(dev, size, alignment, + ret = i915_gem_evict_something(dev, vm, size, alignment, obj->cache_level, map_and_fenceable, nonblocking); if (ret == 0) goto search_free; - i915_gem_object_unpin_pages(obj); - kfree(node); - return ret; + goto err_free_vma; } - if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) { - i915_gem_object_unpin_pages(obj); - drm_mm_put_block(node); - return -EINVAL; + if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node, + obj->cache_level))) { + ret = -EINVAL; + goto err_remove_node; } ret = i915_gem_gtt_prepare_object(obj); - if (ret) { - i915_gem_object_unpin_pages(obj); - drm_mm_put_block(node); - return ret; - } + if (ret) + goto err_remove_node; list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); - list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + list_add_tail(&vma->mm_list, &vm->inactive_list); - obj->gtt_space = node; - obj->gtt_offset = node->start; + if (i915_is_ggtt(vm)) { + bool mappable, fenceable; - fenceable = - node->size == fence_size && - (node->start & (fence_alignment - 1)) == 0; + fenceable = (vma->node.size == fence_size && + (vma->node.start & (fence_alignment - 1)) == 0); - mappable = - obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end; + mappable = 
(vma->node.start + obj->base.size <= + dev_priv->gtt.mappable_end); + + obj->map_and_fenceable = mappable && fenceable; + } - obj->map_and_fenceable = mappable && fenceable; + WARN_ON(map_and_fenceable && !obj->map_and_fenceable); - trace_i915_gem_object_bind(obj, map_and_fenceable); + trace_i915_vma_bind(vma, map_and_fenceable); i915_gem_verify_gtt(dev); return 0; + +err_remove_node: + drm_mm_remove_node(&vma->node); +err_free_vma: + i915_gem_vma_destroy(vma); +err_unpin: + i915_gem_object_unpin_pages(obj); + return ret; } -void -i915_gem_clflush_object(struct drm_i915_gem_object *obj) +bool +i915_gem_clflush_object(struct drm_i915_gem_object *obj, + bool force) { /* If we don't have a page list set up, then we're not pinned * to GPU, and we can ignore the cache flush because it'll happen * again at bind time. */ if (obj->pages == NULL) - return; + return false; /* * Stolen memory is always coherent with the GPU as it is explicitly * marked as wc by the system, or the system is cache-coherent. */ if (obj->stolen) - return; + return false; /* If the GPU is snooping the contents of the CPU cache, * we do not need to manually clear the CPU cache lines. However, @@ -3190,12 +3243,13 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj) * snooping behaviour occurs naturally as the result of our domain * tracking. */ - if (obj->cache_level != I915_CACHE_NONE) - return; + if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) + return false; trace_i915_gem_object_clflush(obj); - drm_clflush_sg(obj->pages); + + return true; } /** Flushes the GTT write domain for the object if it's dirty. */ @@ -3227,15 +3281,17 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) /** Flushes the CPU write domain for the object if it's dirty. */ static void -i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) +i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, + bool force) { uint32_t old_write_domain; if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) return; - i915_gem_clflush_object(obj); - i915_gem_chipset_flush(obj->base.dev); + if (i915_gem_clflush_object(obj, force)) + i915_gem_chipset_flush(obj->base.dev); + old_write_domain = obj->base.write_domain; obj->base.write_domain = 0; @@ -3258,7 +3314,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) int ret; /* Not valid to be called on unbound objects. 
*/ - if (obj->gtt_space == NULL) + if (!i915_gem_obj_bound_any(obj)) return -EINVAL; if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) @@ -3268,7 +3324,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) if (ret) return ret; - i915_gem_object_flush_cpu_write_domain(obj); + i915_gem_object_flush_cpu_write_domain(obj, false); /* Serialise direct access to this object with the barriers for * coherent writes from the GPU, by effectively invalidating the @@ -3296,8 +3352,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) old_write_domain); /* And bump the LRU for this access */ - if (i915_gem_object_is_inactive(obj)) - list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + if (i915_gem_object_is_inactive(obj)) { + struct i915_vma *vma = i915_gem_obj_to_vma(obj, + &dev_priv->gtt.base); + if (vma) + list_move_tail(&vma->mm_list, + &dev_priv->gtt.base.inactive_list); + + } return 0; } @@ -3307,6 +3369,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct i915_vma *vma; int ret; if (obj->cache_level == cache_level) @@ -3317,13 +3380,17 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, return -EBUSY; } - if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) { - ret = i915_gem_object_unbind(obj); - if (ret) - return ret; + list_for_each_entry(vma, &obj->vma_list, vma_link) { + if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) { + ret = i915_vma_unbind(vma); + if (ret) + return ret; + + break; + } } - if (obj->gtt_space) { + if (i915_gem_obj_bound_any(obj)) { ret = i915_gem_object_finish_gpu(obj); if (ret) return ret; @@ -3345,11 +3412,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, if (obj->has_aliasing_ppgtt_mapping) i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, obj, cache_level); - - obj->gtt_space->color = cache_level; } - if (cache_level == I915_CACHE_NONE) { + list_for_each_entry(vma, &obj->vma_list, vma_link) + vma->node.color = cache_level; + obj->cache_level = cache_level; + + if (cpu_write_needs_clflush(obj)) { u32 old_read_domains, old_write_domain; /* If we're coming from LLC cached, then we haven't @@ -3359,7 +3428,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, * Just set it to the CPU cache for now. */ WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); - WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU); old_read_domains = obj->base.read_domains; old_write_domain = obj->base.write_domain; @@ -3372,7 +3440,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, old_write_domain); } - obj->cache_level = cache_level; i915_gem_verify_gtt(dev); return 0; } @@ -3394,7 +3461,20 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, goto unlock; } - args->caching = obj->cache_level != I915_CACHE_NONE; + switch (obj->cache_level) { + case I915_CACHE_LLC: + case I915_CACHE_L3_LLC: + args->caching = I915_CACHING_CACHED; + break; + + case I915_CACHE_WT: + args->caching = I915_CACHING_DISPLAY; + break; + + default: + args->caching = I915_CACHING_NONE; + break; + } drm_gem_object_unreference(&obj->base); unlock: @@ -3417,6 +3497,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, case I915_CACHING_CACHED: level = I915_CACHE_LLC; break; + case I915_CACHING_DISPLAY: + level = HAS_WT(dev) ? 
I915_CACHE_WT : I915_CACHE_NONE; + break; default: return -EINVAL; } @@ -3439,6 +3522,22 @@ unlock: return ret; } +static bool is_pin_display(struct drm_i915_gem_object *obj) +{ + /* There are 3 sources that pin objects: + * 1. The display engine (scanouts, sprites, cursors); + * 2. Reservations for execbuffer; + * 3. The user. + * + * We can ignore reservations as we hold the struct_mutex and + * are only called outside of the reservation path. The user + * can only increment pin_count once, and so if after + * subtracting the potential reference by the user, any pin_count + * remains, it must be due to another use by the display engine. + */ + return obj->pin_count - !!obj->user_pin_count; +} + /* * Prepare buffer for display plane (scanout, cursors, etc). * Can be called from an uninterruptible phase (modesetting) and allows @@ -3458,6 +3557,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, return ret; } + /* Mark the pin_display early so that we account for the + * display coherency whilst setting up the cache domains. + */ + obj->pin_display = true; + /* The display engine is not coherent with the LLC cache on gen6. As * a result, we make sure that the pinning that is about to occur is * done with uncached PTEs. This is lowest common denominator for all @@ -3467,19 +3571,20 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, * of uncaching, which would allow us to flush all the LLC-cached data * with that bit in the PTE to main memory with just one PIPE_CONTROL. */ - ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE); + ret = i915_gem_object_set_cache_level(obj, + HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE); if (ret) - return ret; + goto err_unpin_display; /* As the user may map the buffer once pinned in the display plane * (e.g. libkms for the bootup splash), we have to ensure that we * always use map_and_fenceable for all scanout buffers. */ - ret = i915_gem_object_pin(obj, alignment, true, false); + ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false); if (ret) - return ret; + goto err_unpin_display; - i915_gem_object_flush_cpu_write_domain(obj); + i915_gem_object_flush_cpu_write_domain(obj, true); old_write_domain = obj->base.write_domain; old_read_domains = obj->base.read_domains; @@ -3495,6 +3600,17 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, old_write_domain); return 0; + +err_unpin_display: + obj->pin_display = is_pin_display(obj); + return ret; +} + +void +i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj) +{ + i915_gem_object_unpin(obj); + obj->pin_display = is_pin_display(obj); } int @@ -3540,7 +3656,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) /* Flush the CPU cache if it's still invalid. 
*/ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { - i915_gem_clflush_object(obj); + i915_gem_clflush_object(obj, false); obj->base.read_domains |= I915_GEM_DOMAIN_CPU; } @@ -3618,37 +3734,44 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) int i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, uint32_t alignment, bool map_and_fenceable, bool nonblocking) { + struct i915_vma *vma; int ret; if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) return -EBUSY; - if (obj->gtt_space != NULL) { - if ((alignment && obj->gtt_offset & (alignment - 1)) || + WARN_ON(map_and_fenceable && !i915_is_ggtt(vm)); + + vma = i915_gem_obj_to_vma(obj, vm); + + if (vma) { + if ((alignment && + vma->node.start & (alignment - 1)) || (map_and_fenceable && !obj->map_and_fenceable)) { WARN(obj->pin_count, "bo is already pinned with incorrect alignment:" - " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," + " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," " obj->map_and_fenceable=%d\n", - obj->gtt_offset, alignment, + i915_gem_obj_offset(obj, vm), alignment, map_and_fenceable, obj->map_and_fenceable); - ret = i915_gem_object_unbind(obj); + ret = i915_vma_unbind(vma); if (ret) return ret; } } - if (obj->gtt_space == NULL) { + if (!i915_gem_obj_bound(obj, vm)) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - ret = i915_gem_object_bind_to_gtt(obj, alignment, - map_and_fenceable, - nonblocking); + ret = i915_gem_object_bind_to_vm(obj, vm, alignment, + map_and_fenceable, + nonblocking); if (ret) return ret; @@ -3669,7 +3792,7 @@ void i915_gem_object_unpin(struct drm_i915_gem_object *obj) { BUG_ON(obj->pin_count == 0); - BUG_ON(obj->gtt_space == NULL); + BUG_ON(!i915_gem_obj_bound_any(obj)); if (--obj->pin_count == 0) obj->pin_mappable = false; @@ -3707,7 +3830,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, } if (obj->user_pin_count == 0) { - ret = i915_gem_object_pin(obj, args->alignment, true, false); + ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false); if (ret) goto out; } @@ -3715,11 +3838,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, obj->user_pin_count++; obj->pin_filp = file; - /* XXX - flush the CPU caches for pinned objects - * as the X server doesn't manage domains yet - */ - i915_gem_object_flush_cpu_write_domain(obj); - args->offset = obj->gtt_offset; + args->offset = i915_gem_obj_ggtt_offset(obj); out: drm_gem_object_unreference(&obj->base); unlock: @@ -3858,10 +3977,11 @@ unlock: void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops) { - INIT_LIST_HEAD(&obj->mm_list); INIT_LIST_HEAD(&obj->global_list); INIT_LIST_HEAD(&obj->ring_list); INIT_LIST_HEAD(&obj->exec_list); + INIT_LIST_HEAD(&obj->obj_exec_link); + INIT_LIST_HEAD(&obj->vma_list); obj->ops = ops; @@ -3926,6 +4046,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, } else obj->cache_level = I915_CACHE_NONE; + trace_i915_gem_object_create(obj); + return obj; } @@ -3941,6 +4063,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct i915_vma *vma, *next; trace_i915_gem_object_destroy(obj); @@ -3948,15 +4071,21 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) i915_gem_detach_phys_object(dev, obj); obj->pin_count = 0; - if (WARN_ON(i915_gem_object_unbind(obj) 
== -ERESTARTSYS)) { - bool was_interruptible; + /* NB: 0 or 1 elements */ + WARN_ON(!list_empty(&obj->vma_list) && + !list_is_singular(&obj->vma_list)); + list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { + int ret = i915_vma_unbind(vma); + if (WARN_ON(ret == -ERESTARTSYS)) { + bool was_interruptible; - was_interruptible = dev_priv->mm.interruptible; - dev_priv->mm.interruptible = false; + was_interruptible = dev_priv->mm.interruptible; + dev_priv->mm.interruptible = false; - WARN_ON(i915_gem_object_unbind(obj)); + WARN_ON(i915_vma_unbind(vma)); - dev_priv->mm.interruptible = was_interruptible; + dev_priv->mm.interruptible = was_interruptible; + } } /* Stolen objects don't hold a ref, but do hold pin count. Fix that up @@ -3982,15 +4111,42 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) i915_gem_object_free(obj); } +struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm) +{ + struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); + if (vma == NULL) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&vma->vma_link); + INIT_LIST_HEAD(&vma->mm_list); + INIT_LIST_HEAD(&vma->exec_list); + vma->vm = vm; + vma->obj = obj; + + /* Keep GGTT vmas first to make debug easier */ + if (i915_is_ggtt(vm)) + list_add(&vma->vma_link, &obj->vma_list); + else + list_add_tail(&vma->vma_link, &obj->vma_list); + + return vma; +} + +void i915_gem_vma_destroy(struct i915_vma *vma) +{ + WARN_ON(vma->node.allocated); + list_del(&vma->vma_link); + kfree(vma); +} + int i915_gem_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; int ret; - mutex_lock(&dev->struct_mutex); - - if (dev_priv->mm.suspended) { + if (dev_priv->ums.mm_suspended) { mutex_unlock(&dev->struct_mutex); return 0; } @@ -4006,18 +4162,11 @@ i915_gem_idle(struct drm_device *dev) if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_gem_evict_everything(dev); - /* Hack! Don't let anybody do execbuf while we don't control the chip. - * We need to replace this with a semaphore, or something. - * And not confound mm.suspended! - */ - dev_priv->mm.suspended = 1; del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); i915_kernel_lost_context(dev); i915_gem_cleanup_ringbuffer(dev); - mutex_unlock(&dev->struct_mutex); - /* Cancel the retire work handler, which should be idle now. 
*/ cancel_delayed_work_sync(&dev_priv->mm.retire_work); @@ -4150,8 +4299,8 @@ i915_gem_init_hw(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) return -EIO; - if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) - I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000); + if (dev_priv->ellc_size) + I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); if (HAS_PCH_NOP(dev)) { u32 temp = I915_READ(GEN7_MSG_CTL); @@ -4227,7 +4376,7 @@ int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int ret; if (drm_core_check_feature(dev, DRIVER_MODESET)) @@ -4239,7 +4388,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, } mutex_lock(&dev->struct_mutex); - dev_priv->mm.suspended = 0; + dev_priv->ums.mm_suspended = 0; ret = i915_gem_init_hw(dev); if (ret != 0) { @@ -4247,7 +4396,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, return ret; } - BUG_ON(!list_empty(&dev_priv->mm.active_list)); + BUG_ON(!list_empty(&dev_priv->gtt.base.active_list)); mutex_unlock(&dev->struct_mutex); ret = drm_irq_install(dev); @@ -4259,7 +4408,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, cleanup_ringbuffer: mutex_lock(&dev->struct_mutex); i915_gem_cleanup_ringbuffer(dev); - dev_priv->mm.suspended = 1; + dev_priv->ums.mm_suspended = 1; mutex_unlock(&dev->struct_mutex); return ret; @@ -4269,11 +4418,26 @@ int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; drm_irq_uninstall(dev); - return i915_gem_idle(dev); + + mutex_lock(&dev->struct_mutex); + ret = i915_gem_idle(dev); + + /* Hack! Don't let anybody do execbuf while we don't control the chip. + * We need to replace this with a semaphore, or something. + * And not confound ums.mm_suspended! 
+ */ + if (ret != 0) + dev_priv->ums.mm_suspended = 1; + mutex_unlock(&dev->struct_mutex); + + return ret; } void @@ -4284,9 +4448,11 @@ i915_gem_lastclose(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) return; + mutex_lock(&dev->struct_mutex); ret = i915_gem_idle(dev); if (ret) DRM_ERROR("failed to idle hardware: %d\n", ret); + mutex_unlock(&dev->struct_mutex); } static void @@ -4296,6 +4462,16 @@ init_ring_lists(struct intel_ring_buffer *ring) INIT_LIST_HEAD(&ring->request_list); } +static void i915_init_vm(struct drm_i915_private *dev_priv, + struct i915_address_space *vm) +{ + vm->dev = dev_priv->dev; + INIT_LIST_HEAD(&vm->active_list); + INIT_LIST_HEAD(&vm->inactive_list); + INIT_LIST_HEAD(&vm->global_link); + list_add(&vm->global_link, &dev_priv->vm_list); +} + void i915_gem_load(struct drm_device *dev) { @@ -4308,8 +4484,9 @@ i915_gem_load(struct drm_device *dev) SLAB_HWCACHE_ALIGN, NULL); - INIT_LIST_HEAD(&dev_priv->mm.active_list); - INIT_LIST_HEAD(&dev_priv->mm.inactive_list); + INIT_LIST_HEAD(&dev_priv->vm_list); + i915_init_vm(dev_priv, &dev_priv->gtt.base); + INIT_LIST_HEAD(&dev_priv->mm.unbound_list); INIT_LIST_HEAD(&dev_priv->mm.bound_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); @@ -4608,11 +4785,101 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) if (obj->pages_pin_count == 0) cnt += obj->base.size >> PAGE_SHIFT; - list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) + + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + if (obj->active) + continue; + if (obj->pin_count == 0 && obj->pages_pin_count == 0) cnt += obj->base.size >> PAGE_SHIFT; + } if (unlock) mutex_unlock(&dev->struct_mutex); return cnt; } + +/* All the new VM stuff */ +unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, + struct i915_address_space *vm) +{ + struct drm_i915_private *dev_priv = o->base.dev->dev_private; + struct i915_vma *vma; + + if (vm == &dev_priv->mm.aliasing_ppgtt->base) + vm = &dev_priv->gtt.base; + + BUG_ON(list_empty(&o->vma_list)); + list_for_each_entry(vma, &o->vma_list, vma_link) { + if (vma->vm == vm) + return vma->node.start; + + } + return -1; +} + +bool i915_gem_obj_bound(struct drm_i915_gem_object *o, + struct i915_address_space *vm) +{ + struct i915_vma *vma; + + list_for_each_entry(vma, &o->vma_list, vma_link) + if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) + return true; + + return false; +} + +bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) +{ + struct drm_i915_private *dev_priv = o->base.dev->dev_private; + struct i915_address_space *vm; + + list_for_each_entry(vm, &dev_priv->vm_list, global_link) + if (i915_gem_obj_bound(o, vm)) + return true; + + return false; +} + +unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, + struct i915_address_space *vm) +{ + struct drm_i915_private *dev_priv = o->base.dev->dev_private; + struct i915_vma *vma; + + if (vm == &dev_priv->mm.aliasing_ppgtt->base) + vm = &dev_priv->gtt.base; + + BUG_ON(list_empty(&o->vma_list)); + + list_for_each_entry(vma, &o->vma_list, vma_link) + if (vma->vm == vm) + return vma->node.size; + + return 0; +} + +struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm) +{ + struct i915_vma *vma; + list_for_each_entry(vma, &obj->vma_list, vma_link) + if (vma->vm == vm) + return vma; + + return NULL; +} + +struct i915_vma * +i915_gem_obj_lookup_or_create_vma(struct 
drm_i915_gem_object *obj, + struct i915_address_space *vm) +{ + struct i915_vma *vma; + + vma = i915_gem_obj_to_vma(obj, vm); + if (!vma) + vma = i915_gem_vma_create(obj, vm); + + return vma; +} diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 51b7a2171cae..403309c2a7d6 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -155,7 +155,7 @@ create_hw_context(struct drm_device *dev, if (INTEL_INFO(dev)->gen >= 7) { ret = i915_gem_object_set_cache_level(ctx->obj, - I915_CACHE_LLC_MLC); + I915_CACHE_L3_LLC); /* Failure shouldn't ever happen this early */ if (WARN_ON(ret)) goto err_out; @@ -214,7 +214,7 @@ static int create_default_context(struct drm_i915_private *dev_priv) * default context. */ dev_priv->ring[RCS].default_context = ctx; - ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false); + ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false); if (ret) { DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret); goto err_destroy; @@ -304,31 +304,24 @@ static int context_idr_cleanup(int id, void *p, void *data) } struct i915_ctx_hang_stats * -i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring, +i915_gem_context_get_hang_stats(struct drm_device *dev, struct drm_file *file, u32 id) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_file_private *file_priv = file->driver_priv; - struct i915_hw_context *to; - - if (dev_priv->hw_contexts_disabled) - return ERR_PTR(-ENOENT); - - if (ring->id != RCS) - return ERR_PTR(-EINVAL); - - if (file == NULL) - return ERR_PTR(-EINVAL); + struct i915_hw_context *ctx; if (id == DEFAULT_CONTEXT_ID) return &file_priv->hang_stats; - to = i915_gem_context_get(file->driver_priv, id); - if (to == NULL) + ctx = NULL; + if (!dev_priv->hw_contexts_disabled) + ctx = i915_gem_context_get(file->driver_priv, id); + if (ctx == NULL) return ERR_PTR(-ENOENT); - return &to->hang_stats; + return &ctx->hang_stats; } void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) @@ -377,7 +370,7 @@ mi_set_context(struct intel_ring_buffer *ring, intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_SET_CONTEXT); - intel_ring_emit(ring, new_context->obj->gtt_offset | + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) | MI_MM_SPACE_GTT | MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN | @@ -407,7 +400,7 @@ static int do_switch(struct i915_hw_context *to) if (from == to) return 0; - ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false); + ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false); if (ret) return ret; @@ -443,7 +436,10 @@ static int do_switch(struct i915_hw_context *to) * MI_SET_CONTEXT instead of when the next seqno has completed. */ if (from != NULL) { + struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private; + struct i915_address_space *ggtt = &dev_priv->gtt.base; from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; + list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list); i915_gem_object_move_to_active(from->obj, ring); /* As long as MI_SET_CONTEXT is serializing, ie. 
it flushes the * whole damn pipeline, we don't need to explicitly mark the diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 582e6a5f3dac..775d506b3208 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -97,7 +97,7 @@ i915_verify_lists(struct drm_device *dev) } } - list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) { + list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) { if (obj->base.dev != dev || !atomic_read(&obj->base.refcount.refcount)) { DRM_ERROR("freed inactive %p\n", obj); @@ -115,73 +115,4 @@ i915_verify_lists(struct drm_device *dev) return warned = err; } -#endif /* WATCH_INACTIVE */ - -#if WATCH_COHERENCY -void -i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) -{ - struct drm_device *dev = obj->base.dev; - int page; - uint32_t *gtt_mapping; - uint32_t *backing_map = NULL; - int bad_count = 0; - - DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", - __func__, obj, obj->gtt_offset, handle, - obj->size / 1024); - - gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset, - obj->base.size); - if (gtt_mapping == NULL) { - DRM_ERROR("failed to map GTT space\n"); - return; - } - - for (page = 0; page < obj->size / PAGE_SIZE; page++) { - int i; - - backing_map = kmap_atomic(obj->pages[page]); - - if (backing_map == NULL) { - DRM_ERROR("failed to map backing page\n"); - goto out; - } - - for (i = 0; i < PAGE_SIZE / 4; i++) { - uint32_t cpuval = backing_map[i]; - uint32_t gttval = readl(gtt_mapping + - page * 1024 + i); - - if (cpuval != gttval) { - DRM_INFO("incoherent CPU vs GPU at 0x%08x: " - "0x%08x vs 0x%08x\n", - (int)(obj->gtt_offset + - page * PAGE_SIZE + i * 4), - cpuval, gttval); - if (bad_count++ >= 8) { - DRM_INFO("...\n"); - goto out; - } - } - } - kunmap_atomic(backing_map); - backing_map = NULL; - } - - out: - if (backing_map != NULL) - kunmap_atomic(backing_map); - iounmap(gtt_mapping); - - /* give syslog time to catch up */ - msleep(1); - - /* Directly flush the object, since we just loaded values with the CPU - * from the backing pages and we don't want to disturb the cache - * management that we're trying to observe. 
- */ - - i915_gem_clflush_object(obj); -} -#endif +#endif /* WATCH_LIST */ diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 9e6578330801..e918b05fcbdd 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -27,10 +27,15 @@ #include "i915_drv.h" #include <linux/dma-buf.h> +static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf) +{ + return to_intel_bo(buf->priv); +} + static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction dir) { - struct drm_i915_gem_object *obj = attachment->dmabuf->priv; + struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); struct sg_table *st; struct scatterlist *src, *dst; int ret, i; @@ -85,7 +90,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *sg, enum dma_data_direction dir) { - struct drm_i915_gem_object *obj = attachment->dmabuf->priv; + struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); mutex_lock(&obj->base.dev->struct_mutex); @@ -98,20 +103,9 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, mutex_unlock(&obj->base.dev->struct_mutex); } -static void i915_gem_dmabuf_release(struct dma_buf *dma_buf) -{ - struct drm_i915_gem_object *obj = dma_buf->priv; - - if (obj->base.export_dma_buf == dma_buf) { - /* drop the reference on the export fd holds */ - obj->base.export_dma_buf = NULL; - drm_gem_object_unreference_unlocked(&obj->base); - } -} - static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) { - struct drm_i915_gem_object *obj = dma_buf->priv; + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_device *dev = obj->base.dev; struct sg_page_iter sg_iter; struct page **pages; @@ -159,7 +153,7 @@ error: static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) { - struct drm_i915_gem_object *obj = dma_buf->priv; + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_device *dev = obj->base.dev; int ret; @@ -202,7 +196,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction) { - struct drm_i915_gem_object *obj = dma_buf->priv; + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_device *dev = obj->base.dev; int ret; bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE); @@ -219,7 +213,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size static const struct dma_buf_ops i915_dmabuf_ops = { .map_dma_buf = i915_gem_map_dma_buf, .unmap_dma_buf = i915_gem_unmap_dma_buf, - .release = i915_gem_dmabuf_release, + .release = drm_gem_dmabuf_release, .kmap = i915_gem_dmabuf_kmap, .kmap_atomic = i915_gem_dmabuf_kmap_atomic, .kunmap = i915_gem_dmabuf_kunmap, @@ -233,9 +227,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = { struct dma_buf *i915_gem_prime_export(struct drm_device *dev, struct drm_gem_object *gem_obj, int flags) { - struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); - - return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags); + return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags); } static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) @@ -272,7 +264,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, /* is this one of own objects? 
*/ if (dma_buf->ops == &i915_dmabuf_ops) { - obj = dma_buf->priv; + obj = dma_buf_to_obj(dma_buf); /* is it from our device? */ if (obj->base.dev == dev) { /* @@ -297,12 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, goto fail_detach; } - ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size); - if (ret) { - i915_gem_object_free(obj); - goto fail_detach; - } - + drm_gem_private_object_init(dev, &obj->base, dma_buf->size); i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops); obj->base.import_attach = attach; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index c86d5d9356fd..91b700155850 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -32,23 +32,23 @@ #include "i915_trace.h" static bool -mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) +mark_free(struct i915_vma *vma, struct list_head *unwind) { - if (obj->pin_count) + if (vma->obj->pin_count) return false; - list_add(&obj->exec_list, unwind); - return drm_mm_scan_add_block(obj->gtt_space); + list_add(&vma->exec_list, unwind); + return drm_mm_scan_add_block(&vma->node); } int -i915_gem_evict_something(struct drm_device *dev, int min_size, - unsigned alignment, unsigned cache_level, +i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, + int min_size, unsigned alignment, unsigned cache_level, bool mappable, bool nonblocking) { drm_i915_private_t *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; - struct drm_i915_gem_object *obj; + struct i915_vma *vma; int ret = 0; trace_i915_gem_evict(dev, min_size, alignment, mappable); @@ -77,17 +77,17 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, */ INIT_LIST_HEAD(&unwind_list); - if (mappable) - drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, - min_size, alignment, cache_level, - 0, dev_priv->gtt.mappable_end); - else - drm_mm_init_scan(&dev_priv->mm.gtt_space, - min_size, alignment, cache_level); + if (mappable) { + BUG_ON(!i915_is_ggtt(vm)); + drm_mm_init_scan_with_range(&vm->mm, min_size, + alignment, cache_level, 0, + dev_priv->gtt.mappable_end); + } else + drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); /* First see if there is a large enough contiguous idle region... */ - list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { - if (mark_free(obj, &unwind_list)) + list_for_each_entry(vma, &vm->inactive_list, mm_list) { + if (mark_free(vma, &unwind_list)) goto found; } @@ -95,22 +95,21 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, goto none; /* Now merge in the soon-to-be-expired objects... */ - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { - if (mark_free(obj, &unwind_list)) + list_for_each_entry(vma, &vm->active_list, mm_list) { + if (mark_free(vma, &unwind_list)) goto found; } none: /* Nothing found, clean up and bail out! */ while (!list_empty(&unwind_list)) { - obj = list_first_entry(&unwind_list, - struct drm_i915_gem_object, + vma = list_first_entry(&unwind_list, + struct i915_vma, exec_list); - - ret = drm_mm_scan_remove_block(obj->gtt_space); + ret = drm_mm_scan_remove_block(&vma->node); BUG_ON(ret); - list_del_init(&obj->exec_list); + list_del_init(&vma->exec_list); } /* We expect the caller to unpin, evict all and try again, or give up. @@ -124,27 +123,30 @@ found: * temporary list. 
*/ INIT_LIST_HEAD(&eviction_list); while (!list_empty(&unwind_list)) { - obj = list_first_entry(&unwind_list, - struct drm_i915_gem_object, + vma = list_first_entry(&unwind_list, + struct i915_vma, exec_list); - if (drm_mm_scan_remove_block(obj->gtt_space)) { - list_move(&obj->exec_list, &eviction_list); - drm_gem_object_reference(&obj->base); + if (drm_mm_scan_remove_block(&vma->node)) { + list_move(&vma->exec_list, &eviction_list); + drm_gem_object_reference(&vma->obj->base); continue; } - list_del_init(&obj->exec_list); + list_del_init(&vma->exec_list); } /* Unbinding will emit any required flushes */ while (!list_empty(&eviction_list)) { - obj = list_first_entry(&eviction_list, - struct drm_i915_gem_object, + struct drm_gem_object *obj; + vma = list_first_entry(&eviction_list, + struct i915_vma, exec_list); + + obj = &vma->obj->base; + list_del_init(&vma->exec_list); if (ret == 0) - ret = i915_gem_object_unbind(obj); + ret = i915_vma_unbind(vma); - list_del_init(&obj->exec_list); - drm_gem_object_unreference(&obj->base); + drm_gem_object_unreference(obj); } return ret; @@ -154,12 +156,18 @@ int i915_gem_evict_everything(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj, *next; - bool lists_empty; + struct i915_address_space *vm; + struct i915_vma *vma, *next; + bool lists_empty = true; int ret; - lists_empty = (list_empty(&dev_priv->mm.inactive_list) && - list_empty(&dev_priv->mm.active_list)); + list_for_each_entry(vm, &dev_priv->vm_list, global_link) { + lists_empty = (list_empty(&vm->inactive_list) && + list_empty(&vm->active_list)); + if (!lists_empty) + lists_empty = false; + } + if (lists_empty) return -ENOSPC; @@ -176,10 +184,11 @@ i915_gem_evict_everything(struct drm_device *dev) i915_gem_retire_requests(dev); /* Having flushed everything, unbind() should never raise an error */ - list_for_each_entry_safe(obj, next, - &dev_priv->mm.inactive_list, mm_list) - if (obj->pin_count == 0) - WARN_ON(i915_gem_object_unbind(obj)); + list_for_each_entry(vm, &dev_priv->vm_list, global_link) { + list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) + if (vma->obj->pin_count == 0) + WARN_ON(i915_vma_unbind(vma)); + } return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 87a3227e5179..792c52a235ee 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -172,9 +172,60 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) } static int +relocate_entry_cpu(struct drm_i915_gem_object *obj, + struct drm_i915_gem_relocation_entry *reloc) +{ + uint32_t page_offset = offset_in_page(reloc->offset); + char *vaddr; + int ret = -EINVAL; + + ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret) + return ret; + + vaddr = kmap_atomic(i915_gem_object_get_page(obj, + reloc->offset >> PAGE_SHIFT)); + *(uint32_t *)(vaddr + page_offset) = reloc->delta; + kunmap_atomic(vaddr); + + return 0; +} + +static int +relocate_entry_gtt(struct drm_i915_gem_object *obj, + struct drm_i915_gem_relocation_entry *reloc) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t __iomem *reloc_entry; + void __iomem *reloc_page; + int ret = -EINVAL; + + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return ret; + + ret = i915_gem_object_put_fence(obj); + if (ret) + return ret; + + /* Map the page containing the relocation we're going to perform. 
*/ + reloc->offset += i915_gem_obj_ggtt_offset(obj); + reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, + reloc->offset & PAGE_MASK); + reloc_entry = (uint32_t __iomem *) + (reloc_page + offset_in_page(reloc->offset)); + iowrite32(reloc->delta, reloc_entry); + io_mapping_unmap_atomic(reloc_page); + + return 0; +} + +static int i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, struct eb_objects *eb, - struct drm_i915_gem_relocation_entry *reloc) + struct drm_i915_gem_relocation_entry *reloc, + struct i915_address_space *vm) { struct drm_device *dev = obj->base.dev; struct drm_gem_object *target_obj; @@ -188,7 +239,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, return -ENOENT; target_i915_obj = to_intel_bo(target_obj); - target_offset = target_i915_obj->gtt_offset; + target_offset = i915_gem_obj_ggtt_offset(target_i915_obj); /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and * pipe_control writes because the gpu doesn't properly redirect them @@ -254,40 +305,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, return -EFAULT; reloc->delta += target_offset; - if (use_cpu_reloc(obj)) { - uint32_t page_offset = reloc->offset & ~PAGE_MASK; - char *vaddr; - - ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret) - return ret; - - vaddr = kmap_atomic(i915_gem_object_get_page(obj, - reloc->offset >> PAGE_SHIFT)); - *(uint32_t *)(vaddr + page_offset) = reloc->delta; - kunmap_atomic(vaddr); - } else { - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t __iomem *reloc_entry; - void __iomem *reloc_page; - - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - return ret; - - ret = i915_gem_object_put_fence(obj); - if (ret) - return ret; - - /* Map the page containing the relocation we're going to perform. 
*/ - reloc->offset += obj->gtt_offset; - reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, - reloc->offset & PAGE_MASK); - reloc_entry = (uint32_t __iomem *) - (reloc_page + (reloc->offset & ~PAGE_MASK)); - iowrite32(reloc->delta, reloc_entry); - io_mapping_unmap_atomic(reloc_page); - } + if (use_cpu_reloc(obj)) + ret = relocate_entry_cpu(obj, reloc); + else + ret = relocate_entry_gtt(obj, reloc); /* and update the user's relocation entry */ reloc->presumed_offset = target_offset; @@ -297,7 +318,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, static int i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, - struct eb_objects *eb) + struct eb_objects *eb, + struct i915_address_space *vm) { #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; @@ -321,7 +343,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, do { u64 offset = r->presumed_offset; - ret = i915_gem_execbuffer_relocate_entry(obj, eb, r); + ret = i915_gem_execbuffer_relocate_entry(obj, eb, r, + vm); if (ret) return ret; @@ -344,13 +367,15 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, static int i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, struct eb_objects *eb, - struct drm_i915_gem_relocation_entry *relocs) + struct drm_i915_gem_relocation_entry *relocs, + struct i915_address_space *vm) { const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; int i, ret; for (i = 0; i < entry->relocation_count; i++) { - ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]); + ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i], + vm); if (ret) return ret; } @@ -359,7 +384,8 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, } static int -i915_gem_execbuffer_relocate(struct eb_objects *eb) +i915_gem_execbuffer_relocate(struct eb_objects *eb, + struct i915_address_space *vm) { struct drm_i915_gem_object *obj; int ret = 0; @@ -373,7 +399,7 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb) */ pagefault_disable(); list_for_each_entry(obj, &eb->objects, exec_list) { - ret = i915_gem_execbuffer_relocate_object(obj, eb); + ret = i915_gem_execbuffer_relocate_object(obj, eb, vm); if (ret) break; } @@ -395,6 +421,7 @@ need_reloc_mappable(struct drm_i915_gem_object *obj) static int i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring, + struct i915_address_space *vm, bool *need_reloc) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; @@ -409,7 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, obj->tiling_mode != I915_TILING_NONE; need_mappable = need_fence || need_reloc_mappable(obj); - ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false); + ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable, + false); if (ret) return ret; @@ -436,8 +464,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, obj->has_aliasing_ppgtt_mapping = 1; } - if (entry->offset != obj->gtt_offset) { - entry->offset = obj->gtt_offset; + if (entry->offset != i915_gem_obj_offset(obj, vm)) { + entry->offset = i915_gem_obj_offset(obj, vm); *need_reloc = true; } @@ -458,7 +486,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) { struct drm_i915_gem_exec_object2 *entry; - if (!obj->gtt_space) + if (!i915_gem_obj_bound_any(obj)) return; entry = obj->exec_entry; @@ 
-475,6 +503,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) static int i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, struct list_head *objects, + struct i915_address_space *vm, bool *need_relocs) { struct drm_i915_gem_object *obj; @@ -529,31 +558,37 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, list_for_each_entry(obj, objects, exec_list) { struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; bool need_fence, need_mappable; + u32 obj_offset; - if (!obj->gtt_space) + if (!i915_gem_obj_bound(obj, vm)) continue; + obj_offset = i915_gem_obj_offset(obj, vm); need_fence = has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; need_mappable = need_fence || need_reloc_mappable(obj); - if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || + WARN_ON((need_mappable || need_fence) && + !i915_is_ggtt(vm)); + + if ((entry->alignment && + obj_offset & (entry->alignment - 1)) || (need_mappable && !obj->map_and_fenceable)) - ret = i915_gem_object_unbind(obj); + ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm)); else - ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); + ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs); if (ret) goto err; } /* Bind fresh objects */ list_for_each_entry(obj, objects, exec_list) { - if (obj->gtt_space) + if (i915_gem_obj_bound(obj, vm)) continue; - ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); + ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs); if (ret) goto err; } @@ -577,7 +612,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_file *file, struct intel_ring_buffer *ring, struct eb_objects *eb, - struct drm_i915_gem_exec_object2 *exec) + struct drm_i915_gem_exec_object2 *exec, + struct i915_address_space *vm) { struct drm_i915_gem_relocation_entry *reloc; struct drm_i915_gem_object *obj; @@ -661,14 +697,15 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, goto err; need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; - ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs); + ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs); if (ret) goto err; list_for_each_entry(obj, &eb->objects, exec_list) { int offset = obj->exec_entry - exec; ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, - reloc + reloc_offset[offset]); + reloc + reloc_offset[offset], + vm); if (ret) goto err; } @@ -691,6 +728,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, { struct drm_i915_gem_object *obj; uint32_t flush_domains = 0; + bool flush_chipset = false; int ret; list_for_each_entry(obj, objects, exec_list) { @@ -699,12 +737,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, return ret; if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) - i915_gem_clflush_object(obj); + flush_chipset |= i915_gem_clflush_object(obj, false); flush_domains |= obj->base.write_domain; } - if (flush_domains & I915_GEM_DOMAIN_CPU) + if (flush_chipset) i915_gem_chipset_flush(ring->dev); if (flush_domains & I915_GEM_DOMAIN_GTT) @@ -758,8 +796,10 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, if (!access_ok(VERIFY_WRITE, ptr, length)) return -EFAULT; - if (fault_in_multipages_readable(ptr, length)) - return -EFAULT; + if (likely(!i915_prefault_disable)) { + if (fault_in_multipages_readable(ptr, length)) + return -EFAULT; + } } return 0; @@ -767,6 +807,7 @@ validate_exec_list(struct 
drm_i915_gem_exec_object2 *exec, static void i915_gem_execbuffer_move_to_active(struct list_head *objects, + struct i915_address_space *vm, struct intel_ring_buffer *ring) { struct drm_i915_gem_object *obj; @@ -781,6 +822,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, obj->base.read_domains = obj->base.pending_read_domains; obj->fenced_gpu_access = obj->pending_fenced_gpu_access; + /* FIXME: This lookup gets fixed later <-- danvet */ + list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list); i915_gem_object_move_to_active(obj, ring); if (obj->base.write_domain) { obj->dirty = 1; @@ -835,7 +878,8 @@ static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file, struct drm_i915_gem_execbuffer2 *args, - struct drm_i915_gem_exec_object2 *exec) + struct drm_i915_gem_exec_object2 *exec, + struct i915_address_space *vm) { drm_i915_private_t *dev_priv = dev->dev_private; struct eb_objects *eb; @@ -872,7 +916,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, break; case I915_EXEC_BSD: ring = &dev_priv->ring[VCS]; - if (ctx_id != 0) { + if (ctx_id != DEFAULT_CONTEXT_ID) { DRM_DEBUG("Ring %s doesn't support contexts\n", ring->name); return -EPERM; @@ -880,7 +924,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, break; case I915_EXEC_BLT: ring = &dev_priv->ring[BCS]; - if (ctx_id != 0) { + if (ctx_id != DEFAULT_CONTEXT_ID) { DRM_DEBUG("Ring %s doesn't support contexts\n", ring->name); return -EPERM; @@ -888,7 +932,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, break; case I915_EXEC_VEBOX: ring = &dev_priv->ring[VECS]; - if (ctx_id != 0) { + if (ctx_id != DEFAULT_CONTEXT_ID) { DRM_DEBUG("Ring %s doesn't support contexts\n", ring->name); return -EPERM; @@ -972,7 +1016,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) goto pre_mutex_err; - if (dev_priv->mm.suspended) { + if (dev_priv->ums.mm_suspended) { mutex_unlock(&dev->struct_mutex); ret = -EBUSY; goto pre_mutex_err; @@ -997,17 +1041,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, /* Move the objects en-masse into the GTT, evicting if necessary. */ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; - ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs); + ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs); if (ret) goto err; /* The objects are in their final locations, apply the relocations. 
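 *
 * (Editor's aside, not part of the patch: conceptually a relocation
 * patches a dword in the batch with the target object's final address
 * in the chosen address space, roughly
 *
 *	u64 target = i915_gem_obj_offset(target_obj, vm) + r->delta;
 *	*(u32 *)(batch_cpu_map + r->offset) = lower_32_bits(target);
 *
 * where batch_cpu_map is a hypothetical CPU mapping of the batch page;
 * the real helpers are relocate_entry_cpu()/relocate_entry_gtt() above.)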
*/ if (need_relocs) - ret = i915_gem_execbuffer_relocate(eb); + ret = i915_gem_execbuffer_relocate(eb, vm); if (ret) { if (ret == -EFAULT) { ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, - eb, exec); + eb, exec, vm); BUG_ON(!mutex_is_locked(&dev->struct_mutex)); } if (ret) @@ -1058,7 +1102,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - exec_start = batch_obj->gtt_offset + args->batch_start_offset; + exec_start = i915_gem_obj_offset(batch_obj, vm) + + args->batch_start_offset; exec_len = args->batch_len; if (cliprects) { for (i = 0; i < args->num_cliprects; i++) { @@ -1083,7 +1128,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); - i915_gem_execbuffer_move_to_active(&eb->objects, ring); + i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring); i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); err: @@ -1104,6 +1149,7 @@ int i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_execbuffer *args = data; struct drm_i915_gem_execbuffer2 exec2; struct drm_i915_gem_exec_object *exec_list = NULL; @@ -1159,7 +1205,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, exec2.flags = I915_EXEC_RENDER; i915_execbuffer2_set_context_id(exec2, 0); - ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); + ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list, + &dev_priv->gtt.base); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ for (i = 0; i < args->buffer_count; i++) @@ -1185,6 +1232,7 @@ int i915_gem_execbuffer2(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_execbuffer2 *args = data; struct drm_i915_gem_exec_object2 *exec2_list = NULL; int ret; @@ -1215,7 +1263,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, return -EFAULT; } - ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); + ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list, + &dev_priv->gtt.base); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ ret = copy_to_user(to_user_ptr(args->buffers_ptr), diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 5101ab6869b4..212f6d8c35ec 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -28,8 +28,12 @@ #include "i915_trace.h" #include "intel_drv.h" +#define GEN6_PPGTT_PD_ENTRIES 512 +#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t)) + /* PPGTT stuff */ #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) +#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0)) #define GEN6_PDE_VALID (1 << 0) /* gen6+ has bit 11-4 for physical addr bit 39-32 */ @@ -39,19 +43,50 @@ #define GEN6_PTE_UNCACHED (1 << 1) #define HSW_PTE_UNCACHED (0) #define GEN6_PTE_CACHE_LLC (2 << 1) -#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) +#define GEN7_PTE_CACHE_L3_LLC (3 << 1) #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) +#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr) -static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev, - dma_addr_t addr, - enum i915_cache_level level) +/* Cacheability Control is a 4-bit value. 
The low three bits are stored in * * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE. + */ +#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \ + (((bits) & 0x8) << (11 - 3))) +#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2) +#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3) +#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb) +#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6) + +static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, + enum i915_cache_level level) { gen6_gtt_pte_t pte = GEN6_PTE_VALID; pte |= GEN6_PTE_ADDR_ENCODE(addr); switch (level) { - case I915_CACHE_LLC_MLC: - pte |= GEN6_PTE_CACHE_LLC_MLC; + case I915_CACHE_L3_LLC: + case I915_CACHE_LLC: + pte |= GEN6_PTE_CACHE_LLC; + break; + case I915_CACHE_NONE: + pte |= GEN6_PTE_UNCACHED; + break; + default: + WARN_ON(1); + } + + return pte; +} + +static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, + enum i915_cache_level level) +{ + gen6_gtt_pte_t pte = GEN6_PTE_VALID; + pte |= GEN6_PTE_ADDR_ENCODE(addr); + + switch (level) { + case I915_CACHE_L3_LLC: + pte |= GEN7_PTE_CACHE_L3_LLC; break; case I915_CACHE_LLC: pte |= GEN6_PTE_CACHE_LLC; @@ -60,7 +95,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev, pte |= GEN6_PTE_UNCACHED; break; default: - BUG(); + WARN_ON(1); } return pte; @@ -69,8 +104,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev, #define BYT_PTE_WRITEABLE (1 << 1) #define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2) -static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev, - dma_addr_t addr, +static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, enum i915_cache_level level) { gen6_gtt_pte_t pte = GEN6_PTE_VALID; @@ -87,22 +121,41 @@ static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev, return pte; } -static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev, - dma_addr_t addr, +static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, enum i915_cache_level level) { gen6_gtt_pte_t pte = GEN6_PTE_VALID; - pte |= GEN6_PTE_ADDR_ENCODE(addr); + pte |= HSW_PTE_ADDR_ENCODE(addr); if (level != I915_CACHE_NONE) - pte |= GEN6_PTE_CACHE_LLC; + pte |= HSW_WB_LLC_AGE3; + + return pte; +} + +static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, + enum i915_cache_level level) +{ + gen6_gtt_pte_t pte = GEN6_PTE_VALID; + pte |= HSW_PTE_ADDR_ENCODE(addr); + + switch (level) { + case I915_CACHE_NONE: + break; + case I915_CACHE_WT: + pte |= HSW_WT_ELLC_LLC_AGE0; + break; + default: + pte |= HSW_WB_ELLC_LLC_AGE0; + break; + } return pte; } static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) { - struct drm_i915_private *dev_priv = ppgtt->dev->dev_private; + struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; gen6_gtt_pte_t __iomem *pd_addr; uint32_t pd_entry; int i; @@ -181,18 +234,18 @@ static int gen6_ppgtt_enable(struct drm_device *dev) } /* PPGTT support for Sandybridge/Gen6 and later */ -static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, +static void gen6_ppgtt_clear_range(struct i915_address_space *vm, unsigned first_entry, unsigned num_entries) { + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); gen6_gtt_pte_t *pt_vaddr, scratch_pte; unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; unsigned last_pte, i; - scratch_pte = ppgtt->pte_encode(ppgtt->dev, - ppgtt->scratch_page_dma_addr, - I915_CACHE_LLC); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); while (num_entries) { last_pte = first_pte + 
num_entries; @@ -212,11 +265,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, } } -static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, +static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, struct sg_table *pages, unsigned first_entry, enum i915_cache_level cache_level) { + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); gen6_gtt_pte_t *pt_vaddr; unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; @@ -227,8 +282,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, dma_addr_t page_addr; page_addr = sg_page_iter_dma_address(&sg_iter); - pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr, - cache_level); + pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level); if (++act_pte == I915_PPGTT_PT_ENTRIES) { kunmap_atomic(pt_vaddr); act_pt++; @@ -240,13 +294,17 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, kunmap_atomic(pt_vaddr); } -static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) +static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); int i; + drm_mm_takedown(&ppgtt->base.mm); + if (ppgtt->pt_dma_addr) { for (i = 0; i < ppgtt->num_pd_entries; i++) - pci_unmap_page(ppgtt->dev->pdev, + pci_unmap_page(ppgtt->base.dev->pdev, ppgtt->pt_dma_addr[i], 4096, PCI_DMA_BIDIRECTIONAL); } @@ -260,7 +318,7 @@ static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) { - struct drm_device *dev = ppgtt->dev; + struct drm_device *dev = ppgtt->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned first_pd_entry_in_global_pt; int i; @@ -271,18 +329,13 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) * now. 
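 *
 * (Editor's note, not part of the patch: struct i915_hw_ppgtt now embeds
 * a struct i915_address_space as its "base" member, so the generic vm
 * callbacks above recover their ppgtt with
 *
 *	struct i915_hw_ppgtt *ppgtt =
 *		container_of(vm, struct i915_hw_ppgtt, base);
 *
 * which is also why gen6_ppgtt_init() below fills in ppgtt->base.*
 * instead of the old flat ppgtt->* fields.)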
*/ first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt); - if (IS_HASWELL(dev)) { - ppgtt->pte_encode = hsw_pte_encode; - } else if (IS_VALLEYVIEW(dev)) { - ppgtt->pte_encode = byt_pte_encode; - } else { - ppgtt->pte_encode = gen6_pte_encode; - } - ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; + ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; + ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; ppgtt->enable = gen6_ppgtt_enable; - ppgtt->clear_range = gen6_ppgtt_clear_range; - ppgtt->insert_entries = gen6_ppgtt_insert_entries; - ppgtt->cleanup = gen6_ppgtt_cleanup; + ppgtt->base.clear_range = gen6_ppgtt_clear_range; + ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; + ppgtt->base.cleanup = gen6_ppgtt_cleanup; + ppgtt->base.scratch = dev_priv->gtt.base.scratch; ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, GFP_KERNEL); if (!ppgtt->pt_pages) @@ -313,8 +366,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ppgtt->pt_dma_addr[i] = pt_addr; } - ppgtt->clear_range(ppgtt, 0, - ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); + ppgtt->base.clear_range(&ppgtt->base, 0, + ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES); ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t); @@ -347,8 +400,7 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) if (!ppgtt) return -ENOMEM; - ppgtt->dev = dev; - ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma; + ppgtt->base.dev = dev; if (INTEL_INFO(dev)->gen < 8) ret = gen6_ppgtt_init(ppgtt); @@ -357,8 +409,11 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) if (ret) kfree(ppgtt); - else + else { dev_priv->mm.aliasing_ppgtt = ppgtt; + drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, + ppgtt->base.total); + } return ret; } @@ -371,7 +426,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) if (!ppgtt) return; - ppgtt->cleanup(ppgtt); + ppgtt->base.cleanup(&ppgtt->base); dev_priv->mm.aliasing_ppgtt = NULL; } @@ -379,17 +434,17 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj, enum i915_cache_level cache_level) { - ppgtt->insert_entries(ppgtt, obj->pages, - obj->gtt_space->start >> PAGE_SHIFT, - cache_level); + ppgtt->base.insert_entries(&ppgtt->base, obj->pages, + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, + cache_level); } void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj) { - ppgtt->clear_range(ppgtt, - obj->gtt_space->start >> PAGE_SHIFT, - obj->base.size >> PAGE_SHIFT); + ppgtt->base.clear_range(&ppgtt->base, + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT); } extern int intel_iommu_gfx_mapped; @@ -436,11 +491,12 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) struct drm_i915_gem_object *obj; /* First fill our portion of the GTT with scratch pages */ - dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE, - dev_priv->gtt.total / PAGE_SIZE); + dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, + dev_priv->gtt.base.start / PAGE_SIZE, + dev_priv->gtt.base.total / PAGE_SIZE); list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - i915_gem_clflush_object(obj); + i915_gem_clflush_object(obj, obj->pin_display); i915_gem_gtt_bind_object(obj, obj->cache_level); } @@ -466,12 +522,12 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) * within the global GTT as well as accessible by the GPU through the GMADR * mapped BAR (dev_priv->mm.gtt->gtt). 
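 *
 * (Editor's sketch of the mechanism implemented below: each page's DMA
 * address is encoded as a PTE and written through the write-combined
 * gsm mapping,
 *
 *	iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
 *
 * followed by a posting read so the WC writes are flushed before the
 * TLB flush via GFX_FLSH_CNTL_GEN6.)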
*/ -static void gen6_ggtt_insert_entries(struct drm_device *dev, +static void gen6_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, unsigned int first_entry, enum i915_cache_level level) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = vm->dev->dev_private; gen6_gtt_pte_t __iomem *gtt_entries = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; int i = 0; @@ -480,8 +536,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev, for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { addr = sg_page_iter_dma_address(&sg_iter); - iowrite32(dev_priv->gtt.pte_encode(dev, addr, level), - &gtt_entries[i]); + iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]); i++; } @@ -492,8 +547,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev, * hardware should work, we must keep this posting read for paranoia. */ if (i != 0) - WARN_ON(readl(&gtt_entries[i-1]) - != dev_priv->gtt.pte_encode(dev, addr, level)); + WARN_ON(readl(&gtt_entries[i-1]) != + vm->pte_encode(addr, level)); /* This next bit makes the above posting read even more important. We * want to flush the TLBs only after we're certain all the PTE updates @@ -503,11 +558,11 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev, POSTING_READ(GFX_FLSH_CNTL_GEN6); } -static void gen6_ggtt_clear_range(struct drm_device *dev, +static void gen6_ggtt_clear_range(struct i915_address_space *vm, unsigned int first_entry, unsigned int num_entries) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = vm->dev->dev_private; gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; @@ -518,16 +573,14 @@ static void gen6_ggtt_clear_range(struct drm_device *dev, first_entry, num_entries, max_entries)) num_entries = max_entries; - scratch_pte = dev_priv->gtt.pte_encode(dev, - dev_priv->gtt.scratch_page_dma, - I915_CACHE_LLC); + scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC); for (i = 0; i < num_entries; i++) iowrite32(scratch_pte, &gtt_base[i]); readl(gtt_base); } -static void i915_ggtt_insert_entries(struct drm_device *dev, +static void i915_ggtt_insert_entries(struct i915_address_space *vm, struct sg_table *st, unsigned int pg_start, enum i915_cache_level cache_level) @@ -539,7 +592,7 @@ static void i915_ggtt_insert_entries(struct drm_device *dev, } -static void i915_ggtt_clear_range(struct drm_device *dev, +static void i915_ggtt_clear_range(struct i915_address_space *vm, unsigned int first_entry, unsigned int num_entries) { @@ -552,10 +605,11 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; - dev_priv->gtt.gtt_insert_entries(dev, obj->pages, - obj->gtt_space->start >> PAGE_SHIFT, - cache_level); + dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages, + entry, + cache_level); obj->has_global_gtt_mapping = 1; } @@ -564,10 +618,11 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT; - dev_priv->gtt.gtt_clear_range(obj->base.dev, - obj->gtt_space->start >> PAGE_SHIFT, - obj->base.size >> PAGE_SHIFT); + 
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, + entry, + obj->base.size >> PAGE_SHIFT); obj->has_global_gtt_mapping = 0; } @@ -618,7 +673,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, * aperture. One page should be enough to keep any prefetching inside * of the aperture. */ - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; struct drm_mm_node *entry; struct drm_i915_gem_object *obj; unsigned long hole_start, hole_end; @@ -626,37 +682,38 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, BUG_ON(mappable_end > end); /* Subtract the guard page ... */ - drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); + drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE); if (!HAS_LLC(dev)) - dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust; + dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust; /* Mark any preallocated objects as occupied */ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n", - obj->gtt_offset, obj->base.size); - - BUG_ON(obj->gtt_space != I915_GTT_RESERVED); - obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space, - obj->gtt_offset, - obj->base.size, - false); + struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); + int ret; + DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", + i915_gem_obj_ggtt_offset(obj), obj->base.size); + + WARN_ON(i915_gem_obj_ggtt_bound(obj)); + ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node); + if (ret) + DRM_DEBUG_KMS("Reservation failed\n"); obj->has_global_gtt_mapping = 1; + list_add(&vma->vma_link, &obj->vma_list); } - dev_priv->gtt.start = start; - dev_priv->gtt.total = end - start; + dev_priv->gtt.base.start = start; + dev_priv->gtt.base.total = end - start; /* Clear any non-preallocated blocks */ - drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space, - hole_start, hole_end) { + drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) { + const unsigned long count = (hole_end - hole_start) / PAGE_SIZE; DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", hole_start, hole_end); - dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE, - (hole_end-hole_start) / PAGE_SIZE); + ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count); } /* And finally clear the reserved guard page */ - dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1); + ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1); } static bool @@ -679,7 +736,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; unsigned long gtt_size, mappable_size; - gtt_size = dev_priv->gtt.total; + gtt_size = dev_priv->gtt.base.total; mappable_size = dev_priv->gtt.mappable_end; if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { @@ -688,7 +745,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev) if (INTEL_INFO(dev)->gen <= 7) { /* PPGTT pdes are stolen from global gtt ptes, so shrink the * aperture accordingly when using aliasing ppgtt. 
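 *
 * (Editor's note on the arithmetic below, using this patch's constant:
 * GEN6_PPGTT_PD_ENTRIES is 512, so
 *
 *	gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
 *
 * shrinks the usable GGTT address space by 512 * 4 KiB = 2 MiB, freeing
 * the last 512 GGTT PTE slots to serve as the PPGTT page-directory
 * entries.)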
*/ - gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; + gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; } i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); @@ -698,8 +755,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev) return; DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); - drm_mm_takedown(&dev_priv->mm.gtt_space); - gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE; + drm_mm_takedown(&dev_priv->gtt.base.mm); + gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE; } i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); } @@ -724,8 +781,8 @@ static int setup_scratch_page(struct drm_device *dev) #else dma_addr = page_to_phys(page); #endif - dev_priv->gtt.scratch_page = page; - dev_priv->gtt.scratch_page_dma = dma_addr; + dev_priv->gtt.base.scratch.page = page; + dev_priv->gtt.base.scratch.addr = dma_addr; return 0; } @@ -733,11 +790,13 @@ static int setup_scratch_page(struct drm_device *dev) static void teardown_scratch_page(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - set_pages_wb(dev_priv->gtt.scratch_page, 1); - pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma, + struct page *page = dev_priv->gtt.base.scratch.page; + + set_pages_wb(page, 1); + pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - put_page(dev_priv->gtt.scratch_page); - __free_page(dev_priv->gtt.scratch_page); + put_page(page); + __free_page(page); } static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) @@ -800,17 +859,18 @@ static int gen6_gmch_probe(struct drm_device *dev, if (ret) DRM_ERROR("Scratch setup failed\n"); - dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range; - dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries; + dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range; + dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries; return ret; } -static void gen6_gmch_remove(struct drm_device *dev) +static void gen6_gmch_remove(struct i915_address_space *vm) { - struct drm_i915_private *dev_priv = dev->dev_private; - iounmap(dev_priv->gtt.gsm); - teardown_scratch_page(dev_priv->dev); + + struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); + iounmap(gtt->gsm); + teardown_scratch_page(vm->dev); } static int i915_gmch_probe(struct drm_device *dev, @@ -831,13 +891,13 @@ static int i915_gmch_probe(struct drm_device *dev, intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end); dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); - dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range; - dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries; + dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; + dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries; return 0; } -static void i915_gmch_remove(struct drm_device *dev) +static void i915_gmch_remove(struct i915_address_space *vm) { intel_gmch_remove(); } @@ -849,34 +909,35 @@ int i915_gem_gtt_init(struct drm_device *dev) int ret; if (INTEL_INFO(dev)->gen <= 5) { - dev_priv->gtt.gtt_probe = i915_gmch_probe; - dev_priv->gtt.gtt_remove = i915_gmch_remove; + gtt->gtt_probe = i915_gmch_probe; + gtt->base.cleanup = i915_gmch_remove; } else { - dev_priv->gtt.gtt_probe = gen6_gmch_probe; - dev_priv->gtt.gtt_remove = gen6_gmch_remove; - if (IS_HASWELL(dev)) { - dev_priv->gtt.pte_encode = hsw_pte_encode; - } else if (IS_VALLEYVIEW(dev)) { - dev_priv->gtt.pte_encode = byt_pte_encode; - } else { - dev_priv->gtt.pte_encode = gen6_pte_encode; - } + gtt->gtt_probe = gen6_gmch_probe; + gtt->base.cleanup = 
gen6_gmch_remove; + if (IS_HASWELL(dev) && dev_priv->ellc_size) + gtt->base.pte_encode = iris_pte_encode; + else if (IS_HASWELL(dev)) + gtt->base.pte_encode = hsw_pte_encode; + else if (IS_VALLEYVIEW(dev)) + gtt->base.pte_encode = byt_pte_encode; + else if (INTEL_INFO(dev)->gen >= 7) + gtt->base.pte_encode = ivb_pte_encode; + else + gtt->base.pte_encode = snb_pte_encode; } - ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total, - &dev_priv->gtt.stolen_size, - &gtt->mappable_base, - &gtt->mappable_end); + ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size, + &gtt->mappable_base, &gtt->mappable_end); if (ret) return ret; + gtt->base.dev = dev; + /* GMADR is the PCI mmio aperture into the global GTT. */ DRM_INFO("Memory usable by graphics device = %zdM\n", - dev_priv->gtt.total >> 20); - DRM_DEBUG_DRIVER("GMADR size = %ldM\n", - dev_priv->gtt.mappable_end >> 20); - DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", - dev_priv->gtt.stolen_size >> 20); + gtt->base.total >> 20); + DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); + DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 982d4732cecf..9969d10b80f5 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -45,49 +45,48 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct pci_dev *pdev = dev_priv->bridge_dev; + struct resource *r; u32 base; - /* On the machines I have tested the Graphics Base of Stolen Memory - * is unreliable, so on those compute the base by subtracting the - * stolen memory from the Top of Low Usable DRAM which is where the - * BIOS places the graphics stolen memory. + /* Almost universally we can find the Graphics Base of Stolen Memory + * at offset 0x5c in the igfx configuration space. On a few (desktop) + * machines this is also mirrored in the bridge device at different + * locations, or in the MCHBAR. On gen2, the layout is again slightly + * different with the Graphics Segment immediately following Top of + * Memory (or Top of Usable DRAM). Note it appears that TOUD is only + * reported by 865g, so we just use the top of memory as determined + * by the e820 probe. * - * On gen2, the layout is slightly different with the Graphics Segment - * immediately following Top of Memory (or Top of Usable DRAM). Note - * it appears that TOUD is only reported by 865g, so we just use the - * top of memory as determined by the e820 probe. - * - * XXX gen2 requires an unavailable symbol and 945gm fails with - * its value of TOLUD. + * XXX However gen2 requires an unavailable symbol. */ base = 0; - if (IS_VALLEYVIEW(dev)) { + if (INTEL_INFO(dev)->gen >= 3) { + /* Read Graphics Base of Stolen Memory directly */ pci_read_config_dword(dev->pdev, 0x5c, &base); base &= ~((1<<20) - 1); - } else if (INTEL_INFO(dev)->gen >= 6) { - /* Read Base Data of Stolen Memory Register (BDSM) directly. - * Note that there is also a MCHBAR mirror at 0x1080c0 or - * we could use device 2:0x5c instead. 
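 *
 * (Editor's note, ahead of the replacement code below: the rewritten
 * function claims the stolen range with devm_request_mem_region(), so a
 * region already owned by anything else shows up as a NULL return and
 * stolen memory is treated as unusable:
 *
 *	r = devm_request_mem_region(dev->dev, base,
 *				    dev_priv->gtt.stolen_size,
 *				    "Graphics Stolen Memory");
 *	if (r == NULL)
 *		base = 0;
 * )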
- */ - pci_read_config_dword(pdev, 0xB0, &base); - base &= ~4095; /* lower bits used for locking register */ - } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { - /* Read Graphics Base of Stolen Memory directly */ - pci_read_config_dword(pdev, 0xA4, &base); + } else { /* GEN2 */ #if 0 - } else if (IS_GEN3(dev)) { - u8 val; - /* Stolen is immediately below Top of Low Usable DRAM */ - pci_read_config_byte(pdev, 0x9c, &val); - base = val >> 3 << 27; - base -= dev_priv->mm.gtt->stolen_size; - } else { /* Stolen is immediately above Top of Memory */ base = max_low_pfn_mapped << PAGE_SHIFT; #endif } + if (base == 0) + return 0; + + /* Verify that nothing else uses this physical address. Stolen + * memory should be reserved by the BIOS and hidden from the + * kernel. So if the region is already marked as busy, something + * is seriously wrong. + */ + r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size, + "Graphics Stolen Memory"); + if (r == NULL) { + DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", + base, base + (uint32_t)dev_priv->gtt.stolen_size); + base = 0; + } + return base; } @@ -95,32 +94,37 @@ static int i915_setup_compression(struct drm_device *dev, int size) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); + int ret; - /* Try to over-allocate to reduce reallocations and fragmentation */ - compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, - size <<= 1, 4096, 0); + compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL); if (!compressed_fb) - compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, - size >>= 1, 4096, 0); - if (compressed_fb) - compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); - if (!compressed_fb) - goto err; + goto err_llb; + + /* Try to over-allocate to reduce reallocations and fragmentation */ + ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, + size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT); + if (ret) + ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, + size >>= 1, 4096, + DRM_MM_SEARCH_DEFAULT); + if (ret) + goto err_llb; if (HAS_PCH_SPLIT(dev)) I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); else if (IS_GM45(dev)) { I915_WRITE(DPFC_CB_BASE, compressed_fb->start); } else { - compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, - 4096, 4096, 0); - if (compressed_llb) - compressed_llb = drm_mm_get_block(compressed_llb, - 4096, 4096); + compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); if (!compressed_llb) goto err_fb; - dev_priv->compressed_llb = compressed_llb; + ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb, + 4096, 4096, DRM_MM_SEARCH_DEFAULT); + if (ret) + goto err_fb; + + dev_priv->fbc.compressed_llb = compressed_llb; I915_WRITE(FBC_CFB_BASE, dev_priv->mm.stolen_base + compressed_fb->start); @@ -128,8 +132,8 @@ static int i915_setup_compression(struct drm_device *dev, int size) dev_priv->mm.stolen_base + compressed_llb->start); } - dev_priv->compressed_fb = compressed_fb; - dev_priv->cfb_size = size; + dev_priv->fbc.compressed_fb = compressed_fb; + dev_priv->fbc.size = size; DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", size); @@ -137,8 +141,10 @@ static int i915_setup_compression(struct drm_device *dev, int size) return 0; err_fb: - drm_mm_put_block(compressed_fb); -err: + kfree(compressed_llb); + drm_mm_remove_node(compressed_fb); +err_llb: + kfree(compressed_fb); pr_info_once("drm: not enough stolen space for compressed buffer (need 
%d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); return -ENOSPC; } @@ -150,7 +156,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size) if (!drm_mm_initialized(&dev_priv->mm.stolen)) return -ENODEV; - if (size < dev_priv->cfb_size) + if (size < dev_priv->fbc.size) return 0; /* Release any current block */ @@ -163,16 +169,20 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->cfb_size == 0) + if (dev_priv->fbc.size == 0) return; - if (dev_priv->compressed_fb) - drm_mm_put_block(dev_priv->compressed_fb); + if (dev_priv->fbc.compressed_fb) { + drm_mm_remove_node(dev_priv->fbc.compressed_fb); + kfree(dev_priv->fbc.compressed_fb); + } - if (dev_priv->compressed_llb) - drm_mm_put_block(dev_priv->compressed_llb); + if (dev_priv->fbc.compressed_llb) { + drm_mm_remove_node(dev_priv->fbc.compressed_llb); + kfree(dev_priv->fbc.compressed_llb); + } - dev_priv->cfb_size = 0; + dev_priv->fbc.size = 0; } void i915_gem_cleanup_stolen(struct drm_device *dev) @@ -201,6 +211,9 @@ int i915_gem_init_stolen(struct drm_device *dev) if (IS_VALLEYVIEW(dev)) bios_reserved = 1024*1024; /* top 1M on VLV/BYT */ + if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size)) + return 0; + /* Basic memrange allocator for stolen space */ drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size - bios_reserved); @@ -271,9 +284,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev, if (obj == NULL) return NULL; - if (drm_gem_private_object_init(dev, &obj->base, stolen->size)) - goto cleanup; - + drm_gem_private_object_init(dev, &obj->base, stolen->size); i915_gem_object_init(obj, &i915_gem_object_stolen_ops); obj->pages = i915_pages_create_for_stolen(dev, @@ -285,9 +296,8 @@ _i915_gem_object_create_stolen(struct drm_device *dev, i915_gem_object_pin_pages(obj); obj->stolen = stolen; - obj->base.write_domain = I915_GEM_DOMAIN_GTT; - obj->base.read_domains = I915_GEM_DOMAIN_GTT; - obj->cache_level = I915_CACHE_NONE; + obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; + obj->cache_level = HAS_LLC(dev) ? 
I915_CACHE_LLC : I915_CACHE_NONE; return obj; @@ -302,6 +312,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; + int ret; if (!drm_mm_initialized(&dev_priv->mm.stolen)) return NULL; @@ -310,17 +321,23 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size) if (size == 0) return NULL; - stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); - if (stolen) - stolen = drm_mm_get_block(stolen, size, 4096); - if (stolen == NULL) + stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); + if (!stolen) + return NULL; + + ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size, + 4096, DRM_MM_SEARCH_DEFAULT); + if (ret) { + kfree(stolen); return NULL; + } obj = _i915_gem_object_create_stolen(dev, stolen); if (obj) return obj; - drm_mm_put_block(stolen); + drm_mm_remove_node(stolen); + kfree(stolen); return NULL; } @@ -331,8 +348,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, u32 size) { struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_address_space *ggtt = &dev_priv->gtt.base; struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; + struct i915_vma *vma; + int ret; if (!drm_mm_initialized(&dev_priv->mm.stolen)) return NULL; @@ -347,56 +367,74 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, if (WARN_ON(size == 0)) return NULL; - stolen = drm_mm_create_block(&dev_priv->mm.stolen, - stolen_offset, size, - false); - if (stolen == NULL) { + stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); + if (!stolen) + return NULL; + + stolen->start = stolen_offset; + stolen->size = size; + ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen); + if (ret) { DRM_DEBUG_KMS("failed to allocate stolen space\n"); + kfree(stolen); return NULL; } obj = _i915_gem_object_create_stolen(dev, stolen); if (obj == NULL) { DRM_DEBUG_KMS("failed to allocate stolen object\n"); - drm_mm_put_block(stolen); + drm_mm_remove_node(stolen); + kfree(stolen); return NULL; } /* Some objects just need physical mem from stolen space */ - if (gtt_offset == -1) + if (gtt_offset == I915_GTT_OFFSET_NONE) return obj; + vma = i915_gem_vma_create(obj, ggtt); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_out; + } + /* To simplify the initialisation sequence between KMS and GTT, * we allow construction of the stolen object prior to * setting up the GTT space. The actual reservation will occur * later. 
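 *
 * (Editor's note on the drm_mm API change visible below: the old
 * drm_mm_create_block() allocated the node itself; with the new API the
 * caller owns the node and pre-fills its placement before reserving,
 *
 *	vma->node.start = gtt_offset;
 *	vma->node.size = size;
 *	ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
 *
 * and the error paths unwind with i915_gem_vma_destroy() plus
 * drm_mm_remove_node()/kfree() on the stolen node.)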
*/ - if (drm_mm_initialized(&dev_priv->mm.gtt_space)) { - obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space, - gtt_offset, size, - false); - if (obj->gtt_space == NULL) { + vma->node.start = gtt_offset; + vma->node.size = size; + if (drm_mm_initialized(&ggtt->mm)) { + ret = drm_mm_reserve_node(&ggtt->mm, &vma->node); + if (ret) { DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); - drm_gem_object_unreference(&obj->base); - return NULL; + goto err_vma; } - } else - obj->gtt_space = I915_GTT_RESERVED; + } - obj->gtt_offset = gtt_offset; obj->has_global_gtt_mapping = 1; list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); - list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + list_add_tail(&vma->mm_list, &ggtt->inactive_list); return obj; + +err_vma: + i915_gem_vma_destroy(vma); +err_out: + drm_mm_remove_node(stolen); + kfree(stolen); + drm_gem_object_unreference(&obj->base); + return NULL; } void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) { if (obj->stolen) { - drm_mm_put_block(obj->stolen); + drm_mm_remove_node(obj->stolen); + kfree(obj->stolen); obj->stolen = NULL; } } diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 537545be69db..032e9ef9c896 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) return true; if (INTEL_INFO(obj->base.dev)->gen == 3) { - if (obj->gtt_offset & ~I915_FENCE_START_MASK) + if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) return false; } else { - if (obj->gtt_offset & ~I830_FENCE_START_MASK) + if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) return false; } size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); - if (obj->gtt_space->size != size) + if (i915_gem_obj_ggtt_size(obj) != size) return false; - if (obj->gtt_offset & (size - 1)) + if (i915_gem_obj_ggtt_offset(obj) & (size - 1)) return false; return true; @@ -359,18 +359,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, */ obj->map_and_fenceable = - obj->gtt_space == NULL || - (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end && + !i915_gem_obj_ggtt_bound(obj) || + (i915_gem_obj_ggtt_offset(obj) + + obj->base.size <= dev_priv->gtt.mappable_end && i915_gem_object_fence_ok(obj, args->tiling_mode)); /* Rebind if we need a change of alignment */ if (!obj->map_and_fenceable) { - u32 unfenced_alignment = + u32 unfenced_align = i915_gem_get_gtt_alignment(dev, obj->base.size, args->tiling_mode, false); - if (obj->gtt_offset & (unfenced_alignment - 1)) - ret = i915_gem_object_unbind(obj); + if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1)) + ret = i915_gem_object_ggtt_unbind(obj); } if (ret == 0) { diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c new file mode 100644 index 000000000000..558e568d5b45 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -0,0 +1,1019 @@ +/* + * Copyright (c) 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following 
conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt <eric@anholt.net> + * Keith Packard <keithp@keithp.com> + * Mika Kuoppala <mika.kuoppala@intel.com> + * + */ + +#include <generated/utsrelease.h> +#include "i915_drv.h" + +static const char *yesno(int v) +{ + return v ? "yes" : "no"; +} + +static const char *ring_str(int ring) +{ + switch (ring) { + case RCS: return "render"; + case VCS: return "bsd"; + case BCS: return "blt"; + case VECS: return "vebox"; + default: return ""; + } +} + +static const char *pin_flag(int pinned) +{ + if (pinned > 0) + return " P"; + else if (pinned < 0) + return " p"; + else + return ""; +} + +static const char *tiling_flag(int tiling) +{ + switch (tiling) { + default: + case I915_TILING_NONE: return ""; + case I915_TILING_X: return " X"; + case I915_TILING_Y: return " Y"; + } +} + +static const char *dirty_flag(int dirty) +{ + return dirty ? " dirty" : ""; +} + +static const char *purgeable_flag(int purgeable) +{ + return purgeable ? " purgeable" : ""; +} + +static bool __i915_error_ok(struct drm_i915_error_state_buf *e) +{ + + if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) { + e->err = -ENOSPC; + return false; + } + + if (e->bytes == e->size - 1 || e->err) + return false; + + return true; +} + +static bool __i915_error_seek(struct drm_i915_error_state_buf *e, + unsigned len) +{ + if (e->pos + len <= e->start) { + e->pos += len; + return false; + } + + /* First vsnprintf needs to fit in its entirety for memmove */ + if (len >= e->size) { + e->err = -EIO; + return false; + } + + return true; +} + +static void __i915_error_advance(struct drm_i915_error_state_buf *e, + unsigned len) +{ + /* If this is the first printf in this window, adjust it so that + * the start position matches the start of the buffer + */ + + if (e->pos < e->start) { + const size_t off = e->start - e->pos; + + /* Should not happen but be paranoid */ + if (off > len || e->bytes) { + e->err = -EIO; + return; + } + + memmove(e->buf, e->buf + off, len - off); + e->bytes = len - off; + e->pos = e->start; + return; + } + + e->bytes += len; + e->pos += len; +} + +static void i915_error_vprintf(struct drm_i915_error_state_buf *e, + const char *f, va_list args) +{ + unsigned len; + + if (!__i915_error_ok(e)) + return; + + /* Seek the first printf which hits the start position */ + if (e->pos < e->start) { + len = vsnprintf(NULL, 0, f, args); + if (!__i915_error_seek(e, len)) + return; + } + + len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args); + if (len >= e->size - e->bytes) + len = e->size - e->bytes - 1; + + __i915_error_advance(e, len); +} + +static void i915_error_puts(struct drm_i915_error_state_buf *e, + const char *str) +{ + unsigned len; + + if (!__i915_error_ok(e)) + return; + + len = strlen(str); + + /* Seek the first printf which hits the start position */ + if (e->pos < e->start) { + if (!__i915_error_seek(e, len)) + return; + 
} + + if (len >= e->size - e->bytes) + len = e->size - e->bytes - 1; + memcpy(e->buf + e->bytes, str, len); + + __i915_error_advance(e, len); +} + +#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) +#define err_puts(e, s) i915_error_puts(e, s) + +static void print_error_buffers(struct drm_i915_error_state_buf *m, + const char *name, + struct drm_i915_error_buffer *err, + int count) +{ + err_printf(m, "%s [%d]:\n", name, count); + + while (count--) { + err_printf(m, " %08x %8u %02x %02x %x %x", + err->gtt_offset, + err->size, + err->read_domains, + err->write_domain, + err->rseqno, err->wseqno); + err_puts(m, pin_flag(err->pinned)); + err_puts(m, tiling_flag(err->tiling)); + err_puts(m, dirty_flag(err->dirty)); + err_puts(m, purgeable_flag(err->purgeable)); + err_puts(m, err->ring != -1 ? " " : ""); + err_puts(m, ring_str(err->ring)); + err_puts(m, i915_cache_level_str(err->cache_level)); + + if (err->name) + err_printf(m, " (name: %d)", err->name); + if (err->fence_reg != I915_FENCE_REG_NONE) + err_printf(m, " (fence: %d)", err->fence_reg); + + err_puts(m, "\n"); + err++; + } +} + +static void i915_ring_error_state(struct drm_i915_error_state_buf *m, + struct drm_device *dev, + struct drm_i915_error_state *error, + unsigned ring) +{ + BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */ + err_printf(m, "%s command stream:\n", ring_str(ring)); + err_printf(m, " HEAD: 0x%08x\n", error->head[ring]); + err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]); + err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]); + err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]); + err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]); + err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]); + err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]); + if (ring == RCS && INTEL_INFO(dev)->gen >= 4) + err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr); + + if (INTEL_INFO(dev)->gen >= 4) + err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]); + err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]); + err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]); + if (INTEL_INFO(dev)->gen >= 6) { + err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); + err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); + err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n", + error->semaphore_mboxes[ring][0], + error->semaphore_seqno[ring][0]); + err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n", + error->semaphore_mboxes[ring][1], + error->semaphore_seqno[ring][1]); + if (HAS_VEBOX(dev)) { + err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n", + error->semaphore_mboxes[ring][2], + error->semaphore_seqno[ring][2]); + } + } + err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); + err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); + err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); + err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); +} + +void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) 
+{ + va_list args; + + va_start(args, f); + i915_error_vprintf(e, f, args); + va_end(args); +} + +int i915_error_state_to_str(struct drm_i915_error_state_buf *m, + const struct i915_error_state_file_priv *error_priv) +{ + struct drm_device *dev = error_priv->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_error_state *error = error_priv->error; + struct intel_ring_buffer *ring; + int i, j, page, offset, elt; + + if (!error) { + err_printf(m, "no error state collected\n"); + goto out; + } + + err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, + error->time.tv_usec); + err_printf(m, "Kernel: " UTS_RELEASE "\n"); + err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); + err_printf(m, "EIR: 0x%08x\n", error->eir); + err_printf(m, "IER: 0x%08x\n", error->ier); + err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); + err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake); + err_printf(m, "DERRMR: 0x%08x\n", error->derrmr); + err_printf(m, "CCID: 0x%08x\n", error->ccid); + + for (i = 0; i < dev_priv->num_fence_regs; i++) + err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); + + for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++) + err_printf(m, " INSTDONE_%d: 0x%08x\n", i, + error->extra_instdone[i]); + + if (INTEL_INFO(dev)->gen >= 6) { + err_printf(m, "ERROR: 0x%08x\n", error->error); + err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); + } + + if (INTEL_INFO(dev)->gen == 7) + err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); + + for_each_ring(ring, dev_priv, i) + i915_ring_error_state(m, dev, error, i); + + if (error->active_bo) + print_error_buffers(m, "Active", + error->active_bo[0], + error->active_bo_count[0]); + + if (error->pinned_bo) + print_error_buffers(m, "Pinned", + error->pinned_bo[0], + error->pinned_bo_count[0]); + + for (i = 0; i < ARRAY_SIZE(error->ring); i++) { + struct drm_i915_error_object *obj; + + if ((obj = error->ring[i].batchbuffer)) { + err_printf(m, "%s --- gtt_offset = 0x%08x\n", + dev_priv->ring[i].name, + obj->gtt_offset); + offset = 0; + for (page = 0; page < obj->page_count; page++) { + for (elt = 0; elt < PAGE_SIZE/4; elt++) { + err_printf(m, "%08x : %08x\n", offset, + obj->pages[page][elt]); + offset += 4; + } + } + } + + if (error->ring[i].num_requests) { + err_printf(m, "%s --- %d requests\n", + dev_priv->ring[i].name, + error->ring[i].num_requests); + for (j = 0; j < error->ring[i].num_requests; j++) { + err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", + error->ring[i].requests[j].seqno, + error->ring[i].requests[j].jiffies, + error->ring[i].requests[j].tail); + } + } + + if ((obj = error->ring[i].ringbuffer)) { + err_printf(m, "%s --- ringbuffer = 0x%08x\n", + dev_priv->ring[i].name, + obj->gtt_offset); + offset = 0; + for (page = 0; page < obj->page_count; page++) { + for (elt = 0; elt < PAGE_SIZE/4; elt++) { + err_printf(m, "%08x : %08x\n", + offset, + obj->pages[page][elt]); + offset += 4; + } + } + } + + obj = error->ring[i].ctx; + if (obj) { + err_printf(m, "%s --- HW Context = 0x%08x\n", + dev_priv->ring[i].name, + obj->gtt_offset); + offset = 0; + for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { + err_printf(m, "[%04x] %08x %08x %08x %08x\n", + offset, + obj->pages[0][elt], + obj->pages[0][elt+1], + obj->pages[0][elt+2], + obj->pages[0][elt+3]); + offset += 16; + } + } + } + + if (error->overlay) + intel_overlay_print_error_state(m, error->overlay); + + if (error->display) + intel_display_print_error_state(m, dev, error->display); + +out: + if (m->bytes == 0 && m->err) + return m->err; + + 
return 0; +} + +int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, + size_t count, loff_t pos) +{ + memset(ebuf, 0, sizeof(*ebuf)); + + /* We need to have enough room to store any i915_error_state printf + * so that we can move it to start position. + */ + ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE; + ebuf->buf = kmalloc(ebuf->size, + GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN); + + if (ebuf->buf == NULL) { + ebuf->size = PAGE_SIZE; + ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); + } + + if (ebuf->buf == NULL) { + ebuf->size = 128; + ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY); + } + + if (ebuf->buf == NULL) + return -ENOMEM; + + ebuf->start = pos; + + return 0; +} + +static void i915_error_object_free(struct drm_i915_error_object *obj) +{ + int page; + + if (obj == NULL) + return; + + for (page = 0; page < obj->page_count; page++) + kfree(obj->pages[page]); + + kfree(obj); +} + +static void i915_error_state_free(struct kref *error_ref) +{ + struct drm_i915_error_state *error = container_of(error_ref, + typeof(*error), ref); + int i; + + for (i = 0; i < ARRAY_SIZE(error->ring); i++) { + i915_error_object_free(error->ring[i].batchbuffer); + i915_error_object_free(error->ring[i].ringbuffer); + i915_error_object_free(error->ring[i].ctx); + kfree(error->ring[i].requests); + } + + kfree(error->active_bo); + kfree(error->overlay); + kfree(error->display); + kfree(error); +} + +static struct drm_i915_error_object * +i915_error_object_create_sized(struct drm_i915_private *dev_priv, + struct drm_i915_gem_object *src, + const int num_pages) +{ + struct drm_i915_error_object *dst; + int i; + u32 reloc_offset; + + if (src == NULL || src->pages == NULL) + return NULL; + + dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); + if (dst == NULL) + return NULL; + + reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src); + for (i = 0; i < num_pages; i++) { + unsigned long flags; + void *d; + + d = kmalloc(PAGE_SIZE, GFP_ATOMIC); + if (d == NULL) + goto unwind; + + local_irq_save(flags); + if (reloc_offset < dev_priv->gtt.mappable_end && + src->has_global_gtt_mapping) { + void __iomem *s; + + /* Simply ignore tiling or any overlapping fence. + * It's part of the error state, and this hopefully + * captures what the GPU read. 
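 *
 * (Editor's summary of the three copy paths below: GGTT-mapped pages are
 * snapshotted through the mappable aperture with memcpy_fromio();
 * stolen-backed pages are read from
 *
 *	dev_priv->mm.stolen_base + src->stolen->start + (i << PAGE_SHIFT)
 *
 * and anything else is clflushed and copied via kmap_atomic(). All of it
 * runs under local_irq_save() with GFP_ATOMIC allocations, since error
 * capture can run in interrupt context.)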
+ */ + + s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, + reloc_offset); + memcpy_fromio(d, s, PAGE_SIZE); + io_mapping_unmap_atomic(s); + } else if (src->stolen) { + unsigned long offset; + + offset = dev_priv->mm.stolen_base; + offset += src->stolen->start; + offset += i << PAGE_SHIFT; + + memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); + } else { + struct page *page; + void *s; + + page = i915_gem_object_get_page(src, i); + + drm_clflush_pages(&page, 1); + + s = kmap_atomic(page); + memcpy(d, s, PAGE_SIZE); + kunmap_atomic(s); + + drm_clflush_pages(&page, 1); + } + local_irq_restore(flags); + + dst->pages[i] = d; + + reloc_offset += PAGE_SIZE; + } + dst->page_count = num_pages; + + return dst; + +unwind: + while (i--) + kfree(dst->pages[i]); + kfree(dst); + return NULL; +} +#define i915_error_object_create(dev_priv, src) \ + i915_error_object_create_sized((dev_priv), (src), \ + (src)->base.size>>PAGE_SHIFT) + +static void capture_bo(struct drm_i915_error_buffer *err, + struct drm_i915_gem_object *obj) +{ + err->size = obj->base.size; + err->name = obj->base.name; + err->rseqno = obj->last_read_seqno; + err->wseqno = obj->last_write_seqno; + err->gtt_offset = i915_gem_obj_ggtt_offset(obj); + err->read_domains = obj->base.read_domains; + err->write_domain = obj->base.write_domain; + err->fence_reg = obj->fence_reg; + err->pinned = 0; + if (obj->pin_count > 0) + err->pinned = 1; + if (obj->user_pin_count > 0) + err->pinned = -1; + err->tiling = obj->tiling_mode; + err->dirty = obj->dirty; + err->purgeable = obj->madv != I915_MADV_WILLNEED; + err->ring = obj->ring ? obj->ring->id : -1; + err->cache_level = obj->cache_level; +} + +static u32 capture_active_bo(struct drm_i915_error_buffer *err, + int count, struct list_head *head) +{ + struct i915_vma *vma; + int i = 0; + + list_for_each_entry(vma, head, mm_list) { + capture_bo(err++, vma->obj); + if (++i == count) + break; + } + + return i; +} + +static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, + int count, struct list_head *head) +{ + struct drm_i915_gem_object *obj; + int i = 0; + + list_for_each_entry(obj, head, global_list) { + if (obj->pin_count == 0) + continue; + + capture_bo(err++, obj); + if (++i == count) + break; + } + + return i; +} + +static void i915_gem_record_fences(struct drm_device *dev, + struct drm_i915_error_state *error) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + /* Fences */ + switch (INTEL_INFO(dev)->gen) { + case 7: + case 6: + for (i = 0; i < dev_priv->num_fence_regs; i++) + error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); + break; + case 5: + case 4: + for (i = 0; i < 16; i++) + error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + break; + case 3: + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + case 2: + for (i = 0; i < 8; i++) + error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + break; + + default: + BUG(); + } +} + +static struct drm_i915_error_object * +i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, + struct intel_ring_buffer *ring) +{ + struct i915_address_space *vm; + struct i915_vma *vma; + struct drm_i915_gem_object *obj; + u32 seqno; + + if (!ring->get_seqno) + return NULL; + + if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { + u32 acthd = I915_READ(ACTHD); + + if (WARN_ON(ring->id != RCS)) + return NULL; + + obj = ring->private; + if (acthd >= i915_gem_obj_ggtt_offset(obj) && + acthd < i915_gem_obj_ggtt_offset(obj) + 
obj->base.size) + return i915_error_object_create(dev_priv, obj); + } + + seqno = ring->get_seqno(ring, false); + list_for_each_entry(vm, &dev_priv->vm_list, global_link) { + list_for_each_entry(vma, &vm->active_list, mm_list) { + obj = vma->obj; + if (obj->ring != ring) + continue; + + if (i915_seqno_passed(seqno, obj->last_read_seqno)) + continue; + + if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) + continue; + + /* We need to copy these to an anonymous buffer as the simplest + * method to avoid being overwritten by userspace. + */ + return i915_error_object_create(dev_priv, obj); + } + } + + return NULL; +} + +static void i915_record_ring_state(struct drm_device *dev, + struct drm_i915_error_state *error, + struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (INTEL_INFO(dev)->gen >= 6) { + error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); + error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); + error->semaphore_mboxes[ring->id][0] + = I915_READ(RING_SYNC_0(ring->mmio_base)); + error->semaphore_mboxes[ring->id][1] + = I915_READ(RING_SYNC_1(ring->mmio_base)); + error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; + error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; + } + + if (HAS_VEBOX(dev)) { + error->semaphore_mboxes[ring->id][2] = + I915_READ(RING_SYNC_2(ring->mmio_base)); + error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2]; + } + + if (INTEL_INFO(dev)->gen >= 4) { + error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); + error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); + error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); + error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); + error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); + if (ring->id == RCS) + error->bbaddr = I915_READ64(BB_ADDR); + } else { + error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); + error->ipeir[ring->id] = I915_READ(IPEIR); + error->ipehr[ring->id] = I915_READ(IPEHR); + error->instdone[ring->id] = I915_READ(INSTDONE); + } + + error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); + error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); + error->seqno[ring->id] = ring->get_seqno(ring, false); + error->acthd[ring->id] = intel_ring_get_active_head(ring); + error->head[ring->id] = I915_READ_HEAD(ring); + error->tail[ring->id] = I915_READ_TAIL(ring); + error->ctl[ring->id] = I915_READ_CTL(ring); + + error->cpu_ring_head[ring->id] = ring->head; + error->cpu_ring_tail[ring->id] = ring->tail; +} + + +static void i915_gem_record_active_context(struct intel_ring_buffer *ring, + struct drm_i915_error_state *error, + struct drm_i915_error_ring *ering) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_gem_object *obj; + + /* Currently render ring is the only HW context user */ + if (ring->id != RCS || !error->ccid) + return; + + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { + ering->ctx = i915_error_object_create_sized(dev_priv, + obj, 1); + break; + } + } +} + +static void i915_gem_record_rings(struct drm_device *dev, + struct drm_i915_error_state *error) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; + struct drm_i915_gem_request *request; + int i, count; + + for_each_ring(ring, dev_priv, i) { + i915_record_ring_state(dev, error, ring); + + 
error->ring[i].batchbuffer = + i915_error_first_batchbuffer(dev_priv, ring); + + error->ring[i].ringbuffer = + i915_error_object_create(dev_priv, ring->obj); + + + i915_gem_record_active_context(ring, error, &error->ring[i]); + + count = 0; + list_for_each_entry(request, &ring->request_list, list) + count++; + + error->ring[i].num_requests = count; + error->ring[i].requests = + kmalloc(count*sizeof(struct drm_i915_error_request), + GFP_ATOMIC); + if (error->ring[i].requests == NULL) { + error->ring[i].num_requests = 0; + continue; + } + + count = 0; + list_for_each_entry(request, &ring->request_list, list) { + struct drm_i915_error_request *erq; + + erq = &error->ring[i].requests[count++]; + erq->seqno = request->seqno; + erq->jiffies = request->emitted_jiffies; + erq->tail = request->tail; + } + } +} + +/* FIXME: Since pin count/bound list is global, we duplicate what we capture per + * VM. + */ +static void i915_gem_capture_vm(struct drm_i915_private *dev_priv, + struct drm_i915_error_state *error, + struct i915_address_space *vm, + const int ndx) +{ + struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int i; + + i = 0; + list_for_each_entry(vma, &vm->active_list, mm_list) + i++; + error->active_bo_count[ndx] = i; + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) + if (obj->pin_count) + i++; + error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx]; + + if (i) { + active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC); + if (active_bo) + pinned_bo = active_bo + error->active_bo_count[ndx]; + } + + if (active_bo) + error->active_bo_count[ndx] = + capture_active_bo(active_bo, + error->active_bo_count[ndx], + &vm->active_list); + + if (pinned_bo) + error->pinned_bo_count[ndx] = + capture_pinned_bo(pinned_bo, + error->pinned_bo_count[ndx], + &dev_priv->mm.bound_list); + error->active_bo[ndx] = active_bo; + error->pinned_bo[ndx] = pinned_bo; +} + +static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, + struct drm_i915_error_state *error) +{ + struct i915_address_space *vm; + int cnt = 0, i = 0; + + list_for_each_entry(vm, &dev_priv->vm_list, global_link) + cnt++; + + if (WARN(cnt > 1, "Multiple VMs not yet supported\n")) + cnt = 1; + + vm = &dev_priv->gtt.base; + + error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC); + error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC); + error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count), + GFP_ATOMIC); + error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count), + GFP_ATOMIC); + + list_for_each_entry(vm, &dev_priv->vm_list, global_link) + i915_gem_capture_vm(dev_priv, error, vm, i++); +} + +/** + * i915_capture_error_state - capture an error record for later analysis + * @dev: drm device + * + * Should be called when an error is detected (either a hang or an error + * interrupt) to capture error state from the time of the error. Fills + * out a structure which becomes available in debugfs for user level tools + * to pick up. 
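
/*
 * A minimal sketch of a user-level tool picking up the captured record,
 * as the comment above describes. The path matches the DRM_INFO hint
 * printed by i915_capture_error_state() below; card0 is assumed, and
 * error handling is kept to the bare minimum. Reading the node needs
 * appropriate privileges.
 */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/sys/class/drm/card0/error", "r");

	if (!f) {
		perror("open error node");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
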
+ */ +void i915_capture_error_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_error_state *error; + unsigned long flags; + int pipe; + + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + error = dev_priv->gpu_error.first_error; + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + if (error) + return; + + /* Account for pipe specific data like PIPE*STAT */ + error = kzalloc(sizeof(*error), GFP_ATOMIC); + if (!error) { + DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); + return; + } + + DRM_INFO("capturing error event; look for more information in " + "/sys/class/drm/card%d/error\n", dev->primary->index); + + kref_init(&error->ref); + error->eir = I915_READ(EIR); + error->pgtbl_er = I915_READ(PGTBL_ER); + if (HAS_HW_CONTEXTS(dev)) + error->ccid = I915_READ(CCID); + + if (HAS_PCH_SPLIT(dev)) + error->ier = I915_READ(DEIER) | I915_READ(GTIER); + else if (IS_VALLEYVIEW(dev)) + error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); + else if (IS_GEN2(dev)) + error->ier = I915_READ16(IER); + else + error->ier = I915_READ(IER); + + if (INTEL_INFO(dev)->gen >= 6) + error->derrmr = I915_READ(DERRMR); + + if (IS_VALLEYVIEW(dev)) + error->forcewake = I915_READ(FORCEWAKE_VLV); + else if (INTEL_INFO(dev)->gen >= 7) + error->forcewake = I915_READ(FORCEWAKE_MT); + else if (INTEL_INFO(dev)->gen == 6) + error->forcewake = I915_READ(FORCEWAKE); + + if (!HAS_PCH_SPLIT(dev)) + for_each_pipe(pipe) + error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); + + if (INTEL_INFO(dev)->gen >= 6) { + error->error = I915_READ(ERROR_GEN6); + error->done_reg = I915_READ(DONE_REG); + } + + if (INTEL_INFO(dev)->gen == 7) + error->err_int = I915_READ(GEN7_ERR_INT); + + i915_get_extra_instdone(dev, error->extra_instdone); + + i915_gem_capture_buffers(dev_priv, error); + i915_gem_record_fences(dev, error); + i915_gem_record_rings(dev, error); + + do_gettimeofday(&error->time); + + error->overlay = intel_overlay_capture_error_state(dev); + error->display = intel_display_capture_error_state(dev); + + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + if (dev_priv->gpu_error.first_error == NULL) { + dev_priv->gpu_error.first_error = error; + error = NULL; + } + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + + if (error) + i915_error_state_free(&error->ref); +} + +void i915_error_state_get(struct drm_device *dev, + struct i915_error_state_file_priv *error_priv) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + error_priv->error = dev_priv->gpu_error.first_error; + if (error_priv->error) + kref_get(&error_priv->error->ref); + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + +} + +void i915_error_state_put(struct i915_error_state_file_priv *error_priv) +{ + if (error_priv->error) + kref_put(&error_priv->error->ref, i915_error_state_free); +} + +void i915_destroy_error_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_error_state *error; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); + error = dev_priv->gpu_error.first_error; + dev_priv->gpu_error.first_error = NULL; + spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); + + if (error) + kref_put(&error->ref, i915_error_state_free); +} + +const char *i915_cache_level_str(int type) +{ + switch (type) { + case I915_CACHE_NONE: return " uncached"; + case I915_CACHE_LLC: return " snooped or LLC"; + 
case I915_CACHE_L3_LLC: return " L3+LLC"; + default: return ""; + } +} + +/* NB: please notice the memset */ +void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); + + switch (INTEL_INFO(dev)->gen) { + case 2: + case 3: + instdone[0] = I915_READ(INSTDONE); + break; + case 4: + case 5: + case 6: + instdone[0] = I915_READ(INSTDONE_I965); + instdone[1] = I915_READ(INSTDONE1); + break; + default: + WARN_ONCE(1, "Unsupported platform\n"); + case 7: + instdone[0] = I915_READ(GEN7_INSTDONE_1); + instdone[1] = I915_READ(GEN7_SC_INSTDONE); + instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); + instdone[3] = I915_READ(GEN7_ROW_INSTDONE); + break; + } +} diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 3d92a7cef154..a03b445ceb5f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -85,6 +85,12 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); + if (dev_priv->pc8.irqs_disabled) { + WARN(1, "IRQs disabled\n"); + dev_priv->pc8.regsave.deimr &= ~mask; + return; + } + if ((dev_priv->irq_mask & mask) != 0) { dev_priv->irq_mask &= ~mask; I915_WRITE(DEIMR, dev_priv->irq_mask); @@ -97,6 +103,12 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { assert_spin_locked(&dev_priv->irq_lock); + if (dev_priv->pc8.irqs_disabled) { + WARN(1, "IRQs disabled\n"); + dev_priv->pc8.regsave.deimr |= mask; + return; + } + if ((dev_priv->irq_mask & mask) != mask) { dev_priv->irq_mask |= mask; I915_WRITE(DEIMR, dev_priv->irq_mask); @@ -104,6 +116,85 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) } } +/** + * ilk_update_gt_irq - update GTIMR + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) +{ + assert_spin_locked(&dev_priv->irq_lock); + + if (dev_priv->pc8.irqs_disabled) { + WARN(1, "IRQs disabled\n"); + dev_priv->pc8.regsave.gtimr &= ~interrupt_mask; + dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask & + interrupt_mask); + return; + } + + dev_priv->gt_irq_mask &= ~interrupt_mask; + dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + POSTING_READ(GTIMR); +} + +void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) +{ + ilk_update_gt_irq(dev_priv, mask, mask); +} + +void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) +{ + ilk_update_gt_irq(dev_priv, mask, 0); +} + +/** + * snb_update_pm_irq - update GEN6_PMIMR + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +static void snb_update_pm_irq(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) +{ + uint32_t new_val; + + assert_spin_locked(&dev_priv->irq_lock); + + if (dev_priv->pc8.irqs_disabled) { + WARN(1, "IRQs disabled\n"); + dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask; + dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask & + interrupt_mask); + return; + } + + new_val = dev_priv->pm_irq_mask; + new_val &= ~interrupt_mask; + new_val |= (~enabled_irq_mask & interrupt_mask); + + if (new_val != 
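
/*
 * The mask-register helpers in this hunk (ironlake_*_display_irq,
 * ilk_update_gt_irq, snb_update_pm_irq) all reduce to the same
 * two-argument update: within the bits selected by interrupt_mask,
 * clear the IMR bit (unmask/enable) where enabled_irq_mask is set and
 * set it (mask/disable) where it is not, leaving all other bits alone.
 * A distilled, runnable restatement of just that rule:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t update_imr(uint32_t imr, uint32_t interrupt_mask,
			   uint32_t enabled_irq_mask)
{
	imr &= ~interrupt_mask;
	imr |= ~enabled_irq_mask & interrupt_mask;
	return imr;
}

int main(void)
{
	uint32_t imr = 0xffffffffu;	/* everything masked */

	imr = update_imr(imr, 0x5, 0x5);	/* enable bits 0 and 2 */
	printf("%#x\n", (unsigned)imr);		/* 0xfffffffa */
	imr = update_imr(imr, 0x4, 0x0);	/* disable bit 2 again */
	printf("%#x\n", (unsigned)imr);		/* 0xfffffffe */
	return 0;
}
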
dev_priv->pm_irq_mask) { + dev_priv->pm_irq_mask = new_val; + I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); + POSTING_READ(GEN6_PMIMR); + } +} + +void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +{ + snb_update_pm_irq(dev_priv, mask, mask); +} + +void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) +{ + snb_update_pm_irq(dev_priv, mask, 0); +} + static bool ivb_can_enable_err_int(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -128,6 +219,8 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev) enum pipe pipe; struct intel_crtc *crtc; + assert_spin_locked(&dev_priv->irq_lock); + for_each_pipe(pipe) { crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); @@ -152,38 +245,75 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, } static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, - bool enable) + enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = dev->dev_private; - if (enable) { + I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); + if (!ivb_can_enable_err_int(dev)) return; - I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A | - ERR_INT_FIFO_UNDERRUN_B | - ERR_INT_FIFO_UNDERRUN_C); - ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); } else { + bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB); + + /* Change the state _after_ we've read out the current one. */ ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); + + if (!was_enabled && + (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) { + DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n", + pipe_name(pipe)); + } } } -static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc, +/** + * ibx_display_interrupt_update - update SDEIMR + * @dev_priv: driver private + * @interrupt_mask: mask of interrupt bits to update + * @enabled_irq_mask: mask of interrupt bits to enable + */ +static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, + uint32_t interrupt_mask, + uint32_t enabled_irq_mask) +{ + uint32_t sdeimr = I915_READ(SDEIMR); + sdeimr &= ~interrupt_mask; + sdeimr |= (~enabled_irq_mask & interrupt_mask); + + assert_spin_locked(&dev_priv->irq_lock); + + if (dev_priv->pc8.irqs_disabled && + (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) { + WARN(1, "IRQs disabled\n"); + dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask; + dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask & + interrupt_mask); + return; + } + + I915_WRITE(SDEIMR, sdeimr); + POSTING_READ(SDEIMR); +} +#define ibx_enable_display_interrupt(dev_priv, bits) \ + ibx_display_interrupt_update((dev_priv), (bits), (bits)) +#define ibx_disable_display_interrupt(dev_priv, bits) \ + ibx_display_interrupt_update((dev_priv), (bits), 0) + +static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, + enum transcoder pch_transcoder, bool enable) { - struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER : - SDE_TRANSB_FIFO_UNDER; + uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 
+ SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; if (enable) - I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit); + ibx_enable_display_interrupt(dev_priv, bit); else - I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit); - - POSTING_READ(SDEIMR); + ibx_disable_display_interrupt(dev_priv, bit); } static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, @@ -193,19 +323,26 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; if (enable) { + I915_WRITE(SERR_INT, + SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); + if (!cpt_can_enable_serr_int(dev)) return; - I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN | - SERR_INT_TRANS_B_FIFO_UNDERRUN | - SERR_INT_TRANS_C_FIFO_UNDERRUN); - - I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT); + ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT); } else { - I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT); - } + uint32_t tmp = I915_READ(SERR_INT); + bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT); - POSTING_READ(SDEIMR); + /* Change the state _after_ we've read out the current one. */ + ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT); + + if (!was_enabled && + (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) { + DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n", + transcoder_name(pch_transcoder)); + } + } } /** @@ -243,7 +380,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, if (IS_GEN5(dev) || IS_GEN6(dev)) ironlake_set_fifo_underrun_reporting(dev, pipe, enable); else if (IS_GEN7(dev)) - ivybridge_set_fifo_underrun_reporting(dev, enable); + ivybridge_set_fifo_underrun_reporting(dev, pipe, enable); done: spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -269,29 +406,19 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, bool enable) { struct drm_i915_private *dev_priv = dev->dev_private; - enum pipe p; - struct drm_crtc *crtc; - struct intel_crtc *intel_crtc; + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); unsigned long flags; bool ret; - if (HAS_PCH_LPT(dev)) { - crtc = NULL; - for_each_pipe(p) { - struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p]; - if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) { - crtc = c; - break; - } - } - if (!crtc) { - DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n"); - return false; - } - } else { - crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder]; - } - intel_crtc = to_intel_crtc(crtc); + /* + * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT + * has only one pch transcoder A that all pipes can use. To avoid racy + * pch transcoder -> pipe lookups from interrupt code simply store the + * underrun statistics in crtc A. Since we never expose this anywhere + * nor use it outside of the fifo underrun code here using the "wrong" + * crtc on LPT won't cause issues. 
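
/*
 * Distilled sketch of the disable path shared by the IVB and CPT
 * underrun handlers above: remember whether reporting was enabled
 * *before* masking, then inspect the latched status afterwards, so an
 * underrun that fired in that window is reported rather than silently
 * lost. mmio_imr/mmio_err are stand-ins for the DEIMR/GEN7_ERR_INT
 * style register pair, not real register definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t mmio_imr;	/* set bit = interrupt masked off */
static uint32_t mmio_err;	/* latched (write-1-to-clear) status */

static void disable_underrun_reporting(uint32_t irq_bit, uint32_t err_bit)
{
	bool was_enabled = !(mmio_imr & irq_bit);

	/* change the state _after_ reading out the current one */
	mmio_imr |= irq_bit;

	if (was_enabled && (mmio_err & err_bit))
		printf("uncleared fifo underrun\n");
}

int main(void)
{
	mmio_err = 0x1;		/* underrun latched while still enabled */
	disable_underrun_reporting(0x2, 0x1);
	return 0;
}
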
+ */ spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -303,7 +430,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, intel_crtc->pch_fifo_underrun_disabled = !enable; if (HAS_PCH_IBX(dev)) - ibx_set_fifo_underrun_reporting(intel_crtc, enable); + ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable); else cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable); @@ -319,6 +446,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) u32 reg = PIPESTAT(pipe); u32 pipestat = I915_READ(reg) & 0x7fff0000; + assert_spin_locked(&dev_priv->irq_lock); + if ((pipestat & mask) == mask) return; @@ -334,6 +463,8 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) u32 reg = PIPESTAT(pipe); u32 pipestat = I915_READ(reg) & 0x7fff0000; + assert_spin_locked(&dev_priv->irq_lock); + if ((pipestat & mask) == 0) return; @@ -625,14 +756,13 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_kms_helper_hotplug_event(dev); } -static void ironlake_handle_rps_change(struct drm_device *dev) +static void ironlake_rps_change_irq_handler(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; u32 busy_up, busy_down, max_avg, min_avg; u8 new_delay; - unsigned long flags; - spin_lock_irqsave(&mchdev_lock, flags); + spin_lock(&mchdev_lock); I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); @@ -660,7 +790,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev) if (ironlake_set_drps(dev, new_delay)) dev_priv->ips.cur_delay = new_delay; - spin_unlock_irqrestore(&mchdev_lock, flags); + spin_unlock(&mchdev_lock); return; } @@ -668,34 +798,31 @@ static void ironlake_handle_rps_change(struct drm_device *dev) static void notify_ring(struct drm_device *dev, struct intel_ring_buffer *ring) { - struct drm_i915_private *dev_priv = dev->dev_private; - if (ring->obj == NULL) return; trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); wake_up_all(&ring->irq_queue); - if (i915_enable_hangcheck) { - mod_timer(&dev_priv->gpu_error.hangcheck_timer, - round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); - } + i915_queue_hangcheck(dev); } static void gen6_pm_rps_work(struct work_struct *work) { drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, rps.work); - u32 pm_iir, pm_imr; + u32 pm_iir; u8 new_delay; - spin_lock_irq(&dev_priv->rps.lock); + spin_lock_irq(&dev_priv->irq_lock); pm_iir = dev_priv->rps.pm_iir; dev_priv->rps.pm_iir = 0; - pm_imr = I915_READ(GEN6_PMIMR); /* Make sure not to corrupt PMIMR state used by ringbuffer code */ - I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS); - spin_unlock_irq(&dev_priv->rps.lock); + snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); + spin_unlock_irq(&dev_priv->irq_lock); + + /* Make sure we didn't queue anything we're not going to process. 
*/ + WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS); if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) return; @@ -781,13 +908,12 @@ static void ivybridge_parity_work(struct work_struct *work) I915_WRITE(GEN7_MISCCPCTL, misccpctl); spin_lock_irqsave(&dev_priv->irq_lock, flags); - dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); + ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); mutex_unlock(&dev_priv->dev->struct_mutex); - parity_event[0] = "L3_PARITY_ERROR=1"; + parity_event[0] = I915_L3_PARITY_UEVENT "=1"; parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); @@ -804,22 +930,31 @@ static void ivybridge_parity_work(struct work_struct *work) kfree(parity_event[1]); } -static void ivybridge_handle_parity_error(struct drm_device *dev) +static void ivybridge_parity_error_irq_handler(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long flags; if (!HAS_L3_GPU_CACHE(dev)) return; - spin_lock_irqsave(&dev_priv->irq_lock, flags); - dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + spin_lock(&dev_priv->irq_lock); + ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT); + spin_unlock(&dev_priv->irq_lock); queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); } +static void ilk_gt_irq_handler(struct drm_device *dev, + struct drm_i915_private *dev_priv, + u32 gt_iir) +{ + if (gt_iir & + (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) + notify_ring(dev, &dev_priv->ring[RCS]); + if (gt_iir & ILK_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[VCS]); +} + static void snb_gt_irq_handler(struct drm_device *dev, struct drm_i915_private *dev_priv, u32 gt_iir) @@ -841,32 +976,7 @@ static void snb_gt_irq_handler(struct drm_device *dev, } if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) - ivybridge_handle_parity_error(dev); -} - -/* Legacy way of handling PM interrupts */ -static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, - u32 pm_iir) -{ - unsigned long flags; - - /* - * IIR bits should never already be set because IMR should - * prevent an interrupt from being shown in IIR. The warning - * displays a case where we've unsafely cleared - * dev_priv->rps.pm_iir. Although missing an interrupt of the same - * type is not a problem, it displays a problem in the logic. - * - * The mask bit in IMR is cleared by dev_priv->rps.work. 
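
/*
 * Sketch of the producer/consumer pairing around rps.pm_iir: the irq
 * handler (gen6_rps_irq_handler, below) latches the RPS event bits and
 * masks them in PMIMR, while the work function above atomically takes
 * the latched bits, clears them, and unmasks before processing. The
 * driver serializes both sides with dev_priv->irq_lock; locking is
 * modelled here by comments only.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pm_iir_latched;	/* irq_lock-protected in the driver */

static void irq_side(uint32_t pm_iir, uint32_t events)
{
	/* under irq_lock */
	pm_iir_latched |= pm_iir & events;
	/* snb_disable_pm_irq(events), then queue_work() */
}

static uint32_t work_side(uint32_t events)
{
	/* under irq_lock */
	uint32_t pm_iir = pm_iir_latched;

	pm_iir_latched = 0;
	/* snb_enable_pm_irq(events) re-arms before processing */
	return pm_iir & events;
}

int main(void)
{
	irq_side(0x30, 0x70);
	printf("work processes %#x\n", (unsigned)work_side(0x70)); /* 0x30 */
	return 0;
}
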
- */ - - spin_lock_irqsave(&dev_priv->rps.lock, flags); - dev_priv->rps.pm_iir |= pm_iir; - I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); - POSTING_READ(GEN6_PMIMR); - spin_unlock_irqrestore(&dev_priv->rps.lock, flags); - - queue_work(dev_priv->wq, &dev_priv->rps.work); + ivybridge_parity_error_irq_handler(dev); } #define HPD_STORM_DETECT_PERIOD 1000 @@ -886,6 +996,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, spin_lock(&dev_priv->irq_lock); for (i = 1; i < HPD_NUM_PINS; i++) { + WARN(((hpd[i] & hotplug_trigger) && + dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED), + "Received HPD interrupt although disabled\n"); + if (!(hpd[i] & hotplug_trigger) || dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) continue; @@ -896,6 +1010,7 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; dev_priv->hpd_stats[i].hpd_cnt = 0; + DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; dev_priv->hpd_event_bits &= ~(1 << i); @@ -903,6 +1018,8 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, storm_detected = true; } else { dev_priv->hpd_stats[i].hpd_cnt++; + DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, + dev_priv->hpd_stats[i].hpd_cnt); } } @@ -928,28 +1045,21 @@ static void dp_aux_irq_handler(struct drm_device *dev) wake_up_all(&dev_priv->gmbus_wait_queue); } -/* Unlike gen6_queue_rps_work() from which this function is originally derived, - * we must be able to deal with other PM interrupts. This is complicated because - * of the way in which we use the masks to defer the RPS work (which for - * posterity is necessary because of forcewake). - */ -static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv, - u32 pm_iir) +/* The RPS events need forcewake, so we add them to a work queue and mask their + * IMR bits until the work is done. Other interrupts can be processed without + * the work queue. */ +static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) { - unsigned long flags; + if (pm_iir & GEN6_PM_RPS_EVENTS) { + spin_lock(&dev_priv->irq_lock); + dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; + snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); + spin_unlock(&dev_priv->irq_lock); - spin_lock_irqsave(&dev_priv->rps.lock, flags); - dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; - if (dev_priv->rps.pm_iir) { - I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); - /* never want to mask useful interrupts. 
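
/*
 * Standalone restatement of the storm detector in intel_hpd_irq_handler
 * above: interrupts arriving less than one window apart bump a per-pin
 * counter, and once the counter exceeds the threshold the pin is marked
 * disabled. The window is HPD_STORM_DETECT_PERIOD (1000 ms above); the
 * threshold constant's value lives elsewhere in the driver, so 5 is
 * assumed here purely for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STORM_PERIOD_MS	1000
#define STORM_THRESHOLD	5	/* assumed value */

struct hpd_pin {
	uint64_t last_ms;
	int cnt;
	bool disabled;
};

static void hpd_event(struct hpd_pin *pin, uint64_t now_ms)
{
	if (now_ms > pin->last_ms + STORM_PERIOD_MS) {
		pin->last_ms = now_ms;
		pin->cnt = 0;		/* quiet long enough: restart */
	} else if (pin->cnt > STORM_THRESHOLD) {
		pin->disabled = true;	/* storm: stop servicing this pin */
	} else {
		pin->cnt++;
	}
}

int main(void)
{
	struct hpd_pin pin = {0};
	uint64_t t;

	for (t = 0; t < 800 && !pin.disabled; t += 100)
		hpd_event(&pin, t);	/* burst: 100 ms apart */
	printf("disabled after burst: %d\n", pin.disabled);	/* 1 */
	return 0;
}
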
(also posting read) */ - WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); - /* TODO: if queue_work is slow, move it out of the spinlock */ queue_work(dev_priv->wq, &dev_priv->rps.work); } - spin_unlock_irqrestore(&dev_priv->rps.lock, flags); - if (pm_iir & ~GEN6_PM_RPS_EVENTS) { + if (HAS_VEBOX(dev_priv->dev)) { if (pm_iir & PM_VEBOX_USER_INTERRUPT) notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); @@ -1028,8 +1138,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) gmbus_irq_handler(dev); - if (pm_iir & GEN6_PM_RPS_EVENTS) - gen6_queue_rps_work(dev_priv, pm_iir); + if (pm_iir) + gen6_rps_irq_handler(dev_priv, pm_iir); I915_WRITE(GTIIR, gt_iir); I915_WRITE(GEN6_PMIIR, pm_iir); @@ -1179,27 +1289,112 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) cpt_serr_int_handler(dev); } -static irqreturn_t ivybridge_irq_handler(int irq, void *arg) +static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (de_iir & DE_AUX_CHANNEL_A) + dp_aux_irq_handler(dev); + + if (de_iir & DE_GSE) + intel_opregion_asle_intr(dev); + + if (de_iir & DE_PIPEA_VBLANK) + drm_handle_vblank(dev, 0); + + if (de_iir & DE_PIPEB_VBLANK) + drm_handle_vblank(dev, 1); + + if (de_iir & DE_POISON) + DRM_ERROR("Poison interrupt\n"); + + if (de_iir & DE_PIPEA_FIFO_UNDERRUN) + if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) + DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); + + if (de_iir & DE_PIPEB_FIFO_UNDERRUN) + if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) + DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); + + if (de_iir & DE_PLANEA_FLIP_DONE) { + intel_prepare_page_flip(dev, 0); + intel_finish_page_flip_plane(dev, 0); + } + + if (de_iir & DE_PLANEB_FLIP_DONE) { + intel_prepare_page_flip(dev, 1); + intel_finish_page_flip_plane(dev, 1); + } + + /* check event from PCH */ + if (de_iir & DE_PCH_EVENT) { + u32 pch_iir = I915_READ(SDEIIR); + + if (HAS_PCH_CPT(dev)) + cpt_irq_handler(dev, pch_iir); + else + ibx_irq_handler(dev, pch_iir); + + /* should clear PCH hotplug event before clear CPU irq */ + I915_WRITE(SDEIIR, pch_iir); + } + + if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) + ironlake_rps_change_irq_handler(dev); +} + +static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + if (de_iir & DE_ERR_INT_IVB) + ivb_err_int_handler(dev); + + if (de_iir & DE_AUX_CHANNEL_A_IVB) + dp_aux_irq_handler(dev); + + if (de_iir & DE_GSE_IVB) + intel_opregion_asle_intr(dev); + + for (i = 0; i < 3; i++) { + if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) + drm_handle_vblank(dev, i); + if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { + intel_prepare_page_flip(dev, i); + intel_finish_page_flip_plane(dev, i); + } + } + + /* check event from PCH */ + if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { + u32 pch_iir = I915_READ(SDEIIR); + + cpt_irq_handler(dev, pch_iir); + + /* clear PCH hotplug event before clear CPU irq */ + I915_WRITE(SDEIIR, pch_iir); + } +} + +static irqreturn_t ironlake_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0; + u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; - int i; + bool err_int_reenable = false; atomic_inc(&dev_priv->irq_received); /* We get interrupts on 
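
/*
 * The per-pipe IVB display bits are laid out in groups of five, which
 * is why the loop above derives the pipe B/C bits by shifting the
 * pipe A bit left by 5 * pipe; the DE_PIPE_VBLANK_IVB(pipe) macro used
 * by the vblank hooks later in this patch encodes the same stride. The
 * pipe A bit value below is assumed for the demo, not the real register
 * definition.
 */
#include <stdio.h>

#define DE_PIPEA_VBLANK_IVB	(1u << 0)	/* assumed */
#define DE_PIPE_VBLANK_IVB(p)	(DE_PIPEA_VBLANK_IVB << (5 * (p)))

int main(void)
{
	int pipe;

	for (pipe = 0; pipe < 3; pipe++)
		printf("pipe %c: %#x\n", 'A' + pipe,
		       (unsigned)DE_PIPE_VBLANK_IVB(pipe));
	return 0;
}
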
unclaimed registers, so check for this before we * do any I915_{READ,WRITE}. */ - if (IS_HASWELL(dev) && - (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { - DRM_ERROR("Unclaimed register before interrupt\n"); - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); - } + intel_uncore_check_errors(dev); /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); + POSTING_READ(DEIER); /* Disable south interrupts. We'll only write to SDEIIR once, so further * interrupts will will be stored on its back queue, and then we'll be @@ -1217,62 +1412,42 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) * handler. */ if (IS_HASWELL(dev)) { spin_lock(&dev_priv->irq_lock); - ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); + err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB; + if (err_int_reenable) + ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); spin_unlock(&dev_priv->irq_lock); } gt_iir = I915_READ(GTIIR); if (gt_iir) { - snb_gt_irq_handler(dev, dev_priv, gt_iir); + if (INTEL_INFO(dev)->gen >= 6) + snb_gt_irq_handler(dev, dev_priv, gt_iir); + else + ilk_gt_irq_handler(dev, dev_priv, gt_iir); I915_WRITE(GTIIR, gt_iir); ret = IRQ_HANDLED; } de_iir = I915_READ(DEIIR); if (de_iir) { - if (de_iir & DE_ERR_INT_IVB) - ivb_err_int_handler(dev); - - if (de_iir & DE_AUX_CHANNEL_A_IVB) - dp_aux_irq_handler(dev); - - if (de_iir & DE_GSE_IVB) - intel_opregion_asle_intr(dev); - - for (i = 0; i < 3; i++) { - if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) - drm_handle_vblank(dev, i); - if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { - intel_prepare_page_flip(dev, i); - intel_finish_page_flip_plane(dev, i); - } - } - - /* check event from PCH */ - if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { - u32 pch_iir = I915_READ(SDEIIR); - - cpt_irq_handler(dev, pch_iir); - - /* clear PCH hotplug event before clear CPU irq */ - I915_WRITE(SDEIIR, pch_iir); - } - + if (INTEL_INFO(dev)->gen >= 7) + ivb_display_irq_handler(dev, de_iir); + else + ilk_display_irq_handler(dev, de_iir); I915_WRITE(DEIIR, de_iir); ret = IRQ_HANDLED; } - pm_iir = I915_READ(GEN6_PMIIR); - if (pm_iir) { - if (IS_HASWELL(dev)) - hsw_pm_irq_handler(dev_priv, pm_iir); - else if (pm_iir & GEN6_PM_RPS_EVENTS) - gen6_queue_rps_work(dev_priv, pm_iir); - I915_WRITE(GEN6_PMIIR, pm_iir); - ret = IRQ_HANDLED; + if (INTEL_INFO(dev)->gen >= 6) { + u32 pm_iir = I915_READ(GEN6_PMIIR); + if (pm_iir) { + gen6_rps_irq_handler(dev_priv, pm_iir); + I915_WRITE(GEN6_PMIIR, pm_iir); + ret = IRQ_HANDLED; + } } - if (IS_HASWELL(dev)) { + if (err_int_reenable) { spin_lock(&dev_priv->irq_lock); if (ivb_can_enable_err_int(dev)) ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); @@ -1289,119 +1464,6 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg) return ret; } -static void ilk_gt_irq_handler(struct drm_device *dev, - struct drm_i915_private *dev_priv, - u32 gt_iir) -{ - if (gt_iir & - (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) - notify_ring(dev, &dev_priv->ring[RCS]); - if (gt_iir & ILK_BSD_USER_INTERRUPT) - notify_ring(dev, &dev_priv->ring[VCS]); -} - -static irqreturn_t ironlake_irq_handler(int irq, void *arg) -{ - struct drm_device *dev = (struct drm_device *) arg; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - int ret = IRQ_NONE; - u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; - - atomic_inc(&dev_priv->irq_received); - - /* disable master interrupt before clearing iir */ - de_ier = 
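
/*
 * Runnable toy model of the bracketing in ironlake_irq_handler above:
 * the master enable (DEIER) and the south enable (SDEIER) are parked
 * while the IIR sources are drained, so IIR writes cannot race with new
 * interrupt delivery, then both are restored once draining is done.
 * Registers are plain variables here; MASTER stands in for
 * DE_MASTER_IRQ_CONTROL. A sketch of the sequencing, not driver code.
 */
#include <stdint.h>
#include <stdio.h>

#define MASTER	(1u << 31)

static uint32_t deier = MASTER | 0xff, sdeier = 0x3f, deiir = 0x5;

static void handler(void)
{
	uint32_t saved_deier = deier, saved_sdeier = sdeier;

	deier &= ~MASTER;	/* no new CPU interrupts from here on */
	sdeier = 0;		/* south interrupts queue up in SDEIIR */

	while (deiir) {		/* drain all pending sources */
		printf("handled DEIIR %#x\n", (unsigned)deiir);
		deiir = 0;
	}

	deier = saved_deier;	/* restore; anything still pending re-fires */
	sdeier = saved_sdeier;
}

int main(void)
{
	handler();
	printf("DEIER restored: %#x\n", (unsigned)deier);
	return 0;
}
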
I915_READ(DEIER); - I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); - POSTING_READ(DEIER); - - /* Disable south interrupts. We'll only write to SDEIIR once, so further - * interrupts will will be stored on its back queue, and then we'll be - * able to process them after we restore SDEIER (as soon as we restore - * it, we'll get an interrupt if SDEIIR still has something to process - * due to its back queue). */ - sde_ier = I915_READ(SDEIER); - I915_WRITE(SDEIER, 0); - POSTING_READ(SDEIER); - - de_iir = I915_READ(DEIIR); - gt_iir = I915_READ(GTIIR); - pm_iir = I915_READ(GEN6_PMIIR); - - if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) - goto done; - - ret = IRQ_HANDLED; - - if (IS_GEN5(dev)) - ilk_gt_irq_handler(dev, dev_priv, gt_iir); - else - snb_gt_irq_handler(dev, dev_priv, gt_iir); - - if (de_iir & DE_AUX_CHANNEL_A) - dp_aux_irq_handler(dev); - - if (de_iir & DE_GSE) - intel_opregion_asle_intr(dev); - - if (de_iir & DE_PIPEA_VBLANK) - drm_handle_vblank(dev, 0); - - if (de_iir & DE_PIPEB_VBLANK) - drm_handle_vblank(dev, 1); - - if (de_iir & DE_POISON) - DRM_ERROR("Poison interrupt\n"); - - if (de_iir & DE_PIPEA_FIFO_UNDERRUN) - if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) - DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); - - if (de_iir & DE_PIPEB_FIFO_UNDERRUN) - if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) - DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); - - if (de_iir & DE_PLANEA_FLIP_DONE) { - intel_prepare_page_flip(dev, 0); - intel_finish_page_flip_plane(dev, 0); - } - - if (de_iir & DE_PLANEB_FLIP_DONE) { - intel_prepare_page_flip(dev, 1); - intel_finish_page_flip_plane(dev, 1); - } - - /* check event from PCH */ - if (de_iir & DE_PCH_EVENT) { - u32 pch_iir = I915_READ(SDEIIR); - - if (HAS_PCH_CPT(dev)) - cpt_irq_handler(dev, pch_iir); - else - ibx_irq_handler(dev, pch_iir); - - /* should clear PCH hotplug event before clear CPU irq */ - I915_WRITE(SDEIIR, pch_iir); - } - - if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) - ironlake_handle_rps_change(dev); - - if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS) - gen6_queue_rps_work(dev_priv, pm_iir); - - I915_WRITE(GTIIR, gt_iir); - I915_WRITE(DEIIR, de_iir); - I915_WRITE(GEN6_PMIIR, pm_iir); - -done: - I915_WRITE(DEIER, de_ier); - POSTING_READ(DEIER); - I915_WRITE(SDEIER, sde_ier); - POSTING_READ(SDEIER); - - return ret; -} - /** * i915_error_work_func - do process context error handling work * @work: work struct @@ -1417,9 +1479,9 @@ static void i915_error_work_func(struct work_struct *work) gpu_error); struct drm_device *dev = dev_priv->dev; struct intel_ring_buffer *ring; - char *error_event[] = { "ERROR=1", NULL }; - char *reset_event[] = { "RESET=1", NULL }; - char *reset_done_event[] = { "ERROR=0", NULL }; + char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; + char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; + char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; int i, ret; kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); @@ -1470,535 +1532,6 @@ static void i915_error_work_func(struct work_struct *work) } } -/* NB: please notice the memset */ -static void i915_get_extra_instdone(struct drm_device *dev, - uint32_t *instdone) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); - - switch(INTEL_INFO(dev)->gen) { - case 2: - case 3: - instdone[0] = I915_READ(INSTDONE); - break; - case 4: - case 5: - case 6: - instdone[0] = I915_READ(INSTDONE_I965); - instdone[1] = 
I915_READ(INSTDONE1); - break; - default: - WARN_ONCE(1, "Unsupported platform\n"); - case 7: - instdone[0] = I915_READ(GEN7_INSTDONE_1); - instdone[1] = I915_READ(GEN7_SC_INSTDONE); - instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); - instdone[3] = I915_READ(GEN7_ROW_INSTDONE); - break; - } -} - -#ifdef CONFIG_DEBUG_FS -static struct drm_i915_error_object * -i915_error_object_create_sized(struct drm_i915_private *dev_priv, - struct drm_i915_gem_object *src, - const int num_pages) -{ - struct drm_i915_error_object *dst; - int i; - u32 reloc_offset; - - if (src == NULL || src->pages == NULL) - return NULL; - - dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); - if (dst == NULL) - return NULL; - - reloc_offset = src->gtt_offset; - for (i = 0; i < num_pages; i++) { - unsigned long flags; - void *d; - - d = kmalloc(PAGE_SIZE, GFP_ATOMIC); - if (d == NULL) - goto unwind; - - local_irq_save(flags); - if (reloc_offset < dev_priv->gtt.mappable_end && - src->has_global_gtt_mapping) { - void __iomem *s; - - /* Simply ignore tiling or any overlapping fence. - * It's part of the error state, and this hopefully - * captures what the GPU read. - */ - - s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, - reloc_offset); - memcpy_fromio(d, s, PAGE_SIZE); - io_mapping_unmap_atomic(s); - } else if (src->stolen) { - unsigned long offset; - - offset = dev_priv->mm.stolen_base; - offset += src->stolen->start; - offset += i << PAGE_SHIFT; - - memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); - } else { - struct page *page; - void *s; - - page = i915_gem_object_get_page(src, i); - - drm_clflush_pages(&page, 1); - - s = kmap_atomic(page); - memcpy(d, s, PAGE_SIZE); - kunmap_atomic(s); - - drm_clflush_pages(&page, 1); - } - local_irq_restore(flags); - - dst->pages[i] = d; - - reloc_offset += PAGE_SIZE; - } - dst->page_count = num_pages; - dst->gtt_offset = src->gtt_offset; - - return dst; - -unwind: - while (i--) - kfree(dst->pages[i]); - kfree(dst); - return NULL; -} -#define i915_error_object_create(dev_priv, src) \ - i915_error_object_create_sized((dev_priv), (src), \ - (src)->base.size>>PAGE_SHIFT) - -static void -i915_error_object_free(struct drm_i915_error_object *obj) -{ - int page; - - if (obj == NULL) - return; - - for (page = 0; page < obj->page_count; page++) - kfree(obj->pages[page]); - - kfree(obj); -} - -void -i915_error_state_free(struct kref *error_ref) -{ - struct drm_i915_error_state *error = container_of(error_ref, - typeof(*error), ref); - int i; - - for (i = 0; i < ARRAY_SIZE(error->ring); i++) { - i915_error_object_free(error->ring[i].batchbuffer); - i915_error_object_free(error->ring[i].ringbuffer); - i915_error_object_free(error->ring[i].ctx); - kfree(error->ring[i].requests); - } - - kfree(error->active_bo); - kfree(error->overlay); - kfree(error->display); - kfree(error); -} -static void capture_bo(struct drm_i915_error_buffer *err, - struct drm_i915_gem_object *obj) -{ - err->size = obj->base.size; - err->name = obj->base.name; - err->rseqno = obj->last_read_seqno; - err->wseqno = obj->last_write_seqno; - err->gtt_offset = obj->gtt_offset; - err->read_domains = obj->base.read_domains; - err->write_domain = obj->base.write_domain; - err->fence_reg = obj->fence_reg; - err->pinned = 0; - if (obj->pin_count > 0) - err->pinned = 1; - if (obj->user_pin_count > 0) - err->pinned = -1; - err->tiling = obj->tiling_mode; - err->dirty = obj->dirty; - err->purgeable = obj->madv != I915_MADV_WILLNEED; - err->ring = obj->ring ? 
obj->ring->id : -1; - err->cache_level = obj->cache_level; -} - -static u32 capture_active_bo(struct drm_i915_error_buffer *err, - int count, struct list_head *head) -{ - struct drm_i915_gem_object *obj; - int i = 0; - - list_for_each_entry(obj, head, mm_list) { - capture_bo(err++, obj); - if (++i == count) - break; - } - - return i; -} - -static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, - int count, struct list_head *head) -{ - struct drm_i915_gem_object *obj; - int i = 0; - - list_for_each_entry(obj, head, global_list) { - if (obj->pin_count == 0) - continue; - - capture_bo(err++, obj); - if (++i == count) - break; - } - - return i; -} - -static void i915_gem_record_fences(struct drm_device *dev, - struct drm_i915_error_state *error) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - - /* Fences */ - switch (INTEL_INFO(dev)->gen) { - case 7: - case 6: - for (i = 0; i < dev_priv->num_fence_regs; i++) - error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); - break; - case 5: - case 4: - for (i = 0; i < 16; i++) - error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); - break; - case 3: - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - for (i = 0; i < 8; i++) - error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); - case 2: - for (i = 0; i < 8; i++) - error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); - break; - - default: - BUG(); - } -} - -static struct drm_i915_error_object * -i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, - struct intel_ring_buffer *ring) -{ - struct drm_i915_gem_object *obj; - u32 seqno; - - if (!ring->get_seqno) - return NULL; - - if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { - u32 acthd = I915_READ(ACTHD); - - if (WARN_ON(ring->id != RCS)) - return NULL; - - obj = ring->private; - if (acthd >= obj->gtt_offset && - acthd < obj->gtt_offset + obj->base.size) - return i915_error_object_create(dev_priv, obj); - } - - seqno = ring->get_seqno(ring, false); - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { - if (obj->ring != ring) - continue; - - if (i915_seqno_passed(seqno, obj->last_read_seqno)) - continue; - - if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) - continue; - - /* We need to copy these to an anonymous buffer as the simplest - * method to avoid being overwritten by userspace. 
- */ - return i915_error_object_create(dev_priv, obj); - } - - return NULL; -} - -static void i915_record_ring_state(struct drm_device *dev, - struct drm_i915_error_state *error, - struct intel_ring_buffer *ring) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (INTEL_INFO(dev)->gen >= 6) { - error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); - error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); - error->semaphore_mboxes[ring->id][0] - = I915_READ(RING_SYNC_0(ring->mmio_base)); - error->semaphore_mboxes[ring->id][1] - = I915_READ(RING_SYNC_1(ring->mmio_base)); - error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; - error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; - } - - if (INTEL_INFO(dev)->gen >= 4) { - error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); - error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); - error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); - error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); - error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); - if (ring->id == RCS) - error->bbaddr = I915_READ64(BB_ADDR); - } else { - error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); - error->ipeir[ring->id] = I915_READ(IPEIR); - error->ipehr[ring->id] = I915_READ(IPEHR); - error->instdone[ring->id] = I915_READ(INSTDONE); - } - - error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); - error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); - error->seqno[ring->id] = ring->get_seqno(ring, false); - error->acthd[ring->id] = intel_ring_get_active_head(ring); - error->head[ring->id] = I915_READ_HEAD(ring); - error->tail[ring->id] = I915_READ_TAIL(ring); - error->ctl[ring->id] = I915_READ_CTL(ring); - - error->cpu_ring_head[ring->id] = ring->head; - error->cpu_ring_tail[ring->id] = ring->tail; -} - - -static void i915_gem_record_active_context(struct intel_ring_buffer *ring, - struct drm_i915_error_state *error, - struct drm_i915_error_ring *ering) -{ - struct drm_i915_private *dev_priv = ring->dev->dev_private; - struct drm_i915_gem_object *obj; - - /* Currently render ring is the only HW context user */ - if (ring->id != RCS || !error->ccid) - return; - - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if ((error->ccid & PAGE_MASK) == obj->gtt_offset) { - ering->ctx = i915_error_object_create_sized(dev_priv, - obj, 1); - } - } -} - -static void i915_gem_record_rings(struct drm_device *dev, - struct drm_i915_error_state *error) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ring_buffer *ring; - struct drm_i915_gem_request *request; - int i, count; - - for_each_ring(ring, dev_priv, i) { - i915_record_ring_state(dev, error, ring); - - error->ring[i].batchbuffer = - i915_error_first_batchbuffer(dev_priv, ring); - - error->ring[i].ringbuffer = - i915_error_object_create(dev_priv, ring->obj); - - - i915_gem_record_active_context(ring, error, &error->ring[i]); - - count = 0; - list_for_each_entry(request, &ring->request_list, list) - count++; - - error->ring[i].num_requests = count; - error->ring[i].requests = - kmalloc(count*sizeof(struct drm_i915_error_request), - GFP_ATOMIC); - if (error->ring[i].requests == NULL) { - error->ring[i].num_requests = 0; - continue; - } - - count = 0; - list_for_each_entry(request, &ring->request_list, list) { - struct drm_i915_error_request *erq; - - erq = &error->ring[i].requests[count++]; - erq->seqno = request->seqno; - erq->jiffies = request->emitted_jiffies; 
- erq->tail = request->tail; - } - } -} - -/** - * i915_capture_error_state - capture an error record for later analysis - * @dev: drm device - * - * Should be called when an error is detected (either a hang or an error - * interrupt) to capture error state from the time of the error. Fills - * out a structure which becomes available in debugfs for user level tools - * to pick up. - */ -static void i915_capture_error_state(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj; - struct drm_i915_error_state *error; - unsigned long flags; - int i, pipe; - - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - error = dev_priv->gpu_error.first_error; - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); - if (error) - return; - - /* Account for pipe specific data like PIPE*STAT */ - error = kzalloc(sizeof(*error), GFP_ATOMIC); - if (!error) { - DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); - return; - } - - DRM_INFO("capturing error event; look for more information in " - "/sys/kernel/debug/dri/%d/i915_error_state\n", - dev->primary->index); - - kref_init(&error->ref); - error->eir = I915_READ(EIR); - error->pgtbl_er = I915_READ(PGTBL_ER); - if (HAS_HW_CONTEXTS(dev)) - error->ccid = I915_READ(CCID); - - if (HAS_PCH_SPLIT(dev)) - error->ier = I915_READ(DEIER) | I915_READ(GTIER); - else if (IS_VALLEYVIEW(dev)) - error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); - else if (IS_GEN2(dev)) - error->ier = I915_READ16(IER); - else - error->ier = I915_READ(IER); - - if (INTEL_INFO(dev)->gen >= 6) - error->derrmr = I915_READ(DERRMR); - - if (IS_VALLEYVIEW(dev)) - error->forcewake = I915_READ(FORCEWAKE_VLV); - else if (INTEL_INFO(dev)->gen >= 7) - error->forcewake = I915_READ(FORCEWAKE_MT); - else if (INTEL_INFO(dev)->gen == 6) - error->forcewake = I915_READ(FORCEWAKE); - - if (!HAS_PCH_SPLIT(dev)) - for_each_pipe(pipe) - error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); - - if (INTEL_INFO(dev)->gen >= 6) { - error->error = I915_READ(ERROR_GEN6); - error->done_reg = I915_READ(DONE_REG); - } - - if (INTEL_INFO(dev)->gen == 7) - error->err_int = I915_READ(GEN7_ERR_INT); - - i915_get_extra_instdone(dev, error->extra_instdone); - - i915_gem_record_fences(dev, error); - i915_gem_record_rings(dev, error); - - /* Record buffers on the active and pinned lists. 
*/ - error->active_bo = NULL; - error->pinned_bo = NULL; - - i = 0; - list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) - i++; - error->active_bo_count = i; - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) - if (obj->pin_count) - i++; - error->pinned_bo_count = i - error->active_bo_count; - - error->active_bo = NULL; - error->pinned_bo = NULL; - if (i) { - error->active_bo = kmalloc(sizeof(*error->active_bo)*i, - GFP_ATOMIC); - if (error->active_bo) - error->pinned_bo = - error->active_bo + error->active_bo_count; - } - - if (error->active_bo) - error->active_bo_count = - capture_active_bo(error->active_bo, - error->active_bo_count, - &dev_priv->mm.active_list); - - if (error->pinned_bo) - error->pinned_bo_count = - capture_pinned_bo(error->pinned_bo, - error->pinned_bo_count, - &dev_priv->mm.bound_list); - - do_gettimeofday(&error->time); - - error->overlay = intel_overlay_capture_error_state(dev); - error->display = intel_display_capture_error_state(dev); - - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - if (dev_priv->gpu_error.first_error == NULL) { - dev_priv->gpu_error.first_error = error; - error = NULL; - } - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); - - if (error) - i915_error_state_free(&error->ref); -} - -void i915_destroy_error_state(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_error_state *error; - unsigned long flags; - - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); - error = dev_priv->gpu_error.first_error; - dev_priv->gpu_error.first_error = NULL; - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); - - if (error) - kref_put(&error->ref, i915_error_state_free); -} -#else -#define i915_capture_error_state(x) -#endif - static void i915_report_and_clear_eir(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2155,10 +1688,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in if (INTEL_INFO(dev)->gen >= 4) { int dspsurf = DSPSURF(intel_crtc->plane); stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == - obj->gtt_offset; + i915_gem_obj_ggtt_offset(obj); } else { int dspaddr = DSPADDR(intel_crtc->plane); - stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + + stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + crtc->y * crtc->fb->pitches[0] + crtc->x * crtc->fb->bits_per_pixel/8); } @@ -2202,29 +1735,14 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; + uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : + DE_PIPE_VBLANK_ILK(pipe); if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
- DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - - return 0; -} - -static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) -{ - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; - - if (!i915_pipe_enabled(dev, pipe)) - return -EINVAL; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ironlake_enable_display_irq(dev_priv, - DE_PIPEA_VBLANK_IVB << (5 * pipe)); + ironlake_enable_display_irq(dev_priv, bit); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; @@ -2275,21 +1793,11 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; + uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : + DE_PIPE_VBLANK_ILK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ironlake_disable_display_irq(dev_priv, (pipe == 0) ? - DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); -} - -static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) -{ - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ironlake_disable_display_irq(dev_priv, - DE_PIPEA_VBLANK_IVB << (pipe * 5)); + ironlake_disable_display_irq(dev_priv, bit); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -2392,10 +1900,10 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd) u32 tmp; if (ring->hangcheck.acthd != acthd) - return active; + return HANGCHECK_ACTIVE; if (IS_GEN2(dev)) - return hung; + return HANGCHECK_HUNG; /* Is the chip hanging on a WAIT_FOR_EVENT? * If so we can simply poke the RB_WAIT bit @@ -2407,24 +1915,24 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd) DRM_ERROR("Kicking stuck wait on %s\n", ring->name); I915_WRITE_CTL(ring, tmp); - return kick; + return HANGCHECK_KICK; } if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { switch (semaphore_passed(ring)) { default: - return hung; + return HANGCHECK_HUNG; case 1: DRM_ERROR("Kicking stuck semaphore on %s\n", ring->name); I915_WRITE_CTL(ring, tmp); - return kick; + return HANGCHECK_KICK; case 0: - return wait; + return HANGCHECK_WAIT; } } - return hung; + return HANGCHECK_HUNG; } /** @@ -2435,7 +1943,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd) * we kick the ring. If we see no progress on three subsequent calls * we assume chip is wedged and try to fix it by resetting the chip. 
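
/*
 * Summary sketch of the scoring that i915_hangcheck_elapsed applies
 * below: each sample classifies a busy ring via ring_stuck() and
 * accumulates a per-ring score, while progressing samples decay it.
 * The real BUSY/KICK/HUNG weights and the trip threshold are defined
 * elsewhere in the driver; the values here are placeholders chosen only
 * to make the model runnable.
 */
#include <stdio.h>

enum hangcheck_action { WAIT, ACTIVE, KICK, HUNG };

#define BUSY_W	1	/* placeholder weights */
#define KICK_W	5
#define HUNG_W	20

static int score_sample(int score, enum hangcheck_action action)
{
	switch (action) {
	case WAIT:	return score;		/* no penalty */
	case ACTIVE:	return score + BUSY_W;	/* busy but moving */
	case KICK:	return score + KICK_W;	/* needed a kick */
	case HUNG:	return score + HUNG_W;	/* no progress at all */
	}
	return score;
}

int main(void)
{
	int score = 0, i;

	for (i = 0; i < 3; i++)
		score = score_sample(score, KICK);
	printf("score after three kicks: %d\n", score);	/* 15 */
	return 0;
}
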
*/ -void i915_hangcheck_elapsed(unsigned long data) +static void i915_hangcheck_elapsed(unsigned long data) { struct drm_device *dev = (struct drm_device *)data; drm_i915_private_t *dev_priv = dev->dev_private; @@ -2471,8 +1979,6 @@ void i915_hangcheck_elapsed(unsigned long data) } else busy = false; } else { - int score; - /* We always increment the hangcheck score * if the ring is busy and still processing * the same request, so that no single request @@ -2492,21 +1998,19 @@ void i915_hangcheck_elapsed(unsigned long data) acthd); switch (ring->hangcheck.action) { - case wait: - score = 0; + case HANGCHECK_WAIT: break; - case active: - score = BUSY; + case HANGCHECK_ACTIVE: + ring->hangcheck.score += BUSY; break; - case kick: - score = KICK; + case HANGCHECK_KICK: + ring->hangcheck.score += KICK; break; - case hung: - score = HUNG; + case HANGCHECK_HUNG: + ring->hangcheck.score += HUNG; stuck[i] = true; break; } - ring->hangcheck.score += score; } } else { /* Gradually reduce the count so that we catch DoS @@ -2536,9 +2040,17 @@ void i915_hangcheck_elapsed(unsigned long data) if (busy_count) /* Reset timer case chip hangs without another request * being added */ - mod_timer(&dev_priv->gpu_error.hangcheck_timer, - round_jiffies_up(jiffies + - DRM_I915_HANGCHECK_JIFFIES)); + i915_queue_hangcheck(dev); +} + +void i915_queue_hangcheck(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + if (!i915_enable_hangcheck) + return; + + mod_timer(&dev_priv->gpu_error.hangcheck_timer, + round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); } static void ibx_irq_preinstall(struct drm_device *dev) @@ -2560,31 +2072,26 @@ static void ibx_irq_preinstall(struct drm_device *dev) POSTING_READ(SDEIER); } -/* drm_dma.h hooks -*/ -static void ironlake_irq_preinstall(struct drm_device *dev) +static void gen5_gt_irq_preinstall(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - - atomic_set(&dev_priv->irq_received, 0); - - I915_WRITE(HWSTAM, 0xeffe); - - /* XXX hotplug from PCH */ - - I915_WRITE(DEIMR, 0xffffffff); - I915_WRITE(DEIER, 0x0); - POSTING_READ(DEIER); + struct drm_i915_private *dev_priv = dev->dev_private; /* and GT */ I915_WRITE(GTIMR, 0xffffffff); I915_WRITE(GTIER, 0x0); POSTING_READ(GTIER); - ibx_irq_preinstall(dev); + if (INTEL_INFO(dev)->gen >= 6) { + /* and PM */ + I915_WRITE(GEN6_PMIMR, 0xffffffff); + I915_WRITE(GEN6_PMIER, 0x0); + POSTING_READ(GEN6_PMIER); + } } -static void ivybridge_irq_preinstall(struct drm_device *dev) +/* drm_dma.h hooks +*/ +static void ironlake_irq_preinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -2592,21 +2099,11 @@ static void ivybridge_irq_preinstall(struct drm_device *dev) I915_WRITE(HWSTAM, 0xeffe); - /* XXX hotplug from PCH */ - I915_WRITE(DEIMR, 0xffffffff); I915_WRITE(DEIER, 0x0); POSTING_READ(DEIER); - /* and GT */ - I915_WRITE(GTIMR, 0xffffffff); - I915_WRITE(GTIER, 0x0); - POSTING_READ(GTIER); - - /* Power management */ - I915_WRITE(GEN6_PMIMR, 0xffffffff); - I915_WRITE(GEN6_PMIER, 0x0); - POSTING_READ(GEN6_PMIER); + gen5_gt_irq_preinstall(dev); ibx_irq_preinstall(dev); } @@ -2627,9 +2124,8 @@ static void valleyview_irq_preinstall(struct drm_device *dev) /* and GT */ I915_WRITE(GTIIR, I915_READ(GTIIR)); I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, 0xffffffff); - I915_WRITE(GTIER, 0x0); - POSTING_READ(GTIER); + + gen5_gt_irq_preinstall(dev); I915_WRITE(DPINVGTT, 0xff); @@ -2648,22 +2144,21 @@ static void 
ibx_hpd_irq_setup(struct drm_device *dev) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct drm_mode_config *mode_config = &dev->mode_config; struct intel_encoder *intel_encoder; - u32 mask = ~I915_READ(SDEIMR); - u32 hotplug; + u32 hotplug_irqs, hotplug, enabled_irqs = 0; if (HAS_PCH_IBX(dev)) { - mask &= ~SDE_HOTPLUG_MASK; + hotplug_irqs = SDE_HOTPLUG_MASK; list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) - mask |= hpd_ibx[intel_encoder->hpd_pin]; + enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; } else { - mask &= ~SDE_HOTPLUG_MASK_CPT; + hotplug_irqs = SDE_HOTPLUG_MASK_CPT; list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) - mask |= hpd_cpt[intel_encoder->hpd_pin]; + enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; } - I915_WRITE(SDEIMR, ~mask); + ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); /* * Enable digital hotplug on the PCH, and configure the DP short pulse @@ -2700,123 +2195,103 @@ static void ibx_irq_postinstall(struct drm_device *dev) I915_WRITE(SDEIMR, ~mask); } -static int ironlake_irq_postinstall(struct drm_device *dev) +static void gen5_gt_irq_postinstall(struct drm_device *dev) { - unsigned long irqflags; - - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - /* enable kind of interrupts always enabled */ - u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | - DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | - DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | - DE_PIPEA_FIFO_UNDERRUN | DE_POISON; - u32 gt_irqs; - - dev_priv->irq_mask = ~display_mask; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 pm_irqs, gt_irqs; - /* should always can generate irq */ - I915_WRITE(DEIIR, I915_READ(DEIIR)); - I915_WRITE(DEIMR, dev_priv->irq_mask); - I915_WRITE(DEIER, display_mask | - DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT); - POSTING_READ(DEIER); + pm_irqs = gt_irqs = 0; dev_priv->gt_irq_mask = ~0; + if (HAS_L3_GPU_CACHE(dev)) { + /* L3 parity interrupt is always unmasked. */ + dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + } - I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - - gt_irqs = GT_RENDER_USER_INTERRUPT; - - if (IS_GEN6(dev)) - gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; - else + gt_irqs |= GT_RENDER_USER_INTERRUPT; + if (IS_GEN5(dev)) { gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | ILK_BSD_USER_INTERRUPT; + } else { + gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; + } + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); I915_WRITE(GTIER, gt_irqs); POSTING_READ(GTIER); - ibx_irq_postinstall(dev); + if (INTEL_INFO(dev)->gen >= 6) { + pm_irqs |= GEN6_PM_RPS_EVENTS; - if (IS_IRONLAKE_M(dev)) { - /* Enable PCU event interrupts - * - * spinlocking not required here for correctness since interrupt - * setup is guaranteed to run in single-threaded context. But we - * need it to make the assert_spin_locked happy. 
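
/*
 * ibx_hpd_irq_setup above now feeds two masks to
 * ibx_display_interrupt_update(): hotplug_irqs selects every hotplug
 * bit for the PCH variant, enabled_irqs only the pins whose state is
 * HPD_ENABLED, so disabled pins get masked in the same single SDEIMR
 * write. A usage sketch of that two-mask update with made-up bit
 * values (the real hpd_ibx/hpd_cpt tables are not shown here):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sdeimr = 0xffffffffu;	/* everything masked */

static void display_interrupt_update(uint32_t interrupt_mask,
				     uint32_t enabled_irq_mask)
{
	sdeimr &= ~interrupt_mask;
	sdeimr |= ~enabled_irq_mask & interrupt_mask;
}

int main(void)
{
	uint32_t hotplug_irqs = 0x0f;	/* all four assumed hotplug bits */
	uint32_t enabled_irqs = 0x05;	/* only pins 0 and 2 enabled */

	display_interrupt_update(hotplug_irqs, enabled_irqs);
	printf("SDEIMR: %#x\n", (unsigned)sdeimr);	/* 0xfffffffa */
	return 0;
}
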
*/ - spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); - } + if (HAS_VEBOX(dev)) + pm_irqs |= PM_VEBOX_USER_INTERRUPT; - return 0; + dev_priv->pm_irq_mask = 0xffffffff; + I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); + I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); + I915_WRITE(GEN6_PMIER, pm_irqs); + POSTING_READ(GEN6_PMIER); + } } -static int ivybridge_irq_postinstall(struct drm_device *dev) +static int ironlake_irq_postinstall(struct drm_device *dev) { + unsigned long irqflags; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - /* enable kind of interrupts always enabled */ - u32 display_mask = - DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | - DE_PLANEC_FLIP_DONE_IVB | - DE_PLANEB_FLIP_DONE_IVB | - DE_PLANEA_FLIP_DONE_IVB | - DE_AUX_CHANNEL_A_IVB | - DE_ERR_INT_IVB; - u32 pm_irqs = GEN6_PM_RPS_EVENTS; - u32 gt_irqs; + u32 display_mask, extra_mask; + + if (INTEL_INFO(dev)->gen >= 7) { + display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | + DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | + DE_PLANEB_FLIP_DONE_IVB | + DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | + DE_ERR_INT_IVB); + extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | + DE_PIPEA_VBLANK_IVB); + + I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); + } else { + display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | + DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | + DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | + DE_PIPEA_FIFO_UNDERRUN | DE_POISON); + extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; + } dev_priv->irq_mask = ~display_mask; /* should always can generate irq */ - I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); I915_WRITE(DEIIR, I915_READ(DEIIR)); I915_WRITE(DEIMR, dev_priv->irq_mask); - I915_WRITE(DEIER, - display_mask | - DE_PIPEC_VBLANK_IVB | - DE_PIPEB_VBLANK_IVB | - DE_PIPEA_VBLANK_IVB); + I915_WRITE(DEIER, display_mask | extra_mask); POSTING_READ(DEIER); - dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; - - I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - - gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT | - GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT; - I915_WRITE(GTIER, gt_irqs); - POSTING_READ(GTIER); - - I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); - if (HAS_VEBOX(dev)) - pm_irqs |= PM_VEBOX_USER_INTERRUPT | - PM_VEBOX_CS_ERROR_INTERRUPT; - - /* Our enable/disable rps functions may touch these registers so - * make sure to set a known state for only the non-RPS bits. - * The RMW is extra paranoia since this should be called after being set - * to a known state in preinstall. - * */ - I915_WRITE(GEN6_PMIMR, - (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs); - I915_WRITE(GEN6_PMIER, - (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs); - POSTING_READ(GEN6_PMIER); + gen5_gt_irq_postinstall(dev); ibx_irq_postinstall(dev); + if (IS_IRONLAKE_M(dev)) { + /* Enable PCU event interrupts + * + * spinlocking not required here for correctness since interrupt + * setup is guaranteed to run in single-threaded context. But we + * need it to make the assert_spin_locked happy. 
*/ + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } + return 0; } static int valleyview_irq_postinstall(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - u32 gt_irqs; u32 enable_mask; u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; + unsigned long irqflags; enable_mask = I915_DISPLAY_PORT_INTERRUPT; enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | @@ -2842,20 +2317,18 @@ static int valleyview_irq_postinstall(struct drm_device *dev) I915_WRITE(PIPESTAT(1), 0xffff); POSTING_READ(VLV_IER); + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. */ + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); i915_enable_pipestat(dev_priv, 0, pipestat_enable); i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); i915_enable_pipestat(dev_priv, 1, pipestat_enable); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff); - I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - - gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT | - GT_BLT_USER_INTERRUPT; - I915_WRITE(GTIER, gt_irqs); - POSTING_READ(GTIER); + gen5_gt_irq_postinstall(dev); /* ack & enable invalid PTE error interrupts */ #if 0 /* FIXME: add support to irq handler for checking these bits */ @@ -3001,7 +2474,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) u16 iir, new_iir; u32 pipe_stats[2]; unsigned long irqflags; - int irq_received; int pipe; u16 flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | @@ -3035,7 +2507,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) DRM_DEBUG_DRIVER("pipe %c underrun\n", pipe_name(pipe)); I915_WRITE(reg, pipe_stats[pipe]); - irq_received = 1; } } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -3323,6 +2794,7 @@ static int i965_irq_postinstall(struct drm_device *dev) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 enable_mask; u32 error_mask; + unsigned long irqflags; /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | @@ -3341,7 +2813,11 @@ static int i965_irq_postinstall(struct drm_device *dev) if (IS_G4X(dev)) enable_mask |= I915_BSD_USER_INTERRUPT; + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. 
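 * [Editor's note: the assertion this satisfies is the
 * assert_spin_locked(&dev_priv->irq_lock) check at the top of
 * i915_enable_pipestat(). Taking the lock in an init path that is
 * single-threaded by construction costs nothing, and it keeps the
 * locking contract uniform for every caller of the pipestat helpers.]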
*/ + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); /* * Enable some error detection, note the instruction error mask @@ -3616,15 +3092,6 @@ void intel_irq_init(struct drm_device *dev) dev->driver->enable_vblank = valleyview_enable_vblank; dev->driver->disable_vblank = valleyview_disable_vblank; dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; - } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { - /* Share uninstall handlers with ILK/SNB */ - dev->driver->irq_handler = ivybridge_irq_handler; - dev->driver->irq_preinstall = ivybridge_irq_preinstall; - dev->driver->irq_postinstall = ivybridge_irq_postinstall; - dev->driver->irq_uninstall = ironlake_irq_uninstall; - dev->driver->enable_vblank = ivybridge_enable_vblank; - dev->driver->disable_vblank = ivybridge_disable_vblank; - dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; } else if (HAS_PCH_SPLIT(dev)) { dev->driver->irq_handler = ironlake_irq_handler; dev->driver->irq_preinstall = ironlake_irq_preinstall; @@ -3683,3 +3150,67 @@ void intel_hpd_init(struct drm_device *dev) dev_priv->display.hpd_irq_setup(dev); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } + +/* Disable interrupts so we can allow Package C8+. */ +void hsw_pc8_disable_interrupts(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + + dev_priv->pc8.regsave.deimr = I915_READ(DEIMR); + dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR); + dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR); + dev_priv->pc8.regsave.gtier = I915_READ(GTIER); + dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); + + ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB); + ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT); + ilk_disable_gt_irq(dev_priv, 0xffffffff); + snb_disable_pm_irq(dev_priv, 0xffffffff); + + dev_priv->pc8.irqs_disabled = true; + + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + +/* Restore interrupts so we can recover from Package C8+. 
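 * [Editor's note: this is the mirror of hsw_pc8_disable_interrupts()
 * above. Each mask register saved in dev_priv->pc8.regsave is written
 * back, but only after WARN-checking that the registers still hold the
 * values programmed at disable time (everything masked except the PCH
 * event and hotplug wake sources), i.e. that nothing touched the
 * interrupt hardware while Package C8+ was allowed.]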
*/ +void hsw_pc8_restore_interrupts(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long irqflags; + uint32_t val, expected; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + + val = I915_READ(DEIMR); + expected = ~DE_PCH_EVENT_IVB; + WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected); + + val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT; + expected = ~SDE_HOTPLUG_MASK_CPT; + WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n", + val, expected); + + val = I915_READ(GTIMR); + expected = 0xffffffff; + WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected); + + val = I915_READ(GEN6_PMIMR); + expected = 0xffffffff; + WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val, + expected); + + dev_priv->pc8.irqs_disabled = false; + + ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); + ibx_enable_display_interrupt(dev_priv, + ~dev_priv->pc8.regsave.sdeimr & + ~SDE_HOTPLUG_MASK_CPT); + ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); + snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); + I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); + + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 342f1f336168..b6a58f720f9a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -61,6 +61,12 @@ #define GC_LOW_FREQUENCY_ENABLE (1 << 7) #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) #define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) +#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4) +#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4) +#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4) +#define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4) +#define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4) +#define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4) #define GC_DISPLAY_CLOCK_MASK (7 << 4) #define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) #define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) @@ -363,6 +369,7 @@ #define PUNIT_REG_GPU_LFM 0xd3 #define PUNIT_REG_GPU_FREQ_REQ 0xd4 #define PUNIT_REG_GPU_FREQ_STS 0xd8 +#define GENFREQSTATUS (1<<0) #define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc #define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ @@ -680,6 +687,7 @@ #define ERR_INT_FIFO_UNDERRUN_C (1<<6) #define ERR_INT_FIFO_UNDERRUN_B (1<<3) #define ERR_INT_FIFO_UNDERRUN_A (1<<0) +#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) #define FPGA_DBG 0x42300 #define FPGA_DBG_RM_NOCLAIM (1<<31) @@ -1127,7 +1135,8 @@ #define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018) #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) #define DPLL_VCO_ENABLE (1 << 31) -#define DPLL_DVO_HIGH_SPEED (1 << 30) +#define DPLL_SDVO_HIGH_SPEED (1 << 30) +#define DPLL_DVO_2X_MODE (1 << 30) #define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30) #define DPLL_SYNCLOCK_ENABLE (1 << 29) #define DPLL_REFA_CLK_ENABLE_VLV (1 << 29) @@ -1440,6 +1449,8 @@ #define MCH_SSKPD_WM0_MASK 0x3f #define MCH_SSKPD_WM0_VAL 0xc +#define MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c) + /* Clocking configuration register */ #define CLKCFG 0x10c00 #define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */ @@ -1696,15 +1707,26 @@ */ #define CCID 0x2180 #define CCID_EN (1<<0) +/* + * Notes on SNB/IVB/VLV context size: + * - Power context is saved elsewhere (LLC or stolen) + * - Ring/execlist context is saved on SNB, not on IVB + * - Extended context size already includes render context size + * - We always need to follow the extended context size. 
+ * SNB BSpec has comments indicating that we should use the + * render context size instead if execlists are disabled, but + * based on empirical testing that's just nonsense. + * - Pipelined/VF state is saved on SNB/IVB respectively + * - GT1 size just indicates how much of render context + * doesn't need saving on GT1 + */ #define CXT_SIZE 0x21a0 #define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f) #define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f) #define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f) #define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f) #define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f) -#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \ - GEN6_CXT_RING_SIZE(cxt_reg) + \ - GEN6_CXT_RENDER_SIZE(cxt_reg) + \ +#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \ GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \ GEN6_CXT_PIPELINE_SIZE(cxt_reg)) #define GEN7_CXT_SIZE 0x21a8 @@ -1714,11 +1736,7 @@ #define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f) #define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7) #define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f) -#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \ - GEN7_CXT_RING_SIZE(ctx_reg) + \ - GEN7_CXT_RENDER_SIZE(ctx_reg) + \ - GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ - GEN7_CXT_GT1_SIZE(ctx_reg) + \ +#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \ GEN7_CXT_VFSTATE_SIZE(ctx_reg)) /* Haswell does have the CXT_SIZE register however it does not appear to be * valid. Now, docs explain in dwords what is in the context object. The full @@ -1778,6 +1796,71 @@ #define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) #define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B) +/* HSW eDP PSR registers */ +#define EDP_PSR_CTL 0x64800 +#define EDP_PSR_ENABLE (1<<31) +#define EDP_PSR_LINK_DISABLE (0<<27) +#define EDP_PSR_LINK_STANDBY (1<<27) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25) +#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25) +#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20 +#define EDP_PSR_SKIP_AUX_EXIT (1<<12) +#define EDP_PSR_TP1_TP2_SEL (0<<11) +#define EDP_PSR_TP1_TP3_SEL (1<<11) +#define EDP_PSR_TP2_TP3_TIME_500us (0<<8) +#define EDP_PSR_TP2_TP3_TIME_100us (1<<8) +#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8) +#define EDP_PSR_TP2_TP3_TIME_0us (3<<8) +#define EDP_PSR_TP1_TIME_500us (0<<4) +#define EDP_PSR_TP1_TIME_100us (1<<4) +#define EDP_PSR_TP1_TIME_2500us (2<<4) +#define EDP_PSR_TP1_TIME_0us (3<<4) +#define EDP_PSR_IDLE_FRAME_SHIFT 0 + +#define EDP_PSR_AUX_CTL 0x64810 +#define EDP_PSR_AUX_DATA1 0x64814 +#define EDP_PSR_DPCD_COMMAND 0x80060000 +#define EDP_PSR_AUX_DATA2 0x64818 +#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24) +#define EDP_PSR_AUX_DATA3 0x6481c +#define EDP_PSR_AUX_DATA4 0x64820 +#define EDP_PSR_AUX_DATA5 0x64824 + +#define EDP_PSR_STATUS_CTL 0x64840 +#define EDP_PSR_STATUS_STATE_MASK (7<<29) +#define EDP_PSR_STATUS_STATE_IDLE (0<<29) +#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29) +#define EDP_PSR_STATUS_STATE_SRDENT (2<<29) +#define EDP_PSR_STATUS_STATE_BUFOFF (3<<29) +#define EDP_PSR_STATUS_STATE_BUFON (4<<29) +#define EDP_PSR_STATUS_STATE_AUXACK (5<<29) +#define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29) +#define EDP_PSR_STATUS_LINK_MASK (3<<26) +#define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26) +#define 
EDP_PSR_STATUS_LINK_FULL_ON (1<<26) +#define EDP_PSR_STATUS_LINK_STANDBY (2<<26) +#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20 +#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f +#define EDP_PSR_STATUS_COUNT_SHIFT 16 +#define EDP_PSR_STATUS_COUNT_MASK 0xf +#define EDP_PSR_STATUS_AUX_ERROR (1<<15) +#define EDP_PSR_STATUS_AUX_SENDING (1<<12) +#define EDP_PSR_STATUS_SENDING_IDLE (1<<9) +#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8) +#define EDP_PSR_STATUS_SENDING_TP1 (1<<4) +#define EDP_PSR_STATUS_IDLE_MASK 0xf + +#define EDP_PSR_PERF_CNT 0x64844 +#define EDP_PSR_PERF_CNT_MASK 0xffffff + +#define EDP_PSR_DEBUG_CTL 0x64860 +#define EDP_PSR_DEBUG_MASK_LPSP (1<<27) +#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26) +#define EDP_PSR_DEBUG_MASK_HPD (1<<25) + /* VGA port control */ #define ADPA 0x61100 #define PCH_ADPA 0xe1100 @@ -2053,6 +2136,7 @@ * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte * of the infoframe structure specified by CEA-861. */ #define VIDEO_DIP_DATA_SIZE 32 +#define VIDEO_DIP_VSC_DATA_SIZE 36 #define VIDEO_DIP_CTL 0x61170 /* Pre HSW: */ #define VIDEO_DIP_ENABLE (1 << 31) @@ -2200,6 +2284,8 @@ #define BLC_PWM_CPU_CTL2 0x48250 #define BLC_PWM_CPU_CTL 0x48254 +#define HSW_BLC_PWM2_CTL 0x48350 + /* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is * like the normal CTL from gen4 and earlier. Hooray for confusing naming. */ #define BLC_PWM_PCH_CTL1 0xc8250 @@ -2208,6 +2294,12 @@ #define BLM_PCH_POLARITY (1 << 29) #define BLC_PWM_PCH_CTL2 0xc8254 +#define UTIL_PIN_CTL 0x48400 +#define UTIL_PIN_ENABLE (1 << 31) + +#define PCH_GTC_CTL 0xe7000 +#define PCH_GTC_ENABLE (1 << 31) + /* TV port control */ #define TV_CTL 0x68000 /** Enables the TV encoder */ @@ -3121,9 +3213,6 @@ #define MLTR_WM2_SHIFT 8 /* the unit of memory self-refresh latency time is 0.5us */ #define ILK_SRLT_MASK 0x3f -#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK) -#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT) -#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT) /* define the fifo size on Ironlake */ #define ILK_DISPLAY_FIFO 128 @@ -3170,12 +3259,6 @@ #define SSKPD_WM2_SHIFT 16 #define SSKPD_WM3_SHIFT 24 -#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK) -#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT) -#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT) -#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT) -#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT) - /* * The two pipe frame counter registers are not synchronized, so * reading a stable value is somewhat tricky. 
The following code @@ -3726,6 +3809,9 @@ #define DE_PLANEA_FLIP_DONE_IVB (1<<3) #define DE_PIPEA_VBLANK_IVB (1<<0) +#define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7)) +#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5)) + #define VLV_MASTER_IER 0x4400c /* Gunit master IER */ #define MASTER_INTERRUPT_ENABLE (1<<31) @@ -3888,6 +3974,7 @@ #define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6) #define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3) #define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0) +#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) /* digital port hotplug */ #define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ @@ -4081,6 +4168,8 @@ _TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B) #define HSW_TVIDEO_DIP_AVI_DATA(trans) \ _TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B) +#define HSW_TVIDEO_DIP_VS_DATA(trans) \ + _TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B) #define HSW_TVIDEO_DIP_SPD_DATA(trans) \ _TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B) #define HSW_TVIDEO_DIP_GCP(trans) \ @@ -4088,6 +4177,13 @@ #define HSW_TVIDEO_DIP_VSC_DATA(trans) \ _TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B) +#define HSW_STEREO_3D_CTL_A 0x70020 +#define S3D_ENABLE (1<<31) +#define HSW_STEREO_3D_CTL_B 0x71020 + +#define HSW_STEREO_3D_CTL(trans) \ + _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A) + #define _PCH_TRANS_HTOTAL_B 0xe1000 #define _PCH_TRANS_HBLANK_B 0xe1004 #define _PCH_TRANS_HSYNC_B 0xe1008 @@ -4476,6 +4572,10 @@ #define GT_FIFO_FREE_ENTRIES 0x120008 #define GT_FIFO_NUM_RESERVED_ENTRIES 20 +#define HSW_IDICR 0x9008 +#define IDIHASHMSK(x) (((x) & 0x3f) << 16) +#define HSW_EDRAM_PRESENT 0x120010 + #define GEN6_UCGCTL1 0x9400 # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5) # define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7) @@ -4744,8 +4844,8 @@ #define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */ #define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */ #define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */ -#define HSW_PWR_WELL_ENABLE (1<<31) -#define HSW_PWR_WELL_STATE (1<<30) +#define HSW_PWR_WELL_ENABLE_REQUEST (1<<31) +#define HSW_PWR_WELL_STATE_ENABLED (1<<30) #define HSW_PWR_WELL_CTL5 0x45410 #define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31) #define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20) @@ -4866,7 +4966,8 @@ #define SBI_SSCAUXDIV6 0x0610 #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) #define SBI_DBUFF0 0x2a00 -#define SBI_DBUFF0_ENABLE (1<<0) +#define SBI_GEN0 0x1f00 +#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0) /* LPT PIXCLK_GATE */ #define PIXCLK_GATE 0xC6020 @@ -4932,7 +5033,14 @@ #define LCPLL_CLK_FREQ_450 (0<<26) #define LCPLL_CD_CLOCK_DISABLE (1<<25) #define LCPLL_CD2X_CLOCK_DISABLE (1<<23) +#define LCPLL_POWER_DOWN_ALLOW (1<<22) #define LCPLL_CD_SOURCE_FCLK (1<<21) +#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19) + +#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C) +#define D_COMP_RCOMP_IN_PROGRESS (1<<9) +#define D_COMP_COMP_FORCE (1<<8) +#define D_COMP_COMP_DISABLE (1<<0) /* Pipe WM_LINETIME - watermark line time */ #define PIPE_WM_LINETIME_A 0x45270 diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 6875b5654c63..a777e7f3b0df 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -409,6 +409,71 @@ static const struct attribute *gen6_attrs[] = { NULL, }; +static ssize_t error_state_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + + struct device *kdev = 
container_of(kobj, struct device, kobj); + struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); + struct drm_device *dev = minor->dev; + struct i915_error_state_file_priv error_priv; + struct drm_i915_error_state_buf error_str; + ssize_t ret_count = 0; + int ret; + + memset(&error_priv, 0, sizeof(error_priv)); + + ret = i915_error_state_buf_init(&error_str, count, off); + if (ret) + return ret; + + error_priv.dev = dev; + i915_error_state_get(dev, &error_priv); + + ret = i915_error_state_to_str(&error_str, &error_priv); + if (ret) + goto out; + + ret_count = count < error_str.bytes ? count : error_str.bytes; + + memcpy(buf, error_str.buf, ret_count); +out: + i915_error_state_put(&error_priv); + i915_error_state_buf_release(&error_str); + + return ret ?: ret_count; +} + +static ssize_t error_state_write(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct device *kdev = container_of(kobj, struct device, kobj); + struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); + struct drm_device *dev = minor->dev; + int ret; + + DRM_DEBUG_DRIVER("Resetting error state\n"); + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + i915_destroy_error_state(dev); + mutex_unlock(&dev->struct_mutex); + + return count; +} + +static struct bin_attribute error_state_attr = { + .attr.name = "error", + .attr.mode = S_IRUSR | S_IWUSR, + .size = 0, + .read = error_state_read, + .write = error_state_write, +}; + void i915_setup_sysfs(struct drm_device *dev) { int ret; @@ -432,10 +497,16 @@ void i915_setup_sysfs(struct drm_device *dev) if (ret) DRM_ERROR("gen6 sysfs setup failed\n"); } + + ret = sysfs_create_bin_file(&dev->primary->kdev.kobj, + &error_state_attr); + if (ret) + DRM_ERROR("error_state sysfs setup failed\n"); } void i915_teardown_sysfs(struct drm_device *dev) { + sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr); sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); #ifdef CONFIG_PM diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 3db4a6817713..e2c5ee6f6194 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -33,47 +33,52 @@ TRACE_EVENT(i915_gem_object_create, TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) ); -TRACE_EVENT(i915_gem_object_bind, - TP_PROTO(struct drm_i915_gem_object *obj, bool mappable), - TP_ARGS(obj, mappable), +TRACE_EVENT(i915_vma_bind, + TP_PROTO(struct i915_vma *vma, bool mappable), + TP_ARGS(vma, mappable), TP_STRUCT__entry( __field(struct drm_i915_gem_object *, obj) + __field(struct i915_address_space *, vm) __field(u32, offset) __field(u32, size) __field(bool, mappable) ), TP_fast_assign( - __entry->obj = obj; - __entry->offset = obj->gtt_space->start; - __entry->size = obj->gtt_space->size; + __entry->obj = vma->obj; + __entry->vm = vma->vm; + __entry->offset = vma->node.start; + __entry->size = vma->node.size; __entry->mappable = mappable; ), - TP_printk("obj=%p, offset=%08x size=%x%s", + TP_printk("obj=%p, offset=%08x size=%x%s vm=%p", __entry->obj, __entry->offset, __entry->size, - __entry->mappable ? ", mappable" : "") + __entry->mappable ? 
", mappable" : "", + __entry->vm) ); -TRACE_EVENT(i915_gem_object_unbind, - TP_PROTO(struct drm_i915_gem_object *obj), - TP_ARGS(obj), +TRACE_EVENT(i915_vma_unbind, + TP_PROTO(struct i915_vma *vma), + TP_ARGS(vma), TP_STRUCT__entry( __field(struct drm_i915_gem_object *, obj) + __field(struct i915_address_space *, vm) __field(u32, offset) __field(u32, size) ), TP_fast_assign( - __entry->obj = obj; - __entry->offset = obj->gtt_space->start; - __entry->size = obj->gtt_space->size; + __entry->obj = vma->obj; + __entry->vm = vma->vm; + __entry->offset = vma->node.start; + __entry->size = vma->node.size; ), - TP_printk("obj=%p, offset=%08x size=%x", - __entry->obj, __entry->offset, __entry->size) + TP_printk("obj=%p, offset=%08x size=%x vm=%p", + __entry->obj, __entry->offset, __entry->size, __entry->vm) ); TRACE_EVENT(i915_gem_object_change_domain, @@ -406,10 +411,12 @@ TRACE_EVENT(i915_flip_complete, TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) ); -TRACE_EVENT(i915_reg_rw, - TP_PROTO(bool write, u32 reg, u64 val, int len), +TRACE_EVENT_CONDITION(i915_reg_rw, + TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace), + + TP_ARGS(write, reg, val, len, trace), - TP_ARGS(write, reg, val, len), + TP_CONDITION(trace), TP_STRUCT__entry( __field(u64, val) diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index bcbbaea2a78e..57fe1ae32a0d 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -28,7 +28,7 @@ static const u8 intel_dsm_guid[] = { 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c }; -static int intel_dsm(acpi_handle handle, int func, int arg) +static int intel_dsm(acpi_handle handle, int func) { struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_object_list input; @@ -46,8 +46,9 @@ static int intel_dsm(acpi_handle handle, int func, int arg) params[1].integer.value = INTEL_DSM_REVISION_ID; params[2].type = ACPI_TYPE_INTEGER; params[2].integer.value = func; - params[3].type = ACPI_TYPE_INTEGER; - params[3].integer.value = arg; + params[3].type = ACPI_TYPE_PACKAGE; + params[3].package.count = 0; + params[3].package.elements = NULL; ret = acpi_evaluate_object(handle, "_DSM", &input, &output); if (ret) { @@ -151,8 +152,9 @@ static void intel_dsm_platform_mux_info(void) params[1].integer.value = INTEL_DSM_REVISION_ID; params[2].type = ACPI_TYPE_INTEGER; params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO; - params[3].type = ACPI_TYPE_INTEGER; - params[3].integer.value = 0; + params[3].type = ACPI_TYPE_PACKAGE; + params[3].package.count = 0; + params[3].package.elements = NULL; ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input, &output); @@ -205,7 +207,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev) return false; } - ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0); + ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS); if (ret < 0) { DRM_DEBUG_KMS("failed to get supported _DSM functions\n"); return false; diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 3acec8c48166..b5a3875f22c7 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -52,15 +52,14 @@ struct intel_crt { u32 adpa_reg; }; -static struct intel_crt *intel_attached_crt(struct drm_connector *connector) +static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) { - return container_of(intel_attached_encoder(connector), - struct intel_crt, base); + return container_of(encoder, struct intel_crt, base); } 
-static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder) +static struct intel_crt *intel_attached_crt(struct drm_connector *connector) { - return container_of(encoder, struct intel_crt, base); + return intel_encoder_to_crt(intel_attached_encoder(connector)); } static bool intel_crt_get_hw_state(struct intel_encoder *encoder, @@ -238,17 +237,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, return true; } -static void intel_crt_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void intel_crt_mode_set(struct intel_encoder *encoder) { - struct drm_device *dev = encoder->dev; - struct drm_crtc *crtc = encoder->crtc; - struct intel_crt *crt = - intel_encoder_to_crt(to_intel_encoder(encoder)); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_device *dev = encoder->base.dev; + struct intel_crt *crt = intel_encoder_to_crt(encoder); + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; u32 adpa; if (HAS_PCH_SPLIT(dev)) @@ -265,14 +261,14 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, if (HAS_PCH_LPT(dev)) ; /* Those bits don't exist here */ else if (HAS_PCH_CPT(dev)) - adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); - else if (intel_crtc->pipe == 0) + adpa |= PORT_TRANS_SEL_CPT(crtc->pipe); + else if (crtc->pipe == 0) adpa |= ADPA_PIPE_A_SELECT; else adpa |= ADPA_PIPE_B_SELECT; if (!HAS_PCH_SPLIT(dev)) - I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); + I915_WRITE(BCLRPAT(crtc->pipe), 0); I915_WRITE(crt->adpa_reg, adpa); } @@ -613,6 +609,10 @@ intel_crt_detect(struct drm_connector *connector, bool force) enum drm_connector_status status; struct intel_load_detect_pipe tmp; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", + connector->base.id, drm_get_connector_name(connector), + force); + if (I915_HAS_HOTPLUG(dev)) { /* We can not rely on the HPD pin always being correctly wired * up, for example many KVM do not pass it through, and so @@ -707,10 +707,6 @@ static void intel_crt_reset(struct drm_connector *connector) * Routines for controlling stuff on the analog port */ -static const struct drm_encoder_helper_funcs crt_encoder_funcs = { - .mode_set = intel_crt_mode_set, -}; - static const struct drm_connector_funcs intel_crt_connector_funcs = { .reset = intel_crt_reset, .dpms = intel_crt_dpms, @@ -800,6 +796,7 @@ void intel_crt_init(struct drm_device *dev) crt->adpa_reg = ADPA; crt->base.compute_config = intel_crt_compute_config; + crt->base.mode_set = intel_crt_mode_set; crt->base.disable = intel_disable_crt; crt->base.enable = intel_enable_crt; crt->base.get_config = intel_crt_get_config; @@ -811,7 +808,6 @@ void intel_crt_init(struct drm_device *dev) crt->base.get_hw_state = intel_crt_get_hw_state; intel_connector->get_hw_state = intel_connector_get_hw_state; - drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs); drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); drm_sysfs_connector_add(connector); diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index b042ee5c4070..63aca49d11a8 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -84,25 +84,17 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) * in either FDI or DP modes only, as HDMI connections will work with both * of those */ -static void 
intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, - bool use_fdi_mode) +static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; u32 reg; int i; - const u32 *ddi_translations = ((use_fdi_mode) ? + const u32 *ddi_translations = (port == PORT_E) ? hsw_ddi_translations_fdi : - hsw_ddi_translations_dp); + hsw_ddi_translations_dp; - DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n", - port_name(port), - use_fdi_mode ? "FDI" : "DP"); - - WARN((use_fdi_mode && (port != PORT_E)), - "Programming port %c in FDI mode, this probably will not work.\n", - port_name(port)); - - for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { + for (i = 0, reg = DDI_BUF_TRANS(port); + i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { I915_WRITE(reg, ddi_translations[i]); reg += 4; } @@ -118,14 +110,8 @@ void intel_prepare_ddi(struct drm_device *dev) if (!HAS_DDI(dev)) return; - for (port = PORT_A; port < PORT_E; port++) - intel_prepare_ddi_buffers(dev, port, false); - - /* DDI E is the suggested one to work in FDI mode, so program is as such - * by default. It will have to be re-programmed in case a digital DP - * output will be detected on it - */ - intel_prepare_ddi_buffers(dev, PORT_E, true); + for (port = PORT_A; port <= PORT_E; port++) + intel_prepare_ddi_buffers(dev, port); } static const long hsw_ddi_buf_ctl_values[] = { @@ -281,25 +267,22 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) DRM_ERROR("FDI link training failed!\n"); } -static void intel_ddi_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void intel_ddi_mode_set(struct intel_encoder *encoder) { - struct drm_crtc *crtc = encoder->crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - int port = intel_ddi_get_encoder_port(intel_encoder); - int pipe = intel_crtc->pipe; - int type = intel_encoder->type; + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + int port = intel_ddi_get_encoder_port(encoder); + int pipe = crtc->pipe; + int type = encoder->type; + struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); - intel_crtc->eld_vld = false; + crtc->eld_vld = false; if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_digital_port *intel_dig_port = - enc_to_dig_port(encoder); + enc_to_dig_port(&encoder->base); intel_dp->DP = intel_dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; @@ -307,17 +290,17 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder, if (intel_dp->has_audio) { DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n", - pipe_name(intel_crtc->pipe)); + pipe_name(crtc->pipe)); /* write eld */ DRM_DEBUG_DRIVER("DP audio: write eld information\n"); - intel_write_eld(encoder, adjusted_mode); + intel_write_eld(&encoder->base, adjusted_mode); } intel_dp_init_link_config(intel_dp); } else if (type == INTEL_OUTPUT_HDMI) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); if (intel_hdmi->has_audio) { /* Proper support for digital audio needs a new logic @@ -325,14 +308,14 @@ static void 
intel_ddi_mode_set(struct drm_encoder *encoder, * patch bombing. */ DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n", - pipe_name(intel_crtc->pipe)); + pipe_name(crtc->pipe)); /* write eld */ DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); - intel_write_eld(encoder, adjusted_mode); + intel_write_eld(&encoder->base, adjusted_mode); } - intel_hdmi->set_infoframes(encoder, adjusted_mode); + intel_hdmi->set_infoframes(&encoder->base, adjusted_mode); } } @@ -1118,6 +1101,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) intel_dp_stop_link_train(intel_dp); ironlake_edp_backlight_on(intel_dp); + intel_edp_psr_enable(intel_dp); } if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) { @@ -1148,16 +1132,20 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) if (type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + intel_edp_psr_disable(intel_dp); ironlake_edp_backlight_off(intel_dp); } } int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) { - if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) + uint32_t lcpll = I915_READ(LCPLL_CTL); + + if (lcpll & LCPLL_CD_SOURCE_FCLK) + return 800000; + else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT) return 450000; - else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) == - LCPLL_CLK_FREQ_450) + else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450) return 450000; else if (IS_ULT(dev_priv->dev)) return 337500; @@ -1309,10 +1297,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = { .destroy = intel_ddi_destroy, }; -static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = { - .mode_set = intel_ddi_mode_set, -}; - void intel_ddi_init(struct drm_device *dev, enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1337,9 +1321,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port) drm_encoder_init(dev, encoder, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs); intel_encoder->compute_config = intel_ddi_compute_config; + intel_encoder->mode_set = intel_ddi_mode_set; intel_encoder->enable = intel_enable_ddi; intel_encoder->pre_enable = intel_ddi_pre_enable; intel_encoder->disable = intel_disable_ddi; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index be79f477a38f..38452d82ac7d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -45,6 +45,15 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type); static void intel_increase_pllclock(struct drm_crtc *crtc); static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); +static void i9xx_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config); +static void ironlake_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config); + +static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, + int x, int y, struct drm_framebuffer *old_fb); + + typedef struct { int min, max; } intel_range_t; @@ -54,7 +63,6 @@ typedef struct { int p2_slow, p2_fast; } intel_p2_t; -#define INTEL_P2_NUM 2 typedef struct intel_limit intel_limit_t; struct intel_limit { intel_range_t dot, vco, n, m, m1, m2, p, p1; @@ -84,7 +92,7 @@ intel_fdi_link_freq(struct drm_device *dev) return 27; } -static const intel_limit_t intel_limits_i8xx_dvo = { +static const intel_limit_t intel_limits_i8xx_dac = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 930000, .max = 1400000 }, .n = { .min = 
3, .max = 16 }, @@ -97,6 +105,19 @@ static const intel_limit_t intel_limits_i8xx_dvo = { .p2_slow = 4, .p2_fast = 2 }, }; +static const intel_limit_t intel_limits_i8xx_dvo = { + .dot = { .min = 25000, .max = 350000 }, + .vco = { .min = 930000, .max = 1400000 }, + .n = { .min = 3, .max = 16 }, + .m = { .min = 96, .max = 140 }, + .m1 = { .min = 18, .max = 26 }, + .m2 = { .min = 6, .max = 16 }, + .p = { .min = 4, .max = 128 }, + .p1 = { .min = 2, .max = 33 }, + .p2 = { .dot_limit = 165000, + .p2_slow = 4, .p2_fast = 4 }, +}; + static const intel_limit_t intel_limits_i8xx_lvds = { .dot = { .min = 25000, .max = 350000 }, .vco = { .min = 930000, .max = 1400000 }, @@ -405,8 +426,10 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) } else { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits_i8xx_lvds; - else + else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO)) limit = &intel_limits_i8xx_dvo; + else + limit = &intel_limits_i8xx_dac; } return limit; } @@ -667,7 +690,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, { u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; u32 m, n, fastclk; - u32 updrate, minupdate, fracbits, p; + u32 updrate, minupdate, p; unsigned long bestppm, ppm, absppm; int dotclk, flag; @@ -678,7 +701,6 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, fastclk = dotclk / (2*100); updrate = 0; minupdate = 19200; - fracbits = 1; n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0; bestm1 = bestm2 = bestp1 = bestp2 = 0; @@ -892,8 +914,8 @@ static const char *state_string(bool enabled) } /* Only for pre-ILK configs */ -static void assert_pll(struct drm_i915_private *dev_priv, - enum pipe pipe, bool state) +void assert_pll(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) { int reg; u32 val; @@ -906,10 +928,8 @@ static void assert_pll(struct drm_i915_private *dev_priv, "PLL state assertion failure (expected %s, current %s)\n", state_string(state), state_string(cur_state)); } -#define assert_pll_enabled(d, p) assert_pll(d, p, true) -#define assert_pll_disabled(d, p) assert_pll(d, p, false) -static struct intel_shared_dpll * +struct intel_shared_dpll * intel_crtc_to_shared_dpll(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; @@ -921,9 +941,9 @@ intel_crtc_to_shared_dpll(struct intel_crtc *crtc) } /* For ILK+ */ -static void assert_shared_dpll(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll, - bool state) +void assert_shared_dpll(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + bool state) { bool cur_state; struct intel_dpll_hw_state hw_state; @@ -942,8 +962,6 @@ static void assert_shared_dpll(struct drm_i915_private *dev_priv, "%s assertion failure (expected %s, current %s)\n", pll->name, state_string(state), state_string(cur_state)); } -#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) -#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) static void assert_fdi_tx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) @@ -1007,15 +1025,19 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); } -static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, - enum pipe pipe) +void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) { int reg; u32 val; + bool cur_state; 
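	/* [Editor's note: this hunk upgrades the one-way "PLL must be on"
	 * WARN into the driver's usual two-way assert: read the register,
	 * collapse the enable bit to a bool with !!, and WARN when it
	 * disagrees with the state the caller expects. That is the same
	 * shape as assert_pll()/assert_shared_dpll() above, and it gives
	 * callers an assert-disabled variant for free.] */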
reg = FDI_RX_CTL(pipe); val = I915_READ(reg); - WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); + cur_state = !!(val & FDI_RX_PLL_ENABLE); + WARN(cur_state != state, + "FDI RX PLL assertion failure (expected %s, current %s)\n", + state_string(state), state_string(cur_state)); } static void assert_panel_unlocked(struct drm_i915_private *dev_priv, @@ -1111,7 +1133,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, } /* Need to check both planes against the pipe */ - for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { + for_each_pipe(i) { reg = DSPCNTR(i); val = I915_READ(reg); cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> @@ -1301,51 +1323,92 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); } -/** - * intel_enable_pll - enable a PLL - * @dev_priv: i915 private structure - * @pipe: pipe PLL to enable - * - * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to - * make sure the PLL reg is writable first though, since the panel write - * protect mechanism may be enabled. - * - * Note! This is for pre-ILK only. - * - * Unfortunately needed by dvo_ns2501 since the dvo depends on it running. - */ -static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +static void vlv_enable_pll(struct intel_crtc *crtc) { - int reg; - u32 val; + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int reg = DPLL(crtc->pipe); + u32 dpll = crtc->config.dpll_hw_state.dpll; - assert_pipe_disabled(dev_priv, pipe); + assert_pipe_disabled(dev_priv, crtc->pipe); /* No really, not for ILK+ */ - BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5); + BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); /* PLL is protected by panel, make sure we can write it */ if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) - assert_panel_unlocked(dev_priv, pipe); + assert_panel_unlocked(dev_priv, crtc->pipe); - reg = DPLL(pipe); - val = I915_READ(reg); - val |= DPLL_VCO_ENABLE; + I915_WRITE(reg, dpll); + POSTING_READ(reg); + udelay(150); + + if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) + DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe); + + I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md); + POSTING_READ(DPLL_MD(crtc->pipe)); /* We do this three times for luck */ - I915_WRITE(reg, val); + I915_WRITE(reg, dpll); POSTING_READ(reg); udelay(150); /* wait for warmup */ - I915_WRITE(reg, val); + I915_WRITE(reg, dpll); POSTING_READ(reg); udelay(150); /* wait for warmup */ - I915_WRITE(reg, val); + I915_WRITE(reg, dpll); + POSTING_READ(reg); + udelay(150); /* wait for warmup */ +} + +static void i9xx_enable_pll(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int reg = DPLL(crtc->pipe); + u32 dpll = crtc->config.dpll_hw_state.dpll; + + assert_pipe_disabled(dev_priv, crtc->pipe); + + /* No really, not for ILK+ */ + BUG_ON(dev_priv->info->gen >= 5); + + /* PLL is protected by panel, make sure we can write it */ + if (IS_MOBILE(dev) && !IS_I830(dev)) + assert_panel_unlocked(dev_priv, crtc->pipe); + + I915_WRITE(reg, dpll); + + /* Wait for the clocks to stabilize. 
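 * [Editor's note: the POSTING_READ() is what makes the udelay() start
 * counting from a known point; reading the register back forces the
 * posted write out to the hardware first. The triple write-plus-150us
 * "for luck" warmup below is carried over unchanged from the old
 * intel_enable_pll().]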
*/ + POSTING_READ(reg); + udelay(150); + + if (INTEL_INFO(dev)->gen >= 4) { + I915_WRITE(DPLL_MD(crtc->pipe), + crtc->config.dpll_hw_state.dpll_md); + } else { + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. + */ + I915_WRITE(reg, dpll); + } + + /* We do this three times for luck */ + I915_WRITE(reg, dpll); + POSTING_READ(reg); + udelay(150); /* wait for warmup */ + I915_WRITE(reg, dpll); + POSTING_READ(reg); + udelay(150); /* wait for warmup */ + I915_WRITE(reg, dpll); POSTING_READ(reg); udelay(150); /* wait for warmup */ } /** - * intel_disable_pll - disable a PLL + * i9xx_disable_pll - disable a PLL * @dev_priv: i915 private structure * @pipe: pipe PLL to disable * @@ -1353,11 +1416,8 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) * * Note! This is for pre-ILK only. */ -static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) +static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) { - int reg; - u32 val; - /* Don't disable pipe A or pipe A PLLs if needed */ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) return; @@ -1365,11 +1425,8 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) /* Make sure the pipe isn't still relying on us */ assert_pipe_disabled(dev_priv, pipe); - reg = DPLL(pipe); - val = I915_READ(reg); - val &= ~DPLL_VCO_ENABLE; - I915_WRITE(reg, val); - POSTING_READ(reg); + I915_WRITE(DPLL(pipe), 0); + POSTING_READ(DPLL(pipe)); } void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port) @@ -1819,7 +1876,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, return 0; err_unpin: - i915_gem_object_unpin(obj); + i915_gem_object_unpin_from_display_plane(obj); err_interruptible: dev_priv->mm.interruptible = true; return ret; @@ -1828,7 +1885,7 @@ err_interruptible: void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) { i915_gem_object_unpin_fence(obj); - i915_gem_object_unpin(obj); + i915_gem_object_unpin_from_display_plane(obj); } /* Computes the linear offset to the base tile and adjusts x, y. 
bytes per pixel @@ -1942,16 +1999,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, intel_crtc->dspaddr_offset = linear_offset; } - DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", - obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", + i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, + fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); if (INTEL_INFO(dev)->gen >= 4) { I915_MODIFY_DISPBASE(DSPSURF(plane), - obj->gtt_offset + intel_crtc->dspaddr_offset); + i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPLINOFF(plane), linear_offset); } else - I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset); + I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset); POSTING_READ(reg); return 0; @@ -2031,11 +2089,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc, fb->pitches[0]); linear_offset -= intel_crtc->dspaddr_offset; - DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", - obj->gtt_offset, linear_offset, x, y, fb->pitches[0]); + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", + i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, + fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_MODIFY_DISPBASE(DSPSURF(plane), - obj->gtt_offset + intel_crtc->dspaddr_offset); + i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); if (IS_HASWELL(dev)) { I915_WRITE(DSPOFFSET(plane), (y << 16) | x); } else { @@ -2183,6 +2242,20 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return ret; } + /* Update pipe size and adjust fitter if needed */ + if (i915_fastboot) { + I915_WRITE(PIPESRC(intel_crtc->pipe), + ((crtc->mode.hdisplay - 1) << 16) | + (crtc->mode.vdisplay - 1)); + if (!intel_crtc->config.pch_pfit.size && + (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || + intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { + I915_WRITE(PF_CTL(intel_crtc->pipe), 0); + I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0); + I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0); + } + } + ret = dev_priv->display.update_plane(crtc, fb, x, y); if (ret) { intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj); @@ -2203,6 +2276,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, } intel_update_fbc(dev); + intel_edp_psr_update(dev); mutex_unlock(&dev->struct_mutex); intel_crtc_update_sarea_pos(crtc, x, y); @@ -2523,7 +2597,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - u32 reg, temp, i; + u32 reg, temp, i, j; /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit for train result */ @@ -2539,97 +2613,99 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", I915_READ(FDI_RX_IIR(pipe))); - /* enable CPU FDI TX and PCH FDI RX */ - reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - temp &= ~FDI_DP_PORT_WIDTH_MASK; - temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes); - temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); - temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; - temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; - temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; - temp |= FDI_COMPOSITE_SYNC; - I915_WRITE(reg, temp | FDI_TX_ENABLE); - - I915_WRITE(FDI_RX_MISC(pipe), - FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); - - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - temp &= 
~FDI_LINK_TRAIN_AUTO; - temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; - temp |= FDI_COMPOSITE_SYNC; - I915_WRITE(reg, temp | FDI_RX_ENABLE); + /* Try each vswing and preemphasis setting twice before moving on */ + for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { + /* disable first in case we need to retry */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); + temp &= ~FDI_TX_ENABLE; + I915_WRITE(reg, temp); - POSTING_READ(reg); - udelay(150); + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_AUTO; + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp &= ~FDI_RX_ENABLE; + I915_WRITE(reg, temp); - for (i = 0; i < 4; i++) { + /* enable CPU FDI TX and PCH FDI RX */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); + temp &= ~FDI_DP_PORT_WIDTH_MASK; + temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes); + temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; - temp |= snb_b_fdi_train_param[i]; - I915_WRITE(reg, temp); + temp |= snb_b_fdi_train_param[j/2]; + temp |= FDI_COMPOSITE_SYNC; + I915_WRITE(reg, temp | FDI_TX_ENABLE); - POSTING_READ(reg); - udelay(500); + I915_WRITE(FDI_RX_MISC(pipe), + FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); - reg = FDI_RX_IIR(pipe); + reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); - - if (temp & FDI_RX_BIT_LOCK || - (I915_READ(reg) & FDI_RX_BIT_LOCK)) { - I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); - DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i); - break; - } - } - if (i == 4) - DRM_ERROR("FDI train 1 fail!\n"); + temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; + temp |= FDI_COMPOSITE_SYNC; + I915_WRITE(reg, temp | FDI_RX_ENABLE); - /* Train 2 */ - reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - temp &= ~FDI_LINK_TRAIN_NONE_IVB; - temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; - temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; - temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; - I915_WRITE(reg, temp); + POSTING_READ(reg); + udelay(1); /* should be 0.5us */ - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; - I915_WRITE(reg, temp); + for (i = 0; i < 4; i++) { + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); - POSTING_READ(reg); - udelay(150); + if (temp & FDI_RX_BIT_LOCK || + (I915_READ(reg) & FDI_RX_BIT_LOCK)) { + I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); + DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", + i); + break; + } + udelay(1); /* should be 0.5us */ + } + if (i == 4) { + DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2); + continue; + } - for (i = 0; i < 4; i++) { + /* Train 2 */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); - temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; - temp |= snb_b_fdi_train_param[i]; + temp &= ~FDI_LINK_TRAIN_NONE_IVB; + temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; + I915_WRITE(reg, temp); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; I915_WRITE(reg, temp); POSTING_READ(reg); - udelay(500); + udelay(2); /* should be 1.5us */ - reg = FDI_RX_IIR(pipe); - temp = I915_READ(reg); - DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); + for (i = 0; i < 4; i++) { + reg = FDI_RX_IIR(pipe); + temp = I915_READ(reg); + DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); - if (temp & FDI_RX_SYMBOL_LOCK) { - I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); - DRM_DEBUG_KMS("FDI train 2 done, level 
%i.\n", i); - break; + if (temp & FDI_RX_SYMBOL_LOCK || + (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) { + I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); + DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", + i); + goto train_done; + } + udelay(2); /* should be 1.5us */ } + if (i == 4) + DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2); } - if (i == 4) - DRM_ERROR("FDI train 2 fail!\n"); +train_done: DRM_DEBUG_KMS("FDI train done.\n"); } @@ -2927,15 +3003,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) /* For PCH output, training FDI link */ dev_priv->display.fdi_link_train(crtc); - /* XXX: pch pll's can be enabled any time before we enable the PCH - * transcoder, and we actually should do this to not upset any PCH - * transcoder that already use the clock when we share it. - * - * Note that enable_shared_dpll tries to do the right thing, but - * get_shared_dpll unconditionally resets the pll - we need that to have - * the right LVDS enable sequence. */ - ironlake_enable_shared_dpll(intel_crtc); - + /* We need to program the right clock selection before writing the pixel + * mutliplier into the DPLL. */ if (HAS_PCH_CPT(dev)) { u32 sel; @@ -2949,6 +3018,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) I915_WRITE(PCH_DPLL_SEL, temp); } + /* XXX: pch pll's can be enabled any time before we enable the PCH + * transcoder, and we actually should do this to not upset any PCH + * transcoder that already use the clock when we share it. + * + * Note that enable_shared_dpll tries to do the right thing, but + * get_shared_dpll unconditionally resets the pll - we need that to have + * the right LVDS enable sequence. */ + ironlake_enable_shared_dpll(intel_crtc); + /* set transcoder timing, panel must allow it */ assert_panel_unlocked(dev_priv, pipe); ironlake_pch_transcoder_set_timings(intel_crtc, pipe); @@ -3031,7 +3109,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc) crtc->config.shared_dpll = DPLL_ID_PRIVATE; } -static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, u32 dpll, u32 fp) +static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); @@ -3045,7 +3123,7 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, if (HAS_PCH_IBX(dev_priv->dev)) { /* Ironlake PCH has a fixed PLL->PCH pipe mapping. 
@@ -3031,7 +3109,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
 	crtc->config.shared_dpll = DPLL_ID_PRIVATE;
 }
 
-static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, u32 dpll, u32 fp)
+static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
 	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3045,7 +3123,7 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 
 	if (HAS_PCH_IBX(dev_priv->dev)) {
 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
-		i = crtc->pipe;
+		i = (enum intel_dpll_id) crtc->pipe;
 		pll = &dev_priv->shared_dplls[i];
 
 		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
@@ -3061,8 +3139,8 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
 		if (pll->refcount == 0)
 			continue;
 
-		if (dpll == (I915_READ(PCH_DPLL(pll->id)) & 0x7fffffff) &&
-		    fp == I915_READ(PCH_FP0(pll->id))) {
+		if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
+			   sizeof(pll->hw_state)) == 0) {
 			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n",
 				      crtc->base.base.id, pll->name, pll->refcount, pll->active);
@@ -3096,13 +3174,7 @@ found:
 		WARN_ON(pll->on);
 		assert_shared_dpll_disabled(dev_priv, pll);
 
-		/* Wait for the clocks to stabilize before rewriting the regs */
-		I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
-		POSTING_READ(PCH_DPLL(pll->id));
-		udelay(150);
-
-		I915_WRITE(PCH_FP0(pll->id), fp);
-		I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
+		pll->mode_set(dev_priv, pll);
 	}
 
 	pll->refcount++;
@@ -3174,7 +3246,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	struct intel_encoder *encoder;
 	int pipe = intel_crtc->pipe;
 	int plane = intel_crtc->plane;
-	u32 temp;
 
 	WARN_ON(!crtc->enabled);
 
@@ -3188,12 +3259,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
 	intel_update_watermarks(dev);
 
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		temp = I915_READ(PCH_LVDS);
-		if ((temp & LVDS_PORT_EN) == 0)
-			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
-	}
-
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder);
 
 	if (intel_crtc->config.has_pch_encoder) {
 		/* Note: FDI PLL enabling _must_ be done before we enable the
@@ -3205,10 +3273,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		assert_fdi_rx_disabled(dev_priv, pipe);
 	}
 
-	for_each_encoder_on_crtc(dev, crtc, encoder)
-		if (encoder->pre_enable)
-			encoder->pre_enable(encoder);
-
 	ironlake_pfit_enable(intel_crtc);
 
 	/*
@@ -3389,7 +3453,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	intel_crtc_wait_for_pending_flips(crtc);
 	drm_vblank_off(dev, pipe);
 
-	if (dev_priv->cfb_plane == plane)
+	if (dev_priv->fbc.plane == plane)
 		intel_disable_fbc(dev);
 
 	intel_crtc_update_cursor(crtc, false);
@@ -3462,7 +3526,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 	drm_vblank_off(dev, pipe);
 
 	/* FBC must be disabled before disabling the plane on HSW. */
-	if (dev_priv->cfb_plane == plane)
+	if (dev_priv->fbc.plane == plane)
 		intel_disable_fbc(dev);
 
 	hsw_disable_ips(intel_crtc);
@@ -3593,22 +3657,16 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 	intel_crtc->active = true;
 	intel_update_watermarks(dev);
 
-	mutex_lock(&dev_priv->dpio_lock);
-
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->pre_pll_enable)
 			encoder->pre_pll_enable(encoder);
 
-	intel_enable_pll(dev_priv, pipe);
+	vlv_enable_pll(intel_crtc);
 
 	for_each_encoder_on_crtc(dev, crtc, encoder)
 		if (encoder->pre_enable)
 			encoder->pre_enable(encoder);
 
-	/* VLV wants encoder enabling _before_ the pipe is up.
*/ - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->enable(encoder); - i9xx_pfit_enable(intel_crtc); intel_crtc_load_lut(crtc); @@ -3620,7 +3678,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) intel_update_fbc(dev); - mutex_unlock(&dev_priv->dpio_lock); + for_each_encoder_on_crtc(dev, crtc, encoder) + encoder->enable(encoder); } static void i9xx_crtc_enable(struct drm_crtc *crtc) @@ -3640,12 +3699,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; intel_update_watermarks(dev); - intel_enable_pll(dev_priv, pipe); - for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) encoder->pre_enable(encoder); + i9xx_enable_pll(intel_crtc); + i9xx_pfit_enable(intel_crtc); intel_crtc_load_lut(crtc); @@ -3701,7 +3760,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); - if (dev_priv->cfb_plane == plane) + if (dev_priv->fbc.plane == plane) intel_disable_fbc(dev); intel_crtc_dpms_overlay(intel_crtc, false); @@ -3717,7 +3776,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) if (encoder->post_disable) encoder->post_disable(encoder); - intel_disable_pll(dev_priv, pipe); + i9xx_disable_pll(dev_priv, pipe); intel_crtc->active = false; intel_update_fbc(dev); @@ -3817,16 +3876,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc) } } -void intel_modeset_disable(struct drm_device *dev) -{ - struct drm_crtc *crtc; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - if (crtc->enabled) - intel_crtc_disable(crtc); - } -} - void intel_encoder_destroy(struct drm_encoder *encoder) { struct intel_encoder *intel_encoder = to_intel_encoder(encoder); @@ -3835,10 +3884,10 @@ void intel_encoder_destroy(struct drm_encoder *encoder) kfree(intel_encoder); } -/* Simple dpms helper for encodres with just one connector, no cloning and only +/* Simple dpms helper for encoders with just one connector, no cloning and only * one kind of off state. It clamps all !ON modes to fully OFF and changes the * state of the entire output pipe. */ -void intel_encoder_dpms(struct intel_encoder *encoder, int mode) +static void intel_encoder_dpms(struct intel_encoder *encoder, int mode) { if (mode == DRM_MODE_DPMS_ON) { encoder->connectors_active = true; @@ -4032,7 +4081,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc, { pipe_config->ips_enabled = i915_enable_ips && hsw_crtc_supports_ips(crtc) && - pipe_config->pipe_bpp == 24; + pipe_config->pipe_bpp <= 24; } static int intel_crtc_compute_config(struct intel_crtc *crtc, @@ -4048,12 +4097,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, return -EINVAL; } - /* All interlaced capable intel hw wants timings in frames. Note though - * that intel_lvds_mode_fixup does some funny tricks with the crtc - * timings, so we need to be careful not to clobber these.*/ - if (!pipe_config->timings_set) - drm_mode_set_crtcinfo(adjusted_mode, 0); - /* Cantiga+ cannot handle modes with a hsync front porch of 0. * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 
*/ @@ -4103,6 +4146,30 @@ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) return 200000; } +static int pnv_get_display_clock_speed(struct drm_device *dev) +{ + u16 gcfgc = 0; + + pci_read_config_word(dev->pdev, GCFGC, &gcfgc); + + switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { + case GC_DISPLAY_CLOCK_267_MHZ_PNV: + return 267000; + case GC_DISPLAY_CLOCK_333_MHZ_PNV: + return 333000; + case GC_DISPLAY_CLOCK_444_MHZ_PNV: + return 444000; + case GC_DISPLAY_CLOCK_200_MHZ_PNV: + return 200000; + default: + DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc); + case GC_DISPLAY_CLOCK_133_MHZ_PNV: + return 133000; + case GC_DISPLAY_CLOCK_167_MHZ_PNV: + return 167000; + } +} + static int i915gm_get_display_clock_speed(struct drm_device *dev) { u16 gcfgc = 0; @@ -4266,14 +4333,17 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc, } I915_WRITE(FP0(pipe), fp); + crtc->config.dpll_hw_state.fp0 = fp; crtc->lowfreq_avail = false; if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && reduced_clock && i915_powersave) { I915_WRITE(FP1(pipe), fp2); + crtc->config.dpll_hw_state.fp1 = fp2; crtc->lowfreq_avail = true; } else { I915_WRITE(FP1(pipe), fp); + crtc->config.dpll_hw_state.fp1 = fp; } } @@ -4351,17 +4421,13 @@ static void vlv_update_pll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *encoder; int pipe = crtc->pipe; u32 dpll, mdiv; u32 bestn, bestm1, bestm2, bestp1, bestp2; - bool is_hdmi; u32 coreclk, reg_val, dpll_md; mutex_lock(&dev_priv->dpio_lock); - is_hdmi = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI); - bestn = crtc->config.dpll.n; bestm1 = crtc->config.dpll.m1; bestm2 = crtc->config.dpll.m2; @@ -4407,7 +4473,7 @@ static void vlv_update_pll(struct intel_crtc *crtc) intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), - 0x005f0021); + 0x009f0003); else vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), 0x00d0000f); @@ -4440,10 +4506,6 @@ static void vlv_update_pll(struct intel_crtc *crtc) vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000); - for_each_encoder_on_crtc(dev, &crtc->base, encoder) - if (encoder->pre_pll_enable) - encoder->pre_pll_enable(encoder); - /* Enable DPIO clock input */ dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; @@ -4451,17 +4513,11 @@ static void vlv_update_pll(struct intel_crtc *crtc) dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; dpll |= DPLL_VCO_ENABLE; - I915_WRITE(DPLL(pipe), dpll); - POSTING_READ(DPLL(pipe)); - udelay(150); - - if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) - DRM_ERROR("DPLL %d failed to lock\n", pipe); + crtc->config.dpll_hw_state.dpll = dpll; dpll_md = (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; - I915_WRITE(DPLL_MD(pipe), dpll_md); - POSTING_READ(DPLL_MD(pipe)); + crtc->config.dpll_hw_state.dpll_md = dpll_md; if (crtc->config.has_dp_encoder) intel_dp_set_m_n(crtc); @@ -4475,8 +4531,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *encoder; - int pipe = crtc->pipe; u32 dpll; bool is_sdvo; struct dpll *clock = &crtc->config.dpll; @@ -4499,10 +4553,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc, } if (is_sdvo) - dpll |= DPLL_DVO_HIGH_SPEED; + dpll |= 
DPLL_SDVO_HIGH_SPEED; if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) - dpll |= DPLL_DVO_HIGH_SPEED; + dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ if (IS_PINEVIEW(dev)) @@ -4538,35 +4592,16 @@ static void i9xx_update_pll(struct intel_crtc *crtc, dpll |= PLL_REF_INPUT_DREFCLK; dpll |= DPLL_VCO_ENABLE; - I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); - POSTING_READ(DPLL(pipe)); - udelay(150); - - for_each_encoder_on_crtc(dev, &crtc->base, encoder) - if (encoder->pre_pll_enable) - encoder->pre_pll_enable(encoder); - - if (crtc->config.has_dp_encoder) - intel_dp_set_m_n(crtc); - - I915_WRITE(DPLL(pipe), dpll); - - /* Wait for the clocks to stabilize. */ - POSTING_READ(DPLL(pipe)); - udelay(150); + crtc->config.dpll_hw_state.dpll = dpll; if (INTEL_INFO(dev)->gen >= 4) { u32 dpll_md = (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; - I915_WRITE(DPLL_MD(pipe), dpll_md); - } else { - /* The pixel multiplier can only be updated once the - * DPLL is enabled and the clocks are stable. - * - * So write it again. - */ - I915_WRITE(DPLL(pipe), dpll); + crtc->config.dpll_hw_state.dpll_md = dpll_md; } + + if (crtc->config.has_dp_encoder) + intel_dp_set_m_n(crtc); } static void i8xx_update_pll(struct intel_crtc *crtc, @@ -4575,8 +4610,6 @@ static void i8xx_update_pll(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *encoder; - int pipe = crtc->pipe; u32 dpll; struct dpll *clock = &crtc->config.dpll; @@ -4595,6 +4628,9 @@ static void i8xx_update_pll(struct intel_crtc *crtc, dpll |= PLL_P2_DIVIDE_BY_4; } + if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO)) + dpll |= DPLL_DVO_2X_MODE; + if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv) && num_connectors < 2) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; @@ -4602,26 +4638,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc, dpll |= PLL_REF_INPUT_DREFCLK; dpll |= DPLL_VCO_ENABLE; - I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); - POSTING_READ(DPLL(pipe)); - udelay(150); - - for_each_encoder_on_crtc(dev, &crtc->base, encoder) - if (encoder->pre_pll_enable) - encoder->pre_pll_enable(encoder); - - I915_WRITE(DPLL(pipe), dpll); - - /* Wait for the clocks to stabilize. */ - POSTING_READ(DPLL(pipe)); - udelay(150); - - /* The pixel multiplier can only be updated once the - * DPLL is enabled and the clocks are stable. - * - * So write it again. 
- */ - I915_WRITE(DPLL(pipe), dpll); + crtc->config.dpll_hw_state.dpll = dpll; } static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) @@ -4727,6 +4744,27 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc, pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1; } +static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc, + struct intel_crtc_config *pipe_config) +{ + struct drm_crtc *crtc = &intel_crtc->base; + + crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay; + crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal; + crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start; + crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end; + + crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay; + crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal; + crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start; + crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end; + + crtc->mode.flags = pipe_config->adjusted_mode.flags; + + crtc->mode.clock = pipe_config->adjusted_mode.clock; + crtc->mode.flags |= pipe_config->adjusted_mode.flags; +} + static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; @@ -4939,7 +4977,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; uint32_t tmp; - pipe_config->cpu_transcoder = crtc->pipe; + pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; tmp = I915_READ(PIPECONF(crtc->pipe)); @@ -4955,6 +4993,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, pipe_config->pixel_multiplier = ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; + pipe_config->dpll_hw_state.dpll_md = tmp; } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { tmp = I915_READ(DPLL(crtc->pipe)); pipe_config->pixel_multiplier = @@ -4966,6 +5005,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, * function. */ pipe_config->pixel_multiplier = 1; } + pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); + if (!IS_VALLEYVIEW(dev)) { + pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); + pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); + } else { + /* Mask out read-only status bits. */ + pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | + DPLL_PORTC_READY_MASK | + DPLL_PORTB_READY_MASK); + } return true; } @@ -5119,74 +5168,37 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) BUG_ON(val != final); } -/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */ -static void lpt_init_pch_refclk(struct drm_device *dev) +static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_mode_config *mode_config = &dev->mode_config; - struct intel_encoder *encoder; - bool has_vga = false; - bool is_sdv = false; - u32 tmp; - - list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { - switch (encoder->type) { - case INTEL_OUTPUT_ANALOG: - has_vga = true; - break; - } - } - - if (!has_vga) - return; - - mutex_lock(&dev_priv->dpio_lock); - - /* XXX: Rip out SDV support once Haswell ships for real. 
*/ - if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00) - is_sdv = true; - - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); - tmp &= ~SBI_SSCCTL_DISABLE; - tmp |= SBI_SSCCTL_PATHALT; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + uint32_t tmp; - udelay(24); + tmp = I915_READ(SOUTH_CHICKEN2); + tmp |= FDI_MPHY_IOSFSB_RESET_CTL; + I915_WRITE(SOUTH_CHICKEN2, tmp); - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); - tmp &= ~SBI_SSCCTL_PATHALT; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS, 100)) + DRM_ERROR("FDI mPHY reset assert timeout\n"); - if (!is_sdv) { - tmp = I915_READ(SOUTH_CHICKEN2); - tmp |= FDI_MPHY_IOSFSB_RESET_CTL; - I915_WRITE(SOUTH_CHICKEN2, tmp); + tmp = I915_READ(SOUTH_CHICKEN2); + tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; + I915_WRITE(SOUTH_CHICKEN2, tmp); - if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS, 100)) - DRM_ERROR("FDI mPHY reset assert timeout\n"); - - tmp = I915_READ(SOUTH_CHICKEN2); - tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; - I915_WRITE(SOUTH_CHICKEN2, tmp); + if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) + DRM_ERROR("FDI mPHY reset de-assert timeout\n"); +} - if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS) == 0, - 100)) - DRM_ERROR("FDI mPHY reset de-assert timeout\n"); - } +/* WaMPhyProgramming:hsw */ +static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) +{ + uint32_t tmp; tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); tmp &= ~(0xFF << 24); tmp |= (0x12 << 24); intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); - if (is_sdv) { - tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY); - tmp |= 0x7FFF; - intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY); - } - tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); tmp |= (1 << 11); intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); @@ -5195,24 +5207,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev) tmp |= (1 << 11); intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); - if (is_sdv) { - tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY); - tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16); - intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY); - tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16); - intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY); - tmp |= (0x3F << 8); - intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY); - tmp |= (0x3F << 8); - intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY); - } - tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); tmp |= (1 << 24) | (1 << 21) | (1 << 18); intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); @@ -5221,17 +5215,15 @@ static void lpt_init_pch_refclk(struct drm_device *dev) tmp |= (1 << 24) | (1 << 21) | (1 << 18); intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); - if (!is_sdv) { - tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); - tmp &= ~(7 << 13); - tmp |= (5 << 13); - intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); + tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); + tmp &= ~(7 << 13); + tmp |= (5 << 13); + intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); - tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); - tmp &= ~(7 << 13); - tmp |= (5 << 13); - intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); - } + tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); + tmp &= ~(7 << 
13); + tmp |= (5 << 13); + intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); tmp &= ~0xFF; @@ -5253,34 +5245,120 @@ static void lpt_init_pch_refclk(struct drm_device *dev) tmp |= (0x1C << 16); intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); - if (!is_sdv) { - tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); - tmp |= (1 << 27); - intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); + tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); + tmp |= (1 << 27); + intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); + tmp |= (1 << 27); + intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); + tmp &= ~(0xF << 28); + tmp |= (4 << 28); + intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); + tmp &= ~(0xF << 28); + tmp |= (4 << 28); + intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); +} + +/* Implements 3 different sequences from BSpec chapter "Display iCLK + * Programming" based on the parameters passed: + * - Sequence to enable CLKOUT_DP + * - Sequence to enable CLKOUT_DP without spread + * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O + */ +static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, + bool with_fdi) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t reg, tmp; + + if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) + with_spread = true; + if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE && + with_fdi, "LP PCH doesn't have FDI\n")) + with_fdi = false; + + mutex_lock(&dev_priv->dpio_lock); + + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + tmp &= ~SBI_SSCCTL_DISABLE; + tmp |= SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); - tmp |= (1 << 27); - intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); + udelay(24); - tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); - tmp &= ~(0xF << 28); - tmp |= (4 << 28); - intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); + if (with_spread) { + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + tmp &= ~SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); - tmp &= ~(0xF << 28); - tmp |= (4 << 28); - intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); + if (with_fdi) { + lpt_reset_fdi_mphy(dev_priv); + lpt_program_fdi_mphy(dev_priv); + } } - /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */ - tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK); - tmp |= SBI_DBUFF0_ENABLE; - intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK); + reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? + SBI_GEN0 : SBI_DBUFF0; + tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); + tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; + intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); + + mutex_unlock(&dev_priv->dpio_lock); +} + +/* Sequence to disable CLKOUT_DP */ +static void lpt_disable_clkout_dp(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t reg, tmp; + + mutex_lock(&dev_priv->dpio_lock); + + reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? 
+ SBI_GEN0 : SBI_DBUFF0; + tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); + tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; + intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); + + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + if (!(tmp & SBI_SSCCTL_DISABLE)) { + if (!(tmp & SBI_SSCCTL_PATHALT)) { + tmp |= SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + udelay(32); + } + tmp |= SBI_SSCCTL_DISABLE; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + } mutex_unlock(&dev_priv->dpio_lock); } +static void lpt_init_pch_refclk(struct drm_device *dev) +{ + struct drm_mode_config *mode_config = &dev->mode_config; + struct intel_encoder *encoder; + bool has_vga = false; + + list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { + switch (encoder->type) { + case INTEL_OUTPUT_ANALOG: + has_vga = true; + break; + } + } + + if (has_vga) + lpt_enable_clkout_dp(dev, true, true); + else + lpt_disable_clkout_dp(dev); +} + /* * Initialize reference clocks when the driver loads */ @@ -5610,9 +5688,9 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; if (is_sdvo) - dpll |= DPLL_DVO_HIGH_SPEED; + dpll |= DPLL_SDVO_HIGH_SPEED; if (intel_crtc->config.has_dp_encoder) - dpll |= DPLL_DVO_HIGH_SPEED; + dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; @@ -5708,7 +5786,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, else intel_crtc->config.dpll_hw_state.fp1 = fp; - pll = intel_get_shared_dpll(intel_crtc, dpll, fp); + pll = intel_get_shared_dpll(intel_crtc); if (pll == NULL) { DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", pipe_name(pipe)); @@ -5720,10 +5798,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, if (intel_crtc->config.has_dp_encoder) intel_dp_set_m_n(intel_crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->pre_pll_enable) - encoder->pre_pll_enable(encoder); - if (is_lvds && has_reduced_clock && i915_powersave) intel_crtc->lowfreq_avail = true; else @@ -5732,23 +5806,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, if (intel_crtc->config.has_pch_encoder) { pll = intel_crtc_to_shared_dpll(intel_crtc); - I915_WRITE(PCH_DPLL(pll->id), dpll); - - /* Wait for the clocks to stabilize. */ - POSTING_READ(PCH_DPLL(pll->id)); - udelay(150); - - /* The pixel multiplier can only be updated once the - * DPLL is enabled and the clocks are stable. - * - * So write it again. - */ - I915_WRITE(PCH_DPLL(pll->id), dpll); - - if (has_reduced_clock) - I915_WRITE(PCH_FP1(pll->id), fp2); - else - I915_WRITE(PCH_FP1(pll->id), fp); } intel_set_pipe_timings(intel_crtc); @@ -5820,7 +5877,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; uint32_t tmp; - pipe_config->cpu_transcoder = crtc->pipe; + pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; tmp = I915_READ(PIPECONF(crtc->pipe)); @@ -5838,12 +5895,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, ironlake_get_fdi_m_n_config(crtc, pipe_config); - /* XXX: Can't properly read out the pch dpll pixel multiplier - * since we don't have state tracking for pch clocks yet. 
*/ - pipe_config->pixel_multiplier = 1; - if (HAS_PCH_IBX(dev_priv->dev)) { - pipe_config->shared_dpll = crtc->pipe; + pipe_config->shared_dpll = + (enum intel_dpll_id) crtc->pipe; } else { tmp = I915_READ(PCH_DPLL_SEL); if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) @@ -5856,6 +5910,11 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, WARN_ON(!pll->get_hw_state(dev_priv, pll, &pipe_config->dpll_hw_state)); + + tmp = pipe_config->dpll_hw_state.dpll; + pipe_config->pixel_multiplier = + ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) + >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; } else { pipe_config->pixel_multiplier = 1; } @@ -5867,6 +5926,305 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, return true; } +static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct intel_ddi_plls *plls = &dev_priv->ddi_plls; + struct intel_crtc *crtc; + unsigned long irqflags; + uint32_t val; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) + WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n", + pipe_name(crtc->pipe)); + + WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); + WARN(plls->spll_refcount, "SPLL enabled\n"); + WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n"); + WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n"); + WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); + WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, + "CPU PWM1 enabled\n"); + WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, + "CPU PWM2 enabled\n"); + WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, + "PCH PWM1 enabled\n"); + WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, + "Utility pin enabled\n"); + WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + val = I915_READ(DEIMR); + WARN((val & ~DE_PCH_EVENT_IVB) != val, + "Unexpected DEIMR bits enabled: 0x%x\n", val); + val = I915_READ(SDEIMR); + WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff, + "Unexpected SDEIMR bits enabled: 0x%x\n", val); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); +} + +/* + * This function implements pieces of two sequences from BSpec: + * - Sequence for display software to disable LCPLL + * - Sequence for display software to allow package C8+ + * The steps implemented here are just the steps that actually touch the LCPLL + * register. Callers should take care of disabling all the display engine + * functions, doing the mode unset, fixing interrupts, etc. 
+ */ +void hsw_disable_lcpll(struct drm_i915_private *dev_priv, + bool switch_to_fclk, bool allow_power_down) +{ + uint32_t val; + + assert_can_disable_lcpll(dev_priv); + + val = I915_READ(LCPLL_CTL); + + if (switch_to_fclk) { + val |= LCPLL_CD_SOURCE_FCLK; + I915_WRITE(LCPLL_CTL, val); + + if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE, 1)) + DRM_ERROR("Switching to FCLK failed\n"); + + val = I915_READ(LCPLL_CTL); + } + + val |= LCPLL_PLL_DISABLE; + I915_WRITE(LCPLL_CTL, val); + POSTING_READ(LCPLL_CTL); + + if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1)) + DRM_ERROR("LCPLL still locked\n"); + + val = I915_READ(D_COMP); + val |= D_COMP_COMP_DISABLE; + I915_WRITE(D_COMP, val); + POSTING_READ(D_COMP); + ndelay(100); + + if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) + DRM_ERROR("D_COMP RCOMP still in progress\n"); + + if (allow_power_down) { + val = I915_READ(LCPLL_CTL); + val |= LCPLL_POWER_DOWN_ALLOW; + I915_WRITE(LCPLL_CTL, val); + POSTING_READ(LCPLL_CTL); + } +} + +/* + * Fully restores LCPLL, disallowing power down and switching back to LCPLL + * source. + */ +void hsw_restore_lcpll(struct drm_i915_private *dev_priv) +{ + uint32_t val; + + val = I915_READ(LCPLL_CTL); + + if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | + LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) + return; + + /* Make sure we're not on PC8 state before disabling PC8, otherwise + * we'll hang the machine! */ + dev_priv->uncore.funcs.force_wake_get(dev_priv); + + if (val & LCPLL_POWER_DOWN_ALLOW) { + val &= ~LCPLL_POWER_DOWN_ALLOW; + I915_WRITE(LCPLL_CTL, val); + POSTING_READ(LCPLL_CTL); + } + + val = I915_READ(D_COMP); + val |= D_COMP_COMP_FORCE; + val &= ~D_COMP_COMP_DISABLE; + I915_WRITE(D_COMP, val); + POSTING_READ(D_COMP); + + val = I915_READ(LCPLL_CTL); + val &= ~LCPLL_PLL_DISABLE; + I915_WRITE(LCPLL_CTL, val); + + if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5)) + DRM_ERROR("LCPLL not locked yet\n"); + + if (val & LCPLL_CD_SOURCE_FCLK) { + val = I915_READ(LCPLL_CTL); + val &= ~LCPLL_CD_SOURCE_FCLK; + I915_WRITE(LCPLL_CTL, val); + + if (wait_for_atomic_us((I915_READ(LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) + DRM_ERROR("Switching back to LCPLL failed\n"); + } + + dev_priv->uncore.funcs.force_wake_put(dev_priv); +} + +void hsw_enable_pc8_work(struct work_struct *__work) +{ + struct drm_i915_private *dev_priv = + container_of(to_delayed_work(__work), struct drm_i915_private, + pc8.enable_work); + struct drm_device *dev = dev_priv->dev; + uint32_t val; + + if (dev_priv->pc8.enabled) + return; + + DRM_DEBUG_KMS("Enabling package C8+\n"); + + dev_priv->pc8.enabled = true; + + if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { + val = I915_READ(SOUTH_DSPCLK_GATE_D); + val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; + I915_WRITE(SOUTH_DSPCLK_GATE_D, val); + } + + lpt_disable_clkout_dp(dev); + hsw_pc8_disable_interrupts(dev); + hsw_disable_lcpll(dev_priv, true, true); +} + +static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv) +{ + WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock)); + WARN(dev_priv->pc8.disable_count < 1, + "pc8.disable_count: %d\n", dev_priv->pc8.disable_count); + + dev_priv->pc8.disable_count--; + if (dev_priv->pc8.disable_count != 0) + return; + + schedule_delayed_work(&dev_priv->pc8.enable_work, + msecs_to_jiffies(i915_pc8_timeout)); +} + +static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + uint32_t val; + + 
WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock)); + WARN(dev_priv->pc8.disable_count < 0, + "pc8.disable_count: %d\n", dev_priv->pc8.disable_count); + + dev_priv->pc8.disable_count++; + if (dev_priv->pc8.disable_count != 1) + return; + + cancel_delayed_work_sync(&dev_priv->pc8.enable_work); + if (!dev_priv->pc8.enabled) + return; + + DRM_DEBUG_KMS("Disabling package C8+\n"); + + hsw_restore_lcpll(dev_priv); + hsw_pc8_restore_interrupts(dev); + lpt_init_pch_refclk(dev); + + if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { + val = I915_READ(SOUTH_DSPCLK_GATE_D); + val |= PCH_LP_PARTITION_LEVEL_DISABLE; + I915_WRITE(SOUTH_DSPCLK_GATE_D, val); + } + + intel_prepare_ddi(dev); + i915_gem_init_swizzling(dev); + mutex_lock(&dev_priv->rps.hw_lock); + gen6_update_ring_freq(dev); + mutex_unlock(&dev_priv->rps.hw_lock); + dev_priv->pc8.enabled = false; +} + +void hsw_enable_package_c8(struct drm_i915_private *dev_priv) +{ + mutex_lock(&dev_priv->pc8.lock); + __hsw_enable_package_c8(dev_priv); + mutex_unlock(&dev_priv->pc8.lock); +} + +void hsw_disable_package_c8(struct drm_i915_private *dev_priv) +{ + mutex_lock(&dev_priv->pc8.lock); + __hsw_disable_package_c8(dev_priv); + mutex_unlock(&dev_priv->pc8.lock); +} + +static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct intel_crtc *crtc; + uint32_t val; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) + if (crtc->base.enabled) + return false; + + /* This case is still possible since we have the i915.disable_power_well + * parameter and also the KVMr or something else might be requesting the + * power well. */ + val = I915_READ(HSW_PWR_WELL_DRIVER); + if (val != 0) { + DRM_DEBUG_KMS("Not enabling PC8: power well on\n"); + return false; + } + + return true; +} + +/* Since we're called from modeset_global_resources there's no way to + * symmetrically increase and decrease the refcount, so we use + * dev_priv->pc8.requirements_met to track whether we already have the refcount + * or not. 
+ */ +static void hsw_update_package_c8(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + bool allow; + + if (!i915_enable_pc8) + return; + + mutex_lock(&dev_priv->pc8.lock); + + allow = hsw_can_enable_package_c8(dev_priv); + + if (allow == dev_priv->pc8.requirements_met) + goto done; + + dev_priv->pc8.requirements_met = allow; + + if (allow) + __hsw_enable_package_c8(dev_priv); + else + __hsw_disable_package_c8(dev_priv); + +done: + mutex_unlock(&dev_priv->pc8.lock); +} + +static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv) +{ + if (!dev_priv->pc8.gpu_idle) { + dev_priv->pc8.gpu_idle = true; + hsw_enable_package_c8(dev_priv); + } +} + +static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv) +{ + if (dev_priv->pc8.gpu_idle) { + dev_priv->pc8.gpu_idle = false; + hsw_disable_package_c8(dev_priv); + } +} + static void haswell_modeset_global_resources(struct drm_device *dev) { bool enable = false; @@ -5882,6 +6240,8 @@ static void haswell_modeset_global_resources(struct drm_device *dev) } intel_set_power_well(dev, enable); + + hsw_update_package_c8(dev); } static int haswell_crtc_mode_set(struct drm_crtc *crtc, @@ -5935,7 +6295,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, enum intel_display_power_domain pfit_domain; uint32_t tmp; - pipe_config->cpu_transcoder = crtc->pipe; + pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); @@ -6005,11 +6365,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_encoder_helper_funcs *encoder_funcs; struct intel_encoder *encoder; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_display_mode *adjusted_mode = - &intel_crtc->config.adjusted_mode; struct drm_display_mode *mode = &intel_crtc->config.requested_mode; int pipe = intel_crtc->pipe; int ret; @@ -6028,12 +6385,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, encoder->base.base.id, drm_get_encoder_name(&encoder->base), mode->base.id, mode->name); - if (encoder->mode_set) { - encoder->mode_set(encoder); - } else { - encoder_funcs = encoder->base.helper_private; - encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode); - } + encoder->mode_set(encoder); } return 0; @@ -6548,7 +6900,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, goto fail_unpin; } - addr = obj->gtt_offset; + addr = i915_gem_obj_ggtt_offset(obj); } else { int align = IS_I830(dev) ? 16 * 1024 : 256; ret = i915_gem_attach_phys_object(dev, obj, @@ -6570,7 +6922,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, if (intel_crtc->cursor_bo != obj) i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); } else - i915_gem_object_unpin(intel_crtc->cursor_bo); + i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); drm_gem_object_unreference(&intel_crtc->cursor_bo->base); } @@ -6585,7 +6937,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, return 0; fail_unpin: - i915_gem_object_unpin(obj); + i915_gem_object_unpin_from_display_plane(obj); fail_locked: mutex_unlock(&dev->struct_mutex); fail: @@ -6875,11 +7227,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, } /* Returns the clock of the currently programmed mode of the given pipe. 
 */
-static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+				struct intel_crtc_config *pipe_config)
 {
+	struct drm_device *dev = crtc->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
+	int pipe = pipe_config->cpu_transcoder;
 	u32 dpll = I915_READ(DPLL(pipe));
 	u32 fp;
 	intel_clock_t clock;
@@ -6918,7 +7271,8 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 	default:
 		DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
			      "mode\n", (int)(dpll & DPLL_MODE_MASK));
-		return 0;
+		pipe_config->adjusted_mode.clock = 0;
+		return;
 	}
 
 	if (IS_PINEVIEW(dev))
@@ -6955,12 +7309,55 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
 		}
 	}
 
-	/* XXX: It would be nice to validate the clocks, but we can't reuse
-	 * i830PllIsValid() because it relies on the xf86_config connector
-	 * configuration being accurate, which it isn't necessarily.
+	pipe_config->adjusted_mode.clock = clock.dot *
+		pipe_config->pixel_multiplier;
+}
+
+static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
+				    struct intel_crtc_config *pipe_config)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+	int link_freq, repeat;
+	u64 clock;
+	u32 link_m, link_n;
+
+	repeat = pipe_config->pixel_multiplier;
+
+	/*
+	 * The calculation for the data clock is:
+	 * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
+	 * But we want to avoid losing precision if possible, so:
+	 * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
+	 *
+	 * and the link clock is simpler:
+	 * link_clock = (m * link_clock * repeat) / n
+	 */
+
+	/*
+	 * We need to get the FDI or DP link clock here to derive
+	 * the M/N dividers.
+	 *
+	 * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
+	 * For DP, it's either 1.62GHz or 2.7GHz.
+	 * We do our calculations in 10*MHz since we don't need much precision.
	 */
+	if (pipe_config->has_pch_encoder)
+		link_freq = intel_fdi_link_freq(dev) * 10000;
+	else
+		link_freq = pipe_config->port_clock;
 
-	return clock.dot;
+	link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
+	link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
+
+	if (!link_m || !link_n)
+		return;
+
+	clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
+	do_div(clock, link_n);
+
+	pipe_config->adjusted_mode.clock = clock;
 }
 
 /** Returns the currently programmed mode of the given pipe. */
@@ -6971,6 +7368,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 	struct drm_display_mode *mode;
+	struct intel_crtc_config pipe_config;
 	int htot = I915_READ(HTOTAL(cpu_transcoder));
 	int hsync = I915_READ(HSYNC(cpu_transcoder));
 	int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -6980,7 +7378,18 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 	if (!mode)
 		return NULL;
 
-	mode->clock = intel_crtc_clock_get(dev, crtc);
+	/*
+	 * Construct a pipe_config sufficient for getting the clock info
+	 * back out of crtc_clock_get.
+	 *
+	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
+	 * to use a real value here instead.
+ */ + pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe; + pipe_config.pixel_multiplier = 1; + i9xx_crtc_clock_get(intel_crtc, &pipe_config); + + mode->clock = pipe_config.adjusted_mode.clock; mode->hdisplay = (htot & 0xffff) + 1; mode->htotal = ((htot & 0xffff0000) >> 16) + 1; mode->hsync_start = (hsync & 0xffff) + 1; @@ -7064,13 +7473,19 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) void intel_mark_busy(struct drm_device *dev) { - i915_update_gfx_val(dev->dev_private); + struct drm_i915_private *dev_priv = dev->dev_private; + + hsw_package_c8_gpu_busy(dev_priv); + i915_update_gfx_val(dev_priv); } void intel_mark_idle(struct drm_device *dev) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; + hsw_package_c8_gpu_idle(dev_priv); + if (!i915_powersave) return; @@ -7235,7 +7650,8 @@ inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) static int intel_gen2_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj) + struct drm_i915_gem_object *obj, + uint32_t flags) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -7263,7 +7679,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0]); - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); intel_ring_emit(ring, 0); /* aux display base address, unused */ intel_mark_page_flip_active(intel_crtc); @@ -7279,7 +7695,8 @@ err: static int intel_gen3_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj) + struct drm_i915_gem_object *obj, + uint32_t flags) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -7304,7 +7721,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0]); - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); intel_ring_emit(ring, MI_NOOP); intel_mark_page_flip_active(intel_crtc); @@ -7320,7 +7737,8 @@ err: static int intel_gen4_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj) + struct drm_i915_gem_object *obj, + uint32_t flags) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -7344,7 +7762,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0]); intel_ring_emit(ring, - (obj->gtt_offset + intel_crtc->dspaddr_offset) | + (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) | obj->tiling_mode); /* XXX Enabling the panel-fitter across page-flip is so far @@ -7368,7 +7786,8 @@ err: static int intel_gen6_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj) + struct drm_i915_gem_object *obj, + uint32_t flags) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -7387,7 +7806,7 @@ static int 
intel_gen6_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); /* Contrary to the suggestions in the documentation, * "Enable Panel Fitter" does not seem to be required when page @@ -7418,7 +7837,8 @@ err: static int intel_gen7_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj) + struct drm_i915_gem_object *obj, + uint32_t flags) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -7452,7 +7872,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); intel_ring_emit(ring, (MI_NOOP)); intel_mark_page_flip_active(intel_crtc); @@ -7468,14 +7888,16 @@ err: static int intel_default_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj) + struct drm_i915_gem_object *obj, + uint32_t flags) { return -ENODEV; } static int intel_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event) + struct drm_pending_vblank_event *event, + uint32_t page_flip_flags) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -7545,7 +7967,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, atomic_inc(&intel_crtc->unpin_work_count); intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); - ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); + ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags); if (ret) goto cleanup_pending; @@ -7789,7 +8211,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct drm_device *dev = crtc->dev; - struct drm_encoder_helper_funcs *encoder_funcs; struct intel_encoder *encoder; struct intel_crtc_config *pipe_config; int plane_bpp, ret = -EINVAL; @@ -7806,9 +8227,23 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, drm_mode_copy(&pipe_config->adjusted_mode, mode); drm_mode_copy(&pipe_config->requested_mode, mode); - pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe; + pipe_config->cpu_transcoder = + (enum transcoder) to_intel_crtc(crtc)->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; + /* + * Sanitize sync polarity flags based on requested ones. If neither + * positive or negative polarity is requested, treat this as meaning + * negative polarity. + */ + if (!(pipe_config->adjusted_mode.flags & + (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) + pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; + + if (!(pipe_config->adjusted_mode.flags & + (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) + pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; + /* Compute a starting value for pipe_config->pipe_bpp taking the source * plane pixel format and any sink constraints into account. 
Returns the * source plane bpp so that dithering can be selected on mismatches @@ -7823,6 +8258,9 @@ encoder_retry: pipe_config->port_clock = 0; pipe_config->pixel_multiplier = 1; + /* Fill in default crtc timings, allow encoders to overwrite them. */ + drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0); + /* Pass our mode to the connectors and the CRTC to give them a chance to * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. @@ -7833,20 +8271,8 @@ encoder_retry: if (&encoder->new_crtc->base != crtc) continue; - if (encoder->compute_config) { - if (!(encoder->compute_config(encoder, pipe_config))) { - DRM_DEBUG_KMS("Encoder config failure\n"); - goto fail; - } - - continue; - } - - encoder_funcs = encoder->base.helper_private; - if (!(encoder_funcs->mode_fixup(&encoder->base, - &pipe_config->requested_mode, - &pipe_config->adjusted_mode))) { - DRM_DEBUG_KMS("Encoder fixup failed\n"); + if (!(encoder->compute_config(encoder, pipe_config))) { + DRM_DEBUG_KMS("Encoder config failure\n"); goto fail; } } @@ -8041,6 +8467,28 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) } +static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur, + struct intel_crtc_config *new) +{ + int clock1, clock2, diff; + + clock1 = cur->adjusted_mode.clock; + clock2 = new->adjusted_mode.clock; + + if (clock1 == clock2) + return true; + + if (!clock1 || !clock2) + return false; + + diff = abs(clock1 - clock2); + + if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) + return true; + + return false; +} + #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \ list_for_each_entry((intel_crtc), \ &(dev)->mode_config.crtc_list, \ @@ -8072,7 +8520,7 @@ intel_pipe_config_compare(struct drm_device *dev, #define PIPE_CONF_CHECK_FLAGS(name, mask) \ if ((current_config->name ^ pipe_config->name) & (mask)) { \ - DRM_ERROR("mismatch in " #name " " \ + DRM_ERROR("mismatch in " #name "(" #mask ") " \ "(expected %i, found %i)\n", \ current_config->name & (mask), \ pipe_config->name & (mask)); \ @@ -8106,8 +8554,7 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start); PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end); - if (!HAS_PCH_SPLIT(dev)) - PIPE_CONF_CHECK_I(pixel_multiplier); + PIPE_CONF_CHECK_I(pixel_multiplier); PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, DRM_MODE_FLAG_INTERLACE); @@ -8138,6 +8585,7 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(shared_dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll); + PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); PIPE_CONF_CHECK_X(dpll_hw_state.fp0); PIPE_CONF_CHECK_X(dpll_hw_state.fp1); @@ -8146,6 +8594,15 @@ intel_pipe_config_compare(struct drm_device *dev, #undef PIPE_CONF_CHECK_FLAGS #undef PIPE_CONF_QUIRK + if (!IS_HASWELL(dev)) { + if (!intel_fuzzy_clock_check(current_config, pipe_config)) { + DRM_ERROR("mismatch in clock (expected %d, found %d)\n", + current_config->adjusted_mode.clock, + pipe_config->adjusted_mode.clock); + return false; + } + } + return true; } @@ -8277,6 +8734,9 @@ check_crtc_state(struct drm_device *dev) encoder->get_config(encoder, &pipe_config); } + if (dev_priv->display.get_clock) + dev_priv->display.get_clock(crtc, &pipe_config); + WARN(crtc->active != active, "crtc active state doesn't match with hw state " "(expected %i, found %i)\n", crtc->active, active); @@ -8454,9 +8914,9 @@ out: return ret; } -int intel_set_mode(struct drm_crtc *crtc, - struct drm_display_mode *mode, - int 
x, int y, struct drm_framebuffer *fb) +static int intel_set_mode(struct drm_crtc *crtc, + struct drm_display_mode *mode, + int x, int y, struct drm_framebuffer *fb) { int ret; @@ -8573,8 +9033,16 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, } else if (set->crtc->fb != set->fb) { /* If we have no fb then treat it as a full mode set */ if (set->crtc->fb == NULL) { - DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); - config->mode_changed = true; + struct intel_crtc *intel_crtc = + to_intel_crtc(set->crtc); + + if (intel_crtc->active && i915_fastboot) { + DRM_DEBUG_KMS("crtc has no fb, will flip\n"); + config->fb_changed = true; + } else { + DRM_DEBUG_KMS("inactive crtc, full mode set\n"); + config->mode_changed = true; + } } else if (set->fb == NULL) { config->mode_changed = true; } else if (set->fb->pixel_format != @@ -8594,6 +9062,9 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, drm_mode_debug_printmodeline(set->mode); config->mode_changed = true; } + + DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n", + set->crtc->base.id, config->mode_changed, config->fb_changed); } static int @@ -8604,14 +9075,13 @@ intel_modeset_stage_output_state(struct drm_device *dev, struct drm_crtc *new_crtc; struct intel_connector *connector; struct intel_encoder *encoder; - int count, ro; + int ro; /* The upper layers ensure that we either disable a crtc or have a list * of connectors. For paranoia, double-check this. */ WARN_ON(!set->fb && (set->num_connectors != 0)); WARN_ON(set->fb && (set->num_connectors == 0)); - count = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { /* Otherwise traverse passed in connector list and get encoders @@ -8645,7 +9115,6 @@ intel_modeset_stage_output_state(struct drm_device *dev, /* connector->new_encoder is now updated for all connectors. */ /* Update crtc of enabled connectors. */ - count = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { if (!connector->new_encoder) @@ -8804,19 +9273,32 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, return val & DPLL_VCO_ENABLE; } +static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0); + I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1); +} + static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - uint32_t reg, val; - /* PCH refclock must be enabled first */ assert_pch_refclk_enabled(dev_priv); - reg = PCH_DPLL(pll->id); - val = I915_READ(reg); - val |= DPLL_VCO_ENABLE; - I915_WRITE(reg, val); - POSTING_READ(reg); + I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll); + + /* Wait for the clocks to stabilize. */ + POSTING_READ(PCH_DPLL(pll->id)); + udelay(150); + + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. + */ + I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll); + POSTING_READ(PCH_DPLL(pll->id)); udelay(200); } @@ -8825,7 +9307,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, { struct drm_device *dev = dev_priv->dev; struct intel_crtc *crtc; - uint32_t reg, val; /* Make sure no transcoder isn't still depending on us. 
*/ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { @@ -8833,11 +9314,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, assert_pch_transcoder_disabled(dev_priv, crtc->pipe); } - reg = PCH_DPLL(pll->id); - val = I915_READ(reg); - val &= ~DPLL_VCO_ENABLE; - I915_WRITE(reg, val); - POSTING_READ(reg); + I915_WRITE(PCH_DPLL(pll->id), 0); + POSTING_READ(PCH_DPLL(pll->id)); udelay(200); } @@ -8856,6 +9334,7 @@ static void ibx_pch_dpll_init(struct drm_device *dev) for (i = 0; i < dev_priv->num_shared_dpll; i++) { dev_priv->shared_dplls[i].id = i; dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i]; + dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set; dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable; dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable; dev_priv->shared_dplls[i].get_hw_state = @@ -9035,8 +9514,13 @@ static void intel_setup_outputs(struct drm_device *dev) intel_dp_init(dev, PCH_DP_D, PORT_D); } else if (IS_VALLEYVIEW(dev)) { /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ - if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) - intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); + if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) { + intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, + PORT_C); + if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) + intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, + PORT_C); + } if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) { intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, @@ -9096,13 +9580,17 @@ static void intel_setup_outputs(struct drm_device *dev) drm_helper_move_panel_connectors_to_head(dev); } +void intel_framebuffer_fini(struct intel_framebuffer *fb) +{ + drm_framebuffer_cleanup(&fb->base); + drm_gem_object_unreference_unlocked(&fb->obj->base); +} + static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - drm_framebuffer_cleanup(fb); - drm_gem_object_unreference_unlocked(&intel_fb->obj->base); - + intel_framebuffer_fini(intel_fb); kfree(intel_fb); } @@ -9272,6 +9760,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.update_plane = ironlake_update_plane; } else if (HAS_PCH_SPLIT(dev)) { dev_priv->display.get_pipe_config = ironlake_get_pipe_config; + dev_priv->display.get_clock = ironlake_crtc_clock_get; dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; dev_priv->display.crtc_enable = ironlake_crtc_enable; dev_priv->display.crtc_disable = ironlake_crtc_disable; @@ -9279,6 +9768,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.update_plane = ironlake_update_plane; } else if (IS_VALLEYVIEW(dev)) { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; + dev_priv->display.get_clock = i9xx_crtc_clock_get; dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; dev_priv->display.crtc_enable = valleyview_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; @@ -9286,6 +9776,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.update_plane = i9xx_update_plane; } else { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; + dev_priv->display.get_clock = i9xx_crtc_clock_get; dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; dev_priv->display.crtc_enable = i9xx_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; @@ -9303,9 +9794,12 @@ static void intel_init_display(struct drm_device *dev) else if (IS_I915G(dev)) 
dev_priv->display.get_display_clock_speed = i915_get_display_clock_speed; - else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) + else if (IS_I945GM(dev) || IS_845G(dev)) dev_priv->display.get_display_clock_speed = i9xx_misc_get_display_clock_speed; + else if (IS_PINEVIEW(dev)) + dev_priv->display.get_display_clock_speed = + pnv_get_display_clock_speed; else if (IS_I915GM(dev)) dev_priv->display.get_display_clock_speed = i915gm_get_display_clock_speed; @@ -9586,7 +10080,7 @@ void intel_modeset_init(struct drm_device *dev) INTEL_INFO(dev)->num_pipes, INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); - for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { + for_each_pipe(i) { intel_crtc_init(dev, i); for (j = 0; j < dev_priv->num_plane; j++) { ret = intel_plane_init(dev, i, j); @@ -9792,6 +10286,17 @@ void i915_redisable_vga(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 vga_reg = i915_vgacntrl_reg(dev); + /* This function can be called both from intel_modeset_setup_hw_state or + * at a very early point in our resume sequence, where the power well + * structures are not yet restored. Since this function is at a very + * paranoid "someone might have enabled VGA while we were not looking" + * level, just check if the power well is enabled instead of trying to + * follow the "don't touch the power well if we don't need it" policy + * the rest of the driver uses. */ + if (HAS_POWER_WELL(dev) && + (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0) + return; + if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); i915_disable_vga(dev); @@ -9862,6 +10367,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) pipe); } + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + base.head) { + if (!crtc->active) + continue; + if (dev_priv->display.get_clock) + dev_priv->display.get_clock(crtc, + &crtc->config); + } + list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { if (connector->get_hw_state(connector)) { @@ -9893,6 +10407,22 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, intel_modeset_readout_hw_state(dev); + /* + * Now that we have the config, copy it to each CRTC struct + * Note that this could go away if we move to using crtc_config + * checking everywhere. + */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + base.head) { + if (crtc->active && i915_fastboot) { + intel_crtc_mode_from_pipe_config(crtc, &crtc->config); + + DRM_DEBUG_KMS("[CRTC:%d] found active mode: ", + crtc->base.base.id); + drm_mode_debug_printmodeline(&crtc->base.mode); + } + } + /* HW state is read out, now we need to sanitize this mess. */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { @@ -9955,7 +10485,6 @@ void intel_modeset_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; - struct intel_crtc *intel_crtc; /* * Interrupts and polling as the first thing to avoid creating havoc. 
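intel_modeset_init() now iterates with for_each_pipe() instead of an open-coded counter. Assuming the macro keeps the definition the i915 tree already uses (an assumption — the definition is not part of this diff), it is just a counted loop that picks up dev from the caller's scope:

/* Assumed definition, matching the existing i915 convention: */
#define for_each_pipe(p) \
	for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)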
@@ -9979,7 +10508,6 @@ void intel_modeset_cleanup(struct drm_device *dev) if (!crtc->fb) continue; - intel_crtc = to_intel_crtc(crtc); intel_increase_pllclock(crtc); } @@ -10035,9 +10563,6 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) return 0; } -#ifdef CONFIG_DEBUG_FS -#include <linux/seq_file.h> - struct intel_display_error_state { u32 power_well_driver; @@ -10151,8 +10676,7 @@ intel_display_capture_error_state(struct drm_device *dev) * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to * prevent the next I915_WRITE from detecting it and printing an error * message. */ - if (HAS_POWER_WELL(dev)) - I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + intel_uncore_clear_errors(dev); return error; } @@ -10209,4 +10733,3 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); } } -#endif diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 26e162bb3a51..2151d13772b8 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -276,29 +276,13 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) return status; } -static int -intel_dp_aux_ch(struct intel_dp *intel_dp, - uint8_t *send, int send_bytes, - uint8_t *recv, int recv_size) +static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp, + int index) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; - uint32_t ch_data = ch_ctl + 4; - int i, ret, recv_bytes; - uint32_t status; - uint32_t aux_clock_divider; - int try, precharge; - bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); - /* dp aux is extremely sensitive to irq latency, hence request the - * lowest possible wakeup latency and so prevent the cpu from going into - * deep sleep states. - */ - pm_qos_update_request(&dev_priv->pm_qos, 0); - - intel_dp_check_edp(intel_dp); /* The clock divider is based off the hrawclk, * and would like to run at 2MHz. So, take the * hrawclk value and divide by 2 and use that @@ -307,29 +291,61 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, * clock divider. */ if (IS_VALLEYVIEW(dev)) { - aux_clock_divider = 100; + return index ? 0 : 100; } else if (intel_dig_port->port == PORT_A) { + if (index) + return 0; if (HAS_DDI(dev)) - aux_clock_divider = DIV_ROUND_CLOSEST( - intel_ddi_get_cdclk_freq(dev_priv), 2000); + return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000); else if (IS_GEN6(dev) || IS_GEN7(dev)) - aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ + return 200; /* SNB & IVB eDP input clock at 400Mhz */ else - aux_clock_divider = 225; /* eDP input clock at 450Mhz */ + return 225; /* eDP input clock at 450Mhz */ } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { /* Workaround for non-ULT HSW */ - aux_clock_divider = 74; + switch (index) { + case 0: return 63; + case 1: return 72; + default: return 0; + } } else if (HAS_PCH_SPLIT(dev)) { - aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2); + return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2); } else { - aux_clock_divider = intel_hrawclk(dev) / 2; + return index ? 
0 :intel_hrawclk(dev) / 2; } +} + +static int +intel_dp_aux_ch(struct intel_dp *intel_dp, + uint8_t *send, int send_bytes, + uint8_t *recv, int recv_size) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg; + uint32_t ch_data = ch_ctl + 4; + uint32_t aux_clock_divider; + int i, ret, recv_bytes; + uint32_t status; + int try, precharge, clock = 0; + bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); + + /* dp aux is extremely sensitive to irq latency, hence request the + * lowest possible wakeup latency and so prevent the cpu from going into + * deep sleep states. + */ + pm_qos_update_request(&dev_priv->pm_qos, 0); + + intel_dp_check_edp(intel_dp); if (IS_GEN6(dev)) precharge = 3; else precharge = 5; + intel_aux_display_runtime_get(dev_priv); + /* Try to wait for any previous AUX channel activity */ for (try = 0; try < 3; try++) { status = I915_READ_NOTRACE(ch_ctl); @@ -345,37 +361,41 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, goto out; } - /* Must try at least 3 times according to DP spec */ - for (try = 0; try < 5; try++) { - /* Load the send data into the aux channel data registers */ - for (i = 0; i < send_bytes; i += 4) - I915_WRITE(ch_data + i, - pack_aux(send + i, send_bytes - i)); - - /* Send the command and wait for it to complete */ - I915_WRITE(ch_ctl, - DP_AUX_CH_CTL_SEND_BUSY | - (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | - DP_AUX_CH_CTL_TIME_OUT_400us | - (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | - (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_RECEIVE_ERROR); - - status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); - - /* Clear done status and any errors */ - I915_WRITE(ch_ctl, - status | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_RECEIVE_ERROR); - - if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_RECEIVE_ERROR)) - continue; + while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) { + /* Must try at least 3 times according to DP spec */ + for (try = 0; try < 5; try++) { + /* Load the send data into the aux channel data registers */ + for (i = 0; i < send_bytes; i += 4) + I915_WRITE(ch_data + i, + pack_aux(send + i, send_bytes - i)); + + /* Send the command and wait for it to complete */ + I915_WRITE(ch_ctl, + DP_AUX_CH_CTL_SEND_BUSY | + (has_aux_irq ? 
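The AUX-channel rewrite above turns the single hardcoded divider into an indexed probe: get_aux_clock_divider() returns one candidate divider per index and 0 to terminate, and the whole transfer is retried once per candidate. A standalone illustration of that protocol, using the non-ULT Haswell workaround pair from the diff:

#include <stdint.h>
#include <stdio.h>

/* One candidate 2x-bit-clock divider per index; 0 ends the list. */
static uint32_t hsw_lpt_aux_divider(int index)
{
	switch (index) {
	case 0: return 63;
	case 1: return 72;
	default: return 0;
	}
}

int main(void)
{
	uint32_t div;
	int clock = 0;

	/* Mirrors the while() loop in intel_dp_aux_ch(): retry the whole
	 * transfer once per candidate divider before giving up. */
	while ((div = hsw_lpt_aux_divider(clock++)) != 0)
		printf("attempt AUX transfer with divider %u\n",
		       (unsigned)div);
	return 0;
}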
DP_AUX_CH_CTL_INTERRUPT : 0) | + DP_AUX_CH_CTL_TIME_OUT_400us | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); + + status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); + + /* Clear done status and any errors */ + I915_WRITE(ch_ctl, + status | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); + + if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR)) + continue; + if (status & DP_AUX_CH_CTL_DONE) + break; + } if (status & DP_AUX_CH_CTL_DONE) break; } @@ -416,6 +436,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, ret = recv_bytes; out: pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); + intel_aux_display_runtime_put(dev_priv); return ret; } @@ -710,8 +731,11 @@ intel_dp_compute_config(struct intel_encoder *encoder, /* Walk through all bpp values. Luckily they're all nicely spaced with 2 * bpc in between. */ bpp = pipe_config->pipe_bpp; - if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) + if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) { + DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", + dev_priv->vbt.edp_bpp); bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp); + } for (; bpp >= 6*3; bpp -= 2*3) { mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); @@ -812,15 +836,14 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp) udelay(500); } -static void -intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void intel_dp_mode_set(struct intel_encoder *encoder) { - struct drm_device *dev = encoder->dev; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = dp_to_dig_port(intel_dp)->port; - struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; /* * There are four kinds of DP registers: @@ -852,7 +875,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", pipe_name(crtc->pipe)); intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; - intel_write_eld(encoder, adjusted_mode); + intel_write_eld(&encoder->base, adjusted_mode); } intel_dp_init_link_config(intel_dp); @@ -1360,6 +1383,275 @@ static void intel_dp_get_config(struct intel_encoder *encoder, } pipe_config->adjusted_mode.flags |= flags; + + if (dp_to_dig_port(intel_dp)->port == PORT_A) { + if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ) + pipe_config->port_clock = 162000; + else + pipe_config->port_clock = 270000; + } +} + +static bool is_edp_psr(struct intel_dp *intel_dp) +{ + return is_edp(intel_dp) && + intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED; +} + +static bool intel_edp_is_psr_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!IS_HASWELL(dev)) + return false; + + return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE; +} + +static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp, + struct edp_vsc_psr *vsc_psr) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = 
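Also visible above is intel_dp_compute_config()'s bit-depth walk, now clamped first to any BIOS-provided eDP bpp: candidates step down two bits per channel (6 bpp) at a time until the 6 bpc floor. A standalone rendition with example inputs:

#include <stdio.h>

int main(void)
{
	int pipe_bpp = 30;	/* example: 10 bpc pipe */
	int vbt_edp_bpp = 24;	/* example: VBT says the panel is 8 bpc */
	int bpp = pipe_bpp;

	/* The new clamp: never try more than the BIOS-provided depth. */
	if (vbt_edp_bpp && vbt_edp_bpp < bpp)
		bpp = vbt_edp_bpp;

	/* Step down 2 bits per channel (3 channels) per candidate. */
	for (; bpp >= 6 * 3; bpp -= 2 * 3)
		printf("try %d bpp (%d bpc)\n", bpp, bpp / 3);
	return 0;
}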
dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); + u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder); + u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder); + uint32_t *data = (uint32_t *) vsc_psr; + unsigned int i; + + /* As per BSPec (Pipe Video Data Island Packet), we need to disable + the video DIP being updated before program video DIP data buffer + registers for DIP being updated. */ + I915_WRITE(ctl_reg, 0); + POSTING_READ(ctl_reg); + + for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) { + if (i < sizeof(struct edp_vsc_psr)) + I915_WRITE(data_reg + i, *data++); + else + I915_WRITE(data_reg + i, 0); + } + + I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW); + POSTING_READ(ctl_reg); +} + +static void intel_edp_psr_setup(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + struct edp_vsc_psr psr_vsc; + + if (intel_dp->psr_setup_done) + return; + + /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ + memset(&psr_vsc, 0, sizeof(psr_vsc)); + psr_vsc.sdp_header.HB0 = 0; + psr_vsc.sdp_header.HB1 = 0x7; + psr_vsc.sdp_header.HB2 = 0x2; + psr_vsc.sdp_header.HB3 = 0x8; + intel_edp_psr_write_vsc(intel_dp, &psr_vsc); + + /* Avoid continuous PSR exit by masking memup and hpd */ + I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP | + EDP_PSR_DEBUG_MASK_HPD); + + intel_dp->psr_setup_done = true; +} + +static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0); + int precharge = 0x3; + int msg_size = 5; /* Header(4) + Message(1) */ + + /* Enable PSR in sink */ + if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) + intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, + DP_PSR_ENABLE & + ~DP_PSR_MAIN_LINK_ACTIVE); + else + intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG, + DP_PSR_ENABLE | + DP_PSR_MAIN_LINK_ACTIVE); + + /* Setup AUX registers */ + I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND); + I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION); + I915_WRITE(EDP_PSR_AUX_CTL, + DP_AUX_CH_CTL_TIME_OUT_400us | + (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); +} + +static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t max_sleep_time = 0x1f; + uint32_t idle_frames = 1; + uint32_t val = 0x0; + + if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) { + val |= EDP_PSR_LINK_STANDBY; + val |= EDP_PSR_TP2_TP3_TIME_0us; + val |= EDP_PSR_TP1_TIME_0us; + val |= EDP_PSR_SKIP_AUX_EXIT; + } else + val |= EDP_PSR_LINK_DISABLE; + + I915_WRITE(EDP_PSR_CTL, val | + EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES | + max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | + idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | + EDP_PSR_ENABLE); +} + +static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc = dig_port->base.base.crtc; + struct intel_crtc *intel_crtc = 
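intel_edp_psr_setup() above builds the PSR VSC SDP header straight from eDP 1.3 Table 3.10, and the write loop zero-pads the remainder of the DIP buffer. A standalone model — the buffer size constant is an assumption rather than the hardware value, and a plain array stands in for the data registers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VSC_DATA_SIZE 36		/* assumption, not the HW constant */

struct vsc_sdp_header { uint8_t HB0, HB1, HB2, HB3; };

int main(void)
{
	/* eDP 1.3, Table 3.10: VSC packet, revision 2, 8 valid bytes. */
	struct vsc_sdp_header hdr = { 0x00, 0x07, 0x02, 0x08 };
	uint8_t dip[VSC_DATA_SIZE] = { 0 };

	/* Copy the packet and leave the rest of the buffer zeroed, just
	 * as the 'else write 0' branch of the loop above does. */
	memcpy(dip, &hdr, sizeof(hdr));
	printf("HB1=0x%02x (VSC) HB2=0x%02x HB3=0x%02x\n",
	       dip[1], dip[2], dip[3]);
	return 0;
}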
to_intel_crtc(crtc); + struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj; + struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; + + if (!IS_HASWELL(dev)) { + DRM_DEBUG_KMS("PSR not supported on this platform\n"); + dev_priv->no_psr_reason = PSR_NO_SOURCE; + return false; + } + + if ((intel_encoder->type != INTEL_OUTPUT_EDP) || + (dig_port->port != PORT_A)) { + DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); + dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA; + return false; + } + + if (!is_edp_psr(intel_dp)) { + DRM_DEBUG_KMS("PSR not supported by this panel\n"); + dev_priv->no_psr_reason = PSR_NO_SINK; + return false; + } + + if (!i915_enable_psr) { + DRM_DEBUG_KMS("PSR disable by flag\n"); + dev_priv->no_psr_reason = PSR_MODULE_PARAM; + return false; + } + + crtc = dig_port->base.base.crtc; + if (crtc == NULL) { + DRM_DEBUG_KMS("crtc not active for PSR\n"); + dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE; + return false; + } + + intel_crtc = to_intel_crtc(crtc); + if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) { + DRM_DEBUG_KMS("crtc not active for PSR\n"); + dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE; + return false; + } + + obj = to_intel_framebuffer(crtc->fb)->obj; + if (obj->tiling_mode != I915_TILING_X || + obj->fence_reg == I915_FENCE_REG_NONE) { + DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); + dev_priv->no_psr_reason = PSR_NOT_TILED; + return false; + } + + if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { + DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); + dev_priv->no_psr_reason = PSR_SPRITE_ENABLED; + return false; + } + + if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & + S3D_ENABLE) { + DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); + dev_priv->no_psr_reason = PSR_S3D_ENABLED; + return false; + } + + if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { + DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); + dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED; + return false; + } + + return true; +} + +static void intel_edp_psr_do_enable(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + + if (!intel_edp_psr_match_conditions(intel_dp) || + intel_edp_is_psr_enabled(dev)) + return; + + /* Setup PSR once */ + intel_edp_psr_setup(intel_dp); + + /* Enable PSR on the panel */ + intel_edp_psr_enable_sink(intel_dp); + + /* Enable PSR on the host */ + intel_edp_psr_enable_source(intel_dp); +} + +void intel_edp_psr_enable(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + + if (intel_edp_psr_match_conditions(intel_dp) && + !intel_edp_is_psr_enabled(dev)) + intel_edp_psr_do_enable(intel_dp); +} + +void intel_edp_psr_disable(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; + + if (!intel_edp_is_psr_enabled(dev)) + return; + + I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); + + /* Wait till PSR is idle */ + if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) & + EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) + DRM_ERROR("Timed out waiting for PSR Idle State\n"); +} + +void intel_edp_psr_update(struct drm_device *dev) +{ + struct intel_encoder *encoder; + struct intel_dp *intel_dp = NULL; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) + if (encoder->type == INTEL_OUTPUT_EDP) { + intel_dp = enc_to_intel_dp(&encoder->base); + + if (!is_edp_psr(intel_dp)) + return; + + 
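intel_edp_psr_match_conditions() is effectively a gatekeeper checklist. Restated as a standalone function over plain booleans (the driver's state lookups are collapsed into inputs purely for illustration):

#include <stdbool.h>
#include <stdio.h>

struct psr_inputs {
	bool is_haswell, on_ddi_a_edp, sink_has_psr, module_param;
	bool crtc_active, fb_x_tiled_and_fenced;
	bool sprite_enabled, stereo_3d, interlaced;
};

/* Returns the first blocking condition, or NULL if PSR may be enabled. */
static const char *psr_blocker(const struct psr_inputs *in)
{
	if (!in->is_haswell)            return "source lacks PSR";
	if (!in->on_ddi_a_edp)          return "HSW ties PSR to DDI A (eDP)";
	if (!in->sink_has_psr)          return "panel lacks PSR";
	if (!in->module_param)          return "disabled by i915_enable_psr";
	if (!in->crtc_active)           return "crtc not active";
	if (!in->fb_x_tiled_and_fenced) return "fb not X-tiled or unfenced";
	if (in->sprite_enabled)         return "sprite enabled";
	if (in->stereo_3d)              return "stereo 3D enabled";
	if (in->interlaced)             return "interlaced mode";
	return NULL;
}

int main(void)
{
	struct psr_inputs in = { true, true, true, true, true, true,
				 false, false, false };
	const char *why = psr_blocker(&in);

	printf("PSR %s\n", why ? why : "allowed");
	return 0;
}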
if (!intel_edp_psr_match_conditions(intel_dp)) + intel_edp_psr_disable(intel_dp); + else + if (!intel_edp_is_psr_enabled(dev)) + intel_edp_psr_do_enable(intel_dp); + } } static void intel_disable_dp(struct intel_encoder *encoder) @@ -1411,47 +1703,50 @@ static void intel_enable_dp(struct intel_encoder *encoder) intel_dp_complete_link_train(intel_dp); intel_dp_stop_link_train(intel_dp); ironlake_edp_backlight_on(intel_dp); +} - if (IS_VALLEYVIEW(dev)) { - struct intel_digital_port *dport = - enc_to_dig_port(&encoder->base); - int channel = vlv_dport_to_channel(dport); - - vlv_wait_port_ready(dev_priv, channel); - } +static void vlv_enable_dp(struct intel_encoder *encoder) +{ } static void intel_pre_enable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + + if (dport->port == PORT_A) + ironlake_edp_pll_on(intel_dp); +} + +static void vlv_pre_enable_dp(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct intel_digital_port *dport = dp_to_dig_port(intel_dp); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + int port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; + u32 val; - if (dport->port == PORT_A && !IS_VALLEYVIEW(dev)) - ironlake_edp_pll_on(intel_dp); + mutex_lock(&dev_priv->dpio_lock); - if (IS_VALLEYVIEW(dev)) { - struct intel_crtc *intel_crtc = - to_intel_crtc(encoder->base.crtc); - int port = vlv_dport_to_channel(dport); - int pipe = intel_crtc->pipe; - u32 val; - - val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); - val = 0; - if (pipe) - val |= (1<<21); - else - val &= ~(1<<21); - val |= 0x001000c4; - vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val); + val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); + val = 0; + if (pipe) + val |= (1<<21); + else + val &= ~(1<<21); + val |= 0x001000c4; + vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val); + vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018); + vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888); - vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), - 0x00760018); - vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), - 0x00400888); - } + mutex_unlock(&dev_priv->dpio_lock); + + intel_enable_dp(encoder); + + vlv_wait_port_ready(dev_priv, port); } static void intel_dp_pre_pll_enable(struct intel_encoder *encoder) @@ -1465,6 +1760,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder) return; /* Program Tx lane resets to default */ + mutex_lock(&dev_priv->dpio_lock); vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); @@ -1478,6 +1774,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder) vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00); vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500); vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000); + mutex_unlock(&dev_priv->dpio_lock); } /* @@ -1689,6 +1986,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp) return 0; } + mutex_lock(&dev_priv->dpio_lock); vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000); vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value); vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port), @@ -1697,6 +1995,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp) 
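A theme running through the ValleyView hunks here and below: every burst of vlv_dpio_read()/vlv_dpio_write() sideband accesses is now serialized under dev_priv->dpio_lock, so one DPIO unit is programmed atomically. The shape, as a non-compilable fragment with register values taken from the diff:

mutex_lock(&dev_priv->dpio_lock);
/* grouped sideband writes: program one DPIO unit without interleaving */
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
mutex_unlock(&dev_priv->dpio_lock);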
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000); vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value); vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000); + mutex_unlock(&dev_priv->dpio_lock); return 0; } @@ -2030,7 +2329,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) struct drm_device *dev = encoder->dev; int i; uint8_t voltage; - bool clock_recovery = false; int voltage_tries, loop_tries; uint32_t DP = intel_dp->DP; @@ -2048,7 +2346,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) voltage = 0xff; voltage_tries = 0; loop_tries = 0; - clock_recovery = false; for (;;) { /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint8_t link_status[DP_LINK_STATUS_SIZE]; @@ -2069,7 +2366,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) { DRM_DEBUG_KMS("clock recovery OK\n"); - clock_recovery = true; break; } @@ -2275,6 +2571,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) if (intel_dp->dpcd[DP_DPCD_REV] == 0) return false; /* DPCD not present */ + /* Check if the panel supports PSR */ + memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); + intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT, + intel_dp->psr_dpcd, + sizeof(intel_dp->psr_dpcd)); + if (is_edp_psr(intel_dp)) + DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) return true; /* native DP sink */ @@ -2542,6 +2845,9 @@ intel_dp_detect(struct drm_connector *connector, bool force) enum drm_connector_status status; struct edid *edid = NULL; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, drm_get_connector_name(connector)); + intel_dp->has_audio = false; if (HAS_PCH_SPLIT(dev)) @@ -2735,10 +3041,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) kfree(intel_dig_port); } -static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { - .mode_set = intel_dp_mode_set, -}; - static const struct drm_connector_funcs intel_dp_connector_funcs = { .dpms = intel_connector_dpms, .detect = intel_dp_detect, @@ -3166,6 +3468,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n", error, port_name(port)); + intel_dp->psr_setup_done = false; + if (!intel_edp_init_connector(intel_dp, intel_connector)) { i2c_del_adapter(&intel_dp->adapter); if (is_edp(intel_dp)) { @@ -3216,17 +3520,21 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); intel_encoder->compute_config = intel_dp_compute_config; - intel_encoder->enable = intel_enable_dp; - intel_encoder->pre_enable = intel_pre_enable_dp; + intel_encoder->mode_set = intel_dp_mode_set; intel_encoder->disable = intel_disable_dp; intel_encoder->post_disable = intel_post_disable_dp; intel_encoder->get_hw_state = intel_dp_get_hw_state; intel_encoder->get_config = intel_dp_get_config; - if (IS_VALLEYVIEW(dev)) + if (IS_VALLEYVIEW(dev)) { intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable; + intel_encoder->pre_enable = vlv_pre_enable_dp; + intel_encoder->enable = vlv_enable_dp; + } else { + intel_encoder->pre_enable = intel_pre_enable_dp; + intel_encoder->enable = intel_enable_dp; + } intel_dig_port->port = port; intel_dig_port->dp.output_reg = output_reg; diff --git 
a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index b7d6e09456ce..176080822a74 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -26,6 +26,7 @@ #define __INTEL_DRV_H__ #include <linux/i2c.h> +#include <linux/hdmi.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include <drm/drm_crtc.h> @@ -208,10 +209,6 @@ struct intel_crtc_config { struct drm_display_mode requested_mode; struct drm_display_mode adjusted_mode; - /* This flag must be set by the encoder's compute_config callback if it - * changes the crtc timings in the mode to prevent the crtc fixup from - * overwriting them. Currently only lvds needs that. */ - bool timings_set; /* Whether to set up the PCH/FDI. Note that we never allow sharing * between pch encoders and cpu encoders. */ bool has_pch_encoder; @@ -334,6 +331,13 @@ struct intel_crtc { bool pch_fifo_underrun_disabled; }; +struct intel_plane_wm_parameters { + uint32_t horiz_pixels; + uint8_t bytes_per_pixel; + bool enabled; + bool scaled; +}; + struct intel_plane { struct drm_plane base; int plane; @@ -352,20 +356,18 @@ struct intel_plane { * as the other pieces of the struct may not reflect the values we want * for the watermark calculations. Currently only Haswell uses this. */ - struct { - bool enable; - uint8_t bytes_per_pixel; - uint32_t horiz_pixels; - } wm; + struct intel_plane_wm_parameters wm; void (*update_plane)(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, uint32_t src_w, uint32_t src_h); - void (*disable_plane)(struct drm_plane *plane); + void (*disable_plane)(struct drm_plane *plane, + struct drm_crtc *crtc); int (*update_colorkey)(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key); void (*get_colorkey)(struct drm_plane *plane, @@ -397,66 +399,6 @@ struct cxsr_latency { #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) #define to_intel_plane(x) container_of(x, struct intel_plane, base) -#define DIP_HEADER_SIZE 5 - -#define DIP_TYPE_AVI 0x82 -#define DIP_VERSION_AVI 0x2 -#define DIP_LEN_AVI 13 -#define DIP_AVI_PR_1 0 -#define DIP_AVI_PR_2 1 -#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2) -#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2) -#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2) - -#define DIP_TYPE_SPD 0x83 -#define DIP_VERSION_SPD 0x1 -#define DIP_LEN_SPD 25 -#define DIP_SPD_UNKNOWN 0 -#define DIP_SPD_DSTB 0x1 -#define DIP_SPD_DVDP 0x2 -#define DIP_SPD_DVHS 0x3 -#define DIP_SPD_HDDVR 0x4 -#define DIP_SPD_DVC 0x5 -#define DIP_SPD_DSC 0x6 -#define DIP_SPD_VCD 0x7 -#define DIP_SPD_GAME 0x8 -#define DIP_SPD_PC 0x9 -#define DIP_SPD_BD 0xa -#define DIP_SPD_SCD 0xb - -struct dip_infoframe { - uint8_t type; /* HB0 */ - uint8_t ver; /* HB1 */ - uint8_t len; /* HB2 - body len, not including checksum */ - uint8_t ecc; /* Header ECC */ - uint8_t checksum; /* PB0 */ - union { - struct { - /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */ - uint8_t Y_A_B_S; - /* PB2 - C 7:6, M 5:4, R 3:0 */ - uint8_t C_M_R; - /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */ - uint8_t ITC_EC_Q_SC; - /* PB4 - VIC 6:0 */ - uint8_t VIC; - /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */ - uint8_t YQ_CN_PR; - /* PB6 to PB13 */ - uint16_t top_bar_end; - uint16_t bottom_bar_start; - uint16_t left_bar_end; - uint16_t right_bar_start; - } __attribute__ ((packed)) avi; - struct { - uint8_t vn[8]; - uint8_t pd[16]; - uint8_t sdi; - } __attribute__ ((packed)) spd; - 
uint8_t payload[27]; - } __attribute__ ((packed)) body; -} __attribute__((packed)); - struct intel_hdmi { u32 hdmi_reg; int ddc_bus; @@ -467,7 +409,8 @@ struct intel_hdmi { enum hdmi_force_audio force_audio; bool rgb_quant_range_selectable; void (*write_infoframe)(struct drm_encoder *encoder, - struct dip_infoframe *frame); + enum hdmi_infoframe_type type, + const uint8_t *frame, ssize_t len); void (*set_infoframes)(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode); }; @@ -487,6 +430,7 @@ struct intel_dp { uint8_t link_bw; uint8_t lane_count; uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; + uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; struct i2c_adapter adapter; struct i2c_algo_dp_aux_data algo; @@ -498,6 +442,7 @@ struct intel_dp { int backlight_off_delay; struct delayed_work panel_vdd_work; bool want_panel_vdd; + bool psr_setup_done; struct intel_connector *attached_connector; }; @@ -549,13 +494,6 @@ struct intel_unpin_work { bool enable_stall_check; }; -struct intel_fbc_work { - struct delayed_work work; - struct drm_crtc *crtc; - struct drm_framebuffer *fb; - int interval; -}; - int intel_pch_rawclk(struct drm_device *dev); int intel_connector_update_modes(struct drm_connector *connector, @@ -574,7 +512,6 @@ extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); extern bool intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config); -extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob); extern void intel_dvo_init(struct drm_device *dev); @@ -639,14 +576,10 @@ struct intel_set_config { bool mode_changed; }; -extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, - int x, int y, struct drm_framebuffer *old_fb); -extern void intel_modeset_disable(struct drm_device *dev); extern void intel_crtc_restore_mode(struct drm_crtc *crtc); extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_crtc_update_dpms(struct drm_crtc *crtc); extern void intel_encoder_destroy(struct drm_encoder *encoder); -extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode); extern void intel_connector_dpms(struct drm_connector *, int mode); extern bool intel_connector_get_hw_state(struct intel_connector *connector); extern void intel_modeset_check_state(struct drm_device *dev); @@ -712,12 +645,10 @@ extern bool intel_get_load_detect_pipe(struct drm_connector *connector, extern void intel_release_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old); -extern void intelfb_restore(void); extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno); -extern void intel_enable_clock_gating(struct drm_device *dev); extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, @@ -728,6 +659,7 @@ extern int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *ifb, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj); +extern void intel_framebuffer_fini(struct intel_framebuffer *fb); extern int intel_fbdev_init(struct drm_device *dev); extern void intel_fbdev_initial_config(struct drm_device *dev); extern void intel_fbdev_fini(struct 
drm_device *dev); @@ -747,6 +679,22 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data, extern void intel_fb_output_poll_changed(struct drm_device *dev); extern void intel_fb_restore_mode(struct drm_device *dev); +struct intel_shared_dpll * +intel_crtc_to_shared_dpll(struct intel_crtc *crtc); + +void assert_shared_dpll(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + bool state); +#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) +#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) +void assert_pll(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state); +#define assert_pll_enabled(d, p) assert_pll(d, p, true) +#define assert_pll_disabled(d, p) assert_pll(d, p, false) +void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state); +#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true) +#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false) extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) @@ -762,9 +710,10 @@ extern void intel_ddi_init(struct drm_device *dev, enum port port); /* For use by IVB LP watermark workaround in intel_sprite.c */ extern void intel_update_watermarks(struct drm_device *dev); -extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, - uint32_t sprite_width, - int pixel_size, bool enable); +extern void intel_update_sprite_watermarks(struct drm_plane *plane, + struct drm_crtc *crtc, + uint32_t sprite_width, int pixel_size, + bool enabled, bool scaled); extern unsigned long intel_gen4_compute_page_offset(int *x, int *y, unsigned int tiling_mode, @@ -780,7 +729,6 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, extern void intel_init_pm(struct drm_device *dev); /* FBC */ extern bool intel_fbc_enabled(struct drm_device *dev); -extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); extern void intel_update_fbc(struct drm_device *dev); /* IPS */ extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); @@ -796,8 +744,8 @@ extern void intel_init_power_well(struct drm_device *dev); extern void intel_set_power_well(struct drm_device *dev, bool enable); extern void intel_enable_gt_powersave(struct drm_device *dev); extern void intel_disable_gt_powersave(struct drm_device *dev); -extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); extern void ironlake_teardown_rc6(struct drm_device *dev); +void gen6_update_ring_freq(struct drm_device *dev); extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); @@ -825,4 +773,24 @@ extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, enum transcoder pch_transcoder, bool enable); +extern void intel_edp_psr_enable(struct intel_dp *intel_dp); +extern void intel_edp_psr_disable(struct intel_dp *intel_dp); +extern void intel_edp_psr_update(struct drm_device *dev); +extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv, + bool switch_to_fclk, bool allow_power_down); +extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv); +extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); +extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, + uint32_t mask); +extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); +extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv, + uint32_t 
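The assert helpers added to intel_drv.h above all follow one pattern: a three-argument state checker plus enabled/disabled convenience macros. A standalone rendition of the pattern, simplified to a boolean input:

#include <assert.h>
#include <stdbool.h>

static void assert_pll_state(bool pll_on, bool state)
{
	assert(pll_on == state && "PLL state mismatch");
}
#define assert_pll_enabled(p)  assert_pll_state((p), true)
#define assert_pll_disabled(p) assert_pll_state((p), false)

int main(void)
{
	bool pll_on = true;

	assert_pll_enabled(pll_on);	/* passes */
	assert_pll_disabled(!pll_on);	/* passes too */
	return 0;
}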
mask); +extern void hsw_enable_pc8_work(struct work_struct *__work); +extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv); +extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv); +extern void hsw_pc8_disable_interrupts(struct drm_device *dev); +extern void hsw_pc8_restore_interrupts(struct drm_device *dev); +extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); +extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); + #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index eb2020eb2b7e..406303b509c1 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -100,15 +100,14 @@ struct intel_dvo { bool panel_wants_dither; }; -static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder) +static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder) { - return container_of(encoder, struct intel_dvo, base.base); + return container_of(encoder, struct intel_dvo, base); } static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector) { - return container_of(intel_attached_encoder(connector), - struct intel_dvo, base); + return enc_to_dvo(intel_attached_encoder(connector)); } static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) @@ -123,7 +122,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); + struct intel_dvo *intel_dvo = enc_to_dvo(encoder); u32 tmp; tmp = I915_READ(intel_dvo->dev.dvo_reg); @@ -140,7 +139,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); + struct intel_dvo *intel_dvo = enc_to_dvo(encoder); u32 tmp, flags = 0; tmp = I915_READ(intel_dvo->dev.dvo_reg); @@ -159,7 +158,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder, static void intel_disable_dvo(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); + struct intel_dvo *intel_dvo = enc_to_dvo(encoder); u32 dvo_reg = intel_dvo->dev.dvo_reg; u32 temp = I915_READ(dvo_reg); @@ -171,7 +170,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder) static void intel_enable_dvo(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base); + struct intel_dvo *intel_dvo = enc_to_dvo(encoder); u32 dvo_reg = intel_dvo->dev.dvo_reg; u32 temp = I915_READ(dvo_reg); @@ -241,11 +240,11 @@ static int intel_dvo_mode_valid(struct drm_connector *connector, return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode); } -static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static bool intel_dvo_compute_config(struct intel_encoder *encoder, + struct intel_crtc_config *pipe_config) { - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + struct intel_dvo *intel_dvo = enc_to_dvo(encoder); + struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; /* If we have timings from the BIOS for the panel, put them in * to 
the adjusted mode. The CRTC will be set up for this mode, @@ -267,23 +266,23 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder, } if (intel_dvo->dev.dev_ops->mode_fixup) - return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode); + return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, + &pipe_config->requested_mode, + adjusted_mode); return true; } -static void intel_dvo_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void intel_dvo_mode_set(struct intel_encoder *encoder) { - struct drm_device *dev = encoder->dev; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); - int pipe = intel_crtc->pipe; + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; + struct intel_dvo *intel_dvo = enc_to_dvo(encoder); + int pipe = crtc->pipe; u32 dvo_val; u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg; - int dpll_reg = DPLL(pipe); switch (dvo_reg) { case DVOA: @@ -298,7 +297,9 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, break; } - intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode); + intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, + &crtc->config.requested_mode, + adjusted_mode); /* Save the data order, since I don't know what it should be set to. */ dvo_val = I915_READ(dvo_reg) & @@ -314,8 +315,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) dvo_val |= DVO_VSYNC_ACTIVE_HIGH; - I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED); - /*I915_WRITE(DVOB_SRCDIM, (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/ @@ -335,6 +334,8 @@ static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector, bool force) { struct intel_dvo *intel_dvo = intel_attached_dvo(connector); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, drm_get_connector_name(connector)); return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev); } @@ -372,11 +373,6 @@ static void intel_dvo_destroy(struct drm_connector *connector) kfree(connector); } -static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { - .mode_fixup = intel_dvo_mode_fixup, - .mode_set = intel_dvo_mode_set, -}; - static const struct drm_connector_funcs intel_dvo_connector_funcs = { .dpms = intel_dvo_dpms, .detect = intel_dvo_detect, @@ -392,7 +388,7 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs static void intel_dvo_enc_destroy(struct drm_encoder *encoder) { - struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder); + struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder)); if (intel_dvo->dev.dev_ops->destroy) intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev); @@ -471,6 +467,8 @@ void intel_dvo_init(struct drm_device *dev) intel_encoder->enable = intel_enable_dvo; intel_encoder->get_hw_state = intel_dvo_get_hw_state; intel_encoder->get_config = intel_dvo_get_config; + intel_encoder->compute_config = intel_dvo_compute_config; + intel_encoder->mode_set = intel_dvo_mode_set; intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; /* Now, try to find a controller */ @@ -537,9 +535,6 @@ void intel_dvo_init(struct drm_device *dev) 
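enc_to_dvo() above is the usual container_of() idiom: recover the wrapping structure from a pointer to an embedded member. A standalone demonstration with stand-in types (the macro here is the textbook user-space form, not the kernel header's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct encoder { int id; };
struct dvo { const char *name; struct encoder base; };

int main(void)
{
	struct dvo d = { .name = "ch7xxx", .base = { .id = 3 } };
	struct encoder *enc = &d.base;

	/* Walk back from the embedded member to the containing struct. */
	struct dvo *back = container_of(enc, struct dvo, base);

	printf("%s (id %d)\n", back->name, back->base.id);
	return 0;
}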
connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_encoder_helper_add(&intel_encoder->base, - &intel_dvo_helper_funcs); - intel_connector_attach_encoder(intel_connector, intel_encoder); if (dvo->type == INTEL_DVO_CHIP_LVDS) { /* For our LVDS chipsets, we should hopefully be able diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index dff669e2387f..bc2100007b21 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper, info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; - info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; + info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj); info->fix.smem_len = size; info->screen_base = - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), size); if (!info->screen_base) { ret = -ENOSPC; @@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper, /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ - DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", + DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width, fb->height, - obj->gtt_offset, obj); + i915_gem_obj_ggtt_offset(obj), obj); mutex_unlock(&dev->struct_mutex); @@ -193,26 +193,21 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = { static void intel_fbdev_destroy(struct drm_device *dev, struct intel_fbdev *ifbdev) { - struct fb_info *info; - struct intel_framebuffer *ifb = &ifbdev->ifb; - if (ifbdev->helper.fbdev) { - info = ifbdev->helper.fbdev; + struct fb_info *info = ifbdev->helper.fbdev; + unregister_framebuffer(info); iounmap(info->screen_base); if (info->cmap.len) fb_dealloc_cmap(&info->cmap); + framebuffer_release(info); } drm_fb_helper_fini(&ifbdev->helper); - drm_framebuffer_unregister_private(&ifb->base); - drm_framebuffer_cleanup(&ifb->base); - if (ifb->obj) { - drm_gem_object_unreference_unlocked(&ifb->obj->base); - ifb->obj = NULL; - } + drm_framebuffer_unregister_private(&ifbdev->ifb.base); + intel_framebuffer_fini(&ifbdev->ifb); } int intel_fbdev_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 2fd3fd5b943e..4148cc85bf7f 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -29,6 +29,7 @@ #include <linux/i2c.h> #include <linux/slab.h> #include <linux/delay.h> +#include <linux/hdmi.h> #include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> @@ -66,89 +67,83 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base); } -void intel_dip_infoframe_csum(struct dip_infoframe *frame) +static u32 g4x_infoframe_index(enum hdmi_infoframe_type type) { - uint8_t *data = (uint8_t *)frame; - uint8_t sum = 0; - unsigned i; - - frame->checksum = 0; - frame->ecc = 0; - - for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++) - sum += data[i]; - - frame->checksum = 0x100 - sum; -} - -static u32 g4x_infoframe_index(struct dip_infoframe *frame) -{ - switch (frame->type) { - case DIP_TYPE_AVI: + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_SELECT_AVI; - case DIP_TYPE_SPD: + case HDMI_INFOFRAME_TYPE_SPD: return VIDEO_DIP_SELECT_SPD; + case HDMI_INFOFRAME_TYPE_VENDOR: + return 
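The intel_fbdev_destroy() hunk above tightens teardown into strict reverse order of creation and routes the framebuffer pieces through the shared intel_framebuffer_fini(). The ordering contract, as a non-compilable fragment using only calls named in the diff:

unregister_framebuffer(info);           /* stop userspace access first */
iounmap(info->screen_base);             /* then drop the GTT mapping */
if (info->cmap.len)
	fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);              /* new: free the fb_info itself */

drm_fb_helper_fini(&ifbdev->helper);
drm_framebuffer_unregister_private(&ifbdev->ifb.base);
intel_framebuffer_fini(&ifbdev->ifb);   /* cleanup + unreference the BO */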
VIDEO_DIP_SELECT_VENDOR; default: - DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); return 0; } } -static u32 g4x_infoframe_enable(struct dip_infoframe *frame) +static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type) { - switch (frame->type) { - case DIP_TYPE_AVI: + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_ENABLE_AVI; - case DIP_TYPE_SPD: + case HDMI_INFOFRAME_TYPE_SPD: return VIDEO_DIP_ENABLE_SPD; + case HDMI_INFOFRAME_TYPE_VENDOR: + return VIDEO_DIP_ENABLE_VENDOR; default: - DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); return 0; } } -static u32 hsw_infoframe_enable(struct dip_infoframe *frame) +static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type) { - switch (frame->type) { - case DIP_TYPE_AVI: + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: return VIDEO_DIP_ENABLE_AVI_HSW; - case DIP_TYPE_SPD: + case HDMI_INFOFRAME_TYPE_SPD: return VIDEO_DIP_ENABLE_SPD_HSW; + case HDMI_INFOFRAME_TYPE_VENDOR: + return VIDEO_DIP_ENABLE_VS_HSW; default: - DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); return 0; } } -static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, +static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type, enum transcoder cpu_transcoder) { - switch (frame->type) { - case DIP_TYPE_AVI: + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder); - case DIP_TYPE_SPD: + case HDMI_INFOFRAME_TYPE_SPD: return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder); + case HDMI_INFOFRAME_TYPE_VENDOR: + return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder); default: - DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); + DRM_DEBUG_DRIVER("unknown info frame type %d\n", type); return 0; } } static void g4x_write_infoframe(struct drm_encoder *encoder, - struct dip_infoframe *frame) + enum hdmi_infoframe_type type, + const uint8_t *frame, ssize_t len) { uint32_t *data = (uint32_t *)frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 val = I915_READ(VIDEO_DIP_CTL); - unsigned i, len = DIP_HEADER_SIZE + frame->len; + int i; WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(frame); + val |= g4x_infoframe_index(type); - val &= ~g4x_infoframe_enable(frame); + val &= ~g4x_infoframe_enable(type); I915_WRITE(VIDEO_DIP_CTL, val); @@ -162,7 +157,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, I915_WRITE(VIDEO_DIP_DATA, 0); mmiowb(); - val |= g4x_infoframe_enable(frame); + val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; @@ -171,22 +166,22 @@ static void g4x_write_infoframe(struct drm_encoder *encoder, } static void ibx_write_infoframe(struct drm_encoder *encoder, - struct dip_infoframe *frame) + enum hdmi_infoframe_type type, + const uint8_t *frame, ssize_t len) { uint32_t *data = (uint32_t *)frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); - unsigned i, len = DIP_HEADER_SIZE + frame->len; + int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg 
disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(frame); + val |= g4x_infoframe_index(type); - val &= ~g4x_infoframe_enable(frame); + val &= ~g4x_infoframe_enable(type); I915_WRITE(reg, val); @@ -200,7 +195,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); mmiowb(); - val |= g4x_infoframe_enable(frame); + val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; @@ -209,25 +204,25 @@ static void ibx_write_infoframe(struct drm_encoder *encoder, } static void cpt_write_infoframe(struct drm_encoder *encoder, - struct dip_infoframe *frame) + enum hdmi_infoframe_type type, + const uint8_t *frame, ssize_t len) { uint32_t *data = (uint32_t *)frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - int reg = TVIDEO_DIP_CTL(intel_crtc->pipe); - unsigned i, len = DIP_HEADER_SIZE + frame->len; + int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(frame); + val |= g4x_infoframe_index(type); /* The DIP control register spec says that we need to update the AVI * infoframe without clearing its enable bit */ - if (frame->type != DIP_TYPE_AVI) - val &= ~g4x_infoframe_enable(frame); + if (type != HDMI_INFOFRAME_TYPE_AVI) + val &= ~g4x_infoframe_enable(type); I915_WRITE(reg, val); @@ -241,7 +236,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0); mmiowb(); - val |= g4x_infoframe_enable(frame); + val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; @@ -250,22 +245,22 @@ static void cpt_write_infoframe(struct drm_encoder *encoder, } static void vlv_write_infoframe(struct drm_encoder *encoder, - struct dip_infoframe *frame) + enum hdmi_infoframe_type type, + const uint8_t *frame, ssize_t len) { uint32_t *data = (uint32_t *)frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); - unsigned i, len = DIP_HEADER_SIZE + frame->len; + int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = I915_READ(reg); WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n"); val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(frame); + val |= g4x_infoframe_index(type); - val &= ~g4x_infoframe_enable(frame); + val &= ~g4x_infoframe_enable(type); I915_WRITE(reg, val); @@ -279,7 +274,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0); mmiowb(); - val |= g4x_infoframe_enable(frame); + val |= g4x_infoframe_enable(type); val &= ~VIDEO_DIP_FREQ_MASK; val |= VIDEO_DIP_FREQ_VSYNC; @@ -288,21 +283,24 @@ static void vlv_write_infoframe(struct drm_encoder *encoder, } static void hsw_write_infoframe(struct drm_encoder *encoder, - struct dip_infoframe *frame) + enum hdmi_infoframe_type type, + const uint8_t *frame, ssize_t len) { uint32_t *data = (uint32_t *)frame; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = 
to_intel_crtc(encoder->crtc); u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder); - u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder); - unsigned int i, len = DIP_HEADER_SIZE + frame->len; + u32 data_reg; + int i; u32 val = I915_READ(ctl_reg); + data_reg = hsw_infoframe_data_reg(type, + intel_crtc->config.cpu_transcoder); if (data_reg == 0) return; - val &= ~hsw_infoframe_enable(frame); + val &= ~hsw_infoframe_enable(type); I915_WRITE(ctl_reg, val); mmiowb(); @@ -315,18 +313,48 @@ static void hsw_write_infoframe(struct drm_encoder *encoder, I915_WRITE(data_reg + i, 0); mmiowb(); - val |= hsw_infoframe_enable(frame); + val |= hsw_infoframe_enable(type); I915_WRITE(ctl_reg, val); POSTING_READ(ctl_reg); } -static void intel_set_infoframe(struct drm_encoder *encoder, - struct dip_infoframe *frame) +/* + * The data we write to the DIP data buffer registers is 1 byte bigger than the + * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting + * at 0). It's also a byte used by DisplayPort so the same DIP registers can be + * used for both technologies. + * + * DW0: Reserved/ECC/DP | HB2 | HB1 | HB0 + * DW1: DB3 | DB2 | DB1 | DB0 + * DW2: DB7 | DB6 | DB5 | DB4 + * DW3: ... + * + * (HB is Header Byte, DB is Data Byte) + * + * The hdmi pack() functions don't know about that hardware specific hole so we + * trick them by giving an offset into the buffer and moving back the header + * bytes by one. + */ +static void intel_write_infoframe(struct drm_encoder *encoder, + union hdmi_infoframe *frame) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + uint8_t buffer[VIDEO_DIP_DATA_SIZE]; + ssize_t len; + + /* see comment above for the reason for this offset */ + len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1); + if (len < 0) + return; - intel_dip_infoframe_csum(frame); - intel_hdmi->write_infoframe(encoder, frame); + /* Insert the 'hole' (see big comment above) at position 3 */ + buffer[0] = buffer[1]; + buffer[1] = buffer[2]; + buffer[2] = buffer[3]; + buffer[3] = 0; + len++; + + intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len); } static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, @@ -334,40 +362,57 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - struct dip_infoframe avi_if = { - .type = DIP_TYPE_AVI, - .ver = DIP_VERSION_AVI, - .len = DIP_LEN_AVI, - }; + union hdmi_infoframe frame; + int ret; - if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) - avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, + adjusted_mode); + if (ret < 0) { + DRM_ERROR("couldn't fill AVI infoframe\n"); + return; + } if (intel_hdmi->rgb_quant_range_selectable) { if (intel_crtc->config.limited_color_range) - avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; + frame.avi.quantization_range = + HDMI_QUANTIZATION_RANGE_LIMITED; else - avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; + frame.avi.quantization_range = + HDMI_QUANTIZATION_RANGE_FULL; } - avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode); - - intel_set_infoframe(encoder, &avi_if); + intel_write_infoframe(encoder, &frame); } static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder) { - struct dip_infoframe spd_if; + union hdmi_infoframe frame; + int ret; + + ret = hdmi_spd_infoframe_init(&frame.spd, 
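The long comment above intel_write_infoframe() documents the hardware's reserved/ECC byte at offset 3; the code therefore packs the frame one byte into the buffer and shifts the three header bytes down. A standalone model of that shuffle — the packing step is faked with fixed bytes, since the real hdmi_infoframe_pack() lives in the kernel:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fake packer: HB0 HB1 HB2, then PB0 (checksum) and payload. */
static long fake_pack(uint8_t *buf, size_t size)
{
	const uint8_t frame[] = { 0x82, 0x02, 0x0d, 0x00, 0x12, 0x34 };

	if (size < sizeof(frame))
		return -1;
	memcpy(buf, frame, sizeof(frame));
	return sizeof(frame);
}

int main(void)
{
	uint8_t buffer[32];
	long len;

	/* Pack one byte in, exactly like intel_write_infoframe(). */
	len = fake_pack(buffer + 1, sizeof(buffer) - 1);
	if (len < 0)
		return 1;

	/* Move the header bytes down and punch the hole at offset 3,
	 * giving the DW0 layout from the comment: hole | HB2 | HB1 | HB0. */
	buffer[0] = buffer[1];
	buffer[1] = buffer[2];
	buffer[2] = buffer[3];
	buffer[3] = 0;
	len++;

	for (int i = 0; i < len; i++)
		printf("%02x ", buffer[i]);
	printf("\n");
	return 0;
}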
"Intel", "Integrated gfx"); + if (ret < 0) { + DRM_ERROR("couldn't fill SPD infoframe\n"); + return; + } + + frame.spd.sdi = HDMI_SPD_SDI_PC; + + intel_write_infoframe(encoder, &frame); +} + +static void +intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, + struct drm_display_mode *adjusted_mode) +{ + union hdmi_infoframe frame; + int ret; - memset(&spd_if, 0, sizeof(spd_if)); - spd_if.type = DIP_TYPE_SPD; - spd_if.ver = DIP_VERSION_SPD; - spd_if.len = DIP_LEN_SPD; - strcpy(spd_if.body.spd.vn, "Intel"); - strcpy(spd_if.body.spd.pd, "Integrated gfx"); - spd_if.body.spd.sdi = DIP_SPD_PC; + ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi, + adjusted_mode); + if (ret < 0) + return; - intel_set_infoframe(encoder, &spd_if); + intel_write_infoframe(encoder, &frame); } static void g4x_set_infoframes(struct drm_encoder *encoder, @@ -432,6 +477,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); + intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); } static void ibx_set_infoframes(struct drm_encoder *encoder, @@ -493,6 +539,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); + intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); } static void cpt_set_infoframes(struct drm_encoder *encoder, @@ -528,6 +575,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); + intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); } static void vlv_set_infoframes(struct drm_encoder *encoder, @@ -562,6 +610,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); + intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); } static void hsw_set_infoframes(struct drm_encoder *encoder, @@ -589,16 +638,16 @@ static void hsw_set_infoframes(struct drm_encoder *encoder, intel_hdmi_set_avi_infoframe(encoder, adjusted_mode); intel_hdmi_set_spd_infoframe(encoder); + intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); } -static void intel_hdmi_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void intel_hdmi_mode_set(struct intel_encoder *encoder) { - struct drm_device *dev = encoder->dev; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode; u32 hdmi_val; hdmi_val = SDVO_ENCODING_HDMI; @@ -609,7 +658,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH; - if (intel_crtc->config.pipe_bpp > 24) + if (crtc->config.pipe_bpp > 24) hdmi_val |= HDMI_COLOR_FORMAT_12bpc; else hdmi_val |= SDVO_COLOR_FORMAT_8bpc; @@ -620,21 +669,21 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, if (intel_hdmi->has_audio) { DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n", - pipe_name(intel_crtc->pipe)); + pipe_name(crtc->pipe)); hdmi_val |= SDVO_AUDIO_ENABLE; 
hdmi_val |= HDMI_MODE_SELECT_HDMI; - intel_write_eld(encoder, adjusted_mode); + intel_write_eld(&encoder->base, adjusted_mode); } if (HAS_PCH_CPT(dev)) - hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe); + hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe); else - hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe); + hdmi_val |= SDVO_PIPE_SEL(crtc->pipe); I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val); POSTING_READ(intel_hdmi->hdmi_reg); - intel_hdmi->set_infoframes(encoder, adjusted_mode); + intel_hdmi->set_infoframes(&encoder->base, adjusted_mode); } static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, @@ -719,14 +768,10 @@ static void intel_enable_hdmi(struct intel_encoder *encoder) I915_WRITE(intel_hdmi->hdmi_reg, temp); POSTING_READ(intel_hdmi->hdmi_reg); } +} - if (IS_VALLEYVIEW(dev)) { - struct intel_digital_port *dport = - enc_to_dig_port(&encoder->base); - int channel = vlv_dport_to_channel(dport); - - vlv_wait_port_ready(dev_priv, channel); - } +static void vlv_enable_hdmi(struct intel_encoder *encoder) +{ } static void intel_disable_hdmi(struct intel_encoder *encoder) @@ -879,6 +924,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) struct edid *edid; enum drm_connector_status status = connector_status_disconnected; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, drm_get_connector_name(connector)); + intel_hdmi->has_hdmi_sink = false; intel_hdmi->has_audio = false; intel_hdmi->rgb_quant_range_selectable = false; @@ -1030,6 +1078,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder) return; /* Enable clock channels for this port */ + mutex_lock(&dev_priv->dpio_lock); val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); val = 0; if (pipe) @@ -1060,6 +1109,11 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder) 0x00760018); vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888); + mutex_unlock(&dev_priv->dpio_lock); + + intel_enable_hdmi(encoder); + + vlv_wait_port_ready(dev_priv, port); } static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder) @@ -1073,6 +1127,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder) return; /* Program Tx lane resets to default */ + mutex_lock(&dev_priv->dpio_lock); vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); @@ -1091,6 +1146,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder) 0x00002000); vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), DPIO_TX_OCALINIT_EN); + mutex_unlock(&dev_priv->dpio_lock); } static void intel_hdmi_post_disable(struct intel_encoder *encoder) @@ -1113,10 +1169,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector) kfree(connector); } -static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { - .mode_set = intel_hdmi_mode_set, -}; - static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .dpms = intel_connector_dpms, .detect = intel_hdmi_detect, @@ -1221,7 +1273,6 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) { struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; - struct drm_encoder *encoder; struct intel_connector *intel_connector; intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL); @@ -1235,21 +1286,22 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port) } intel_encoder = &intel_dig_port->base; - encoder = &intel_encoder->base; drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, 
DRM_MODE_ENCODER_TMDS); - drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); intel_encoder->compute_config = intel_hdmi_compute_config; - intel_encoder->enable = intel_enable_hdmi; + intel_encoder->mode_set = intel_hdmi_mode_set; intel_encoder->disable = intel_disable_hdmi; intel_encoder->get_hw_state = intel_hdmi_get_hw_state; intel_encoder->get_config = intel_hdmi_get_config; if (IS_VALLEYVIEW(dev)) { - intel_encoder->pre_enable = intel_hdmi_pre_enable; intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable; + intel_encoder->pre_enable = intel_hdmi_pre_enable; + intel_encoder->enable = vlv_enable_hdmi; intel_encoder->post_disable = intel_hdmi_post_disable; + } else { + intel_encoder->enable = intel_enable_hdmi; } intel_encoder->type = INTEL_OUTPUT_HDMI; diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 639fe192997c..d1c1e0f7f262 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -398,6 +398,7 @@ gmbus_xfer(struct i2c_adapter *adapter, int i, reg_offset; int ret = 0; + intel_aux_display_runtime_get(dev_priv); mutex_lock(&dev_priv->gmbus_mutex); if (bus->force_bit) { @@ -497,6 +498,7 @@ timeout: out: mutex_unlock(&dev_priv->gmbus_mutex); + intel_aux_display_runtime_put(dev_priv); return ret; } diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 61348eae2f04..4d33278e31fb 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -122,17 +122,25 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, * This is an exception to the general rule that mode_set doesn't turn * things on. */ -static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder) +static void intel_pre_enable_lvds(struct intel_encoder *encoder) { struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); struct drm_display_mode *fixed_mode = lvds_encoder->attached_connector->base.panel.fixed_mode; - int pipe = intel_crtc->pipe; + int pipe = crtc->pipe; u32 temp; + if (HAS_PCH_SPLIT(dev)) { + assert_fdi_rx_pll_disabled(dev_priv, pipe); + assert_shared_dpll_disabled(dev_priv, + intel_crtc_to_shared_dpll(crtc)); + } else { + assert_pll_disabled(dev_priv, pipe); + } + temp = I915_READ(lvds_encoder->reg); temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; @@ -149,7 +157,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder) /* set the corresponsding LVDS_BORDER bit */ temp &= ~LVDS_BORDER_ENABLE; - temp |= intel_crtc->config.gmch_pfit.lvds_border_bits; + temp |= crtc->config.gmch_pfit.lvds_border_bits; /* Set the B0-B3 data pairs corresponding to whether we're going to * set the DPLLs for dual-channel mode or not. */ @@ -169,8 +177,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder) if (INTEL_INFO(dev)->gen == 4) { /* Bspec wording suggests that LVDS port dithering only exists * for 18bpp panels. 
  */
-        if (intel_crtc->config.dither &&
-            intel_crtc->config.pipe_bpp == 18)
+        if (crtc->config.dither && crtc->config.pipe_bpp == 18)
             temp |= LVDS_ENABLE_DITHER;
         else
             temp &= ~LVDS_ENABLE_DITHER;
@@ -312,14 +319,12 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
     return true;
 }

-static void intel_lvds_mode_set(struct drm_encoder *encoder,
-                struct drm_display_mode *mode,
-                struct drm_display_mode *adjusted_mode)
+static void intel_lvds_mode_set(struct intel_encoder *encoder)
 {
     /*
-     * The LVDS pin pair will already have been turned on in the
-     * intel_crtc_mode_set since it has a large impact on the DPLL
-     * settings.
+     * We don't do anything here, the LVDS port is fully set up in the pre
+     * enable hook - the ordering constraints for enabling the lvds port vs.
+     * enabling the display pll are too strict.
      */
 }

@@ -336,6 +341,9 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
     struct drm_device *dev = connector->dev;
     enum drm_connector_status status;

+    DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+              connector->base.id, drm_get_connector_name(connector));
+
     status = intel_panel_detect(dev);
     if (status != connector_status_unknown)
         return status;
@@ -497,10 +505,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
     return 0;
 }

-static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
-    .mode_set = intel_lvds_mode_set,
-};
-
 static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
     .get_modes = intel_lvds_get_modes,
     .mode_valid = intel_lvds_mode_valid,
@@ -959,8 +963,9 @@ void intel_lvds_init(struct drm_device *dev)
              DRM_MODE_ENCODER_LVDS);

     intel_encoder->enable = intel_enable_lvds;
-    intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
+    intel_encoder->pre_enable = intel_pre_enable_lvds;
     intel_encoder->compute_config = intel_lvds_compute_config;
+    intel_encoder->mode_set = intel_lvds_mode_set;
     intel_encoder->disable = intel_disable_lvds;
     intel_encoder->get_hw_state = intel_lvds_get_hw_state;
     intel_encoder->get_config = intel_lvds_get_config;
@@ -977,7 +982,6 @@ void intel_lvds_init(struct drm_device *dev)
     else
         intel_encoder->crtc_mask = (1 << 1);

-    drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
     drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
     connector->display_info.subpixel_order = SubPixelHorizontalRGB;
     connector->interlace_allowed = false;
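Both the intel_hdmi.c hunks earlier in this diff and the intel_lvds.c hunks above replace the drm_encoder_helper_funcs ->mode_set vtable entry with a driver-owned intel_encoder->mode_set hook, so i915 itself decides when each stage runs relative to PLL and port enabling. A standalone toy sketch of that call ordering follows — toy_encoder, toy_crtc_enable and the lvds_* stubs are invented for illustration; only the hook names mirror the diff:

#include <stdio.h>

struct toy_encoder {
    void (*mode_set)(void);        /* bookkeeping only, must not enable */
    void (*pre_pll_enable)(void);  /* strictly before the DPLL is on */
    void (*pre_enable)(void);      /* PLL running, port still off */
    void (*enable)(void);          /* finally light the port up */
};

static void lvds_mode_set(void)   { puts("mode_set: nothing to do"); }
static void lvds_pre_enable(void) { puts("pre_enable: program LVDS port"); }
static void lvds_enable(void)     { puts("enable: panel power + backlight"); }

static void toy_crtc_enable(const struct toy_encoder *enc)
{
    if (enc->mode_set)
        enc->mode_set();
    if (enc->pre_pll_enable)
        enc->pre_pll_enable();
    /* ...DPLL programming and enabling happens here... */
    if (enc->pre_enable)
        enc->pre_enable();
    /* ...pipe and planes come up here... */
    if (enc->enable)
        enc->enable();
}

int main(void)
{
    const struct toy_encoder lvds = {
        .mode_set = lvds_mode_set,
        .pre_enable = lvds_pre_enable,
        .enable = lvds_enable,
    };
    toy_crtc_enable(&lvds);
    return 0;
}

The point of the split is visible in intel_pre_enable_lvds() above: the work with hard ordering constraints against the DPLL lives in the pre_enable hook, while mode_set is left as pure bookkeeping.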
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a3698812e9c7..ddfd0aefe0c0 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
         regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
     else
         regs = io_mapping_map_wc(dev_priv->gtt.mappable,
-                     overlay->reg_bo->gtt_offset);
+                     i915_gem_obj_ggtt_offset(overlay->reg_bo));

     return regs;
 }
@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
     swidth = params->src_w;
     swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
     sheight = params->src_h;
-    iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+    iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
     ostride = params->stride_Y;

     if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                   params->src_w/uv_hscale);
         swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
         sheight |= (params->src_h/uv_vscale) << 16;
-        iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
-        iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+        iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
+        iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
         ostride |= params->stride_UV << 16;
     }
@@ -1333,7 +1333,9 @@ void intel_setup_overlay(struct drm_device *dev)

     overlay->dev = dev;

-    reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+    reg_bo = NULL;
+    if (!OVERLAY_NEEDS_PHYSICAL(dev))
+        reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
     if (reg_bo == NULL)
         reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
     if (reg_bo == NULL)
@@ -1350,12 +1352,12 @@
         }
         overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
     } else {
-        ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
+        ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false);
         if (ret) {
             DRM_ERROR("failed to pin overlay register bo\n");
             goto out_free_bo;
         }
-        overlay->flip_addr = reg_bo->gtt_offset;
+        overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);

         ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
         if (ret) {
@@ -1412,9 +1414,6 @@ void intel_cleanup_overlay(struct drm_device *dev)
     kfree(dev_priv->overlay);
 }

-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
 struct intel_overlay_error_state {
     struct overlay_registers regs;
     unsigned long base;
@@ -1435,7 +1434,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
             overlay->reg_bo->phys_obj->handle->vaddr;
     else
         regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
-                        overlay->reg_bo->gtt_offset);
+                        i915_gem_obj_ggtt_offset(overlay->reg_bo));

     return regs;
 }
@@ -1468,7 +1467,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
     if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
         error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
     else
-        error->base = overlay->reg_bo->gtt_offset;
+        error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);

     regs = intel_overlay_map_regs_atomic(overlay);
     if (!regs)
@@ -1537,4 +1536,3 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
     P(UVSCALEV);
 #undef P
 }
-#endif
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 5950888ae1d0..a43c33bc4a35 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -194,9 +194,6 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
         adjusted_mode->vdisplay == mode->vdisplay)
         goto out;

-    drm_mode_set_crtcinfo(adjusted_mode, 0);
-    pipe_config->timings_set = true;
-
     switch (fitting_mode) {
     case DRM_MODE_SCALE_CENTER:
         /*
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b0e4a0bd1313..46056820d1d2 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,8 +30,7 @@
 #include "intel_drv.h"
 #include "../../../platform/x86/intel_ips.h"
 #include <linux/module.h>
-
-#define FORCEWAKE_ACK_TIMEOUT_MS 2
+#include <drm/i915_powerwell.h>

 /* FBC, or Frame Buffer Compression, is a technique employed to compress the
  * framebuffer contents in-memory, aiming at reducing the required bandwidth
@@ -86,7 +85,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
     int plane, i;
     u32 fbc_ctl, fbc_ctl2;

-    cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+    cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
     if (fb->pitches[0] < cfb_pitch)
         cfb_pitch = fb->pitches[0];

@@ -217,7 +216,7 @@ static void
ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); - I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); + I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); /* enable it... */ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -274,7 +273,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset); + I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj)); I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X | IVB_DPFC_CTL_FENCE_EN | @@ -325,7 +324,7 @@ static void intel_fbc_work_fn(struct work_struct *__work) struct drm_i915_private *dev_priv = dev->dev_private; mutex_lock(&dev->struct_mutex); - if (work == dev_priv->fbc_work) { + if (work == dev_priv->fbc.fbc_work) { /* Double check that we haven't switched fb without cancelling * the prior work. */ @@ -333,12 +332,12 @@ static void intel_fbc_work_fn(struct work_struct *__work) dev_priv->display.enable_fbc(work->crtc, work->interval); - dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane; - dev_priv->cfb_fb = work->crtc->fb->base.id; - dev_priv->cfb_y = work->crtc->y; + dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane; + dev_priv->fbc.fb_id = work->crtc->fb->base.id; + dev_priv->fbc.y = work->crtc->y; } - dev_priv->fbc_work = NULL; + dev_priv->fbc.fbc_work = NULL; } mutex_unlock(&dev->struct_mutex); @@ -347,28 +346,28 @@ static void intel_fbc_work_fn(struct work_struct *__work) static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv) { - if (dev_priv->fbc_work == NULL) + if (dev_priv->fbc.fbc_work == NULL) return; DRM_DEBUG_KMS("cancelling pending FBC enable\n"); /* Synchronisation is provided by struct_mutex and checking of - * dev_priv->fbc_work, so we can perform the cancellation + * dev_priv->fbc.fbc_work, so we can perform the cancellation * entirely asynchronously. */ - if (cancel_delayed_work(&dev_priv->fbc_work->work)) + if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work)) /* tasklet was killed before being run, clean up */ - kfree(dev_priv->fbc_work); + kfree(dev_priv->fbc.fbc_work); /* Mark the work as no longer wanted so that if it does * wake-up (because the work was already running and waiting * for our mutex), it will discover that is no longer * necessary to run. */ - dev_priv->fbc_work = NULL; + dev_priv->fbc.fbc_work = NULL; } -void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) { struct intel_fbc_work *work; struct drm_device *dev = crtc->dev; @@ -381,6 +380,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) work = kzalloc(sizeof *work, GFP_KERNEL); if (work == NULL) { + DRM_ERROR("Failed to allocate FBC work structure\n"); dev_priv->display.enable_fbc(crtc, interval); return; } @@ -390,9 +390,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) work->interval = interval; INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); - dev_priv->fbc_work = work; - - DRM_DEBUG_KMS("scheduling delayed FBC enable\n"); + dev_priv->fbc.fbc_work = work; /* Delay the actual enabling to let pageflipping cease and the * display to settle before starting the compression. 
Note that @@ -404,6 +402,8 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) * following the termination of the page-flipping sequence * and indeed performing the enable as a co-routine and not * waiting synchronously upon the vblank. + * + * WaFbcWaitForVBlankBeforeEnable:ilk,snb */ schedule_delayed_work(&work->work, msecs_to_jiffies(50)); } @@ -418,7 +418,17 @@ void intel_disable_fbc(struct drm_device *dev) return; dev_priv->display.disable_fbc(dev); - dev_priv->cfb_plane = -1; + dev_priv->fbc.plane = -1; +} + +static bool set_no_fbc_reason(struct drm_i915_private *dev_priv, + enum no_fbc_reason reason) +{ + if (dev_priv->fbc.no_fbc_reason == reason) + return false; + + dev_priv->fbc.no_fbc_reason = reason; + return true; } /** @@ -448,14 +458,18 @@ void intel_update_fbc(struct drm_device *dev) struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj; - int enable_fbc; unsigned int max_hdisplay, max_vdisplay; - if (!i915_powersave) + if (!I915_HAS_FBC(dev)) { + set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED); return; + } - if (!I915_HAS_FBC(dev)) + if (!i915_powersave) { + if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) + DRM_DEBUG_KMS("fbc disabled per module param\n"); return; + } /* * If FBC is already on, we just have to verify that we can @@ -470,8 +484,8 @@ void intel_update_fbc(struct drm_device *dev) if (intel_crtc_active(tmp_crtc) && !to_intel_crtc(tmp_crtc)->primary_disabled) { if (crtc) { - DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; + if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES)) + DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); goto out_disable; } crtc = tmp_crtc; @@ -479,8 +493,8 @@ void intel_update_fbc(struct drm_device *dev) } if (!crtc || crtc->fb == NULL) { - DRM_DEBUG_KMS("no output, disabling\n"); - dev_priv->no_fbc_reason = FBC_NO_OUTPUT; + if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT)) + DRM_DEBUG_KMS("no output, disabling\n"); goto out_disable; } @@ -489,23 +503,22 @@ void intel_update_fbc(struct drm_device *dev) intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; - enable_fbc = i915_enable_fbc; - if (enable_fbc < 0) { - DRM_DEBUG_KMS("fbc set to per-chip default\n"); - enable_fbc = 1; - if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) - enable_fbc = 0; + if (i915_enable_fbc < 0 && + INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) { + if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT)) + DRM_DEBUG_KMS("disabled per chip default\n"); + goto out_disable; } - if (!enable_fbc) { - DRM_DEBUG_KMS("fbc disabled per module param\n"); - dev_priv->no_fbc_reason = FBC_MODULE_PARAM; + if (!i915_enable_fbc) { + if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) + DRM_DEBUG_KMS("fbc disabled per module param\n"); goto out_disable; } if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { - DRM_DEBUG_KMS("mode incompatible with compression, " - "disabling\n"); - dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; + if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE)) + DRM_DEBUG_KMS("mode incompatible with compression, " + "disabling\n"); goto out_disable; } @@ -518,14 +531,14 @@ void intel_update_fbc(struct drm_device *dev) } if ((crtc->mode.hdisplay > max_hdisplay) || (crtc->mode.vdisplay > max_vdisplay)) { - DRM_DEBUG_KMS("mode too large for compression, disabling\n"); - dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; + if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE)) 
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n"); goto out_disable; } if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) && intel_crtc->plane != 0) { - DRM_DEBUG_KMS("plane not 0, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_BAD_PLANE; + if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE)) + DRM_DEBUG_KMS("plane not 0, disabling compression\n"); goto out_disable; } @@ -534,8 +547,8 @@ void intel_update_fbc(struct drm_device *dev) */ if (obj->tiling_mode != I915_TILING_X || obj->fence_reg == I915_FENCE_REG_NONE) { - DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_NOT_TILED; + if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED)) + DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); goto out_disable; } @@ -544,8 +557,8 @@ void intel_update_fbc(struct drm_device *dev) goto out_disable; if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) { - DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); - dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; + if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL)) + DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); goto out_disable; } @@ -554,9 +567,9 @@ void intel_update_fbc(struct drm_device *dev) * cannot be unpinned (and have its GTT offset and fence revoked) * without first being decoupled from the scanout and FBC disabled. */ - if (dev_priv->cfb_plane == intel_crtc->plane && - dev_priv->cfb_fb == fb->base.id && - dev_priv->cfb_y == crtc->y) + if (dev_priv->fbc.plane == intel_crtc->plane && + dev_priv->fbc.fb_id == fb->base.id && + dev_priv->fbc.y == crtc->y) return; if (intel_fbc_enabled(dev)) { @@ -588,6 +601,7 @@ void intel_update_fbc(struct drm_device *dev) } intel_enable_fbc(crtc, 500); + dev_priv->fbc.no_fbc_reason = FBC_OK; return; out_disable: @@ -1666,9 +1680,6 @@ static void i830_update_wm(struct drm_device *dev) I915_WRITE(FW_BLC, fwater_lo); } -#define ILK_LP0_PLANE_LATENCY 700 -#define ILK_LP0_CURSOR_LATENCY 1300 - /* * Check the wm result. 
* @@ -1783,9 +1794,9 @@ static void ironlake_update_wm(struct drm_device *dev) enabled = 0; if (g4x_compute_wm0(dev, PIPE_A, &ironlake_display_wm_info, - ILK_LP0_PLANE_LATENCY, + dev_priv->wm.pri_latency[0] * 100, &ironlake_cursor_wm_info, - ILK_LP0_CURSOR_LATENCY, + dev_priv->wm.cur_latency[0] * 100, &plane_wm, &cursor_wm)) { I915_WRITE(WM0_PIPEA_ILK, (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); @@ -1797,9 +1808,9 @@ static void ironlake_update_wm(struct drm_device *dev) if (g4x_compute_wm0(dev, PIPE_B, &ironlake_display_wm_info, - ILK_LP0_PLANE_LATENCY, + dev_priv->wm.pri_latency[0] * 100, &ironlake_cursor_wm_info, - ILK_LP0_CURSOR_LATENCY, + dev_priv->wm.cur_latency[0] * 100, &plane_wm, &cursor_wm)) { I915_WRITE(WM0_PIPEB_ILK, (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); @@ -1823,7 +1834,7 @@ static void ironlake_update_wm(struct drm_device *dev) /* WM1 */ if (!ironlake_compute_srwm(dev, 1, enabled, - ILK_READ_WM1_LATENCY() * 500, + dev_priv->wm.pri_latency[1] * 500, &ironlake_display_srwm_info, &ironlake_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -1831,14 +1842,14 @@ static void ironlake_update_wm(struct drm_device *dev) I915_WRITE(WM1_LP_ILK, WM1_LP_SR_EN | - (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); /* WM2 */ if (!ironlake_compute_srwm(dev, 2, enabled, - ILK_READ_WM2_LATENCY() * 500, + dev_priv->wm.pri_latency[2] * 500, &ironlake_display_srwm_info, &ironlake_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -1846,7 +1857,7 @@ static void ironlake_update_wm(struct drm_device *dev) I915_WRITE(WM2_LP_ILK, WM2_LP_EN | - (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); @@ -1860,7 +1871,7 @@ static void ironlake_update_wm(struct drm_device *dev) static void sandybridge_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ + int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */ u32 val; int fbc_wm, plane_wm, cursor_wm; unsigned int enabled; @@ -1915,7 +1926,7 @@ static void sandybridge_update_wm(struct drm_device *dev) /* WM1 */ if (!ironlake_compute_srwm(dev, 1, enabled, - SNB_READ_WM1_LATENCY() * 500, + dev_priv->wm.pri_latency[1] * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -1923,14 +1934,14 @@ static void sandybridge_update_wm(struct drm_device *dev) I915_WRITE(WM1_LP_ILK, WM1_LP_SR_EN | - (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); /* WM2 */ if (!ironlake_compute_srwm(dev, 2, enabled, - SNB_READ_WM2_LATENCY() * 500, + dev_priv->wm.pri_latency[2] * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -1938,14 +1949,14 @@ static void sandybridge_update_wm(struct drm_device *dev) I915_WRITE(WM2_LP_ILK, WM2_LP_EN | - (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); /* WM3 */ if (!ironlake_compute_srwm(dev, 3, enabled, - SNB_READ_WM3_LATENCY() * 500, + dev_priv->wm.pri_latency[3] * 500, 
&sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -1953,7 +1964,7 @@ static void sandybridge_update_wm(struct drm_device *dev) I915_WRITE(WM3_LP_ILK, WM3_LP_EN | - (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); @@ -1962,7 +1973,7 @@ static void sandybridge_update_wm(struct drm_device *dev) static void ivybridge_update_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ + int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */ u32 val; int fbc_wm, plane_wm, cursor_wm; int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm; @@ -2032,7 +2043,7 @@ static void ivybridge_update_wm(struct drm_device *dev) /* WM1 */ if (!ironlake_compute_srwm(dev, 1, enabled, - SNB_READ_WM1_LATENCY() * 500, + dev_priv->wm.pri_latency[1] * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -2040,14 +2051,14 @@ static void ivybridge_update_wm(struct drm_device *dev) I915_WRITE(WM1_LP_ILK, WM1_LP_SR_EN | - (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); /* WM2 */ if (!ironlake_compute_srwm(dev, 2, enabled, - SNB_READ_WM2_LATENCY() * 500, + dev_priv->wm.pri_latency[2] * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &cursor_wm)) @@ -2055,19 +2066,19 @@ static void ivybridge_update_wm(struct drm_device *dev) I915_WRITE(WM2_LP_ILK, WM2_LP_EN | - (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); /* WM3, note we have to correct the cursor latency */ if (!ironlake_compute_srwm(dev, 3, enabled, - SNB_READ_WM3_LATENCY() * 500, + dev_priv->wm.pri_latency[3] * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &fbc_wm, &plane_wm, &ignore_cursor_wm) || !ironlake_compute_srwm(dev, 3, enabled, - 2 * SNB_READ_WM3_LATENCY() * 500, + dev_priv->wm.cur_latency[3] * 500, &sandybridge_display_srwm_info, &sandybridge_cursor_srwm_info, &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm)) @@ -2075,14 +2086,14 @@ static void ivybridge_update_wm(struct drm_device *dev) I915_WRITE(WM3_LP_ILK, WM3_LP_EN | - (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) | (fbc_wm << WM1_LP_FBC_SHIFT) | (plane_wm << WM1_LP_SR_SHIFT) | cursor_wm); } -static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev, - struct drm_crtc *crtc) +static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev, + struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pixel_rate, pfit_size; @@ -2112,30 +2123,38 @@ static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev, return pixel_rate; } -static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, +/* latency must be in 0.1us units. 
*/ +static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, uint32_t latency) { uint64_t ret; + if (WARN(latency == 0, "Latency value missing\n")) + return UINT_MAX; + ret = (uint64_t) pixel_rate * bytes_per_pixel * latency; ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2; return ret; } -static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, +/* latency must be in 0.1us units. */ +static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, uint32_t horiz_pixels, uint8_t bytes_per_pixel, uint32_t latency) { uint32_t ret; + if (WARN(latency == 0, "Latency value missing\n")) + return UINT_MAX; + ret = (latency * pixel_rate) / (pipe_htotal * 10000); ret = (ret + 1) * horiz_pixels * bytes_per_pixel; ret = DIV_ROUND_UP(ret, 64) + 2; return ret; } -static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, +static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, uint8_t bytes_per_pixel) { return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; @@ -2143,15 +2162,11 @@ static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, struct hsw_pipe_wm_parameters { bool active; - bool sprite_enabled; - uint8_t pri_bytes_per_pixel; - uint8_t spr_bytes_per_pixel; - uint8_t cur_bytes_per_pixel; - uint32_t pri_horiz_pixels; - uint32_t spr_horiz_pixels; - uint32_t cur_horiz_pixels; uint32_t pipe_htotal; uint32_t pixel_rate; + struct intel_plane_wm_parameters pri; + struct intel_plane_wm_parameters spr; + struct intel_plane_wm_parameters cur; }; struct hsw_wm_maximums { @@ -2161,15 +2176,6 @@ struct hsw_wm_maximums { uint16_t fbc; }; -struct hsw_lp_wm_result { - bool enable; - bool fbc_enable; - uint32_t pri_val; - uint32_t spr_val; - uint32_t cur_val; - uint32_t fbc_val; -}; - struct hsw_wm_values { uint32_t wm_pipe[3]; uint32_t wm_lp[3]; @@ -2178,128 +2184,289 @@ struct hsw_wm_values { bool enable_fbc_wm; }; -enum hsw_data_buf_partitioning { - HSW_DATA_BUF_PART_1_2, - HSW_DATA_BUF_PART_5_6, +/* used in computing the new watermarks state */ +struct intel_wm_config { + unsigned int num_pipes_active; + bool sprites_enabled; + bool sprites_scaled; + bool fbc_wm_enabled; }; -/* For both WM_PIPE and WM_LP. */ -static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params, +/* + * For both WM_PIPE and WM_LP. + * mem_value must be in 0.1us units. + */ +static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params, uint32_t mem_value, bool is_lp) { uint32_t method1, method2; - /* TODO: for now, assume the primary plane is always enabled. */ - if (!params->active) + if (!params->active || !params->pri.enabled) return 0; - method1 = hsw_wm_method1(params->pixel_rate, - params->pri_bytes_per_pixel, + method1 = ilk_wm_method1(params->pixel_rate, + params->pri.bytes_per_pixel, mem_value); if (!is_lp) return method1; - method2 = hsw_wm_method2(params->pixel_rate, + method2 = ilk_wm_method2(params->pixel_rate, params->pipe_htotal, - params->pri_horiz_pixels, - params->pri_bytes_per_pixel, + params->pri.horiz_pixels, + params->pri.bytes_per_pixel, mem_value); return min(method1, method2); } -/* For both WM_PIPE and WM_LP. */ -static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params, +/* + * For both WM_PIPE and WM_LP. + * mem_value must be in 0.1us units. 
+ */
+static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
                    uint32_t mem_value)
 {
     uint32_t method1, method2;

-    if (!params->active || !params->sprite_enabled)
+    if (!params->active || !params->spr.enabled)
         return 0;

-    method1 = hsw_wm_method1(params->pixel_rate,
-                 params->spr_bytes_per_pixel,
+    method1 = ilk_wm_method1(params->pixel_rate,
+                 params->spr.bytes_per_pixel,
                  mem_value);
-    method2 = hsw_wm_method2(params->pixel_rate,
+    method2 = ilk_wm_method2(params->pixel_rate,
                  params->pipe_htotal,
-                 params->spr_horiz_pixels,
-                 params->spr_bytes_per_pixel,
+                 params->spr.horiz_pixels,
+                 params->spr.bytes_per_pixel,
                  mem_value);

     return min(method1, method2);
 }

-/* For both WM_PIPE and WM_LP. */
-static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
                    uint32_t mem_value)
 {
-    if (!params->active)
+    if (!params->active || !params->cur.enabled)
         return 0;

-    return hsw_wm_method2(params->pixel_rate,
+    return ilk_wm_method2(params->pixel_rate,
                   params->pipe_htotal,
-                  params->cur_horiz_pixels,
-                  params->cur_bytes_per_pixel,
+                  params->cur.horiz_pixels,
+                  params->cur.bytes_per_pixel,
                   mem_value);
 }

 /* Only for WM_LP. */
-static uint32_t hsw_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
-                   uint32_t pri_val,
-                   uint32_t mem_value)
+static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+                   uint32_t pri_val)
 {
-    if (!params->active)
+    if (!params->active || !params->pri.enabled)
         return 0;

-    return hsw_wm_fbc(pri_val,
-              params->pri_horiz_pixels,
-              params->pri_bytes_per_pixel);
+    return ilk_wm_fbc(pri_val,
+              params->pri.horiz_pixels,
+              params->pri.bytes_per_pixel);
 }
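For reference, ilk_wm_method1() bounds the watermark by the raw pixel rate while ilk_wm_method2() works in whole scanlines, and the ilk_compute_*_wm() helpers above take the minimum of the two. A standalone model of the arithmetic — not kernel code; wm_method1/wm_method2 and the sample display parameters are invented for this sketch:

#include <stdint.h>
#include <stdio.h>

/* latency in 0.1us units, pixel_rate in kHz, result in 64-byte blocks */
static uint32_t wm_method1(uint64_t pixel_rate, uint32_t bpp, uint32_t latency)
{
    /* mirrors DIV_ROUND_UP_ULL(pixel_rate * bpp * latency, 64 * 10000) + 2 */
    return (uint32_t)((pixel_rate * bpp * latency + 64 * 10000 - 1) /
                      (64 * 10000)) + 2;
}

static uint32_t wm_method2(uint32_t pixel_rate, uint32_t htotal,
                           uint32_t width, uint32_t bpp, uint32_t latency)
{
    uint32_t lines = latency * pixel_rate / (htotal * 10000); /* full lines */
    return ((lines + 1) * width * bpp + 63) / 64 + 2;
}

int main(void)
{
    /* roughly a 1920x1080 mode: 148500 kHz clock, htotal 2200, 4 bytes/px,
     * and a WM0 latency of 0.7us (7 in 0.1us units) */
    printf("method1 = %u\n", wm_method1(148500, 4, 7));
    printf("method2 = %u\n", wm_method2(148500, 2200, 1920, 4, 7));
    return 0;
}

With these inputs method1 comes out far smaller (9 vs. 122 blocks), so it wins the min(); method2 only matters once latencies span multiple scanlines, which is why it is skipped for non-LP primary watermarks above.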
-static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max,
-                  struct hsw_pipe_wm_parameters *params,
-                  struct hsw_lp_wm_result *result)
+static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
 {
-    enum pipe pipe;
-    uint32_t pri_val[3], spr_val[3], cur_val[3], fbc_val[3];
+    if (INTEL_INFO(dev)->gen >= 7)
+        return 768;
+    else
+        return 512;
+}

-    for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
-        struct hsw_pipe_wm_parameters *p = &params[pipe];
+/* Calculate the maximum primary/sprite plane watermark */
+static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
+                     int level,
+                     const struct intel_wm_config *config,
+                     enum intel_ddb_partitioning ddb_partitioning,
+                     bool is_sprite)
+{
+    unsigned int fifo_size = ilk_display_fifo_size(dev);
+    unsigned int max;

-        pri_val[pipe] = hsw_compute_pri_wm(p, mem_value, true);
-        spr_val[pipe] = hsw_compute_spr_wm(p, mem_value);
-        cur_val[pipe] = hsw_compute_cur_wm(p, mem_value);
-        fbc_val[pipe] = hsw_compute_fbc_wm(p, pri_val[pipe], mem_value);
-    }
+    /* if sprites aren't enabled, sprites get nothing */
+    if (is_sprite && !config->sprites_enabled)
+        return 0;

-    result->pri_val = max3(pri_val[0], pri_val[1], pri_val[2]);
-    result->spr_val = max3(spr_val[0], spr_val[1], spr_val[2]);
-    result->cur_val = max3(cur_val[0], cur_val[1], cur_val[2]);
-    result->fbc_val = max3(fbc_val[0], fbc_val[1], fbc_val[2]);
+    /* HSW allows LP1+ watermarks even with multiple pipes */
+    if (level == 0 || config->num_pipes_active > 1) {
+        fifo_size /= INTEL_INFO(dev)->num_pipes;

-    if (result->fbc_val > max->fbc) {
-        result->fbc_enable = false;
-        result->fbc_val = 0;
-    } else {
-        result->fbc_enable = true;
+        /*
+         * For some reason the non self refresh
+         * FIFO size is only half of the self
+         * refresh FIFO size on ILK/SNB.
+         */
+        if (INTEL_INFO(dev)->gen <= 6)
+            fifo_size /= 2;
+    }
+
+    if (config->sprites_enabled) {
+        /* level 0 is always calculated with 1:1 split */
+        if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
+            if (is_sprite)
+                fifo_size *= 5;
+            fifo_size /= 6;
+        } else {
+            fifo_size /= 2;
+        }
     }

+    /* clamp to max that the registers can hold */
+    if (INTEL_INFO(dev)->gen >= 7)
+        /* IVB/HSW primary/sprite plane watermarks */
+        max = level == 0 ? 127 : 1023;
+    else if (!is_sprite)
+        /* ILK/SNB primary plane watermarks */
+        max = level == 0 ? 127 : 511;
+    else
+        /* ILK/SNB sprite plane watermarks */
+        max = level == 0 ? 63 : 255;
+
+    return min(fifo_size, max);
+}
+
+/* Calculate the maximum cursor plane watermark */
+static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
+                      int level,
+                      const struct intel_wm_config *config)
+{
+    /* HSW LP1+ watermarks w/ multiple pipes */
+    if (level > 0 && config->num_pipes_active > 1)
+        return 64;
+
+    /* otherwise just report max that registers can hold */
+    if (INTEL_INFO(dev)->gen >= 7)
+        return level == 0 ? 63 : 255;
+    else
+        return level == 0 ? 31 : 63;
+}
+
+/* Calculate the maximum FBC watermark */
+static unsigned int ilk_fbc_wm_max(void)
+{
+    /* max that registers can hold */
+    return 15;
+}
+
+static void ilk_wm_max(struct drm_device *dev,
+               int level,
+               const struct intel_wm_config *config,
+               enum intel_ddb_partitioning ddb_partitioning,
+               struct hsw_wm_maximums *max)
+{
+    max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
+    max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
+    max->cur = ilk_cursor_wm_max(dev, level, config);
+    max->fbc = ilk_fbc_wm_max();
+}
+
+static bool ilk_check_wm(int level,
+             const struct hsw_wm_maximums *max,
+             struct intel_wm_level *result)
+{
+    bool ret;
+
+    /* already determined to be invalid? */
+    if (!result->enable)
+        return false;
+
     result->enable = result->pri_val <= max->pri &&
              result->spr_val <= max->spr &&
              result->cur_val <= max->cur;
+
+    ret = result->enable;
+
+    /*
+     * HACK until we can pre-compute everything,
+     * and thus fail gracefully if LP0 watermarks
+     * are exceeded...
+     */
+    if (level == 0 && !result->enable) {
+        if (result->pri_val > max->pri)
+            DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
+                      level, result->pri_val, max->pri);
+        if (result->spr_val > max->spr)
+            DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
+                      level, result->spr_val, max->spr);
+        if (result->cur_val > max->cur)
+            DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
+                      level, result->cur_val, max->cur);
+
+        result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
+        result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
+        result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
+        result->enable = true;
+    }
+
+    DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
+
+    return ret;
+}
+
+static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
+                 int level,
+                 struct hsw_pipe_wm_parameters *p,
+                 struct intel_wm_level *result)
+{
+    uint16_t pri_latency = dev_priv->wm.pri_latency[level];
+    uint16_t spr_latency = dev_priv->wm.spr_latency[level];
+    uint16_t cur_latency = dev_priv->wm.cur_latency[level];
+
+    /* WM1+ latency values stored in 0.5us units */
+    if (level > 0) {
+        pri_latency *= 5;
+        spr_latency *= 5;
+        cur_latency *= 5;
+    }
+
+    result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
+    result->spr_val = ilk_compute_spr_wm(p, spr_latency);
+    result->cur_val = ilk_compute_cur_wm(p, cur_latency);
+    result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
+    result->enable = true;
+}
+
+static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
+                  int level, struct hsw_wm_maximums *max,
+                  struct hsw_pipe_wm_parameters *params,
+                  struct intel_wm_level *result)
+{
+    enum pipe pipe;
+    struct intel_wm_level res[3];
+
+    for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
+        ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
+
+    result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
+    result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
+    result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
+    result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
+    result->enable = true;
+
+    return ilk_check_wm(level, max, result);
 }

 static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
-                    uint32_t mem_value, enum pipe pipe,
+                    enum pipe pipe,
                     struct hsw_pipe_wm_parameters *params)
 {
     uint32_t pri_val, cur_val, spr_val;
+    /* WM0 latency values stored in 0.1us units */
+    uint16_t pri_latency = dev_priv->wm.pri_latency[0];
+    uint16_t spr_latency = dev_priv->wm.spr_latency[0];
+    uint16_t cur_latency = dev_priv->wm.cur_latency[0];

-    pri_val = hsw_compute_pri_wm(params, mem_value, false);
-    spr_val = hsw_compute_spr_wm(params, mem_value);
-    cur_val = hsw_compute_cur_wm(params, mem_value);
+    pri_val = ilk_compute_pri_wm(params, pri_latency, false);
+    spr_val = ilk_compute_spr_wm(params, spr_latency);
+    cur_val = ilk_compute_cur_wm(params, cur_latency);

     WARN(pri_val > 127,
          "Primary WM error, mode not supported for pipe %c\n",
@@ -2338,27 +2505,116 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
            PIPE_WM_LINETIME_TIME(linetime);
 }

+static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+
+    if (IS_HASWELL(dev)) {
+        uint64_t sskpd = I915_READ64(MCH_SSKPD);
+
+        wm[0] = (sskpd >> 56) & 0xFF;
+        if (wm[0] == 0)
+            wm[0] = sskpd & 0xF;
+        wm[1] = (sskpd >> 4) & 0xFF;
+        wm[2] = (sskpd >> 12) & 0xFF;
+        wm[3] = (sskpd >> 20) & 0x1FF;
+        wm[4] = (sskpd >> 32) & 0x1FF;
+    } else if (INTEL_INFO(dev)->gen >= 6) {
+        uint32_t sskpd = I915_READ(MCH_SSKPD);
+
+        wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
+        wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
+        wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
+        wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
+    } else if (INTEL_INFO(dev)->gen >= 5) {
+        uint32_t mltr = I915_READ(MLTR_ILK);
+
+        /* ILK primary LP0 latency is 700 ns */
+        wm[0] = 7;
+        wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
+        wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
+    }
+}
+
+static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+    /* ILK sprite LP0 latency is 1300 ns */
+    if (INTEL_INFO(dev)->gen == 5)
+        wm[0] = 13;
+}
+
+static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+    /* ILK cursor LP0 latency is 1300 ns */
+    if (INTEL_INFO(dev)->gen == 5)
+        wm[0] = 13;
+
+    /* WaDoubleCursorLP3Latency:ivb */
+    if (IS_IVYBRIDGE(dev))
+        wm[3] *= 2;
+}
+
+static void intel_print_wm_latency(struct drm_device *dev,
+                   const char *name,
+                   const uint16_t wm[5])
+{
+    int level, max_level;
+
+    /* how many WM levels are we expecting */
+    if (IS_HASWELL(dev))
+        max_level = 4;
+    else if (INTEL_INFO(dev)->gen >= 6)
+        max_level = 3;
+    else
+        max_level = 2;
+
+    for (level = 0; level <= max_level; level++) {
+        unsigned int latency = wm[level];
+
+        if (latency == 0) {
+            DRM_ERROR("%s WM%d latency not provided\n",
+                  name, level);
+            continue;
+        }
+
+        /* WM1+ latency values in 0.5us units */
+        if (level > 0)
+            latency *= 5;
+
+        DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
+                  name, level, wm[level],
+                  latency / 10, latency % 10);
+    }
+}
+
+static void intel_setup_wm_latency(struct drm_device *dev)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+
+    intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
+
+    memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
+           sizeof(dev_priv->wm.pri_latency));
+    memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
+           sizeof(dev_priv->wm.pri_latency));
+
+    intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
+    intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
+
+    intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+    intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+    intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+}
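intel_setup_wm_latency() above reads the hardware latency table once, copies the primary values for sprite and cursor, applies the ILK/IVB fixups and then prints the result. A standalone model of the Haswell read-and-print path — the SSKPD register value here is an invented sample, but the field offsets and the unit normalization (WM0 in 0.1us units, WM1+ in 0.5us units) mirror intel_read_wm_latency() and intel_print_wm_latency() above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* invented MCH_SSKPD value; layout matches the IS_HASWELL branch */
    uint64_t sskpd = 0x0402081004030207ULL;
    uint16_t wm[5];

    wm[0] = (sskpd >> 56) & 0xFF;       /* WM0, 0.1us units */
    if (wm[0] == 0)
        wm[0] = sskpd & 0xF;            /* legacy fallback field */
    wm[1] = (sskpd >> 4) & 0xFF;        /* WM1+ in 0.5us units */
    wm[2] = (sskpd >> 12) & 0xFF;
    wm[3] = (sskpd >> 20) & 0x1FF;
    wm[4] = (sskpd >> 32) & 0x1FF;

    for (int level = 0; level <= 4; level++) {
        unsigned int latency = wm[level];

        if (level > 0)                  /* normalize to 0.1us units */
            latency *= 5;

        printf("Primary WM%d latency %u (%u.%u usec)\n",
               level, wm[level], latency / 10, latency % 10);
    }
    return 0;
}

Keeping raw register units in dev_priv->wm and converting only at the point of use is what lets ilk_compute_wm_level() apply the 0.5us scaling exactly once for WM1+ levels.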
 static void hsw_compute_wm_parameters(struct drm_device *dev,
                       struct hsw_pipe_wm_parameters *params,
-                      uint32_t *wm,
                       struct hsw_wm_maximums *lp_max_1_2,
                       struct hsw_wm_maximums *lp_max_5_6)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
     struct drm_crtc *crtc;
     struct drm_plane *plane;
-    uint64_t sskpd = I915_READ64(MCH_SSKPD);
     enum pipe pipe;
-    int pipes_active = 0, sprites_enabled = 0;
-
-    if ((sskpd >> 56) & 0xFF)
-        wm[0] = (sskpd >> 56) & 0xFF;
-    else
-        wm[0] = sskpd & 0xF;
-    wm[1] = ((sskpd >> 4) & 0xFF) * 5;
-    wm[2] = ((sskpd >> 12) & 0xFF) * 5;
-    wm[3] = ((sskpd >> 20) & 0x1FF) * 5;
-    wm[4] = ((sskpd >> 32) & 0x1FF) * 5;
+    struct intel_wm_config config = {};

     list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2371,15 +2627,18 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
         if (!p->active)
             continue;

-        pipes_active++;
+        config.num_pipes_active++;

         p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
-        p->pixel_rate = hsw_wm_get_pixel_rate(dev, crtc);
-        p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
-        p->cur_bytes_per_pixel = 4;
-        p->pri_horiz_pixels =
+        p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+        p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
+        p->cur.bytes_per_pixel = 4;
+        p->pri.horiz_pixels =
             intel_crtc->config.requested_mode.hdisplay;
-        p->cur_horiz_pixels = 64;
+        p->cur.horiz_pixels = 64;
+        /* TODO: for now, assume primary and cursor planes are always
+         * enabled. */
+        p->pri.enabled = true;
+        p->cur.enabled = true;
     }

     list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -2389,59 +2648,53 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
         pipe = intel_plane->pipe;
         p = &params[pipe];

-        p->sprite_enabled = intel_plane->wm.enable;
-        p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel;
-        p->spr_horiz_pixels = intel_plane->wm.horiz_pixels;
+        p->spr = intel_plane->wm;

-        if (p->sprite_enabled)
-            sprites_enabled++;
+        config.sprites_enabled |= p->spr.enabled;
+        config.sprites_scaled |= p->spr.scaled;
     }

-    if (pipes_active > 1) {
-        lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256;
-        lp_max_1_2->spr = lp_max_5_6->spr = 128;
-        lp_max_1_2->cur = lp_max_5_6->cur = 64;
-    } else {
-        lp_max_1_2->pri = sprites_enabled ? 384 : 768;
-        lp_max_5_6->pri = sprites_enabled ? 128 : 768;
-        lp_max_1_2->spr = 384;
-        lp_max_5_6->spr = 640;
-        lp_max_1_2->cur = lp_max_5_6->cur = 255;
-    }
-    lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
+    ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
+
+    /* 5/6 split only in single pipe config on IVB+ */
+    if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
+        ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
+    else
+        *lp_max_5_6 = *lp_max_1_2;
 }

 static void hsw_compute_wm_results(struct drm_device *dev,
                    struct hsw_pipe_wm_parameters *params,
-                   uint32_t *wm,
                    struct hsw_wm_maximums *lp_maximums,
                    struct hsw_wm_values *results)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct drm_crtc *crtc;
-    struct hsw_lp_wm_result lp_results[4] = {};
+    struct intel_wm_level lp_results[4] = {};
     enum pipe pipe;
     int level, max_level, wm_lp;

     for (level = 1; level <= 4; level++)
-        if (!hsw_compute_lp_wm(wm[level], lp_maximums, params,
+        if (!hsw_compute_lp_wm(dev_priv, level,
+                       lp_maximums, params,
                        &lp_results[level - 1]))
             break;

     max_level = level - 1;

+    memset(results, 0, sizeof(*results));
+
     /* The spec says it is preferred to disable FBC WMs instead of disabling
      * a WM level. */
     results->enable_fbc_wm = true;
     for (level = 1; level <= max_level; level++) {
-        if (!lp_results[level - 1].fbc_enable) {
+        if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
             results->enable_fbc_wm = false;
-            break;
+            lp_results[level - 1].fbc_val = 0;
         }
     }

-    memset(results, 0, sizeof(*results));
-
     for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
-        const struct hsw_lp_wm_result *r;
+        const struct intel_wm_level *r;

         level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
         if (level > max_level)
@@ -2456,8 +2709,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
     }

     for_each_pipe(pipe)
-        results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, wm[0],
-                             pipe,
+        results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
                              &params[pipe]);

     for_each_pipe(pipe) {
@@ -2468,8 +2720,8 @@

 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
  * case both are at the same level. Prefer r1 in case they're the same.
*/ -struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1, - struct hsw_wm_values *r2) +static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1, + struct hsw_wm_values *r2) { int i, val_r1 = 0, val_r2 = 0; @@ -2498,11 +2750,11 @@ struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1, */ static void hsw_write_wm_values(struct drm_i915_private *dev_priv, struct hsw_wm_values *results, - enum hsw_data_buf_partitioning partitioning) + enum intel_ddb_partitioning partitioning) { struct hsw_wm_values previous; uint32_t val; - enum hsw_data_buf_partitioning prev_partitioning; + enum intel_ddb_partitioning prev_partitioning; bool prev_enable_fbc_wm; previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK); @@ -2519,7 +2771,7 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv, previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C)); prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? - HSW_DATA_BUF_PART_5_6 : HSW_DATA_BUF_PART_1_2; + INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); @@ -2558,7 +2810,7 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv, if (prev_partitioning != partitioning) { val = I915_READ(WM_MISC); - if (partitioning == HSW_DATA_BUF_PART_1_2) + if (partitioning == INTEL_DDB_PART_1_2) val &= ~WM_MISC_DATA_PARTITION_5_6; else val |= WM_MISC_DATA_PARTITION_5_6; @@ -2595,44 +2847,39 @@ static void haswell_update_wm(struct drm_device *dev) struct hsw_wm_maximums lp_max_1_2, lp_max_5_6; struct hsw_pipe_wm_parameters params[3]; struct hsw_wm_values results_1_2, results_5_6, *best_results; - uint32_t wm[5]; - enum hsw_data_buf_partitioning partitioning; + enum intel_ddb_partitioning partitioning; - hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6); + hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6); - hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2); + hsw_compute_wm_results(dev, params, + &lp_max_1_2, &results_1_2); if (lp_max_1_2.pri != lp_max_5_6.pri) { - hsw_compute_wm_results(dev, params, wm, &lp_max_5_6, - &results_5_6); + hsw_compute_wm_results(dev, params, + &lp_max_5_6, &results_5_6); best_results = hsw_find_best_result(&results_1_2, &results_5_6); } else { best_results = &results_1_2; } partitioning = (best_results == &results_1_2) ? - HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6; + INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; hsw_write_wm_values(dev_priv, best_results, partitioning); } -static void haswell_update_sprite_wm(struct drm_device *dev, int pipe, +static void haswell_update_sprite_wm(struct drm_plane *plane, + struct drm_crtc *crtc, uint32_t sprite_width, int pixel_size, - bool enable) + bool enabled, bool scaled) { - struct drm_plane *plane; + struct intel_plane *intel_plane = to_intel_plane(plane); - list_for_each_entry(plane, &dev->mode_config.plane_list, head) { - struct intel_plane *intel_plane = to_intel_plane(plane); - - if (intel_plane->pipe == pipe) { - intel_plane->wm.enable = enable; - intel_plane->wm.horiz_pixels = sprite_width + 1; - intel_plane->wm.bytes_per_pixel = pixel_size; - break; - } - } + intel_plane->wm.enabled = enabled; + intel_plane->wm.scaled = scaled; + intel_plane->wm.horiz_pixels = sprite_width; + intel_plane->wm.bytes_per_pixel = pixel_size; - haswell_update_wm(dev); + haswell_update_wm(plane->dev); } static bool @@ -2711,17 +2958,20 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, return *sprite_wm > 0x3ff ? 
false : true;
 }

-static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+static void sandybridge_update_sprite_wm(struct drm_plane *plane,
+                     struct drm_crtc *crtc,
                      uint32_t sprite_width, int pixel_size,
-                     bool enable)
+                     bool enabled, bool scaled)
 {
+    struct drm_device *dev = plane->dev;
     struct drm_i915_private *dev_priv = dev->dev_private;
-    int latency = SNB_READ_WM0_LATENCY() * 100;    /* In unit 0.1us */
+    int pipe = to_intel_plane(plane)->pipe;
+    int latency = dev_priv->wm.spr_latency[0] * 100;    /* In unit 0.1us */
     u32 val;
     int sprite_wm, reg;
     int ret;

-    if (!enable)
+    if (!enabled)
         return;

     switch (pipe) {
@@ -2756,7 +3006,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
     ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
                           pixel_size,
                           &sandybridge_display_srwm_info,
-                          SNB_READ_WM1_LATENCY() * 500,
+                          dev_priv->wm.spr_latency[1] * 500,
                           &sprite_wm);
     if (!ret) {
         DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
@@ -2772,7 +3022,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
     ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
                           pixel_size,
                           &sandybridge_display_srwm_info,
-                          SNB_READ_WM2_LATENCY() * 500,
+                          dev_priv->wm.spr_latency[2] * 500,
                           &sprite_wm);
     if (!ret) {
         DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
@@ -2784,7 +3034,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
     ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
                           pixel_size,
                           &sandybridge_display_srwm_info,
-                          SNB_READ_WM3_LATENCY() * 500,
+                          dev_priv->wm.spr_latency[3] * 500,
                           &sprite_wm);
     if (!ret) {
         DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
@@ -2834,15 +3084,16 @@ void intel_update_watermarks(struct drm_device *dev)
         dev_priv->display.update_wm(dev);
 }

-void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+                    struct drm_crtc *crtc,
                     uint32_t sprite_width, int pixel_size,
-                    bool enable)
+                    bool enabled, bool scaled)
 {
-    struct drm_i915_private *dev_priv = dev->dev_private;
+    struct drm_i915_private *dev_priv = plane->dev->dev_private;

     if (dev_priv->display.update_sprite_wm)
-        dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
-                           pixel_size, enable);
+        dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
+                           pixel_size, enabled, scaled);
 }

 static struct drm_i915_gem_object *
@@ -2859,7 +3110,7 @@ intel_alloc_context_page(struct drm_device *dev)
         return NULL;
     }

-    ret = i915_gem_object_pin(ctx, 4096, true, false);
+    ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
     if (ret) {
         DRM_ERROR("failed to pin power context: %d\n", ret);
         goto err_unref;
@@ -3076,19 +3327,12 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
  */
 static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
 {
-    unsigned long timeout = jiffies + msecs_to_jiffies(10);
     u32 pval;

     WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

-    do {
-        pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-        if (time_after(jiffies, timeout)) {
-            DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
-            break;
-        }
-        udelay(10);
-    } while (pval & 1);
+    if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) &
+              GENFREQSTATUS) == 0, 10))
+        DRM_DEBUG_DRIVER("timed out waiting for Punit\n");

     pval >>= 8;

@@ -3129,13 +3373,10 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)

     trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
 }
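The vlv_update_rps_cur_delay() hunk above replaces an open-coded jiffies poll with i915's wait_for(condition, timeout_ms) macro, which bundles the condition check, timeout and debug path into one expression. A standalone model of that polling pattern — wait_for_cond(), punit_idle() and the clock()-based timing are invented stand-ins for this sketch; the real macro sleeps and measures time in jiffies:

#include <stdio.h>
#include <time.h>

/* returns 0 on success, nonzero on timeout, like i915's wait_for() */
static int wait_for_cond(int (*cond)(void), int timeout_ms)
{
    clock_t end = clock() + (clock_t)timeout_ms * CLOCKS_PER_SEC / 1000;

    while (!cond()) {
        if (clock() > end)
            return -1;          /* timed out */
        /* a real implementation would udelay()/msleep() here */
    }
    return 0;
}

static int punit_idle(void) { return 1; }  /* stand-in for the status read */

int main(void)
{
    if (wait_for_cond(punit_idle, 10))
        puts("timed out waiting for Punit");
    return 0;
}

Folding the poll into a single macro also fixes a subtlety of the old loop: the condition is re-read after the deadline check, so a late success cannot be misreported as a timeout.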
drm_device *dev) +static void gen6_disable_rps_interrupts(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(GEN6_RC_CONTROL, 0); - I915_WRITE(GEN6_RPNSWREQ, 1 << 31); I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS); /* Complete PM interrupt masking here doesn't race with the rps work @@ -3143,30 +3384,30 @@ static void gen6_disable_rps(struct drm_device *dev) * register (PMIMR) to mask PM interrupts. The only risk is in leaving * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ - spin_lock_irq(&dev_priv->rps.lock); + spin_lock_irq(&dev_priv->irq_lock); dev_priv->rps.pm_iir = 0; - spin_unlock_irq(&dev_priv->rps.lock); + spin_unlock_irq(&dev_priv->irq_lock); I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); } -static void valleyview_disable_rps(struct drm_device *dev) +static void gen6_disable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE(GEN6_RC_CONTROL, 0); - I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); - I915_WRITE(GEN6_PMIER, 0); - /* Complete PM interrupt masking here doesn't race with the rps work - * item again unmasking PM interrupts because that is using a different - * register (PMIMR) to mask PM interrupts. The only risk is in leaving - * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ + I915_WRITE(GEN6_RPNSWREQ, 1 << 31); - spin_lock_irq(&dev_priv->rps.lock); - dev_priv->rps.pm_iir = 0; - spin_unlock_irq(&dev_priv->rps.lock); + gen6_disable_rps_interrupts(dev); +} - I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); +static void valleyview_disable_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(GEN6_RC_CONTROL, 0); + + gen6_disable_rps_interrupts(dev); if (dev_priv->vlv_pctx) { drm_gem_object_unreference(&dev_priv->vlv_pctx->base); @@ -3176,6 +3417,10 @@ static void valleyview_disable_rps(struct drm_device *dev) int intel_enable_rc6(const struct drm_device *dev) { + /* No RC6 before Ironlake */ + if (INTEL_INFO(dev)->gen < 5) + return 0; + /* Respect the kernel parameter if it is set */ if (i915_enable_rc6 >= 0) return i915_enable_rc6; @@ -3199,6 +3444,19 @@ int intel_enable_rc6(const struct drm_device *dev) return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); } +static void gen6_enable_rps_interrupts(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + spin_lock_irq(&dev_priv->irq_lock); + WARN_ON(dev_priv->rps.pm_iir); + snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); + I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); + spin_unlock_irq(&dev_priv->irq_lock); + /* only unmask PM interrupts we need. Mask all others. 
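The hunks above deduplicate the RPS teardown: gen6_disable_rps() and valleyview_disable_rps() used to open-code the same PMINTRMSK/PMIER masking, which now lives in a shared gen6_disable_rps_interrupts(), and the pm_iir bookkeeping moves from the old rps.lock under dev_priv->irq_lock so a single lock covers both the software copy of pending events and the mask registers. A rough userspace model of that pattern (all names here are illustrative, not the driver's code):

#include <pthread.h>
#include <stdio.h>

/* Model: one lock guards both the software copy of latched events and
 * the "hardware" mask word, so enable/disable cannot race the work
 * item that consumes the events. */
struct irq_state {
    pthread_mutex_t lock;
    unsigned pending;   /* software copy of latched events (pm_iir) */
    unsigned hw_mask;   /* bits set here are masked off in hardware */
};

#define RPS_EVENTS 0x7u  /* illustrative event bits */

static void rps_interrupts_disable(struct irq_state *s)
{
    pthread_mutex_lock(&s->lock);
    s->hw_mask |= RPS_EVENTS;   /* mask the events */
    s->pending &= ~RPS_EVENTS;  /* and forget anything latched */
    pthread_mutex_unlock(&s->lock);
}

static void rps_interrupts_enable(struct irq_state *s)
{
    pthread_mutex_lock(&s->lock);
    s->pending &= ~RPS_EVENTS;  /* start from a clean slate */
    s->hw_mask &= ~RPS_EVENTS;  /* unmask only the events we need */
    pthread_mutex_unlock(&s->lock);
}

int main(void)
{
    struct irq_state s = { PTHREAD_MUTEX_INITIALIZER, 0, ~0u };
    rps_interrupts_enable(&s);
    printf("mask after enable: %#x\n", s.hw_mask);
    rps_interrupts_disable(&s);
    printf("mask after disable: %#x\n", s.hw_mask);
    return 0;
}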
*/ + I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS); +} + static void gen6_enable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3250,7 +3508,10 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); - I915_WRITE(GEN6_RC6_THRESHOLD, 50000); + if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) + I915_WRITE(GEN6_RC6_THRESHOLD, 125000); + else + I915_WRITE(GEN6_RC6_THRESHOLD, 50000); I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ @@ -3327,17 +3588,7 @@ static void gen6_enable_rps(struct drm_device *dev) gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); - /* requires MSI enabled */ - I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS); - spin_lock_irq(&dev_priv->rps.lock); - /* FIXME: Our interrupt enabling sequence is bonghits. - * dev_priv->rps.pm_iir really should be 0 here. */ - dev_priv->rps.pm_iir = 0; - I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); - I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); - spin_unlock_irq(&dev_priv->rps.lock); - /* unmask all PM interrupts */ - I915_WRITE(GEN6_PMINTRMSK, 0); + gen6_enable_rps_interrupts(dev); rc6vids = 0; ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); @@ -3356,7 +3607,7 @@ static void gen6_enable_rps(struct drm_device *dev) gen6_gt_force_wake_put(dev_priv); } -static void gen6_update_ring_freq(struct drm_device *dev) +void gen6_update_ring_freq(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int min_freq = 15; @@ -3482,7 +3733,7 @@ static void valleyview_setup_pctx(struct drm_device *dev) pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev, pcbr_offset, - -1, + I915_GTT_OFFSET_NONE, pctx_size); goto out; } @@ -3607,14 +3858,7 @@ static void valleyview_enable_rps(struct drm_device *dev) valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay); - /* requires MSI enabled */ - I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS); - spin_lock_irq(&dev_priv->rps.lock); - WARN_ON(dev_priv->rps.pm_iir != 0); - I915_WRITE(GEN6_PMIMR, 0); - spin_unlock_irq(&dev_priv->rps.lock); - /* enable all PM interrupts */ - I915_WRITE(GEN6_PMINTRMSK, 0); + gen6_enable_rps_interrupts(dev); gen6_gt_force_wake_put(dev_priv); } @@ -3708,7 +3952,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); intel_ring_emit(ring, MI_SET_CONTEXT); - intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset | + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) | MI_MM_SPACE_GTT | MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN | @@ -3731,7 +3975,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) return; } - I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN); + I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN); I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); } @@ -4429,7 +4673,10 @@ static void ironlake_init_clock_gating(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; - /* Required for FBC */ + /* + * Required for FBC + * WaFbcDisableDpfcClockGating:ilk + */ dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | ILK_DPFCUNIT_CLOCK_GATE_DISABLE | ILK_DPFDUNIT_CLOCK_GATE_ENABLE; @@ -4466,6 +4713,7 @@ static void 
ironlake_init_clock_gating(struct drm_device *dev) * The bit 7,8,9 of 0x42020. */ if (IS_IRONLAKE_M(dev)) { + /* WaFbcAsynchFlipDisableFbcQueue:ilk */ I915_WRITE(ILK_DISPLAY_CHICKEN1, I915_READ(ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS); @@ -4602,6 +4850,8 @@ static void gen6_init_clock_gating(struct drm_device *dev) * The bit5 and bit7 of 0x42020 * The bit14 of 0x70180 * The bit14 of 0x71180 + * + * WaFbcAsynchFlipDisableFbcQueue:snb */ I915_WRITE(ILK_DISPLAY_CHICKEN1, I915_READ(ILK_DISPLAY_CHICKEN1) | @@ -4614,10 +4864,6 @@ static void gen6_init_clock_gating(struct drm_device *dev) ILK_DPARBUNIT_CLOCK_GATE_ENABLE | ILK_DPFDUNIT_CLOCK_GATE_ENABLE); - /* WaMbcDriverBootEnable:snb */ - I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | - GEN6_MBCTL_ENABLE_BOOT_FETCH); - g4x_disable_trickle_feed(dev); /* The default value should be 0x200 according to docs, but the two @@ -4713,10 +4959,6 @@ static void haswell_init_clock_gating(struct drm_device *dev) I915_WRITE(CACHE_MODE_1, _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); - /* WaMbcDriverBootEnable:hsw */ - I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | - GEN6_MBCTL_ENABLE_BOOT_FETCH); - /* WaSwitchSolVfFArbitrationPriority:hsw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); @@ -4800,10 +5042,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) g4x_disable_trickle_feed(dev); - /* WaMbcDriverBootEnable:ivb */ - I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | - GEN6_MBCTL_ENABLE_BOOT_FETCH); - /* WaVSRefCountFullforceMissDisable:ivb */ gen7_setup_fixed_func_scheduler(dev_priv); @@ -4863,11 +5101,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev) I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); - /* WaMbcDriverBootEnable:vlv */ - I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | - GEN6_MBCTL_ENABLE_BOOT_FETCH); - - /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock * gating disable must be set. Failure to set it results in * flickering pixels due to Z write ordering failures after @@ -5035,7 +5268,7 @@ bool intel_display_power_enabled(struct drm_device *dev, case POWER_DOMAIN_TRANSCODER_B: case POWER_DOMAIN_TRANSCODER_C: return I915_READ(HSW_PWR_WELL_DRIVER) == - (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE); + (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); default: BUG(); } @@ -5048,17 +5281,18 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) uint32_t tmp; tmp = I915_READ(HSW_PWR_WELL_DRIVER); - is_enabled = tmp & HSW_PWR_WELL_STATE; - enable_requested = tmp & HSW_PWR_WELL_ENABLE; + is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED; + enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST; if (enable) { if (!enable_requested) - I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE); + I915_WRITE(HSW_PWR_WELL_DRIVER, + HSW_PWR_WELL_ENABLE_REQUEST); if (!is_enabled) { DRM_DEBUG_KMS("Enabling power well\n"); if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & - HSW_PWR_WELL_STATE), 20)) + HSW_PWR_WELL_STATE_ENABLED), 20)) DRM_ERROR("Timeout enabling power well\n"); } } else { @@ -5178,10 +5412,21 @@ void intel_init_power_well(struct drm_device *dev) /* We're taking over the BIOS, so clear any requests made by it since * the driver is in charge now. */ - if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE) + if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) I915_WRITE(HSW_PWR_WELL_BIOS, 0); } +/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. 
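In the power-well hunks above, HSW_PWR_WELL_ENABLE and HSW_PWR_WELL_STATE become HSW_PWR_WELL_ENABLE_REQUEST and HSW_PWR_WELL_STATE_ENABLED, making the split explicit: the driver owns the request bit, hardware owns the status bit, and __intel_set_power_well() polls the status with wait_for() (20 ms budget when enabling). A small self-contained model of that request/ack handshake; the simulated hardware here satisfies a request after one poll, which a real part obviously need not do:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PWR_WELL_ENABLE_REQUEST (1u << 31)  /* driver-owned */
#define PWR_WELL_STATE_ENABLED  (1u << 30)  /* hardware-owned */

static uint32_t pwr_well_reg;  /* stands in for HSW_PWR_WELL_DRIVER */

/* Simulated hardware: status follows the request once polled. */
static uint32_t read_reg(void)
{
    if (pwr_well_reg & PWR_WELL_ENABLE_REQUEST)
        pwr_well_reg |= PWR_WELL_STATE_ENABLED;
    else
        pwr_well_reg &= ~PWR_WELL_STATE_ENABLED;
    return pwr_well_reg;
}

static bool set_power_well(bool enable)
{
    int retries = 20;  /* models the wait_for() timeout budget */

    if (enable)
        pwr_well_reg |= PWR_WELL_ENABLE_REQUEST;
    else
        pwr_well_reg &= ~PWR_WELL_ENABLE_REQUEST;

    while (retries--) {
        bool on = (read_reg() & PWR_WELL_STATE_ENABLED) != 0;
        if (on == enable)
            return true;  /* status caught up with the request */
    }
    return false;  /* timeout */
}

int main(void)
{
    printf("enable: %s\n", set_power_well(true) ? "ok" : "timeout");
    printf("disable: %s\n", set_power_well(false) ? "ok" : "timeout");
    return 0;
}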
*/ +void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv) +{ + hsw_disable_package_c8(dev_priv); +} + +void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv) +{ + hsw_enable_package_c8(dev_priv); +} + /* Set up chip specific power management-related functions */ void intel_init_pm(struct drm_device *dev) { @@ -5217,8 +5462,12 @@ void intel_init_pm(struct drm_device *dev) /* For FIFO watermark updates */ if (HAS_PCH_SPLIT(dev)) { + intel_setup_wm_latency(dev); + if (IS_GEN5(dev)) { - if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) + if (dev_priv->wm.pri_latency[1] && + dev_priv->wm.spr_latency[1] && + dev_priv->wm.cur_latency[1]) dev_priv->display.update_wm = ironlake_update_wm; else { DRM_DEBUG_KMS("Failed to get proper latency. " @@ -5227,7 +5476,9 @@ void intel_init_pm(struct drm_device *dev) } dev_priv->display.init_clock_gating = ironlake_init_clock_gating; } else if (IS_GEN6(dev)) { - if (SNB_READ_WM0_LATENCY()) { + if (dev_priv->wm.pri_latency[0] && + dev_priv->wm.spr_latency[0] && + dev_priv->wm.cur_latency[0]) { dev_priv->display.update_wm = sandybridge_update_wm; dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; } else { @@ -5237,7 +5488,9 @@ void intel_init_pm(struct drm_device *dev) } dev_priv->display.init_clock_gating = gen6_init_clock_gating; } else if (IS_IVYBRIDGE(dev)) { - if (SNB_READ_WM0_LATENCY()) { + if (dev_priv->wm.pri_latency[0] && + dev_priv->wm.spr_latency[0] && + dev_priv->wm.cur_latency[0]) { dev_priv->display.update_wm = ivybridge_update_wm; dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; } else { @@ -5247,7 +5500,9 @@ void intel_init_pm(struct drm_device *dev) } dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; } else if (IS_HASWELL(dev)) { - if (I915_READ64(MCH_SSKPD)) { + if (dev_priv->wm.pri_latency[0] && + dev_priv->wm.spr_latency[0] && + dev_priv->wm.cur_latency[0]) { dev_priv->display.update_wm = haswell_update_wm; dev_priv->display.update_sprite_wm = haswell_update_sprite_wm; @@ -5310,260 +5565,6 @@ void intel_init_pm(struct drm_device *dev) } } -static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) -{ - u32 gt_thread_status_mask; - - if (IS_HASWELL(dev_priv->dev)) - gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW; - else - gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK; - - /* w/a for a sporadic read returning 0 by waiting for the GT - * thread to wake up. 
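The intel_init_pm() hunks above stop reading watermark latencies out of registers (SNB_READ_WM0_LATENCY(), MLTR_ILK, MCH_SSKPD) at every decision point: intel_setup_wm_latency() caches them once in dev_priv->wm.{pri,spr,cur}_latency, and the update_wm/update_sprite_wm hooks are only installed when the cached values look sane. A minimal sketch of that probe-once-then-bind pattern, with purely illustrative names:

#include <stdint.h>
#include <stdio.h>

struct wm_funcs {
    void (*update_wm)(void);
};

struct priv {
    uint16_t pri_latency[5];
    struct wm_funcs display;
};

static void snb_update_wm(void) { puts("snb watermark update"); }

/* Pretend probe: fill the latency cache once at init time. */
static void setup_wm_latency(struct priv *p, int probe_ok)
{
    p->pri_latency[0] = probe_ok ? 4 : 0;  /* 0 == invalid readout */
}

static void init_pm(struct priv *p)
{
    /* Bind the hook only if the cached latency is usable; otherwise
     * leave it NULL and run without watermark updates, as the driver
     * does when it logs "Failed to get proper latency". */
    if (p->pri_latency[0])
        p->display.update_wm = snb_update_wm;
    else
        p->display.update_wm = NULL;
}

int main(void)
{
    struct priv p = {0};
    setup_wm_latency(&p, 1);
    init_pm(&p);
    if (p.display.update_wm)
        p.display.update_wm();
    return 0;
}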
- */ - if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) - DRM_ERROR("GT thread status wait timed out\n"); -} - -static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE, 0); - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ -} - -static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) -{ - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0, - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - - I915_WRITE_NOTRACE(FORCEWAKE, 1); - POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ - - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1), - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); - - /* WaRsForcewakeWaitTC0:snb */ - __gen6_gt_wait_for_thread_c0(dev_priv); -} - -static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); - /* something from same cacheline, but !FORCEWAKE_MT */ - POSTING_READ(ECOBUS); -} - -static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) -{ - u32 forcewake_ack; - - if (IS_HASWELL(dev_priv->dev)) - forcewake_ack = FORCEWAKE_ACK_HSW; - else - forcewake_ack = FORCEWAKE_MT_ACK; - - if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0, - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); - /* something from same cacheline, but !FORCEWAKE_MT */ - POSTING_READ(ECOBUS); - - if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL), - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); - - /* WaRsForcewakeWaitTC0:ivb,hsw */ - __gen6_gt_wait_for_thread_c0(dev_priv); -} - -/* - * Generally this is called implicitly by the register read function. However, - * if some sequence requires the GT to not power down then this function should - * be called at the beginning of the sequence followed by a call to - * gen6_gt_force_wake_put() at the end of the sequence. 
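The block being deleted here (it reappears verbatim in the new intel_uncore.c later in this diff) implements the forcewake handshake: wait for any stale ack to clear, write the wake request, then wait for the new ack before touching GT registers. A compact standalone model of that two-phase ack protocol, with a fake ack register that tracks the request:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t forcewake_req, forcewake_ack;

/* Simulated hardware: ack follows the request after one poll. */
static uint32_t read_ack(void)
{
    forcewake_ack = forcewake_req;
    return forcewake_ack;
}

static bool wait_ack(uint32_t want, int budget)
{
    while (budget--)
        if ((read_ack() & 1) == want)
            return true;
    return false;
}

static void force_wake_get(void)
{
    /* Phase 1: a previous release may still be draining. */
    if (!wait_ack(0, 2))
        fprintf(stderr, "old ack did not clear\n");

    forcewake_req = 1;  /* request that the GT stay awake */

    /* Phase 2: wait for hardware to acknowledge the request. */
    if (!wait_ack(1, 2))
        fprintf(stderr, "forcewake not acked\n");
}

int main(void)
{
    force_wake_get();
    printf("ack = %u\n", read_ack() & 1);
    return 0;
}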
- */ -void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) -{ - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->gt_lock, irqflags); - if (dev_priv->forcewake_count++ == 0) - dev_priv->gt.force_wake_get(dev_priv); - spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); -} - -void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) -{ - u32 gtfifodbg; - gtfifodbg = I915_READ_NOTRACE(GTFIFODBG); - if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, - "MMIO read or write has been dropped %x\n", gtfifodbg)) - I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); -} - -static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE, 0); - /* something from same cacheline, but !FORCEWAKE */ - POSTING_READ(ECOBUS); - gen6_gt_check_fifodbg(dev_priv); -} - -static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); - /* something from same cacheline, but !FORCEWAKE_MT */ - POSTING_READ(ECOBUS); - gen6_gt_check_fifodbg(dev_priv); -} - -/* - * see gen6_gt_force_wake_get() - */ -void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) -{ - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->gt_lock, irqflags); - if (--dev_priv->forcewake_count == 0) - dev_priv->gt.force_wake_put(dev_priv); - spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); -} - -int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) -{ - int ret = 0; - - if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { - int loop = 500; - u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); - while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { - udelay(10); - fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); - } - if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) - ++ret; - dev_priv->gt_fifo_count = fifo; - } - dev_priv->gt_fifo_count--; - - return ret; -} - -static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); - /* something from same cacheline, but !FORCEWAKE_VLV */ - POSTING_READ(FORCEWAKE_ACK_VLV); -} - -static void vlv_force_wake_get(struct drm_i915_private *dev_priv) -{ - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0, - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); - - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); - I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV, - _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); - - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL), - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n"); - - if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) & - FORCEWAKE_KERNEL), - FORCEWAKE_ACK_TIMEOUT_MS)) - DRM_ERROR("Timed out waiting for media to ack forcewake request.\n"); - - /* WaRsForcewakeWaitTC0:vlv */ - __gen6_gt_wait_for_thread_c0(dev_priv); -} - -static void vlv_force_wake_put(struct drm_i915_private *dev_priv) -{ - I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); - I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV, - _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); - /* The below doubles as a POSTING_READ */ - gen6_gt_check_fifodbg(dev_priv); -} - -void intel_gt_sanitize(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_VALLEYVIEW(dev)) { - vlv_force_wake_reset(dev_priv); - } else if (INTEL_INFO(dev)->gen 
>= 6) { - __gen6_gt_force_wake_reset(dev_priv); - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) - __gen6_gt_force_wake_mt_reset(dev_priv); - } - - /* BIOS often leaves RC6 enabled, but disable it for hw init */ - if (INTEL_INFO(dev)->gen >= 6) - intel_disable_gt_powersave(dev); -} - -void intel_gt_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_VALLEYVIEW(dev)) { - dev_priv->gt.force_wake_get = vlv_force_wake_get; - dev_priv->gt.force_wake_put = vlv_force_wake_put; - } else if (IS_HASWELL(dev)) { - dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get; - dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put; - } else if (IS_IVYBRIDGE(dev)) { - u32 ecobus; - - /* IVB configs may use multi-threaded forcewake */ - - /* A small trick here - if the bios hasn't configured - * MT forcewake, and if the device is in RC6, then - * force_wake_mt_get will not wake the device and the - * ECOBUS read will return zero. Which will be - * (correctly) interpreted by the test below as MT - * forcewake being disabled. - */ - mutex_lock(&dev->struct_mutex); - __gen6_gt_force_wake_mt_get(dev_priv); - ecobus = I915_READ_NOTRACE(ECOBUS); - __gen6_gt_force_wake_mt_put(dev_priv); - mutex_unlock(&dev->struct_mutex); - - if (ecobus & FORCEWAKE_MT_ENABLE) { - dev_priv->gt.force_wake_get = - __gen6_gt_force_wake_mt_get; - dev_priv->gt.force_wake_put = - __gen6_gt_force_wake_mt_put; - } else { - DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); - DRM_INFO("when using vblank-synced partial screen updates.\n"); - dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; - dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; - } - } else if (IS_GEN6(dev)) { - dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; - dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; - } -} - -void intel_pm_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, - intel_gen6_powersave_work); -} - int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) { WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -5666,3 +5667,11 @@ int vlv_freq_opcode(int ddr_freq, int val) return val; } +void intel_pm_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, + intel_gen6_powersave_work); +} + diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 079ef0129e74..f05cceac5a52 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -440,14 +440,14 @@ static int init_ring_common(struct intel_ring_buffer *ring) * registers with the above sequence (the readback of the HEAD registers * also enforces ordering), otherwise the hw might lose the new ring * register values. 
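The gen6_gt_force_wake_get()/put() pair deleted just above (also moved into intel_uncore.c by this series, where the counter becomes uncore.forcewake_count) refcounts forcewake under a spinlock, so only the 0→1 and 1→0 transitions perform the expensive hardware handshake; nested users ride along for free. A userspace model of the refcounting:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fw_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned fw_count;

static void hw_wake(void)  { puts("hw: wake handshake"); }
static void hw_sleep(void) { puts("hw: release"); }

static void force_wake_get(void)
{
    pthread_mutex_lock(&fw_lock);
    if (fw_count++ == 0)   /* only the first user touches hardware */
        hw_wake();
    pthread_mutex_unlock(&fw_lock);
}

static void force_wake_put(void)
{
    pthread_mutex_lock(&fw_lock);
    if (--fw_count == 0)   /* only the last user releases it */
        hw_sleep();
    pthread_mutex_unlock(&fw_lock);
}

int main(void)
{
    force_wake_get();   /* hw: wake handshake */
    force_wake_get();   /* nested: no hardware access */
    force_wake_put();
    force_wake_put();   /* hw: release */
    return 0;
}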
*/ - I915_WRITE_START(ring, obj->gtt_offset); + I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj)); I915_WRITE_CTL(ring, ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); /* If the head is still not zero, the ring is dead */ if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && - I915_READ_START(ring) == obj->gtt_offset && + I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) && (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { DRM_ERROR("%s initialization failed " "ctl %08x head %08x tail %08x start %08x\n", @@ -501,11 +501,11 @@ init_pipe_control(struct intel_ring_buffer *ring) i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - ret = i915_gem_object_pin(obj, 4096, true, false); + ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); if (ret) goto err_unref; - pc->gtt_offset = obj->gtt_offset; + pc->gtt_offset = i915_gem_obj_ggtt_offset(obj); pc->cpu_page = kmap(sg_page(obj->pages->sgl)); if (pc->cpu_page == NULL) { ret = -ENOMEM; @@ -836,11 +836,8 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount.gt++ == 0) { - dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); - } + if (ring->irq_refcount++ == 0) + ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); return true; @@ -854,11 +851,8 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring) unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount.gt == 0) { - dev_priv->gt_irq_mask |= ring->irq_enable_mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); - } + if (--ring->irq_refcount == 0) + ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } @@ -873,7 +867,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount.gt++ == 0) { + if (ring->irq_refcount++ == 0) { dev_priv->irq_mask &= ~ring->irq_enable_mask; I915_WRITE(IMR, dev_priv->irq_mask); POSTING_READ(IMR); @@ -891,7 +885,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring) unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount.gt == 0) { + if (--ring->irq_refcount == 0) { dev_priv->irq_mask |= ring->irq_enable_mask; I915_WRITE(IMR, dev_priv->irq_mask); POSTING_READ(IMR); @@ -910,7 +904,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount.gt++ == 0) { + if (ring->irq_refcount++ == 0) { dev_priv->irq_mask &= ~ring->irq_enable_mask; I915_WRITE16(IMR, dev_priv->irq_mask); POSTING_READ16(IMR); @@ -928,7 +922,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring) unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount.gt == 0) { + if (--ring->irq_refcount == 0) { dev_priv->irq_mask |= ring->irq_enable_mask; I915_WRITE16(IMR, dev_priv->irq_mask); POSTING_READ16(IMR); @@ -1033,16 +1027,14 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring) gen6_gt_force_wake_get(dev_priv); spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount.gt++ == 0) { + if (ring->irq_refcount++ == 0) { if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); else I915_WRITE_IMR(ring, ~ring->irq_enable_mask); - dev_priv->gt_irq_mask &= ~ring->irq_enable_mask; 
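init_ring_common() in the hunk above programs the ring start and control registers and then polls until RING_VALID is set, the start address reads back, and HEAD is zero; if that does not happen within the timeout the ring is declared dead. A skeletal model of the program-then-verify sequence, with the register behavior simulated:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_VALID 1u

struct ring_regs { uint32_t start, ctl, head; };

/* Simulated hardware accepts whatever we program. */
static void hw_program(struct ring_regs *r, uint32_t start, uint32_t size)
{
    r->start = start;
    r->ctl = ((size - 4096) & ~0xfffu) | RING_VALID;
    r->head = 0;
}

static bool ring_init(struct ring_regs *r, uint32_t gtt_offset, uint32_t size)
{
    int budget = 50;  /* stands in for the 50 ms wait_for() */

    hw_program(r, gtt_offset, size);
    while (budget--) {
        if ((r->ctl & RING_VALID) &&
            r->start == gtt_offset &&
            (r->head & ~0x3u) == 0)
            return true;   /* ring came up */
    }
    fprintf(stderr, "ring init failed: ctl %08x head %08x start %08x\n",
            r->ctl, r->head, r->start);
    return false;
}

int main(void)
{
    struct ring_regs r = {0};
    printf("ring %s\n", ring_init(&r, 0x10000, 16 * 4096) ? "up" : "dead");
    return 0;
}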
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); + ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -1057,15 +1049,13 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring) unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount.gt == 0) { + if (--ring->irq_refcount == 0) { if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); else I915_WRITE_IMR(ring, ~0); - dev_priv->gt_irq_mask |= ring->irq_enable_mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask); - POSTING_READ(GTIMR); + ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -1082,14 +1072,12 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring) if (!dev->irq_enabled) return false; - spin_lock_irqsave(&dev_priv->rps.lock, flags); - if (ring->irq_refcount.pm++ == 0) { - u32 pm_imr = I915_READ(GEN6_PMIMR); + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (ring->irq_refcount++ == 0) { I915_WRITE_IMR(ring, ~ring->irq_enable_mask); - I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask); - POSTING_READ(GEN6_PMIMR); + snb_enable_pm_irq(dev_priv, ring->irq_enable_mask); } - spin_unlock_irqrestore(&dev_priv->rps.lock, flags); + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); return true; } @@ -1104,14 +1092,12 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring) if (!dev->irq_enabled) return; - spin_lock_irqsave(&dev_priv->rps.lock, flags); - if (--ring->irq_refcount.pm == 0) { - u32 pm_imr = I915_READ(GEN6_PMIMR); + spin_lock_irqsave(&dev_priv->irq_lock, flags); + if (--ring->irq_refcount == 0) { I915_WRITE_IMR(ring, ~0); - I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask); - POSTING_READ(GEN6_PMIMR); + snb_disable_pm_irq(dev_priv, ring->irq_enable_mask); } - spin_unlock_irqrestore(&dev_priv->rps.lock, flags); + spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } static int @@ -1156,7 +1142,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring, intel_ring_advance(ring); } else { struct drm_i915_gem_object *obj = ring->private; - u32 cs_offset = obj->gtt_offset; + u32 cs_offset = i915_gem_obj_ggtt_offset(obj); if (len > I830_BATCH_LIMIT) return -ENOSPC; @@ -1236,12 +1222,12 @@ static int init_status_page(struct intel_ring_buffer *ring) i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - ret = i915_gem_object_pin(obj, 4096, true, false); + ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); if (ret != 0) { goto err_unref; } - ring->status_page.gfx_addr = obj->gtt_offset; + ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); if (ring->status_page.page_addr == NULL) { ret = -ENOMEM; @@ -1319,7 +1305,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, ring->obj = obj; - ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); + ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false); if (ret) goto err_unref; @@ -1328,7 +1314,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, goto err_unpin; ring->virtual_start = - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), ring->size); if (ring->virtual_start == NULL) { DRM_ERROR("Failed to map ringbuffer.\n"); @@ -1606,6 +1592,8 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) if (INTEL_INFO(ring->dev)->gen >= 6) { I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); 
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); + if (HAS_VEBOX(ring->dev)) + I915_WRITE(RING_SYNC_2(ring->mmio_base), 0); } ring->set_seqno(ring, seqno); @@ -1840,7 +1828,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) return -ENOMEM; } - ret = i915_gem_object_pin(obj, 0, true, false); + ret = i915_gem_obj_ggtt_pin(obj, 0, true, false); if (ret != 0) { drm_gem_object_unreference(&obj->base); DRM_ERROR("Failed to ping batch bo\n"); @@ -2020,8 +2008,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev) ring->add_request = gen6_add_request; ring->get_seqno = gen6_ring_get_seqno; ring->set_seqno = ring_set_seqno; - ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT | - PM_VEBOX_CS_ERROR_INTERRUPT; + ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; ring->irq_get = hsw_vebox_get_irq; ring->irq_put = hsw_vebox_put_irq; ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 799f04c9da45..432ad5311ba6 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -33,11 +33,12 @@ struct intel_hw_status_page { #define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base)) #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) -#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base)) -#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) -#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) - -enum intel_ring_hangcheck_action { wait, active, kick, hung }; +enum intel_ring_hangcheck_action { + HANGCHECK_WAIT, + HANGCHECK_ACTIVE, + HANGCHECK_KICK, + HANGCHECK_HUNG, +}; struct intel_ring_hangcheck { bool deadlock; @@ -78,10 +79,7 @@ struct intel_ring_buffer { */ u32 last_retired_head; - struct { - u32 gt; /* protected by dev_priv->irq_lock */ - u32 pm; /* protected by dev_priv->rps.lock (sucks) */ - } irq_refcount; + unsigned irq_refcount; /* protected by dev_priv->irq_lock */ u32 irq_enable_mask; /* bitmask to enable ring interrupt */ u32 trace_irq_seqno; u32 sync_seqno[I915_NUM_RINGS-1]; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 2628d5622449..317e058fb3cf 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -202,15 +202,14 @@ struct intel_sdvo_connector { u32 cur_dot_crawl, max_dot_crawl; }; -static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder) +static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder) { - return container_of(encoder, struct intel_sdvo, base.base); + return container_of(encoder, struct intel_sdvo, base); } static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector) { - return container_of(intel_attached_encoder(connector), - struct intel_sdvo, base); + return to_sdvo(intel_attached_encoder(connector)); } static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector) @@ -539,7 +538,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, &status)) goto log_fail; - while (status == SDVO_CMD_STATUS_PENDING && --retry) { + while ((status == SDVO_CMD_STATUS_PENDING || + status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) { if (retry < 10) msleep(15); else @@ -964,30 +964,32 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, const struct drm_display_mode *adjusted_mode) { - struct 
dip_infoframe avi_if = { - .type = DIP_TYPE_AVI, - .ver = DIP_VERSION_AVI, - .len = DIP_LEN_AVI, - }; - uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; - struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc); + uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; + struct drm_crtc *crtc = intel_sdvo->base.base.crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + union hdmi_infoframe frame; + int ret; + ssize_t len; + + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, + adjusted_mode); + if (ret < 0) { + DRM_ERROR("couldn't fill AVI infoframe\n"); + return false; + } if (intel_sdvo->rgb_quant_range_selectable) { if (intel_crtc->config.limited_color_range) - avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; + frame.avi.quantization_range = + HDMI_QUANTIZATION_RANGE_LIMITED; else - avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; + frame.avi.quantization_range = + HDMI_QUANTIZATION_RANGE_FULL; } - avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode); - - intel_dip_infoframe_csum(&avi_if); - - /* sdvo spec says that the ecc is handled by the hw, and it looks like - * we must not send the ecc field, either. */ - memcpy(sdvo_data, &avi_if, 3); - sdvo_data[3] = avi_if.checksum; - memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi)); + len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data)); + if (len < 0) + return false; return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF, SDVO_HBUF_TX_VSYNC, @@ -1084,7 +1086,7 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config) static bool intel_sdvo_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) { - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; struct drm_display_mode *mode = &pipe_config->requested_mode; @@ -1154,7 +1156,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder) struct drm_display_mode *adjusted_mode = &intel_crtc->config.adjusted_mode; struct drm_display_mode *mode = &intel_crtc->config.requested_mode; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base); + struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder); u32 sdvox; struct intel_sdvo_in_out_map in_out; struct intel_sdvo_dtd input_dtd, output_dtd; @@ -1292,7 +1294,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); u16 active_outputs = 0; u32 tmp; @@ -1315,7 +1317,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_sdvo_dtd dtd; int encoder_pixel_multiplier = 0; u32 flags = 0, sdvox; @@ -1357,22 +1359,21 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, } /* Cross check the port pixel multiplier with the sdvo encoder state. 
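The SDVO hunk here drops the hand-rolled struct dip_infoframe and builds the AVI infoframe with drm_hdmi_avi_infoframe_from_display_mode() plus hdmi_infoframe_pack(), which take care of the header layout and checksum. For reference, the HDMI infoframe checksum is the byte that makes every byte of the frame — header, checksum slot, and payload — sum to zero modulo 256; a tiny standalone illustration of just that rule:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Checksum: the byte that makes the sum of every byte in the frame
 * (header + checksum + payload) == 0 mod 256. */
static uint8_t infoframe_checksum(const uint8_t *buf, size_t len)
{
    unsigned sum = 0;
    for (size_t i = 0; i < len; i++)
        sum += buf[i];
    return (uint8_t)(256 - (sum & 0xff));
}

int main(void)
{
    /* Illustrative AVI-style frame: 3 header bytes (type, version,
     * length), checksum slot, 13 payload bytes. */
    uint8_t frame[4 + 13] = { 0x82, 0x02, 13, 0 /* checksum */ };
    frame[4] = 0x10;  /* arbitrary payload for the demo */

    frame[3] = 0;  /* checksum slot must be zero while summing */
    frame[3] = infoframe_checksum(frame, sizeof(frame));

    unsigned sum = 0;
    for (size_t i = 0; i < sizeof(frame); i++)
        sum += frame[i];
    printf("checksum %#x, total %% 256 = %u\n", frame[3], sum & 0xff);
    return 0;
}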
*/ - intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1); - switch (val) { - case SDVO_CLOCK_RATE_MULT_1X: - encoder_pixel_multiplier = 1; - break; - case SDVO_CLOCK_RATE_MULT_2X: - encoder_pixel_multiplier = 2; - break; - case SDVO_CLOCK_RATE_MULT_4X: - encoder_pixel_multiplier = 4; - break; + if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, + &val, 1)) { + switch (val) { + case SDVO_CLOCK_RATE_MULT_1X: + encoder_pixel_multiplier = 1; + break; + case SDVO_CLOCK_RATE_MULT_2X: + encoder_pixel_multiplier = 2; + break; + case SDVO_CLOCK_RATE_MULT_4X: + encoder_pixel_multiplier = 4; + break; + } } - if(HAS_PCH_SPLIT(dev)) - return; /* no pixel multiplier readout support yet */ - WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier, "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n", pipe_config->pixel_multiplier, encoder_pixel_multiplier); @@ -1381,7 +1382,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, static void intel_disable_sdvo(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); u32 temp; intel_sdvo_set_active_outputs(intel_sdvo, 0); @@ -1423,7 +1424,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); u32 temp; bool input1, input2; @@ -1584,7 +1585,7 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) { - struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); @@ -1697,6 +1698,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); enum drm_connector_status ret; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, drm_get_connector_name(connector)); + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ATTACHED_DISPLAYS, &response, 2)) @@ -2188,7 +2192,7 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) { - struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); + struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder)); if (intel_sdvo->sdvo_lvds_fixed_mode != NULL) drm_mode_destroy(encoder->dev, diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 1fa5612a4572..78b621cdd108 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -38,7 +38,8 @@ #include "i915_drv.h" static void -vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, +vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, @@ -108,14 +109,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, sprctl |= SP_ENABLE; + intel_update_sprite_watermarks(dplane, crtc, 
src_w, pixel_size, true, + src_w != crtc_w || src_h != crtc_h); + /* Sizes are 0 based */ src_w--; src_h--; crtc_w--; crtc_h--; - intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true); - I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); @@ -133,13 +135,13 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); I915_WRITE(SPCNTR(pipe, plane), sprctl); - I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset + + I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) + sprsurf_offset); POSTING_READ(SPSURF(pipe, plane)); } static void -vlv_disable_plane(struct drm_plane *dplane) +vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) { struct drm_device *dev = dplane->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -152,6 +154,8 @@ vlv_disable_plane(struct drm_plane *dplane) /* Activate double buffered register update */ I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0); POSTING_READ(SPSURF(pipe, plane)); + + intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false); } static int @@ -206,7 +210,8 @@ vlv_get_colorkey(struct drm_plane *dplane, } static void -ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, +ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, @@ -262,14 +267,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, if (IS_HASWELL(dev)) sprctl |= SPRITE_PIPE_CSC_ENABLE; + intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true, + src_w != crtc_w || src_h != crtc_h); + /* Sizes are 0 based */ src_w--; src_h--; crtc_w--; crtc_h--; - intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true); - /* * IVB workaround: must disable low power watermarks for at least * one frame before enabling scaling. 
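Note the ordering change running through these sprite hunks: intel_update_sprite_watermarks() is now called before the "Sizes are 0 based" decrements, and it receives the true source width plus a scaled flag (src dims != crtc dims), so the watermark code works on real sizes rather than the hardware's off-by-one encoding. A two-step illustration of the convention:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    unsigned src_w = 1920, src_h = 1080, crtc_w = 960, crtc_h = 540;

    /* Watermarks want the true size and whether scaling is active. */
    bool scaled = src_w != crtc_w || src_h != crtc_h;
    printf("wm input: width=%u scaled=%d\n", src_w, scaled);

    /* Hardware size registers are 0-based, so decrement afterwards. */
    src_w--; src_h--; crtc_w--; crtc_h--;
    printf("hw size field: %#x\n", (crtc_h << 16) | crtc_w);
    return 0;
}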
LP watermarks can be re-enabled @@ -308,7 +314,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, if (intel_plane->can_scale) I915_WRITE(SPRSCALE(pipe), sprscale); I915_WRITE(SPRCTL(pipe), sprctl); - I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset); + I915_MODIFY_DISPBASE(SPRSURF(pipe), + i915_gem_obj_ggtt_offset(obj) + sprsurf_offset); POSTING_READ(SPRSURF(pipe)); /* potentially re-enable LP watermarks */ @@ -317,7 +324,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, } static void -ivb_disable_plane(struct drm_plane *plane) +ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -335,7 +342,7 @@ ivb_disable_plane(struct drm_plane *plane) dev_priv->sprite_scaling_enabled &= ~(1 << pipe); - intel_update_sprite_watermarks(dev, pipe, 0, 0, false); + intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); /* potentially re-enable LP watermarks */ if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) @@ -397,7 +404,8 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) } static void -ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, +ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, @@ -449,14 +457,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ dvscntr |= DVS_ENABLE; + intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true, + src_w != crtc_w || src_h != crtc_h); + /* Sizes are 0 based */ src_w--; src_h--; crtc_w--; crtc_h--; - intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true); - dvsscale = 0; if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h) dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; @@ -478,12 +487,13 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); I915_WRITE(DVSSCALE(pipe), dvsscale); I915_WRITE(DVSCNTR(pipe), dvscntr); - I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset); + I915_MODIFY_DISPBASE(DVSSURF(pipe), + i915_gem_obj_ggtt_offset(obj) + dvssurf_offset); POSTING_READ(DVSSURF(pipe)); } static void -ilk_disable_plane(struct drm_plane *plane) +ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) { struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -496,6 +506,8 @@ ilk_disable_plane(struct drm_plane *plane) /* Flush double buffered register updates */ I915_MODIFY_DISPBASE(DVSSURF(pipe), 0); POSTING_READ(DVSSURF(pipe)); + + intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false); } static void @@ -818,11 +830,11 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, intel_enable_primary(crtc); if (visible) - intel_plane->update_plane(plane, fb, obj, + intel_plane->update_plane(plane, crtc, fb, obj, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h); else - intel_plane->disable_plane(plane); + intel_plane->disable_plane(plane, crtc); if (disable_primary) intel_disable_primary(crtc); @@ -855,9 +867,14 @@ intel_disable_plane(struct drm_plane *plane) struct intel_plane *intel_plane = to_intel_plane(plane); int ret = 0; - if (plane->crtc) - intel_enable_primary(plane->crtc); - 
intel_plane->disable_plane(plane); + if (!plane->fb) + return 0; + + if (WARN_ON(!plane->crtc)) + return -EINVAL; + + intel_enable_primary(plane->crtc); + intel_plane->disable_plane(plane, plane->crtc); if (!intel_plane->obj) goto out; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 39debd80d190..f2c6d7909ae2 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -823,16 +823,14 @@ static const struct tv_mode tv_modes[] = { }, }; -static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder) +static struct intel_tv *enc_to_tv(struct intel_encoder *encoder) { - return container_of(encoder, struct intel_tv, base.base); + return container_of(encoder, struct intel_tv, base); } static struct intel_tv *intel_attached_tv(struct drm_connector *connector) { - return container_of(intel_attached_encoder(connector), - struct intel_tv, - base); + return enc_to_tv(intel_attached_encoder(connector)); } static bool @@ -908,7 +906,7 @@ static bool intel_tv_compute_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) { - struct intel_tv *intel_tv = enc_to_intel_tv(&encoder->base); + struct intel_tv *intel_tv = enc_to_tv(encoder); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); if (!tv_mode) @@ -921,15 +919,12 @@ intel_tv_compute_config(struct intel_encoder *encoder, return true; } -static void -intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void intel_tv_mode_set(struct intel_encoder *encoder) { - struct drm_device *dev = encoder->dev; + struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc = encoder->crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_tv *intel_tv = enc_to_intel_tv(encoder); + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); + struct intel_tv *intel_tv = enc_to_tv(encoder); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); u32 tv_ctl; u32 hctl1, hctl2, hctl3; @@ -1305,6 +1300,10 @@ intel_tv_detect(struct drm_connector *connector, bool force) struct intel_tv *intel_tv = intel_attached_tv(connector); int type; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", + connector->base.id, drm_get_connector_name(connector), + force); + mode = reported_modes[0]; if (force) { @@ -1483,10 +1482,6 @@ out: return ret; } -static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { - .mode_set = intel_tv_mode_set, -}; - static const struct drm_connector_funcs intel_tv_connector_funcs = { .dpms = intel_connector_dpms, .detect = intel_tv_detect, @@ -1619,6 +1614,7 @@ intel_tv_init(struct drm_device *dev) DRM_MODE_ENCODER_TVDAC); intel_encoder->compute_config = intel_tv_compute_config; + intel_encoder->mode_set = intel_tv_mode_set; intel_encoder->enable = intel_enable_tv; intel_encoder->disable = intel_disable_tv; intel_encoder->get_hw_state = intel_tv_get_hw_state; @@ -1640,7 +1636,6 @@ intel_tv_init(struct drm_device *dev) intel_tv->tv_format = tv_modes[initial_mode].name; - drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs); drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); connector->interlace_allowed = false; connector->doublescan_allowed = false; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c new file mode 100644 index 000000000000..8f5bc869c023 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -0,0 
+1,595 @@ +/* + * Copyright © 2013 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include "i915_drv.h" +#include "intel_drv.h" + +#define FORCEWAKE_ACK_TIMEOUT_MS 2 + +#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__)) +#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__)) + +#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__)) +#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__)) + +#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) +#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__)) + +#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__)) +#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__)) + +#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__) + + +static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) +{ + u32 gt_thread_status_mask; + + if (IS_HASWELL(dev_priv->dev)) + gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW; + else + gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK; + + /* w/a for a sporadic read returning 0 by waiting for the GT + * thread to wake up. 
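The new intel_uncore.c opens with __raw_i915_read/write macros layered directly on readb/readw/readl/readq at an offset from the mapped register BAR, bypassing the tracing and forcewake logic of the public accessors. A userspace analogue over a plain buffer standing in for the mapping (volatile here only mimics the don't-optimize-away property of MMIO):

#include <stdint.h>
#include <stdio.h>

struct dev { volatile uint8_t *regs; };

#define raw_read32(d, reg) \
    (*(volatile uint32_t *)((d)->regs + (reg)))
#define raw_write32(d, reg, val) \
    (*(volatile uint32_t *)((d)->regs + (reg)) = (val))

int main(void)
{
    /* A 4 KiB, 4-byte-aligned buffer stands in for the ioremapped
     * register BAR. */
    static uint32_t bar_storage[1024];
    struct dev d = { (volatile uint8_t *)bar_storage };

    raw_write32(&d, 0x120, 0xdeadbeef);
    printf("reg 0x120 = %#x\n", raw_read32(&d, 0x120));
    return 0;
}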
+ */ + if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) + DRM_ERROR("GT thread status wait timed out\n"); +} + +static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) +{ + __raw_i915_write32(dev_priv, FORCEWAKE, 0); + /* something from same cacheline, but !FORCEWAKE */ + __raw_posting_read(dev_priv, ECOBUS); +} + +static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) +{ + if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0, + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); + + __raw_i915_write32(dev_priv, FORCEWAKE, 1); + /* something from same cacheline, but !FORCEWAKE */ + __raw_posting_read(dev_priv, ECOBUS); + + if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); + + /* WaRsForcewakeWaitTC0:snb */ + __gen6_gt_wait_for_thread_c0(dev_priv); +} + +static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) +{ + __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); + /* something from same cacheline, but !FORCEWAKE_MT */ + __raw_posting_read(dev_priv, ECOBUS); +} + +static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) +{ + u32 forcewake_ack; + + if (IS_HASWELL(dev_priv->dev)) + forcewake_ack = FORCEWAKE_ACK_HSW; + else + forcewake_ack = FORCEWAKE_MT_ACK; + + if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0, + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); + + __raw_i915_write32(dev_priv, FORCEWAKE_MT, + _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + /* something from same cacheline, but !FORCEWAKE_MT */ + __raw_posting_read(dev_priv, ECOBUS); + + if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); + + /* WaRsForcewakeWaitTC0:ivb,hsw */ + __gen6_gt_wait_for_thread_c0(dev_priv); +} + +static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) +{ + u32 gtfifodbg; + + gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG); + if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK, + "MMIO read or write has been dropped %x\n", gtfifodbg)) + __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK); +} + +static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) +{ + __raw_i915_write32(dev_priv, FORCEWAKE, 0); + /* something from same cacheline, but !FORCEWAKE */ + __raw_posting_read(dev_priv, ECOBUS); + gen6_gt_check_fifodbg(dev_priv); +} + +static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) +{ + __raw_i915_write32(dev_priv, FORCEWAKE_MT, + _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + /* something from same cacheline, but !FORCEWAKE_MT */ + __raw_posting_read(dev_priv, ECOBUS); + gen6_gt_check_fifodbg(dev_priv); +} + +static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) +{ + int ret = 0; + + if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { + int loop = 500; + u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES); + while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) { + udelay(10); + fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES); + } + if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES)) + ++ret; + dev_priv->uncore.fifo_count = fifo; + } + 
dev_priv->uncore.fifo_count--; + + return ret; +} + +static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) +{ + __raw_i915_write32(dev_priv, FORCEWAKE_VLV, + _MASKED_BIT_DISABLE(0xffff)); + /* something from same cacheline, but !FORCEWAKE_VLV */ + __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); +} + +static void vlv_force_wake_get(struct drm_i915_private *dev_priv) +{ + if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0, + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); + + __raw_i915_write32(dev_priv, FORCEWAKE_VLV, + _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, + _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); + + if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n"); + + if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) & + FORCEWAKE_KERNEL), + FORCEWAKE_ACK_TIMEOUT_MS)) + DRM_ERROR("Timed out waiting for media to ack forcewake request.\n"); + + /* WaRsForcewakeWaitTC0:vlv */ + __gen6_gt_wait_for_thread_c0(dev_priv); +} + +static void vlv_force_wake_put(struct drm_i915_private *dev_priv) +{ + __raw_i915_write32(dev_priv, FORCEWAKE_VLV, + _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, + _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL)); + /* The below doubles as a POSTING_READ */ + gen6_gt_check_fifodbg(dev_priv); +} + +void intel_uncore_early_sanitize(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (HAS_FPGA_DBG_UNCLAIMED(dev)) + __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); +} + +void intel_uncore_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_VALLEYVIEW(dev)) { + dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get; + dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put; + } else if (IS_HASWELL(dev)) { + dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get; + dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put; + } else if (IS_IVYBRIDGE(dev)) { + u32 ecobus; + + /* IVB configs may use multi-threaded forcewake */ + + /* A small trick here - if the bios hasn't configured + * MT forcewake, and if the device is in RC6, then + * force_wake_mt_get will not wake the device and the + * ECOBUS read will return zero. Which will be + * (correctly) interpreted by the test below as MT + * forcewake being disabled. 
+ */ + mutex_lock(&dev->struct_mutex); + __gen6_gt_force_wake_mt_get(dev_priv); + ecobus = __raw_i915_read32(dev_priv, ECOBUS); + __gen6_gt_force_wake_mt_put(dev_priv); + mutex_unlock(&dev->struct_mutex); + + if (ecobus & FORCEWAKE_MT_ENABLE) { + dev_priv->uncore.funcs.force_wake_get = + __gen6_gt_force_wake_mt_get; + dev_priv->uncore.funcs.force_wake_put = + __gen6_gt_force_wake_mt_put; + } else { + DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); + DRM_INFO("when using vblank-synced partial screen updates.\n"); + dev_priv->uncore.funcs.force_wake_get = + __gen6_gt_force_wake_get; + dev_priv->uncore.funcs.force_wake_put = + __gen6_gt_force_wake_put; + } + } else if (IS_GEN6(dev)) { + dev_priv->uncore.funcs.force_wake_get = + __gen6_gt_force_wake_get; + dev_priv->uncore.funcs.force_wake_put = + __gen6_gt_force_wake_put; + } +} + +void intel_uncore_sanitize(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (IS_VALLEYVIEW(dev)) { + vlv_force_wake_reset(dev_priv); + } else if (INTEL_INFO(dev)->gen >= 6) { + __gen6_gt_force_wake_reset(dev_priv); + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) + __gen6_gt_force_wake_mt_reset(dev_priv); + } + + /* BIOS often leaves RC6 enabled, but disable it for hw init */ + intel_disable_gt_powersave(dev); +} + +/* + * Generally this is called implicitly by the register read function. However, + * if some sequence requires the GT to not power down then this function should + * be called at the beginning of the sequence followed by a call to + * gen6_gt_force_wake_put() at the end of the sequence. + */ +void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) +{ + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + if (dev_priv->uncore.forcewake_count++ == 0) + dev_priv->uncore.funcs.force_wake_get(dev_priv); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +/* + * see gen6_gt_force_wake_get() + */ +void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) +{ + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + if (--dev_priv->uncore.forcewake_count == 0) + dev_priv->uncore.funcs.force_wake_put(dev_priv); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +/* We give fast paths for the really cool registers */ +#define NEEDS_FORCE_WAKE(dev_priv, reg) \ + ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ + ((reg) < 0x40000) && \ + ((reg) != FORCEWAKE)) + +static void +ilk_dummy_write(struct drm_i915_private *dev_priv) +{ + /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up + * the chip from rc6 before touching it for real. MI_MODE is masked, + * hence harmless to write 0 into. 
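The generated i915_read##x() bodies below take dev_priv->uncore.lock and, for registers matching NEEDS_FORCE_WAKE(), grab forcewake around the single access only when nobody else already holds it (forcewake_count == 0). A compact model of that wake-on-demand read wrapper, with illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned forcewake_count;
static uint32_t fake_reg = 0x1234;

static bool needs_force_wake(uint32_t reg)
{
    return reg < 0x40000;   /* models the NEEDS_FORCE_WAKE() range check */
}

static void fw_get(void) { puts("hw: forcewake get"); }
static void fw_put(void) { puts("hw: forcewake put"); }

static uint32_t reg_read32(uint32_t reg)
{
    uint32_t val;

    pthread_mutex_lock(&lock);
    if (needs_force_wake(reg)) {
        if (forcewake_count == 0)  /* nobody holds it: wake for this read */
            fw_get();
        val = fake_reg;
        if (forcewake_count == 0)  /* and drop it again right after */
            fw_put();
    } else {
        val = fake_reg;            /* fast path: no handshake needed */
    }
    pthread_mutex_unlock(&lock);
    return val;
}

int main(void)
{
    printf("val = %#x\n", reg_read32(0x2000));
    return 0;
}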
+
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+	 ((reg) < 0x40000) && \
+	 ((reg) != FORCEWAKE))
+
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+	/* WaIssueDummyWriteToWakeupFromRC6:ilk
+	 * Issue a dummy write to wake up the chip from RC6 before touching it
+	 * for real. MI_MODE is masked, hence harmless to write 0 into.
+	 */
+	__raw_i915_write32(dev_priv, MI_MODE, 0);
+}
+
+static void
+hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+{
+	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
+	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
+			  reg);
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+}
+
+static void
+hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+{
+	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
+	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+		DRM_ERROR("Unclaimed write to %x\n", reg);
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+}
+
+#define __i915_read(x) \
+u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
+	unsigned long irqflags; \
+	u##x val = 0; \
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+	if (dev_priv->info->gen == 5) \
+		ilk_dummy_write(dev_priv); \
+	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+		if (dev_priv->uncore.forcewake_count == 0) \
+			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+		val = __raw_i915_read##x(dev_priv, reg); \
+		if (dev_priv->uncore.forcewake_count == 0) \
+			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+	} else { \
+		val = __raw_i915_read##x(dev_priv, reg); \
+	} \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+	return val; \
+}
+
+__i915_read(8)
+__i915_read(16)
+__i915_read(32)
+__i915_read(64)
+#undef __i915_read
+
+#define __i915_write(x) \
+void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
+	unsigned long irqflags; \
+	u32 __fifo_ret = 0; \
+	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+	} \
+	if (dev_priv->info->gen == 5) \
+		ilk_dummy_write(dev_priv); \
+	hsw_unclaimed_reg_clear(dev_priv, reg); \
+	__raw_i915_write##x(dev_priv, reg, val); \
+	if (unlikely(__fifo_ret)) { \
+		gen6_gt_check_fifodbg(dev_priv); \
+	} \
+	hsw_unclaimed_reg_check(dev_priv, reg); \
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+__i915_write(8)
+__i915_write(16)
+__i915_write(32)
+__i915_write(64)
+#undef __i915_write
+
+static const struct register_whitelist {
+	uint64_t offset;
+	uint32_t size;
+	uint32_t gen_bitmask; /* supported gens: bit N set means gen N is
+			       * supported; 0x10 for gen 4, 0x30 for gens 4
+			       * and 5, etc.
+			       */
+} whitelist[] = {
+	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
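So the single entry above, with gen_bitmask 0xF0, covers gens 4 through 7, and the ioctl below admits a request when (1 << gen) & gen_bitmask is non-zero. A hypothetical helper, not in this patch, only to illustrate the encoding:

/* GEN_RANGE(l, h): set bits l..h inclusive. */
#define GEN_RANGE(l, h)	((0xffffffffU >> (31 - (h))) & ~((1U << (l)) - 1))
/* GEN_RANGE(4, 7) == 0xF0, the RING_TIMESTAMP entry's gen_bitmask. */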
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+			void *data, struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_reg_read *reg = data;
+	const struct register_whitelist *entry = whitelist;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+		if (entry->offset == reg->offset &&
+		    ((1 << INTEL_INFO(dev)->gen) & entry->gen_bitmask))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(whitelist))
+		return -EINVAL;
+
+	switch (entry->size) {
+	case 8:
+		reg->val = I915_READ64(reg->offset);
+		break;
+	case 4:
+		reg->val = I915_READ(reg->offset);
+		break;
+	case 2:
+		reg->val = I915_READ16(reg->offset);
+		break;
+	case 1:
+		reg->val = I915_READ8(reg->offset);
+		break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int i8xx_do_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_I85X(dev))
+		return -ENODEV;
+
+	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+	POSTING_READ(D_STATE);
+
+	if (IS_I830(dev) || IS_845G(dev)) {
+		I915_WRITE(DEBUG_RESET_I830,
+			   DEBUG_RESET_DISPLAY |
+			   DEBUG_RESET_RENDER |
+			   DEBUG_RESET_FULL);
+		POSTING_READ(DEBUG_RESET_I830);
+		msleep(1);
+
+		I915_WRITE(DEBUG_RESET_I830, 0);
+		POSTING_READ(DEBUG_RESET_I830);
+	}
+
+	msleep(1);
+
+	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+	POSTING_READ(D_STATE);
+
+	return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+	u8 gdrst;
+	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+	return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
+
+static int i965_do_reset(struct drm_device *dev)
+{
+	int ret;
+
+	/*
+	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+	 * well as the reset bit (GR/bit 0). Setting the GR bit
+	 * triggers the reset; when done, the hardware will clear it.
+	 */
+	pci_write_config_byte(dev->pdev, I965_GDRST,
+			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
+	ret = wait_for(i965_reset_complete(dev), 500);
+	if (ret)
+		return ret;
+
+	/* We can't reset render&media without also resetting display ... */
+	pci_write_config_byte(dev->pdev, I965_GDRST,
+			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+
+	ret = wait_for(i965_reset_complete(dev), 500);
+	if (ret)
+		return ret;
+
+	pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+
+	return 0;
+}
+
+static int ironlake_do_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 gdrst;
+	int ret;
+
+	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+	gdrst &= ~GRDOM_MASK;
+	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
+	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+	if (ret)
+		return ret;
+
+	/* We can't reset render&media without also resetting display ... */
+	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+	gdrst &= ~GRDOM_MASK;
+	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+}
+
+static int gen6_do_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+	unsigned long irqflags;
+
+	/* Hold uncore.lock across reset to prevent any register access
+	 * with forcewake not set correctly.
+	 */
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	/* Reset the chip */
+
+	/* GEN6_GDRST is not in the gt power well, so there is no need to
+	 * check for fifo space for the write, or to forcewake the chip for
+	 * the read.
+	 */
+	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
+
+	/* Spin waiting for the device to ack the reset request */
+	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+
+	/* If we reset while a user forcewake was held, restore it; otherwise
+	 * turn forcewake off.
+	 */
+	if (dev_priv->uncore.forcewake_count)
+		dev_priv->uncore.funcs.force_wake_get(dev_priv);
+	else
+		dev_priv->uncore.funcs.force_wake_put(dev_priv);
+
+	/* Restore the fifo count */
+	dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+	return ret;
+}
+
+int intel_gpu_reset(struct drm_device *dev)
+{
+	switch (INTEL_INFO(dev)->gen) {
+	case 7:
+	case 6: return gen6_do_reset(dev);
+	case 5: return ironlake_do_reset(dev);
+	case 4: return i965_do_reset(dev);
+	case 2: return i8xx_do_reset(dev);
+	default: return -ENODEV;
+	}
+}
+
+void intel_uncore_clear_errors(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* XXX needs spinlock around caller's grouping */
+	if (HAS_FPGA_DBG_UNCLAIMED(dev))
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
+
+void intel_uncore_check_errors(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
+	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+		DRM_ERROR("Unclaimed register before interrupt\n");
+		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+	}
+}
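For context, the whitelist above is the kernel side of the DRM_I915_REG_READ interface: userspace passes a struct drm_i915_reg_read through drmIoctl() and gets -EINVAL for any non-whitelisted offset. A minimal userspace sketch, assuming the libdrm headers and a device path that may differ on a given system, reading the one whitelisted register (RING_TIMESTAMP(RENDER_RING_BASE), i.e. 0x2358); error handling is elided:

#include <stdio.h>
#include <fcntl.h>
#include <xf86drm.h>	/* drmIoctl(), from libdrm */
#include <i915_drm.h>	/* struct drm_i915_reg_read, DRM_IOCTL_I915_REG_READ */

int main(void)
{
	/* Path is illustrative; use whichever card node i915 drives. */
	int fd = open("/dev/dri/card0", O_RDWR);
	struct drm_i915_reg_read reg = {
		.offset = 0x2358,	/* RING_TIMESTAMP(RENDER_RING_BASE) */
	};

	if (fd < 0 || drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &reg))
		return 1;	/* non-whitelisted offsets fail with EINVAL */

	printf("render ring timestamp: 0x%llx\n",
	       (unsigned long long)reg.val);
	return 0;
}

Keeping the whitelist to a single 64-bit timestamp register limits how much of the uncore machinery unprivileged userspace can poke while still giving mesa/libdrm a monotonic GPU clock.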