From 78501eac34f372bfbeb4e1d9de688c13efa916f6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 12:18:21 +0100 Subject: drm/i915/ringbuffer: Drop the redundant dev from the vfunc interface The ringbuffer keeps a pointer to the parent device, so we can use that instead of passing around the pointer on the stack. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 4 +- drivers/gpu/drm/i915/i915_dma.c | 10 +- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 8 +- drivers/gpu/drm/i915/i915_gem.c | 55 +++-- drivers/gpu/drm/i915/i915_irq.c | 12 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 378 +++++++++++++++----------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 76 +++---- 8 files changed, 247 insertions(+), 298 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1f4f3ceb63c7..c1b04b6056da 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -296,7 +296,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) if (dev_priv->render_ring.status_page.page_addr != NULL) { seq_printf(m, "Current sequence: %d\n", - dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring)); + dev_priv->render_ring.get_seqno(&dev_priv->render_ring)); } else { seq_printf(m, "Current sequence: hws uninitialized\n"); } @@ -356,7 +356,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) atomic_read(&dev_priv->irq_received)); if (dev_priv->render_ring.status_page.page_addr != NULL) { seq_printf(m, "Current sequence: %d\n", - dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring)); + dev_priv->render_ring.get_seqno(&dev_priv->render_ring)); } else { seq_printf(m, "Current sequence: hws uninitialized\n"); } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 7a26f4dd21ae..8a171394a9cf 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -131,9 +131,9 @@ static int i915_dma_cleanup(struct drm_device * dev) drm_irq_uninstall(dev); mutex_lock(&dev->struct_mutex); - intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); - intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); - intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring); + intel_cleanup_ring_buffer(&dev_priv->render_ring); + intel_cleanup_ring_buffer(&dev_priv->bsd_ring); + intel_cleanup_ring_buffer(&dev_priv->blt_ring); mutex_unlock(&dev->struct_mutex); /* Clear the HWS virtual address at teardown */ @@ -221,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev) DRM_DEBUG_DRIVER("hw status page @ %p\n", ring->status_page.page_addr); if (ring->status_page.gfx_addr != 0) - intel_ring_setup_status_page(dev, ring); + intel_ring_setup_status_page(ring); else I915_WRITE(HWS_PGA, dev_priv->dma_status_page); @@ -567,7 +567,7 @@ static int i915_quiescent(struct drm_device * dev) drm_i915_private_t *dev_priv = dev->dev_private; i915_kernel_lost_context(dev); - return intel_wait_ring_buffer(dev, &dev_priv->render_ring, + return intel_wait_ring_buffer(&dev_priv->render_ring, dev_priv->render_ring.size - 8); } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3467dd420760..82c19ab3e1e2 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -473,7 +473,7 @@ int i915_reset(struct drm_device *dev, u8 flags) !dev_priv->mm.suspended) { struct intel_ring_buffer *ring = &dev_priv->render_ring; dev_priv->mm.suspended = 0; - ring->init(dev, ring); + ring->init(ring); 
mutex_unlock(&dev->struct_mutex); drm_irq_uninstall(dev); drm_irq_install(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2c2c19b6285e..6fb225f6b2c8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1219,10 +1219,10 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define I915_VERBOSE 0 #define BEGIN_LP_RING(n) do { \ - drm_i915_private_t *dev_priv__ = dev->dev_private; \ + drm_i915_private_t *dev_priv__ = dev->dev_private; \ if (I915_VERBOSE) \ DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ - intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \ + intel_ring_begin(&dev_priv__->render_ring, (n)); \ } while (0) @@ -1230,7 +1230,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, drm_i915_private_t *dev_priv__ = dev->dev_private; \ if (I915_VERBOSE) \ DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \ - intel_ring_emit(dev, &dev_priv__->render_ring, x); \ + intel_ring_emit(&dev_priv__->render_ring, x); \ } while (0) #define ADVANCE_LP_RING() do { \ @@ -1238,7 +1238,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, if (I915_VERBOSE) \ DRM_DEBUG("ADVANCE_LP_RING %x\n", \ dev_priv__->render_ring.tail); \ - intel_ring_advance(dev, &dev_priv__->render_ring); \ + intel_ring_advance(&dev_priv__->render_ring); \ } while(0) /** diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8eb8453208b5..97bf7c87d857 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1703,7 +1703,7 @@ i915_add_request(struct drm_device *dev, return 0; } - seqno = ring->add_request(dev, ring, 0); + seqno = ring->add_request(ring, 0); ring->outstanding_lazy_request = false; request->seqno = seqno; @@ -1745,8 +1745,7 @@ i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) if (INTEL_INFO(dev)->gen >= 4) flush_domains |= I915_GEM_DOMAIN_SAMPLER; - ring->flush(dev, ring, - I915_GEM_DOMAIN_COMMAND, flush_domains); + ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains); } static inline void @@ -1853,7 +1852,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, WARN_ON(i915_verify_lists(dev)); - seqno = ring->get_seqno(dev, ring); + seqno = ring->get_seqno(ring); while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; @@ -1894,7 +1893,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, if (unlikely (dev_priv->trace_irq_seqno && i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { - ring->user_irq_put(dev, ring); + ring->user_irq_put(ring); dev_priv->trace_irq_seqno = 0; } @@ -1971,7 +1970,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, } BUG_ON(seqno == dev_priv->next_seqno); - if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { + if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { if (HAS_PCH_SPLIT(dev)) ier = I915_READ(DEIER) | I915_READ(GTIER); else @@ -1986,19 +1985,17 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, trace_i915_gem_request_wait_begin(dev, seqno); ring->waiting_gem_seqno = seqno; - ring->user_irq_get(dev, ring); + ring->user_irq_get(ring); if (interruptible) ret = wait_event_interruptible(ring->irq_queue, - i915_seqno_passed( - ring->get_seqno(dev, ring), seqno) + i915_seqno_passed(ring->get_seqno(ring), seqno) || atomic_read(&dev_priv->mm.wedged)); else wait_event(ring->irq_queue, - i915_seqno_passed( - ring->get_seqno(dev, ring), seqno) + i915_seqno_passed(ring->get_seqno(ring), 
seqno) || atomic_read(&dev_priv->mm.wedged)); - ring->user_irq_put(dev, ring); + ring->user_irq_put(ring); ring->waiting_gem_seqno = 0; trace_i915_gem_request_wait_end(dev, seqno); @@ -2008,7 +2005,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, if (ret && ret != -ERESTARTSYS) DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n", - __func__, ret, seqno, ring->get_seqno(dev, ring), + __func__, ret, seqno, ring->get_seqno(ring), dev_priv->next_seqno); /* Directly dispatch request retiring. While we have the work queue @@ -2040,7 +2037,7 @@ i915_gem_flush_ring(struct drm_device *dev, uint32_t invalidate_domains, uint32_t flush_domains) { - ring->flush(dev, ring, invalidate_domains, flush_domains); + ring->flush(ring, invalidate_domains, flush_domains); i915_gem_process_flushing_list(dev, flush_domains, ring); } @@ -3532,17 +3529,17 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) return 0; ret = 0; - if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { + if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { /* And wait for the seqno passing without holding any locks and * causing extra latency for others. This is safe as the irq * generation is designed to be run atomically and so is * lockless. */ - ring->user_irq_get(dev, ring); + ring->user_irq_get(ring); ret = wait_event_interruptible(ring->irq_queue, - i915_seqno_passed(ring->get_seqno(dev, ring), seqno) + i915_seqno_passed(ring->get_seqno(ring), seqno) || atomic_read(&dev_priv->mm.wedged)); - ring->user_irq_put(dev, ring); + ring->user_irq_put(ring); if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) ret = -EIO; @@ -3829,17 +3826,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - intel_ring_begin(dev, ring, 2); - intel_ring_emit(dev, ring, - MI_WAIT_FOR_EVENT | flip_mask); - intel_ring_emit(dev, ring, MI_NOOP); - intel_ring_advance(dev, ring); + intel_ring_begin(ring, 2); + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); } } /* Exec the batchbuffer */ - ret = ring->dispatch_gem_execbuffer(dev, ring, args, - cliprects, exec_offset); + ret = ring->dispatch_execbuffer(ring, args, cliprects, exec_offset); if (ret) { DRM_ERROR("dispatch failed %d\n", ret); goto err; @@ -4520,9 +4515,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev) return 0; cleanup_bsd_ring: - intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + intel_cleanup_ring_buffer(&dev_priv->bsd_ring); cleanup_render_ring: - intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); + intel_cleanup_ring_buffer(&dev_priv->render_ring); cleanup_pipe_control: if (HAS_PIPE_CONTROL(dev)) i915_gem_cleanup_pipe_control(dev); @@ -4534,9 +4529,9 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); - intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); - intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring); + intel_cleanup_ring_buffer(&dev_priv->render_ring); + intel_cleanup_ring_buffer(&dev_priv->bsd_ring); + intel_cleanup_ring_buffer(&dev_priv->blt_ring); if (HAS_PIPE_CONTROL(dev)) i915_gem_cleanup_pipe_control(dev); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 729fd0c91d7b..852a2d848bf4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -297,7 +297,7 @@ static void notify_ring(struct drm_device *dev, struct intel_ring_buffer 
*ring) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 seqno = ring->get_seqno(dev, ring); + u32 seqno = ring->get_seqno(ring); ring->irq_gem_seqno = seqno; trace_i915_gem_request_complete(dev, seqno); wake_up_all(&ring->irq_queue); @@ -586,7 +586,7 @@ static void i915_capture_error_state(struct drm_device *dev) DRM_DEBUG_DRIVER("generating error event\n"); error->seqno = - dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring); + dev_priv->render_ring.get_seqno(&dev_priv->render_ring); error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); error->pipeastat = I915_READ(PIPEASTAT); @@ -1117,7 +1117,7 @@ void i915_trace_irq_get(struct drm_device *dev, u32 seqno) struct intel_ring_buffer *render_ring = &dev_priv->render_ring; if (dev_priv->trace_irq_seqno == 0) - render_ring->user_irq_get(dev, render_ring); + render_ring->user_irq_get(render_ring); dev_priv->trace_irq_seqno = seqno; } @@ -1141,10 +1141,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) if (master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - render_ring->user_irq_get(dev, render_ring); + render_ring->user_irq_get(render_ring); DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, READ_BREADCRUMB(dev_priv) >= irq_nr); - render_ring->user_irq_put(dev, render_ring); + render_ring->user_irq_put(render_ring); if (ret == -EBUSY) { DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", @@ -1338,7 +1338,7 @@ void i915_hangcheck_elapsed(unsigned long data) /* If all work is done then ACTHD clearly hasn't advanced. */ if (list_empty(&dev_priv->render_ring.request_list) || - i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring), + i915_seqno_passed(dev_priv->render_ring.get_seqno(&dev_priv->render_ring), i915_get_tail_request(dev)->seqno)) { bool missed_wakeup = false; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 09f2dc353ae2..d6eba661105f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -49,11 +49,11 @@ static u32 i915_gem_get_seqno(struct drm_device *dev) } static void -render_ring_flush(struct drm_device *dev, - struct intel_ring_buffer *ring, +render_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { + struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; u32 cmd; @@ -112,43 +112,39 @@ render_ring_flush(struct drm_device *dev, #if WATCH_EXEC DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); #endif - intel_ring_begin(dev, ring, 2); - intel_ring_emit(dev, ring, cmd); - intel_ring_emit(dev, ring, MI_NOOP); - intel_ring_advance(dev, ring); + intel_ring_begin(ring, 2); + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); } } -static void ring_write_tail(struct drm_device *dev, - struct intel_ring_buffer *ring, +static void ring_write_tail(struct intel_ring_buffer *ring, u32 value) { - drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = ring->dev->dev_private; I915_WRITE_TAIL(ring, value); } -u32 intel_ring_get_active_head(struct drm_device *dev, - struct intel_ring_buffer *ring) +u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ? + drm_i915_private_t *dev_priv = ring->dev->dev_private; + u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ? 
RING_ACTHD(ring->mmio_base) : ACTHD; return I915_READ(acthd_reg); } -static int init_ring_common(struct drm_device *dev, - struct intel_ring_buffer *ring) +static int init_ring_common(struct intel_ring_buffer *ring) { + drm_i915_private_t *dev_priv = ring->dev->dev_private; + struct drm_i915_gem_object *obj_priv = to_intel_bo(ring->gem_object); u32 head; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - obj_priv = to_intel_bo(ring->gem_object); /* Stop the ring if it's running. */ I915_WRITE_CTL(ring, 0); I915_WRITE_HEAD(ring, 0); - ring->write_tail(dev, ring, 0); + ring->write_tail(ring, 0); /* Initialize the ring. */ I915_WRITE_START(ring, obj_priv->gtt_offset); @@ -192,8 +188,8 @@ static int init_ring_common(struct drm_device *dev, return -EIO; } - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - i915_kernel_lost_context(dev); + if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) + i915_kernel_lost_context(ring->dev); else { ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; @@ -204,29 +200,29 @@ static int init_ring_common(struct drm_device *dev, return 0; } -static int init_render_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) +static int init_render_ring(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - int ret = init_ring_common(dev, ring); - int mode; + struct drm_device *dev = ring->dev; + int ret = init_ring_common(ring); if (INTEL_INFO(dev)->gen > 3) { - mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; + drm_i915_private_t *dev_priv = dev->dev_private; + int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; if (IS_GEN6(dev)) mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; I915_WRITE(MI_MODE, mode); } + return ret; } -#define PIPE_CONTROL_FLUSH(addr) \ +#define PIPE_CONTROL_FLUSH(ring__, addr__) \ do { \ - OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ + intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ PIPE_CONTROL_DEPTH_STALL | 2); \ - OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ - OUT_RING(0); \ - OUT_RING(0); \ + intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ + intel_ring_emit(ring__, 0); \ + intel_ring_emit(ring__, 0); \ } while (0) /** @@ -238,26 +234,26 @@ do { \ * Returned sequence numbers are nonzero on success. */ static u32 -render_ring_add_request(struct drm_device *dev, - struct intel_ring_buffer *ring, +render_ring_add_request(struct intel_ring_buffer *ring, u32 flush_domains) { + struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; u32 seqno; seqno = i915_gem_get_seqno(dev); if (IS_GEN6(dev)) { - BEGIN_LP_RING(6); - OUT_RING(GFX_OP_PIPE_CONTROL | 3); - OUT_RING(PIPE_CONTROL_QW_WRITE | + intel_ring_begin(ring, 6); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3); + intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | PIPE_CONTROL_NOTIFY); - OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - OUT_RING(seqno); - OUT_RING(0); - OUT_RING(0); - ADVANCE_LP_RING(); + intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); } else if (HAS_PIPE_CONTROL(dev)) { u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; @@ -266,46 +262,46 @@ render_ring_add_request(struct drm_device *dev, * PIPE_NOTIFY buffers out to memory before requesting * an interrupt. 
*/ - BEGIN_LP_RING(32); - OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + intel_ring_begin(ring, 32); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); - OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - OUT_RING(seqno); - OUT_RING(0); - PIPE_CONTROL_FLUSH(scratch_addr); + intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + PIPE_CONTROL_FLUSH(ring, scratch_addr); scratch_addr += 128; /* write to separate cachelines */ - PIPE_CONTROL_FLUSH(scratch_addr); + PIPE_CONTROL_FLUSH(ring, scratch_addr); scratch_addr += 128; - PIPE_CONTROL_FLUSH(scratch_addr); + PIPE_CONTROL_FLUSH(ring, scratch_addr); scratch_addr += 128; - PIPE_CONTROL_FLUSH(scratch_addr); + PIPE_CONTROL_FLUSH(ring, scratch_addr); scratch_addr += 128; - PIPE_CONTROL_FLUSH(scratch_addr); + PIPE_CONTROL_FLUSH(ring, scratch_addr); scratch_addr += 128; - PIPE_CONTROL_FLUSH(scratch_addr); - OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_FLUSH(ring, scratch_addr); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | PIPE_CONTROL_NOTIFY); - OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - OUT_RING(seqno); - OUT_RING(0); - ADVANCE_LP_RING(); + intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); } else { - BEGIN_LP_RING(4); - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(seqno); + intel_ring_begin(ring, 4); + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); - OUT_RING(MI_USER_INTERRUPT); - ADVANCE_LP_RING(); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_advance(ring); } return seqno; } static u32 -render_ring_get_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) +render_ring_get_seqno(struct intel_ring_buffer *ring) { + struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; if (HAS_PIPE_CONTROL(dev)) return ((volatile u32 *)(dev_priv->seqno_page))[0]; @@ -314,9 +310,9 @@ render_ring_get_seqno(struct drm_device *dev, } static void -render_ring_get_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) +render_ring_get_user_irq(struct intel_ring_buffer *ring) { + struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; @@ -331,9 +327,9 @@ render_ring_get_user_irq(struct drm_device *dev, } static void -render_ring_put_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) +render_ring_put_user_irq(struct intel_ring_buffer *ring) { + struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; @@ -348,56 +344,41 @@ render_ring_put_user_irq(struct drm_device *dev, spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } -void intel_ring_setup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) +void intel_ring_setup_status_page(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - if (IS_GEN6(dev)) { - I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base), - ring->status_page.gfx_addr); - 
I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */ - } else { - I915_WRITE(RING_HWS_PGA(ring->mmio_base), - ring->status_page.gfx_addr); - I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */ - } - + drm_i915_private_t *dev_priv = ring->dev->dev_private; + u32 mmio = IS_GEN6(ring->dev) ? + RING_HWS_PGA_GEN6(ring->mmio_base) : + RING_HWS_PGA(ring->mmio_base); + I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); + POSTING_READ(mmio); } static void -bsd_ring_flush(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 invalidate_domains, - u32 flush_domains) -{ - intel_ring_begin(dev, ring, 2); - intel_ring_emit(dev, ring, MI_FLUSH); - intel_ring_emit(dev, ring, MI_NOOP); - intel_ring_advance(dev, ring); -} - -static int init_bsd_ring(struct drm_device *dev, - struct intel_ring_buffer *ring) +bsd_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) { - return init_ring_common(dev, ring); + intel_ring_begin(ring, 2); + intel_ring_emit(ring, MI_FLUSH); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); } static u32 -ring_add_request(struct drm_device *dev, - struct intel_ring_buffer *ring, +ring_add_request(struct intel_ring_buffer *ring, u32 flush_domains) { u32 seqno; - seqno = i915_gem_get_seqno(dev); + seqno = i915_gem_get_seqno(ring->dev); - intel_ring_begin(dev, ring, 4); - intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(dev, ring, - I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(dev, ring, seqno); - intel_ring_emit(dev, ring, MI_USER_INTERRUPT); - intel_ring_advance(dev, ring); + intel_ring_begin(ring, 4); + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_advance(ring); DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); @@ -405,53 +386,55 @@ ring_add_request(struct drm_device *dev, } static void -bsd_ring_get_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) +bsd_ring_get_user_irq(struct intel_ring_buffer *ring) { /* do nothing */ } static void -bsd_ring_put_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) +bsd_ring_put_user_irq(struct intel_ring_buffer *ring) { /* do nothing */ } static u32 -ring_status_page_get_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) +ring_status_page_get_seqno(struct intel_ring_buffer *ring) { return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } static int -ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) +ring_dispatch_execbuffer(struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) { uint32_t exec_start; + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - intel_ring_begin(dev, ring, 2); - intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | - (2 << 6) | MI_BATCH_NON_SECURE_I965); - intel_ring_emit(dev, ring, exec_start); - intel_ring_advance(dev, ring); + + intel_ring_begin(ring, 2); + intel_ring_emit(ring, + MI_BATCH_BUFFER_START | + (2 << 6) | + MI_BATCH_NON_SECURE_I965); + intel_ring_emit(ring, exec_start); + intel_ring_advance(ring); + return 0; } static int -render_ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - 
struct drm_clip_rect *cliprects, - uint64_t exec_offset) +render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) { + struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; int nbox = exec->num_cliprects; int i = 0, count; uint32_t exec_start, exec_len; + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; exec_len = (uint32_t) exec->batch_len; @@ -468,46 +451,44 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, } if (IS_I830(dev) || IS_845G(dev)) { - intel_ring_begin(dev, ring, 4); - intel_ring_emit(dev, ring, MI_BATCH_BUFFER); - intel_ring_emit(dev, ring, - exec_start | MI_BATCH_NON_SECURE); - intel_ring_emit(dev, ring, exec_start + exec_len - 4); - intel_ring_emit(dev, ring, 0); + intel_ring_begin(ring, 4); + intel_ring_emit(ring, MI_BATCH_BUFFER); + intel_ring_emit(ring, exec_start | MI_BATCH_NON_SECURE); + intel_ring_emit(ring, exec_start + exec_len - 4); + intel_ring_emit(ring, 0); } else { - intel_ring_begin(dev, ring, 2); + intel_ring_begin(ring, 2); if (INTEL_INFO(dev)->gen >= 4) { - intel_ring_emit(dev, ring, + intel_ring_emit(ring, MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); - intel_ring_emit(dev, ring, exec_start); + intel_ring_emit(ring, exec_start); } else { - intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START + intel_ring_emit(ring, MI_BATCH_BUFFER_START | (2 << 6)); - intel_ring_emit(dev, ring, exec_start | + intel_ring_emit(ring, exec_start | MI_BATCH_NON_SECURE); } } - intel_ring_advance(dev, ring); + intel_ring_advance(ring); } if (IS_G4X(dev) || IS_GEN5(dev)) { - intel_ring_begin(dev, ring, 2); - intel_ring_emit(dev, ring, MI_FLUSH | + intel_ring_begin(ring, 2); + intel_ring_emit(ring, MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP ); - intel_ring_emit(dev, ring, MI_NOOP); - intel_ring_advance(dev, ring); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); } /* XXX breadcrumb */ return 0; } -static void cleanup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) +static void cleanup_status_page(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = ring->dev->dev_private; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; @@ -524,9 +505,9 @@ static void cleanup_status_page(struct drm_device *dev, memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); } -static int init_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring) +static int init_status_page(struct intel_ring_buffer *ring) { + struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; @@ -555,7 +536,7 @@ static int init_status_page(struct drm_device *dev, ring->status_page.obj = obj; memset(ring->status_page.page_addr, 0, PAGE_SIZE); - intel_ring_setup_status_page(dev, ring); + intel_ring_setup_status_page(ring); DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", ring->name, ring->status_page.gfx_addr); @@ -583,7 +564,7 @@ int intel_init_ring_buffer(struct drm_device *dev, INIT_LIST_HEAD(&ring->gpu_write_list); if (I915_NEED_GFX_HWS(dev)) { - ret = init_status_page(dev, ring); + ret = init_status_page(ring); if (ret) return ret; } @@ -616,7 +597,7 @@ int intel_init_ring_buffer(struct drm_device *dev, } ring->virtual_start = ring->map.handle; - ret = ring->init(dev, ring); + ret = ring->init(ring); if (ret) goto err_unmap; @@ 
-639,33 +620,32 @@ err_unref: drm_gem_object_unreference(obj); ring->gem_object = NULL; err_hws: - cleanup_status_page(dev, ring); + cleanup_status_page(ring); return ret; } -void intel_cleanup_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring) +void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) { if (ring->gem_object == NULL) return; - drm_core_ioremapfree(&ring->map, dev); + drm_core_ioremapfree(&ring->map, ring->dev); i915_gem_object_unpin(ring->gem_object); drm_gem_object_unreference(ring->gem_object); ring->gem_object = NULL; - cleanup_status_page(dev, ring); + + cleanup_status_page(ring); } -static int intel_wrap_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring) +static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) { unsigned int *virt; int rem; rem = ring->size - ring->tail; if (ring->space < rem) { - int ret = intel_wait_ring_buffer(dev, ring, rem); + int ret = intel_wait_ring_buffer(ring, rem); if (ret) return ret; } @@ -683,11 +663,11 @@ static int intel_wrap_ring_buffer(struct drm_device *dev, return 0; } -int intel_wait_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring, int n) +int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) { - unsigned long end; + struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long end; trace_i915_ring_wait_begin (dev); end = jiffies + 3 * HZ; @@ -697,7 +677,7 @@ int intel_wait_ring_buffer(struct drm_device *dev, if (ring->space < 0) ring->space += ring->size; if (ring->space >= n) { - trace_i915_ring_wait_end (dev); + trace_i915_ring_wait_end(dev); return 0; } @@ -713,24 +693,24 @@ int intel_wait_ring_buffer(struct drm_device *dev, return -EBUSY; } -void intel_ring_begin(struct drm_device *dev, - struct intel_ring_buffer *ring, +void intel_ring_begin(struct intel_ring_buffer *ring, int num_dwords) { int n = 4*num_dwords; + if (unlikely(ring->tail + n > ring->size)) - intel_wrap_ring_buffer(dev, ring); + intel_wrap_ring_buffer(ring); + if (unlikely(ring->space < n)) - intel_wait_ring_buffer(dev, ring, n); + intel_wait_ring_buffer(ring, n); ring->space -= n; } -void intel_ring_advance(struct drm_device *dev, - struct intel_ring_buffer *ring) +void intel_ring_advance(struct intel_ring_buffer *ring) { ring->tail &= ring->size - 1; - ring->write_tail(dev, ring, ring->tail); + ring->write_tail(ring, ring->tail); } static const struct intel_ring_buffer render_ring = { @@ -745,7 +725,7 @@ static const struct intel_ring_buffer render_ring = { .get_seqno = render_ring_get_seqno, .user_irq_get = render_ring_get_user_irq, .user_irq_put = render_ring_put_user_irq, - .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, + .dispatch_execbuffer = render_ring_dispatch_execbuffer, }; /* ring buffer for bit-stream decoder */ @@ -755,22 +735,21 @@ static const struct intel_ring_buffer bsd_ring = { .id = RING_BSD, .mmio_base = BSD_RING_BASE, .size = 32 * PAGE_SIZE, - .init = init_bsd_ring, + .init = init_ring_common, .write_tail = ring_write_tail, .flush = bsd_ring_flush, .add_request = ring_add_request, .get_seqno = ring_status_page_get_seqno, .user_irq_get = bsd_ring_get_user_irq, .user_irq_put = bsd_ring_put_user_irq, - .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer, + .dispatch_execbuffer = ring_dispatch_execbuffer, }; -static void gen6_bsd_ring_write_tail(struct drm_device *dev, - struct intel_ring_buffer *ring, +static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, u32 value) { - 
drm_i915_private_t *dev_priv = dev->dev_private; + drm_i915_private_t *dev_priv = ring->dev->dev_private; /* Every tail move must follow the sequence below */ I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, @@ -789,36 +768,33 @@ static void gen6_bsd_ring_write_tail(struct drm_device *dev, GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); } -static void gen6_ring_flush(struct drm_device *dev, - struct intel_ring_buffer *ring, +static void gen6_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { - intel_ring_begin(dev, ring, 4); - intel_ring_emit(dev, ring, MI_FLUSH_DW); - intel_ring_emit(dev, ring, 0); - intel_ring_emit(dev, ring, 0); - intel_ring_emit(dev, ring, 0); - intel_ring_advance(dev, ring); + intel_ring_begin(ring, 4); + intel_ring_emit(ring, MI_FLUSH_DW); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); } static int -gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) +gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) { uint32_t exec_start; exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - intel_ring_begin(dev, ring, 2); - intel_ring_emit(dev, ring, - MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); + intel_ring_begin(ring, 2); + intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); /* bit0-7 is the length on GEN6+ */ - intel_ring_emit(dev, ring, exec_start); - intel_ring_advance(dev, ring); + intel_ring_emit(ring, exec_start); + intel_ring_advance(ring); return 0; } @@ -829,27 +805,25 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .id = RING_BSD, .mmio_base = GEN6_BSD_RING_BASE, .size = 32 * PAGE_SIZE, - .init = init_bsd_ring, + .init = init_ring_common, .write_tail = gen6_bsd_ring_write_tail, .flush = gen6_ring_flush, .add_request = ring_add_request, .get_seqno = ring_status_page_get_seqno, .user_irq_get = bsd_ring_get_user_irq, .user_irq_put = bsd_ring_put_user_irq, - .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, + .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, }; /* Blitter support (SandyBridge+) */ static void -blt_ring_get_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) +blt_ring_get_user_irq(struct intel_ring_buffer *ring) { /* do nothing */ } static void -blt_ring_put_user_irq(struct drm_device *dev, - struct intel_ring_buffer *ring) +blt_ring_put_user_irq(struct intel_ring_buffer *ring) { /* do nothing */ } @@ -866,7 +840,7 @@ static const struct intel_ring_buffer gen6_blt_ring = { .get_seqno = ring_status_page_get_seqno, .user_irq_get = blt_ring_get_user_irq, .user_irq_put = blt_ring_put_user_irq, - .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, + .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, }; int intel_init_render_ring_buffer(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index a05aff0e5764..ba4a393e6d16 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -2,7 +2,7 @@ #define _INTEL_RINGBUFFER_H_ struct intel_hw_status_page { - void *page_addr; + u32 __iomem *page_addr; unsigned int gfx_addr; struct drm_gem_object *obj; }; @@ -38,31 +38,23 @@ struct intel_ring_buffer { u32 irq_gem_seqno; /* last 
seq seem at irq time */ u32 waiting_gem_seqno; int user_irq_refcount; - void (*user_irq_get)(struct drm_device *dev, - struct intel_ring_buffer *ring); - void (*user_irq_put)(struct drm_device *dev, - struct intel_ring_buffer *ring); + void (*user_irq_get)(struct intel_ring_buffer *ring); + void (*user_irq_put)(struct intel_ring_buffer *ring); - int (*init)(struct drm_device *dev, - struct intel_ring_buffer *ring); + int (*init)(struct intel_ring_buffer *ring); - void (*write_tail)(struct drm_device *dev, - struct intel_ring_buffer *ring, + void (*write_tail)(struct intel_ring_buffer *ring, u32 value); - void (*flush)(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 invalidate_domains, - u32 flush_domains); - u32 (*add_request)(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 flush_domains); - u32 (*get_seqno)(struct drm_device *dev, - struct intel_ring_buffer *ring); - int (*dispatch_gem_execbuffer)(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset); + void (*flush)(struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains); + u32 (*add_request)(struct intel_ring_buffer *ring, + u32 flush_domains); + u32 (*get_seqno)(struct intel_ring_buffer *ring); + int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset); /** * List of objects currently involved in rendering from the @@ -102,43 +94,31 @@ struct intel_ring_buffer { static inline u32 intel_read_status_page(struct intel_ring_buffer *ring, - int reg) + int reg) { - u32 *regs = ring->status_page.page_addr; - return regs[reg]; + return ioread32(ring->status_page.page_addr + reg); } -int intel_init_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring); -void intel_cleanup_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring); -int intel_wait_ring_buffer(struct drm_device *dev, - struct intel_ring_buffer *ring, int n); -void intel_ring_begin(struct drm_device *dev, - struct intel_ring_buffer *ring, int n); - -static inline void intel_ring_emit(struct drm_device *dev, - struct intel_ring_buffer *ring, - unsigned int data) +void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); +int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); +void intel_ring_begin(struct intel_ring_buffer *ring, int n); + +static inline void intel_ring_emit(struct intel_ring_buffer *ring, + u32 data) { - unsigned int *virt = ring->virtual_start + ring->tail; - *virt = data; + iowrite32(data, ring->virtual_start + ring->tail); ring->tail += 4; } -void intel_ring_advance(struct drm_device *dev, - struct intel_ring_buffer *ring); +void intel_ring_advance(struct intel_ring_buffer *ring); -u32 intel_ring_get_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring); +u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); int intel_init_render_ring_buffer(struct drm_device *dev); int intel_init_bsd_ring_buffer(struct drm_device *dev); int intel_init_blt_ring_buffer(struct drm_device *dev); -u32 intel_ring_get_active_head(struct drm_device *dev, - struct intel_ring_buffer *ring); -void intel_ring_setup_status_page(struct drm_device *dev, - struct intel_ring_buffer *ring); +u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); +void intel_ring_setup_status_page(struct intel_ring_buffer *ring); #endif /* _INTEL_RINGBUFFER_H_ */ -- cgit v1.2.3 From 
e1f99ce6cac3b6a95551642be5ddb5d9c46bea76 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 12:45:26 +0100 Subject: drm/i915: Propagate errors from writing to ringbuffer Preparing the ringbuffer for adding new commands can fail (a timeout whilst waiting for the GPU to catch up and free some space). So check for any potential error before overwriting HEAD with new commands, and propagate that error back to the user where possible. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 119 ++++++++++---------- drivers/gpu/drm/i915/i915_drv.h | 28 +---- drivers/gpu/drm/i915/i915_gem.c | 5 +- drivers/gpu/drm/i915/i915_irq.c | 13 ++- drivers/gpu/drm/i915/intel_display.c | 51 +++++---- drivers/gpu/drm/i915/intel_overlay.c | 30 ++++- drivers/gpu/drm/i915/intel_ringbuffer.c | 189 ++++++++++++++++++-------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 +- 8 files changed, 244 insertions(+), 195 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 8a171394a9cf..02daf4e5c8e6 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -263,7 +263,7 @@ static int i915_dma_init(struct drm_device *dev, void *data, * instruction detected will be given a size of zero, which is a * signal to abort the rest of the buffer. */ -static int do_validate_cmd(int cmd) +static int validate_cmd(int cmd) { switch (((cmd >> 29) & 0x7)) { case 0x0: @@ -321,40 +321,27 @@ static int do_validate_cmd(int cmd) return 0; } -static int validate_cmd(int cmd) -{ - int ret = do_validate_cmd(cmd); - -/* printk("validate_cmd( %x ): %d\n", cmd, ret); */ - - return ret; -} - static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) { drm_i915_private_t *dev_priv = dev->dev_private; - int i; + int i, ret; if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) return -EINVAL; - BEGIN_LP_RING((dwords+1)&~1); - for (i = 0; i < dwords;) { - int cmd, sz; - - cmd = buffer[i]; - - if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) + int sz = validate_cmd(buffer[i]); + if (sz == 0 || i + sz > dwords) return -EINVAL; - - OUT_RING(cmd); - - while (++i, --sz) { - OUT_RING(buffer[i]); - } + i += sz; } + ret = BEGIN_LP_RING((dwords+1)&~1); + if (ret) + return ret; + + for (i = 0; i < dwords; i++) + OUT_RING(buffer[i]); if (dwords & 1) OUT_RING(0); @@ -368,7 +355,9 @@ i915_emit_box(struct drm_device *dev, struct drm_clip_rect *boxes, int i, int DR1, int DR4) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_clip_rect box = boxes[i]; + int ret; if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { DRM_ERROR("Bad box %d,%d..%d,%d\n", @@ -377,22 +366,27 @@ i915_emit_box(struct drm_device *dev, } if (INTEL_INFO(dev)->gen >= 4) { - BEGIN_LP_RING(4); + ret = BEGIN_LP_RING(4); + if (ret) + return ret; + OUT_RING(GFX_OP_DRAWRECT_INFO_I965); OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); OUT_RING(DR4); - ADVANCE_LP_RING(); } else { - BEGIN_LP_RING(6); + ret = BEGIN_LP_RING(6); + if (ret) + return ret; + OUT_RING(GFX_OP_DRAWRECT_INFO); OUT_RING(DR1); OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); OUT_RING(DR4); OUT_RING(0); - ADVANCE_LP_RING(); } + ADVANCE_LP_RING(); return 0; } @@ -412,12 +406,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev) if (master_priv->sarea_priv) master_priv->sarea_priv->last_enqueue = dev_priv->counter; - BEGIN_LP_RING(4); - 
OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->counter); - OUT_RING(0); - ADVANCE_LP_RING(); + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_STORE_DWORD_INDEX); + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(dev_priv->counter); + OUT_RING(0); + ADVANCE_LP_RING(); + } } static int i915_dispatch_cmdbuffer(struct drm_device * dev, @@ -458,8 +453,9 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, drm_i915_batchbuffer_t * batch, struct drm_clip_rect *cliprects) { + struct drm_i915_private *dev_priv = dev->dev_private; int nbox = batch->num_cliprects; - int i = 0, count; + int i, count, ret; if ((batch->start | batch->used) & 0x7) { DRM_ERROR("alignment"); @@ -469,17 +465,19 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, i915_kernel_lost_context(dev); count = nbox ? nbox : 1; - for (i = 0; i < count; i++) { if (i < nbox) { - int ret = i915_emit_box(dev, cliprects, i, - batch->DR1, batch->DR4); + ret = i915_emit_box(dev, cliprects, i, + batch->DR1, batch->DR4); if (ret) return ret; } if (!IS_I830(dev) && !IS_845G(dev)) { - BEGIN_LP_RING(2); + ret = BEGIN_LP_RING(2); + if (ret) + return ret; + if (INTEL_INFO(dev)->gen >= 4) { OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); OUT_RING(batch->start); @@ -487,26 +485,29 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); OUT_RING(batch->start | MI_BATCH_NON_SECURE); } - ADVANCE_LP_RING(); } else { - BEGIN_LP_RING(4); + ret = BEGIN_LP_RING(4); + if (ret) + return ret; + OUT_RING(MI_BATCH_BUFFER); OUT_RING(batch->start | MI_BATCH_NON_SECURE); OUT_RING(batch->start + batch->used - 4); OUT_RING(0); - ADVANCE_LP_RING(); } + ADVANCE_LP_RING(); } if (IS_G4X(dev) || IS_GEN5(dev)) { - BEGIN_LP_RING(2); - OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); - OUT_RING(MI_NOOP); - ADVANCE_LP_RING(); + if (BEGIN_LP_RING(2) == 0) { + OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); + OUT_RING(MI_NOOP); + ADVANCE_LP_RING(); + } } - i915_emit_breadcrumb(dev); + i915_emit_breadcrumb(dev); return 0; } @@ -515,6 +516,7 @@ static int i915_dispatch_flip(struct drm_device * dev) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + int ret; if (!master_priv->sarea_priv) return -EINVAL; @@ -526,12 +528,13 @@ static int i915_dispatch_flip(struct drm_device * dev) i915_kernel_lost_context(dev); - BEGIN_LP_RING(2); + ret = BEGIN_LP_RING(10); + if (ret) + return ret; + OUT_RING(MI_FLUSH | MI_READ_FLUSH); OUT_RING(0); - ADVANCE_LP_RING(); - BEGIN_LP_RING(6); OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); OUT_RING(0); if (dev_priv->current_page == 0) { @@ -542,21 +545,21 @@ static int i915_dispatch_flip(struct drm_device * dev) dev_priv->current_page = 0; } OUT_RING(0); - ADVANCE_LP_RING(); - BEGIN_LP_RING(2); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); OUT_RING(0); + ADVANCE_LP_RING(); master_priv->sarea_priv->last_enqueue = dev_priv->counter++; - BEGIN_LP_RING(4); - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->counter); - OUT_RING(0); - ADVANCE_LP_RING(); + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_STORE_DWORD_INDEX); + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(dev_priv->counter); + OUT_RING(0); + ADVANCE_LP_RING(); + } 
master_priv->sarea_priv->pf_current_page = dev_priv->current_page; return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6fb225f6b2c8..c241468c632e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1216,30 +1216,14 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \ I915_DEBUG_WRITE)) -#define I915_VERBOSE 0 +#define BEGIN_LP_RING(n) \ + intel_ring_begin(&dev_priv->render_ring, (n)) -#define BEGIN_LP_RING(n) do { \ - drm_i915_private_t *dev_priv__ = dev->dev_private; \ - if (I915_VERBOSE) \ - DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \ - intel_ring_begin(&dev_priv__->render_ring, (n)); \ -} while (0) - - -#define OUT_RING(x) do { \ - drm_i915_private_t *dev_priv__ = dev->dev_private; \ - if (I915_VERBOSE) \ - DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \ - intel_ring_emit(&dev_priv__->render_ring, x); \ -} while (0) +#define OUT_RING(x) \ + intel_ring_emit(&dev_priv->render_ring, x) -#define ADVANCE_LP_RING() do { \ - drm_i915_private_t *dev_priv__ = dev->dev_private; \ - if (I915_VERBOSE) \ - DRM_DEBUG("ADVANCE_LP_RING %x\n", \ - dev_priv__->render_ring.tail); \ - intel_ring_advance(&dev_priv__->render_ring); \ -} while(0) +#define ADVANCE_LP_RING() \ + intel_ring_advance(&dev_priv->render_ring) /** * Reads a dword out of the status page, which is written to from the command diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 97bf7c87d857..00e901483ba5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3826,7 +3826,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - intel_ring_begin(ring, 2); + ret = intel_ring_begin(ring, 2); + if (ret) + goto err; + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 852a2d848bf4..8acdd6d857d3 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1101,12 +1101,13 @@ static int i915_emit_irq(struct drm_device * dev) if (master_priv->sarea_priv) master_priv->sarea_priv->last_enqueue = dev_priv->counter; - BEGIN_LP_RING(4); - OUT_RING(MI_STORE_DWORD_INDEX); - OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - OUT_RING(dev_priv->counter); - OUT_RING(MI_USER_INTERRUPT); - ADVANCE_LP_RING(); + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_STORE_DWORD_INDEX); + OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + OUT_RING(dev_priv->counter); + OUT_RING(MI_USER_INTERRUPT); + ADVANCE_LP_RING(); + } return dev_priv->counter; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 990f065374b2..eb4c725e3069 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5090,22 +5090,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, if (ret) goto cleanup_objs; - /* Block clients from rendering to the new back buffer until - * the flip occurs and the object is no longer visible. - */ - atomic_add(1 << intel_crtc->plane, - &to_intel_bo(work->old_fb_obj)->pending_flip); - - work->pending_flip_obj = obj; - obj_priv = to_intel_bo(obj); - if (IS_GEN3(dev) || IS_GEN2(dev)) { u32 flip_mask; /* Can't queue multiple flips, so wait for the previous * one to finish before executing the next. 
*/ - BEGIN_LP_RING(2); + ret = BEGIN_LP_RING(2); + if (ret) + goto cleanup_objs; + if (intel_crtc->plane) flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else @@ -5115,13 +5109,25 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ADVANCE_LP_RING(); } + work->pending_flip_obj = obj; + obj_priv = to_intel_bo(obj); + work->enable_stall_check = true; /* Offset into the new buffer for cases of shared fbs between CRTCs */ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; - BEGIN_LP_RING(4); - switch(INTEL_INFO(dev)->gen) { + ret = BEGIN_LP_RING(4); + if (ret) + goto cleanup_objs; + + /* Block clients from rendering to the new back buffer until + * the flip occurs and the object is no longer visible. + */ + atomic_add(1 << intel_crtc->plane, + &to_intel_bo(work->old_fb_obj)->pending_flip); + + switch (INTEL_INFO(dev)->gen) { case 2: OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); @@ -5850,16 +5856,17 @@ void intel_init_clock_gating(struct drm_device *dev) struct drm_i915_gem_object *obj_priv; obj_priv = to_intel_bo(dev_priv->renderctx); if (obj_priv) { - BEGIN_LP_RING(4); - OUT_RING(MI_SET_CONTEXT); - OUT_RING(obj_priv->gtt_offset | - MI_MM_SPACE_GTT | - MI_SAVE_EXT_STATE_EN | - MI_RESTORE_EXT_STATE_EN | - MI_RESTORE_INHIBIT); - OUT_RING(MI_NOOP); - OUT_RING(MI_FLUSH); - ADVANCE_LP_RING(); + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_SET_CONTEXT); + OUT_RING(obj_priv->gtt_offset | + MI_MM_SPACE_GTT | + MI_SAVE_EXT_STATE_EN | + MI_RESTORE_EXT_STATE_EN | + MI_RESTORE_INHIBIT); + OUT_RING(MI_NOOP); + OUT_RING(MI_FLUSH); + ADVANCE_LP_RING(); + } } } else DRM_DEBUG_KMS("Failed to allocate render context." diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index afb96d25219a..78fa6a249964 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -289,6 +289,7 @@ i830_deactivate_pipe_a(struct drm_device *dev) static int intel_overlay_on(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_request *request; int pipe_a_quirk = 0; int ret; @@ -308,7 +309,12 @@ static int intel_overlay_on(struct intel_overlay *overlay) goto out; } - BEGIN_LP_RING(4); + ret = BEGIN_LP_RING(4); + if (ret) { + kfree(request); + goto out; + } + OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); OUT_RING(overlay->flip_addr | OFC_UPDATE); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); @@ -332,6 +338,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, struct drm_i915_gem_request *request; u32 flip_addr = overlay->flip_addr; u32 tmp; + int ret; BUG_ON(!overlay->active); @@ -347,7 +354,11 @@ static int intel_overlay_continue(struct intel_overlay *overlay, if (tmp & (1 << 17)) DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); - BEGIN_LP_RING(2); + ret = BEGIN_LP_RING(2); + if (ret) { + kfree(request); + return ret; + } OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); OUT_RING(flip_addr); ADVANCE_LP_RING(); @@ -389,8 +400,10 @@ static int intel_overlay_off(struct intel_overlay *overlay, bool interruptible) { struct drm_device *dev = overlay->dev; + struct drm_i915_private *dev_priv = dev->dev_private; u32 flip_addr = overlay->flip_addr; struct drm_i915_gem_request *request; + int ret; BUG_ON(!overlay->active); @@ -404,7 +417,11 @@ static int intel_overlay_off(struct intel_overlay *overlay, * of the hw. 
Do it in both cases */ flip_addr |= OFC_UPDATE; - BEGIN_LP_RING(6); + ret = BEGIN_LP_RING(6); + if (ret) { + kfree(request); + return ret; + } /* wait for overlay to go idle */ OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); OUT_RING(flip_addr); @@ -467,7 +484,12 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) if (request == NULL) return -ENOMEM; - BEGIN_LP_RING(2); + ret = BEGIN_LP_RING(2); + if (ret) { + kfree(request); + return ret; + } + OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); OUT_RING(MI_NOOP); ADVANCE_LP_RING(); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index d6eba661105f..6fe42c1f4ea9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -112,10 +112,11 @@ render_ring_flush(struct intel_ring_buffer *ring, #if WATCH_EXEC DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); #endif - intel_ring_begin(ring, 2); - intel_ring_emit(ring, cmd); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + if (intel_ring_begin(ring, 2) == 0) { + intel_ring_emit(ring, cmd); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + } } } @@ -244,16 +245,17 @@ render_ring_add_request(struct intel_ring_buffer *ring, seqno = i915_gem_get_seqno(dev); if (IS_GEN6(dev)) { - intel_ring_begin(ring, 6); - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3); - intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); + if (intel_ring_begin(ring, 6) == 0) { + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3); + intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | + PIPE_CONTROL_NOTIFY); + intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + } } else if (HAS_PIPE_CONTROL(dev)) { u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; @@ -262,38 +264,40 @@ render_ring_add_request(struct intel_ring_buffer *ring, * PIPE_NOTIFY buffers out to memory before requesting * an interrupt. 
*/ - intel_ring_begin(ring, 32); - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); - intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; /* write to separate cachelines */ - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); + if (intel_ring_begin(ring, 32) == 0) { + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); + intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; /* write to separate cachelines */ + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | + PIPE_CONTROL_NOTIFY); + intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + } } else { - intel_ring_begin(ring, 4); - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, seqno); + if (intel_ring_begin(ring, 4) == 0) { + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); - intel_ring_emit(ring, MI_USER_INTERRUPT); - intel_ring_advance(ring); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_advance(ring); + } } return seqno; } @@ -359,10 +363,11 @@ bsd_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { - intel_ring_begin(ring, 2); - intel_ring_emit(ring, MI_FLUSH); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + if (intel_ring_begin(ring, 2) == 0) { + intel_ring_emit(ring, MI_FLUSH); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + } } static u32 @@ -373,12 +378,13 @@ ring_add_request(struct intel_ring_buffer *ring, seqno = i915_gem_get_seqno(ring->dev); - intel_ring_begin(ring, 4); - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, MI_USER_INTERRUPT); - intel_ring_advance(ring); + if (intel_ring_begin(ring, 4) == 0) { + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 
MI_USER_INTERRUPT); + intel_ring_advance(ring); + } DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); @@ -409,10 +415,14 @@ ring_dispatch_execbuffer(struct intel_ring_buffer *ring, uint64_t exec_offset) { uint32_t exec_start; + int ret; exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - intel_ring_begin(ring, 2); + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + intel_ring_emit(ring, MI_BATCH_BUFFER_START | (2 << 6) | @@ -432,8 +442,8 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; int nbox = exec->num_cliprects; - int i = 0, count; uint32_t exec_start, exec_len; + int i, count, ret; exec_start = (uint32_t) exec_offset + exec->batch_start_offset; exec_len = (uint32_t) exec->batch_len; @@ -441,23 +451,28 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); count = nbox ? nbox : 1; - for (i = 0; i < count; i++) { if (i < nbox) { - int ret = i915_emit_box(dev, cliprects, i, - exec->DR1, exec->DR4); + ret = i915_emit_box(dev, cliprects, i, + exec->DR1, exec->DR4); if (ret) return ret; } if (IS_I830(dev) || IS_845G(dev)) { - intel_ring_begin(ring, 4); + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + intel_ring_emit(ring, MI_BATCH_BUFFER); intel_ring_emit(ring, exec_start | MI_BATCH_NON_SECURE); intel_ring_emit(ring, exec_start + exec_len - 4); intel_ring_emit(ring, 0); } else { - intel_ring_begin(ring, 2); + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + if (INTEL_INFO(dev)->gen >= 4) { intel_ring_emit(ring, MI_BATCH_BUFFER_START | (2 << 6) @@ -474,12 +489,13 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, } if (IS_G4X(dev) || IS_GEN5(dev)) { - intel_ring_begin(ring, 2); - intel_ring_emit(ring, MI_FLUSH | - MI_NO_WRITE_FLUSH | - MI_INVALIDATE_ISP ); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + if (intel_ring_begin(ring, 2) == 0) { + intel_ring_emit(ring, MI_FLUSH | + MI_NO_WRITE_FLUSH | + MI_INVALIDATE_ISP ); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + } } /* XXX breadcrumb */ @@ -693,18 +709,26 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) return -EBUSY; } -void intel_ring_begin(struct intel_ring_buffer *ring, - int num_dwords) +int intel_ring_begin(struct intel_ring_buffer *ring, + int num_dwords) { int n = 4*num_dwords; + int ret; - if (unlikely(ring->tail + n > ring->size)) - intel_wrap_ring_buffer(ring); + if (unlikely(ring->tail + n > ring->size)) { + ret = intel_wrap_ring_buffer(ring); + if (unlikely(ret)) + return ret; + } - if (unlikely(ring->space < n)) - intel_wait_ring_buffer(ring, n); + if (unlikely(ring->space < n)) { + ret = intel_wait_ring_buffer(ring, n); + if (unlikely(ret)) + return ret; + } ring->space -= n; + return 0; } void intel_ring_advance(struct intel_ring_buffer *ring) @@ -772,12 +796,13 @@ static void gen6_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { - intel_ring_begin(ring, 4); - intel_ring_emit(ring, MI_FLUSH_DW); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); + if (intel_ring_begin(ring, 4) == 0) { + intel_ring_emit(ring, MI_FLUSH_DW); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + } } static int @@ -787,10 +812,14 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, 
uint64_t exec_offset) { uint32_t exec_start; + int ret; exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - intel_ring_begin(ring, 2); + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); /* bit0-7 is the length on GEN6+ */ intel_ring_emit(ring, exec_start); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index ba4a393e6d16..35ece2b87b02 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -100,8 +100,8 @@ intel_read_status_page(struct intel_ring_buffer *ring, } void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); -int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); -void intel_ring_begin(struct intel_ring_buffer *ring, int n); +int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); +int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); static inline void intel_ring_emit(struct intel_ring_buffer *ring, u32 data) -- cgit v1.2.3 From 7e318e18f248416a3d32a1649a9b4538e7f8b0eb Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 13:43:39 +0100 Subject: drm/i915: Move object to GPU domains after dispatching execbuffer In the event that we fail to dispatch the execbuffer, for example if there is insufficient space on the ring, we were leaving the objects in an inconsistent state. Notably they were marked as being in the GPU write domain, but were not added to the ring or any list. This would lead to inevitable oops: [ 1010.522940] [drm:i915_gem_do_execbuffer] *ERROR* dispatch failed -16 [ 1010.523055] BUG: unable to handle kernel NULL pointer dereference at 0000000000000088 [ 1010.523097] IP: [] i915_gem_flush_ring+0x26/0x140 [ 1010.523120] PGD 14cf2f067 PUD 14ce04067 PMD 0 [ 1010.523140] Oops: 0000 [#1] SMP [ 1010.523154] last sysfs file: /sys/devices/virtual/vc/vcsa2/uevent [ 1010.523173] CPU 0 [ 1010.523183] Pid: 716, comm: X Not tainted 2.6.36+ #34 LosLunas CRB/SandyBridge Platform [ 1010.523206] RIP: 0010:[] [] i915_gem_flush_ring+0x26/0x140 [ 1010.523233] RSP: 0018:ffff88014bf97cd8 EFLAGS: 00010296 [ 1010.523249] RAX: ffff88014e2d1808 RBX: 0000000000000000 RCX: 0000000000000000 [ 1010.523270] RDX: 0000000000000002 RSI: 0000000000000000 RDI: 0000000000000000 [ 1010.523290] RBP: ffff88014e2d1000 R08: 0000000000000002 R09: 00000000400c645f [ 1010.523311] R10: 0000000000000001 R11: 0000000000000246 R12: 0000000000000002 [ 1010.523331] R13: ffff88014e29a000 R14: 00000000000000c8 R15: ffffffff8162eb28 [ 1010.523352] FS: 00007fc62379d700(0000) GS:ffff88001fc00000(0000) knlGS:0000000000000000 [ 1010.523375] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 1010.523392] CR2: 0000000000000088 CR3: 000000014bf87000 CR4: 00000000000406f0 [ 1010.523412] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 1010.523433] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 [ 1010.523454] Process X (pid: 716, threadinfo ffff88014bf96000, task ffff88014cc1ee40) [ 1010.523475] Stack: [ 1010.523483] ffff88014d5199c0 0000000000000200 0000000000000000 ffff88014bcc6400 [ 1010.523509] <0> 0000000000000000 0000000000000001 ffff88014e29a000 ffff88014bcc6400 [ 1010.523537] <0> ffffffff8162eb28 ffffffff8122faa8 ffff88014e29a000 ffff88014bcc6400 [ 1010.523568] Call Trace: [ 1010.523578] [] ? i915_gem_object_flush_gpu_write_domain+0x48/0x80 [ 1010.523601] [] ? 
i915_gem_object_set_to_gtt_domain+0x2e/0xb0 [ 1010.523623] [] ? i915_gem_set_domain_ioctl+0xdb/0x1f0 [ 1010.523644] [] ? drm_ioctl+0x3d1/0x460 [ 1010.523660] [] ? i915_gem_set_domain_ioctl+0x0/0x1f0 [ 1010.523682] [] ? vma_prio_tree_insert+0x28/0x120 [ 1010.523701] [] ? vma_link+0x99/0xf0 [ 1010.523717] [] ? mmap_region+0x1ed/0x4f0 [ 1010.523734] [] ? do_vfs_ioctl+0x9f/0x580 [ 1010.523750] [] ? sys_ioctl+0x49/0x80 [ 1010.523767] [] ? system_call_fastpath+0x16/0x1b [ 1010.523785] Code: 00 00 00 00 00 41 57 89 ce 41 56 41 55 41 54 45 89 c4 55 48 89 fd 53 48 89 d3 44 89 c2 48 89 df 4c 8d b3 c8 00 00 00 48 83 ec 18 93 88 00 00 00 48 8b 83 c8 00 00 00 4c 8b bd 30 03 00 00 48 [ 1010.523946] RIP [] i915_gem_flush_ring+0x26/0x140 [ 1010.523966] RSP [ 1010.523977] CR2: 0000000000000088 Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 68 ++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 00e901483ba5..580244c1e793 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3085,9 +3085,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t invalidate_domains = 0; uint32_t flush_domains = 0; - uint32_t old_read_domains; - - intel_mark_busy(dev, obj); /* * If the object isn't moving to a new write domain, @@ -3095,8 +3092,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, */ if (obj->pending_write_domain == 0) obj->pending_read_domains |= obj->read_domains; - else - obj_priv->dirty = 1; /* * Flush the current write domain if @@ -3118,8 +3113,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) i915_gem_clflush_object(obj); - old_read_domains = obj->read_domains; - /* The actual obj->write_domain will be updated with * pending_write_domain after we emit the accumulated flush for all * of our domain changes in execbuffers (which clears objects' @@ -3128,7 +3121,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, */ if (flush_domains == 0 && obj->pending_write_domain == 0) obj->pending_write_domain = obj->write_domain; - obj->read_domains = obj->pending_read_domains; dev->invalidate_domains |= invalidate_domains; dev->flush_domains |= flush_domains; @@ -3136,10 +3128,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, dev_priv->mm.flush_rings |= obj_priv->ring->id; if (invalidate_domains & I915_GEM_GPU_DOMAINS) dev_priv->mm.flush_rings |= ring->id; - - trace_i915_gem_object_change_domain(obj, - old_read_domains, - obj->write_domain); } /** @@ -3602,7 +3590,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, drm_i915_private_t *dev_priv = dev->dev_private; struct drm_gem_object **object_list = NULL; struct drm_gem_object *batch_obj; - struct drm_i915_gem_object *obj_priv; struct drm_clip_rect *cliprects = NULL; struct drm_i915_gem_request *request = NULL; int ret, i, flips; @@ -3697,6 +3684,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, /* Look up object handles */ for (i = 0; i < args->buffer_count; i++) { + struct drm_i915_gem_object *obj_priv; + object_list[i] = drm_gem_object_lookup(dev, file, exec_list[i].handle); if (object_list[i] == NULL) { @@ -3761,13 +3750,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, dev->invalidate_domains = 0; dev->flush_domains = 0; dev_priv->mm.flush_rings = 0; - 
- for (i = 0; i < args->buffer_count; i++) { - struct drm_gem_object *obj = object_list[i]; - - /* Compute new gpu domains and update invalidate/flush */ - i915_gem_object_set_to_gpu_domain(obj, ring); - } + for (i = 0; i < args->buffer_count; i++) + i915_gem_object_set_to_gpu_domain(object_list[i], ring); if (dev->invalidate_domains | dev->flush_domains) { #if WATCH_EXEC @@ -3782,15 +3766,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, dev_priv->mm.flush_rings); } - for (i = 0; i < args->buffer_count; i++) { - struct drm_gem_object *obj = object_list[i]; - uint32_t old_write_domain = obj->write_domain; - obj->write_domain = obj->pending_write_domain; - trace_i915_gem_object_change_domain(obj, - obj->read_domains, - old_write_domain); - } - #if WATCH_COHERENCY for (i = 0; i < args->buffer_count; i++) { i915_gem_object_check_coherency(object_list[i], @@ -3843,30 +3818,41 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - /* - * Ensure that the commands in the batch buffer are - * finished before the interrupt fires - */ - i915_retire_commands(dev, ring); - for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; + obj->read_domains = obj->pending_read_domains; + obj->write_domain = obj->pending_write_domain; + i915_gem_object_move_to_active(obj, ring); - if (obj->write_domain) - list_move_tail(&to_intel_bo(obj)->gpu_write_list, + if (obj->write_domain) { + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + obj_priv->dirty = 1; + list_move_tail(&obj_priv->gpu_write_list, &ring->gpu_write_list); + intel_mark_busy(dev, obj); + } + + trace_i915_gem_object_change_domain(obj, + obj->read_domains, + obj->write_domain); } + /* + * Ensure that the commands in the batch buffer are + * finished before the interrupt fires + */ + i915_retire_commands(dev, ring); + i915_add_request(dev, file, request, ring); request = NULL; err: for (i = 0; i < args->buffer_count; i++) { - if (object_list[i]) { - obj_priv = to_intel_bo(object_list[i]); - obj_priv->in_execbuffer = false; - } + if (object_list[i] == NULL) + break; + + to_intel_bo(object_list[i])->in_execbuffer = false; drm_gem_object_unreference(object_list[i]); } -- cgit v1.2.3 From 893eead092f14e42cac054a394a86e3c6e016b68 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 14:44:35 +0100 Subject: drm/i915: Fix hangcheck to handle multiple rings Currently, we believe the GPU is idle if just the RENDER ring is idle. This is obviously wrong if we are only using either the BLT or the BSD rings, and so masks genuine hangs.
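For illustration, the new idle test boils down to the following sketch; ring_idle here is shorthand for the i915_hangcheck_ring_idle helper in the diff below, and the wake-up of stuck waiters (the err flag) is omitted:

static bool ring_idle(struct intel_ring_buffer *ring)
{
	/* Idle: no outstanding requests, or the hardware seqno has
	 * already passed the seqno of the last queued request. */
	return list_empty(&ring->request_list) ||
	       i915_seqno_passed(ring->get_seqno(ring),
				 ring_last_seqno(ring));
}

/* The GPU is only idle if the render, BSD and BLT rings all are: */
if (ring_idle(&dev_priv->render_ring) &&
    ring_idle(&dev_priv->bsd_ring) &&
    ring_idle(&dev_priv->blt_ring))
	dev_priv->hangcheck_count = 0;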
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 74 +++++++++++++++++++---------------------- 1 file changed, 35 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8acdd6d857d3..23b28528c642 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1307,12 +1307,29 @@ int i915_vblank_swap(struct drm_device *dev, void *data, return -EINVAL; } -static struct drm_i915_gem_request * -i915_get_tail_request(struct drm_device *dev) +static u32 +ring_last_seqno(struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; - return list_entry(dev_priv->render_ring.request_list.prev, - struct drm_i915_gem_request, list); + return list_entry(ring->request_list.prev, + struct drm_i915_gem_request, list)->seqno; +} + +static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) +{ + if (list_empty(&ring->request_list) || + i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { + /* Issue a wake-up to catch stuck h/w. */ + if (ring->waiting_gem_seqno && waitqueue_active(&ring->irq_queue)) { + DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n", + ring->name, + ring->waiting_gem_seqno, + ring->get_seqno(ring)); + wake_up_all(&ring->irq_queue); + *err = true; + } + return true; + } + return false; } /** @@ -1326,6 +1343,17 @@ void i915_hangcheck_elapsed(unsigned long data) struct drm_device *dev = (struct drm_device *)data; drm_i915_private_t *dev_priv = dev->dev_private; uint32_t acthd, instdone, instdone1; + bool err = false; + + /* If all work is done then ACTHD clearly hasn't advanced. */ + if (i915_hangcheck_ring_idle(&dev_priv->render_ring, &err) && + i915_hangcheck_ring_idle(&dev_priv->bsd_ring, &err) && + i915_hangcheck_ring_idle(&dev_priv->blt_ring, &err)) { + dev_priv->hangcheck_count = 0; + if (err) + goto repeat; + return; + } if (INTEL_INFO(dev)->gen < 4) { acthd = I915_READ(ACTHD); @@ -1337,38 +1365,6 @@ void i915_hangcheck_elapsed(unsigned long data) instdone1 = I915_READ(INSTDONE1); } - /* If all work is done then ACTHD clearly hasn't advanced. */ - if (list_empty(&dev_priv->render_ring.request_list) || - i915_seqno_passed(dev_priv->render_ring.get_seqno(&dev_priv->render_ring), - i915_get_tail_request(dev)->seqno)) { - bool missed_wakeup = false; - - dev_priv->hangcheck_count = 0; - - /* Issue a wake-up to catch stuck h/w. */ - if (dev_priv->render_ring.waiting_gem_seqno && - waitqueue_active(&dev_priv->render_ring.irq_queue)) { - wake_up_all(&dev_priv->render_ring.irq_queue); - missed_wakeup = true; - } - - if (dev_priv->bsd_ring.waiting_gem_seqno && - waitqueue_active(&dev_priv->bsd_ring.irq_queue)) { - wake_up_all(&dev_priv->bsd_ring.irq_queue); - missed_wakeup = true; - } - - if (dev_priv->blt_ring.waiting_gem_seqno && - waitqueue_active(&dev_priv->blt_ring.irq_queue)) { - wake_up_all(&dev_priv->blt_ring.irq_queue); - missed_wakeup = true; - } - - if (missed_wakeup) - DRM_ERROR("Hangcheck timer elapsed... 
GPU idle, missed IRQ.\n"); - return; - } - if (dev_priv->last_acthd == acthd && dev_priv->last_instdone == instdone && dev_priv->last_instdone1 == instdone1) { @@ -1385,7 +1381,7 @@ void i915_hangcheck_elapsed(unsigned long data) if (tmp & RING_WAIT) { I915_WRITE(PRB0_CTL, tmp); POSTING_READ(PRB0_CTL); - goto out; + goto repeat; } } @@ -1400,7 +1396,7 @@ void i915_hangcheck_elapsed(unsigned long data) dev_priv->last_instdone1 = instdone1; } -out: +repeat: /* Reset timer case chip hangs without another request being added */ mod_timer(&dev_priv->hangcheck_timer, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); -- cgit v1.2.3 From c2c347a9eeda1b9b69c8fc393fd933747fbb2e11 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 15:11:53 +0100 Subject: drm/i915/debugfs: Include info for the other rings The render ring is not alone any more! And the other rings are just as troublesome... Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 134 ++++++++++++++++++++++---------- drivers/gpu/drm/i915/intel_ringbuffer.h | 2 +- 2 files changed, 94 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index c1b04b6056da..c5aa6bee3abb 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -48,6 +48,12 @@ enum { DEFERRED_FREE_LIST, }; +enum { + RENDER_RING, + BSD_RING, + BLT_RING, +}; + static const char *yesno(int v) { return v ? "yes" : "no"; @@ -265,21 +271,51 @@ static int i915_gem_request_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_request *gem_request; - int ret; + int ret, count; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - seq_printf(m, "Request:\n"); - list_for_each_entry(gem_request, &dev_priv->render_ring.request_list, - list) { - seq_printf(m, " %d @ %d\n", - gem_request->seqno, - (int) (jiffies - gem_request->emitted_jiffies)); + count = 0; + if (!list_empty(&dev_priv->render_ring.request_list)) { + seq_printf(m, "Render requests:\n"); + list_for_each_entry(gem_request, + &dev_priv->render_ring.request_list, + list) { + seq_printf(m, " %d @ %d\n", + gem_request->seqno, + (int) (jiffies - gem_request->emitted_jiffies)); + } + count++; + } + if (!list_empty(&dev_priv->bsd_ring.request_list)) { + seq_printf(m, "BSD requests:\n"); + list_for_each_entry(gem_request, + &dev_priv->bsd_ring.request_list, + list) { + seq_printf(m, " %d @ %d\n", + gem_request->seqno, + (int) (jiffies - gem_request->emitted_jiffies)); + } + count++; + } + if (!list_empty(&dev_priv->blt_ring.request_list)) { + seq_printf(m, "BLT requests:\n"); + list_for_each_entry(gem_request, + &dev_priv->blt_ring.request_list, + list) { + seq_printf(m, " %d @ %d\n", + gem_request->seqno, + (int) (jiffies - gem_request->emitted_jiffies)); + } + count++; } mutex_unlock(&dev->struct_mutex); + if (count == 0) + seq_printf(m, "No requests\n"); + return 0; } @@ -354,11 +390,17 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } seq_printf(m, "Interrupts received: %d\n", atomic_read(&dev_priv->irq_received)); - if (dev_priv->render_ring.status_page.page_addr != NULL) { - seq_printf(m, "Current sequence: %d\n", + if (dev_priv->render_ring.get_seqno) { + seq_printf(m, "Current sequence (render): %d\n", dev_priv->render_ring.get_seqno(&dev_priv->render_ring)); - } else { - seq_printf(m, "Current sequence: hws uninitialized\n"); + } + if 
(dev_priv->bsd_ring.get_seqno) { + seq_printf(m, "Current sequence (BSD): %d\n", + dev_priv->bsd_ring.get_seqno(&dev_priv->bsd_ring)); + } + if (dev_priv->blt_ring.get_seqno) { + seq_printf(m, "Current sequence (BLT): %d\n", + dev_priv->blt_ring.get_seqno(&dev_priv->blt_ring)); } seq_printf(m, "Waiter sequence: %d\n", dev_priv->mm.waiting_gem_seqno); @@ -385,24 +427,12 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_gem_object *obj = dev_priv->fence_regs[i].obj; - if (obj == NULL) { - seq_printf(m, "Fenced object[%2d] = unused\n", i); - } else { - struct drm_i915_gem_object *obj_priv; - - obj_priv = to_intel_bo(obj); - seq_printf(m, "Fenced object[%2d] = %p: %s " - "%08x %08zx %08x %s %08x %08x %d", - i, obj, get_pin_flag(obj_priv), - obj_priv->gtt_offset, - obj->size, obj_priv->stride, - get_tiling_flag(obj_priv), - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - if (obj->name) - seq_printf(m, " (name: %d)", obj->name); - seq_printf(m, "\n"); - } + seq_printf(m, "Fenced object[%2d] = ", i); + if (obj == NULL) + seq_printf(m, "unused"); + else + describe_obj(m, to_intel_bo(obj)); + seq_printf(m, "\n"); } mutex_unlock(&dev->struct_mutex); @@ -477,19 +507,27 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; int ret; + switch ((uintptr_t)node->info_ent->data) { + case RENDER_RING: ring = &dev_priv->render_ring; break; + case BSD_RING: ring = &dev_priv->bsd_ring; break; + case BLT_RING: ring = &dev_priv->blt_ring; break; + default: return -EINVAL; + } + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - if (!dev_priv->render_ring.gem_object) { + if (!ring->gem_object) { seq_printf(m, "No ringbuffer setup\n"); } else { - u8 *virt = dev_priv->render_ring.virtual_start; + u8 *virt = ring->virtual_start; uint32_t off; - for (off = 0; off < dev_priv->render_ring.size; off += 4) { + for (off = 0; off < ring->size; off += 4) { uint32_t *ptr = (uint32_t *)(virt + off); seq_printf(m, "%08x : %08x\n", off, *ptr); } @@ -504,15 +542,25 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - unsigned int head, tail; + struct intel_ring_buffer *ring; + + switch ((uintptr_t)node->info_ent->data) { + case RENDER_RING: ring = &dev_priv->render_ring; break; + case BSD_RING: ring = &dev_priv->bsd_ring; break; + case BLT_RING: ring = &dev_priv->blt_ring; break; + default: return -EINVAL; + } - head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; + if (ring->size == 0) + return 0; - seq_printf(m, "RingHead : %08x\n", head); - seq_printf(m, "RingTail : %08x\n", tail); - seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size); - seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? 
ACTHD_I965 : ACTHD)); + seq_printf(m, "Ring %s:\n", ring->name); + seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); + seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); + seq_printf(m, " Size : %08x\n", ring->size); + seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring)); + seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); + seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); return 0; } @@ -1029,8 +1077,12 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, {"i915_gem_hws", i915_hws_info, 0}, - {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, - {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, + {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RENDER_RING}, + {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RENDER_RING}, + {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BSD_RING}, + {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BSD_RING}, + {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BLT_RING}, + {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BLT_RING}, {"i915_batchbuffers", i915_batchbuffer_info, 0}, {"i915_error_state", i915_error_state, 0}, {"i915_rstdby_delays", i915_rstdby_delays, 0}, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 35ece2b87b02..84f6919de8e0 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -25,7 +25,6 @@ struct intel_ring_buffer { RING_BLT = 0x4, } id; u32 mmio_base; - unsigned long size; void *virtual_start; struct drm_device *dev; struct drm_gem_object *gem_object; @@ -33,6 +32,7 @@ struct intel_ring_buffer { unsigned int head; unsigned int tail; int space; + int size; struct intel_hw_status_page status_page; u32 irq_gem_seqno; /* last seq seem at irq time */ -- cgit v1.2.3 From b2223497b44a4701d1be873d1e9453d7f720043b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 15:27:33 +0100 Subject: drm/i915: Remove the confusing global waiting/irq seqno Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 44 ++++++++++++++------------------- drivers/gpu/drm/i915/i915_drv.h | 10 -------- drivers/gpu/drm/i915/i915_gem.c | 4 +-- drivers/gpu/drm/i915/i915_irq.c | 6 ++--- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 +-- 5 files changed, 26 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index c5aa6bee3abb..beb3de7921b5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -319,6 +319,19 @@ static int i915_gem_request_info(struct seq_file *m, void *data) return 0; } +static void i915_ring_seqno_info(struct seq_file *m, + struct intel_ring_buffer *ring) +{ + if (ring->get_seqno) { + seq_printf(m, "Current sequence (%s): %d\n", + ring->name, ring->get_seqno(ring)); + seq_printf(m, "Waiter sequence (%s): %d\n", + ring->name, ring->waiting_seqno); + seq_printf(m, "IRQ sequence (%s): %d\n", + ring->name, ring->irq_seqno); + } +} + static int i915_gem_seqno_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -330,15 +343,9 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) if (ret) return ret; - if (dev_priv->render_ring.status_page.page_addr != NULL) { - seq_printf(m, "Current sequence: %d\n", - 
dev_priv->render_ring.get_seqno(&dev_priv->render_ring)); - } else { - seq_printf(m, "Current sequence: hws uninitialized\n"); - } - seq_printf(m, "Waiter sequence: %d\n", - dev_priv->mm.waiting_gem_seqno); - seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); + i915_ring_seqno_info(m, &dev_priv->render_ring); + i915_ring_seqno_info(m, &dev_priv->bsd_ring); + i915_ring_seqno_info(m, &dev_priv->blt_ring); mutex_unlock(&dev->struct_mutex); @@ -390,22 +397,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } seq_printf(m, "Interrupts received: %d\n", atomic_read(&dev_priv->irq_received)); - if (dev_priv->render_ring.get_seqno) { - seq_printf(m, "Current sequence (render): %d\n", - dev_priv->render_ring.get_seqno(&dev_priv->render_ring)); - } - if (dev_priv->bsd_ring.get_seqno) { - seq_printf(m, "Current sequence (BSD): %d\n", - dev_priv->bsd_ring.get_seqno(&dev_priv->bsd_ring)); - } - if (dev_priv->blt_ring.get_seqno) { - seq_printf(m, "Current sequence (BLT): %d\n", - dev_priv->blt_ring.get_seqno(&dev_priv->blt_ring)); - } - seq_printf(m, "Waiter sequence: %d\n", - dev_priv->mm.waiting_gem_seqno); - seq_printf(m, "IRQ sequence: %d\n", - dev_priv->mm.irq_gem_seqno); + i915_ring_seqno_info(m, &dev_priv->render_ring); + i915_ring_seqno_info(m, &dev_priv->bsd_ring); + i915_ring_seqno_info(m, &dev_priv->blt_ring); mutex_unlock(&dev->struct_mutex); return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c241468c632e..2af8e1604b44 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -608,16 +608,6 @@ typedef struct drm_i915_private { */ struct delayed_work retire_work; - /** - * Waiting sequence number, if any - */ - uint32_t waiting_gem_seqno; - - /** - * Last seq seen at irq time - */ - uint32_t irq_gem_seqno; - /** * Flag if the X Server, and thus DRM, is not currently in * control of the device. diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 580244c1e793..74f5525d156f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1984,7 +1984,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, trace_i915_gem_request_wait_begin(dev, seqno); - ring->waiting_gem_seqno = seqno; + ring->waiting_seqno = seqno; ring->user_irq_get(ring); if (interruptible) ret = wait_event_interruptible(ring->irq_queue, @@ -1996,7 +1996,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, || atomic_read(&dev_priv->mm.wedged)); ring->user_irq_put(ring); - ring->waiting_gem_seqno = 0; + ring->waiting_seqno = 0; trace_i915_gem_request_wait_end(dev, seqno); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 23b28528c642..29cbcb33b92c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -298,7 +298,7 @@ static void notify_ring(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; u32 seqno = ring->get_seqno(ring); - ring->irq_gem_seqno = seqno; + ring->irq_seqno = seqno; trace_i915_gem_request_complete(dev, seqno); wake_up_all(&ring->irq_queue); dev_priv->hangcheck_count = 0; @@ -1319,10 +1319,10 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) if (list_empty(&ring->request_list) || i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { /* Issue a wake-up to catch stuck h/w. 
*/ - if (ring->waiting_gem_seqno && waitqueue_active(&ring->irq_queue)) { + if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) { DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n", ring->name, - ring->waiting_gem_seqno, + ring->waiting_seqno, ring->get_seqno(ring)); wake_up_all(&ring->irq_queue); *err = true; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 84f6919de8e0..7ad9e94220b4 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -35,8 +35,8 @@ struct intel_ring_buffer { int size; struct intel_hw_status_page status_page; - u32 irq_gem_seqno; /* last seq seem at irq time */ - u32 waiting_gem_seqno; + u32 irq_seqno; /* last seq seem at irq time */ + u32 waiting_seqno; int user_irq_refcount; void (*user_irq_get)(struct intel_ring_buffer *ring); void (*user_irq_put)(struct intel_ring_buffer *ring); -- cgit v1.2.3 From 3cce469cab880ef8990d2d16d745bf85443fc998 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 16:11:02 +0100 Subject: drm/i915: Propagate error from failing to queue a request Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 8 +- drivers/gpu/drm/i915/i915_gem.c | 36 ++++++--- drivers/gpu/drm/i915/intel_overlay.c | 20 +++-- drivers/gpu/drm/i915/intel_ringbuffer.c | 128 +++++++++++++++++--------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 +- 5 files changed, 111 insertions(+), 85 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2af8e1604b44..f168e82c10aa 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1041,10 +1041,10 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start, unsigned long end); int i915_gpu_idle(struct drm_device *dev); int i915_gem_idle(struct drm_device *dev); -uint32_t i915_add_request(struct drm_device *dev, - struct drm_file *file_priv, - struct drm_i915_gem_request *request, - struct intel_ring_buffer *ring); +int i915_add_request(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_i915_gem_request *request, + struct intel_ring_buffer *ring); int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, bool interruptible, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 74f5525d156f..d0aaf97ac6e0 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1683,7 +1683,7 @@ i915_gem_process_flushing_list(struct drm_device *dev, } } -uint32_t +int i915_add_request(struct drm_device *dev, struct drm_file *file, struct drm_i915_gem_request *request, @@ -1693,17 +1693,17 @@ i915_add_request(struct drm_device *dev, struct drm_i915_file_private *file_priv = NULL; uint32_t seqno; int was_empty; + int ret; + + BUG_ON(request == NULL); if (file != NULL) file_priv = file->driver_priv; - if (request == NULL) { - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) - return 0; - } + ret = ring->add_request(ring, &seqno); + if (ret) + return ret; - seqno = ring->add_request(ring, 0); ring->outstanding_lazy_request = false; request->seqno = seqno; @@ -1727,7 +1727,7 @@ i915_add_request(struct drm_device *dev, queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); } - return seqno; + return 0; } /** @@ -1964,9 +1964,19 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, return -EAGAIN; if (ring->outstanding_lazy_request) { - seqno = i915_add_request(dev, NULL, NULL, 
ring); - if (seqno == 0) + struct drm_i915_gem_request *request; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) return -ENOMEM; + + ret = i915_add_request(dev, NULL, request, ring); + if (ret) { + kfree(request); + return ret; + } + + seqno = request->seqno; } BUG_ON(seqno == dev_priv->next_seqno); @@ -3844,8 +3854,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, */ i915_retire_commands(dev, ring); - i915_add_request(dev, file, request, ring); - request = NULL; + if (i915_add_request(dev, file, request, ring)) + ring->outstanding_lazy_request = true; + else + request = NULL; err: for (i = 0; i < args->buffer_count; i++) { diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 78fa6a249964..2d4a6968cd76 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -221,11 +221,12 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, int ret; BUG_ON(overlay->last_flip_req); - overlay->last_flip_req = - i915_add_request(dev, NULL, request, &dev_priv->render_ring); - if (overlay->last_flip_req == 0) - return -ENOMEM; - + ret = i915_add_request(dev, NULL, request, &dev_priv->render_ring); + if (ret) { + kfree(request); + return ret; + } + overlay->last_flip_req = request->seqno; overlay->flip_tail = tail; ret = i915_do_wait_request(dev, overlay->last_flip_req, true, @@ -363,8 +364,13 @@ static int intel_overlay_continue(struct intel_overlay *overlay, OUT_RING(flip_addr); ADVANCE_LP_RING(); - overlay->last_flip_req = - i915_add_request(dev, NULL, request, &dev_priv->render_ring); + ret = i915_add_request(dev, NULL, request, &dev_priv->render_ring); + if (ret) { + kfree(request); + return ret; + } + + overlay->last_flip_req = request->seqno; return 0; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6fe42c1f4ea9..4803b32f308f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -234,28 +234,28 @@ do { \ * * Returned sequence numbers are nonzero on success. */ -static u32 +static int render_ring_add_request(struct intel_ring_buffer *ring, - u32 flush_domains) + u32 *result) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 seqno; - - seqno = i915_gem_get_seqno(dev); + u32 seqno = i915_gem_get_seqno(dev); + int ret; if (IS_GEN6(dev)) { - if (intel_ring_begin(ring, 6) == 0) { - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3); - intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); - } + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3); + intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | + PIPE_CONTROL_NOTIFY); + intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); } else if (HAS_PIPE_CONTROL(dev)) { u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; @@ -264,42 +264,47 @@ render_ring_add_request(struct intel_ring_buffer *ring, * PIPE_NOTIFY buffers out to memory before requesting * an interrupt. 
*/ - if (intel_ring_begin(ring, 32) == 0) { - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); - intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; /* write to separate cachelines */ - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); - } + ret = intel_ring_begin(ring, 32); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); + intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; /* write to separate cachelines */ + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | + PIPE_CONTROL_NOTIFY); + intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); } else { - if (intel_ring_begin(ring, 4) == 0) { - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, seqno); + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; - intel_ring_emit(ring, MI_USER_INTERRUPT); - intel_ring_advance(ring); - } + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); + + intel_ring_emit(ring, MI_USER_INTERRUPT); } - return seqno; + + intel_ring_advance(ring); + *result = seqno; + return 0; } static u32 @@ -370,25 +375,28 @@ bsd_ring_flush(struct intel_ring_buffer *ring, } } -static u32 +static int ring_add_request(struct intel_ring_buffer *ring, - u32 flush_domains) + u32 *result) { u32 seqno; + int ret; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; seqno = i915_gem_get_seqno(ring->dev); - if (intel_ring_begin(ring, 4) == 0) { - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, MI_USER_INTERRUPT); - intel_ring_advance(ring); - } + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_advance(ring); DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); - - return seqno; + *result = seqno; + return 0; } 
static void diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 7ad9e94220b4..acd23374fe89 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -48,8 +48,8 @@ struct intel_ring_buffer { void (*flush)(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains); - u32 (*add_request)(struct intel_ring_buffer *ring, - u32 flush_domains); + int (*add_request)(struct intel_ring_buffer *ring, + u32 *seqno); u32 (*get_seqno)(struct intel_ring_buffer *ring); int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, struct drm_i915_gem_execbuffer2 *exec, -- cgit v1.2.3 From d935cc61d466f6cc7514032835f4fc379cb7e2ca Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 16 Sep 2010 15:13:11 +0200 Subject: drm_mm: add support for range-restricted fair-lru scans Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/drm_mm.c | 40 +++++++++++++++++++++++++++++++++++++++- include/drm/drm_mm.h | 7 +++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index a6bfc302ed90..c59515ba7e69 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -392,9 +392,35 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size, mm->scanned_blocks = 0; mm->scan_hit_start = 0; mm->scan_hit_size = 0; + mm->scan_check_range = 0; } EXPORT_SYMBOL(drm_mm_init_scan); +/** + * Initialize lru scanning. + * + * This simply sets up the scanning routines with the parameters for the desired + * hole. This version is for range-restricted scans. + * + * Warning: As long as the scan list is non-empty, no other operations than + * adding/removing nodes to/from the scan list are allowed. + */ +void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size, + unsigned alignment, + unsigned long start, + unsigned long end) +{ + mm->scan_alignment = alignment; + mm->scan_size = size; + mm->scanned_blocks = 0; + mm->scan_hit_start = 0; + mm->scan_hit_size = 0; + mm->scan_start = start; + mm->scan_end = end; + mm->scan_check_range = 1; +} +EXPORT_SYMBOL(drm_mm_init_scan_with_range); + /** * Add a node to the scan list that might be freed to make space for the desired * hole. @@ -406,6 +432,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node) struct drm_mm *mm = node->mm; struct list_head *prev_free, *next_free; struct drm_mm_node *prev_node, *next_node; + unsigned long adj_start; + unsigned long adj_end; mm->scanned_blocks++; @@ -452,7 +480,17 @@ int drm_mm_scan_add_block(struct drm_mm_node *node) node->free_stack.prev = prev_free; node->free_stack.next = next_free; - if (check_free_hole(node->start, node->start + node->size, + if (mm->scan_check_range) { + adj_start = node->start < mm->scan_start ? + mm->scan_start : node->start; + adj_end = node->start + node->size > mm->scan_end ?
+ mm->scan_end : node->start + node->size; + } else { + adj_start = node->start; + adj_end = node->start + node->size; + } + + if (check_free_hole(adj_start , adj_end, mm->scan_size, mm->scan_alignment)) { mm->scan_hit_start = node->start; mm->scan_hit_size = node->size; diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index bf01531193d5..e39177778601 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -62,11 +62,14 @@ struct drm_mm { struct list_head unused_nodes; int num_unused; spinlock_t unused_lock; + unsigned int scan_check_range : 1; unsigned scan_alignment; unsigned long scan_size; unsigned long scan_hit_start; unsigned scan_hit_size; unsigned scanned_blocks; + unsigned long scan_start; + unsigned long scan_end; }; /* @@ -145,6 +148,10 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) void drm_mm_init_scan(struct drm_mm *mm, unsigned long size, unsigned alignment); +void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size, + unsigned alignment, + unsigned long start, + unsigned long end); int drm_mm_scan_add_block(struct drm_mm_node *node); int drm_mm_scan_remove_block(struct drm_mm_node *node); -- cgit v1.2.3 From a6e0aa421406dc4cfd736c6d07d26ed39ab4f7bc Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 16 Sep 2010 15:45:15 +0200 Subject: drm/i915: range-restricted eviction support Add a mappable parameter to i915_gem_evict_something to distinguish the two cases (non-restricted vs. mappable gtt allocations). No functional changes because the mappable limit is set to the end of the gtt currently. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 5 ++++- drivers/gpu/drm/i915/i915_gem.c | 10 ++++++---- drivers/gpu/drm/i915/i915_gem_evict.c | 24 +++++++++++++++++++----- 3 files changed, 29 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f168e82c10aa..dc0a21a3489b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -535,6 +535,8 @@ typedef struct drm_i915_private { struct drm_mm vram; /** Memory allocator for GTT */ struct drm_mm gtt_space; + /** End of mappable part of GTT */ + unsigned long gtt_mappable_end; struct io_mapping *gtt_mapping; int gtt_mtrr; @@ -1067,7 +1069,8 @@ void i915_gem_shrinker_init(void); void i915_gem_shrinker_exit(void); /* i915_gem_evict.c */ -int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment); +int i915_gem_evict_something(struct drm_device *dev, int min_size, + unsigned alignment, bool mappable); int i915_gem_evict_everything(struct drm_device *dev); int i915_gem_evict_inactive(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d0aaf97ac6e0..254eb0c46aeb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -187,6 +187,7 @@ int i915_gem_do_init(struct drm_device *dev, end - start); dev_priv->mm.gtt_total = end - start; + dev_priv->mm.gtt_mappable_end = end; return 0; } @@ -413,7 +414,8 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) struct drm_device *dev = obj->dev; ret = i915_gem_evict_something(dev, obj->size, - i915_gem_get_gtt_alignment(obj)); + i915_gem_get_gtt_alignment(obj), + false); if (ret) return ret; @@ -2672,7 +2674,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) /* If the gtt is empty and we're still having trouble * fitting our object in, we're out of memory. 
*/ - ret = i915_gem_evict_something(dev, obj->size, alignment); + ret = i915_gem_evict_something(dev, obj->size, alignment, true); if (ret) return ret; @@ -2687,7 +2689,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) if (ret == -ENOMEM) { /* first try to clear up some space from the GTT */ ret = i915_gem_evict_something(dev, obj->size, - alignment); + alignment, true); if (ret) { /* now try to shrink everyone else */ if (gfpmask) { @@ -2717,7 +2719,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; - ret = i915_gem_evict_something(dev, obj->size, alignment); + ret = i915_gem_evict_something(dev, obj->size, alignment, true); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 43a4013f53fa..3a4215f31652 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -41,7 +41,8 @@ mark_free(struct drm_i915_gem_object *obj_priv, } int -i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment) +i915_gem_evict_something(struct drm_device *dev, int min_size, + unsigned alignment, bool mappable) { drm_i915_private_t *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; @@ -51,9 +52,17 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen i915_gem_retire_requests(dev); /* Re-check for free space after retiring requests */ - if (drm_mm_search_free(&dev_priv->mm.gtt_space, - min_size, alignment, 0)) - return 0; + if (mappable) { + if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, + min_size, alignment, 0, + dev_priv->mm.gtt_mappable_end, + 0)) + return 0; + } else { + if (drm_mm_search_free(&dev_priv->mm.gtt_space, + min_size, alignment, 0)) + return 0; + } /* * The goal is to evict objects and amalgamate space in LRU order. @@ -79,7 +88,12 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen */ INIT_LIST_HEAD(&unwind_list); - drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); + if (mappable) + drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size, + alignment, 0, + dev_priv->mm.gtt_mappable_end); + else + drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); /* First see if there is a large enough contiguous idle region... */ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { -- cgit v1.2.3 From 920afa77ced7124c8bb7d0c4839885618a3b4a54 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 16 Sep 2010 17:54:23 +0200 Subject: drm/i915: range-restricted bind_to_gtt Like before add a parameter mappable (also to gem_object_pin) and set it depending upon the context. Only bos that are brought into the gtt due to an execbuffer call can be put into the unmappable part of the gtt, everything else (especially pinned objects) need to be put into the mappable part of the gtt. 
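A rough sketch of the intended convention (illustrative only; within this patch every caller still passes true, and the execbuffer path only starts computing the flag per object in a follow-up patch):

/* Long-lived, CPU-visible pins (status pages, ring buffers, scanout,
 * cursors, contexts) must stay in the mappable aperture: */
ret = i915_gem_object_pin(obj, PAGE_SIZE, true);

/* Objects bound on behalf of an execbuffer call are the only
 * candidates for the unmappable part of the GTT: */
ret = i915_gem_object_bind_to_gtt(obj, alignment, false);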
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 3 +- drivers/gpu/drm/i915/i915_gem.c | 63 +++++++++++++++++++++++---------- drivers/gpu/drm/i915/intel_display.c | 6 ++-- drivers/gpu/drm/i915/intel_overlay.c | 4 +-- drivers/gpu/drm/i915/intel_ringbuffer.c | 4 +-- 5 files changed, 53 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index dc0a21a3489b..263bb0506247 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1012,7 +1012,8 @@ int i915_gem_init_object(struct drm_gem_object *obj); struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, size_t size); void i915_gem_free_object(struct drm_gem_object *obj); -int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); +int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, + bool mappable); void i915_gem_object_unpin(struct drm_gem_object *obj); int i915_gem_object_unbind(struct drm_gem_object *obj); void i915_gem_release_mmap(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 254eb0c46aeb..ef14546fc08d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -51,7 +51,7 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, bool interruptible); static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, - unsigned alignment); + unsigned alignment, bool mappable); static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -1031,7 +1031,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, else if (obj_priv->tiling_mode == I915_TILING_NONE && obj_priv->gtt_space && obj->write_domain != I915_GEM_DOMAIN_CPU) { - ret = i915_gem_object_pin(obj, 0); + ret = i915_gem_object_pin(obj, 0, true); if (ret) goto out; @@ -1256,7 +1256,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Now bind it into the GTT if needed */ mutex_lock(&dev->struct_mutex); if (!obj_priv->gtt_space) { - ret = i915_gem_object_bind_to_gtt(obj, 0); + ret = i915_gem_object_bind_to_gtt(obj, 0, true); if (ret) goto unlock; @@ -1506,7 +1506,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, * initial fault faster and any subsequent flushing possible). */ if (!obj_priv->agp_mem) { - ret = i915_gem_object_bind_to_gtt(obj, 0); + ret = i915_gem_object_bind_to_gtt(obj, 0, true); if (ret) goto out; } @@ -2635,7 +2635,9 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, * Finds free space in the GTT aperture and binds the object there. */ static int -i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) +i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, + unsigned alignment, + bool mappable) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -2659,22 +2661,42 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) /* If the object is bigger than the entire aperture, reject it early * before evicting everything in a vain attempt to find space. */ - if (obj->size > dev_priv->mm.gtt_total) { + if (obj->size > + (mappable ? 
dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { DRM_ERROR("Attempting to bind an object larger than the aperture\n"); return -E2BIG; } search_free: - free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, - obj->size, alignment, 0); - if (free_space != NULL) - obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, - alignment); + if (mappable) + free_space = + drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, + obj->size, alignment, 0, + dev_priv->mm.gtt_mappable_end, + 0); + else + free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, + obj->size, alignment, 0); + + if (free_space != NULL) { + if (mappable) + obj_priv->gtt_space = + drm_mm_get_block_range_generic(free_space, + obj->size, + alignment, 0, + dev_priv->mm.gtt_mappable_end, + 0); + else + obj_priv->gtt_space = + drm_mm_get_block(free_space, obj->size, + alignment); + } if (obj_priv->gtt_space == NULL) { /* If the gtt is empty and we're still having trouble * fitting our object in, we're out of memory. */ - ret = i915_gem_evict_something(dev, obj->size, alignment, true); + ret = i915_gem_evict_something(dev, obj->size, alignment, + mappable); if (ret) return ret; @@ -2689,7 +2711,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) if (ret == -ENOMEM) { /* first try to clear up some space from the GTT */ ret = i915_gem_evict_something(dev, obj->size, - alignment, true); + alignment, mappable); if (ret) { /* now try to shrink everyone else */ if (gfpmask) { @@ -2719,7 +2741,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; - ret = i915_gem_evict_something(dev, obj->size, alignment, true); + ret = i915_gem_evict_something(dev, obj->size, alignment, + mappable); if (ret) return ret; @@ -3456,7 +3479,8 @@ i915_gem_execbuffer_pin(struct drm_device *dev, break; } - ret = i915_gem_object_pin(&obj->base, entry->alignment); + ret = i915_gem_object_pin(&obj->base, + entry->alignment, true); if (ret) break; @@ -4026,7 +4050,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, } int -i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) +i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, + bool mappable) { struct drm_device *dev = obj->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -4051,7 +4076,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) } if (obj_priv->gtt_space == NULL) { - ret = i915_gem_object_bind_to_gtt(obj, alignment); + ret = i915_gem_object_bind_to_gtt(obj, alignment, mappable); if (ret) return ret; } @@ -4133,7 +4158,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, obj_priv->user_pin_count++; obj_priv->pin_filp = file_priv; if (obj_priv->user_pin_count == 1) { - ret = i915_gem_object_pin(obj, args->alignment); + ret = i915_gem_object_pin(obj, args->alignment, true); if (ret) goto out; } @@ -4445,7 +4470,7 @@ i915_gem_init_pipe_control(struct drm_device *dev) obj_priv = to_intel_bo(obj); obj_priv->agp_type = AGP_USER_CACHED_MEMORY; - ret = i915_gem_object_pin(obj, 4096); + ret = i915_gem_object_pin(obj, 4096, true); if (ret) goto err_unref; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index eb4c725e3069..c9c4c707cf1a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1461,7 +1461,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, BUG(); } - ret = i915_gem_object_pin(obj, alignment); + ret = 
i915_gem_object_pin(obj, alignment, true); if (ret) return ret; @@ -4353,7 +4353,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, /* we only need to pin inside GTT if cursor is non-phy */ mutex_lock(&dev->struct_mutex); if (!dev_priv->info->cursor_needs_physical) { - ret = i915_gem_object_pin(bo, PAGE_SIZE); + ret = i915_gem_object_pin(bo, PAGE_SIZE, true); if (ret) { DRM_ERROR("failed to pin cursor bo\n"); goto fail_locked; @@ -5517,7 +5517,7 @@ intel_alloc_context_page(struct drm_device *dev) } mutex_lock(&dev->struct_mutex); - ret = i915_gem_object_pin(ctx, 4096); + ret = i915_gem_object_pin(ctx, 4096, true); if (ret) { DRM_ERROR("failed to pin power context: %d\n", ret); goto err_unref; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 2d4a6968cd76..beda2016eb16 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -781,7 +781,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret != 0) return ret; - ret = i915_gem_object_pin(new_bo, PAGE_SIZE); + ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true); if (ret != 0) return ret; @@ -1423,7 +1423,7 @@ void intel_setup_overlay(struct drm_device *dev) } overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; } else { - ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); + ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); if (ret) { DRM_ERROR("failed to pin overlay register bo\n"); goto out_free_bo; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 4803b32f308f..8eaa60cc5d25 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -546,7 +546,7 @@ static int init_status_page(struct intel_ring_buffer *ring) obj_priv = to_intel_bo(obj); obj_priv->agp_type = AGP_USER_CACHED_MEMORY; - ret = i915_gem_object_pin(obj, 4096); + ret = i915_gem_object_pin(obj, 4096, true); if (ret != 0) { goto err_unref; } @@ -602,7 +602,7 @@ int intel_init_ring_buffer(struct drm_device *dev, ring->gem_object = obj; - ret = i915_gem_object_pin(obj, PAGE_SIZE); + ret = i915_gem_object_pin(obj, PAGE_SIZE, true); if (ret) goto err_unref; -- cgit v1.2.3 From 16e809acc167c3ede231cafcdab1be93bab3e429 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 16 Sep 2010 19:37:04 +0200 Subject: drm/i915: unbind unmappable objects on fault/pin In i915_gem_object_pin obviously unbind only if mappable is true. This is the last part to enable gtt_mappable_end != gtt_size, which the next patch will do. v2: Fences on g33/pineview only work in the mappable part of the gtt. 
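The test this patch adds reduces to a range check against the mappable boundary. A minimal sketch in plain C, outside the kernel; the struct and its fields are illustrative stand-ins for the driver's per-object state, not kernel API:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for the driver's per-object state. */
    struct gtt_object {
            bool bound;          /* obj->gtt_space != NULL in the driver */
            uint32_t gtt_offset; /* start of the GTT binding */
            size_t size;
    };

    /* An object is CPU-accessible through the aperture iff it is unbound
     * or its binding ends at or before gtt_mappable_end. */
    static bool cpu_accessible(const struct gtt_object *obj,
                               uint64_t gtt_mappable_end)
    {
            return !obj->bound ||
                   (uint64_t)obj->gtt_offset + obj->size <= gtt_mappable_end;
    }

The fault handler below unbinds any object that fails this check before rebinding it with mappable set.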
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ef14546fc08d..7b0680714101 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -260,6 +260,16 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return 0; } +static bool +i915_gem_object_cpu_accessible(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + drm_i915_private_t *dev_priv = dev->dev_private; + + return obj->gtt_space == NULL || + obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; +} + static inline int fast_shmem_read(struct page **pages, loff_t page_base, int page_offset, @@ -1255,6 +1265,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Now bind it into the GTT if needed */ mutex_lock(&dev->struct_mutex); + if (!i915_gem_object_cpu_accessible(obj_priv)) + i915_gem_object_unbind(obj); + if (!obj_priv->gtt_space) { ret = i915_gem_object_bind_to_gtt(obj, 0, true); if (ret) @@ -3465,11 +3478,15 @@ i915_gem_execbuffer_pin(struct drm_device *dev, ret = 0; for (i = 0; i < count; i++) { struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; - struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]); + struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); bool need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; + /* g33/pnv can't fence buffers in the unmappable part */ + bool need_mappable = + entry->relocation_count ? true : need_fence; + /* Check fence reg constraints and rebind if necessary */ if (need_fence && !i915_gem_object_fence_offset_ok(&obj->base, @@ -3480,7 +3497,8 @@ i915_gem_execbuffer_pin(struct drm_device *dev, } ret = i915_gem_object_pin(&obj->base, - entry->alignment, true); + entry->alignment, + need_mappable); if (ret) break; @@ -4064,7 +4082,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, if (obj_priv->gtt_space != NULL) { if (alignment == 0) alignment = i915_gem_get_gtt_alignment(obj); - if (obj_priv->gtt_offset & (alignment - 1)) { + if (obj_priv->gtt_offset & (alignment - 1) || + (mappable && !i915_gem_object_cpu_accessible(obj_priv))) { WARN(obj_priv->pin_count, "bo is already pinned with incorrect alignment:" " offset=%x, req.alignment=%x\n", -- cgit v1.2.3 From 53984635a659e360f206a81ada4ae813152d72f1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 22 Sep 2010 23:44:24 +0200 Subject: drm/i915: use the complete gtt At least the part that's currently enabled by the BIOS. 
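The sizes fed to i915_gem_do_init() come straight from the GTT entry counts. A sketch of the arithmetic, assuming the usual 4KiB pages; the helper name is illustrative:

    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Entry counts scale to bytes; the last GTT page is held back so
     * prefetches past the end stay inside the aperture. */
    static void compute_gtt_ranges(uint32_t stolen_entries,
                                   uint32_t total_entries,
                                   uint32_t mappable_entries,
                                   unsigned long *prealloc_size,
                                   unsigned long *gtt_size,
                                   unsigned long *mappable_size)
    {
            *prealloc_size = (unsigned long)stolen_entries << PAGE_SHIFT;
            *gtt_size      = ((unsigned long)total_entries << PAGE_SHIFT)
                             - PAGE_SIZE;
            *mappable_size = (unsigned long)mappable_entries << PAGE_SHIFT;
    }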
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 14 +++++++++----- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 5 +++-- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 02daf4e5c8e6..1bd37e34c77e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1195,13 +1195,17 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev) return can_switch; } -static int i915_load_modeset_init(struct drm_device *dev, - unsigned long prealloc_size, - unsigned long agp_size) +static int i915_load_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long prealloc_size, gtt_size, mappable_size; int ret = 0; + prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; + gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; + mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; + gtt_size -= PAGE_SIZE; + /* Basic memrange allocator for stolen space (aka mm.vram) */ drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size); @@ -1214,7 +1218,7 @@ static int i915_load_modeset_init(struct drm_device *dev, * at the last page of the aperture. One page should be enough to * keep any prefetching inside of the aperture. */ - i915_gem_do_init(dev, prealloc_size, agp_size - 4096); + i915_gem_do_init(dev, prealloc_size, mappable_size, gtt_size); mutex_lock(&dev->struct_mutex); ret = i915_gem_init_ringbuffer(dev); @@ -2056,7 +2060,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_detect_pch(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_load_modeset_init(dev, prealloc_size, agp_size); + ret = i915_load_modeset_init(dev); if (ret < 0) { DRM_ERROR("failed to init modeset\n"); goto out_workqueue_free; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 263bb0506247..296ed38b292f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1041,7 +1041,7 @@ int i915_gem_object_set_domain(struct drm_gem_object *obj, int i915_gem_init_ringbuffer(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); int i915_gem_do_init(struct drm_device *dev, unsigned long start, - unsigned long end); + unsigned long mappable_end, unsigned long end); int i915_gpu_idle(struct drm_device *dev); int i915_gem_idle(struct drm_device *dev); int i915_add_request(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7b0680714101..6d9971f0ed92 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -173,6 +173,7 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) int i915_gem_do_init(struct drm_device *dev, unsigned long start, + unsigned long mappable_end, unsigned long end) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -187,7 +188,7 @@ int i915_gem_do_init(struct drm_device *dev, end - start); dev_priv->mm.gtt_total = end - start; - dev_priv->mm.gtt_mappable_end = end; + dev_priv->mm.gtt_mappable_end = mappable_end; return 0; } @@ -200,7 +201,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data, int ret; mutex_lock(&dev->struct_mutex); - ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end); + ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end); mutex_unlock(&dev->struct_mutex); return ret; -- cgit 
v1.2.3 From b3eafc5af02a799650757f2c5b2b0d4835dd0a5f Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 23 Sep 2010 20:04:17 +0200 Subject: intel-gtt: save PGETBL_CTL later in the setup process ... and switch to a more classical store-reg-on-suspend, restore-on-resume way of doing things. Obviously this is just preparation for the future, the code is not there at all, yet. This is needed because the next patch adjusts this register and everything in it (not just the pagetable address) needs to be restored on resume. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 6b6760ea2435..fd3e94f8ab51 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -95,7 +95,7 @@ static struct _intel_private { u8 __iomem *registers; phys_addr_t gtt_bus_addr; phys_addr_t gma_bus_addr; - phys_addr_t pte_bus_addr; + u32 PGETBL_save; u32 __iomem *gtt; /* I915G */ int num_dcache_entries; union { @@ -755,6 +755,11 @@ static int intel_gtt_init(void) intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); intel_private.base.gtt_total_entries = intel_gtt_total_entries(); + /* save the PGETBL reg for resume */ + intel_private.PGETBL_save = + readl(intel_private.registers+I810_PGETBL_CTL) + & ~I810_PGETBL_ENABLED; + dev_info(&intel_private.bridge_dev->dev, "detected gtt size: %dK total, %dK mappable\n", intel_private.base.gtt_total_entries * 4, @@ -891,7 +896,7 @@ static void intel_enable_gtt(void) gmch_ctrl |= I830_GMCH_ENABLED; pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl); - writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED, + writel(intel_private.PGETBL_save|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ } @@ -908,8 +913,6 @@ static int i830_setup(void) return -ENOMEM; intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; - intel_private.pte_bus_addr = - readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; intel_i830_setup_flush(); @@ -1265,9 +1268,6 @@ static int i9xx_setup(void) intel_private.gtt_bus_addr = reg_addr + gtt_offset; } - intel_private.pte_bus_addr = - readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; - intel_i9xx_setup_flush(); return 0; -- cgit v1.2.3 From 201728429d6cf336cfd7483fcd1bce47291b2901 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 24 Sep 2010 18:25:59 +0200 Subject: intel-gtt: maximize ggtt size on platforms that support this On VT-d supporting platforms the GGTT is allocated in a stolen mem section separate from graphics stolen mem. The GMCH register contains a bitfield specifying the size of that region. Docs suggest that this region can only be used for GGTT and PPGTT. Hence ensure that the PPGTT is disabled and use the complete area for the GGTT. Unfortunately the graphics core on G33/Pineview can't cope with really large GTTs and the BIOS usually enables the maximum of 512MB. So don't bother with maximizing the GTT on these platforms.
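How the new GMCH_CTRL decode maps the G4x size field to a GGTT pagetable size can be sketched in isolation (the defines are the ones this patch adds; the helper itself is illustrative):

    #include <stdint.h>

    #define G4x_GMCH_SIZE_MASK    (0xf << 8)
    #define G4x_GMCH_SIZE_1M      (0x1 << 8)
    #define G4x_GMCH_SIZE_2M      (0x3 << 8)
    #define G4x_GMCH_SIZE_VT_1M   (0x9 << 8)
    #define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
    #define G4x_GMCH_SIZE_VT_2M   (0xc << 8)

    /* Return the GGTT pagetable size in KiB implied by the GMCH control
     * word, or 0 for values the driver leaves untouched. The VT variants
     * mean the pagetable sits in the separate VT-d stolen region. */
    static unsigned int ggtt_kb_from_gmch(uint16_t gmch_ctl)
    {
            switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
            case G4x_GMCH_SIZE_1M:
            case G4x_GMCH_SIZE_VT_1M:
                    return 1024;
            case G4x_GMCH_SIZE_VT_1_5M:
                    return 1024 + 512;
            case G4x_GMCH_SIZE_2M:
            case G4x_GMCH_SIZE_VT_2M:
                    return 2048;
            default:
                    return 0;
            }
    }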
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-agp.h | 14 +++++-- drivers/char/agp/intel-gtt.c | 96 ++++++++++++++++++++++++++++++++------------ 2 files changed, 81 insertions(+), 29 deletions(-) diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index 90539df02504..010e3defd6c3 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -75,6 +75,8 @@ #define I810_GMS_DISABLE 0x00000000 #define I810_PGETBL_CTL 0x2020 #define I810_PGETBL_ENABLED 0x00000001 +/* Note: PGETBL_CTL2 has a different offset on G33. */ +#define I965_PGETBL_CTL2 0x20c4 #define I965_PGETBL_SIZE_MASK 0x0000000e #define I965_PGETBL_SIZE_512KB (0 << 1) #define I965_PGETBL_SIZE_256KB (1 << 1) @@ -82,9 +84,15 @@ #define I965_PGETBL_SIZE_1MB (3 << 1) #define I965_PGETBL_SIZE_2MB (4 << 1) #define I965_PGETBL_SIZE_1_5MB (5 << 1) -#define G33_PGETBL_SIZE_MASK (3 << 8) -#define G33_PGETBL_SIZE_1M (1 << 8) -#define G33_PGETBL_SIZE_2M (2 << 8) +#define G33_GMCH_SIZE_MASK (3 << 8) +#define G33_GMCH_SIZE_1M (1 << 8) +#define G33_GMCH_SIZE_2M (2 << 8) +#define G4x_GMCH_SIZE_MASK (0xf << 8) +#define G4x_GMCH_SIZE_1M (0x1 << 8) +#define G4x_GMCH_SIZE_2M (0x3 << 8) +#define G4x_GMCH_SIZE_VT_1M (0x9 << 8) +#define G4x_GMCH_SIZE_VT_1_5M (0xa << 8) +#define G4x_GMCH_SIZE_VT_2M (0xc << 8) #define I810_DRAM_CTL 0x3000 #define I810_DRAM_ROW_0 0x00000001 diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index fd3e94f8ab51..5dc1f5db55a7 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -642,41 +642,85 @@ static unsigned int intel_gtt_stolen_entries(void) return stolen_entries; } -static unsigned int intel_gtt_total_entries(void) +static void i965_adjust_pgetbl_size(unsigned int size_flag) +{ + u32 pgetbl_ctl, pgetbl_ctl2; + + /* ensure that ppgtt is disabled */ + pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2); + pgetbl_ctl2 &= ~I810_PGETBL_ENABLED; + writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2); + + /* write the new ggtt size */ + pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); + pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK; + pgetbl_ctl |= size_flag; + writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL); +} + +static unsigned int i965_gtt_total_entries(void) { int size; + u32 pgetbl_ctl; + u16 gmch_ctl; - if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) { - u32 pgetbl_ctl; - pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); + pci_read_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, &gmch_ctl); - switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { - case I965_PGETBL_SIZE_128KB: - size = KB(128); - break; - case I965_PGETBL_SIZE_256KB: - size = KB(256); - break; - case I965_PGETBL_SIZE_512KB: - size = KB(512); - break; - case I965_PGETBL_SIZE_1MB: - size = KB(1024); + if (INTEL_GTT_GEN == 5) { + switch (gmch_ctl & G4x_GMCH_SIZE_MASK) { + case G4x_GMCH_SIZE_1M: + case G4x_GMCH_SIZE_VT_1M: + i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB); break; - case I965_PGETBL_SIZE_2MB: - size = KB(2048); + case G4x_GMCH_SIZE_VT_1_5M: + i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB); break; - case I965_PGETBL_SIZE_1_5MB: - size = KB(1024 + 512); + case G4x_GMCH_SIZE_2M: + case G4x_GMCH_SIZE_VT_2M: + i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB); break; - default: - dev_info(&intel_private.pcidev->dev, - "unknown page table size, assuming 512KB\n"); - size = KB(512); } + } - return size/4; - } else if (INTEL_GTT_GEN == 6) { + pgetbl_ctl = 
readl(intel_private.registers+I810_PGETBL_CTL); + + switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { + case I965_PGETBL_SIZE_128KB: + size = KB(128); + break; + case I965_PGETBL_SIZE_256KB: + size = KB(256); + break; + case I965_PGETBL_SIZE_512KB: + size = KB(512); + break; + /* GTT pagetable sizes bigger than 512KB are not possible on G33! */ + case I965_PGETBL_SIZE_1MB: + size = KB(1024); + break; + case I965_PGETBL_SIZE_2MB: + size = KB(2048); + break; + case I965_PGETBL_SIZE_1_5MB: + size = KB(1024 + 512); + break; + default: + dev_info(&intel_private.pcidev->dev, + "unknown page table size, assuming 512KB\n"); + size = KB(512); + } + + return size/4; +} + +static unsigned int intel_gtt_total_entries(void) +{ + int size; + + if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) + return i965_gtt_total_entries(); + else if (INTEL_GTT_GEN == 6) { u16 snb_gmch_ctl; pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); -- cgit v1.2.3 From ec57d2602a985b54b53e53098f74fdefc24723af Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 30 Sep 2010 23:42:15 +0200 Subject: drm/i915: add mappable to gem_object_bind tracepoint This way we can make some more educated guesses as to why exactly we can't use 2G apertures to their full potential ;) Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 2 +- drivers/gpu/drm/i915/i915_trace.h | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6d9971f0ed92..bb5435ba01aa 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2775,7 +2775,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); obj_priv->gtt_offset = obj_priv->gtt_space->start; - trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); + trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, mappable); return 0; } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index fea97a21cc14..0b1049ff72a3 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -35,22 +35,25 @@ TRACE_EVENT(i915_gem_object_create, TRACE_EVENT(i915_gem_object_bind, - TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset), + TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset, bool mappable), - TP_ARGS(obj, gtt_offset), + TP_ARGS(obj, gtt_offset, mappable), TP_STRUCT__entry( __field(struct drm_gem_object *, obj) __field(u32, gtt_offset) + __field(bool, mappable) ), TP_fast_assign( __entry->obj = obj; __entry->gtt_offset = gtt_offset; + __entry->mappable = mappable; ), - TP_printk("obj=%p, gtt_offset=%08x", - __entry->obj, __entry->gtt_offset) + TP_printk("obj=%p, gtt_offset=%08x%s", + __entry->obj, __entry->gtt_offset, + __entry->mappable ? ", mappable" : "") ); TRACE_EVENT(i915_gem_object_change_domain, -- cgit v1.2.3 From fb7d516af11837126eb1e4a44ab0653bf9b57702 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 1 Oct 2010 22:05:20 +0200 Subject: drm/i915: add accounting for mappable objects in gtt v2 More precisely: For those that _need_ to be mappable. Also add two BUG_ONs in fault and pin to check the consistency of the mappable flag. Changes in v2: - Add tracking of gtt mappable space (to notice mappable/unmappable balancing issues). - Improve the mappable working set tracking by tracking fault and pin separately. 
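The working-set counters hinge on the OR of the two flags. A plain-C sketch of the update rule the patch implements (struct and function names are stand-ins for the driver's bookkeeping):

    #include <stdbool.h>
    #include <stddef.h>

    struct mappable_stats {
            unsigned int count;
            size_t bytes;
    };

    /* Callers flip their own flag (pin_ or fault_mappable) first and then
     * report the new combined state here; the counters only move when the
     * OR of the two flags actually changes. */
    static void update_mappable(struct mappable_stats *s,
                                bool pin_mappable, bool fault_mappable,
                                size_t obj_size, bool became_mappable)
    {
            if (became_mappable) {
                    if (pin_mappable && fault_mappable)
                            return; /* other flag already counted it */
                    s->count++;
                    s->bytes += obj_size;
            } else {
                    if (pin_mappable || fault_mappable)
                            return; /* still mappable via the other flag */
                    s->count--;
                    s->bytes -= obj_size;
            }
    }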
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 6 +++ drivers/gpu/drm/i915/i915_drv.h | 12 +++++ drivers/gpu/drm/i915/i915_gem.c | 93 +++++++++++++++++++++++++++++++------ 3 files changed, 98 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index beb3de7921b5..2e5961139326 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -131,6 +131,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (fence: %d)", obj->fence_reg); if (obj->gtt_space != NULL) seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset); + if (obj->pin_mappable || obj->fault_mappable) + seq_printf(m, " (mappable)"); if (obj->ring != NULL) seq_printf(m, " (%s)", obj->ring->name); } @@ -207,6 +209,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data) seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory); seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count); seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory); + seq_printf(m, "%u mappable objects in gtt\n", dev_priv->mm.gtt_mappable_count); + seq_printf(m, "%zu mappable gtt bytes\n", dev_priv->mm.gtt_mappable_memory); + seq_printf(m, "%zu mappable gtt used bytes\n", dev_priv->mm.mappable_gtt_used); + seq_printf(m, "%zu mappable gtt total\n", dev_priv->mm.mappable_gtt_total); seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count); seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory); seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 296ed38b292f..cdae5d189165 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -643,9 +643,13 @@ typedef struct drm_i915_private { size_t object_memory; size_t pin_memory; size_t gtt_memory; + size_t gtt_mappable_memory; + size_t mappable_gtt_used; + size_t mappable_gtt_total; size_t gtt_total; u32 object_count; u32 pin_count; + u32 gtt_mappable_count; u32 gtt_count; } mm; struct sdvo_device_mapping sdvo_mappings[2]; @@ -775,6 +779,14 @@ struct drm_i915_gem_object { unsigned int pin_count : 4; #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf + /** + * Whether the current gtt mapping needs to be mappable (and isn't just + * mappable by accident). Track pin and fault separate for a more + * accurate mappable working set. + */ + unsigned int fault_mappable : 1; + unsigned int pin_mappable : 1; + /** AGP memory structure for our GTT binding. 
*/ DRM_AGP_MEM *agp_mem; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index bb5435ba01aa..2eceb24bf54b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -84,31 +84,83 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, } static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv, - size_t size) + struct drm_gem_object *obj) { + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); dev_priv->mm.gtt_count++; - dev_priv->mm.gtt_memory += size; + dev_priv->mm.gtt_memory += obj->size; + if (obj_priv->gtt_offset < dev_priv->mm.gtt_mappable_end) { + dev_priv->mm.mappable_gtt_used += + min_t(size_t, obj->size, + dev_priv->mm.gtt_mappable_end + - obj_priv->gtt_offset); + } } static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv, - size_t size) + struct drm_gem_object *obj) { + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); dev_priv->mm.gtt_count--; - dev_priv->mm.gtt_memory -= size; + dev_priv->mm.gtt_memory -= obj->size; + if (obj_priv->gtt_offset < dev_priv->mm.gtt_mappable_end) { + dev_priv->mm.mappable_gtt_used -= + min_t(size_t, obj->size, + dev_priv->mm.gtt_mappable_end + - obj_priv->gtt_offset); + } +} + +/** + * Update the mappable working set counters. Call _only_ when there is a change + * in one of (pin|fault)_mappable and update *_mappable _before_ calling. + * @mappable: new state the changed mappable flag (either pin_ or fault_). + */ +static void +i915_gem_info_update_mappable(struct drm_i915_private *dev_priv, + struct drm_gem_object *obj, + bool mappable) +{ + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + if (mappable) { + if (obj_priv->pin_mappable && obj_priv->fault_mappable) + /* Combined state was already mappable. */ + return; + dev_priv->mm.gtt_mappable_count++; + dev_priv->mm.gtt_mappable_memory += obj->size; + } else { + if (obj_priv->pin_mappable || obj_priv->fault_mappable) + /* Combined state still mappable. 
*/ + return; + dev_priv->mm.gtt_mappable_count--; + dev_priv->mm.gtt_mappable_memory -= obj->size; + } } static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv, - size_t size) + struct drm_gem_object *obj, + bool mappable) { + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); dev_priv->mm.pin_count++; - dev_priv->mm.pin_memory += size; + dev_priv->mm.pin_memory += obj->size; + if (mappable) { + obj_priv->pin_mappable = true; + i915_gem_info_update_mappable(dev_priv, obj, true); + } } static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv, - size_t size) + struct drm_gem_object *obj) { + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); dev_priv->mm.pin_count--; - dev_priv->mm.pin_memory -= size; + dev_priv->mm.pin_memory -= obj->size; + if (obj_priv->pin_mappable) { + obj_priv->pin_mappable = false; + i915_gem_info_update_mappable(dev_priv, obj, false); + } } int @@ -188,6 +240,7 @@ int i915_gem_do_init(struct drm_device *dev, end - start); dev_priv->mm.gtt_total = end - start; + dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; dev_priv->mm.gtt_mappable_end = mappable_end; return 0; @@ -1266,6 +1319,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Now bind it into the GTT if needed */ mutex_lock(&dev->struct_mutex); + BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable); if (!i915_gem_object_cpu_accessible(obj_priv)) i915_gem_object_unbind(obj); @@ -1279,6 +1333,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) goto unlock; } + if (!obj_priv->fault_mappable) { + obj_priv->fault_mappable = true; + i915_gem_info_update_mappable(dev_priv, obj, true); + } + /* Need a new fence register? */ if (obj_priv->tiling_mode != I915_TILING_NONE) { ret = i915_gem_object_get_fence_reg(obj, true); @@ -1396,11 +1455,17 @@ void i915_gem_release_mmap(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if (dev->dev_mapping) unmap_mapping_range(dev->dev_mapping, obj_priv->mmap_offset, obj->size, 1); + + if (obj_priv->fault_mappable) { + obj_priv->fault_mappable = false; + i915_gem_info_update_mappable(dev_priv, obj, false); + } } static void @@ -2177,7 +2242,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) i915_gem_object_put_pages(obj); BUG_ON(obj_priv->pages_refcount); - i915_gem_info_remove_gtt(dev_priv, obj->size); + i915_gem_info_remove_gtt(dev_priv, obj); list_del_init(&obj_priv->mm_list); drm_mm_put_block(obj_priv->gtt_space); @@ -2763,9 +2828,11 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, goto search_free; } + obj_priv->gtt_offset = obj_priv->gtt_space->start; + /* keep track of bounds object by adding it to the inactive list */ list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); - i915_gem_info_add_gtt(dev_priv, obj->size); + i915_gem_info_add_gtt(dev_priv, obj); /* Assert that the object is not currently in any GPU domain. 
As it * wasn't in the GTT, there shouldn't be any way it could have been in @@ -2774,7 +2841,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); - obj_priv->gtt_offset = obj_priv->gtt_space->start; trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, mappable); return 0; @@ -4107,11 +4173,12 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, * remove it from the inactive list */ if (obj_priv->pin_count == 1) { - i915_gem_info_add_pin(dev_priv, obj->size); + i915_gem_info_add_pin(dev_priv, obj, mappable); if (!obj_priv->active) list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); } + BUG_ON(!obj_priv->pin_mappable && mappable); WARN_ON(i915_verify_lists(dev)); return 0; @@ -4137,7 +4204,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) if (!obj_priv->active) list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); - i915_gem_info_remove_pin(dev_priv, obj->size); + i915_gem_info_remove_pin(dev_priv, obj); } WARN_ON(i915_verify_lists(dev)); } -- cgit v1.2.3 From da761a6edf5c12ba2e7566c5e6bc98899d46ff12 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 17:37:08 +0100 Subject: drm/i915: Bail early if we try to mmap an object too large to be mapped. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2eceb24bf54b..19ceb8cd0922 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1258,6 +1258,7 @@ int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_mmap *args = data; struct drm_gem_object *obj; loff_t offset; @@ -1270,6 +1271,11 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, if (obj == NULL) return -ENOENT; + if (obj->size > dev_priv->mm.gtt_mappable_end) { + drm_gem_object_unreference_unlocked(obj); + return -E2BIG; + } + offset = args->offset; down_write(¤t->mm->mmap_sem); @@ -1547,6 +1553,7 @@ int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_mmap_gtt *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; @@ -1566,6 +1573,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, } obj_priv = to_intel_bo(obj); + if (obj->size > dev_priv->mm.gtt_mappable_end) { + ret = -E2BIG; + goto unlock; + } + if (obj_priv->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to mmap a purgeable buffer\n"); ret = -EINVAL; -- cgit v1.2.3 From 71e9339c3389de6a685ecb26261a81b8a583aa1c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 18:46:52 +0100 Subject: drm/i915: Use the agp_size determined from the GTT This is the same value as before, but it just makes the code slightly more readable to use the local variable than converting the aperture size into bytes every time. 
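The equivalence being relied on, spelled out as a sketch (PAGE_SHIFT of 12 assumed; both helpers yield the aperture size in bytes):

    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* The AGP layer reports the aperture in MiB ... */
    static unsigned long agp_size_from_aper_mb(unsigned long aper_size_mb)
    {
            return aper_size_mb * 1024 * 1024;
    }

    /* ... while the GTT already knows the same quantity as a count of
     * mappable entries, i.e. pages. */
    static unsigned long agp_size_from_gtt(uint32_t gtt_mappable_entries)
    {
            return (unsigned long)gtt_mappable_entries << PAGE_SHIFT;
    }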
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 1bd37e34c77e..d8def48242c8 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1920,6 +1920,16 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (IS_GEN2(dev)) dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + dev_priv->mm.gtt = intel_gtt_get(); + if (!dev_priv->mm.gtt) { + DRM_ERROR("Failed to initialize GTT\n"); + ret = -ENODEV; + goto out_iomapfree; + } + + prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; + agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; + dev_priv->regs = ioremap(base, size); if (!dev_priv->regs) { DRM_ERROR("failed to map registers\n"); @@ -1928,8 +1938,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) } dev_priv->mm.gtt_mapping = - io_mapping_create_wc(dev->agp->base, - dev->agp->agp_info.aper_size * 1024*1024); + io_mapping_create_wc(dev->agp->base, agp_size); if (dev_priv->mm.gtt_mapping == NULL) { ret = -EIO; goto out_rmmap; @@ -1941,24 +1950,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) * MTRR if present. Even if a UC MTRR isn't present. */ dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, - dev->agp->agp_info.aper_size * - 1024 * 1024, + agp_size, MTRR_TYPE_WRCOMB, 1); if (dev_priv->mm.gtt_mtrr < 0) { DRM_INFO("MTRR allocation failed. Graphics " "performance may suffer.\n"); } - dev_priv->mm.gtt = intel_gtt_get(); - if (!dev_priv->mm.gtt) { - DRM_ERROR("Failed to initialize GTT\n"); - ret = -ENODEV; - goto out_iomapfree; - } - - prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; - agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; - /* The i915 workqueue is primarily used for batched retirement of * requests (and thus managing bo) once the task has been completed * by the GPU. i915_gem_retire_requests() is called directly when we -- cgit v1.2.3 From f406839f094ef24bb201c9574fdb9ce8e799a975 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 27 Oct 2010 20:36:41 +0100 Subject: drm/i915: Capture ERROR register on Sandybridge hangs This holds error state from the main graphics arbiter mainly involving the DMA engine and address translation. 
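The capture policy after this change, sketched outside the kernel; mmio_read32() is an assumed stand-in for I915_READ(), not a real kernel helper:

    #include <stdint.h>

    #define ERROR_GEN6 0x040a0 /* register offset added by this patch */

    static uint32_t mmio_read32(volatile void *regs, uint32_t reg)
    {
            return *(volatile uint32_t *)((volatile uint8_t *)regs + reg);
    }

    /* gen6+ additionally snapshots the arbiter ERROR register; earlier
     * parts have no such register, so the field stays zero. */
    static uint32_t capture_arbiter_error(volatile void *regs, int gen)
    {
            return gen >= 6 ? mmio_read32(regs, ERROR_GEN6) : 0;
    }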
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 3 +++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_irq.c | 18 +++++++++++------- drivers/gpu/drm/i915/i915_reg.h | 2 ++ 4 files changed, 17 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 2e5961139326..4fc1e05b769f 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -615,6 +615,9 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, error->time.tv_usec); seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); + if (INTEL_INFO(dev)->gen >= 6) { + seq_printf(m, "ERROR: 0x%08x\n", error->error); + } seq_printf(m, "EIR: 0x%08x\n", error->eir); seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cdae5d189165..895f9173de58 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -142,6 +142,7 @@ struct sdvo_device_mapping { struct drm_i915_error_state { u32 eir; u32 pgtbl_er; + u32 error; /* gen6+ */ u32 pipeastat; u32 pipebstat; u32 ipeir; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 29cbcb33b92c..2a29497a4694 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -592,13 +592,11 @@ static void i915_capture_error_state(struct drm_device *dev) error->pipeastat = I915_READ(PIPEASTAT); error->pipebstat = I915_READ(PIPEBSTAT); error->instpm = I915_READ(INSTPM); - if (INTEL_INFO(dev)->gen < 4) { - error->ipeir = I915_READ(IPEIR); - error->ipehr = I915_READ(IPEHR); - error->instdone = I915_READ(INSTDONE); - error->acthd = I915_READ(ACTHD); - error->bbaddr = 0; - } else { + error->error = 0; + if (INTEL_INFO(dev)->gen >= 6) { + error->error = I915_READ(ERROR_GEN6); + } + if (INTEL_INFO(dev)->gen >= 4) { error->ipeir = I915_READ(IPEIR_I965); error->ipehr = I915_READ(IPEHR_I965); error->instdone = I915_READ(INSTDONE_I965); @@ -606,6 +604,12 @@ static void i915_capture_error_state(struct drm_device *dev) error->instdone1 = I915_READ(INSTDONE1); error->acthd = I915_READ(ACTHD_I965); error->bbaddr = I915_READ64(BB_ADDR); + } else { + error->ipeir = I915_READ(IPEIR); + error->ipehr = I915_READ(IPEHR); + error->instdone = I915_READ(INSTDONE); + error->acthd = I915_READ(ACTHD); + error->bbaddr = 0; } bbaddr = i915_ringbuffer_last_batch(dev); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 25ed911a3112..fc161196da21 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -306,6 +306,8 @@ #define NOPID 0x02094 #define HWSTAM 0x02098 +#define ERROR_GEN6 0x040a0 + #define MI_MODE 0x0209c # define VS_TIMER_DISPATCH (1 << 6) # define MI_FLUSH_ENABLE (1 << 11) -- cgit v1.2.3 From b4ce0f85159f77f208a62930f67b4e548576a5a3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 28 Oct 2010 11:26:06 +0100 Subject: drm/i915: Use pci_iomap for remapping the MMIO registers. Play safe and use the common routines which take care of the cachability of the memory when setting up the iomapping for the PCI registers. Whilst they should be cacheable for the current generations, actually honouring what the device requires is a better long term strategy. 
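The pattern being adopted, in isolation (error handling elided; a maxlen of 0 asks pci_iomap() to map the whole BAR):

    #include <linux/pci.h>

    /* Let the PCI core pick the mapping attributes for the register BAR
     * instead of doing a raw ioremap() of the resource ourselves. */
    static void __iomem *map_mmio_regs(struct pci_dev *pdev, int is_gen2)
    {
            int mmio_bar = is_gen2 ? 1 : 0;

            return pci_iomap(pdev, mmio_bar, 0);
    }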
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index d8def48242c8..f8827c23a668 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1888,7 +1888,6 @@ EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); int i915_driver_load(struct drm_device *dev, unsigned long flags) { struct drm_i915_private *dev_priv; - resource_size_t base, size; int ret = 0, mmio_bar; uint32_t agp_size, prealloc_size; /* i915 has 4 more counters */ @@ -1906,11 +1905,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->dev = dev; dev_priv->info = (struct intel_device_info *) flags; - /* Add register map (needed for suspend/resume) */ - mmio_bar = IS_GEN2(dev) ? 1 : 0; - base = pci_resource_start(dev->pdev, mmio_bar); - size = pci_resource_len(dev->pdev, mmio_bar); - if (i915_get_bridge_dev(dev)) { ret = -EIO; goto free_priv; @@ -1920,6 +1914,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (IS_GEN2(dev)) dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + mmio_bar = IS_GEN2(dev) ? 1 : 0; + dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); + if (!dev_priv->regs) { + DRM_ERROR("failed to map registers\n"); + ret = -EIO; + goto put_bridge; + } + dev_priv->mm.gtt = intel_gtt_get(); if (!dev_priv->mm.gtt) { DRM_ERROR("Failed to initialize GTT\n"); @@ -1930,13 +1932,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; - dev_priv->regs = ioremap(base, size); - if (!dev_priv->regs) { - DRM_ERROR("failed to map registers\n"); - ret = -EIO; - goto put_bridge; - } - dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base, agp_size); if (dev_priv->mm.gtt_mapping == NULL) { -- cgit v1.2.3 From 176f28ebf4303b4f7e3a5bd8be7842a8bbecd9c3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 28 Oct 2010 11:18:07 +0100 Subject: drm/i915/ringbuffer: Check that we setup the ringbuffer Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 8eaa60cc5d25..e88214ef24b1 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -176,9 +176,10 @@ static int init_ring_common(struct intel_ring_buffer *ring) ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) | RING_NO_REPORT | RING_VALID); - head = I915_READ_HEAD(ring) & HEAD_ADDR; /* If the head is still not zero, the ring is dead */ - if (head != 0) { + if ((I915_READ_CTL(ring) & RING_VALID) == 0 || + I915_READ_START(ring) != obj_priv->gtt_offset || + (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { DRM_ERROR("%s initialization failed " "ctl %08x head %08x tail %08x start %08x\n", ring->name, -- cgit v1.2.3 From 17250b71553680bc6e927497aa619ab06ab1015b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 28 Oct 2010 12:51:39 +0100 Subject: drm/i915: Make the inactive object shrinker per-device Eliminate the racy device unload by embedding a shrinker into each device. Smaller, simpler code. 
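The embedded-shrinker shape, sketched with the shrinker API of this era; the structure and function names are illustrative, not part of the patch:

    #include <linux/kernel.h>
    #include <linux/mm.h>

    struct my_dev_private {
            struct shrinker inactive_shrinker;
            /* ... per-device lists and locks ... */
    };

    /* container_of() recovers the owning device, so no global device
     * list or lock is needed and unload is a plain unregister_shrinker(). */
    static int my_inactive_shrink(struct shrinker *shrinker,
                                  int nr_to_scan, gfp_t gfp_mask)
    {
            struct my_dev_private *priv =
                    container_of(shrinker, struct my_dev_private,
                                 inactive_shrinker);
            /* trylock, scan priv's inactive list, return an estimate */
            (void)priv;
            return 0;
    }

    static void my_dev_register_shrinker(struct my_dev_private *priv)
    {
            priv->inactive_shrinker.shrink = my_inactive_shrink;
            priv->inactive_shrinker.seeks = DEFAULT_SEEKS;
            register_shrinker(&priv->inactive_shrinker);
    }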
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 3 + drivers/gpu/drm/i915/i915_drv.c | 3 - drivers/gpu/drm/i915/i915_drv.h | 12 +-- drivers/gpu/drm/i915/i915_gem.c | 179 ++++++++++++---------------------------- 4 files changed, 56 insertions(+), 141 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f8827c23a668..dddca007912a 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2096,6 +2096,9 @@ int i915_driver_unload(struct drm_device *dev) i915_mch_dev = NULL; spin_unlock(&mchdev_lock); + if (dev_priv->mm.inactive_shrinker.shrink) + unregister_shrinker(&dev_priv->mm.inactive_shrinker); + mutex_lock(&dev->struct_mutex); ret = i915_gpu_idle(dev); if (ret) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 82c19ab3e1e2..d22aab94d645 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -660,8 +660,6 @@ static int __init i915_init(void) driver.num_ioctls = i915_max_ioctl; - i915_gem_shrinker_init(); - /* * If CONFIG_DRM_I915_KMS is set, default to KMS unless * explicitly disabled with the module pararmeter. @@ -693,7 +691,6 @@ static int __init i915_init(void) static void __exit i915_exit(void) { - i915_gem_shrinker_exit(); drm_exit(&driver); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 895f9173de58..29e8b7962edf 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -542,14 +542,7 @@ typedef struct drm_i915_private { struct io_mapping *gtt_mapping; int gtt_mtrr; - /** - * Membership on list of all loaded devices, used to evict - * inactive buffers under memory pressure. - * - * Modifications should only be done whilst holding the - * shrink_list_lock spinlock. - */ - struct list_head shrink_list; + struct shrinker inactive_shrinker; /** * List of objects currently involved in rendering. 
@@ -1079,9 +1072,6 @@ void i915_gem_detach_phys_object(struct drm_device *dev, void i915_gem_free_all_phys_object(struct drm_device *dev); void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); -void i915_gem_shrinker_init(void); -void i915_gem_shrinker_exit(void); - /* i915_gem_evict.c */ int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment, bool mappable); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 19ceb8cd0922..1af7285ca162 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -65,8 +65,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, static void i915_gem_object_put_pages(struct drm_gem_object *obj); -static LIST_HEAD(shrink_list); -static DEFINE_SPINLOCK(shrink_list_lock); +static int i915_gem_inactive_shrink(struct shrinker *shrinker, + int nr_to_scan, + gfp_t gfp_mask); + /* some bookkeeping */ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, @@ -4765,9 +4767,6 @@ i915_gem_load(struct drm_device *dev) INIT_DELAYED_WORK(&dev_priv->mm.retire_work, i915_gem_retire_work_handler); init_completion(&dev_priv->error_completion); - spin_lock(&shrink_list_lock); - list_add(&dev_priv->mm.shrink_list, &shrink_list); - spin_unlock(&shrink_list_lock); /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ if (IS_GEN3(dev)) { @@ -4810,6 +4809,10 @@ i915_gem_load(struct drm_device *dev) } i915_gem_detect_bit_6_swizzle(dev); init_waitqueue_head(&dev_priv->pending_flip_queue); + + dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink; + dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS; + register_shrinker(&dev_priv->mm.inactive_shrinker); } /* @@ -5022,152 +5025,74 @@ i915_gpu_is_active(struct drm_device *dev) int lists_empty; lists_empty = list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->render_ring.active_list) && - list_empty(&dev_priv->bsd_ring.active_list) && - list_empty(&dev_priv->blt_ring.active_list); + list_empty(&dev_priv->mm.active_list); return !lists_empty; } static int -i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) +i915_gem_inactive_shrink(struct shrinker *shrinker, + int nr_to_scan, + gfp_t gfp_mask) { - drm_i915_private_t *dev_priv, *next_dev; - struct drm_i915_gem_object *obj_priv, *next_obj; - int cnt = 0; - int would_deadlock = 1; + struct drm_i915_private *dev_priv = + container_of(shrinker, + struct drm_i915_private, + mm.inactive_shrinker); + struct drm_device *dev = dev_priv->dev; + struct drm_i915_gem_object *obj, *next; + int cnt; + + if (!mutex_trylock(&dev->struct_mutex)) + return nr_to_scan ? 0 : -1; /* "fast-path" to count number of available objects */ if (nr_to_scan == 0) { - spin_lock(&shrink_list_lock); - list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { - struct drm_device *dev = dev_priv->dev; - - if (mutex_trylock(&dev->struct_mutex)) { - list_for_each_entry(obj_priv, - &dev_priv->mm.inactive_list, - mm_list) - cnt++; - mutex_unlock(&dev->struct_mutex); - } - } - spin_unlock(&shrink_list_lock); - - return (cnt / 100) * sysctl_vfs_cache_pressure; + cnt = 0; + list_for_each_entry(obj, + &dev_priv->mm.inactive_list, + mm_list) + cnt++; + mutex_unlock(&dev->struct_mutex); + return cnt / 100 * sysctl_vfs_cache_pressure; } - spin_lock(&shrink_list_lock); - rescan: /* first scan for clean buffers */ - list_for_each_entry_safe(dev_priv, next_dev, - &shrink_list, mm.shrink_list) { - struct drm_device *dev = dev_priv->dev; - - if (! 
mutex_trylock(&dev->struct_mutex)) - continue; - - spin_unlock(&shrink_list_lock); - i915_gem_retire_requests(dev); + i915_gem_retire_requests(dev); - list_for_each_entry_safe(obj_priv, next_obj, - &dev_priv->mm.inactive_list, - mm_list) { - if (i915_gem_object_is_purgeable(obj_priv)) { - i915_gem_object_unbind(&obj_priv->base); - if (--nr_to_scan <= 0) - break; - } + list_for_each_entry_safe(obj, next, + &dev_priv->mm.inactive_list, + mm_list) { + if (i915_gem_object_is_purgeable(obj)) { + i915_gem_object_unbind(&obj->base); + if (--nr_to_scan == 0) + break; } - - spin_lock(&shrink_list_lock); - mutex_unlock(&dev->struct_mutex); - - would_deadlock = 0; - - if (nr_to_scan <= 0) - break; } /* second pass, evict/count anything still on the inactive list */ - list_for_each_entry_safe(dev_priv, next_dev, - &shrink_list, mm.shrink_list) { - struct drm_device *dev = dev_priv->dev; - - if (! mutex_trylock(&dev->struct_mutex)) - continue; - - spin_unlock(&shrink_list_lock); - - list_for_each_entry_safe(obj_priv, next_obj, - &dev_priv->mm.inactive_list, - mm_list) { - if (nr_to_scan > 0) { - i915_gem_object_unbind(&obj_priv->base); - nr_to_scan--; - } else - cnt++; - } - - spin_lock(&shrink_list_lock); - mutex_unlock(&dev->struct_mutex); - - would_deadlock = 0; - } - - if (nr_to_scan) { - int active = 0; - + cnt = 0; + list_for_each_entry_safe(obj, next, + &dev_priv->mm.inactive_list, + mm_list) { + if (nr_to_scan) { + i915_gem_object_unbind(&obj->base); + nr_to_scan--; + } else + cnt++; + } + + if (nr_to_scan && i915_gpu_is_active(dev)) { /* * We are desperate for pages, so as a last resort, wait * for the GPU to finish and discard whatever we can. * This has a dramatic impact to reduce the number of * OOM-killer events whilst running the GPU aggressively. */ - list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { - struct drm_device *dev = dev_priv->dev; - - if (!mutex_trylock(&dev->struct_mutex)) - continue; - - spin_unlock(&shrink_list_lock); - - if (i915_gpu_is_active(dev)) { - i915_gpu_idle(dev); - active++; - } - - spin_lock(&shrink_list_lock); - mutex_unlock(&dev->struct_mutex); - } - - if (active) + if (i915_gpu_idle(dev) == 0) goto rescan; } - - spin_unlock(&shrink_list_lock); - - if (would_deadlock) - return -1; - else if (cnt > 0) - return (cnt / 100) * sysctl_vfs_cache_pressure; - else - return 0; -} - -static struct shrinker shrinker = { - .shrink = i915_gem_shrink, - .seeks = DEFAULT_SEEKS, -}; - -__init void -i915_gem_shrinker_init(void) -{ - register_shrinker(&shrinker); -} - -__exit void -i915_gem_shrinker_exit(void) -{ - unregister_shrinker(&shrinker); + mutex_unlock(&dev->struct_mutex); + return cnt / 100 * sysctl_vfs_cache_pressure; } -- cgit v1.2.3 From 39a01d1fb63cf8ebc1a8cf436f5c0ba9657b55c6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 28 Oct 2010 13:03:06 +0100 Subject: drm/i915: Remove mmap_offset Since we rarely use the mmap_offset and it is easily computable from the obj->map_list.hash, remove it. 
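The redundancy being removed is a one-line identity. A sketch, assuming the usual 4KiB pages:

    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* The fake mmap(2) offset handed to userspace is fully determined by
     * the object's offset-hash key, so caching it per object buys nothing. */
    static uint64_t fake_mmap_offset(unsigned long hash_key)
    {
            return (uint64_t)hash_key << PAGE_SHIFT;
    }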
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 5 ----- drivers/gpu/drm/i915/i915_gem.c | 46 ++++++++++------------------------------- 2 files changed, 11 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 29e8b7962edf..1f7e8b3c8df0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -796,11 +796,6 @@ struct drm_i915_gem_object { /* Which ring is refering to is this object */ struct intel_ring_buffer *ring; - /** - * Fake offset for use by mmap(2) - */ - uint64_t mmap_offset; - /** Breadcrumb of last rendering to the buffer. */ uint32_t last_rendering_seqno; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1af7285ca162..4ade4e243379 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1392,7 +1392,6 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; struct drm_gem_mm *mm = dev->mm_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_map_list *list; struct drm_local_map *map; int ret = 0; @@ -1431,16 +1430,13 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) goto out_free_mm; } - /* By now we should be all set, any drm_mmap request on the offset - * below will get to our mmap & fault handler */ - obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT; - return 0; out_free_mm: drm_mm_put_block(list->file_offset_node); out_free_list: kfree(list->map); + list->map = NULL; return ret; } @@ -1466,9 +1462,10 @@ i915_gem_release_mmap(struct drm_gem_object *obj) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - if (dev->dev_mapping) + if (unlikely(obj->map_list.map && dev->dev_mapping)) unmap_mapping_range(dev->dev_mapping, - obj_priv->mmap_offset, obj->size, 1); + (loff_t)obj->map_list.hash.key<size, 1); if (obj_priv->fault_mappable) { obj_priv->fault_mappable = false; @@ -1480,24 +1477,13 @@ static void i915_gem_free_mmap_offset(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_gem_mm *mm = dev->mm_private; - struct drm_map_list *list; + struct drm_map_list *list = &obj->map_list; - list = &obj->map_list; drm_ht_remove_item(&mm->offset_hash, &list->hash); - - if (list->file_offset_node) { - drm_mm_put_block(list->file_offset_node); - list->file_offset_node = NULL; - } - - if (list->map) { - kfree(list->map); - list->map = NULL; - } - - obj_priv->mmap_offset = 0; + drm_mm_put_block(list->file_offset_node); + kfree(list->map); + list->map = NULL; } /** @@ -1586,23 +1572,13 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, goto out; } - if (!obj_priv->mmap_offset) { + if (!obj->map_list.map) { ret = i915_gem_create_mmap_offset(obj); if (ret) goto out; } - args->offset = obj_priv->mmap_offset; - - /* - * Pull it into the GTT so that we have a page list (makes the - * initial fault faster and any subsequent flushing possible). 
- */ - if (!obj_priv->agp_mem) { - ret = i915_gem_object_bind_to_gtt(obj, 0, true); - if (ret) - goto out; - } + args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; out: drm_gem_object_unreference(obj); @@ -4477,7 +4453,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj) return; } - if (obj_priv->mmap_offset) + if (obj->map_list.map) i915_gem_free_mmap_offset(obj); drm_gem_object_release(obj); -- cgit v1.2.3 From e5281ccd2e0049e2b9e8ce82449630d25082372d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 28 Oct 2010 13:45:36 +0100 Subject: drm/i915: Eliminate nested get/put pages By using read_cache_page() for individual pages during pwrite/pread we can eliminate an unnecessary large allocation (and immediate free) of obj->pages. Also this eliminates any potential nesting of get/put pages, simplifying the code and preparing the path for greater things. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 9 - drivers/gpu/drm/i915/i915_gem.c | 358 ++++++++++++++++++---------------------- 2 files changed, 163 insertions(+), 204 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1f7e8b3c8df0..3df8a624ddc9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -747,15 +747,6 @@ struct drm_i915_gem_object { */ unsigned int madv : 2; - /** - * Refcount for the pages array. With the current locking scheme, there - * are at most two concurrent users: Binding a bo to the gtt and - * pwrite/pread using physical addresses. So two bits for a maximum - * of two users are enough. - */ - unsigned int pages_refcount : 2; -#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3 - /** * Current tiling mode for the object. */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 4ade4e243379..abe6d901f95b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -58,13 +58,6 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o struct drm_file *file_priv); static void i915_gem_free_object_tail(struct drm_gem_object *obj); -static int -i915_gem_object_get_pages(struct drm_gem_object *obj, - gfp_t gfpmask); - -static void -i915_gem_object_put_pages(struct drm_gem_object *obj); - static int i915_gem_inactive_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask); @@ -326,22 +319,6 @@ i915_gem_object_cpu_accessible(struct drm_i915_gem_object *obj) obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; } -static inline int -fast_shmem_read(struct page **pages, - loff_t page_base, int page_offset, - char __user *data, - int length) -{ - char *vaddr; - int ret; - - vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]); - ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); - kunmap_atomic(vaddr); - - return ret; -} - static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) { drm_i915_private_t *dev_priv = obj->dev->dev_private; @@ -429,8 +406,9 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, struct drm_file *file_priv) { struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; ssize_t remain; - loff_t offset, page_base; + loff_t offset; char __user *user_data; int page_offset, page_length; @@ -441,21 +419,34 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, offset = args->offset; while (remain > 0) { + struct page *page; + 
char *vaddr; + int ret; + /* Operation in this page * - * page_base = page offset within aperture * page_offset = offset within page * page_length = bytes to copy for this page */ - page_base = (offset & ~(PAGE_SIZE-1)); page_offset = offset & (PAGE_SIZE-1); page_length = remain; if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; - if (fast_shmem_read(obj_priv->pages, - page_base, page_offset, - user_data, page_length)) + page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, + GFP_HIGHUSER | __GFP_RECLAIMABLE); + if (IS_ERR(page)) + return PTR_ERR(page); + + vaddr = kmap_atomic(page); + ret = __copy_to_user_inatomic(user_data, + vaddr + page_offset, + page_length); + kunmap_atomic(vaddr); + + mark_page_accessed(page); + page_cache_release(page); + if (ret) return -EFAULT; remain -= page_length; @@ -466,31 +457,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, return 0; } -static int -i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj) -{ - int ret; - - ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN); - - /* If we've insufficient memory to map in the pages, attempt - * to make some space by throwing out some old buffers. - */ - if (ret == -ENOMEM) { - struct drm_device *dev = obj->dev; - - ret = i915_gem_evict_something(dev, obj->size, - i915_gem_get_gtt_alignment(obj), - false); - if (ret) - return ret; - - ret = i915_gem_object_get_pages(obj, 0); - } - - return ret; -} - /** * This is the fallback shmem pread path, which allocates temporary storage * in kernel space to copy_to_user into outside of the struct_mutex, so we @@ -502,14 +468,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pread *args, struct drm_file *file_priv) { + struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct mm_struct *mm = current->mm; struct page **user_pages; ssize_t remain; loff_t offset, pinned_pages, i; loff_t first_data_page, last_data_page, num_pages; - int shmem_page_index, shmem_page_offset; - int data_page_index, data_page_offset; + int shmem_page_offset; + int data_page_index, data_page_offset; int page_length; int ret; uint64_t data_ptr = args->data_ptr; @@ -552,15 +519,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, offset = args->offset; while (remain > 0) { + struct page *page; + /* Operation in this page * - * shmem_page_index = page number within shmem file * shmem_page_offset = offset within page in shmem file * data_page_index = page number in get_user_pages return * data_page_offset = offset with data_page_index page. 
* page_length = bytes to copy for this page */ - shmem_page_index = offset / PAGE_SIZE; shmem_page_offset = offset & ~PAGE_MASK; data_page_index = data_ptr / PAGE_SIZE - first_data_page; data_page_offset = data_ptr & ~PAGE_MASK; @@ -571,8 +538,13 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, if ((data_page_offset + page_length) > PAGE_SIZE) page_length = PAGE_SIZE - data_page_offset; + page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, + GFP_HIGHUSER | __GFP_RECLAIMABLE); + if (IS_ERR(page)) + return PTR_ERR(page); + if (do_bit17_swizzling) { - slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], + slow_shmem_bit17_copy(page, shmem_page_offset, user_pages[data_page_index], data_page_offset, @@ -581,11 +553,14 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, } else { slow_shmem_copy(user_pages[data_page_index], data_page_offset, - obj_priv->pages[shmem_page_index], + page, shmem_page_offset, page_length); } + mark_page_accessed(page); + page_cache_release(page); + remain -= page_length; data_ptr += page_length; offset += page_length; @@ -594,6 +569,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, out: for (i = 0; i < pinned_pages; i++) { SetPageDirty(user_pages[i]); + mark_page_accessed(user_pages[i]); page_cache_release(user_pages[i]); } drm_free_large(user_pages); @@ -649,15 +625,11 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, goto out; } - ret = i915_gem_object_get_pages_or_evict(obj); - if (ret) - goto out; - ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, args->size); if (ret) - goto out_put; + goto out; ret = -EFAULT; if (!i915_gem_object_needs_bit17_swizzle(obj)) @@ -665,8 +637,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, if (ret == -EFAULT) ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); -out_put: - i915_gem_object_put_pages(obj); out: drm_gem_object_unreference(obj); unlock: @@ -718,22 +688,6 @@ slow_kernel_write(struct io_mapping *mapping, io_mapping_unmap(dst_vaddr); } -static inline int -fast_shmem_write(struct page **pages, - loff_t page_base, int page_offset, - char __user *data, - int length) -{ - char *vaddr; - int ret; - - vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]); - ret = __copy_from_user_inatomic(vaddr + page_offset, data, length); - kunmap_atomic(vaddr); - - return ret; -} - /** * This is the fast pwrite path, where we copy the data directly from the * user into the GTT, uncached. 
@@ -890,9 +844,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv) { + struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); ssize_t remain; - loff_t offset, page_base; + loff_t offset; char __user *user_data; int page_offset, page_length; @@ -904,21 +859,40 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, obj_priv->dirty = 1; while (remain > 0) { + struct page *page; + char *vaddr; + int ret; + /* Operation in this page * - * page_base = page offset within aperture * page_offset = offset within page * page_length = bytes to copy for this page */ - page_base = (offset & ~(PAGE_SIZE-1)); page_offset = offset & (PAGE_SIZE-1); page_length = remain; if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; - if (fast_shmem_write(obj_priv->pages, - page_base, page_offset, - user_data, page_length)) + page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, + GFP_HIGHUSER | __GFP_RECLAIMABLE); + if (IS_ERR(page)) + return PTR_ERR(page); + + vaddr = kmap_atomic(page, KM_USER0); + ret = __copy_from_user_inatomic(vaddr + page_offset, + user_data, + page_length); + kunmap_atomic(vaddr, KM_USER0); + + set_page_dirty(page); + mark_page_accessed(page); + page_cache_release(page); + + /* If we get a fault while copying data, then (presumably) our + * source page isn't available. Return the error and we'll + * retry in the slow path. + */ + if (ret) return -EFAULT; remain -= page_length; @@ -941,13 +915,14 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv) { + struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct mm_struct *mm = current->mm; struct page **user_pages; ssize_t remain; loff_t offset, pinned_pages, i; loff_t first_data_page, last_data_page, num_pages; - int shmem_page_index, shmem_page_offset; + int shmem_page_offset; int data_page_index, data_page_offset; int page_length; int ret; @@ -990,15 +965,15 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, obj_priv->dirty = 1; while (remain > 0) { + struct page *page; + /* Operation in this page * - * shmem_page_index = page number within shmem file * shmem_page_offset = offset within page in shmem file * data_page_index = page number in get_user_pages return * data_page_offset = offset with data_page_index page. 
* page_length = bytes to copy for this page */ - shmem_page_index = offset / PAGE_SIZE; shmem_page_offset = offset & ~PAGE_MASK; data_page_index = data_ptr / PAGE_SIZE - first_data_page; data_page_offset = data_ptr & ~PAGE_MASK; @@ -1009,21 +984,32 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, if ((data_page_offset + page_length) > PAGE_SIZE) page_length = PAGE_SIZE - data_page_offset; + page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, + GFP_HIGHUSER | __GFP_RECLAIMABLE); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + goto out; + } + if (do_bit17_swizzling) { - slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], + slow_shmem_bit17_copy(page, shmem_page_offset, user_pages[data_page_index], data_page_offset, page_length, 0); } else { - slow_shmem_copy(obj_priv->pages[shmem_page_index], + slow_shmem_copy(page, shmem_page_offset, user_pages[data_page_index], data_page_offset, page_length); } + set_page_dirty(page); + mark_page_accessed(page); + page_cache_release(page); + remain -= page_length; data_ptr += page_length; offset += page_length; @@ -1112,22 +1098,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, out_unpin: i915_gem_object_unpin(obj); } else { - ret = i915_gem_object_get_pages_or_evict(obj); - if (ret) - goto out; - ret = i915_gem_object_set_to_cpu_domain(obj, 1); if (ret) - goto out_put; + goto out; ret = -EFAULT; if (!i915_gem_object_needs_bit17_swizzle(obj)) ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); if (ret == -EFAULT) ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); - -out_put: - i915_gem_object_put_pages(obj); } out: @@ -1587,19 +1566,62 @@ unlock: return ret; } +static int +i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, + gfp_t gfpmask) +{ + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int page_count, i; + struct address_space *mapping; + struct inode *inode; + struct page *page; + + /* Get the list of pages out of our struct file. They'll be pinned + * at this point until we release them. 
+ */ + page_count = obj->size / PAGE_SIZE; + BUG_ON(obj_priv->pages != NULL); + obj_priv->pages = drm_malloc_ab(page_count, sizeof(struct page *)); + if (obj_priv->pages == NULL) + return -ENOMEM; + + inode = obj->filp->f_path.dentry->d_inode; + mapping = inode->i_mapping; + for (i = 0; i < page_count; i++) { + page = read_cache_page_gfp(mapping, i, + GFP_HIGHUSER | + __GFP_COLD | + __GFP_RECLAIMABLE | + gfpmask); + if (IS_ERR(page)) + goto err_pages; + + obj_priv->pages[i] = page; + } + + if (obj_priv->tiling_mode != I915_TILING_NONE) + i915_gem_object_do_bit_17_swizzle(obj); + + return 0; + +err_pages: + while (i--) + page_cache_release(obj_priv->pages[i]); + + drm_free_large(obj_priv->pages); + obj_priv->pages = NULL; + return PTR_ERR(page); +} + static void -i915_gem_object_put_pages(struct drm_gem_object *obj) +i915_gem_object_put_pages_gtt(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page_count = obj->size / PAGE_SIZE; int i; - BUG_ON(obj_priv->pages_refcount == 0); BUG_ON(obj_priv->madv == __I915_MADV_PURGED); - if (--obj_priv->pages_refcount != 0) - return; - if (obj_priv->tiling_mode != I915_TILING_NONE) i915_gem_object_save_bit_17_swizzle(obj); @@ -2229,8 +2251,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) drm_unbind_agp(obj_priv->agp_mem); drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); - i915_gem_object_put_pages(obj); - BUG_ON(obj_priv->pages_refcount); + i915_gem_object_put_pages_gtt(obj); i915_gem_info_remove_gtt(dev_priv, obj); list_del_init(&obj_priv->mm_list); @@ -2290,62 +2311,6 @@ i915_gpu_idle(struct drm_device *dev) return 0; } -static int -i915_gem_object_get_pages(struct drm_gem_object *obj, - gfp_t gfpmask) -{ - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int page_count, i; - struct address_space *mapping; - struct inode *inode; - struct page *page; - - BUG_ON(obj_priv->pages_refcount - == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT); - - if (obj_priv->pages_refcount++ != 0) - return 0; - - /* Get the list of pages out of our struct file. They'll be pinned - * at this point until we release them. 
- */ - page_count = obj->size / PAGE_SIZE; - BUG_ON(obj_priv->pages != NULL); - obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *)); - if (obj_priv->pages == NULL) { - obj_priv->pages_refcount--; - return -ENOMEM; - } - - inode = obj->filp->f_path.dentry->d_inode; - mapping = inode->i_mapping; - for (i = 0; i < page_count; i++) { - page = read_cache_page_gfp(mapping, i, - GFP_HIGHUSER | - __GFP_COLD | - __GFP_RECLAIMABLE | - gfpmask); - if (IS_ERR(page)) - goto err_pages; - - obj_priv->pages[i] = page; - } - - if (obj_priv->tiling_mode != I915_TILING_NONE) - i915_gem_object_do_bit_17_swizzle(obj); - - return 0; - -err_pages: - while (i--) - page_cache_release(obj_priv->pages[i]); - - drm_free_large(obj_priv->pages); - obj_priv->pages = NULL; - obj_priv->pages_refcount--; - return PTR_ERR(page); -} - static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) { struct drm_gem_object *obj = reg->obj; @@ -2772,7 +2737,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, goto search_free; } - ret = i915_gem_object_get_pages(obj, gfpmask); + ret = i915_gem_object_get_pages_gtt(obj, gfpmask); if (ret) { drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; @@ -2806,7 +2771,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, obj_priv->gtt_space->start, obj_priv->agp_type); if (obj_priv->agp_mem == NULL) { - i915_gem_object_put_pages(obj); + i915_gem_object_put_pages_gtt(obj); drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; @@ -4860,33 +4825,35 @@ void i915_gem_free_all_phys_object(struct drm_device *dev) void i915_gem_detach_phys_object(struct drm_device *dev, struct drm_gem_object *obj) { - struct drm_i915_gem_object *obj_priv; + struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + char *vaddr; int i; - int ret; int page_count; - obj_priv = to_intel_bo(obj); if (!obj_priv->phys_obj) return; - - ret = i915_gem_object_get_pages(obj, 0); - if (ret) - goto out; + vaddr = obj_priv->phys_obj->handle->vaddr; page_count = obj->size / PAGE_SIZE; for (i = 0; i < page_count; i++) { - char *dst = kmap_atomic(obj_priv->pages[i]); - char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); - - memcpy(dst, src, PAGE_SIZE); - kunmap_atomic(dst); + struct page *page = read_cache_page_gfp(mapping, i, + GFP_HIGHUSER | __GFP_RECLAIMABLE); + if (!IS_ERR(page)) { + char *dst = kmap_atomic(page); + memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); + kunmap_atomic(dst); + + drm_clflush_pages(&page, 1); + + set_page_dirty(page); + mark_page_accessed(page); + page_cache_release(page); + } } - drm_clflush_pages(obj_priv->pages, page_count); drm_agp_chipset_flush(dev); - i915_gem_object_put_pages(obj); -out: obj_priv->phys_obj->cur_obj = NULL; obj_priv->phys_obj = NULL; } @@ -4897,6 +4864,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, int id, int align) { + struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv; int ret = 0; @@ -4920,7 +4888,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, obj->size, align); if (ret) { DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); - goto out; + return ret; } } @@ -4928,27 +4896,27 @@ i915_gem_attach_phys_object(struct drm_device *dev, obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; obj_priv->phys_obj->cur_obj = obj; - ret = i915_gem_object_get_pages(obj, 0); - if (ret) { 
- DRM_ERROR("failed to get page list\n"); - goto out; - } - page_count = obj->size / PAGE_SIZE; for (i = 0; i < page_count; i++) { - char *src = kmap_atomic(obj_priv->pages[i]); - char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); + struct page *page; + char *dst, *src; + + page = read_cache_page_gfp(mapping, i, + GFP_HIGHUSER | __GFP_RECLAIMABLE); + if (IS_ERR(page)) + return PTR_ERR(page); + src = kmap_atomic(obj_priv->pages[i]); + dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); memcpy(dst, src, PAGE_SIZE); kunmap_atomic(src); - } - i915_gem_object_put_pages(obj); + mark_page_accessed(page); + page_cache_release(page); + } return 0; -out: - return ret; } static int -- cgit v1.2.3 From 4a684a4117abd756291969336af454e8a958802f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 28 Oct 2010 14:44:08 +0100 Subject: drm/i915: Kill GTT mappings when moving from GTT domain In order to force a page-fault on a GTT mapping after we start using it from the GPU and so enforce correct CPU/GPU synchronisation, we need to invalidate the mapping. Pointed out by Owain G. Ainsworth. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index abe6d901f95b..d4d8f888db85 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1314,12 +1314,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ret = i915_gem_object_bind_to_gtt(obj, 0, true); if (ret) goto unlock; - - ret = i915_gem_object_set_to_gtt_domain(obj, write); - if (ret) - goto unlock; } + ret = i915_gem_object_set_to_gtt_domain(obj, write); + if (ret) + goto unlock; + if (!obj_priv->fault_mappable) { obj_priv->fault_mappable = true; i915_gem_info_update_mappable(dev_priv, obj, true); @@ -2859,6 +2859,8 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) * to it immediately go to main memory as far as we know, so there's * no chipset flush. It also doesn't land in render cache. */ + i915_gem_release_mmap(obj); + old_write_domain = obj->write_domain; obj->write_domain = 0; @@ -3183,6 +3185,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) i915_gem_clflush_object(obj); + /* blow away mappings if mapped through GTT */ + if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT) + i915_gem_release_mmap(obj); + /* The actual obj->write_domain will be updated with * pending_write_domain after we emit the accumulated flush for all * of our domain changes in execbuffers (which clears objects' -- cgit v1.2.3 From bbe2e11a4b683ead070ad5823cdf756e1525b903 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 28 Oct 2010 22:35:07 +0100 Subject: drm/i915: Do not return -1 from shrinker when nr_to_scan == 0 The error code is only expected during the actual pruning and not during the first measurement (nr_to_scan == 0) pass. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d4d8f888db85..7569b77fb403 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4994,7 +4994,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, int cnt; if (!mutex_trylock(&dev->struct_mutex)) - return nr_to_scan ? 
0 : -1; + return 0; /* "fast-path" to count number of available objects */ if (nr_to_scan == 0) { -- cgit v1.2.3 From 7465378fd7c681f6cf2b74b3494c4f0991d8c8ac Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 29 Oct 2010 10:41:23 +0100 Subject: drm/i915: Convert BUG_ON(pin_count) from an impossible condition Also spotted by Dan Carpenter. obj->pin_count is unsigned so the BUG_ON(obj->pin_count<0) will never trigger. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7569b77fb403..08f57aedaf51 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4128,12 +4128,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, return ret; } - obj_priv->pin_count++; - - /* If the object is not active and not pending a flush, - * remove it from the inactive list - */ - if (obj_priv->pin_count == 1) { + if (obj_priv->pin_count++ == 0) { i915_gem_info_add_pin(dev_priv, obj, mappable); if (!obj_priv->active) list_move_tail(&obj_priv->mm_list, @@ -4153,15 +4148,10 @@ i915_gem_object_unpin(struct drm_gem_object *obj) struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); WARN_ON(i915_verify_lists(dev)); - obj_priv->pin_count--; - BUG_ON(obj_priv->pin_count < 0); + BUG_ON(obj_priv->pin_count == 0); BUG_ON(obj_priv->gtt_space == NULL); - /* If the object is no longer pinned, and is - * neither active nor being flushed, then stick it on - * the inactive list - */ - if (obj_priv->pin_count == 0) { + if (--obj_priv->pin_count == 0) { if (!obj_priv->active) list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); -- cgit v1.2.3 From a00b10c360b35d6431a94cbf130a4e162870d661 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 24 Sep 2010 21:15:47 +0100 Subject: drm/i915: Only enforce fence limits inside the GTT. So long as we adhere to the fence register rules for alignment and no overlaps (including with unfenced accesses to linear memory) and account for the tiled access in our size allocation, we do not have to allocate the full fenced region for the object. This allows us to fight the bloat that tiling imposes on pre-i965 chipsets and frees up RAM for real use. [Inside the GTT we still suffer the additional alignment constraints, so it doesn't magically allow us to render larger scenes without stalls -- we need the expanded GTT and fence pipelining to overcome those...]
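To make the sizing rule concrete, here is a minimal sketch of the fence-region calculation this patch relies on (it mirrors the i915_gem_get_gtt_size() helper introduced in the diff below; illustrative only):

static u32 fence_region_size(struct drm_i915_gem_object *obj_priv)
{
	u32 size;

	/* i965+ fences operate at page granularity, so no padding is needed */
	if (INTEL_INFO(obj_priv->base.dev)->gen >= 4)
		return obj_priv->base.size;

	/* pre-i965 fences cover a power-of-two region: at least 1MiB on
	 * gen3 and 512KiB before that, rounded up to contain the object */
	size = INTEL_INFO(obj_priv->base.dev)->gen == 3 ? 1024*1024 : 512*1024;
	while (size < obj_priv->base.size)
		size <<= 1;

	return size;
}

An object that does not need a fence can then be bound with just obj->size and 4KiB alignment rather than the full power-of-two region; that difference is the RAM this patch reclaims.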
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 3 +- drivers/gpu/drm/i915/i915_dma.c | 3 + drivers/gpu/drm/i915/i915_drv.h | 8 +- drivers/gpu/drm/i915/i915_gem.c | 290 +++++++++++++++++--------------- drivers/gpu/drm/i915/i915_gem_tiling.c | 32 ++-- drivers/gpu/drm/i915/intel_display.c | 7 +- drivers/gpu/drm/i915/intel_overlay.c | 4 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 4 +- include/drm/i915_drm.h | 1 + 9 files changed, 197 insertions(+), 155 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4fc1e05b769f..ba2af4e046ed 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -130,7 +130,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) if (obj->fence_reg != I915_FENCE_REG_NONE) seq_printf(m, " (fence: %d)", obj->fence_reg); if (obj->gtt_space != NULL) - seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset); + seq_printf(m, " (gtt offset: %08x, size: %08x)", + obj->gtt_offset, (unsigned int)obj->gtt_space->size); if (obj->pin_mappable || obj->fault_mappable) seq_printf(m, " (mappable)"); if (obj->ring != NULL) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index dddca007912a..00d8fb3e989f 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -770,6 +770,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_BLT: value = HAS_BLT(dev); break; + case I915_PARAM_HAS_RELAXED_FENCING: + value = 1; + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3df8a624ddc9..7aa7f8abf892 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -746,6 +746,8 @@ struct drm_i915_gem_object { * Advice: are the backing pages purgeable? */ unsigned int madv : 2; + unsigned int fenceable : 1; + unsigned int mappable : 1; /** * Current tiling mode for the object. 
@@ -1005,7 +1007,7 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, size_t size); void i915_gem_free_object(struct drm_gem_object *obj); int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, - bool mappable); + bool mappable, bool need_fence); void i915_gem_object_unpin(struct drm_gem_object *obj); int i915_gem_object_unbind(struct drm_gem_object *obj); void i915_gem_release_mmap(struct drm_gem_object *obj); @@ -1068,10 +1070,6 @@ int i915_gem_evict_inactive(struct drm_device *dev); void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); -bool i915_tiling_ok(struct drm_device *dev, int stride, int size, - int tiling_mode); -bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, - int tiling_mode); /* i915_gem_debug.c */ void i915_gem_dump_object(struct drm_gem_object *obj, int len, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 08f57aedaf51..07ad1e354084 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -36,7 +36,8 @@ #include #include -static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); +static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv); +static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv); static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, bool pipelined); @@ -51,7 +52,9 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, bool interruptible); static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, - unsigned alignment, bool mappable); + unsigned alignment, + bool mappable, + bool need_fence); static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -79,30 +82,26 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, } static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv, - struct drm_gem_object *obj) + struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); dev_priv->mm.gtt_count++; - dev_priv->mm.gtt_memory += obj->size; - if (obj_priv->gtt_offset < dev_priv->mm.gtt_mappable_end) { + dev_priv->mm.gtt_memory += obj->gtt_space->size; + if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) { dev_priv->mm.mappable_gtt_used += - min_t(size_t, obj->size, - dev_priv->mm.gtt_mappable_end - - obj_priv->gtt_offset); + min_t(size_t, obj->gtt_space->size, + dev_priv->mm.gtt_mappable_end - obj->gtt_offset); } } static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv, - struct drm_gem_object *obj) + struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); dev_priv->mm.gtt_count--; - dev_priv->mm.gtt_memory -= obj->size; - if (obj_priv->gtt_offset < dev_priv->mm.gtt_mappable_end) { + dev_priv->mm.gtt_memory -= obj->gtt_space->size; + if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) { dev_priv->mm.mappable_gtt_used -= - min_t(size_t, obj->size, - dev_priv->mm.gtt_mappable_end - - obj_priv->gtt_offset); + min_t(size_t, obj->gtt_space->size, + dev_priv->mm.gtt_mappable_end - obj->gtt_offset); } } @@ -113,47 +112,43 @@ static void i915_gem_info_remove_gtt(struct drm_i915_private 
*dev_priv, */ static void i915_gem_info_update_mappable(struct drm_i915_private *dev_priv, - struct drm_gem_object *obj, + struct drm_i915_gem_object *obj, bool mappable) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - if (mappable) { - if (obj_priv->pin_mappable && obj_priv->fault_mappable) + if (obj->pin_mappable && obj->fault_mappable) /* Combined state was already mappable. */ return; dev_priv->mm.gtt_mappable_count++; - dev_priv->mm.gtt_mappable_memory += obj->size; + dev_priv->mm.gtt_mappable_memory += obj->gtt_space->size; } else { - if (obj_priv->pin_mappable || obj_priv->fault_mappable) + if (obj->pin_mappable || obj->fault_mappable) /* Combined state still mappable. */ return; dev_priv->mm.gtt_mappable_count--; - dev_priv->mm.gtt_mappable_memory -= obj->size; + dev_priv->mm.gtt_mappable_memory -= obj->gtt_space->size; } } static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv, - struct drm_gem_object *obj, + struct drm_i915_gem_object *obj, bool mappable) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); dev_priv->mm.pin_count++; - dev_priv->mm.pin_memory += obj->size; + dev_priv->mm.pin_memory += obj->gtt_space->size; if (mappable) { - obj_priv->pin_mappable = true; + obj->pin_mappable = true; i915_gem_info_update_mappable(dev_priv, obj, true); } } static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv, - struct drm_gem_object *obj) + struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); dev_priv->mm.pin_count--; - dev_priv->mm.pin_memory -= obj->size; - if (obj_priv->pin_mappable) { - obj_priv->pin_mappable = false; + dev_priv->mm.pin_memory -= obj->gtt_space->size; + if (obj->pin_mappable) { + obj->pin_mappable = false; i915_gem_info_update_mappable(dev_priv, obj, false); } } @@ -309,16 +304,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return 0; } -static bool -i915_gem_object_cpu_accessible(struct drm_i915_gem_object *obj) -{ - struct drm_device *dev = obj->base.dev; - drm_i915_private_t *dev_priv = dev->dev_private; - - return obj->gtt_space == NULL || - obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; -} - static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) { drm_i915_private_t *dev_priv = obj->dev->dev_private; @@ -1083,7 +1068,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, else if (obj_priv->tiling_mode == I915_TILING_NONE && obj_priv->gtt_space && obj->write_domain != I915_GEM_DOMAIN_CPU) { - ret = i915_gem_object_pin(obj, 0, true); + ret = i915_gem_object_pin(obj, 0, true, false); if (ret) goto out; @@ -1307,11 +1292,19 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Now bind it into the GTT if needed */ mutex_lock(&dev->struct_mutex); BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable); - if (!i915_gem_object_cpu_accessible(obj_priv)) - i915_gem_object_unbind(obj); + + if (obj_priv->gtt_space) { + if (!obj_priv->mappable || + (obj_priv->tiling_mode && !obj_priv->fenceable)) { + ret = i915_gem_object_unbind(obj); + if (ret) + goto unlock; + } + } if (!obj_priv->gtt_space) { - ret = i915_gem_object_bind_to_gtt(obj, 0, true); + ret = i915_gem_object_bind_to_gtt(obj, 0, + true, obj_priv->tiling_mode); if (ret) goto unlock; } @@ -1322,7 +1315,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (!obj_priv->fault_mappable) { obj_priv->fault_mappable = true; - i915_gem_info_update_mappable(dev_priv, obj, true); + i915_gem_info_update_mappable(dev_priv, 
obj_priv, true); } /* Need a new fence register? */ @@ -1448,7 +1441,7 @@ i915_gem_release_mmap(struct drm_gem_object *obj) if (obj_priv->fault_mappable) { obj_priv->fault_mappable = false; - i915_gem_info_update_mappable(dev_priv, obj, false); + i915_gem_info_update_mappable(dev_priv, obj_priv, false); } } @@ -1473,32 +1466,51 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj) * potential fence register mapping if needed. */ static uint32_t -i915_gem_get_gtt_alignment(struct drm_gem_object *obj) +i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv) { - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int start, i; + struct drm_device *dev = obj_priv->base.dev; /* * Minimum alignment is 4k (GTT page size), but might be greater * if a fence register is needed for the object. */ - if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE) + if (INTEL_INFO(dev)->gen >= 4 || + obj_priv->tiling_mode == I915_TILING_NONE) return 4096; + /* + * Previous chips need to be aligned to the size of the smallest + * fence register that can contain the object. + */ + return i915_gem_get_gtt_size(obj_priv); +} + +static uint32_t +i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) +{ + struct drm_device *dev = obj_priv->base.dev; + uint32_t size; + + /* + * Minimum alignment is 4k (GTT page size), but might be greater + * if a fence register is needed for the object. + */ + if (INTEL_INFO(dev)->gen >= 4) + return obj_priv->base.size; + /* * Previous chips need to be aligned to the size of the smallest * fence register that can contain the object. */ if (INTEL_INFO(dev)->gen == 3) - start = 1024*1024; + size = 1024*1024; else - start = 512*1024; + size = 512*1024; - for (i = start; i < obj->size; i <<= 1) - ; + while (size < obj_priv->base.size) + size <<= 1; - return i; + return size; } /** @@ -2253,8 +2265,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj) i915_gem_object_put_pages_gtt(obj); - i915_gem_info_remove_gtt(dev_priv, obj); + i915_gem_info_remove_gtt(dev_priv, obj_priv); list_del_init(&obj_priv->mm_list); + obj_priv->fenceable = true; + obj_priv->mappable = true; drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; @@ -2311,16 +2325,16 @@ i915_gpu_idle(struct drm_device *dev) return 0; } -static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) +static void sandybridge_write_fence_reg(struct drm_gem_object *obj) { - struct drm_gem_object *obj = reg->obj; struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + u32 size = i915_gem_get_gtt_size(obj_priv); int regnum = obj_priv->fence_reg; uint64_t val; - val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & + val = (uint64_t)((obj_priv->gtt_offset + size - 4096) & 0xfffff000) << 32; val |= obj_priv->gtt_offset & 0xfffff000; val |= (uint64_t)((obj_priv->stride / 128) - 1) << @@ -2333,16 +2347,16 @@ static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); } -static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) +static void i965_write_fence_reg(struct drm_gem_object *obj) { - struct drm_gem_object *obj = reg->obj; struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + u32 size = i915_gem_get_gtt_size(obj_priv); int regnum = obj_priv->fence_reg; uint64_t val; - val = 
(uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & + val = (uint64_t)((obj_priv->gtt_offset + size - 4096) & 0xfffff000) << 32; val |= obj_priv->gtt_offset & 0xfffff000; val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; @@ -2353,21 +2367,20 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); } -static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) +static void i915_write_fence_reg(struct drm_gem_object *obj) { - struct drm_gem_object *obj = reg->obj; struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int regnum = obj_priv->fence_reg; + u32 size = i915_gem_get_gtt_size(obj_priv); + uint32_t fence_reg, val, pitch_val; int tile_width; - uint32_t fence_reg, val; - uint32_t pitch_val; if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || - (obj_priv->gtt_offset & (obj->size - 1))) { - WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n", - __func__, obj_priv->gtt_offset, obj->size); + (obj_priv->gtt_offset & (size - 1))) { + WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n", + __func__, obj_priv->gtt_offset, obj_priv->fenceable, size, + obj_priv->gtt_space->start, obj_priv->gtt_space->size); return; } @@ -2390,23 +2403,24 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) val = obj_priv->gtt_offset; if (obj_priv->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; - val |= I915_FENCE_SIZE_BITS(obj->size); + val |= I915_FENCE_SIZE_BITS(size); val |= pitch_val << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; - if (regnum < 8) - fence_reg = FENCE_REG_830_0 + (regnum * 4); + fence_reg = obj_priv->fence_reg; + if (fence_reg < 8) + fence_reg = FENCE_REG_830_0 + fence_reg * 4; else - fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4); + fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; I915_WRITE(fence_reg, val); } -static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) +static void i830_write_fence_reg(struct drm_gem_object *obj) { - struct drm_gem_object *obj = reg->obj; struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + u32 size = i915_gem_get_gtt_size(obj_priv); int regnum = obj_priv->fence_reg; uint32_t val; uint32_t pitch_val; @@ -2426,7 +2440,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) val = obj_priv->gtt_offset; if (obj_priv->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; - fence_size_bits = I830_FENCE_SIZE_BITS(obj->size); + fence_size_bits = I830_FENCE_SIZE_BITS(size); WARN_ON(fence_size_bits & ~0x00000f00); val |= fence_size_bits; val |= pitch_val << I830_FENCE_PITCH_SHIFT; @@ -2438,10 +2452,9 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) static int i915_find_fence_reg(struct drm_device *dev, bool interruptible) { - struct drm_i915_fence_reg *reg = NULL; - struct drm_i915_gem_object *obj_priv = NULL; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_gem_object *obj = NULL; + struct drm_i915_fence_reg *reg; + struct drm_i915_gem_object *obj_priv = NULL; int i, avail, ret; /* First try to find a free reg */ @@ -2460,33 +2473,31 @@ static int i915_find_fence_reg(struct drm_device *dev, return -ENOSPC; /* None available, try to steal one or wait for a user to finish */ - i = I915_FENCE_REG_NONE; + avail = 
I915_FENCE_REG_NONE; list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { - obj = reg->obj; - obj_priv = to_intel_bo(obj); - + obj_priv = to_intel_bo(reg->obj); if (obj_priv->pin_count) continue; /* found one! */ - i = obj_priv->fence_reg; + avail = obj_priv->fence_reg; break; } - BUG_ON(i == I915_FENCE_REG_NONE); + BUG_ON(avail == I915_FENCE_REG_NONE); /* We only have a reference on obj from the active list. put_fence_reg * might drop that one, causing a use-after-free in it. So hold a * private reference to obj like the other callers of put_fence_reg * (set_tiling ioctl) do. */ - drm_gem_object_reference(obj); - ret = i915_gem_object_put_fence_reg(obj, interruptible); - drm_gem_object_unreference(obj); + drm_gem_object_reference(&obj_priv->base); + ret = i915_gem_object_put_fence_reg(&obj_priv->base, interruptible); + drm_gem_object_unreference(&obj_priv->base); if (ret != 0) return ret; - return i; + return avail; } /** @@ -2551,22 +2562,23 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, switch (INTEL_INFO(dev)->gen) { case 6: - sandybridge_write_fence_reg(reg); + sandybridge_write_fence_reg(obj); break; case 5: case 4: - i965_write_fence_reg(reg); + i965_write_fence_reg(obj); break; case 3: - i915_write_fence_reg(reg); + i915_write_fence_reg(obj); break; case 2: - i830_write_fence_reg(reg); + i830_write_fence_reg(obj); break; } - trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, - obj_priv->tiling_mode); + trace_i915_gem_object_get_fence(obj, + obj_priv->fence_reg, + obj_priv->tiling_mode); return 0; } @@ -2671,13 +2683,15 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment, - bool mappable) + bool mappable, + bool need_fence) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_mm_node *free_space; - gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; + gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; + u32 size, fence_size, fence_alignment; int ret; if (obj_priv->madv != I915_MADV_WILLNEED) { @@ -2685,13 +2699,18 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, return -EINVAL; } + fence_size = i915_gem_get_gtt_size(obj_priv); + fence_alignment = i915_gem_get_gtt_alignment(obj_priv); + if (alignment == 0) - alignment = i915_gem_get_gtt_alignment(obj); - if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { + alignment = need_fence ? fence_alignment : 4096; + if (need_fence && alignment & (fence_alignment - 1)) { DRM_ERROR("Invalid object alignment requested %u\n", alignment); return -EINVAL; } + size = need_fence ? fence_size : obj->size; + /* If the object is bigger than the entire aperture, reject it early * before evicting everything in a vain attempt to find space. 
*/ @@ -2705,32 +2724,29 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, if (mappable) free_space = drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, - obj->size, alignment, 0, + size, alignment, 0, dev_priv->mm.gtt_mappable_end, 0); else free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, - obj->size, alignment, 0); + size, alignment, 0); if (free_space != NULL) { if (mappable) obj_priv->gtt_space = drm_mm_get_block_range_generic(free_space, - obj->size, - alignment, 0, + size, alignment, 0, dev_priv->mm.gtt_mappable_end, 0); else obj_priv->gtt_space = - drm_mm_get_block(free_space, obj->size, - alignment); + drm_mm_get_block(free_space, size, alignment); } if (obj_priv->gtt_space == NULL) { /* If the gtt is empty and we're still having trouble * fitting our object in, we're out of memory. */ - ret = i915_gem_evict_something(dev, obj->size, alignment, - mappable); + ret = i915_gem_evict_something(dev, size, alignment, mappable); if (ret) return ret; @@ -2744,7 +2760,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, if (ret == -ENOMEM) { /* first try to clear up some space from the GTT */ - ret = i915_gem_evict_something(dev, obj->size, + ret = i915_gem_evict_something(dev, size, alignment, mappable); if (ret) { /* now try to shrink everyone else */ @@ -2775,8 +2791,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; - ret = i915_gem_evict_something(dev, obj->size, alignment, - mappable); + ret = i915_gem_evict_something(dev, size, + alignment, mappable); if (ret) return ret; @@ -2787,7 +2803,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, /* keep track of bounds object by adding it to the inactive list */ list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); - i915_gem_info_add_gtt(dev_priv, obj); + i915_gem_info_add_gtt(dev_priv, obj_priv); /* Assert that the object is not currently in any GPU domain. As it * wasn't in the GTT, there shouldn't be any way it could have been in @@ -2798,6 +2814,13 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, mappable); + obj_priv->fenceable = + obj_priv->gtt_space->size == fence_size && + (obj_priv->gtt_space->start & (fence_alignment -1)) == 0; + + obj_priv->mappable = + obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end; + return 0; } @@ -3516,9 +3539,8 @@ i915_gem_execbuffer_pin(struct drm_device *dev, entry->relocation_count ? 
true : need_fence; /* Check fence reg constraints and rebind if necessary */ - if (need_fence && - !i915_gem_object_fence_offset_ok(&obj->base, - obj->tiling_mode)) { + if ((need_fence && !obj->fenceable) || + (need_mappable && !obj->mappable)) { ret = i915_gem_object_unbind(&obj->base); if (ret) break; @@ -3526,7 +3548,8 @@ i915_gem_execbuffer_pin(struct drm_device *dev, ret = i915_gem_object_pin(&obj->base, entry->alignment, - need_mappable); + need_mappable, + need_fence); if (ret) break; @@ -4097,7 +4120,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, - bool mappable) + bool mappable, bool need_fence) { struct drm_device *dev = obj->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -4108,14 +4131,15 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, WARN_ON(i915_verify_lists(dev)); if (obj_priv->gtt_space != NULL) { - if (alignment == 0) - alignment = i915_gem_get_gtt_alignment(obj); - if (obj_priv->gtt_offset & (alignment - 1) || - (mappable && !i915_gem_object_cpu_accessible(obj_priv))) { + if ((alignment && obj_priv->gtt_offset & (alignment - 1)) || + (need_fence && !obj_priv->fenceable) || + (mappable && !obj_priv->mappable)) { WARN(obj_priv->pin_count, "bo is already pinned with incorrect alignment:" - " offset=%x, req.alignment=%x\n", - obj_priv->gtt_offset, alignment); + " offset=%x, req.alignment=%x, need_fence=%d, fenceable=%d, mappable=%d, cpu_accessible=%d\n", + obj_priv->gtt_offset, alignment, + need_fence, obj_priv->fenceable, + mappable, obj_priv->mappable); ret = i915_gem_object_unbind(obj); if (ret) return ret; @@ -4123,13 +4147,14 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, } if (obj_priv->gtt_space == NULL) { - ret = i915_gem_object_bind_to_gtt(obj, alignment, mappable); + ret = i915_gem_object_bind_to_gtt(obj, alignment, + mappable, need_fence); if (ret) return ret; } if (obj_priv->pin_count++ == 0) { - i915_gem_info_add_pin(dev_priv, obj, mappable); + i915_gem_info_add_pin(dev_priv, obj_priv, mappable); if (!obj_priv->active) list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); @@ -4155,7 +4180,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) if (!obj_priv->active) list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); - i915_gem_info_remove_pin(dev_priv, obj); + i915_gem_info_remove_pin(dev_priv, obj_priv); } WARN_ON(i915_verify_lists(dev)); } @@ -4196,7 +4221,8 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, obj_priv->user_pin_count++; obj_priv->pin_filp = file_priv; if (obj_priv->user_pin_count == 1) { - ret = i915_gem_object_pin(obj, args->alignment, true); + ret = i915_gem_object_pin(obj, args->alignment, + true, obj_priv->tiling_mode); if (ret) goto out; } @@ -4389,6 +4415,8 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, INIT_LIST_HEAD(&obj->ring_list); INIT_LIST_HEAD(&obj->gpu_write_list); obj->madv = I915_MADV_WILLNEED; + obj->fenceable = true; + obj->mappable = true; return &obj->base; } @@ -4508,7 +4536,7 @@ i915_gem_init_pipe_control(struct drm_device *dev) obj_priv = to_intel_bo(obj); obj_priv->agp_type = AGP_USER_CACHED_MEMORY; - ret = i915_gem_object_pin(obj, 4096, true); + ret = i915_gem_object_pin(obj, 4096, true, false); if (ret) goto err_unref; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index af352de70be1..0597a737ebad 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ 
b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -181,7 +181,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) } /* Check pitch constriants for all chips & tiling formats */ -bool +static bool i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) { int tile_width; @@ -232,25 +232,35 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) return true; } -bool -i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) +/* Is the current GTT allocation valid for the change in tiling? */ +static bool +i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) { - struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - - if (obj_priv->gtt_space == NULL) - return true; + u32 size; if (tiling_mode == I915_TILING_NONE) return true; - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_INFO(obj->dev)->gen >= 4) return true; - if (obj_priv->gtt_offset & (obj->size - 1)) + /* + * Previous chips need to be aligned to the size of the smallest + * fence register that can contain the object. + */ + if (INTEL_INFO(obj->dev)->gen == 3) + size = 1024*1024; + else + size = 512*1024; + + while (size < obj_priv->base.size) + size <<= 1; + + if (obj_priv->gtt_offset & (size - 1)) return false; - if (IS_GEN3(dev)) { + if (INTEL_INFO(obj->dev)->gen == 3) { if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) return false; } else { @@ -331,7 +341,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, * tiling mode. Otherwise we can just leave it alone, but * need to ensure that any fence register is cleared. */ - if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) + if (!i915_gem_object_fence_ok(obj, args->tiling_mode)) ret = i915_gem_object_unbind(obj); else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) ret = i915_gem_object_put_fence_reg(obj, true); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c9c4c707cf1a..4954af23b7c8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1461,7 +1461,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, BUG(); } - ret = i915_gem_object_pin(obj, alignment, true); + ret = i915_gem_object_pin(obj, alignment, + !pipelined, obj_priv->tiling_mode); if (ret) return ret; @@ -4353,7 +4354,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, /* we only need to pin inside GTT if cursor is non-phy */ mutex_lock(&dev->struct_mutex); if (!dev_priv->info->cursor_needs_physical) { - ret = i915_gem_object_pin(bo, PAGE_SIZE, true); + ret = i915_gem_object_pin(bo, PAGE_SIZE, true, false); if (ret) { DRM_ERROR("failed to pin cursor bo\n"); goto fail_locked; @@ -5517,7 +5518,7 @@ intel_alloc_context_page(struct drm_device *dev) } mutex_lock(&dev->struct_mutex); - ret = i915_gem_object_pin(ctx, 4096, true); + ret = i915_gem_object_pin(ctx, 4096, false, false); if (ret) { DRM_ERROR("failed to pin power context: %d\n", ret); goto err_unref; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index beda2016eb16..e62e1b3d243f 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -781,7 +781,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret != 0) return ret; - ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true); + ret = i915_gem_object_pin(new_bo, PAGE_SIZE, false, false); if (ret != 0) return ret; @@ -1423,7 +1423,7 @@ void intel_setup_overlay(struct drm_device *dev) 
} overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; } else { - ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); + ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false); if (ret) { DRM_ERROR("failed to pin overlay register bo\n"); goto out_free_bo; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index e88214ef24b1..632a98e0ba5c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -547,7 +547,7 @@ static int init_status_page(struct intel_ring_buffer *ring) obj_priv = to_intel_bo(obj); obj_priv->agp_type = AGP_USER_CACHED_MEMORY; - ret = i915_gem_object_pin(obj, 4096, true); + ret = i915_gem_object_pin(obj, 4096, true, false); if (ret != 0) { goto err_unref; } @@ -603,7 +603,7 @@ int intel_init_ring_buffer(struct drm_device *dev, ring->gem_object = obj; - ret = i915_gem_object_pin(obj, PAGE_SIZE, true); + ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); if (ret) goto err_unref; diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 8c641bed9bbd..b20dbb2d7174 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -287,6 +287,7 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_HAS_EXECBUF2 9 #define I915_PARAM_HAS_BSD 10 #define I915_PARAM_HAS_BLT 11 +#define I915_PARAM_HAS_RELAXED_FENCING 12 typedef struct drm_i915_getparam { int param; -- cgit v1.2.3 From 872d860c85e30cdc97e7c91723257f4fcf04d5e9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 28 Oct 2010 23:43:30 +0100 Subject: drm/i915: Remove the duplicate domain-change tracepoint for GPU flush Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 07ad1e354084..936ddd83f8a2 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2847,22 +2847,16 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, bool pipelined) { struct drm_device *dev = obj->dev; - uint32_t old_write_domain; if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) return 0; /* Queue the GPU write cache flushing we need. */ - old_write_domain = obj->write_domain; i915_gem_flush_ring(dev, NULL, to_intel_bo(obj)->ring, 0, obj->write_domain); BUG_ON(obj->write_domain); - trace_i915_gem_object_change_domain(obj, - obj->read_domains, - old_write_domain); - if (pipelined) return 0; -- cgit v1.2.3 From 33626e6a08669b1a5834e76363dcd5c8733a4278 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 29 Oct 2010 16:18:36 +0100 Subject: drm/i915/ringbuffer: Disable the ringbuffer on cleanup. It should be idle on cleanup anyway... Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 632a98e0ba5c..0f1693981ee6 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -651,9 +651,17 @@ err_hws: void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) { + struct drm_i915_private *dev_priv; + int ret; + if (ring->gem_object == NULL) return; + /* Disable the ring buffer. 
The ring must be idle at this point */ + dev_priv = ring->dev->dev_private; + ret = intel_wait_ring_buffer(ring, ring->size - 8); + I915_WRITE_CTL(ring, 0); + drm_core_ioremapfree(&ring->map, ring->dev); i915_gem_object_unpin(ring->gem_object); -- cgit v1.2.3 From 1d8f38f4e7146d22f7fbc94eef0508bd75463f54 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 29 Oct 2010 19:00:51 +0100 Subject: drm/i915: Record BLT engine error state Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 19 +++++++++++++------ drivers/gpu/drm/i915/i915_drv.h | 7 ++++++- drivers/gpu/drm/i915/i915_irq.c | 7 +++++++ drivers/gpu/drm/i915/i915_reg.h | 4 ++++ 4 files changed, 30 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index ba2af4e046ed..70eec5aceabe 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -616,21 +616,28 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, error->time.tv_usec); seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); + seq_printf(m, "EIR: 0x%08x\n", error->eir); + seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er); if (INTEL_INFO(dev)->gen >= 6) { seq_printf(m, "ERROR: 0x%08x\n", error->error); + seq_printf(m, "Blitter command stream:\n"); + seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd); + seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); + seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); + seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); + seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); } - seq_printf(m, "EIR: 0x%08x\n", error->eir); - seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); - seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); + seq_printf(m, "Render command stream:\n"); + seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); - seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); if (INTEL_INFO(dev)->gen >= 4) { - seq_printf(m, " INSTPS: 0x%08x\n", error->instps); seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); + seq_printf(m, " INSTPS: 0x%08x\n", error->instps); } - seq_printf(m, "seqno: 0x%08x\n", error->seqno); + seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); + seq_printf(m, " seqno: 0x%08x\n", error->seqno); if (error->active_bo_count) { seq_printf(m, "Buffers [%d]:\n", error->active_bo_count); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7aa7f8abf892..05bff9e6a9cb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -142,13 +142,18 @@ struct sdvo_device_mapping { struct drm_i915_error_state { u32 eir; u32 pgtbl_er; - u32 error; /* gen6+ */ u32 pipeastat; u32 pipebstat; u32 ipeir; u32 ipehr; u32 instdone; u32 acthd; + u32 error; /* gen6+ */ + u32 bcs_acthd; /* gen6+ blt engine */ + u32 bcs_ipehr; + u32 bcs_ipeir; + u32 bcs_instdone; + u32 bcs_seqno; u32 instpm; u32 instps; u32 instdone1; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2a29497a4694..4ff39dee7f1c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -595,6 +595,13 @@ static void i915_capture_error_state(struct drm_device *dev) error->error = 0; if (INTEL_INFO(dev)->gen >= 6) { error->error = I915_READ(ERROR_GEN6); + error->bcs_acthd = I915_READ(BCS_ACTHD); + error->bcs_ipehr = 
I915_READ(BCS_IPEHR); + error->bcs_ipeir = I915_READ(BCS_IPEIR); + error->bcs_instdone = I915_READ(BCS_INSTDONE); + error->bcs_seqno = 0; + if (dev_priv->blt_ring.get_seqno) + error->bcs_seqno = dev_priv->blt_ring.get_seqno(&dev_priv->blt_ring); } if (INTEL_INFO(dev)->gen >= 4) { error->ipeir = I915_READ(IPEIR_I965); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index fc161196da21..b173e5b77f96 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -305,6 +305,10 @@ #define INSTDONE 0x02090 #define NOPID 0x02094 #define HWSTAM 0x02098 +#define BCS_INSTDONE 0x2206C +#define BCS_IPEIR 0x22064 +#define BCS_IPEHR 0x22068 +#define BCS_ACTHD 0x22074 #define ERROR_GEN6 0x040a0 -- cgit v1.2.3 From c584fe47e4d92934c10e5d7f932ee042587dbcff Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 29 Oct 2010 18:15:52 +0100 Subject: drm/i915/ringbuffer: Remove duplicate initialisation of ring control Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 0f1693981ee6..4d00da97905f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -578,7 +578,6 @@ err: int intel_init_ring_buffer(struct drm_device *dev, struct intel_ring_buffer *ring) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv; struct drm_gem_object *obj; int ret; @@ -626,16 +625,7 @@ int intel_init_ring_buffer(struct drm_device *dev, if (ret) goto err_unmap; - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - i915_kernel_lost_context(dev); - else { - ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; - ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; - ring->space = ring->head - (ring->tail + 8); - if (ring->space < 0) - ring->space += ring->size; - } - return ret; + return 0; err_unmap: drm_core_ioremapfree(&ring->map, dev); -- cgit v1.2.3 From e380f60b22eddec7825224b8d788572c82b63161 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 29 Oct 2010 18:11:26 +0100 Subject: agp/intel: Sandybridge doesn't require GMCH enabling Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 5dc1f5db55a7..9c86dac41da7 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -922,10 +922,11 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry, writel(addr | pte_flags, intel_private.gtt + entry); } -static void intel_enable_gtt(void) +static bool intel_enable_gtt(void) { u32 gma_addr; u16 gmch_ctrl; + u8 __iomem *reg; if (INTEL_GTT_GEN == 2) pci_read_config_dword(intel_private.pcidev, I810_GMADDR, @@ -936,13 +937,34 @@ static void intel_enable_gtt(void) intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); - pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); + if (INTEL_GTT_GEN >= 6) + return true; + + pci_read_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, &gmch_ctrl); gmch_ctrl |= I830_GMCH_ENABLED; - pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl); + pci_write_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, gmch_ctrl); - writel(intel_private.PGETBL_save|I810_PGETBL_ENABLED, - intel_private.registers+I810_PGETBL_CTL); - 
readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ + pci_read_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, &gmch_ctrl); + if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) { + dev_err(&intel_private.pcidev->dev, + "failed to enable the GTT: GMCH_CTRL=%x\n", + gmch_ctrl); + return false; + } + + reg = intel_private.registers+I810_PGETBL_CTL; + writel(intel_private.PGETBL_save|I810_PGETBL_ENABLED, reg); + if ((readl(reg) & I810_PGETBL_ENABLED) == 0) { + dev_err(&intel_private.pcidev->dev, + "failed to enable the GTT: PGETBL=%x [expected %x|1]\n", + readl(reg), intel_private.PGETBL_save); + return false; + } + + return true; } static int i830_setup(void) @@ -981,7 +1003,8 @@ static int intel_fake_agp_configure(void) { int i; - intel_enable_gtt(); + if (!intel_enable_gtt()) + return -EIO; agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; -- cgit v1.2.3 From 4066c0ae13a8388b9f2a29cad60da330b578eff2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 29 Oct 2010 21:00:54 +0100 Subject: drm/i915/debugfs: Display the contents of the BLT and BSD status pages Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 70eec5aceabe..58cf60d89f68 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -445,10 +445,18 @@ static int i915_hws_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - int i; + struct intel_ring_buffer *ring; volatile u32 *hws; + int i; + + switch ((uintptr_t)node->info_ent->data) { + case RENDER_RING: ring = &dev_priv->render_ring; break; + case BSD_RING: ring = &dev_priv->bsd_ring; break; + case BLT_RING: ring = &dev_priv->blt_ring; break; + default: return -EINVAL; + } - hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr; + hws = (volatile u32 *)ring->status_page.page_addr; if (hws == NULL) return 0; @@ -1087,7 +1095,9 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gem_seqno", i915_gem_seqno_info, 0}, {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, - {"i915_gem_hws", i915_hws_info, 0}, + {"i915_gem_hws", i915_hws_info, 0, (void *)RENDER_RING}, + {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BLT_RING}, + {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)BSD_RING}, {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RENDER_RING}, {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RENDER_RING}, {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BSD_RING}, -- cgit v1.2.3 From 6dda569fe0fb71a03e2a2e815761796f98232cdb Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 29 Oct 2010 21:02:18 +0100 Subject: drm/i915: Switch to using pci_iounmap in conjunction with pci_iomap After switching the MMIO registers to use pci_iomap, remember to dispose of the mapping with pci_iounmap (for symmetry). 
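For reference, the pairing this enforces looks roughly like the sketch below (illustrative only; example_probe/example_remove are placeholder names, not the driver's real entry points):

#include <linux/pci.h>

/* Illustrative pairing: a BAR mapped with pci_iomap() must be released
 * with pci_iounmap(), not plain iounmap(). */
static void __iomem *regs;

static int example_probe(struct pci_dev *pdev)
{
        regs = pci_iomap(pdev, 0, 0);   /* map all of BAR 0 */
        return regs ? 0 : -EIO;
}

static void example_remove(struct pci_dev *pdev)
{
        pci_iounmap(pdev, regs);        /* symmetric with pci_iomap() above */
}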
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 00d8fb3e989f..eee88cfcb3aa 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -2082,7 +2082,7 @@ out_workqueue_free: out_iomapfree: io_mapping_free(dev_priv->mm.gtt_mapping); out_rmmap: - iounmap(dev_priv->regs); + pci_iounmap(dev->pdev, dev_priv->regs); put_bridge: pci_dev_put(dev_priv->bridge_dev); free_priv: @@ -2168,7 +2168,7 @@ int i915_driver_unload(struct drm_device *dev) } if (dev_priv->regs != NULL) - iounmap(dev_priv->regs); + pci_iounmap(dev->pdev, dev_priv->regs); intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); -- cgit v1.2.3 From f4e0b29bf23687ac16dc476bd90cd4d8b0eacd5c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 29 Oct 2010 21:06:16 +0100 Subject: drm/i915: Check if the GPU hung whilst waiting for the ring to clear Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 4d00da97905f..2e72d3a0740f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -711,6 +711,8 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) } msleep(1); + if (atomic_read(&dev_priv->mm.wedged)) + return -EAGAIN; } while (!time_after(jiffies, end)); trace_i915_ring_wait_end (dev); return -EBUSY; -- cgit v1.2.3 From 6aa56062eaba67adfb247cded244fd877329588d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 29 Oct 2010 21:44:37 +0100 Subject: drm/i915/ringbuffer: Use the HEAD auto-reporting mechanism My Sandybridge only reports 0 for the ring buffer registers, causing it to hang as soon as we exhaust the available ring. As a workaround, take advantage of our huge ring buffers and use the auto-reporting mechanism to update the status page with the HEAD location every 64 KiB. 
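In outline, the mechanism is the sketch below, condensed from the hunks that follow; ring_space_from_report() is a made-up name for exposition, and dword 4 of the status page is where the hardware deposits the auto-reported head:

/* With RING_REPORT_64K set in the ring control register, the GPU writes
 * its current HEAD offset into the status page as it consumes the ring,
 * so free space can be computed without reading the flaky ring registers. */
static int ring_space_from_report(struct intel_ring_buffer *ring, int n)
{
        u32 head = intel_read_status_page(ring, 4);

        if (head == 0)
                return -EBUSY;          /* nothing reported yet; keep waiting */

        ring->head = head & HEAD_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
                ring->space += ring->size;

        return ring->space >= n ? 0 : -EBUSY;
}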
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 2e72d3a0740f..390aa21edbe4 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -174,7 +174,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) I915_WRITE_CTL(ring, ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) - | RING_NO_REPORT | RING_VALID); + | RING_REPORT_64K | RING_VALID); /* If the head is still not zero, the ring is dead */ if ((I915_READ_CTL(ring) & RING_VALID) == 0 || @@ -691,6 +691,17 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; unsigned long end; + u32 head; + + head = intel_read_status_page(ring, 4); + if (head) { + ring->head = head & HEAD_ADDR; + ring->space = ring->head - (ring->tail + 8); + if (ring->space < 0) + ring->space += ring->size; + if (ring->space >= n) + return 0; + } trace_i915_ring_wait_begin (dev); end = jiffies + 3 * HZ; -- cgit v1.2.3 From add354ddf62beac55ca3ba64835dd703a0649867 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 29 Oct 2010 19:00:51 +0100 Subject: drm/i915: Record BSD engine error state Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 6 ++++++ drivers/gpu/drm/i915/i915_drv.h | 5 +++++ drivers/gpu/drm/i915/i915_irq.c | 9 +++++++++ drivers/gpu/drm/i915/i915_reg.h | 4 ++++ 4 files changed, 24 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 58cf60d89f68..56f7ced16f1a 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -634,6 +634,12 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); + seq_printf(m, "Video (BSD) command stream:\n"); + seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd); + seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); + seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir); + seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone); + seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno); } seq_printf(m, "Render command stream:\n"); seq_printf(m, " ACTHD: 0x%08x\n", error->acthd); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 05bff9e6a9cb..ec582b6d2113 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -154,6 +154,11 @@ struct drm_i915_error_state { u32 bcs_ipeir; u32 bcs_instdone; u32 bcs_seqno; + u32 vcs_acthd; /* gen6+ bsd engine */ + u32 vcs_ipehr; + u32 vcs_ipeir; + u32 vcs_instdone; + u32 vcs_seqno; u32 instpm; u32 instps; u32 instdone1; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4ff39dee7f1c..90c071d37748 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -595,6 +595,7 @@ static void i915_capture_error_state(struct drm_device *dev) error->error = 0; if (INTEL_INFO(dev)->gen >= 6) { error->error = I915_READ(ERROR_GEN6); + error->bcs_acthd = I915_READ(BCS_ACTHD); error->bcs_ipehr = I915_READ(BCS_IPEHR); error->bcs_ipeir = I915_READ(BCS_IPEIR); @@ -602,6 +603,14 @@ static void i915_capture_error_state(struct drm_device *dev) error->bcs_seqno = 0; if 
(dev_priv->blt_ring.get_seqno) error->bcs_seqno = dev_priv->blt_ring.get_seqno(&dev_priv->blt_ring); + + error->vcs_acthd = I915_READ(VCS_ACTHD); + error->vcs_ipehr = I915_READ(VCS_IPEHR); + error->vcs_ipeir = I915_READ(VCS_IPEIR); + error->vcs_instdone = I915_READ(VCS_INSTDONE); + error->vcs_seqno = 0; + if (dev_priv->bsd_ring.get_seqno) + error->vcs_seqno = dev_priv->bsd_ring.get_seqno(&dev_priv->bsd_ring); } if (INTEL_INFO(dev)->gen >= 4) { error->ipeir = I915_READ(IPEIR_I965); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index b173e5b77f96..c79d4ba4fb12 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -305,6 +305,10 @@ #define INSTDONE 0x02090 #define NOPID 0x02094 #define HWSTAM 0x02098 +#define VCS_INSTDONE 0x1206C +#define VCS_IPEIR 0x12064 +#define VCS_IPEHR 0x12068 +#define VCS_ACTHD 0x12074 #define BCS_INSTDONE 0x2206C #define BCS_IPEIR 0x22064 #define BCS_IPEHR 0x22068 -- cgit v1.2.3 From ff75b9bc489710ff223bc0d805bf3b862dc347ea Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 30 Oct 2010 22:52:31 +0100 Subject: drm/i915: Fix typo from e5281ccd in i915_gem_attach_phys_object() Accessing the uninitialised obj->pages instead of the local page led to an oops. Reported-by: Xavier Chantry Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 936ddd83f8a2..e3fc333e2e57 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4925,7 +4925,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, if (IS_ERR(page)) return PTR_ERR(page); - src = kmap_atomic(obj_priv->pages[i]); + src = kmap_atomic(page); dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); memcpy(dst, src, PAGE_SIZE); kunmap_atomic(src); -- cgit v1.2.3 From 5eac3ab45955b32f3a9d89e633918c4d6f133dfa Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 31 Oct 2010 08:49:47 +0000 Subject: drm/i915: Evict just the purgeable GTT entries on the first pass Take two passes to evict everything whilst searching for sufficient free space to bind the batchbuffer. After searching for sufficient free space using LRU eviction, evict everything that is purgeable and try again. Only then, if there is still insufficient free space (or the GTT is too badly fragmented), evict everything from the aperture and try one last time.
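Schematically, the resulting control flow in the execbuffer pin path is the sketch below, condensed from the diff that follows; try_to_pin_all() stands in for the real pinning loop:

/* Schematic of the escalating retries: pass 0 pins against whatever is
 * free, pass 1 first evicts only purgeable objects, pass 2 evicts all. */
int retry = 0;
do {
        ret = try_to_pin_all();                 /* placeholder for the pin loop */
        if (ret != -ENOSPC || retry > 1)
                return ret;

        /* purgeable_only == true on the first retry, false on the second */
        ret = i915_gem_evict_everything(dev, retry == 0);
        if (ret)
                return ret;

        retry++;
} while (1);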
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- drivers/gpu/drm/i915/i915_gem.c | 19 ++++++++-------- drivers/gpu/drm/i915/i915_gem_evict.c | 42 +++++++++++------------------------ 3 files changed, 25 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ec582b6d2113..54e5b2fa5fd4 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1073,8 +1073,8 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); /* i915_gem_evict.c */ int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment, bool mappable); -int i915_gem_evict_everything(struct drm_device *dev); -int i915_gem_evict_inactive(struct drm_device *dev); +int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only); +int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only); /* i915_gem_tiling.c */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e3fc333e2e57..c8e516d3f8bc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3519,7 +3519,8 @@ i915_gem_execbuffer_pin(struct drm_device *dev, int ret, i, retry; /* attempt to pin all of the buffers into the GTT */ - for (retry = 0; retry < 2; retry++) { + retry = 0; + do { ret = 0; for (i = 0; i < count; i++) { struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; @@ -3567,18 +3568,18 @@ i915_gem_execbuffer_pin(struct drm_device *dev, while (i--) i915_gem_object_unpin(object_list[i]); - if (ret == 0) - break; - - if (ret != -ENOSPC || retry) + if (ret != -ENOSPC || retry > 1) return ret; - ret = i915_gem_evict_everything(dev); + /* First attempt, just clear anything that is purgeable. + * Second attempt, clear the entire GTT. + */ + ret = i915_gem_evict_everything(dev, retry == 0); if (ret) return ret; - } - return 0; + retry++; + } while (1); } /* Throttle our rendering by waiting until the ring has completed our requests @@ -4484,7 +4485,7 @@ i915_gem_idle(struct drm_device *dev) /* Under UMS, be paranoid and evict. 
*/ if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_gem_evict_inactive(dev); + ret = i915_gem_evict_inactive(dev, false); if (ret) { mutex_unlock(&dev->struct_mutex); return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 3a4215f31652..a2609c5542fd 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -42,7 +42,7 @@ mark_free(struct drm_i915_gem_object *obj_priv, int i915_gem_evict_something(struct drm_device *dev, int min_size, - unsigned alignment, bool mappable) + unsigned alignment, bool mappable) { drm_i915_private_t *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; @@ -54,7 +54,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, /* Re-check for free space after retiring requests */ if (mappable) { if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, - min_size, alignment, 0, + min_size, alignment, 0, dev_priv->mm.gtt_mappable_end, 0)) return 0; @@ -171,7 +171,7 @@ found: } int -i915_gem_evict_everything(struct drm_device *dev) +i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only) { drm_i915_private_t *dev_priv = dev->dev_private; int ret; @@ -192,38 +192,22 @@ i915_gem_evict_everything(struct drm_device *dev) BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); - ret = i915_gem_evict_inactive(dev); - if (ret) - return ret; - - lists_empty = (list_empty(&dev_priv->mm.inactive_list) && - list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->render_ring.active_list) && - list_empty(&dev_priv->bsd_ring.active_list) && - list_empty(&dev_priv->blt_ring.active_list)); - BUG_ON(!lists_empty); - - return 0; + return i915_gem_evict_inactive(dev, purgeable_only); } /** Unbinds all inactive objects. */ int -i915_gem_evict_inactive(struct drm_device *dev) +i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only) { drm_i915_private_t *dev_priv = dev->dev_private; - - while (!list_empty(&dev_priv->mm.inactive_list)) { - struct drm_gem_object *obj; - int ret; - - obj = &list_first_entry(&dev_priv->mm.inactive_list, - struct drm_i915_gem_object, - mm_list)->base; - - ret = i915_gem_object_unbind(obj); - if (ret != 0) { - DRM_ERROR("Error unbinding object: %d\n", ret); - return ret; + struct drm_i915_gem_object *obj, *next; + + list_for_each_entry_safe(obj, next, + &dev_priv->mm.inactive_list, mm_list) { + if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { + int ret = i915_gem_object_unbind(&obj->base); + if (ret) + return ret; } } -- cgit v1.2.3 From 100519e2f1c20286158746f92f27c3aa14f5a893 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 31 Oct 2010 10:37:02 +0000 Subject: agp/intel: the GMCH is always enabled for integrated processor graphics ... and trying to set the bit is ineffectual. Fixes the regression from e380f60 which detected that we were trying to do undefined operations on the I830_GMCH_CTRL. 
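The shape of the fix, condensed from the diff below, is to make the GMCH_CTRL dance gen2-only and to trust the PGETBL_CTL readback only on parts that actually implement the enable bit; enable_gmch_via_config_space() is a placeholder for the read/modify/verify sequence:

/* Condensed sketch: integrated parts have the GMCH permanently enabled,
 * so only gen2 pokes GMCH_CTRL; likewise only chipsets flagged with
 * has_pgtbl_enable can meaningfully verify I810_PGETBL_ENABLED. */
static bool enable_gtt_sketch(void)
{
        u8 __iomem *reg = intel_private.registers + I810_PGETBL_CTL;

        if (INTEL_GTT_GEN == 2 && !enable_gmch_via_config_space())
                return false;   /* placeholder for the GMCH_CTRL dance */

        writel(intel_private.PGETBL_save, reg);
        if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0)
                return false;   /* the GTT refused to switch on */

        return true;
}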
Reported-by: Alexey Fisher Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 43 +++++++++++++++++++++++++++---------------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 9c86dac41da7..f800e9cfc368 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -73,6 +73,7 @@ struct intel_gtt_driver { unsigned int is_g33 : 1; unsigned int is_pineview : 1; unsigned int is_ironlake : 1; + unsigned int has_pgtbl_enable : 1; unsigned int dma_mask_size : 8; /* Chipset specific GTT setup */ int (*setup)(void); @@ -113,6 +114,7 @@ static struct _intel_private { #define IS_G33 intel_private.driver->is_g33 #define IS_PINEVIEW intel_private.driver->is_pineview #define IS_IRONLAKE intel_private.driver->is_ironlake +#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable static void intel_agp_free_sglist(struct agp_memory *mem) { @@ -803,6 +805,9 @@ static int intel_gtt_init(void) intel_private.PGETBL_save = readl(intel_private.registers+I810_PGETBL_CTL) & ~I810_PGETBL_ENABLED; + /* we only ever restore the register when enabling the PGTBL... */ + if (HAS_PGTBL_EN) + intel_private.PGETBL_save |= I810_PGETBL_ENABLED; dev_info(&intel_private.bridge_dev->dev, "detected gtt size: %dK total, %dK mappable\n", @@ -925,7 +930,6 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry, static bool intel_enable_gtt(void) { u32 gma_addr; - u16 gmch_ctrl; u8 __iomem *reg; if (INTEL_GTT_GEN == 2) @@ -940,26 +944,30 @@ static bool intel_enable_gtt(void) if (INTEL_GTT_GEN >= 6) return true; - pci_read_config_word(intel_private.bridge_dev, - I830_GMCH_CTRL, &gmch_ctrl); - gmch_ctrl |= I830_GMCH_ENABLED; - pci_write_config_word(intel_private.bridge_dev, - I830_GMCH_CTRL, gmch_ctrl); + if (INTEL_GTT_GEN == 2) { + u16 gmch_ctrl; - pci_read_config_word(intel_private.bridge_dev, - I830_GMCH_CTRL, &gmch_ctrl); - if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) { - dev_err(&intel_private.pcidev->dev, - "failed to enable the GTT: GMCH_CTRL=%x\n", - gmch_ctrl); - return false; + pci_read_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, &gmch_ctrl); + gmch_ctrl |= I830_GMCH_ENABLED; + pci_write_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, gmch_ctrl); + + pci_read_config_word(intel_private.bridge_dev, + I830_GMCH_CTRL, &gmch_ctrl); + if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) { + dev_err(&intel_private.pcidev->dev, + "failed to enable the GTT: GMCH_CTRL=%x\n", + gmch_ctrl); + return false; + } } reg = intel_private.registers+I810_PGETBL_CTL; - writel(intel_private.PGETBL_save|I810_PGETBL_ENABLED, reg); - if ((readl(reg) & I810_PGETBL_ENABLED) == 0) { + writel(intel_private.PGETBL_save, reg); + if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) { dev_err(&intel_private.pcidev->dev, - "failed to enable the GTT: PGETBL=%x [expected %x|1]\n", + "failed to enable the GTT: PGETBL=%x [expected %x]\n", readl(reg), intel_private.PGETBL_save); return false; } @@ -1395,6 +1403,7 @@ static const struct intel_gtt_driver i81x_gtt_driver = { }; static const struct intel_gtt_driver i8xx_gtt_driver = { .gen = 2, + .has_pgtbl_enable = 1, .setup = i830_setup, .cleanup = i830_cleanup, .write_entry = i830_write_entry, @@ -1404,6 +1413,7 @@ static const struct intel_gtt_driver i8xx_gtt_driver = { }; static const struct intel_gtt_driver i915_gtt_driver = { .gen = 3, + .has_pgtbl_enable = 1, .setup = i9xx_setup, .cleanup = i9xx_cleanup, /* i945 is the last gpu to need phys mem (for overlay and cursors). 
*/ @@ -1434,6 +1444,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = { }; static const struct intel_gtt_driver i965_gtt_driver = { .gen = 4, + .has_pgtbl_enable = 1, .setup = i9xx_setup, .cleanup = i9xx_cleanup, .write_entry = i965_write_entry, -- cgit v1.2.3 From e5c652603680404683fd1f262b511340545179a2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 1 Nov 2010 11:35:28 +0000 Subject: drm/i915/debugfs: Report ring in error state Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 62 ++++++++++++++++++++----------------- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_irq.c | 1 + 3 files changed, 36 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 56f7ced16f1a..9cb6061bf9d5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -32,6 +32,7 @@ #include "drmP.h" #include "drm.h" #include "intel_drv.h" +#include "intel_ringbuffer.h" #include "i915_drm.h" #include "i915_drv.h" @@ -48,12 +49,6 @@ enum { DEFERRED_FREE_LIST, }; -enum { - RENDER_RING, - BSD_RING, - BLT_RING, -}; - static const char *yesno(int v) { return v ? "yes" : "no"; @@ -450,9 +445,9 @@ static int i915_hws_info(struct seq_file *m, void *data) int i; switch ((uintptr_t)node->info_ent->data) { - case RENDER_RING: ring = &dev_priv->render_ring; break; - case BSD_RING: ring = &dev_priv->bsd_ring; break; - case BLT_RING: ring = &dev_priv->blt_ring; break; + case RING_RENDER: ring = &dev_priv->render_ring; break; + case RING_BSD: ring = &dev_priv->bsd_ring; break; + case RING_BLT: ring = &dev_priv->blt_ring; break; default: return -EINVAL; } @@ -520,9 +515,9 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) int ret; switch ((uintptr_t)node->info_ent->data) { - case RENDER_RING: ring = &dev_priv->render_ring; break; - case BSD_RING: ring = &dev_priv->bsd_ring; break; - case BLT_RING: ring = &dev_priv->blt_ring; break; + case RING_RENDER: ring = &dev_priv->render_ring; break; + case RING_BSD: ring = &dev_priv->bsd_ring; break; + case RING_BLT: ring = &dev_priv->blt_ring; break; default: return -EINVAL; } @@ -554,9 +549,9 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) struct intel_ring_buffer *ring; switch ((uintptr_t)node->info_ent->data) { - case RENDER_RING: ring = &dev_priv->render_ring; break; - case BSD_RING: ring = &dev_priv->bsd_ring; break; - case BLT_RING: ring = &dev_priv->blt_ring; break; + case RING_RENDER: ring = &dev_priv->render_ring; break; + case RING_BSD: ring = &dev_priv->bsd_ring; break; + case RING_BLT: ring = &dev_priv->blt_ring; break; default: return -EINVAL; } @@ -574,6 +569,16 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) return 0; } +static const char *ring_str(int ring) +{ + switch (ring) { + case RING_RENDER: return "render"; + case RING_BSD: return "bsd"; + case RING_BLT: return "blt"; + default: return ""; + } +} + static const char *pin_flag(int pinned) { if (pinned > 0) @@ -630,14 +635,14 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, "ERROR: 0x%08x\n", error->error); seq_printf(m, "Blitter command stream:\n"); seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd); - seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir); + seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr); seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone); seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno); seq_printf(m, 
"Video (BSD) command stream:\n"); seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd); - seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir); + seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr); seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone); seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno); } @@ -657,7 +662,7 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, "Buffers [%d]:\n", error->active_bo_count); for (i = 0; i < error->active_bo_count; i++) { - seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s", + seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s %s", error->active_bo[i].gtt_offset, error->active_bo[i].size, error->active_bo[i].read_domains, @@ -666,7 +671,8 @@ static int i915_error_state(struct seq_file *m, void *unused) pin_flag(error->active_bo[i].pinned), tiling_flag(error->active_bo[i].tiling), dirty_flag(error->active_bo[i].dirty), - purgeable_flag(error->active_bo[i].purgeable)); + purgeable_flag(error->active_bo[i].purgeable), + ring_str(error->active_bo[i].ring)); if (error->active_bo[i].name) seq_printf(m, " (name: %d)", error->active_bo[i].name); @@ -1101,15 +1107,15 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gem_seqno", i915_gem_seqno_info, 0}, {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, - {"i915_gem_hws", i915_hws_info, 0, (void *)RENDER_RING}, - {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BLT_RING}, - {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)BSD_RING}, - {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RENDER_RING}, - {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RENDER_RING}, - {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BSD_RING}, - {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BSD_RING}, - {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BLT_RING}, - {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BLT_RING}, + {"i915_gem_hws", i915_hws_info, 0, (void *)RING_RENDER}, + {"i915_gem_hws_blt", i915_hws_info, 0, (void *)RING_BLT}, + {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)RING_BSD}, + {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_RENDER}, + {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_RENDER}, + {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_BSD}, + {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_BSD}, + {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_BLT}, + {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_BLT}, {"i915_batchbuffers", i915_batchbuffer_info, 0}, {"i915_error_state", i915_error_state, 0}, {"i915_rstdby_delays", i915_rstdby_delays, 0}, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 54e5b2fa5fd4..3f8786049cb6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -182,6 +182,7 @@ struct drm_i915_error_state { u32 tiling:2; u32 dirty:1; u32 purgeable:1; + u32 ring:4; } *active_bo; u32 active_bo_count; struct intel_overlay_error_state *overlay; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 90c071d37748..3ec631f1129d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -728,6 +728,7 @@ static void i915_capture_error_state(struct drm_device *dev) error->active_bo[i].tiling = obj_priv->tiling_mode; error->active_bo[i].dirty = obj_priv->dirty; 
error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED; + error->active_bo[i].ring = obj_priv->ring->id; if (++i == count) break; -- cgit v1.2.3 From 13b2928933919c5344716d49620a52493a243f8c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 1 Nov 2010 12:22:48 +0000 Subject: drm/i915: Apply big hammer to serialise buffer access between rings Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 75 ++++++++++++++++++++++++++++------------- 1 file changed, 52 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c8e516d3f8bc..c797d2b9b233 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3189,7 +3189,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, * write domain */ if (obj->write_domain && - obj->write_domain != obj->pending_read_domains) { + (obj->write_domain != obj->pending_read_domains || + obj_priv->ring != ring)) { flush_domains |= obj->write_domain; invalidate_domains |= obj->pending_read_domains & ~obj->write_domain; @@ -3582,6 +3583,52 @@ i915_gem_execbuffer_pin(struct drm_device *dev, } while (1); } +static int +i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, + struct drm_file *file, + struct intel_ring_buffer *ring, + struct drm_gem_object **objects, + int count) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret, i; + + /* Zero the global flush/invalidate flags. These + * will be modified as new domains are computed + * for each object + */ + dev->invalidate_domains = 0; + dev->flush_domains = 0; + dev_priv->mm.flush_rings = 0; + for (i = 0; i < count; i++) + i915_gem_object_set_to_gpu_domain(objects[i], ring); + + if (dev->invalidate_domains | dev->flush_domains) { +#if WATCH_EXEC + DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", + __func__, + dev->invalidate_domains, + dev->flush_domains); +#endif + i915_gem_flush(dev, file, + dev->invalidate_domains, + dev->flush_domains, + dev_priv->mm.flush_rings); + } + + for (i = 0; i < count; i++) { + struct drm_i915_gem_object *obj = to_intel_bo(objects[i]); + /* XXX replace with semaphores */ + if (obj->ring && ring != obj->ring) { + ret = i915_gem_object_wait_rendering(&obj->base, true); + if (ret) + return ret; + } + } + + return 0; +} + /* Throttle our rendering by waiting until the ring has completed our requests * emitted over 20 msec ago. * @@ -3843,28 +3890,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - /* Zero the global flush/invalidate flags. 
These - * will be modified as new domains are computed - * for each object - */ - dev->invalidate_domains = 0; - dev->flush_domains = 0; - dev_priv->mm.flush_rings = 0; - for (i = 0; i < args->buffer_count; i++) - i915_gem_object_set_to_gpu_domain(object_list[i], ring); - - if (dev->invalidate_domains | dev->flush_domains) { -#if WATCH_EXEC - DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", - __func__, - dev->invalidate_domains, - dev->flush_domains); -#endif - i915_gem_flush(dev, file, - dev->invalidate_domains, - dev->flush_domains, - dev_priv->mm.flush_rings); - } + ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring, + object_list, args->buffer_count); + if (ret) + goto err; #if WATCH_COHERENCY for (i = 0; i < args->buffer_count; i++) { -- cgit v1.2.3 From 0f8c6d7ca9257d6a01671ab69b897860d3ae9bc0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 1 Nov 2010 12:38:44 +0000 Subject: drm/i915: Move the invalidate|flush information out of the device struct ... and into a local structure scoped for the single function in which it is used. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 2 -- drivers/gpu/drm/i915/i915_gem.c | 45 +++++++++++++++++++++-------------------- include/drm/drmP.h | 2 -- 3 files changed, 23 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3f8786049cb6..a0063f82fa33 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -642,8 +642,6 @@ typedef struct drm_i915_private { /* storage for physical objects */ struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; - uint32_t flush_rings; - /* accounting, useful for userland debugging */ size_t object_memory; size_t pin_memory; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c797d2b9b233..ffea847c8a0d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -36,6 +36,12 @@ #include #include +struct change_domains { + uint32_t invalidate_domains; + uint32_t flush_domains; + uint32_t flush_rings; +}; + static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv); static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv); @@ -3167,10 +3173,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) */ static void i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring, + struct change_domains *cd) { - struct drm_device *dev = obj->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t invalidate_domains = 0; uint32_t flush_domains = 0; @@ -3216,12 +3221,12 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, if (flush_domains == 0 && obj->pending_write_domain == 0) obj->pending_write_domain = obj->write_domain; - dev->invalidate_domains |= invalidate_domains; - dev->flush_domains |= flush_domains; + cd->invalidate_domains |= invalidate_domains; + cd->flush_domains |= flush_domains; if (flush_domains & I915_GEM_GPU_DOMAINS) - dev_priv->mm.flush_rings |= obj_priv->ring->id; + cd->flush_rings |= obj_priv->ring->id; if (invalidate_domains & I915_GEM_GPU_DOMAINS) - dev_priv->mm.flush_rings |= ring->id; + cd->flush_rings |= ring->id; } /** @@ -3590,30 +3595,26 @@ i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, struct drm_gem_object **objects, int count) { - struct drm_i915_private *dev_priv = dev->dev_private; 
+ struct change_domains cd; int ret, i; - /* Zero the global flush/invalidate flags. These - * will be modified as new domains are computed - * for each object - */ - dev->invalidate_domains = 0; - dev->flush_domains = 0; - dev_priv->mm.flush_rings = 0; + cd.invalidate_domains = 0; + cd.flush_domains = 0; + cd.flush_rings = 0; for (i = 0; i < count; i++) - i915_gem_object_set_to_gpu_domain(objects[i], ring); + i915_gem_object_set_to_gpu_domain(objects[i], ring, &cd); - if (dev->invalidate_domains | dev->flush_domains) { + if (cd.invalidate_domains | cd.flush_domains) { #if WATCH_EXEC DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", __func__, - dev->invalidate_domains, - dev->flush_domains); + cd.invalidate_domains, + cd.flush_domains); #endif i915_gem_flush(dev, file, - dev->invalidate_domains, - dev->flush_domains, - dev_priv->mm.flush_rings); + cd.invalidate_domains, + cd.flush_domains, + cd.flush_rings); } for (i = 0; i < count; i++) { diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 274eaaa15c36..d4bc0f5cab8f 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1041,8 +1041,6 @@ struct drm_device { /*@{ */ spinlock_t object_name_lock; struct idr object_name_idr; - uint32_t invalidate_domains; /* domains pending invalidation */ - uint32_t flush_domains; /* domains pending flush */ /*@} */ }; -- cgit v1.2.3 From 328fc1325f144027f4a8269b11e9f8dcf1edcb97 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 2 Nov 2010 10:00:34 +0000 Subject: Revert "drm/i915: add MMIO debug output" We can use mmiotrace instead of our own debug printks. This reverts commit be282fd48e7492812402a22d73a348c44bf95b63. Conflicts: drivers/gpu/drm/i915/i915_drv.h --- drivers/gpu/drm/i915/i915_drv.h | 30 ++---------------------------- 1 file changed, 2 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index a0063f82fa33..81ee20c2d24e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -287,9 +287,6 @@ typedef struct drm_i915_private { int front_offset; int current_page; int page_flipping; -#define I915_DEBUG_READ (1<<0) -#define I915_DEBUG_WRITE (1<<1) - unsigned long debug_flags; wait_queue_head_t irq_queue; atomic_t irq_received; @@ -1172,26 +1169,8 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove LOCK_TEST_WITH_RETURN(dev, file_priv); \ } while (0) -static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg) -{ - u32 val; - - val = readl(dev_priv->regs + reg); - if (dev_priv->debug_flags & I915_DEBUG_READ) - printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg); - return val; -} - -static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, - u32 val) -{ - writel(val, dev_priv->regs + reg); - if (dev_priv->debug_flags & I915_DEBUG_WRITE) - printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg); -} - -#define I915_READ(reg) i915_read(dev_priv, (reg)) -#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val)) +#define I915_READ(reg) readl(dev_priv->regs + (reg)) +#define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg)) #define I915_READ16(reg) readw(dev_priv->regs + (reg)) #define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) #define I915_READ8(reg) readb(dev_priv->regs + (reg)) @@ -1201,11 +1180,6 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define POSTING_READ(reg) (void)I915_READ(reg) #define POSTING_READ16(reg) (void)I915_READ16(reg) -#define I915_DEBUG_ENABLE_IO() 
(dev_priv->debug_flags |= I915_DEBUG_READ | \ - I915_DEBUG_WRITE) -#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \ - I915_DEBUG_WRITE)) - #define BEGIN_LP_RING(n) \ intel_ring_begin(&dev_priv->render_ring, (n)) -- cgit v1.2.3 From d110852513148a7ec44fad4e036455aeb816d713 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Tue, 2 Nov 2010 17:30:46 +0800 Subject: agp/intel: fix cache control for sandybridge This was broken by 97ef1bdd0bc75bce7b2058e9c432b6c277dcf4d3. Let's set the correct bit for LLC+MLC and LLC only. Signed-off-by: Zhenyu Wang Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index f800e9cfc368..c51efe6f2c7f 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1291,11 +1291,11 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry, if (type_mask == AGP_USER_UNCACHED_MEMORY) pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { - pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; + pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; if (gfdt) pte_flags |= GEN6_PTE_GFDT; } else { /* set 'normal'/'cached' to LLC by default */ - pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; + pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; if (gfdt) pte_flags |= GEN6_PTE_GFDT; } -- cgit v1.2.3 From 897ef192514a6b0fc10a0ce3fe7e7aa0de09bc52 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Tue, 2 Nov 2010 17:30:47 +0800 Subject: agp/intel: restore cache behavior on sandybridge This restores the cache behavior for the default AGP_USER_MEMORY as uncached, and leaves the default AGP_USER_CACHED_MEMORY as LLC only. I've seen different cache behavior on one sandybridge desktop CPU vs. another mobile CPU. Until we figure out how to detect the real cache config, restore the original behavior for now. Signed-off-by: Zhenyu Wang Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index c51efe6f2c7f..fc1637c32cb1 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1288,7 +1288,7 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry, unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; u32 pte_flags; - if (type_mask == AGP_USER_UNCACHED_MEMORY) + if (type_mask == AGP_USER_MEMORY) pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; -- cgit v1.2.3 From 8d19215be8254f4f75e9c5a0d28345947b0382db Mon Sep 17 00:00:00 2001 From: Zou Nan hai Date: Tue, 2 Nov 2010 16:31:01 +0800 Subject: drm/i915: SNB BLT workaround On some steppings of the SNB CPU, the first command to be parsed by the BLT command streamer must be MI_BATCH_BUFFER_START, otherwise the GPU may hang.
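In essence the workaround keeps a pinned dummy batch that terminates immediately, and emits a jump to it ahead of every real command sequence; condensed from blt_ring_begin() in the diff below:

/* Condensed sketch: prefix every submission with MI_BATCH_BUFFER_START
 * aimed at a pinned page whose contents are just MI_BATCH_BUFFER_END,
 * so the first command the BLT parser sees after a tail move is always
 * a batch-buffer start. */
static int blt_begin_sketch(struct intel_ring_buffer *ring, int num_dwords)
{
        int ret = intel_ring_begin(ring, num_dwords + 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_BATCH_BUFFER_START);
        intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
        return 0;
}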
Signed-off-by: Zou Nan hai [ickle: rebased for -next] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 123 +++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/intel_ringbuffer.h | 3 + 2 files changed, 123 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 390aa21edbe4..a0702b6fb631 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -658,6 +658,9 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) drm_gem_object_unreference(ring->gem_object); ring->gem_object = NULL; + if (ring->cleanup) + ring->cleanup(ring); + cleanup_status_page(ring); } @@ -877,19 +880,133 @@ blt_ring_put_user_irq(struct intel_ring_buffer *ring) /* do nothing */ } + +/* Workaround for some stepping of SNB, + * each time when BLT engine ring tail moved, + * the first command in the ring to be parsed + * should be MI_BATCH_BUFFER_START + */ +#define NEED_BLT_WORKAROUND(dev) \ + (IS_GEN6(dev) && (dev->pdev->revision < 8)) + +static inline struct drm_i915_gem_object * +to_blt_workaround(struct intel_ring_buffer *ring) +{ + return ring->private; +} + +static int blt_ring_init(struct intel_ring_buffer *ring) +{ + if (NEED_BLT_WORKAROUND(ring->dev)) { + struct drm_i915_gem_object *obj; + u32 __iomem *ptr; + int ret; + + obj = to_intel_bo(i915_gem_alloc_object(ring->dev, 4096)); + if (obj == NULL) + return -ENOMEM; + + ret = i915_gem_object_pin(&obj->base, 4096, true, false); + if (ret) { + drm_gem_object_unreference(&obj->base); + return ret; + } + + ptr = kmap(obj->pages[0]); + iowrite32(MI_BATCH_BUFFER_END, ptr); + iowrite32(MI_NOOP, ptr+1); + kunmap(obj->pages[0]); + + ret = i915_gem_object_set_to_gtt_domain(&obj->base, false); + if (ret) { + i915_gem_object_unpin(&obj->base); + drm_gem_object_unreference(&obj->base); + return ret; + } + + ring->private = obj; + } + + return init_ring_common(ring); +} + +static int blt_ring_begin(struct intel_ring_buffer *ring, + int num_dwords) +{ + if (ring->private) { + int ret = intel_ring_begin(ring, num_dwords+2); + if (ret) + return ret; + + intel_ring_emit(ring, MI_BATCH_BUFFER_START); + intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset); + + return 0; + } else + return intel_ring_begin(ring, 4); +} + +static void blt_ring_flush(struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) +{ + if (blt_ring_begin(ring, 4) == 0) { + intel_ring_emit(ring, MI_FLUSH_DW); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + } +} + +static int +blt_ring_add_request(struct intel_ring_buffer *ring, + u32 *result) +{ + u32 seqno; + int ret; + + ret = blt_ring_begin(ring, 4); + if (ret) + return ret; + + seqno = i915_gem_get_seqno(ring->dev); + + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_advance(ring); + + DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); + *result = seqno; + return 0; +} + +static void blt_ring_cleanup(struct intel_ring_buffer *ring) +{ + if (!ring->private) + return; + + i915_gem_object_unpin(ring->private); + drm_gem_object_unreference(ring->private); + ring->private = NULL; +} + static const struct intel_ring_buffer gen6_blt_ring = { .name = "blt ring", .id = RING_BLT, .mmio_base = BLT_RING_BASE, .size = 32 * PAGE_SIZE, - .init = init_ring_common, + .init = 
blt_ring_init, .write_tail = ring_write_tail, - .flush = gen6_ring_flush, - .add_request = ring_add_request, + .flush = blt_ring_flush, + .add_request = blt_ring_add_request, .get_seqno = ring_status_page_get_seqno, .user_irq_get = blt_ring_get_user_irq, .user_irq_put = blt_ring_put_user_irq, .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, + .cleanup = blt_ring_cleanup, }; int intel_init_render_ring_buffer(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index acd23374fe89..68043f1a186e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -55,6 +55,7 @@ struct intel_ring_buffer { struct drm_i915_gem_execbuffer2 *exec, struct drm_clip_rect *cliprects, uint64_t exec_offset); + void (*cleanup)(struct intel_ring_buffer *ring); /** * List of objects currently involved in rendering from the @@ -90,6 +91,8 @@ struct intel_ring_buffer { wait_queue_head_t irq_queue; drm_local_map_t map; + + void *private; }; static inline u32 -- cgit v1.2.3 From 27153f72d04bcd83b3a66e219418a21d6269553b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 2 Nov 2010 11:17:23 +0000 Subject: drm/i915: Drop the iomem accessors when writing to the kmapped blt batch I presumed that, having bound it, we would be writing to the batch through the GTT, so I converted it to use iomem. Even later, when I spotted that we didn't even move the batch to the GTT (now an issue since we default to uncached memory on SNB), I still didn't realise that using iomem for kmapped memory was incorrect. Fix it. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index a0702b6fb631..85071570e1f9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -899,7 +899,7 @@ static int blt_ring_init(struct intel_ring_buffer *ring) { if (NEED_BLT_WORKAROUND(ring->dev)) { struct drm_i915_gem_object *obj; - u32 __iomem *ptr; + u32 *ptr; int ret; obj = to_intel_bo(i915_gem_alloc_object(ring->dev, 4096)); @@ -913,8 +913,8 @@ static int blt_ring_init(struct intel_ring_buffer *ring) } ptr = kmap(obj->pages[0]); - iowrite32(MI_BATCH_BUFFER_END, ptr); - iowrite32(MI_NOOP, ptr+1); + *ptr++ = MI_BATCH_BUFFER_END; + *ptr++ = MI_NOOP; kunmap(obj->pages[0]); ret = i915_gem_object_set_to_gtt_domain(&obj->base, false); -- cgit v1.2.3 From 085ce2643713830cf772c12c1a16da9d0ba83f36 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Nov 2010 09:27:53 +0000 Subject: drm/i915: Ensure that if we ever try to pin+fence it is mappable. When merging Daniel's full-gtt patches I had a set of tweaks which I thought I had undone. I was half right...
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=31286 Reported-by: jinjin.wang@intel.com Reported-by: Alexey Fisher Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 1 + drivers/gpu/drm/i915/intel_display.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5bd86033a88e..12dae003c011 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4151,6 +4151,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, int ret; BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); + BUG_ON(need_fence && !mappable); WARN_ON(i915_verify_lists(dev)); if (obj_priv->gtt_space != NULL) { diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 33c178c8d6dd..a33d9a2c7429 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1462,7 +1462,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, } ret = i915_gem_object_pin(obj, alignment, - !pipelined, obj_priv->tiling_mode); + !pipelined || obj_priv->tiling_mode, + obj_priv->tiling_mode); if (ret) return ret; -- cgit v1.2.3 From 818f2a3cc34b0673dccd4188ce4a1862d9d90127 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Nov 2010 17:04:29 +0100 Subject: drm/i915: revert pageflip/mappable related abi breakage In a00b10c360b35d6431a "Only enforce fence limits inside the GTT" Chris Wilson implemented an optimization to only pin framebuffers as mappable for crtc_set_base (but not for pageflips). This breaks the ABI, e.g. a double-buffering mesa client might leave the last framebuffer in unmappable space on close. A subsequent glReadPixels by a frontbuffer rendering client then goes boom. My pretty anal mappable/unmappable consistency checking detected this, see https://bugs.freedesktop.org/show_bug.cgi?id=31286 Chris Wilson tried to fix this in 085ce2643713830cf772c by pinning tiled framebuffers into mappable space. This a) renders the original optimization of not forcing framebuffers for pageflipping clients into mappable pointless because all our scanout buffers are tiled by default. b) doesn't solve the problem for untiled framebuffers. So kill this. Empirically it's no gain anyway because framebuffers are being reused by the ddx and hence there's no chance for them to get constantly bounced between mappable and unmappable. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f1e4eca44866..a2cd579eb9b2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1461,8 +1461,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, BUG(); } - ret = i915_gem_object_pin(obj, alignment, - !pipelined || obj_priv->tiling_mode, + ret = i915_gem_object_pin(obj, alignment, true, obj_priv->tiling_mode); if (ret) return ret; -- cgit v1.2.3 From 75e9e9158f38e5cb21eff23b30bafa6f32e0a606 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Nov 2010 17:11:09 +0100 Subject: drm/i915: kill mappable/fenceable distinction a00b10c360b35d6431a "Only enforce fence limits inside the GTT" also added a fenceable/mappable distinction when binding/pinning buffers.
This only complicates the code with no practical gain: - In execbuffer this only matters for g33/pineview, as this is the only chip that needs fences and has an unmappable gtt area. But fences are only possible in the mappable part of the gtt, so need_fence implies need_mappable. And need_mappable is only set independently with relocations which implies (for sane userspace) that the buffer is untiled. - The overlay code is only really used on i8xx, which doesn't have unmappable gtt. And it doesn't support tiled buffers, currently. - For all other buffers it's a bug to pass in a tiled bo. In short, this distinction doesn't have any practical gain. I've also reverted mapping the overlay and context pages as possibly unmappable. It's not worth being overly clever here, all the big gains from unmappable are for execbuf bos. Also add a comment for a clever optimization that confused me while reading the original patch by Chris Wilson. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 10 ++-- drivers/gpu/drm/i915/i915_gem.c | 84 ++++++++++++++++----------------- drivers/gpu/drm/i915/intel_display.c | 7 ++- drivers/gpu/drm/i915/intel_overlay.c | 4 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 6 +-- 5 files changed, 56 insertions(+), 55 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 48d0aefec1f8..621234265454 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -752,8 +752,6 @@ struct drm_i915_gem_object { * Advice: are the backing pages purgeable? */ unsigned int madv : 2; - unsigned int fenceable : 1; - unsigned int mappable : 1; /** * Current tiling mode for the object. @@ -772,6 +770,12 @@ struct drm_i915_gem_object { unsigned int pin_count : 4; #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf + /** + * Is the object at the current location in the gtt mappable and + * fenceable? Used to avoid costly recalculations. + */ + unsigned int map_and_fenceable : 1; + /** + * Whether the current gtt mapping needs to be mappable (and isn't just * mappable by accident).
Track pin and fault separate for a more @@ -1013,7 +1017,7 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, size_t size); void i915_gem_free_object(struct drm_gem_object *obj); int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, - bool mappable, bool need_fence); + bool map_and_fenceable); void i915_gem_object_unpin(struct drm_gem_object *obj); int i915_gem_object_unbind(struct drm_gem_object *obj); void i915_gem_release_mmap(struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 12dae003c011..47c665eeaf17 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -59,8 +59,7 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, bool interruptible); static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment, - bool mappable, - bool need_fence); + bool map_and_fenceable); static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -1074,7 +1073,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, else if (obj_priv->tiling_mode == I915_TILING_NONE && obj_priv->gtt_space && obj->write_domain != I915_GEM_DOMAIN_CPU) { - ret = i915_gem_object_pin(obj, 0, true, false); + ret = i915_gem_object_pin(obj, 0, true); if (ret) goto out; @@ -1300,8 +1299,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable); if (obj_priv->gtt_space) { - if (!obj_priv->mappable || - (obj_priv->tiling_mode && !obj_priv->fenceable)) { + if (!obj_priv->map_and_fenceable) { ret = i915_gem_object_unbind(obj); if (ret) goto unlock; @@ -1309,8 +1307,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } if (!obj_priv->gtt_space) { - ret = i915_gem_object_bind_to_gtt(obj, 0, - true, obj_priv->tiling_mode); + ret = i915_gem_object_bind_to_gtt(obj, 0, true); if (ret) goto unlock; } @@ -2273,8 +2270,8 @@ i915_gem_object_unbind(struct drm_gem_object *obj) i915_gem_info_remove_gtt(dev_priv, obj_priv); list_del_init(&obj_priv->mm_list); - obj_priv->fenceable = true; - obj_priv->mappable = true; + /* Avoid an unnecessary call to unbind on rebind. */ + obj_priv->map_and_fenceable = true; drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; @@ -2383,7 +2380,7 @@ static void i915_write_fence_reg(struct drm_gem_object *obj) if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || (obj_priv->gtt_offset & (size - 1))) { WARN(1, "%s: object 0x%08x [fenceable? 
%d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n", - __func__, obj_priv->gtt_offset, obj_priv->fenceable, size, + __func__, obj_priv->gtt_offset, obj_priv->map_and_fenceable, size, obj_priv->gtt_space->start, obj_priv->gtt_space->size); return; } @@ -2687,8 +2684,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment, - bool mappable, - bool need_fence) + bool map_and_fenceable) { struct drm_device *dev = obj->dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -2696,6 +2692,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, struct drm_mm_node *free_space; gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; u32 size, fence_size, fence_alignment; + bool mappable, fenceable; int ret; if (obj_priv->madv != I915_MADV_WILLNEED) { @@ -2707,25 +2704,25 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, fence_alignment = i915_gem_get_gtt_alignment(obj_priv); if (alignment == 0) - alignment = need_fence ? fence_alignment : 4096; - if (need_fence && alignment & (fence_alignment - 1)) { + alignment = map_and_fenceable ? fence_alignment : 4096; + if (map_and_fenceable && alignment & (fence_alignment - 1)) { DRM_ERROR("Invalid object alignment requested %u\n", alignment); return -EINVAL; } - size = need_fence ? fence_size : obj->size; + size = map_and_fenceable ? fence_size : obj->size; /* If the object is bigger than the entire aperture, reject it early * before evicting everything in a vain attempt to find space. */ if (obj->size > - (mappable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { + (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { DRM_ERROR("Attempting to bind an object larger than the aperture\n"); return -E2BIG; } search_free: - if (mappable) + if (map_and_fenceable) free_space = drm_mm_search_free_in_range(&dev_priv->mm.gtt_space, size, alignment, 0, @@ -2736,7 +2733,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, size, alignment, 0); if (free_space != NULL) { - if (mappable) + if (map_and_fenceable) obj_priv->gtt_space = drm_mm_get_block_range_generic(free_space, size, alignment, 0, @@ -2750,7 +2747,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, /* If the gtt is empty and we're still having trouble * fitting our object in, we're out of memory. 
*/ - ret = i915_gem_evict_something(dev, size, alignment, mappable); + ret = i915_gem_evict_something(dev, size, alignment, + map_and_fenceable); if (ret) return ret; @@ -2765,7 +2763,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, if (ret == -ENOMEM) { /* first try to clear up some space from the GTT */ ret = i915_gem_evict_something(dev, size, - alignment, mappable); + alignment, + map_and_fenceable); if (ret) { /* now try to shrink everyone else */ if (gfpmask) { @@ -2796,7 +2795,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, obj_priv->gtt_space = NULL; ret = i915_gem_evict_something(dev, size, - alignment, mappable); + alignment, map_and_fenceable); if (ret) return ret; @@ -2816,15 +2815,17 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); - trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, mappable); + trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, map_and_fenceable); - obj_priv->fenceable = + fenceable = obj_priv->gtt_space->size == fence_size && (obj_priv->gtt_space->start & (fence_alignment -1)) == 0; - obj_priv->mappable = + mappable = obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end; + obj_priv->map_and_fenceable = mappable && fenceable; + return 0; } @@ -3538,8 +3539,7 @@ i915_gem_execbuffer_pin(struct drm_device *dev, entry->relocation_count ? true : need_fence; /* Check fence reg constraints and rebind if necessary */ - if ((need_fence && !obj->fenceable) || - (need_mappable && !obj->mappable)) { + if (need_mappable && !obj->map_and_fenceable) { ret = i915_gem_object_unbind(&obj->base); if (ret) break; @@ -3547,8 +3547,7 @@ i915_gem_execbuffer_pin(struct drm_device *dev, ret = i915_gem_object_pin(&obj->base, entry->alignment, - need_mappable, - need_fence); + need_mappable); if (ret) break; @@ -4143,7 +4142,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, - bool mappable, bool need_fence) + bool map_and_fenceable) { struct drm_device *dev = obj->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -4151,19 +4150,19 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, int ret; BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); - BUG_ON(need_fence && !mappable); + BUG_ON(map_and_fenceable && !map_and_fenceable); WARN_ON(i915_verify_lists(dev)); if (obj_priv->gtt_space != NULL) { if ((alignment && obj_priv->gtt_offset & (alignment - 1)) || - (need_fence && !obj_priv->fenceable) || - (mappable && !obj_priv->mappable)) { + (map_and_fenceable && !obj_priv->map_and_fenceable)) { WARN(obj_priv->pin_count, "bo is already pinned with incorrect alignment:" - " offset=%x, req.alignment=%x, need_fence=%d, fenceable=%d, mappable=%d, cpu_accessible=%d\n", + " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," + " obj->map_and_fenceable=%d\n", obj_priv->gtt_offset, alignment, - need_fence, obj_priv->fenceable, - mappable, obj_priv->mappable); + map_and_fenceable, + obj_priv->map_and_fenceable); ret = i915_gem_object_unbind(obj); if (ret) return ret; @@ -4172,18 +4171,18 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, if (obj_priv->gtt_space == NULL) { ret = i915_gem_object_bind_to_gtt(obj, alignment, - mappable, need_fence); + map_and_fenceable); if (ret) return ret; } if (obj_priv->pin_count++ == 0) { - i915_gem_info_add_pin(dev_priv, obj_priv, mappable); + 
i915_gem_info_add_pin(dev_priv, obj_priv, map_and_fenceable); if (!obj_priv->active) list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); } - BUG_ON(!obj_priv->pin_mappable && mappable); + BUG_ON(!obj_priv->pin_mappable && map_and_fenceable); WARN_ON(i915_verify_lists(dev)); return 0; @@ -4245,8 +4244,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, obj_priv->user_pin_count++; obj_priv->pin_filp = file_priv; if (obj_priv->user_pin_count == 1) { - ret = i915_gem_object_pin(obj, args->alignment, - true, obj_priv->tiling_mode); + ret = i915_gem_object_pin(obj, args->alignment, true); if (ret) goto out; } @@ -4439,8 +4437,8 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, INIT_LIST_HEAD(&obj->ring_list); INIT_LIST_HEAD(&obj->gpu_write_list); obj->madv = I915_MADV_WILLNEED; - obj->fenceable = true; - obj->mappable = true; + /* Avoid an unnecessary call to unbind on the first bind. */ + obj->map_and_fenceable = true; return &obj->base; } @@ -4560,7 +4558,7 @@ i915_gem_init_pipe_control(struct drm_device *dev) obj_priv = to_intel_bo(obj); obj_priv->agp_type = AGP_USER_CACHED_MEMORY; - ret = i915_gem_object_pin(obj, 4096, true, false); + ret = i915_gem_object_pin(obj, 4096, true); if (ret) goto err_unref; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a2cd579eb9b2..77b34942dc91 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1461,8 +1461,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, BUG(); } - ret = i915_gem_object_pin(obj, alignment, true, - obj_priv->tiling_mode); + ret = i915_gem_object_pin(obj, alignment, true); if (ret) return ret; @@ -4367,7 +4366,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, /* we only need to pin inside GTT if cursor is non-phy */ mutex_lock(&dev->struct_mutex); if (!dev_priv->info->cursor_needs_physical) { - ret = i915_gem_object_pin(bo, PAGE_SIZE, true, false); + ret = i915_gem_object_pin(bo, PAGE_SIZE, true); if (ret) { DRM_ERROR("failed to pin cursor bo\n"); goto fail_locked; @@ -5531,7 +5530,7 @@ intel_alloc_context_page(struct drm_device *dev) } mutex_lock(&dev->struct_mutex); - ret = i915_gem_object_pin(ctx, 4096, false, false); + ret = i915_gem_object_pin(ctx, 4096, true); if (ret) { DRM_ERROR("failed to pin power context: %d\n", ret); goto err_unref; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 659f8349a15c..ec8ffaccbbdb 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -781,7 +781,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret != 0) return ret; - ret = i915_gem_object_pin(new_bo, PAGE_SIZE, false, false); + ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true); if (ret != 0) return ret; @@ -1425,7 +1425,7 @@ void intel_setup_overlay(struct drm_device *dev) } overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; } else { - ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false); + ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); if (ret) { DRM_ERROR("failed to pin overlay register bo\n"); goto out_free_bo; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 85071570e1f9..78a5061a58f6 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -547,7 +547,7 @@ static int init_status_page(struct intel_ring_buffer *ring) obj_priv = to_intel_bo(obj); obj_priv->agp_type = 
AGP_USER_CACHED_MEMORY; - ret = i915_gem_object_pin(obj, 4096, true, false); + ret = i915_gem_object_pin(obj, 4096, true); if (ret != 0) { goto err_unref; } @@ -602,7 +602,7 @@ int intel_init_ring_buffer(struct drm_device *dev, ring->gem_object = obj; - ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false); + ret = i915_gem_object_pin(obj, PAGE_SIZE, true); if (ret) goto err_unref; @@ -906,7 +906,7 @@ static int blt_ring_init(struct intel_ring_buffer *ring) if (obj == NULL) return -ENOMEM; - ret = i915_gem_object_pin(&obj->base, 4096, true, false); + ret = i915_gem_object_pin(&obj->base, 4096, true); if (ret) { drm_gem_object_unreference(&obj->base); return ret; -- cgit v1.2.3 From 045e769ab69ce94dedbcdcfd46c2578b385c2986 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 7 Nov 2010 09:18:22 +0000 Subject: drm/i915: Handle GPU hangs during fault gracefully. Instead of killing the process, just return no page found and reschedule the process giving the GPU some time to (hopefully) recover. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 47c665eeaf17..7c91bf2bbdfd 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1340,11 +1340,12 @@ unlock: mutex_unlock(&dev->struct_mutex); switch (ret) { + case -EAGAIN: + set_need_resched(); case 0: case -ERESTARTSYS: return VM_FAULT_NOPAGE; case -ENOMEM: - case -EAGAIN: return VM_FAULT_OOM; default: return VM_FAULT_SIGBUS; -- cgit v1.2.3 From ae69b42a10dafe61adb016e0e52ec1e8d1ba11b4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 7 Nov 2010 11:45:52 +0000 Subject: drm/i915/ringbuffer: Be consistent in use of ring->size when initialising Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 78a5061a58f6..9033697783b9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -173,7 +173,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) } I915_WRITE_CTL(ring, - ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) + ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_REPORT_64K | RING_VALID); /* If the head is still not zero, the ring is dead */ -- cgit v1.2.3 From 629e894173c9de589913cf649deaadec4b0579bd Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 7 Nov 2010 11:50:02 +0000 Subject: drm/i915/ringbuffer: Ignore failure to setup the ring on Sandybridge The ring buffer registers return 0 whilst idle (for some values of idle) on early Sandybridge hw. Persevere even when all appears hopeless... Fortunately the head auto-reporting prevents most hangs. 
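The test that decides whether the failure is survivable can be read in isolation. A hypothetical refactoring (the helper name is invented here; the logic and the revision cut-off are taken verbatim from the hunk below):

static bool ring_init_failure_is_benign(struct intel_ring_buffer *ring)
{
	/* Early Sandybridge steppings read back zeroed ring registers
	 * whilst the GT core is idle, so a failed readback on those
	 * parts is logged and tolerated rather than treated as -EIO. */
	return IS_GEN6(ring->dev) && ring->dev->pdev->revision <= 8;
}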
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=31370 Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 9033697783b9..f5d6151c953f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -180,14 +180,30 @@ static int init_ring_common(struct intel_ring_buffer *ring) if ((I915_READ_CTL(ring) & RING_VALID) == 0 || I915_READ_START(ring) != obj_priv->gtt_offset || (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { - DRM_ERROR("%s initialization failed " - "ctl %08x head %08x tail %08x start %08x\n", - ring->name, - I915_READ_CTL(ring), - I915_READ_HEAD(ring), - I915_READ_TAIL(ring), - I915_READ_START(ring)); - return -EIO; + if (IS_GEN6(ring->dev) && ring->dev->pdev->revision <= 8) { + /* Early revisions of Sandybridge do not like + * revealing the contents of the ring buffer + * registers whilst idle. Fortunately, the + * auto-reporting mechanism prevents most hangs, + * but this will bite us eventually... + */ + DRM_DEBUG("%s initialization failed " + "ctl %08x head %08x tail %08x start %08x. Ignoring, hope for the best!\n", + ring->name, + I915_READ_CTL(ring), + I915_READ_HEAD(ring), + I915_READ_TAIL(ring), + I915_READ_START(ring)); + } else { + DRM_ERROR("%s initialization failed " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ_CTL(ring), + I915_READ_HEAD(ring), + I915_READ_TAIL(ring), + I915_READ_START(ring)); + return -EIO; + } } if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) -- cgit v1.2.3 From de6e2eaf2c420bb8b0d4485913ef312a5539b489 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Sat, 6 Nov 2010 14:53:32 -0700 Subject: drm/i915: Apply B-spec mandated workaround for read flushes on Ironlake. This is not known to fix any particular bugs we have, but the spec says to do it, and the BIOS hadn't already set it up on my system. Signed-off-by: Eric Anholt Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 13 +++++++++++++ drivers/gpu/drm/i915/intel_display.c | 6 ++++++ 2 files changed, 19 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c79d4ba4fb12..09e2a5502652 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -316,6 +316,19 @@ #define ERROR_GEN6 0x040a0 +/* GM45+ chicken bits -- debug workaround bits that may be required + * for various sorts of correct behavior. The top 16 bits of each are + * the enables for writing to the corresponding low bit. + */ +#define _3D_CHICKEN 0x02084 +#define _3D_CHICKEN2 0x0208c +/* Disables pipelining of read flushes past the SF-WIZ interface. + * Required on all Ironlake steppings according to the B-Spec, but the + * particular danger of not doing so is not specified. 
+ */ +# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) +#define _3D_CHICKEN3 0x02090 + #define MI_MODE 0x0209c # define VS_TIMER_DISPATCH (1 << 6) # define MI_FLUSH_ENABLE (1 << 11) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 77b34942dc91..5ab403556846 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5818,6 +5818,12 @@ void intel_init_clock_gating(struct drm_device *dev) ILK_DPFC_DIS2 | ILK_CLK_FBC); } + + if (IS_GEN5(dev)) { + I915_WRITE(_3D_CHICKEN2, + _3D_CHICKEN2_WM_READ_PIPELINED << 16 | + _3D_CHICKEN2_WM_READ_PIPELINED); + } return; } else if (IS_G4X(dev)) { uint32_t dspclk_gate; -- cgit v1.2.3 From 67e92af01cb6f7e9a5fd5c930c43cd6f6ef45929 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Sat, 6 Nov 2010 14:53:33 -0700 Subject: drm/i915: Apply display workaround required according to the B-Spec. Not known to fix any current bugs. Signed-off-by: Eric Anholt Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 2 ++ drivers/gpu/drm/i915/intel_display.c | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 09e2a5502652..61fe2619bb63 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2609,6 +2609,8 @@ #define GTIER 0x4401c #define ILK_DISPLAY_CHICKEN2 0x42004 +/* Required on all Ironlake and Sandybridge according to the B-Spec. */ +#define ILK_ELPIN_409_SELECT (1 << 25) #define ILK_DPARB_GATE (1<<22) #define ILK_VSDPFD_FULL (1<<21) #define ILK_DSPCLK_GATE 0x42020 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5ab403556846..c41dae5ade32 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5819,6 +5819,10 @@ void intel_init_clock_gating(struct drm_device *dev) ILK_CLK_FBC); } + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_ELPIN_409_SELECT); + if (IS_GEN5(dev)) { I915_WRITE(_3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED << 16 | -- cgit v1.2.3 From ba4f01a30480cdcd516b782f77a6e0951b83df1c Mon Sep 17 00:00:00 2001 From: Yuanhan Liu Date: Mon, 8 Nov 2010 17:09:41 +0800 Subject: drm/i915: trace down all the register write and read Add two tracepoints at I915_WRITE/READ for tracing down all the register write and read. 
Signed-off-by: Yuanhan Liu Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 61 ++++++++++++++++++++++++++++++++++----- drivers/gpu/drm/i915/i915_trace.h | 23 +++++++++++++++ 2 files changed, 76 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 621234265454..220ce53d4a9c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -32,6 +32,7 @@ #include "i915_reg.h" #include "intel_bios.h" +#include "i915_trace.h" #include "intel_ringbuffer.h" #include #include @@ -1173,14 +1174,58 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove LOCK_TEST_WITH_RETURN(dev, file_priv); \ } while (0) -#define I915_READ(reg) readl(dev_priv->regs + (reg)) -#define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg)) -#define I915_READ16(reg) readw(dev_priv->regs + (reg)) -#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) -#define I915_READ8(reg) readb(dev_priv->regs + (reg)) -#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg)) -#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg)) -#define I915_READ64(reg) readq(dev_priv->regs + (reg)) +static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg, int len) +{ + u64 val = 0; + + switch (len) { + case 8: + val = readq(dev_priv->regs + reg); + break; + case 4: + val = readl(dev_priv->regs + reg); + break; + case 2: + val = readw(dev_priv->regs + reg); + break; + case 1: + val = readb(dev_priv->regs + reg); + break; + } + trace_i915_reg_rw('R', reg, val, len); + + return val; +} + +static inline void +i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) +{ + /* Trace down the write operation before the real write */ + trace_i915_reg_rw('W', reg, val, len); + switch (len) { + case 8: + writeq(val, dev_priv->regs + reg); + break; + case 4: + writel(val, dev_priv->regs + reg); + break; + case 2: + writew(val, dev_priv->regs + reg); + break; + case 1: + writeb(val, dev_priv->regs + reg); + break; + } +} + +#define I915_READ(reg) i915_read(dev_priv, (reg), 4) +#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val), 4) +#define I915_READ16(reg) i915_read(dev_priv, (reg), 2) +#define I915_WRITE16(reg, val) i915_write(dev_priv, (reg), (val), 2) +#define I915_READ8(reg) i915_read(dev_priv, (reg), 1) +#define I915_WRITE8(reg, val) i915_write(dev_priv, (reg), (val), 1) +#define I915_WRITE64(reg, val) i915_write(dev_priv, (reg), (val), 8) +#define I915_READ64(reg) i915_read(dev_priv, (reg), 8) #define POSTING_READ(reg) (void)I915_READ(reg) #define POSTING_READ16(reg) (void)I915_READ16(reg) diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 0b1049ff72a3..34ef49fd0377 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -301,6 +301,29 @@ TRACE_EVENT(i915_flip_complete, TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) ); +TRACE_EVENT(i915_reg_rw, + TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len), + + TP_ARGS(cmd, reg, val, len), + + TP_STRUCT__entry( + __field(int, cmd) + __field(uint32_t, reg) + __field(uint64_t, val) + __field(int, len) + ), + + TP_fast_assign( + __entry->cmd = cmd; + __entry->reg = reg; + __entry->val = (uint64_t)val; + __entry->len = len; + ), + + TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d", + __entry->cmd, __entry->reg, __entry->val, __entry->len) +); + #endif /* _I915_TRACE_H_ */ /* This part must be outside protection */ -- cgit v1.2.3 
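With the event in place, the standard ftrace plumbing is enough to watch register traffic from userspace. The sketch below is illustrative rather than part of the series; it assumes debugfs is mounted at the conventional /sys/kernel/debug path and a kernel built with the patch above.

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Enable just the i915_reg_rw event added by this patch. */
	fd = open("/sys/kernel/debug/tracing/events/i915/i915_reg_rw/enable",
		  O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1)
		return 1;
	close(fd);

	/* trace_pipe blocks until events arrive, then drains them. */
	fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}

Each emitted line carries the cmd, reg, val and len fields laid out by TP_printk above.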
From 65e5ecb066fe54c13c8445d6acfdcdf149ad5df9 Mon Sep 17 00:00:00 2001 From: Yuanhan Liu Date: Mon, 8 Nov 2010 09:56:37 +0000 Subject: drm/i915: Add untraced register read/write interface This will be used later to hide the frequently written registers from debug traces in order to increase the signal-to-noise. Signed-off-by: Yuanhan Liu Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 220ce53d4a9c..02c35d57fbed 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1229,6 +1229,11 @@ i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) #define POSTING_READ(reg) (void)I915_READ(reg) #define POSTING_READ16(reg) (void)I915_READ16(reg) +#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) +#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) +#define POSTING_READ_NOTRACE(reg) (void)I915_READ_NOTRACE(reg) + + #define BEGIN_LP_RING(n) \ intel_ring_begin(&dev_priv->render_ring, (n)) -- cgit v1.2.3 From db5e4172a023cff68b3597ace8a5390b02669d27 Mon Sep 17 00:00:00 2001 From: Yuanhan Liu Date: Mon, 8 Nov 2010 09:58:16 +0000 Subject: drm/i915: filter out the read/write of GPIO registers from debug tracing These registers are written very frequently, are timing sensitive, and not particularly relevant to any debugging, so remove the tracepoints from these. Signed-off-by: Yuanhan Liu Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_i2c.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 2be4f728ed0c..8f5c0d33ea00 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -85,8 +85,9 @@ static u32 get_reserved(struct intel_gpio *gpio) /* On most chips, these bits must be preserved in software. 
*/ if (!IS_I830(dev) && !IS_845G(dev)) - reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE | - GPIO_CLOCK_PULLUP_DISABLE); + reserved = I915_READ_NOTRACE(gpio->reg) & + (GPIO_DATA_PULLUP_DISABLE | + GPIO_CLOCK_PULLUP_DISABLE); return reserved; } @@ -96,9 +97,9 @@ static int get_clock(void *data) struct intel_gpio *gpio = data; struct drm_i915_private *dev_priv = gpio->dev_priv; u32 reserved = get_reserved(gpio); - I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK); - I915_WRITE(gpio->reg, reserved); - return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; + I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK); + I915_WRITE_NOTRACE(gpio->reg, reserved); + return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; } static int get_data(void *data) @@ -106,9 +107,9 @@ static int get_data(void *data) struct intel_gpio *gpio = data; struct drm_i915_private *dev_priv = gpio->dev_priv; u32 reserved = get_reserved(gpio); - I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK); - I915_WRITE(gpio->reg, reserved); - return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0; + I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK); + I915_WRITE_NOTRACE(gpio->reg, reserved); + return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0; } static void set_clock(void *data, int state_high) @@ -124,8 +125,8 @@ static void set_clock(void *data, int state_high) clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK; - I915_WRITE(gpio->reg, reserved | clock_bits); - POSTING_READ(gpio->reg); + I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits); + POSTING_READ_NOTRACE(gpio->reg); } static void set_data(void *data, int state_high) @@ -141,8 +142,8 @@ static void set_data(void *data, int state_high) data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK; - I915_WRITE(gpio->reg, reserved | data_bits); - POSTING_READ(gpio->reg); + I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits); + POSTING_READ_NOTRACE(gpio->reg); } static struct i2c_adapter * -- cgit v1.2.3 From 374c479bef7ecd2b41d6dd6e24aa21d73b3afae5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 8 Nov 2010 21:07:24 +0000 Subject: drm/i915: POSTING_READs are simply flushes and so irrelevant to tracing As we use POSTING_READ to flush the write to the register before proceeding, we do not care what the return value is and similar we do not care for the read to be recorded whilst tracing register read/writes. 
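The idiom being converted is compact enough to restate. A minimal sketch (illustrative, not from the patch) of a posting read:

#include <linux/io.h>
#include <linux/types.h>

/* The readback forces the preceding write out to the device before the
 * CPU proceeds; the returned value is deliberately discarded. Nothing
 * is consumed, so there is nothing useful for the tracer to record,
 * which is why POSTING_READ can safely use the untraced accessor. */
static inline void write_and_flush(void __iomem *regs, u32 reg, u32 val)
{
	writel(val, regs + reg);
	(void)readl(regs + reg);	/* posting read */
}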
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 8 +++++--- drivers/gpu/drm/i915/intel_i2c.c | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 02c35d57fbed..ff7593f70f0f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1226,12 +1226,14 @@ i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) #define I915_WRITE8(reg, val) i915_write(dev_priv, (reg), (val), 1) #define I915_WRITE64(reg, val) i915_write(dev_priv, (reg), (val), 8) #define I915_READ64(reg) i915_read(dev_priv, (reg), 8) -#define POSTING_READ(reg) (void)I915_READ(reg) -#define POSTING_READ16(reg) (void)I915_READ16(reg) #define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) #define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) -#define POSTING_READ_NOTRACE(reg) (void)I915_READ_NOTRACE(reg) +#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) +#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) + +#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) +#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) #define BEGIN_LP_RING(n) \ diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 8f5c0d33ea00..d2d493a24e65 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -126,7 +126,7 @@ static void set_clock(void *data, int state_high) GPIO_CLOCK_VAL_MASK; I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits); - POSTING_READ_NOTRACE(gpio->reg); + POSTING_READ(gpio->reg); } static void set_data(void *data, int state_high) @@ -143,7 +143,7 @@ static void set_data(void *data, int state_high) GPIO_DATA_VAL_MASK; I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits); - POSTING_READ_NOTRACE(gpio->reg); + POSTING_READ(gpio->reg); } static struct i2c_adapter * -- cgit v1.2.3 From 56e2ea346ab4c2ea159ecdec85fffc24f50c2903 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 8 Nov 2010 17:10:29 +0000 Subject: drm/i915: Fix unload after failed initialisation If modeset init failed we attempted to unload the module, before we finished setting it up and so triggered various oopses. 
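The fix follows the usual kernel unwind idiom: every initialisation step acquires a label that undoes all prior steps, and each failure jumps to the deepest label reached so far. A minimal sketch, with setup_*() and teardown_*() as invented stand-ins for the hws/vblank/modeset steps in i915_driver_load():

static int example_load(struct drm_device *dev)
{
	int ret;

	ret = setup_a(dev);
	if (ret)
		return ret;

	ret = setup_b(dev);
	if (ret)
		goto out_a;

	ret = setup_c(dev);
	if (ret)
		goto out_b;

	return 0;

out_b:
	/* Labels run in reverse order of setup, so each exit path
	 * tears down exactly what was brought up before the failure. */
	teardown_b(dev);
out_a:
	teardown_a(dev);
	return ret;
}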
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index eee88cfcb3aa..307bad0fcef7 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1993,7 +1993,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) drm_core_check_feature(dev, DRIVER_MODESET)) { DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); ret = -ENODEV; - goto out_iomapfree; + goto out_workqueue_free; } dev->driver->get_vblank_counter = i915_get_vblank_counter; @@ -2016,8 +2016,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) /* Init HWS */ if (!I915_NEED_GFX_HWS(dev)) { ret = i915_init_phys_hws(dev); - if (ret != 0) - goto out_workqueue_free; + if (ret) + goto out_gem_unload; } if (IS_PINEVIEW(dev)) @@ -2044,11 +2044,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->trace_irq_seqno = 0; ret = drm_vblank_init(dev, I915_NUM_PIPE); - - if (ret) { - (void) i915_driver_unload(dev); - return ret; - } + if (ret) + goto out_gem_unload; /* Start out suspended */ dev_priv->mm.suspended = 1; @@ -2059,7 +2056,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ret = i915_load_modeset_init(dev); if (ret < 0) { DRM_ERROR("failed to init modeset\n"); - goto out_workqueue_free; + goto out_gem_unload; } } @@ -2077,6 +2074,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) return 0; +out_gem_unload: + if (dev->pdev->msi_enabled) + pci_disable_msi(dev->pdev); + + intel_teardown_gmbus(dev); + intel_teardown_mchbar(dev); out_workqueue_free: destroy_workqueue(dev_priv->wq); out_iomapfree: -- cgit v1.2.3 From 33d2323711059d14fd675fa7def317e560fa787e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 10 Nov 2010 13:59:09 +0000 Subject: drm/i915: Unconditionally get the fence reg when pinning scanout We use i915_gem_object_get_fence_reg() to do LRU tracking of the fence registers, so stop trying to be too clever when pinning the fb->obj. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c41dae5ade32..63770c963077 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1474,8 +1474,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, * framebuffer compression. For simplicity, we always install * a fence as the cost is not that onerous. */ - if (obj_priv->fence_reg == I915_FENCE_REG_NONE && - obj_priv->tiling_mode != I915_TILING_NONE) { + if (obj_priv->tiling_mode != I915_TILING_NONE) { ret = i915_gem_object_get_fence_reg(obj, false); if (ret) goto err_unpin; -- cgit v1.2.3 From 5d97eb69bd4767ce9973360881fa6ad161510fb0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 10 Nov 2010 20:40:02 +0000 Subject: drm/i915: Only add the lazy request if we end up waiting for it. 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 9 +++------ drivers/gpu/drm/i915/intel_ringbuffer.h | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7c91bf2bbdfd..52f5c194c50f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1664,9 +1664,7 @@ i915_gem_next_request_seqno(struct drm_device *dev, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; - - ring->outstanding_lazy_request = true; - return dev_priv->next_seqno; + return ring->outstanding_lazy_request = dev_priv->next_seqno; } static void @@ -2072,7 +2070,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, if (atomic_read(&dev_priv->mm.wedged)) return -EAGAIN; - if (ring->outstanding_lazy_request) { + if (seqno == ring->outstanding_lazy_request) { struct drm_i915_gem_request *request; request = kzalloc(sizeof(*request), GFP_KERNEL); @@ -2087,7 +2085,6 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, seqno = request->seqno; } - BUG_ON(seqno == dev_priv->next_seqno); if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) { if (HAS_PCH_SPLIT(dev)) @@ -3973,7 +3970,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_retire_commands(dev, ring); if (i915_add_request(dev, file, request, ring)) - ring->outstanding_lazy_request = true; + i915_gem_next_request_seqno(dev, ring); else request = NULL; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 68043f1a186e..d73145c790bd 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -87,7 +87,7 @@ struct intel_ring_buffer { /** * Do we have some not yet emitted requests outstanding? */ - bool outstanding_lazy_request; + u32 outstanding_lazy_request; wait_queue_head_t irq_queue; drm_local_map_t map; -- cgit v1.2.3 From 527f9e907c39f7e88abb57eaa8bccb43c8706a3d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 11 Nov 2010 01:16:58 +0000 Subject: drm/i915: Remove the global irq wait queue ... as it has been replaced by per-ring waiters. 
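Schematically, a waiter now sleeps on the queue embedded in its ring (the irq_queue member visible in the intel_ringbuffer.h hunk above) instead of a single device-wide queue. As a sketch only; the driver's real wait condition also accounts for the wedged state:

wait_event(ring->irq_queue,
	   i915_seqno_passed(ring->get_seqno(ring), seqno));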
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 8 +------- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_irq.c | 2 +- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 9cb6061bf9d5..4c8fae9baaad 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1022,7 +1022,6 @@ i915_wedged_write(struct file *filp, loff_t *ppos) { struct drm_device *dev = filp->private_data; - drm_i915_private_t *dev_priv = dev->dev_private; char buf[20]; int val = 1; @@ -1038,12 +1037,7 @@ i915_wedged_write(struct file *filp, } DRM_INFO("Manually setting wedged to %d\n", val); - - atomic_set(&dev_priv->mm.wedged, val); - if (val) { - wake_up_all(&dev_priv->irq_queue); - queue_work(dev_priv->wq, &dev_priv->error_work); - } + i915_handle_error(dev, val); return cnt; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ff7593f70f0f..30d7a7bc6f2e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -289,7 +289,6 @@ typedef struct drm_i915_private { int current_page; int page_flipping; - wait_queue_head_t irq_queue; atomic_t irq_received; /** Protects user_irq_refcount and irq_mask_reg */ spinlock_t user_irq_lock; @@ -915,6 +914,7 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); /* i915_irq.c */ void i915_hangcheck_elapsed(unsigned long data); +void i915_handle_error(struct drm_device *dev, bool wedged); extern int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int i915_irq_wait(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 3ec631f1129d..4a0664ea49b9 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -891,7 +891,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). */ -static void i915_handle_error(struct drm_device *dev, bool wedged) +void i915_handle_error(struct drm_device *dev, bool wedged) { struct drm_i915_private *dev_priv = dev->dev_private; -- cgit v1.2.3 From cae5852dcaa1139b198e13ebd3aeb7f3c065f875 Mon Sep 17 00:00:00 2001 From: Zou Nan hai Date: Tue, 9 Nov 2010 17:17:32 +0800 Subject: drm/i915/ringbuffer: set FORCE_WAKE bit before reading ring register Before reading ring register, set FORCE_WAKE bit to prevent GT core power down to low power state, otherwise we may read stale values. 
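Condensed, the pattern is: request the wake, flush that write, give the GT core time to come up, then perform the real read. The helper below merely restates i915_safe_read() from the diff that follows, under an invented name; the 100us delay is the patch's own empirical value and is flagged there as an open question.

static u32 gen6_powered_read(struct drm_i915_private *dev_priv, u32 reg)
{
	I915_WRITE_NOTRACE(FORCEWAKE, 1);	/* hold off GT power-down */
	POSTING_READ(FORCEWAKE);		/* flush the wake request */
	udelay(100);				/* let the core power up */
	return I915_READ(reg);
}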
Signed-off-by: Zou Nan hai [ickle: added a udelay which seemed to do the trick on my SNB] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 176 +++++++++++++++++--------------- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_ringbuffer.c | 2 +- drivers/gpu/drm/i915/intel_ringbuffer.h | 13 ++- 4 files changed, 106 insertions(+), 86 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 30d7a7bc6f2e..ecf12f9de1e0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -876,6 +876,67 @@ enum intel_chip_family { CHIP_I965 = 0x08, }; +#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) + +#define IS_I830(dev) ((dev)->pci_device == 0x3577) +#define IS_845G(dev) ((dev)->pci_device == 0x2562) +#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) +#define IS_I865G(dev) ((dev)->pci_device == 0x2572) +#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) +#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) +#define IS_I945G(dev) ((dev)->pci_device == 0x2772) +#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) +#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) +#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) +#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) +#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) +#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) +#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) +#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) +#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) +#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) +#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) +#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) + +#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) +#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) +#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) +#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) +#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) + +#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) +#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) +#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) + +#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) +#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) + +/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte + * rows, which changed the alignment requirements and fence programming. 
+ */ +#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ + IS_I915GM(dev))) +#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) +#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) +#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) +#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) +#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) +#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) +/* dsparb controlled by hw only */ +#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) + +#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) +#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) +#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) +#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) + +#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev)) +#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev)) + +#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) +#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) +#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) + extern struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; extern unsigned int i915_fbpercrtc; @@ -1174,6 +1235,23 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove LOCK_TEST_WITH_RETURN(dev, file_priv); \ } while (0) +#define I915_READ(reg) i915_read(dev_priv, (reg), 4) +#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val), 4) +#define I915_READ16(reg) i915_read(dev_priv, (reg), 2) +#define I915_WRITE16(reg, val) i915_write(dev_priv, (reg), (val), 2) +#define I915_READ8(reg) i915_read(dev_priv, (reg), 1) +#define I915_WRITE8(reg, val) i915_write(dev_priv, (reg), (val), 1) +#define I915_WRITE64(reg, val) i915_write(dev_priv, (reg), (val), 8) +#define I915_READ64(reg) i915_read(dev_priv, (reg), 8) + +#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) +#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) +#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) +#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) + +#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) +#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) + static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg, int len) { u64 val = 0; @@ -1197,6 +1275,23 @@ static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg, int len) return val; } +/* On SNB platform, before reading ring registers forcewake bit + * must be set to prevent GT core from power down and stale values being + * returned. + */ +static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg) +{ + if (IS_GEN6(dev_priv->dev)) { + I915_WRITE_NOTRACE(FORCEWAKE, 1); + POSTING_READ(FORCEWAKE); + /* XXX How long do we really need to wait here? + * Will different registers/engines require different periods? 
+ */ + udelay(100); + } + return I915_READ(reg); +} + static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) { @@ -1218,24 +1313,6 @@ i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) } } -#define I915_READ(reg) i915_read(dev_priv, (reg), 4) -#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val), 4) -#define I915_READ16(reg) i915_read(dev_priv, (reg), 2) -#define I915_WRITE16(reg, val) i915_write(dev_priv, (reg), (val), 2) -#define I915_READ8(reg) i915_read(dev_priv, (reg), 1) -#define I915_WRITE8(reg, val) i915_write(dev_priv, (reg), (val), 1) -#define I915_WRITE64(reg, val) i915_write(dev_priv, (reg), (val), 8) -#define I915_READ64(reg) i915_read(dev_priv, (reg), 8) - -#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) -#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) -#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) -#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) - -#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) -#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) - - #define BEGIN_LP_RING(n) \ intel_ring_begin(&dev_priv->render_ring, (n)) @@ -1266,67 +1343,4 @@ i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) #define I915_GEM_HWS_INDEX 0x20 #define I915_BREADCRUMB_INDEX 0x21 -#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) - -#define IS_I830(dev) ((dev)->pci_device == 0x3577) -#define IS_845G(dev) ((dev)->pci_device == 0x2562) -#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) -#define IS_I865G(dev) ((dev)->pci_device == 0x2572) -#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) -#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) -#define IS_I945G(dev) ((dev)->pci_device == 0x2772) -#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) -#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) -#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) -#define IS_GM45(dev) ((dev)->pci_device == 0x2A42) -#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) -#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) -#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) -#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) -#define IS_G33(dev) (INTEL_INFO(dev)->is_g33) -#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) -#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) -#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) - -#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) -#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) -#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) -#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) -#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) - -#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) -#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) -#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) - -#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) -#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) - -/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte - * rows, which changed the alignment requirements and fence programming. 
- */ -#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ - IS_I915GM(dev))) -#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) -#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) -#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) -#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) -#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) -#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) -/* dsparb controlled by hw only */ -#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) - -#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) -#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) -#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) -#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) - -#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev)) -#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev)) - -#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) -#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) -#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) - -#define PRIMARY_RINGBUFFER_SIZE (128*1024) - #endif diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 61fe2619bb63..1eca8e710b9e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3077,4 +3077,5 @@ #define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) +#define FORCEWAKE 0xA18C #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index f5d6151c953f..99f2c96a9c70 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -708,7 +708,7 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; unsigned long end; u32 head; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index d73145c790bd..2565d65a625b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -7,13 +7,18 @@ struct intel_hw_status_page { struct drm_gem_object *obj; }; -#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base)) +#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) + +#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base)) #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val) -#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base)) + +#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base)) #define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val) -#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base)) + +#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base)) #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val) -#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base)) + +#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base)) #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val) struct drm_i915_gem_execbuffer2; -- cgit v1.2.3 From e74cfed521746544e6eeee84b24bd31c1e59ffe2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 9 Nov 2010 10:16:56 +0000 Subject: 
Revert "drm/i915/ringbuffer: Ignore failure to setup the ring on Sandybridge" This reverts commit 629e894173c9de589913cf649deaadec4b0579bd. --- drivers/gpu/drm/i915/intel_ringbuffer.c | 32 ++++++++------------------------ 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 99f2c96a9c70..1db860d7989a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -180,30 +180,14 @@ static int init_ring_common(struct intel_ring_buffer *ring) if ((I915_READ_CTL(ring) & RING_VALID) == 0 || I915_READ_START(ring) != obj_priv->gtt_offset || (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { - if (IS_GEN6(ring->dev) && ring->dev->pdev->revision <= 8) { - /* Early revisions of Sandybridge do not like - * revealing the contents of the ring buffer - * registers whilst idle. Fortunately, the - * auto-reporting mechanism prevents most hangs, - * but this will bite us eventually... - */ - DRM_DEBUG("%s initialization failed " - "ctl %08x head %08x tail %08x start %08x. Ignoring, hope for the best!\n", - ring->name, - I915_READ_CTL(ring), - I915_READ_HEAD(ring), - I915_READ_TAIL(ring), - I915_READ_START(ring)); - } else { - DRM_ERROR("%s initialization failed " - "ctl %08x head %08x tail %08x start %08x\n", - ring->name, - I915_READ_CTL(ring), - I915_READ_HEAD(ring), - I915_READ_TAIL(ring), - I915_READ_START(ring)); - return -EIO; - } + DRM_ERROR("%s initialization failed " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ_CTL(ring), + I915_READ_HEAD(ring), + I915_READ_TAIL(ring), + I915_READ_START(ring)); + return -EIO; } if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) -- cgit v1.2.3 From 8168bd48bb863c00747497aadf13884b2d69d287 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 11 Nov 2010 17:54:52 +0000 Subject: drm/i915: Remove the definitions for Primary Ring Buffer We only ever used the PRB0, neglecting the secondary ring buffers, and now with the advent of multiple engines with separate ring buffers we need to excise the anachronisms from our code (and be explicit about which ring we mean where). This is doubly important in light of the FORCEWAKE required to read ring buffer registers on SandyBridge. 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 4 ++-- drivers/gpu/drm/i915/i915_irq.c | 28 ++++++++++++++-------------- drivers/gpu/drm/i915/i915_reg.h | 10 ++++++---- drivers/gpu/drm/i915/intel_display.c | 10 +++++----- 4 files changed, 27 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 307bad0fcef7..4cd04917868d 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -106,8 +106,8 @@ void i915_kernel_lost_context(struct drm_device * dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) return; - ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; + ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; + ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; ring->space = ring->head - (ring->tail + 8); if (ring->space < 0) ring->space += ring->size; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4a0664ea49b9..21034527d3a4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -520,30 +520,30 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring) } static u32 -i915_ringbuffer_last_batch(struct drm_device *dev) +i915_ringbuffer_last_batch(struct drm_device *dev, + struct intel_ring_buffer *ring) { struct drm_i915_private *dev_priv = dev->dev_private; u32 head, bbaddr; - u32 *ring; + u32 *val; /* Locate the current position in the ringbuffer and walk back * to find the most recently dispatched batch buffer. */ bbaddr = 0; - head = I915_READ(PRB0_HEAD) & HEAD_ADDR; - ring = (u32 *)(dev_priv->render_ring.virtual_start + head); + head = I915_READ_HEAD(ring) & HEAD_ADDR; + val = (u32 *)(ring->virtual_start + head); - while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { - bbaddr = i915_get_bbaddr(dev, ring); + while (--val >= (u32 *)ring->virtual_start) { + bbaddr = i915_get_bbaddr(dev, val); if (bbaddr) break; } if (bbaddr == 0) { - ring = (u32 *)(dev_priv->render_ring.virtual_start - + dev_priv->render_ring.size); - while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { - bbaddr = i915_get_bbaddr(dev, ring); + val = (u32 *)(ring->virtual_start + ring->size); + while (--val >= (u32 *)ring->virtual_start) { + bbaddr = i915_get_bbaddr(dev, val); if (bbaddr) break; } @@ -628,7 +628,7 @@ static void i915_capture_error_state(struct drm_device *dev) error->bbaddr = 0; } - bbaddr = i915_ringbuffer_last_batch(dev); + bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->render_ring); /* Grab the current batchbuffer, most likely to have crashed. */ batchbuffer[0] = NULL; @@ -1398,10 +1398,10 @@ void i915_hangcheck_elapsed(unsigned long data) * and break the hang. This should work on * all but the second generation chipsets. 
*/ - u32 tmp = I915_READ(PRB0_CTL); + struct intel_ring_buffer *ring = &dev_priv->render_ring; + u32 tmp = I915_READ_CTL(ring); if (tmp & RING_WAIT) { - I915_WRITE(PRB0_CTL, tmp); - POSTING_READ(PRB0_CTL); + I915_WRITE_CTL(ring, tmp); goto repeat; } } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1eca8e710b9e..886c0e072490 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -256,10 +256,6 @@ * Instruction and interrupt control regs */ #define PGTBL_ER 0x02024 -#define PRB0_TAIL 0x02030 -#define PRB0_HEAD 0x02034 -#define PRB0_START 0x02038 -#define PRB0_CTL 0x0203c #define RENDER_RING_BASE 0x02000 #define BSD_RING_BASE 0x04000 #define GEN6_BSD_RING_BASE 0x12000 @@ -285,10 +281,16 @@ #define RING_INVALID 0x00000000 #define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ #define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ +#if 0 +#define PRB0_TAIL 0x02030 +#define PRB0_HEAD 0x02034 +#define PRB0_START 0x02038 +#define PRB0_CTL 0x0203c #define PRB1_TAIL 0x02040 /* 915+ only */ #define PRB1_HEAD 0x02044 /* 915+ only */ #define PRB1_START 0x02048 /* 915+ only */ #define PRB1_CTL 0x0204c /* 915+ only */ +#endif #define IPEIR_I965 0x02064 #define IPEHR_I965 0x02068 #define INSTDONE_I965 0x0206c diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 63770c963077..6a7f11ff66f5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1983,17 +1983,17 @@ static void intel_flush_display_plane(struct drm_device *dev, static void intel_clear_scanline_wait(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring; u32 tmp; if (IS_GEN2(dev)) /* Can't break the hang on i8xx */ return; - tmp = I915_READ(PRB0_CTL); - if (tmp & RING_WAIT) { - I915_WRITE(PRB0_CTL, tmp); - POSTING_READ(PRB0_CTL); - } + ring = &dev_priv->render_ring; + tmp = I915_READ_CTL(ring); + if (tmp & RING_WAIT) + I915_WRITE_CTL(ring, tmp); } static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) -- cgit v1.2.3 From 5e78330126e23e009502b21d1efdabd68ab91397 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 14 Nov 2010 22:32:36 +0100 Subject: drm/i915: fix relaxed tiling for gen <= 3 && !g33 g33/pineview doesn't have any alignment constrains for unfenced tiled buffers. But older chips have. Fix this. Problem introduced in a00b10c360b35d6431a94cbf130a4e162870d661. Signed-off-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_gem.c | 43 ++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 52f5c194c50f..f2038f6df3b0 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1467,7 +1467,7 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj) * @obj: object to check * * Return the required GTT alignment for an object, taking into account - * potential fence register mapping if needed. + * potential fence register mapping. */ static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv) @@ -1489,6 +1489,41 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv) return i915_gem_get_gtt_size(obj_priv); } +/** + * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an + * unfenced object + * @obj: object to check + * + * Return the required GTT alignment for an object, only taking into account + * unfenced tiled surface requirements. 
+ */ +static uint32_t +i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv) +{ + struct drm_device *dev = obj_priv->base.dev; + int tile_height; + + /* + * Minimum alignment is 4k (GTT page size) for sane hw. + */ + if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || + obj_priv->tiling_mode == I915_TILING_NONE) + return 4096; + + /* + * Older chips need unfenced tiled buffers to be aligned to the left + * edge of an even tile row (where tile rows are counted as if the bo is + * placed in a fenced gtt region). + */ + if (IS_GEN2(dev) || + (obj_priv->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) + tile_height = 32; + else + tile_height = 8; + + return tile_height * obj_priv->stride * 2; +} + static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) { @@ -2689,7 +2724,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_mm_node *free_space; gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; - u32 size, fence_size, fence_alignment; + u32 size, fence_size, fence_alignment, unfenced_alignment; bool mappable, fenceable; int ret; @@ -2700,9 +2735,11 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, fence_size = i915_gem_get_gtt_size(obj_priv); fence_alignment = i915_gem_get_gtt_alignment(obj_priv); + unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj_priv); if (alignment == 0) - alignment = map_and_fenceable ? fence_alignment : 4096; + alignment = map_and_fenceable ? fence_alignment : + unfenced_alignment; if (map_and_fenceable && alignment & (fence_alignment - 1)) { DRM_ERROR("Invalid object alignment requested %u\n", alignment); return -EINVAL; -- cgit v1.2.3 From df15315899c0641412bd54b29565a70b078a6ac8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 15 Nov 2010 05:25:58 +0000 Subject: drm/i915: Fix current tiling check for relaxed fencing As we may bind an object with the correct alignment, but with an invalid size, it may pass the current checks on whether the object may be reused with a fence. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem_tiling.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 0597a737ebad..a517b48d441d 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -245,6 +245,17 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) if (INTEL_INFO(obj->dev)->gen >= 4) return true; + if (!obj_priv->gtt_space) + return true; + + if (INTEL_INFO(obj->dev)->gen == 3) { + if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) + return false; + } else { + if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) + return false; + } + /* * Previous chips need to be aligned to the size of the smallest * fence register that can contain the object. 
@@ -257,16 +268,11 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) while (size < obj_priv->base.size) size <<= 1; - if (obj_priv->gtt_offset & (size - 1)) + if (obj_priv->gtt_space->size != size) return false; - if (INTEL_INFO(obj->dev)->gen == 3) { - if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) - return false; - } else { - if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) - return false; - } + if (obj_priv->gtt_offset & (size - 1)) + return false; return true; } -- cgit v1.2.3 From 3143a2bf18d12545f77dafa5b9f7fee83b001223 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 16 Nov 2010 15:55:10 +0000 Subject: drm/i915: Convert (void)I915_READ to POSTING_READ ... and so hide the flushes from tracing. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 48 ++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 21034527d3a4..ef3503733ebb 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -70,7 +70,7 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) if ((dev_priv->gt_irq_mask_reg & mask) != 0) { dev_priv->gt_irq_mask_reg &= ~mask; I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); - (void) I915_READ(GTIMR); + POSTING_READ(GTIMR); } } @@ -80,7 +80,7 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) if ((dev_priv->gt_irq_mask_reg & mask) != mask) { dev_priv->gt_irq_mask_reg |= mask; I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); - (void) I915_READ(GTIMR); + POSTING_READ(GTIMR); } } @@ -91,7 +91,7 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) if ((dev_priv->irq_mask_reg & mask) != 0) { dev_priv->irq_mask_reg &= ~mask; I915_WRITE(DEIMR, dev_priv->irq_mask_reg); - (void) I915_READ(DEIMR); + POSTING_READ(DEIMR); } } @@ -101,7 +101,7 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) if ((dev_priv->irq_mask_reg & mask) != mask) { dev_priv->irq_mask_reg |= mask; I915_WRITE(DEIMR, dev_priv->irq_mask_reg); - (void) I915_READ(DEIMR); + POSTING_READ(DEIMR); } } @@ -111,7 +111,7 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) if ((dev_priv->irq_mask_reg & mask) != 0) { dev_priv->irq_mask_reg &= ~mask; I915_WRITE(IMR, dev_priv->irq_mask_reg); - (void) I915_READ(IMR); + POSTING_READ(IMR); } } @@ -121,7 +121,7 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) if ((dev_priv->irq_mask_reg & mask) != mask) { dev_priv->irq_mask_reg |= mask; I915_WRITE(IMR, dev_priv->irq_mask_reg); - (void) I915_READ(IMR); + POSTING_READ(IMR); } } @@ -144,7 +144,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) dev_priv->pipestat[pipe] |= mask; /* Enable the interrupt, clear any pending status */ I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); - (void) I915_READ(reg); + POSTING_READ(reg); } } @@ -156,7 +156,7 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) dev_priv->pipestat[pipe] &= ~mask; I915_WRITE(reg, dev_priv->pipestat[pipe]); - (void) I915_READ(reg); + POSTING_READ(reg); } } @@ -321,7 +321,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) /* disable master interrupt before clearing iir */ de_ier = I915_READ(DEIER); I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); - (void)I915_READ(DEIER); + POSTING_READ(DEIER); de_iir = I915_READ(DEIIR); gt_iir = I915_READ(GTIIR); @@ -386,7 +386,7 @@ static irqreturn_t ironlake_irq_handler(struct 
drm_device *dev) done: I915_WRITE(DEIER, de_ier); - (void)I915_READ(DEIER); + POSTING_READ(DEIER); return ret; } @@ -796,7 +796,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); - (void)I915_READ(IPEIR_I965); + POSTING_READ(IPEIR_I965); } if (eir & GM45_ERROR_PAGE_TABLE) { u32 pgtbl_err = I915_READ(PGTBL_ER); @@ -804,7 +804,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " PGTBL_ER: 0x%08x\n", pgtbl_err); I915_WRITE(PGTBL_ER, pgtbl_err); - (void)I915_READ(PGTBL_ER); + POSTING_READ(PGTBL_ER); } } @@ -815,7 +815,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " PGTBL_ER: 0x%08x\n", pgtbl_err); I915_WRITE(PGTBL_ER, pgtbl_err); - (void)I915_READ(PGTBL_ER); + POSTING_READ(PGTBL_ER); } } @@ -846,7 +846,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " ACTHD: 0x%08x\n", I915_READ(ACTHD)); I915_WRITE(IPEIR, ipeir); - (void)I915_READ(IPEIR); + POSTING_READ(IPEIR); } else { u32 ipeir = I915_READ(IPEIR_I965); @@ -863,12 +863,12 @@ static void i915_report_and_clear_eir(struct drm_device *dev) printk(KERN_ERR " ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); I915_WRITE(IPEIR_I965, ipeir); - (void)I915_READ(IPEIR_I965); + POSTING_READ(IPEIR_I965); } } I915_WRITE(EIR, eir); - (void)I915_READ(EIR); + POSTING_READ(EIR); eir = I915_READ(EIR); if (eir) { /* @@ -1435,17 +1435,17 @@ static void ironlake_irq_preinstall(struct drm_device *dev) I915_WRITE(DEIMR, 0xffffffff); I915_WRITE(DEIER, 0x0); - (void) I915_READ(DEIER); + POSTING_READ(DEIER); /* and GT */ I915_WRITE(GTIMR, 0xffffffff); I915_WRITE(GTIER, 0x0); - (void) I915_READ(GTIER); + POSTING_READ(GTIER); /* south display irq */ I915_WRITE(SDEIMR, 0xffffffff); I915_WRITE(SDEIER, 0x0); - (void) I915_READ(SDEIER); + POSTING_READ(SDEIER); } static int ironlake_irq_postinstall(struct drm_device *dev) @@ -1464,7 +1464,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) I915_WRITE(DEIIR, I915_READ(DEIIR)); I915_WRITE(DEIMR, dev_priv->irq_mask_reg); I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); - (void) I915_READ(DEIER); + POSTING_READ(DEIER); if (IS_GEN6(dev)) { render_mask = @@ -1485,7 +1485,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) } I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); - (void) I915_READ(GTIER); + POSTING_READ(GTIER); if (HAS_PCH_CPT(dev)) { hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | @@ -1501,7 +1501,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) I915_WRITE(SDEIIR, I915_READ(SDEIIR)); I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg); I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); - (void) I915_READ(SDEIER); + POSTING_READ(SDEIER); if (IS_IRONLAKE_M(dev)) { /* Clear & enable PCU event interrupts */ @@ -1537,7 +1537,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev) I915_WRITE(PIPEBSTAT, 0); I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); - (void) I915_READ(IER); + POSTING_READ(IER); } /* @@ -1591,7 +1591,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) I915_WRITE(IMR, dev_priv->irq_mask_reg); I915_WRITE(IER, enable_mask); - (void) I915_READ(IER); + POSTING_READ(IER); if (I915_HAS_HOTPLUG(dev)) { u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); -- cgit v1.2.3 From 8f2535d9d9c8d87bfbbddab3a5b27abe48213ad2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 16 Nov 2010 10:58:37 +0000 Subject: drm/i915/crt: Introduce struct intel_crt We will use this 
structure in future patches to store CRT specific information on the encoder. Split out and tweaked from a patch by Keith Packard. Signed-off-by: Keith Packard Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_crt.c | 64 +++++++++++++++++++++++----------------- 1 file changed, 37 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index c55c77043357..e38bc6769180 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -34,6 +34,16 @@ #include "i915_drm.h" #include "i915_drv.h" +struct intel_crt { + struct intel_encoder base; +}; + +static struct intel_crt *intel_attached_crt(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_crt, base); +} + static void intel_crt_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; @@ -277,13 +287,12 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus) return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1; } -static bool intel_crt_detect_ddc(struct drm_encoder *encoder) +static bool intel_crt_detect_ddc(struct intel_crt *crt) { - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; /* CRT should always be at 0, but check anyway */ - if (intel_encoder->type != INTEL_OUTPUT_ANALOG) + if (crt->base.type != INTEL_OUTPUT_ANALOG) return false; if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) { @@ -291,7 +300,7 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder) return true; } - if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) { + if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); return true; } @@ -300,9 +309,9 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder) } static enum drm_connector_status -intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder) +intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt) { - struct drm_encoder *encoder = &intel_encoder->base; + struct drm_encoder *encoder = &crt->base.base; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -434,7 +443,7 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; - struct intel_encoder *encoder = intel_attached_encoder(connector); + struct intel_crt *crt = intel_attached_crt(connector); struct drm_crtc *crtc; int dpms_mode; enum drm_connector_status status; @@ -447,24 +456,25 @@ intel_crt_detect(struct drm_connector *connector, bool force) return connector_status_disconnected; } - if (intel_crt_detect_ddc(&encoder->base)) + if (intel_crt_detect_ddc(crt)) return connector_status_connected; if (!force) return connector->status; /* for pre-945g platforms use load detect */ - if (encoder->base.crtc && encoder->base.crtc->enabled) { - status = intel_crt_load_detect(encoder->base.crtc, encoder); + crtc = crt->base.base.crtc; + if (crtc && crtc->enabled) { + status = intel_crt_load_detect(crtc, crt); } else { - crtc = intel_get_load_detect_pipe(encoder, connector, + crtc = intel_get_load_detect_pipe(&crt->base, connector, NULL, &dpms_mode); if (crtc) { - if (intel_crt_detect_ddc(&encoder->base)) + if 
(intel_crt_detect_ddc(crt)) status = connector_status_connected; else - status = intel_crt_load_detect(crtc, encoder); - intel_release_load_detect_pipe(encoder, + status = intel_crt_load_detect(crtc, crt); + intel_release_load_detect_pipe(&crt->base, connector, dpms_mode); } else status = connector_status_unknown; @@ -536,17 +546,17 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { void intel_crt_init(struct drm_device *dev) { struct drm_connector *connector; - struct intel_encoder *intel_encoder; + struct intel_crt *crt; struct intel_connector *intel_connector; struct drm_i915_private *dev_priv = dev->dev_private; - intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL); - if (!intel_encoder) + crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL); + if (!crt) return; intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); if (!intel_connector) { - kfree(intel_encoder); + kfree(crt); return; } @@ -554,20 +564,20 @@ void intel_crt_init(struct drm_device *dev) drm_connector_init(dev, &intel_connector->base, &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); - drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs, + drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); - intel_connector_attach_encoder(intel_connector, intel_encoder); + intel_connector_attach_encoder(intel_connector, &crt->base); - intel_encoder->type = INTEL_OUTPUT_ANALOG; - intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | - (1 << INTEL_ANALOG_CLONE_BIT) | - (1 << INTEL_SDVO_LVDS_CLONE_BIT); - intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + crt->base.type = INTEL_OUTPUT_ANALOG; + crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT | + 1 << INTEL_ANALOG_CLONE_BIT | + 1 << INTEL_SDVO_LVDS_CLONE_BIT); + crt->base.crtc_mask = (1 << 0) | (1 << 1); connector->interlace_allowed = 1; connector->doublescan_allowed = 0; - drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs); + drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs); drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); drm_sysfs_connector_add(connector); -- cgit v1.2.3 From 8b5abbe0683f285e1311c4cc29c79da07d18d0af Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Tue, 16 Nov 2010 16:03:53 +0800 Subject: drm/i915: Take advantage of auto-polling CRT hotplug detection on PCH hardware Both IBX and CPT have an automatic hotplug detection mode which appears to work reliably enough that we can dispense with the manual force hotplug trigger stuff. This means that hotplug detection is as simple as reading the current hotplug register values. The first time the hotplug detection is activated, the code synchronously waits for a hotplug sequence in case the hardware hasn't bothered to do a detection cycle since being initialized. 
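For illustration only (this sketch is not part of the patch; pch_crt_connected() is a hypothetical helper), once the hotplug bits are programmed a detection poll reduces to a single read of the auto-polled status field, using the register macros from the diff below:

    static bool pch_crt_connected(struct drm_i915_private *dev_priv)
    {
    	/* ADPA_HOTPLUG_BITS keep auto-detection running; a monitor
    	 * is present if either the color or monochrome status bits
    	 * are set.
    	 */
    	u32 adpa = I915_READ(PCH_ADPA);

    	return (adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0;
    }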
Signed-off-by: Keith Packard Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_crt.c | 95 ++++++++++++++++++++++++---------------- 1 file changed, 57 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index e38bc6769180..8df574316063 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -34,8 +34,17 @@ #include "i915_drm.h" #include "i915_drv.h" +/* Here's the desired hotplug mode */ +#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \ + ADPA_CRT_HOTPLUG_WARMUP_10MS | \ + ADPA_CRT_HOTPLUG_SAMPLE_4S | \ + ADPA_CRT_HOTPLUG_VOLTAGE_50 | \ + ADPA_CRT_HOTPLUG_VOLREF_325MV | \ + ADPA_CRT_HOTPLUG_ENABLE) + struct intel_crt { struct intel_encoder base; + bool force_hotplug_required; }; static struct intel_crt *intel_attached_crt(struct drm_connector *connector) @@ -139,7 +148,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); } - adpa = 0; + adpa = ADPA_HOTPLUG_BITS; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) adpa |= ADPA_HSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) @@ -167,53 +176,44 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; + struct intel_crt *crt = intel_attached_crt(connector); struct drm_i915_private *dev_priv = dev->dev_private; - u32 adpa, temp; + u32 adpa; bool ret; - bool turn_off_dac = false; - temp = adpa = I915_READ(PCH_ADPA); + /* The first time through, trigger an explicit detection cycle */ + if (crt->force_hotplug_required) { + bool turn_off_dac = HAS_PCH_SPLIT(dev); + u32 save_adpa; - if (HAS_PCH_SPLIT(dev)) - turn_off_dac = true; - - adpa &= ~ADPA_CRT_HOTPLUG_MASK; - if (turn_off_dac) - adpa &= ~ADPA_DAC_ENABLE; - - /* disable HPD first */ - I915_WRITE(PCH_ADPA, adpa); - (void)I915_READ(PCH_ADPA); - - adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | - ADPA_CRT_HOTPLUG_WARMUP_10MS | - ADPA_CRT_HOTPLUG_SAMPLE_4S | - ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */ - ADPA_CRT_HOTPLUG_VOLREF_325MV | - ADPA_CRT_HOTPLUG_ENABLE | - ADPA_CRT_HOTPLUG_FORCE_TRIGGER); - - DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa); - I915_WRITE(PCH_ADPA, adpa); - - if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, - 1000)) - DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); - - if (turn_off_dac) { - /* Make sure hotplug is enabled */ - I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE); - (void)I915_READ(PCH_ADPA); + crt->force_hotplug_required = 0; + + save_adpa = adpa = I915_READ(PCH_ADPA); + DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); + + adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; + if (turn_off_dac) + adpa &= ~ADPA_DAC_ENABLE; + + I915_WRITE(PCH_ADPA, adpa); + + if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, + 1000)) + DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); + + if (turn_off_dac) { + I915_WRITE(PCH_ADPA, save_adpa); + POSTING_READ(PCH_ADPA); + } } /* Check the status to see if both blue and green are on now */ adpa = I915_READ(PCH_ADPA); - adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; - if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) || - (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO)) + if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) ret = true; else ret = false; + DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret); return ret; } @@ -452,8 +452,10 @@ intel_crt_detect(struct drm_connector 
*connector, bool force) if (intel_crt_detect_hotplug(connector)) { DRM_DEBUG_KMS("CRT detected via hotplug\n"); return connector_status_connected; - } else + } else { + DRM_DEBUG_KMS("CRT not detected via hotplug\n"); return connector_status_disconnected; + } } if (intel_crt_detect_ddc(crt)) @@ -587,5 +589,22 @@ void intel_crt_init(struct drm_device *dev) else connector->polled = DRM_CONNECTOR_POLL_CONNECT; + /* + * Configure the automatic hotplug detection stuff + */ + crt->force_hotplug_required = 0; + if (HAS_PCH_SPLIT(dev)) { + u32 adpa; + + adpa = I915_READ(PCH_ADPA); + adpa &= ~ADPA_CRT_HOTPLUG_MASK; + adpa |= ADPA_HOTPLUG_BITS; + I915_WRITE(PCH_ADPA, adpa); + POSTING_READ(PCH_ADPA); + + DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); + crt->force_hotplug_required = 1; + } + dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; } -- cgit v1.2.3 From 27641c3f003e7f3b6585c01d8a788883603eb262 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Sat, 23 Oct 2010 04:20:23 +0200 Subject: drm/vblank: Add support for precise vblank timestamping. The DRI2 swap & sync implementation needs precise vblank counts and precise timestamps corresponding to those vblank counts. For conformance to the OpenML OML_sync_control extension specification the DRM timestamp associated with a vblank count should correspond to the start of video scanout of the first scanline of the video frame following the vblank interval for that vblank count. Therefore we need to carry around precise timestamps for vblanks. Currently the DRM and KMS drivers generate timestamps ad-hoc via do_gettimeofday() in some places. The resulting timestamps are sometimes not very precise due to interrupt handling delays, they don't conform to OML_sync_control and some are wrong, as they aren't taken synchronized to the vblank. This patch implements support inside the drm core for precise and robust timestamping. It consists of the following interrelated pieces. 1. Vblank timestamp caching: A per-crtc ringbuffer stores the most recent vblank timestamps corresponding to vblank counts. The ringbuffer can be read out lock-free via the accessor function: struct timeval timestamp; vblankcount = drm_vblank_count_and_time(dev, crtcid, &timestamp). The function returns the current vblank count and the corresponding timestamp for start of video scanout following the vblank interval. It can be used anywhere between enclosing drm_vblank_get(dev, crtcid) and drm_vblank_put(dev, crtcid) statements. It is used inside the drmWaitVblank ioctl and in the vblank event queueing and handling. It should be used by kms drivers for timestamping of bufferswap completion. The timestamp ringbuffer is reinitialized each time vblank irq's get reenabled in drm_vblank_get()/ drm_update_vblank_count(). It is invalidated when vblank irq's get disabled. The ringbuffer is updated inside drm_handle_vblank() at each vblank irq. 2. Calculation of precise vblank timestamps: drm_get_last_vbltimestamp() is used to compute the timestamp for the end of the most recent vblank (if inside active scanout), or the expected end of the current vblank interval (if called inside a vblank interval). The function calls into a new optional kms driver entry point dev->driver->get_vblank_timestamp() which is supposed to provide the precise timestamp. If a kms driver doesn't implement the entry point or if the call fails, a simple do_gettimeofday() timestamp is returned as crude approximation of the true vblank time.
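As a usage sketch (not code introduced by this patch; report_swap_complete() is a hypothetical consumer), a kms driver timestamping bufferswap completion would bracket the lock-free readout with a vblank reference:

    struct timeval tv;
    u32 seq;

    if (drm_vblank_get(dev, crtc) == 0) {
    	/* Cooked count plus the matching scanout timestamp. */
    	seq = drm_vblank_count_and_time(dev, crtc, &tv);
    	report_swap_complete(dev, crtc, seq, &tv);
    	drm_vblank_put(dev, crtc);
    }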
A new drm module parameter drm.timestamp_precision_usec allows to disable high precision timestamps (if set to zero) or to specify the maximum acceptable error in the timestamps in microseconds. Kms drivers could implement their get_vblank_timestamp() function in a gpu specific way, as long as returned timestamps conform to OML_sync_control, e.g., by use of gpu specific hardware timestamps. Optionally, kms drivers can simply wrap and use the new utility function drm_calc_vbltimestamp_from_scanoutpos(). This function calls a new optional kms driver function dev->driver->get_scanout_position() which returns the current horizontal and vertical video scanout position of the crtc. The scanout position together with the drm_display_timing of the current video mode is used to calculate elapsed time relative to start of active scanout for the current video frame. This elapsed time is subtracted from the current do_gettimeofday() time to get the timestamp corresponding to start of video scanout. Currently non-interlaced, non-doublescan video modes, with or without panel scaling are handled correctly. Interlaced/ doublescan modes are tbd in a future patch. 3. Filtering of redundant vblank irq's and removal of some race-conditions in the vblank irq enable/disable path: Some gpu's (e.g., Radeon R500/R600) send spurious vblank irq's outside the vblank if vblank irq's get reenabled. These get detected by use of the vblank timestamps and filtered out to avoid miscounting of vblanks. Some race-conditions between the vblank irq enable/disable functions, the vblank irq handler and the gpu itself (updating its hardware vblank counter in the "wrong" moment) are fixed inside vblank_disable_and_save() and drm_update_vblank_count() by use of the vblank timestamps and a new spinlock dev->vblank_time_lock. The time until vblank irq disable is now configurable via a new drm module parameter drm.vblankoffdelay to allow experimentation with timeouts that are much shorter than the current 5 seconds and should allow longer vblank off periods for better power savings. Followup patches will use these new functions to implement precise timestamping for the intel and radeon kms drivers. Signed-off-by: Mario Kleiner Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc_helper.c | 13 +- drivers/gpu/drm/drm_irq.c | 563 ++++++++++++++++++++++++++++++++++++-- drivers/gpu/drm/drm_stub.c | 10 + include/drm/drmP.h | 93 +++++++ include/drm/drm_crtc.h | 9 + 5 files changed, 662 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index f7af91cb273d..4c200931a6bc 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -336,7 +336,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; - struct drm_display_mode *adjusted_mode, saved_mode; + struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; struct drm_encoder_helper_funcs *encoder_funcs; int saved_x, saved_y; @@ -350,6 +350,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, if (!crtc->enabled) return true; + saved_hwmode = crtc->hwmode; saved_mode = crtc->mode; saved_x = crtc->x; saved_y = crtc->y; @@ -427,11 +428,21 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, } + /* Store real post-adjustment hardware mode. 
*/ + crtc->hwmode = *adjusted_mode; + + /* Calculate and store various constants which + * are later needed by vblank and swap-completion + * timestamping. They are derived from true hwmode. + */ + drm_calc_timestamping_constants(crtc); + /* XXX free adjustedmode */ drm_mode_destroy(dev, adjusted_mode); /* FIXME: add subpixel order */ done: if (!ret) { + crtc->hwmode = saved_hwmode; crtc->mode = saved_mode; crtc->x = saved_x; crtc->y = saved_y; diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 9d3a5030b6e1..4e82d0d3c378 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -40,6 +40,22 @@ #include #include + +/* Access macro for slots in vblank timestamp ringbuffer. */ +#define vblanktimestamp(dev, crtc, count) ( \ + (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \ + ((count) % DRM_VBLANKTIME_RBSIZE)]) + +/* Retry timestamp calculation up to 3 times to satisfy + * drm_timestamp_precision before giving up. + */ +#define DRM_TIMESTAMP_MAXRETRIES 3 + +/* Threshold in nanoseconds for detection of redundant + * vblank irq in drm_handle_vblank(). 1 msec should be ok. + */ +#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000 + /** * Get interrupt from bus id. * @@ -77,6 +93,87 @@ int drm_irq_by_busid(struct drm_device *dev, void *data, return 0; } +/* + * Clear vblank timestamp buffer for a crtc. + */ +static void clear_vblank_timestamps(struct drm_device *dev, int crtc) +{ + memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0, + DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval)); +} + +/* + * Disable vblank irq's on crtc, make sure that last vblank count + * of hardware and corresponding consistent software vblank counter + * are preserved, even if there are any spurious vblank irq's after + * disable. + */ +static void vblank_disable_and_save(struct drm_device *dev, int crtc) +{ + unsigned long irqflags; + u32 vblcount; + s64 diff_ns; + int vblrc; + struct timeval tvblank; + + /* Prevent vblank irq processing while disabling vblank irqs, + * so no updates of timestamps or count can happen after we've + * disabled. Needed to prevent races in case of delayed irq's. + * Disable preemption, so vblank_time_lock is held as short as + * possible, even under a kernel with PREEMPT_RT patches. + */ + preempt_disable(); + spin_lock_irqsave(&dev->vblank_time_lock, irqflags); + + dev->driver->disable_vblank(dev, crtc); + dev->vblank_enabled[crtc] = 0; + + /* No further vblank irq's will be processed after + * this point. Get current hardware vblank count and + * vblank timestamp, repeat until they are consistent. + * + * FIXME: There is still a race condition here and in + * drm_update_vblank_count() which can cause off-by-one + * reinitialization of software vblank counter. If gpu + * vblank counter doesn't increment exactly at the leading + * edge of a vblank interval, then we can lose 1 count if + * we happen to execute between start of vblank and the + * delayed gpu counter increment. + */ + do { + dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); + vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); + } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc)); + + /* Compute time difference to stored timestamp of last vblank + * as updated by last invocation of drm_handle_vblank() in vblank irq. 
+ */ + vblcount = atomic_read(&dev->_vblank_count[crtc]); + diff_ns = timeval_to_ns(&tvblank) - + timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); + + /* If there is at least 1 msec difference between the last stored + * timestamp and tvblank, then we are currently executing our + * disable inside a new vblank interval, the tvblank timestamp + * corresponds to this new vblank interval and the irq handler + * for this vblank didn't run yet and won't run due to our disable. + * Therefore we need to do the job of drm_handle_vblank() and + * increment the vblank counter by one to account for this vblank. + * + * Skip this step if there isn't any high precision timestamp + * available. In that case we can't account for this and just + * hope for the best. + */ + if ((vblrc > 0) && (abs(diff_ns) > 1000000)) + atomic_inc(&dev->_vblank_count[crtc]); + + /* Invalidate all timestamps while vblank irq's are off. */ + clear_vblank_timestamps(dev, crtc); + + spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); + preempt_enable(); +} + static void vblank_disable_fn(unsigned long arg) { struct drm_device *dev = (struct drm_device *)arg; @@ -91,10 +188,7 @@ static void vblank_disable_fn(unsigned long arg) if (atomic_read(&dev->vblank_refcount[i]) == 0 && dev->vblank_enabled[i]) { DRM_DEBUG("disabling vblank on crtc %d\n", i); - dev->last_vblank[i] = - dev->driver->get_vblank_counter(dev, i); - dev->driver->disable_vblank(dev, i); - dev->vblank_enabled[i] = 0; + vblank_disable_and_save(dev, i); } spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } @@ -117,6 +211,7 @@ void drm_vblank_cleanup(struct drm_device *dev) kfree(dev->last_vblank); kfree(dev->last_vblank_wait); kfree(dev->vblank_inmodeset); + kfree(dev->_vblank_time); dev->num_crtcs = 0; } @@ -129,6 +224,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) setup_timer(&dev->vblank_disable_timer, vblank_disable_fn, (unsigned long)dev); spin_lock_init(&dev->vbl_lock); + spin_lock_init(&dev->vblank_time_lock); + dev->num_crtcs = num_crtcs; dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs, @@ -161,6 +258,19 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) if (!dev->vblank_inmodeset) goto err; + dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE, + sizeof(struct timeval), GFP_KERNEL); + if (!dev->_vblank_time) + goto err; + + DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n"); + + /* Driver specific high-precision vblank timestamping supported? */ + if (dev->driver->get_vblank_timestamp) + DRM_INFO("Driver supports precise vblank timestamp query.\n"); + else + DRM_INFO("No driver support for vblank timestamp query.\n"); + /* Zero per-crtc vblank stuff */ for (i = 0; i < num_crtcs; i++) { init_waitqueue_head(&dev->vbl_queue[i]); @@ -279,7 +389,7 @@ EXPORT_SYMBOL(drm_irq_install); * * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq. 
*/ -int drm_irq_uninstall(struct drm_device * dev) +int drm_irq_uninstall(struct drm_device *dev) { unsigned long irqflags; int irq_enabled, i; @@ -335,7 +445,9 @@ int drm_control(struct drm_device *dev, void *data, { struct drm_control *ctl = data; - /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ + /* if we haven't irq we fallback for compatibility reasons - + * this used to be a separate function in drm_dma.h + */ switch (ctl->func) { @@ -359,6 +471,287 @@ int drm_control(struct drm_device *dev, void *data, } } +/** + * drm_calc_timestamping_constants - Calculate and + * store various constants which are later needed by + * vblank and swap-completion timestamping, e.g, by + * drm_calc_vbltimestamp_from_scanoutpos(). + * They are derived from crtc's true scanout timing, + * so they take things like panel scaling or other + * adjustments into account. + * + * @crtc drm_crtc whose timestamp constants should be updated. + * + */ +void drm_calc_timestamping_constants(struct drm_crtc *crtc) +{ + s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0; + u64 dotclock; + + /* Dot clock in Hz: */ + dotclock = (u64) crtc->hwmode.clock * 1000; + + /* Valid dotclock? */ + if (dotclock > 0) { + /* Convert scanline length in pixels and video dot clock to + * line duration, frame duration and pixel duration in + * nanoseconds: + */ + pixeldur_ns = (s64) div64_u64(1000000000, dotclock); + linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal * + 1000000000), dotclock); + framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns; + } else + DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n", + crtc->base.id); + + crtc->pixeldur_ns = pixeldur_ns; + crtc->linedur_ns = linedur_ns; + crtc->framedur_ns = framedur_ns; + + DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n", + crtc->base.id, crtc->hwmode.crtc_htotal, + crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay); + DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n", + crtc->base.id, (int) dotclock/1000, (int) framedur_ns, + (int) linedur_ns, (int) pixeldur_ns); +} +EXPORT_SYMBOL(drm_calc_timestamping_constants); + +/** + * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms + * drivers. Implements calculation of exact vblank timestamps from + * given drm_display_mode timings and current video scanout position + * of a crtc. This can be called from within get_vblank_timestamp() + * implementation of a kms driver to implement the actual timestamping. + * + * Should return timestamps conforming to the OML_sync_control OpenML + * extension specification. The timestamp corresponds to the end of + * the vblank interval, aka start of scanout of topmost-leftmost display + * pixel in the following video frame. + * + * Requires support for optional dev->driver->get_scanout_position() + * in kms driver, plus a bit of setup code to provide a drm_display_mode + * that corresponds to the true scanout timing. + * + * The current implementation only handles standard video modes. It + * returns as no operation if a doublescan or interlaced video mode is + * active. Higher level code is expected to handle this. + * + * @dev: DRM device. + * @crtc: Which crtc's vblank timestamp to retrieve. + * @max_error: Desired maximum allowable error in timestamps (nanosecs). + * On return contains true maximum error of timestamp. + * @vblank_time: Pointer to struct timeval which should receive the timestamp. 
+ * @flags: Flags to pass to driver: + * 0 = Default. + * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler. + * @refcrtc: drm_crtc* of crtc which defines scanout timing. + * + * Returns negative value on error, failure or if not supported in current + * video mode: + * + * -EINVAL - Invalid crtc. + * -EAGAIN - Temporary unavailable, e.g., called before initial modeset. + * -ENOTSUPP - Function not supported in current display mode. + * -EIO - Failed, e.g., due to failed scanout position query. + * + * Returns or'ed positive status flags on success: + * + * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping. + * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval. + * + */ +int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, + int *max_error, + struct timeval *vblank_time, + unsigned flags, + struct drm_crtc *refcrtc) +{ + struct timeval stime, raw_time; + struct drm_display_mode *mode; + int vbl_status, vtotal, vdisplay; + int vpos, hpos, i; + s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns; + bool invbl; + + if (crtc < 0 || crtc >= dev->num_crtcs) { + DRM_ERROR("Invalid crtc %d\n", crtc); + return -EINVAL; + } + + /* Scanout position query not supported? Should not happen. */ + if (!dev->driver->get_scanout_position) { + DRM_ERROR("Called from driver w/o get_scanout_position()!?\n"); + return -EIO; + } + + mode = &refcrtc->hwmode; + vtotal = mode->crtc_vtotal; + vdisplay = mode->crtc_vdisplay; + + /* Durations of frames, lines, pixels in nanoseconds. */ + framedur_ns = refcrtc->framedur_ns; + linedur_ns = refcrtc->linedur_ns; + pixeldur_ns = refcrtc->pixeldur_ns; + + /* If mode timing undefined, just return as no-op: + * Happens during initial modesetting of a crtc. + */ + if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) { + DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc); + return -EAGAIN; + } + + /* Don't know yet how to handle interlaced or + * double scan modes. Just no-op for now. + */ + if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) { + DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc); + return -ENOTSUPP; + } + + /* Get current scanout position with system timestamp. + * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times + * if single query takes longer than max_error nanoseconds. + * + * This guarantees a tight bound on maximum error if + * code gets preempted or delayed for some reason. + */ + for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) { + /* Disable preemption to make it very likely to + * succeed in the first iteration even on PREEMPT_RT kernel. + */ + preempt_disable(); + + /* Get system timestamp before query. */ + do_gettimeofday(&stime); + + /* Get vertical and horizontal scanout pos. vpos, hpos. */ + vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos); + + /* Get system timestamp after query. */ + do_gettimeofday(&raw_time); + + preempt_enable(); + + /* Return as no-op if scanout query unsupported or failed. */ + if (!(vbl_status & DRM_SCANOUTPOS_VALID)) { + DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n", + crtc, vbl_status); + return -EIO; + } + + duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime); + + /* Accept result with < max_error nsecs timing uncertainty. */ + if (duration_ns <= (s64) *max_error) + break; + } + + /* Noisy system timing? 
*/ + if (i == DRM_TIMESTAMP_MAXRETRIES) { + DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n", + crtc, (int) duration_ns/1000, *max_error/1000, i); + } + + /* Return upper bound of timestamp precision error. */ + *max_error = (int) duration_ns; + + /* Check if in vblank area: + * vpos is >=0 in video scanout area, but negative + * within vblank area, counting down the number of lines until + * start of scanout. + */ + invbl = vbl_status & DRM_SCANOUTPOS_INVBL; + + /* Convert scanout position into elapsed time at raw_time query + * since start of scanout at first display scanline. delta_ns + * can be negative if start of scanout hasn't happened yet. + */ + delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns; + + /* Is vpos outside nominal vblank area, but less than + * 1/100 of a frame height away from start of vblank? + * If so, assume this isn't a massively delayed vblank + * interrupt, but a vblank interrupt that fired a few + * microseconds before true start of vblank. Compensate + * by adding a full frame duration to the final timestamp. + * Happens, e.g., on ATI R500, R600. + * + * We only do this if DRM_CALLED_FROM_VBLIRQ. + */ + if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl && + ((vdisplay - vpos) < vtotal / 100)) { + delta_ns = delta_ns - framedur_ns; + + /* Signal this correction as "applied". */ + vbl_status |= 0x8; + } + + /* Subtract time delta from raw timestamp to get final + * vblank_time timestamp for end of vblank. + */ + *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns); + + DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %d.%d -> %d.%d [e %d us, %d rep]\n", + crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec, + raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec, + (int) duration_ns/1000, i); + + vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD; + if (invbl) + vbl_status |= DRM_VBLANKTIME_INVBL; + + return vbl_status; +} +EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos); + +/** + * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent + * vblank interval. + * + * @dev: DRM device + * @crtc: which crtc's vblank timestamp to retrieve + * @tvblank: Pointer to target struct timeval which should receive the timestamp + * @flags: Flags to pass to driver: + * 0 = Default. + * DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler. + * + * Fetches the system timestamp corresponding to the time of the most recent + * vblank interval on specified crtc. May call into kms-driver to + * compute the timestamp with a high-precision GPU specific method. + * + * Returns zero if timestamp originates from uncorrected do_gettimeofday() + * call, i.e., it isn't very precisely locked to the true vblank. + * + * Returns non-zero if timestamp is considered to be very precise. + */ +u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, + struct timeval *tvblank, unsigned flags) +{ + int ret = 0; + + /* Define requested maximum error on timestamps (nanoseconds). */ + int max_error = (int) drm_timestamp_precision * 1000; + + /* Query driver if possible and precision timestamping enabled. */ + if (dev->driver->get_vblank_timestamp && (max_error > 0)) { + ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error, + tvblank, flags); + if (ret > 0) + return (u32) ret; + } + + /* GPU high precision timestamp query unsupported or failed. + * Return gettimeofday timestamp as best estimate. 
+ */ + do_gettimeofday(tvblank); + + return 0; +} +EXPORT_SYMBOL(drm_get_last_vbltimestamp); + /** * drm_vblank_count - retrieve "cooked" vblank counter value * @dev: DRM device @@ -374,6 +767,40 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc) } EXPORT_SYMBOL(drm_vblank_count); +/** + * drm_vblank_count_and_time - retrieve "cooked" vblank counter value + * and the system timestamp corresponding to that vblank counter value. + * + * @dev: DRM device + * @crtc: which counter to retrieve + * @vblanktime: Pointer to struct timeval to receive the vblank timestamp. + * + * Fetches the "cooked" vblank count value that represents the number of + * vblank events since the system was booted, including lost events due to + * modesetting activity. Returns corresponding system timestamp of the time + * of the vblank interval that corresponds to the current value vblank counter + * value. + */ +u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, + struct timeval *vblanktime) +{ + u32 cur_vblank; + + /* Read timestamp from slot of _vblank_time ringbuffer + * that corresponds to current vblank count. Retry if + * count has incremented during readout. This works like + * a seqlock. + */ + do { + cur_vblank = atomic_read(&dev->_vblank_count[crtc]); + *vblanktime = vblanktimestamp(dev, crtc, cur_vblank); + smp_rmb(); + } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc])); + + return cur_vblank; +} +EXPORT_SYMBOL(drm_vblank_count_and_time); + /** * drm_update_vblank_count - update the master vblank counter * @dev: DRM device @@ -392,7 +819,8 @@ EXPORT_SYMBOL(drm_vblank_count); */ static void drm_update_vblank_count(struct drm_device *dev, int crtc) { - u32 cur_vblank, diff; + u32 cur_vblank, diff, tslot, rc; + struct timeval t_vblank; /* * Interrupts were disabled prior to this call, so deal with counter @@ -400,8 +828,18 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) * NOTE! It's possible we lost a full dev->max_vblank_count events * here if the register is small or we had vblank interrupts off for * a long time. + * + * We repeat the hardware vblank counter & timestamp query until + * we get consistent results. This to prevent races between gpu + * updating its hardware counter while we are retrieving the + * corresponding vblank timestamp. */ - cur_vblank = dev->driver->get_vblank_counter(dev, crtc); + do { + cur_vblank = dev->driver->get_vblank_counter(dev, crtc); + rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0); + } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc)); + + /* Deal with counter wrap */ diff = cur_vblank - dev->last_vblank[crtc]; if (cur_vblank < dev->last_vblank[crtc]) { diff += dev->max_vblank_count; @@ -413,6 +851,16 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", crtc, diff); + /* Reinitialize corresponding vblank timestamp if high-precision query + * available. Skip this step if query unsupported or failed. Will + * reinitialize delayed at next vblank interrupt in that case. 
+ */ + if (rc) { + tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; + vblanktimestamp(dev, crtc, tslot) = t_vblank; + smp_wmb(); + } + atomic_add(diff, &dev->_vblank_count[crtc]); } @@ -429,15 +877,27 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) */ int drm_vblank_get(struct drm_device *dev, int crtc) { - unsigned long irqflags; + unsigned long irqflags, irqflags2; int ret = 0; spin_lock_irqsave(&dev->vbl_lock, irqflags); /* Going from 0->1 means we have to enable interrupts again */ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) { + /* Disable preemption while holding vblank_time_lock. Do + * it explicitely to guard against PREEMPT_RT kernel. + */ + preempt_disable(); + spin_lock_irqsave(&dev->vblank_time_lock, irqflags2); if (!dev->vblank_enabled[crtc]) { + /* Enable vblank irqs under vblank_time_lock protection. + * All vblank count & timestamp updates are held off + * until we are done reinitializing master counter and + * timestamps. Filtercode in drm_handle_vblank() will + * prevent double-accounting of same vblank interval. + */ ret = dev->driver->enable_vblank(dev, crtc); - DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); + DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", + crtc, ret); if (ret) atomic_dec(&dev->vblank_refcount[crtc]); else { @@ -445,6 +905,8 @@ int drm_vblank_get(struct drm_device *dev, int crtc) drm_update_vblank_count(dev, crtc); } } + spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2); + preempt_enable(); } else { if (!dev->vblank_enabled[crtc]) { atomic_dec(&dev->vblank_refcount[crtc]); @@ -463,15 +925,17 @@ EXPORT_SYMBOL(drm_vblank_get); * @crtc: which counter to give up * * Release ownership of a given vblank counter, turning off interrupts - * if possible. + * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. 
*/ void drm_vblank_put(struct drm_device *dev, int crtc) { - BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0); + BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0); /* Last user schedules interrupt disable */ - if (atomic_dec_and_test(&dev->vblank_refcount[crtc])) - mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ); + if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) && + (drm_vblank_offdelay > 0)) + mod_timer(&dev->vblank_disable_timer, + jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000)); } EXPORT_SYMBOL(drm_vblank_put); @@ -480,10 +944,8 @@ void drm_vblank_off(struct drm_device *dev, int crtc) unsigned long irqflags; spin_lock_irqsave(&dev->vbl_lock, irqflags); - dev->driver->disable_vblank(dev, crtc); + vblank_disable_and_save(dev, crtc); DRM_WAKEUP(&dev->vbl_queue[crtc]); - dev->vblank_enabled[crtc] = 0; - dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } EXPORT_SYMBOL(drm_vblank_off); @@ -599,7 +1061,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, e->base.file_priv = file_priv; e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; - do_gettimeofday(&now); spin_lock_irqsave(&dev->event_lock, flags); if (file_priv->event_space < sizeof e->event) { @@ -609,7 +1070,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, } file_priv->event_space -= sizeof e->event; - seq = drm_vblank_count(dev, pipe); + seq = drm_vblank_count_and_time(dev, pipe, &now); + if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) && (seq - vblwait->request.sequence) <= (1 << 23)) { vblwait->request.sequence = seq + 1; @@ -718,11 +1180,10 @@ int drm_wait_vblank(struct drm_device *dev, void *data, if (ret != -EINTR) { struct timeval now; - do_gettimeofday(&now); - + vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now); vblwait->reply.tval_sec = now.tv_sec; vblwait->reply.tval_usec = now.tv_usec; - vblwait->reply.sequence = drm_vblank_count(dev, crtc); + DRM_DEBUG("returning %d to client\n", vblwait->reply.sequence); } else { @@ -741,8 +1202,7 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc) unsigned long flags; unsigned int seq; - do_gettimeofday(&now); - seq = drm_vblank_count(dev, crtc); + seq = drm_vblank_count_and_time(dev, crtc, &now); spin_lock_irqsave(&dev->event_lock, flags); @@ -780,11 +1240,64 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc) */ void drm_handle_vblank(struct drm_device *dev, int crtc) { + u32 vblcount; + s64 diff_ns; + struct timeval tvblank; + unsigned long irqflags; + if (!dev->num_crtcs) return; - atomic_inc(&dev->_vblank_count[crtc]); + /* Need timestamp lock to prevent concurrent execution with + * vblank enable/disable, as this would cause inconsistent + * or corrupted timestamps and vblank counts. + */ + spin_lock_irqsave(&dev->vblank_time_lock, irqflags); + + /* Vblank irq handling disabled. Nothing to do. */ + if (!dev->vblank_enabled[crtc]) { + spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); + return; + } + + /* Fetch corresponding timestamp for this vblank interval from + * driver and store it in proper slot of timestamp ringbuffer. + */ + + /* Get current timestamp and count. 
*/ + vblcount = atomic_read(&dev->_vblank_count[crtc]); + drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ); + + /* Compute time difference to timestamp of last vblank */ + diff_ns = timeval_to_ns(&tvblank) - + timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount)); + + /* Update vblank timestamp and count if at least + * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds + * difference between last stored timestamp and current + * timestamp. A smaller difference means basically + * identical timestamps. Happens if this vblank has + * been already processed and this is a redundant call, + * e.g., due to spurious vblank interrupts. We need to + * ignore those for accounting. + */ + if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { + /* Store new timestamp in ringbuffer. */ + vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; + smp_wmb(); + + /* Increment cooked vblank count. This also atomically commits + * the timestamp computed above. + */ + atomic_inc(&dev->_vblank_count[crtc]); + } else { + DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", + crtc, (int) diff_ns); + } + DRM_WAKEUP(&dev->vbl_queue[crtc]); drm_handle_vblank_events(dev, crtc); + + spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); } EXPORT_SYMBOL(drm_handle_vblank); diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index cdc89ee042cc..d59edc18301f 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -40,12 +40,22 @@ unsigned int drm_debug = 0; /* 1 to enable debug output */ EXPORT_SYMBOL(drm_debug); +unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */ +EXPORT_SYMBOL(drm_vblank_offdelay); + +unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ +EXPORT_SYMBOL(drm_timestamp_precision); + MODULE_AUTHOR(CORE_AUTHOR); MODULE_DESCRIPTION(CORE_DESC); MODULE_LICENSE("GPL and additional rights"); MODULE_PARM_DESC(debug, "Enable debug output"); +MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); +MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); module_param_named(debug, drm_debug, int, 0600); +module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); +module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); struct idr drm_minors_idr; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 274eaaa15c36..2b3398004aa5 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -683,6 +683,21 @@ struct drm_master { void *driver_priv; /**< Private structure for driver to use */ }; +/* Size of ringbuffer for vblank timestamps. Just double-buffer + * in initial implementation. + */ +#define DRM_VBLANKTIME_RBSIZE 2 + +/* Flags and return codes for get_vblank_timestamp() driver function. */ +#define DRM_CALLED_FROM_VBLIRQ 1 +#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) +#define DRM_VBLANKTIME_INVBL (1 << 1) + +/* get_scanout_position() return flags */ +#define DRM_SCANOUTPOS_VALID (1 << 0) +#define DRM_SCANOUTPOS_INVBL (1 << 1) +#define DRM_SCANOUTPOS_ACCURATE (1 << 2) + /** * DRM driver structure. This structure represent the common code for * a family of cards. There will one drm_device for each card present @@ -760,6 +775,68 @@ struct drm_driver { */ int (*device_is_agp) (struct drm_device *dev); + /** + * Called by vblank timestamping code. + * + * Return the current display scanout position from a crtc. + * + * \param dev DRM device. + * \param crtc Id of the crtc to query. 
+ * \param *vpos Target location for current vertical scanout position. + * \param *hpos Target location for current horizontal scanout position. + * + * Returns vpos as a positive number while in active scanout area. + * Returns vpos as a negative number inside vblank, counting the number + * of scanlines to go until end of vblank, e.g., -1 means "one scanline + * until start of active scanout / end of vblank." + * + * \return Flags, or'ed together as follows: + * + * DRM_SCANOUTPOS_VALID = Query successful. + * DRM_SCANOUTPOS_INVBL = Inside vblank. + * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of + * this flag means that returned position may be offset by a constant + * but unknown small number of scanlines wrt. real scanout position. + * + */ + int (*get_scanout_position) (struct drm_device *dev, int crtc, + int *vpos, int *hpos); + + /** + * Called by \c drm_get_last_vbltimestamp. Should return a precise + * timestamp when the most recent VBLANK interval ended or will end. + * + * Specifically, the timestamp in @vblank_time should correspond as + * closely as possible to the time when the first video scanline of + * the video frame after the end of VBLANK will start scanning out, + * the time immediately after end of the VBLANK interval. If the + * @crtc is currently inside VBLANK, this will be a time in the future. + * If the @crtc is currently scanning out a frame, this will be the + * past start time of the current scanout. This is meant to adhere + * to the OpenML OML_sync_control extension specification. + * + * \param dev DRM device handle. + * \param crtc crtc for which timestamp should be returned. + * \param *max_error Maximum allowable timestamp error in nanoseconds. + * Implementation should strive to provide timestamp + * with an error of at most *max_error nanoseconds. + * Returns true upper bound on error for timestamp. + * \param *vblank_time Target location for returned vblank timestamp. + * \param flags 0 = Defaults, no special treatment needed. + * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank + * irq handler. Some drivers need to apply some workarounds + * for gpu-specific vblank irq quirks if flag is set. + * + * \returns + * Zero if timestamping isn't supported in current display mode or a + * negative number on failure. A positive status code on success, + * which describes how the vblank_time timestamp was computed.
+ */ + int (*get_vblank_timestamp) (struct drm_device *dev, int crtc, + int *max_error, + struct timeval *vblank_time, + unsigned flags); + /* these have to be filled in */ irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); @@ -983,6 +1060,8 @@ struct drm_device { wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */ atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ + struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */ + spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ spinlock_t vbl_lock; atomic_t *vblank_refcount; /* number of users of vblank interruptsper crtc */ u32 *last_vblank; /* protected by dev->vbl_lock, used */ @@ -1284,11 +1363,22 @@ extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp); extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); extern u32 drm_vblank_count(struct drm_device *dev, int crtc); +extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, + struct timeval *vblanktime); extern void drm_handle_vblank(struct drm_device *dev, int crtc); extern int drm_vblank_get(struct drm_device *dev, int crtc); extern void drm_vblank_put(struct drm_device *dev, int crtc); extern void drm_vblank_off(struct drm_device *dev, int crtc); extern void drm_vblank_cleanup(struct drm_device *dev); +extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, + struct timeval *tvblank, unsigned flags); +extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, + int crtc, int *max_error, + struct timeval *vblank_time, + unsigned flags, + struct drm_crtc *refcrtc); +extern void drm_calc_timestamping_constants(struct drm_crtc *crtc); + /* Modesetting support */ extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); @@ -1340,6 +1430,9 @@ extern void drm_put_dev(struct drm_device *dev); extern int drm_put_minor(struct drm_minor **minor); extern unsigned int drm_debug; +extern unsigned int drm_vblank_offdelay; +extern unsigned int drm_timestamp_precision; + extern struct class *drm_class; extern struct proc_dir_entry *drm_proc_root; extern struct dentry *drm_debugfs_root; diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 029aa688e787..acd7fade160d 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -351,8 +351,14 @@ struct drm_crtc { bool enabled; + /* Requested mode from modesetting. */ struct drm_display_mode mode; + /* Programmed mode in hw, after adjustments for encoders, + * crtc, panel scaling etc. Needed for timestamping etc. + */ + struct drm_display_mode hwmode; + int x, y; const struct drm_crtc_funcs *funcs; @@ -360,6 +366,9 @@ struct drm_crtc { uint32_t gamma_size; uint16_t *gamma_store; + /* Constants needed for precise vblank and swap timestamping. */ + s64 framedur_ns, linedur_ns, pixeldur_ns; + /* if you are using the helper */ void *helper_private; }; -- cgit v1.2.3 From f5a8020903932624cf020dc72455a10a3e005087 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Sat, 23 Oct 2010 04:42:17 +0200 Subject: drm/kms/radeon: Add support for precise vblank timestamping. 
This patch adds new functions for use by the drm core: .get_vblank_timestamp() provides a precise timestamp for the end of the most recent (or current) vblank interval of a given crtc, as needed for the DRI2 implementation of the OML_sync_control extension. It is a thin wrapper around the drm function drm_calc_vbltimestamp_from_scanoutpos() which does almost all the work and is shared across drivers. .get_scanout_position() provides the current horizontal and vertical video scanout position and "in vblank" status of a given crtc, as needed by the drm for use by drm_calc_vbltimestamp_from_scanoutpos(). The function is also used by the dynamic gpu reclocking code to determine when it is safe to reclock inside vblank. For that purpose radeon_pm_in_vbl() is modified to accommodate a small change in the function prototype of the radeon_get_crtc_scanoutpos() which is hooked up to .get_scanout_position(). This code has been tested on AVIVO hardware, an RV530 (ATI Mobility Radeon X1600) in an Intel Core-2 Duo MacBookPro and some R600 variant (FireGL V7600) in a single cpu AMD Athlon 64 PC. Signed-off-by: Mario Kleiner Reviewed-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_display.c | 40 +++++++++++++++++---------------- drivers/gpu/drm/radeon/radeon_drv.c | 8 +++++++ drivers/gpu/drm/radeon/radeon_kms.c | 21 +++++++++++++++++ drivers/gpu/drm/radeon/radeon_mode.h | 7 ++---- drivers/gpu/drm/radeon/radeon_pm.c | 6 ++--- 5 files changed, 55 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 1df4dc6c063c..eeea7cbb9517 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1019,7 +1019,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, /* * Retrieve current video scanout position of crtc on a given gpu. * - * \param rdev Device to query. + * \param dev Device to query. * \param crtc Crtc to query. * \param *vpos Location where vertical scanout position should be stored. * \param *hpos Location where horizontal scanout position should go. @@ -1031,72 +1031,74 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, * * \return Flags, or'ed together as follows: * - * RADEON_SCANOUTPOS_VALID = Query successfull. - * RADEON_SCANOUTPOS_INVBL = Inside vblank. - * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of + * DRM_SCANOUTPOS_VALID = Query successful. + * DRM_SCANOUTPOS_INVBL = Inside vblank. + * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of + * this flag means that returned position may be offset by a constant but * unknown small number of scanlines wrt. real scanout position.
* */ -int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos) +int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos) { u32 stat_crtc = 0, vbl = 0, position = 0; int vbl_start, vbl_end, vtotal, ret = 0; bool in_vbl = true; + struct radeon_device *rdev = dev->dev_private; + if (ASIC_IS_DCE4(rdev)) { if (crtc == 0) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC0_REGISTER_OFFSET); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 1) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC1_REGISTER_OFFSET); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 2) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC2_REGISTER_OFFSET); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 3) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC3_REGISTER_OFFSET); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 4) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC4_REGISTER_OFFSET); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 5) { vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET); position = RREG32(EVERGREEN_CRTC_STATUS_POSITION + EVERGREEN_CRTC5_REGISTER_OFFSET); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } } else if (ASIC_IS_AVIVO(rdev)) { if (crtc == 0) { vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END); position = RREG32(AVIVO_D1CRTC_STATUS_POSITION); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 1) { vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END); position = RREG32(AVIVO_D2CRTC_STATUS_POSITION); - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } } else { /* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */ @@ -1112,7 +1114,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, if (!(stat_crtc & 1)) in_vbl = false; - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } if (crtc == 1) { vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) & @@ -1122,7 +1124,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, if (!(stat_crtc & 1)) in_vbl = false; - ret |= RADEON_SCANOUTPOS_VALID; + ret |= DRM_SCANOUTPOS_VALID; } } @@ -1133,13 +1135,13 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, /* Valid vblank area boundaries from gpu retrieved? */ if (vbl > 0) { /* Yes: Decode. */ - ret |= RADEON_SCANOUTPOS_ACCURATE; + ret |= DRM_SCANOUTPOS_ACCURATE; vbl_start = vbl & 0x1fff; vbl_end = (vbl >> 16) & 0x1fff; } else { /* No: Fake something reasonable which gives at least ok results. */ - vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay; + vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay; vbl_end = 0; } @@ -1155,7 +1157,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, /* Inside "upper part" of vblank area? 
Apply corrective offset if so: */ if (in_vbl && (*vpos >= vbl_start)) { - vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal; + vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal; *vpos = *vpos - vtotal; } @@ -1164,7 +1166,7 @@ int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, /* In vblank? */ if (in_vbl) - ret |= RADEON_SCANOUTPOS_INVBL; + ret |= DRM_SCANOUTPOS_INVBL; return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 88e4ea925900..32ec0cc6be92 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -66,6 +66,10 @@ int radeon_resume_kms(struct drm_device *dev); u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc); int radeon_enable_vblank_kms(struct drm_device *dev, int crtc); void radeon_disable_vblank_kms(struct drm_device *dev, int crtc); +int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, + int *max_error, + struct timeval *vblank_time, + unsigned flags); void radeon_driver_irq_preinstall_kms(struct drm_device *dev); int radeon_driver_irq_postinstall_kms(struct drm_device *dev); void radeon_driver_irq_uninstall_kms(struct drm_device *dev); @@ -74,6 +78,8 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, struct drm_file *file_priv); int radeon_gem_object_init(struct drm_gem_object *obj); void radeon_gem_object_free(struct drm_gem_object *obj); +extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, + int *vpos, int *hpos); extern struct drm_ioctl_desc radeon_ioctls_kms[]; extern int radeon_max_kms_ioctl; int radeon_mmap(struct file *filp, struct vm_area_struct *vma); @@ -277,6 +283,8 @@ static struct drm_driver kms_driver = { .get_vblank_counter = radeon_get_vblank_counter_kms, .enable_vblank = radeon_enable_vblank_kms, .disable_vblank = radeon_disable_vblank_kms, + .get_vblank_timestamp = radeon_get_vblank_timestamp_kms, + .get_scanout_position = radeon_get_crtc_scanoutpos, #if defined(CONFIG_DEBUG_FS) .debugfs_init = radeon_debugfs_init, .debugfs_cleanup = radeon_debugfs_cleanup, diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 8fbbe1c6ebbd..4bf423ca4c12 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -277,6 +277,27 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc) radeon_irq_set(rdev); } +int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc, + int *max_error, + struct timeval *vblank_time, + unsigned flags) +{ + struct drm_crtc *drmcrtc; + struct radeon_device *rdev = dev->dev_private; + + if (crtc < 0 || crtc >= dev->num_crtcs) { + DRM_ERROR("Invalid crtc %d\n", crtc); + return -EINVAL; + } + + /* Get associated drm_crtc: */ + drmcrtc = &rdev->mode_info.crtcs[crtc]->base; + + /* Helper routine in DRM core does all the work: */ + return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, + vblank_time, flags, + drmcrtc); +} /* * IOCTL. 
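To make the division of labour above concrete: drm_calc_timestamping_constants() precomputes per-mode duration constants (framedur_ns, linedur_ns, pixeldur_ns) from the crtc's hwmode, and drm_calc_vbltimestamp_from_scanoutpos() turns a sampled (vpos, hpos) pair into a timestamp by subtracting the elapsed scanout time from the current system time. The following sketch illustrates only that core conversion; it is not the DRM core implementation (which additionally bounds the query latency against *max_error, rejects invalid modes and honours the DRM_SCANOUTPOS_* flags), and sketch_vblank_timestamp is a hypothetical name used for illustration:

#include <linux/time.h>
#include <linux/math64.h>
#include <drm/drm_crtc.h>

static void sketch_vblank_timestamp(const struct drm_display_mode *hwmode,
				    int vpos, int hpos,
				    struct timeval *vblank_time)
{
	s64 linedur_ns, pixeldur_ns, delta_ns;
	struct timeval now;

	/* hwmode->clock is the dotclock in kHz: one pixel lasts
	 * 10^6 / clock ns, one scanline crtc_htotal times that. */
	pixeldur_ns = div_s64(1000000LL, hwmode->clock);
	linedur_ns  = div_s64(1000000LL * hwmode->crtc_htotal, hwmode->clock);

	/* Scanout time elapsed since the end of vblank; negative while
	 * still inside vblank, because vpos < 0 there (see the comment
	 * block above). */
	delta_ns = (s64)vpos * linedur_ns + (s64)hpos * pixeldur_ns;

	/* Timestamp of vblank end = now minus elapsed scanout time. */
	do_gettimeofday(&now);
	*vblank_time = ns_to_timeval(timeval_to_ns(&now) - delta_ns);
}

With vpos negative inside vblank, delta_ns is negative and the computed timestamp lands slightly in the future, which matches the commit's goal of timestamping the end of the most recent or current vblank interval for OML_sync_control; because the core helper does all of this, radeon_get_vblank_timestamp_kms() above can stay a thin wrapper.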
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index e301c6f9e059..55856ad0ac41 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -442,10 +442,6 @@ struct radeon_framebuffer { struct drm_gem_object *obj; }; -/* radeon_get_crtc_scanoutpos() return flags */ -#define RADEON_SCANOUTPOS_VALID (1 << 0) -#define RADEON_SCANOUTPOS_INVBL (1 << 1) -#define RADEON_SCANOUTPOS_ACCURATE (1 << 2) extern enum radeon_tv_std radeon_combios_get_tv_info(struct radeon_device *rdev); @@ -562,7 +558,8 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); -extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos); +extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, + int *vpos, int *hpos); extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); extern struct edid * diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 8c9b2ef32c68..5eda5e471980 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -720,9 +720,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev) */ for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { if (rdev->pm.active_crtcs & (1 << crtc)) { - vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos); - if ((vbl_status & RADEON_SCANOUTPOS_VALID) && - !(vbl_status & RADEON_SCANOUTPOS_INVBL)) + vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos); + if ((vbl_status & DRM_SCANOUTPOS_VALID) && + !(vbl_status & DRM_SCANOUTPOS_INVBL)) in_vbl = false; } } -- cgit v1.2.3 From 6f34be50bd1bdd2ff3c955940e033a80d05f248a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Sun, 21 Nov 2010 10:59:01 -0500 Subject: drm/radeon/kms: add pageflip ioctl support (v3) This adds support for dri2 pageflipping. v2: precision updates from Mario Kleiner. 
v3: Multihead fixes from Mario Kleiner; add missing crtc offset; add a note about the update pending bit on pre-avivo chips Signed-off-by: Alex Deucher Signed-off-by: Mario Kleiner Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 301 +++++++++++++++++++++----------- drivers/gpu/drm/radeon/evergreen_reg.h | 6 + drivers/gpu/drm/radeon/r100.c | 74 +++++++- drivers/gpu/drm/radeon/r500_reg.h | 4 + drivers/gpu/drm/radeon/r600.c | 126 +++++++------ drivers/gpu/drm/radeon/r600d.h | 9 + drivers/gpu/drm/radeon/radeon.h | 57 ++++++ drivers/gpu/drm/radeon/radeon_asic.c | 42 +++++ drivers/gpu/drm/radeon/radeon_asic.h | 10 ++ drivers/gpu/drm/radeon/radeon_display.c | 261 +++++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_drv.c | 3 +- drivers/gpu/drm/radeon/radeon_irq_kms.c | 39 ++++- drivers/gpu/drm/radeon/radeon_mode.h | 6 + drivers/gpu/drm/radeon/radeon_reg.h | 1 + drivers/gpu/drm/radeon/rs600.c | 116 +++++++++--- drivers/gpu/drm/radeon/rv770.c | 34 ++++ drivers/gpu/drm/radeon/rv770d.h | 7 + 17 files changed, 900 insertions(+), 196 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4dc5b4714c5a..df3f37243222 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -40,6 +40,61 @@ static void evergreen_gpu_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); +void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; + u32 tmp; + + /* make sure flip is at vb rather than hb */ + tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset); + tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN; + WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); + + /* set pageflip to happen anywhere in vblank interval */ + WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); + + /* enable the pflip int */ + radeon_irq_kms_pflip_irq_get(rdev, crtc); +} + +void evergreen_post_page_flip(struct radeon_device *rdev, int crtc) +{ + /* disable the pflip int */ + radeon_irq_kms_pflip_irq_put(rdev, crtc); +} + +u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; + u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); + + /* Lock the graphics update lock */ + tmp |= EVERGREEN_GRPH_UPDATE_LOCK; + WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); + + /* update the scanout addresses */ + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, + upper_32_bits(crtc_base)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + (u32)crtc_base); + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, + upper_32_bits(crtc_base)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + (u32)crtc_base); + + /* Wait for update_pending to go high. */ + while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)); + DRM_DEBUG("Update pending now high.
Unlocking vupdate_lock.\n"); + + /* Unlock the lock, so double-buffering can take place inside vblank */ + tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK; + WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); + + /* Return current update_pending status: */ + return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING; +} + /* get temperature in millidegrees */ u32 evergreen_get_temp(struct radeon_device *rdev) { @@ -2060,6 +2115,7 @@ int evergreen_irq_set(struct radeon_device *rdev) u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; u32 grbm_int_cntl = 0; + u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); @@ -2085,27 +2141,33 @@ int evergreen_irq_set(struct radeon_device *rdev) cp_int_cntl |= RB_INT_ENABLE; cp_int_cntl |= TIME_STAMP_INT_ENABLE; } - if (rdev->irq.crtc_vblank_int[0]) { + if (rdev->irq.crtc_vblank_int[0] || + rdev->irq.pflip[0]) { DRM_DEBUG("evergreen_irq_set: vblank 0\n"); crtc1 |= VBLANK_INT_MASK; } - if (rdev->irq.crtc_vblank_int[1]) { + if (rdev->irq.crtc_vblank_int[1] || + rdev->irq.pflip[1]) { DRM_DEBUG("evergreen_irq_set: vblank 1\n"); crtc2 |= VBLANK_INT_MASK; } - if (rdev->irq.crtc_vblank_int[2]) { + if (rdev->irq.crtc_vblank_int[2] || + rdev->irq.pflip[2]) { DRM_DEBUG("evergreen_irq_set: vblank 2\n"); crtc3 |= VBLANK_INT_MASK; } - if (rdev->irq.crtc_vblank_int[3]) { + if (rdev->irq.crtc_vblank_int[3] || + rdev->irq.pflip[3]) { DRM_DEBUG("evergreen_irq_set: vblank 3\n"); crtc4 |= VBLANK_INT_MASK; } - if (rdev->irq.crtc_vblank_int[4]) { + if (rdev->irq.crtc_vblank_int[4] || + rdev->irq.pflip[4]) { DRM_DEBUG("evergreen_irq_set: vblank 4\n"); crtc5 |= VBLANK_INT_MASK; } - if (rdev->irq.crtc_vblank_int[5]) { + if (rdev->irq.crtc_vblank_int[5] || + rdev->irq.pflip[5]) { DRM_DEBUG("evergreen_irq_set: vblank 5\n"); crtc6 |= VBLANK_INT_MASK; } @@ -2148,6 +2210,13 @@ int evergreen_irq_set(struct radeon_device *rdev) WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); + WREG32(DC_HPD1_INT_CONTROL, hpd1); WREG32(DC_HPD2_INT_CONTROL, hpd2); WREG32(DC_HPD3_INT_CONTROL, hpd3); @@ -2158,79 +2227,92 @@ int evergreen_irq_set(struct radeon_device *rdev) return 0; } -static inline void evergreen_irq_ack(struct radeon_device *rdev, - u32 *disp_int, - u32 *disp_int_cont, - u32 *disp_int_cont2, - u32 *disp_int_cont3, - u32 *disp_int_cont4, - u32 *disp_int_cont5) +static inline void evergreen_irq_ack(struct radeon_device *rdev) { u32 tmp; - *disp_int = RREG32(DISP_INTERRUPT_STATUS); - *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); - *disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); - *disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); - *disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); - *disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); - - if (*disp_int & LB_D1_VBLANK_INTERRUPT) + rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS); + 
rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); + rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); + rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3); + rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4); + rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); + rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET); + rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET); + + if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR); + + if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); - if (*disp_int & LB_D1_VLINE_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK); - if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK); - if (*disp_int_cont & LB_D2_VLINE_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); - if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); - if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK); - if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK); - if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK); - if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) + if 
(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); - if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK); - if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK); - if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT) + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK); - if (*disp_int & DC_HPD1_INTERRUPT) { + if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { tmp = RREG32(DC_HPD1_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD1_INT_CONTROL, tmp); } - if (*disp_int_cont & DC_HPD2_INTERRUPT) { + if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { tmp = RREG32(DC_HPD2_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD2_INT_CONTROL, tmp); } - if (*disp_int_cont2 & DC_HPD3_INTERRUPT) { + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { tmp = RREG32(DC_HPD3_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD3_INT_CONTROL, tmp); } - if (*disp_int_cont3 & DC_HPD4_INTERRUPT) { + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { tmp = RREG32(DC_HPD4_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD4_INT_CONTROL, tmp); } - if (*disp_int_cont4 & DC_HPD5_INTERRUPT) { + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { tmp = RREG32(DC_HPD5_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD5_INT_CONTROL, tmp); } - if (*disp_int_cont5 & DC_HPD6_INTERRUPT) { + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { tmp = RREG32(DC_HPD5_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); @@ -2239,14 +2321,10 @@ static inline void evergreen_irq_ack(struct radeon_device *rdev, void evergreen_irq_disable(struct radeon_device *rdev) { - u32 disp_int, disp_int_cont, disp_int_cont2; - u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; - r600_disable_interrupts(rdev); /* Wait and acknowledge irq */ mdelay(1); - evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, - &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); + evergreen_irq_ack(rdev); evergreen_disable_interrupt_state(rdev); } @@ -2286,8 +2364,6 @@ int evergreen_irq_process(struct radeon_device *rdev) u32 rptr = rdev->ih.rptr; u32 src_id, src_data; u32 ring_index; - u32 disp_int, disp_int_cont, disp_int_cont2; - u32 disp_int_cont3, disp_int_cont4, disp_int_cont5; unsigned long flags; bool queue_hotplug = false; @@ -2308,8 +2384,7 @@ int evergreen_irq_process(struct radeon_device *rdev) restart_ih: /* display interrupts */ - evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2, - &disp_int_cont3, &disp_int_cont4, &disp_int_cont5); + evergreen_irq_ack(rdev); rdev->ih.wptr = wptr; while (rptr != wptr) { @@ -2322,17 +2397,21 @@ restart_ih: case 1: /* D1 vblank/vline */ switch (src_data) { case 0: /* D1 vblank */ - if (disp_int & LB_D1_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 0); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int &= ~LB_D1_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { + if (rdev->irq.pflip[0]) + radeon_crtc_handle_flip(rdev, 0); + if 
(rdev->irq.crtc_vblank_int[0]) { + drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); } break; case 1: /* D1 vline */ - if (disp_int & LB_D1_VLINE_INTERRUPT) { - disp_int &= ~LB_D1_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; DRM_DEBUG("IH: D1 vline\n"); } break; @@ -2344,17 +2423,21 @@ restart_ih: case 2: /* D2 vblank/vline */ switch (src_data) { case 0: /* D2 vblank */ - if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 1); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { + if (rdev->irq.pflip[1]) + radeon_crtc_handle_flip(rdev, 1); + if (rdev->irq.crtc_vblank_int[1]) { + drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); } break; case 1: /* D2 vline */ - if (disp_int_cont & LB_D2_VLINE_INTERRUPT) { - disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; DRM_DEBUG("IH: D2 vline\n"); } break; @@ -2366,17 +2449,21 @@ restart_ih: case 3: /* D3 vblank/vline */ switch (src_data) { case 0: /* D3 vblank */ - if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 2); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { + if (rdev->irq.crtc_vblank_int[2]) { + drm_handle_vblank(rdev->ddev, 2); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + if (rdev->irq.pflip[2]) + radeon_crtc_handle_flip(rdev, 2); + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; DRM_DEBUG("IH: D3 vblank\n"); } break; case 1: /* D3 vline */ - if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { - disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; DRM_DEBUG("IH: D3 vline\n"); } break; @@ -2388,17 +2475,21 @@ restart_ih: case 4: /* D4 vblank/vline */ switch (src_data) { case 0: /* D4 vblank */ - if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 3); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { + if (rdev->irq.crtc_vblank_int[3]) { + drm_handle_vblank(rdev->ddev, 3); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + if (rdev->irq.pflip[3]) + radeon_crtc_handle_flip(rdev, 3); + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; DRM_DEBUG("IH: D4 vblank\n"); } break; case 1: /* D4 vline */ - if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { - disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; DRM_DEBUG("IH: D4 vline\n"); } 
break; @@ -2410,17 +2501,21 @@ restart_ih: case 5: /* D5 vblank/vline */ switch (src_data) { case 0: /* D5 vblank */ - if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 4); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { + if (rdev->irq.crtc_vblank_int[4]) { + drm_handle_vblank(rdev->ddev, 4); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + if (rdev->irq.pflip[4]) + radeon_crtc_handle_flip(rdev, 4); + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; DRM_DEBUG("IH: D5 vblank\n"); } break; case 1: /* D5 vline */ - if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { - disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; DRM_DEBUG("IH: D5 vline\n"); } break; @@ -2432,17 +2527,21 @@ restart_ih: case 6: /* D6 vblank/vline */ switch (src_data) { case 0: /* D6 vblank */ - if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 5); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { + if (rdev->irq.crtc_vblank_int[5]) { + drm_handle_vblank(rdev->ddev, 5); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + if (rdev->irq.pflip[5]) + radeon_crtc_handle_flip(rdev, 5); + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; DRM_DEBUG("IH: D6 vblank\n"); } break; case 1: /* D6 vline */ - if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { - disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; DRM_DEBUG("IH: D6 vline\n"); } break; @@ -2454,43 +2553,43 @@ restart_ih: case 42: /* HPD hotplug */ switch (src_data) { case 0: - if (disp_int & DC_HPD1_INTERRUPT) { - disp_int &= ~DC_HPD1_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD1\n"); } break; case 1: - if (disp_int_cont & DC_HPD2_INTERRUPT) { - disp_int_cont &= ~DC_HPD2_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD2\n"); } break; case 2: - if (disp_int_cont2 & DC_HPD3_INTERRUPT) { - disp_int_cont2 &= ~DC_HPD3_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD3\n"); } break; case 3: - if (disp_int_cont3 & DC_HPD4_INTERRUPT) { - disp_int_cont3 &= ~DC_HPD4_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD4\n"); } break; case 4: - if (disp_int_cont4 & DC_HPD5_INTERRUPT) { - disp_int_cont4 &= ~DC_HPD5_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD5\n"); } break; case 5: 
- if (disp_int_cont5 & DC_HPD6_INTERRUPT) { - disp_int_cont5 &= ~DC_HPD6_INTERRUPT; + if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { + rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD6\n"); } diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index 2330f3a36fd5..c781c92c3451 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -105,6 +105,11 @@ #define EVERGREEN_GRPH_Y_START 0x6830 #define EVERGREEN_GRPH_X_END 0x6834 #define EVERGREEN_GRPH_Y_END 0x6838 +#define EVERGREEN_GRPH_UPDATE 0x6844 +# define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING (1 << 2) +# define EVERGREEN_GRPH_UPDATE_LOCK (1 << 16) +#define EVERGREEN_GRPH_FLIP_CONTROL 0x6848 +# define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0) /* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */ #define EVERGREEN_CUR_CONTROL 0x6998 @@ -178,6 +183,7 @@ # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) #define EVERGREEN_CRTC_STATUS 0x6e8c #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 +#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 8e10aa9f74b0..b2e29798a99d 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -68,6 +68,54 @@ MODULE_FIRMWARE(FIRMWARE_R520); * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ +void r100_pre_page_flip(struct radeon_device *rdev, int crtc) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; + u32 tmp; + + /* make sure flip is at vb rather than hb */ + tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset); + tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL; + WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp); + + /* set pageflip to happen as late as possible in the vblank interval. + * same field for crtc1/2 + */ + tmp = RREG32(RADEON_CRTC_GEN_CNTL); + tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK; + WREG32(RADEON_CRTC_GEN_CNTL, tmp); + + /* enable the pflip int */ + radeon_irq_kms_pflip_irq_get(rdev, crtc); +} + +void r100_post_page_flip(struct radeon_device *rdev, int crtc) +{ + /* disable the pflip int */ + radeon_irq_kms_pflip_irq_put(rdev, crtc); +} + +u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; + u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; + + /* Lock the graphics update lock */ + /* update the scanout addresses */ + WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); + + /* Note: We don't wait for update_pending to assert, as this never + * happens for some reason on R1xx - R4xx. Adds a bit of imprecision. 
+ */ + + /* Unlock the lock, so double-buffering can take place inside vblank */ + tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK; + WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); + + /* Return current update_pending status: */ + return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET; +} + void r100_pm_get_dynpm_state(struct radeon_device *rdev) { int i; @@ -526,10 +574,12 @@ int r100_irq_set(struct radeon_device *rdev) if (rdev->irq.gui_idle) { tmp |= RADEON_GUI_IDLE_MASK; } - if (rdev->irq.crtc_vblank_int[0]) { + if (rdev->irq.crtc_vblank_int[0] || + rdev->irq.pflip[0]) { tmp |= RADEON_CRTC_VBLANK_MASK; } - if (rdev->irq.crtc_vblank_int[1]) { + if (rdev->irq.crtc_vblank_int[1] || + rdev->irq.pflip[1]) { tmp |= RADEON_CRTC2_VBLANK_MASK; } if (rdev->irq.hpd[0]) { @@ -600,14 +650,22 @@ int r100_irq_process(struct radeon_device *rdev) } /* Vertical blank interrupts */ if (status & RADEON_CRTC_VBLANK_STAT) { - drm_handle_vblank(rdev->ddev, 0); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); + if (rdev->irq.pflip[0]) + radeon_crtc_handle_flip(rdev, 0); + if (rdev->irq.crtc_vblank_int[0]) { + drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } } if (status & RADEON_CRTC2_VBLANK_STAT) { - drm_handle_vblank(rdev->ddev, 1); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); + if (rdev->irq.pflip[1]) + radeon_crtc_handle_flip(rdev, 1); + if (rdev->irq.crtc_vblank_int[1]) { + drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } } if (status & RADEON_FP_DETECT_STAT) { queue_hotplug = true; diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 6ac1f604e29b..fc437059918f 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h @@ -355,6 +355,8 @@ #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 +#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4 + /* master controls */ #define AVIVO_DC_CRTC_MASTER_EN 0x60f8 #define AVIVO_DC_CRTC_TV_CONTROL 0x60fc @@ -409,8 +411,10 @@ #define AVIVO_D1GRPH_X_END 0x6134 #define AVIVO_D1GRPH_Y_END 0x6138 #define AVIVO_D1GRPH_UPDATE 0x6144 +# define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING (1 << 2) # define AVIVO_D1GRPH_UPDATE_LOCK (1 << 16) #define AVIVO_D1GRPH_FLIP_CONTROL 0x6148 +# define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0) #define AVIVO_D1CUR_CONTROL 0x6400 # define AVIVO_D1CURSOR_EN (1 << 0) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index a3552594ccc4..15b95724c408 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2863,6 +2863,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev) WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); WREG32(GRBM_INT_CNTL, 0); WREG32(DxMODE_INT_MASK, 0); + WREG32(D1GRPH_INTERRUPT_CONTROL, 0); + WREG32(D2GRPH_INTERRUPT_CONTROL, 0); if (ASIC_IS_DCE3(rdev)) { WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0); WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0); @@ -2987,6 +2989,7 @@ int r600_irq_set(struct radeon_device *rdev) u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 grbm_int_cntl = 0; u32 hdmi1, hdmi2; + u32 d1grph = 0, d2grph = 0; if (!rdev->irq.installed) { WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); @@ -3023,11 +3026,13 @@ int r600_irq_set(struct radeon_device *rdev) cp_int_cntl |= RB_INT_ENABLE; cp_int_cntl |= 
TIME_STAMP_INT_ENABLE; } - if (rdev->irq.crtc_vblank_int[0]) { + if (rdev->irq.crtc_vblank_int[0] || + rdev->irq.pflip[0]) { DRM_DEBUG("r600_irq_set: vblank 0\n"); mode_int |= D1MODE_VBLANK_INT_MASK; } - if (rdev->irq.crtc_vblank_int[1]) { + if (rdev->irq.crtc_vblank_int[1] || + rdev->irq.pflip[1]) { DRM_DEBUG("r600_irq_set: vblank 1\n"); mode_int |= D2MODE_VBLANK_INT_MASK; } @@ -3070,6 +3075,8 @@ int r600_irq_set(struct radeon_device *rdev) WREG32(CP_INT_CNTL, cp_int_cntl); WREG32(DxMODE_INT_MASK, mode_int); + WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); + WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); WREG32(GRBM_INT_CNTL, grbm_int_cntl); WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1); if (ASIC_IS_DCE3(rdev)) { @@ -3092,32 +3099,35 @@ int r600_irq_set(struct radeon_device *rdev) return 0; } -static inline void r600_irq_ack(struct radeon_device *rdev, - u32 *disp_int, - u32 *disp_int_cont, - u32 *disp_int_cont2) +static inline void r600_irq_ack(struct radeon_device *rdev) { u32 tmp; if (ASIC_IS_DCE3(rdev)) { - *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); - *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); - *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); + rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); + rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); + rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); } else { - *disp_int = RREG32(DISP_INTERRUPT_STATUS); - *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); - *disp_int_cont2 = 0; - } - - if (*disp_int & LB_D1_VBLANK_INTERRUPT) + rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS); + rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); + rdev->irq.stat_regs.r600.disp_int_cont2 = 0; + } + rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS); + rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS); + + if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED) + WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED) + WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); - if (*disp_int & LB_D1_VLINE_INTERRUPT) + if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK); - if (*disp_int & LB_D2_VBLANK_INTERRUPT) + if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); - if (*disp_int & LB_D2_VLINE_INTERRUPT) + if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK); - if (*disp_int & DC_HPD1_INTERRUPT) { + if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { if (ASIC_IS_DCE3(rdev)) { tmp = RREG32(DC_HPD1_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; @@ -3128,7 +3138,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev, WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); } } - if (*disp_int & DC_HPD2_INTERRUPT) { + if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { if (ASIC_IS_DCE3(rdev)) { tmp = RREG32(DC_HPD2_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; @@ -3139,7 +3149,7 @@ static inline void r600_irq_ack(struct radeon_device *rdev, WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); } } - if (*disp_int_cont & DC_HPD3_INTERRUPT) { + if (rdev->irq.stat_regs.r600.disp_int_cont & 
DC_HPD3_INTERRUPT) { if (ASIC_IS_DCE3(rdev)) { tmp = RREG32(DC_HPD3_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; @@ -3150,18 +3160,18 @@ static inline void r600_irq_ack(struct radeon_device *rdev, WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); } } - if (*disp_int_cont & DC_HPD4_INTERRUPT) { + if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { tmp = RREG32(DC_HPD4_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD4_INT_CONTROL, tmp); } if (ASIC_IS_DCE32(rdev)) { - if (*disp_int_cont2 & DC_HPD5_INTERRUPT) { + if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { tmp = RREG32(DC_HPD5_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD5_INT_CONTROL, tmp); } - if (*disp_int_cont2 & DC_HPD6_INTERRUPT) { + if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { tmp = RREG32(DC_HPD5_INT_CONTROL); tmp |= DC_HPDx_INT_ACK; WREG32(DC_HPD6_INT_CONTROL, tmp); @@ -3183,12 +3193,10 @@ static inline void r600_irq_ack(struct radeon_device *rdev, void r600_irq_disable(struct radeon_device *rdev) { - u32 disp_int, disp_int_cont, disp_int_cont2; - r600_disable_interrupts(rdev); /* Wait and acknowledge irq */ mdelay(1); - r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2); + r600_irq_ack(rdev); r600_disable_interrupt_state(rdev); } @@ -3251,7 +3259,7 @@ int r600_irq_process(struct radeon_device *rdev) u32 wptr = r600_get_ih_wptr(rdev); u32 rptr = rdev->ih.rptr; u32 src_id, src_data; - u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; + u32 ring_index; unsigned long flags; bool queue_hotplug = false; @@ -3272,7 +3280,7 @@ int r600_irq_process(struct radeon_device *rdev) restart_ih: /* display interrupts */ - r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2); + r600_irq_ack(rdev); rdev->ih.wptr = wptr; while (rptr != wptr) { @@ -3285,17 +3293,21 @@ restart_ih: case 1: /* D1 vblank/vline */ switch (src_data) { case 0: /* D1 vblank */ - if (disp_int & LB_D1_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 0); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int &= ~LB_D1_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) { + if (rdev->irq.pflip[0]) + radeon_crtc_handle_flip(rdev, 0); + if (rdev->irq.crtc_vblank_int[0]) { + drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); } break; case 1: /* D1 vline */ - if (disp_int & LB_D1_VLINE_INTERRUPT) { - disp_int &= ~LB_D1_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT; DRM_DEBUG("IH: D1 vline\n"); } break; @@ -3307,17 +3319,21 @@ restart_ih: case 5: /* D2 vblank/vline */ switch (src_data) { case 0: /* D2 vblank */ - if (disp_int & LB_D2_VBLANK_INTERRUPT) { - drm_handle_vblank(rdev->ddev, 1); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); - disp_int &= ~LB_D2_VBLANK_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) { + if (rdev->irq.pflip[1]) + radeon_crtc_handle_flip(rdev, 1); + if (rdev->irq.crtc_vblank_int[1]) { + drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } + rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); } break; case 1: /* D1 vline */ - if (disp_int & LB_D2_VLINE_INTERRUPT) { - disp_int &= ~LB_D2_VLINE_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int 
& LB_D2_VLINE_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT; DRM_DEBUG("IH: D2 vline\n"); } break; @@ -3329,43 +3345,43 @@ restart_ih: case 19: /* HPD/DAC hotplug */ switch (src_data) { case 0: - if (disp_int & DC_HPD1_INTERRUPT) { - disp_int &= ~DC_HPD1_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD1\n"); } break; case 1: - if (disp_int & DC_HPD2_INTERRUPT) { - disp_int &= ~DC_HPD2_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD2\n"); } break; case 4: - if (disp_int_cont & DC_HPD3_INTERRUPT) { - disp_int_cont &= ~DC_HPD3_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD3\n"); } break; case 5: - if (disp_int_cont & DC_HPD4_INTERRUPT) { - disp_int_cont &= ~DC_HPD4_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD4\n"); } break; case 10: - if (disp_int_cont2 & DC_HPD5_INTERRUPT) { - disp_int_cont2 &= ~DC_HPD5_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD5\n"); } break; case 12: - if (disp_int_cont2 & DC_HPD6_INTERRUPT) { - disp_int_cont2 &= ~DC_HPD6_INTERRUPT; + if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { + rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT; queue_hotplug = true; DRM_DEBUG("IH: HPD6\n"); } diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index bff4dc4f410f..c89cfa8e0c05 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -728,6 +728,15 @@ /* DCE 3.2 */ # define DC_HPDx_EN (1 << 28) +#define D1GRPH_INTERRUPT_STATUS 0x6158 +#define D2GRPH_INTERRUPT_STATUS 0x6958 +# define DxGRPH_PFLIP_INT_OCCURRED (1 << 0) +# define DxGRPH_PFLIP_INT_CLEAR (1 << 8) +#define D1GRPH_INTERRUPT_CONTROL 0x615c +#define D2GRPH_INTERRUPT_CONTROL 0x695c +# define DxGRPH_PFLIP_INT_MASK (1 << 0) +# define DxGRPH_PFLIP_INT_TYPE (1 << 8) + /* * PM4 */ diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3a7095743d44..ddf1eca13401 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -377,11 +377,56 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg); /* * IRQS. 
*/ + +struct radeon_unpin_work { + struct work_struct work; + struct radeon_device *rdev; + int crtc_id; + struct radeon_fence *fence; + struct drm_pending_vblank_event *event; + struct radeon_bo *old_rbo; + u64 new_crtc_base; +}; + +struct r500_irq_stat_regs { + u32 disp_int; +}; + +struct r600_irq_stat_regs { + u32 disp_int; + u32 disp_int_cont; + u32 disp_int_cont2; + u32 d1grph_int; + u32 d2grph_int; +}; + +struct evergreen_irq_stat_regs { + u32 disp_int; + u32 disp_int_cont; + u32 disp_int_cont2; + u32 disp_int_cont3; + u32 disp_int_cont4; + u32 disp_int_cont5; + u32 d1grph_int; + u32 d2grph_int; + u32 d3grph_int; + u32 d4grph_int; + u32 d5grph_int; + u32 d6grph_int; +}; + +union radeon_irq_stat_regs { + struct r500_irq_stat_regs r500; + struct r600_irq_stat_regs r600; + struct evergreen_irq_stat_regs evergreen; +}; + struct radeon_irq { bool installed; bool sw_int; /* FIXME: use a define max crtc rather than hardcode it */ bool crtc_vblank_int[6]; + bool pflip[6]; wait_queue_head_t vblank_queue; /* FIXME: use defines for max hpd/dacs */ bool hpd[6]; @@ -392,12 +437,17 @@ struct radeon_irq { bool hdmi[2]; spinlock_t sw_lock; int sw_refcount; + union radeon_irq_stat_regs stat_regs; + spinlock_t pflip_lock[6]; + int pflip_refcount[6]; }; int radeon_irq_kms_init(struct radeon_device *rdev); void radeon_irq_kms_fini(struct radeon_device *rdev); void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev); void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); +void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); +void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); /* * CP & ring. @@ -881,6 +931,10 @@ struct radeon_asic { void (*pm_finish)(struct radeon_device *rdev); void (*pm_init_profile)(struct radeon_device *rdev); void (*pm_get_dynpm_state)(struct radeon_device *rdev); + /* pageflipping */ + void (*pre_page_flip)(struct radeon_device *rdev, int crtc); + u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); + void (*post_page_flip)(struct radeon_device *rdev, int crtc); }; /* @@ -1344,6 +1398,9 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) #define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) +#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc)) +#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base)) +#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc)) /* Common functions */ /* AGP */ diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 64fb89ecbf74..6b126b3f5fa9 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -171,6 +171,9 @@ static struct radeon_asic r100_asic = { .pm_finish = &r100_pm_finish, .pm_init_profile = &r100_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &r100_pre_page_flip, + .page_flip = &r100_page_flip, + .post_page_flip = &r100_post_page_flip, }; static struct radeon_asic r200_asic = { @@ -215,6 +218,9 @@ static struct radeon_asic r200_asic = { .pm_finish = &r100_pm_finish, .pm_init_profile = &r100_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &r100_pre_page_flip, + .page_flip = &r100_page_flip, + .post_page_flip = &r100_post_page_flip, }; static struct 
radeon_asic r300_asic = { @@ -260,6 +266,9 @@ static struct radeon_asic r300_asic = { .pm_finish = &r100_pm_finish, .pm_init_profile = &r100_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &r100_pre_page_flip, + .page_flip = &r100_page_flip, + .post_page_flip = &r100_post_page_flip, }; static struct radeon_asic r300_asic_pcie = { @@ -304,6 +313,9 @@ static struct radeon_asic r300_asic_pcie = { .pm_finish = &r100_pm_finish, .pm_init_profile = &r100_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &r100_pre_page_flip, + .page_flip = &r100_page_flip, + .post_page_flip = &r100_post_page_flip, }; static struct radeon_asic r420_asic = { @@ -349,6 +361,9 @@ static struct radeon_asic r420_asic = { .pm_finish = &r100_pm_finish, .pm_init_profile = &r420_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &r100_pre_page_flip, + .page_flip = &r100_page_flip, + .post_page_flip = &r100_post_page_flip, }; static struct radeon_asic rs400_asic = { @@ -394,6 +409,9 @@ static struct radeon_asic rs400_asic = { .pm_finish = &r100_pm_finish, .pm_init_profile = &r100_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &r100_pre_page_flip, + .page_flip = &r100_page_flip, + .post_page_flip = &r100_post_page_flip, }; static struct radeon_asic rs600_asic = { @@ -439,6 +457,9 @@ static struct radeon_asic rs600_asic = { .pm_finish = &rs600_pm_finish, .pm_init_profile = &r420_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &rs600_pre_page_flip, + .page_flip = &rs600_page_flip, + .post_page_flip = &rs600_post_page_flip, }; static struct radeon_asic rs690_asic = { @@ -484,6 +505,9 @@ static struct radeon_asic rs690_asic = { .pm_finish = &rs600_pm_finish, .pm_init_profile = &r420_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &rs600_pre_page_flip, + .page_flip = &rs600_page_flip, + .post_page_flip = &rs600_post_page_flip, }; static struct radeon_asic rv515_asic = { @@ -529,6 +553,9 @@ static struct radeon_asic rv515_asic = { .pm_finish = &rs600_pm_finish, .pm_init_profile = &r420_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &rs600_pre_page_flip, + .page_flip = &rs600_page_flip, + .post_page_flip = &rs600_post_page_flip, }; static struct radeon_asic r520_asic = { @@ -574,6 +601,9 @@ static struct radeon_asic r520_asic = { .pm_finish = &rs600_pm_finish, .pm_init_profile = &r420_pm_init_profile, .pm_get_dynpm_state = &r100_pm_get_dynpm_state, + .pre_page_flip = &rs600_pre_page_flip, + .page_flip = &rs600_page_flip, + .post_page_flip = &rs600_post_page_flip, }; static struct radeon_asic r600_asic = { @@ -618,6 +648,9 @@ static struct radeon_asic r600_asic = { .pm_finish = &rs600_pm_finish, .pm_init_profile = &r600_pm_init_profile, .pm_get_dynpm_state = &r600_pm_get_dynpm_state, + .pre_page_flip = &rs600_pre_page_flip, + .page_flip = &rs600_page_flip, + .post_page_flip = &rs600_post_page_flip, }; static struct radeon_asic rs780_asic = { @@ -662,6 +695,9 @@ static struct radeon_asic rs780_asic = { .pm_finish = &rs600_pm_finish, .pm_init_profile = &rs780_pm_init_profile, .pm_get_dynpm_state = &r600_pm_get_dynpm_state, + .pre_page_flip = &rs600_pre_page_flip, + .page_flip = &rs600_page_flip, + .post_page_flip = &rs600_post_page_flip, }; static struct radeon_asic rv770_asic = { @@ -706,6 +742,9 @@ static struct radeon_asic rv770_asic = { .pm_finish = &rs600_pm_finish, .pm_init_profile = 
&r600_pm_init_profile, .pm_get_dynpm_state = &r600_pm_get_dynpm_state, + .pre_page_flip = &rs600_pre_page_flip, + .page_flip = &rv770_page_flip, + .post_page_flip = &rs600_post_page_flip, }; static struct radeon_asic evergreen_asic = { @@ -749,6 +788,9 @@ static struct radeon_asic evergreen_asic = { .pm_finish = &evergreen_pm_finish, .pm_init_profile = &r600_pm_init_profile, .pm_get_dynpm_state = &r600_pm_get_dynpm_state, + .pre_page_flip = &evergreen_pre_page_flip, + .page_flip = &evergreen_page_flip, + .post_page_flip = &evergreen_post_page_flip, }; int radeon_asic_init(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 740988244143..4970eda1bd41 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -130,6 +130,9 @@ extern void r100_pm_prepare(struct radeon_device *rdev); extern void r100_pm_finish(struct radeon_device *rdev); extern void r100_pm_init_profile(struct radeon_device *rdev); extern void r100_pm_get_dynpm_state(struct radeon_device *rdev); +extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc); +extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); +extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); /* * r200,rv250,rs300,rv280 @@ -205,6 +208,9 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev, extern void rs600_pm_misc(struct radeon_device *rdev); extern void rs600_pm_prepare(struct radeon_device *rdev); extern void rs600_pm_finish(struct radeon_device *rdev); +extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc); +extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); +extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc); /* * rs690,rs740 @@ -287,6 +293,7 @@ void rv770_fini(struct radeon_device *rdev); int rv770_suspend(struct radeon_device *rdev); int rv770_resume(struct radeon_device *rdev); extern void rv770_pm_misc(struct radeon_device *rdev); +extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); /* * evergreen @@ -314,5 +321,8 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p); extern void evergreen_pm_misc(struct radeon_device *rdev); extern void evergreen_pm_prepare(struct radeon_device *rdev); extern void evergreen_pm_finish(struct radeon_device *rdev); +extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); +extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); +extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); #endif diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index eeea7cbb9517..f6493f444faa 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -183,12 +183,273 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc) kfree(radeon_crtc); } +/* + * Handle unpin events outside the interrupt handler proper. 
+ */ +static void radeon_unpin_work_func(struct work_struct *__work) +{ + struct radeon_unpin_work *work = + container_of(__work, struct radeon_unpin_work, work); + int r; + + /* unpin of the old buffer */ + r = radeon_bo_reserve(work->old_rbo, false); + if (likely(r == 0)) { + r = radeon_bo_unpin(work->old_rbo); + if (unlikely(r != 0)) { + DRM_ERROR("failed to unpin buffer after flip\n"); + } + radeon_bo_unreserve(work->old_rbo); + } else + DRM_ERROR("failed to reserve buffer after flip\n"); + kfree(work); +} + +void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; + struct radeon_unpin_work *work; + struct drm_pending_vblank_event *e; + struct timeval now; + unsigned long flags; + u32 update_pending; + int vpos, hpos; + + spin_lock_irqsave(&rdev->ddev->event_lock, flags); + work = radeon_crtc->unpin_work; + if (work == NULL || + !radeon_fence_signaled(work->fence)) { + spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); + return; + } + /* New pageflip, or just completion of a previous one? */ + if (!radeon_crtc->deferred_flip_completion) { + /* do the flip (mmio) */ + update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base); + } else { + /* This is just a completion of a flip queued in crtc + * at last invocation. Make sure we go directly to + * completion routine. + */ + update_pending = 0; + radeon_crtc->deferred_flip_completion = 0; + } + + /* Has the pageflip already completed in crtc, or is it certain + * to complete in this vblank? + */ + if (update_pending && + (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, + &vpos, &hpos)) && + (vpos >=0) && + (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) { + /* crtc didn't flip in this target vblank interval, + * but flip is pending in crtc. It will complete it + * in next vblank interval, so complete the flip at + * next vblank irq. + */ + radeon_crtc->deferred_flip_completion = 1; + spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); + return; + } + + /* Pageflip (will be) certainly completed in this vblank. Clean up. 
*/ + radeon_crtc->unpin_work = NULL; + + /* wakeup userspace */ + if (work->event) { + e = work->event; + do_gettimeofday(&now); + e->event.sequence = drm_vblank_count(rdev->ddev, radeon_crtc->crtc_id); + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + list_add_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + } + spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); + + drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); + radeon_fence_unref(&work->fence); + radeon_post_page_flip(work->rdev, work->crtc_id); + schedule_work(&work->work); +} + +static int radeon_crtc_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct radeon_framebuffer *old_radeon_fb; + struct radeon_framebuffer *new_radeon_fb; + struct drm_gem_object *obj; + struct radeon_bo *rbo; + struct radeon_fence *fence; + struct radeon_unpin_work *work; + unsigned long flags; + u32 tiling_flags, pitch_pixels; + u64 base; + int r; + + work = kzalloc(sizeof *work, GFP_KERNEL); + if (work == NULL) + return -ENOMEM; + + r = radeon_fence_create(rdev, &fence); + if (unlikely(r != 0)) { + kfree(work); + DRM_ERROR("flip queue: failed to create fence.\n"); + return -ENOMEM; + } + work->event = event; + work->rdev = rdev; + work->crtc_id = radeon_crtc->crtc_id; + work->fence = radeon_fence_ref(fence); + old_radeon_fb = to_radeon_framebuffer(crtc->fb); + new_radeon_fb = to_radeon_framebuffer(fb); + /* schedule unpin of the old buffer */ + obj = old_radeon_fb->obj; + rbo = obj->driver_private; + work->old_rbo = rbo; + INIT_WORK(&work->work, radeon_unpin_work_func); + + /* We borrow the event spin lock for protecting unpin_work */ + spin_lock_irqsave(&dev->event_lock, flags); + if (radeon_crtc->unpin_work) { + spin_unlock_irqrestore(&dev->event_lock, flags); + kfree(work); + radeon_fence_unref(&fence); + + DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); + return -EBUSY; + } + radeon_crtc->unpin_work = work; + radeon_crtc->deferred_flip_completion = 0; + spin_unlock_irqrestore(&dev->event_lock, flags); + + /* pin the new buffer */ + obj = new_radeon_fb->obj; + rbo = obj->driver_private; + + DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", + work->old_rbo, rbo); + + r = radeon_bo_reserve(rbo, false); + if (unlikely(r != 0)) { + DRM_ERROR("failed to reserve new rbo buffer before flip\n"); + goto pflip_cleanup; + } + r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base); + if (unlikely(r != 0)) { + radeon_bo_unreserve(rbo); + r = -EINVAL; + DRM_ERROR("failed to pin new rbo buffer before flip\n"); + goto pflip_cleanup; + } + radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); + radeon_bo_unreserve(rbo); + + if (!ASIC_IS_AVIVO(rdev)) { + /* crtc offset is from display base addr not FB location */ + base -= radeon_crtc->legacy_display_base_addr; + pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8); + + if (tiling_flags & RADEON_TILING_MACRO) { + if (ASIC_IS_R300(rdev)) { + base &= ~0x7ff; + } else { + int byteshift = fb->bits_per_pixel >> 4; + int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11; + base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8); + } + } else { + int offset = crtc->y * pitch_pixels + crtc->x; + switch (fb->bits_per_pixel) { + case 8: + default: + offset *= 1; + break; + 
case 15: + case 16: + offset *= 2; + break; + case 24: + offset *= 3; + break; + case 32: + offset *= 4; + break; + } + base += offset; + } + base &= ~7; + } + + spin_lock_irqsave(&dev->event_lock, flags); + work->new_crtc_base = base; + spin_unlock_irqrestore(&dev->event_lock, flags); + + /* update crtc fb */ + crtc->fb = fb; + + r = drm_vblank_get(dev, radeon_crtc->crtc_id); + if (r) { + DRM_ERROR("failed to get vblank before flip\n"); + goto pflip_cleanup1; + } + + /* 32 ought to cover us */ + r = radeon_ring_lock(rdev, 32); + if (r) { + DRM_ERROR("failed to lock the ring before flip\n"); + goto pflip_cleanup2; + } + + /* emit the fence */ + radeon_fence_emit(rdev, fence); + /* set the proper interrupt */ + radeon_pre_page_flip(rdev, radeon_crtc->crtc_id); + /* fire the ring */ + radeon_ring_unlock_commit(rdev); + + return 0; + +pflip_cleanup2: + drm_vblank_put(dev, radeon_crtc->crtc_id); + +pflip_cleanup1: + r = radeon_bo_reserve(rbo, false); + if (unlikely(r != 0)) { + DRM_ERROR("failed to reserve new rbo in error path\n"); + goto pflip_cleanup; + } + r = radeon_bo_unpin(rbo); + if (unlikely(r != 0)) { + radeon_bo_unreserve(rbo); + r = -EINVAL; + DRM_ERROR("failed to unpin new rbo in error path\n"); + goto pflip_cleanup; + } + radeon_bo_unreserve(rbo); + +pflip_cleanup: + spin_lock_irqsave(&dev->event_lock, flags); + radeon_crtc->unpin_work = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + radeon_fence_unref(&fence); + kfree(work); + + return r; +} + static const struct drm_crtc_funcs radeon_crtc_funcs = { .cursor_set = radeon_crtc_cursor_set, .cursor_move = radeon_crtc_cursor_move, .gamma_set = radeon_crtc_gamma_set, .set_config = drm_crtc_helper_set_config, .destroy = radeon_crtc_destroy, + .page_flip = radeon_crtc_page_flip, }; static void radeon_crtc_init(struct drm_device *dev, int index) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 32ec0cc6be92..a92d2a5cea90 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -48,9 +48,10 @@ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) * 2.7.0 - fixups for r600 2D tiling support. 
(no external ABI change), add eg dyn gpr regs + * 2.8.0 - pageflip support */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 7 +#define KMS_DRIVER_MINOR 8 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index a108c7ed14f5..e0d1c6d1b9c7 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -71,8 +71,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) rdev->irq.gui_idle = false; for (i = 0; i < rdev->num_crtc; i++) rdev->irq.crtc_vblank_int[i] = false; - for (i = 0; i < 6; i++) + for (i = 0; i < 6; i++) { rdev->irq.hpd[i] = false; + rdev->irq.pflip[i] = false; + } radeon_irq_set(rdev); /* Clear bits */ radeon_irq_process(rdev); @@ -101,8 +103,10 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) rdev->irq.gui_idle = false; for (i = 0; i < rdev->num_crtc; i++) rdev->irq.crtc_vblank_int[i] = false; - for (i = 0; i < 6; i++) + for (i = 0; i < 6; i++) { rdev->irq.hpd[i] = false; + rdev->irq.pflip[i] = false; + } radeon_irq_set(rdev); } @@ -175,3 +179,34 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev) spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); } +void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc) +{ + unsigned long irqflags; + + if (crtc < 0 || crtc >= rdev->num_crtc) + return; + + spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags); + if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) { + rdev->irq.pflip[crtc] = true; + radeon_irq_set(rdev); + } + spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags); +} + +void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc) +{ + unsigned long irqflags; + + if (crtc < 0 || crtc >= rdev->num_crtc) + return; + + spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags); + BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0); + if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) { + rdev->irq.pflip[crtc] = false; + radeon_irq_set(rdev); + } + spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags); +} + diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 55856ad0ac41..f406f02bf14e 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -277,6 +277,9 @@ struct radeon_crtc { fixed20_12 hsc; struct drm_display_mode native_mode; int pll_id; + /* page flipping */ + struct radeon_unpin_work *unpin_work; + int deferred_flip_completion; }; struct radeon_encoder_primary_dac { @@ -659,4 +662,7 @@ int radeon_fbdev_total_size(struct radeon_device *rdev); bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); void radeon_fb_output_poll_changed(struct radeon_device *rdev); + +void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id); + #endif diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 64928814de53..26c43e234350 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h @@ -422,6 +422,7 @@ # define RADEON_CRTC_CSYNC_EN (1 << 4) # define RADEON_CRTC_ICON_EN (1 << 15) # define RADEON_CRTC_CUR_EN (1 << 16) +# define RADEON_CRTC_VSTAT_MODE_MASK (3 << 17) # define RADEON_CRTC_CUR_MODE_MASK (7 << 20) # define RADEON_CRTC_CUR_MODE_SHIFT 20 # define 
RADEON_CRTC_CUR_MODE_MONO 0 diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index f1c6e02c2e6b..683652bea17c 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -46,6 +46,56 @@ void rs600_gpu_init(struct radeon_device *rdev); int rs600_mc_wait_for_idle(struct radeon_device *rdev); +void rs600_pre_page_flip(struct radeon_device *rdev, int crtc) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; + u32 tmp; + + /* make sure flip is at vb rather than hb */ + tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset); + tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN; + WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); + + /* set pageflip to happen anywhere in vblank interval */ + WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); + + /* enable the pflip int */ + radeon_irq_kms_pflip_irq_get(rdev, crtc); +} + +void rs600_post_page_flip(struct radeon_device *rdev, int crtc) +{ + /* disable the pflip int */ + radeon_irq_kms_pflip_irq_put(rdev, crtc); +} + +u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; + u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); + + /* Lock the graphics update lock */ + tmp |= AVIVO_D1GRPH_UPDATE_LOCK; + WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); + + /* update the scanout addresses */ + WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + (u32)crtc_base); + WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + (u32)crtc_base); + + /* Wait for update_pending to go high. */ + while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); + DRM_DEBUG("Update pending now high. 
Unlocking vupdate_lock.\n"); + + /* Unlock the lock, so double-buffering can take place inside vblank */ + tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; + WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); + + /* Return current update_pending status: */ + return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; +} + void rs600_pm_misc(struct radeon_device *rdev) { int requested_index = rdev->pm.requested_power_state_index; @@ -515,10 +565,12 @@ int rs600_irq_set(struct radeon_device *rdev) if (rdev->irq.gui_idle) { tmp |= S_000040_GUI_IDLE(1); } - if (rdev->irq.crtc_vblank_int[0]) { + if (rdev->irq.crtc_vblank_int[0] || + rdev->irq.pflip[0]) { mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); } - if (rdev->irq.crtc_vblank_int[1]) { + if (rdev->irq.crtc_vblank_int[1] || + rdev->irq.pflip[1]) { mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); } if (rdev->irq.hpd[0]) { @@ -534,7 +586,7 @@ int rs600_irq_set(struct radeon_device *rdev) return 0; } -static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) +static inline u32 rs600_irq_ack(struct radeon_device *rdev) { uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); uint32_t irq_mask = S_000044_SW_INT(1); @@ -547,27 +599,27 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_ } if (G_000044_DISPLAY_INT_STAT(irqs)) { - *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); - if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { + rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); + if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { WREG32(R_006534_D1MODE_VBLANK_STATUS, S_006534_D1MODE_VBLANK_ACK(1)); } - if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) { + if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { WREG32(R_006D34_D2MODE_VBLANK_STATUS, S_006D34_D2MODE_VBLANK_ACK(1)); } - if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) { + if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1); WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); } - if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) { + if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1); WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); } } else { - *r500_disp_int = 0; + rdev->irq.stat_regs.r500.disp_int = 0; } if (irqs) { @@ -578,32 +630,30 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_ void rs600_irq_disable(struct radeon_device *rdev) { - u32 tmp; - WREG32(R_000040_GEN_INT_CNTL, 0); WREG32(R_006540_DxMODE_INT_MASK, 0); /* Wait and acknowledge irq */ mdelay(1); - rs600_irq_ack(rdev, &tmp); + rs600_irq_ack(rdev); } int rs600_irq_process(struct radeon_device *rdev) { - uint32_t status, msi_rearm; - uint32_t r500_disp_int; + u32 status, msi_rearm; bool queue_hotplug = false; /* reset gui idle ack. 
the status bit is broken */ rdev->irq.gui_idle_acked = false; - status = rs600_irq_ack(rdev, &r500_disp_int); - if (!status && !r500_disp_int) { + status = rs600_irq_ack(rdev); + if (!status && !rdev->irq.stat_regs.r500.disp_int) { return IRQ_NONE; } - while (status || r500_disp_int) { + while (status || rdev->irq.stat_regs.r500.disp_int) { /* SW interrupt */ - if (G_000044_SW_INT(status)) + if (G_000044_SW_INT(status)) { radeon_fence_process(rdev); + } /* GUI idle */ if (G_000040_GUI_IDLE(status)) { rdev->irq.gui_idle_acked = true; @@ -611,25 +661,33 @@ int rs600_irq_process(struct radeon_device *rdev) wake_up(&rdev->irq.idle_queue); } /* Vertical blank interrupts */ - if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { - drm_handle_vblank(rdev->ddev, 0); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); + if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { + if (rdev->irq.pflip[0]) + radeon_crtc_handle_flip(rdev, 0); + if (rdev->irq.crtc_vblank_int[0]) { + drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } } - if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) { - drm_handle_vblank(rdev->ddev, 1); - rdev->pm.vblank_sync = true; - wake_up(&rdev->irq.vblank_queue); + if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { + if (rdev->irq.pflip[1]) + radeon_crtc_handle_flip(rdev, 1); + if (rdev->irq.crtc_vblank_int[1]) { + drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; + wake_up(&rdev->irq.vblank_queue); + } } - if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { + if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { queue_hotplug = true; DRM_DEBUG("HPD1\n"); } - if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) { + if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { queue_hotplug = true; DRM_DEBUG("HPD2\n"); } - status = rs600_irq_ack(rdev, &r500_disp_int); + status = rs600_irq_ack(rdev); } /* reset gui idle ack. the status bit is broken */ rdev->irq.gui_idle_acked = false; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4dfead8cee33..42ff07893f3a 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -42,6 +42,40 @@ static void rv770_gpu_init(struct radeon_device *rdev); void rv770_fini(struct radeon_device *rdev); +u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) +{ + struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; + u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset); + + /* Lock the graphics update lock */ + tmp |= AVIVO_D1GRPH_UPDATE_LOCK; + WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); + + /* update the scanout addresses */ + if (radeon_crtc->crtc_id) { + WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); + WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); + } else { + WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); + WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base)); + } + WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + (u32)crtc_base); + WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + (u32)crtc_base); + + /* Wait for update_pending to go high. */ + while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)); + DRM_DEBUG("Update pending now high. 
Unlocking vupdate_lock.\n"); + + /* Unlock the lock, so double-buffering can take place inside vblank */ + tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; + WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp); + + /* Return current update_pending status: */ + return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING; +} + /* get temperature in millidegrees */ u32 rv770_get_temp(struct radeon_device *rdev) { diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index b7a5a20e81dc..11955c685ad1 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -351,4 +351,11 @@ #define SRBM_STATUS 0x0E50 +#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 +#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914 +#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114 +#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118 +#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c +#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c + #endif -- cgit v1.2.3 From 3e4ea7421f45966c93c8cbe81569e8dc93a58b87 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Sun, 21 Nov 2010 10:59:02 -0500 Subject: drm/kms/radeon: Reorder vblank and pageflip interrupt handling. In the vblank irq handler, calls to actual vblank handling, or at least drm_handle_vblank(), need to happen before calls to radeon_crtc_handle_flip(). Reason: The high precision pageflip timestamping and some other pageflip optimizations will need the updated vblank count and timestamps for the current vblank interval. These are calculated in drm_handle_vblank(), therefore it must go first. Signed-off-by: Mario Kleiner Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 8 ++++---- drivers/gpu/drm/radeon/r100.c | 8 ++++---- drivers/gpu/drm/radeon/r600.c | 8 ++++---- drivers/gpu/drm/radeon/rs600.c | 8 ++++---- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index df3f37243222..25e84379e7c6 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2398,13 +2398,13 @@ restart_ih: switch (src_data) { case 0: /* D1 vblank */ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { - if (rdev->irq.pflip[0]) - radeon_crtc_handle_flip(rdev, 0); if (rdev->irq.crtc_vblank_int[0]) { drm_handle_vblank(rdev->ddev, 0); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } + if (rdev->irq.pflip[0]) + radeon_crtc_handle_flip(rdev, 0); rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); } @@ -2424,13 +2424,13 @@ restart_ih: switch (src_data) { case 0: /* D2 vblank */ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { - if (rdev->irq.pflip[1]) - radeon_crtc_handle_flip(rdev, 1); if (rdev->irq.crtc_vblank_int[1]) { drm_handle_vblank(rdev->ddev, 1); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } + if (rdev->irq.pflip[1]) + radeon_crtc_handle_flip(rdev, 1); rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); } diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index b2e29798a99d..2316f73db6c0 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -650,22 +650,22 @@ int r100_irq_process(struct radeon_device *rdev) } /* Vertical blank interrupts */ if (status & RADEON_CRTC_VBLANK_STAT) { - if (rdev->irq.pflip[0]) - 
radeon_crtc_handle_flip(rdev, 0); if (rdev->irq.crtc_vblank_int[0]) { drm_handle_vblank(rdev->ddev, 0); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } + if (rdev->irq.pflip[0]) + radeon_crtc_handle_flip(rdev, 0); } if (status & RADEON_CRTC2_VBLANK_STAT) { - if (rdev->irq.pflip[1]) - radeon_crtc_handle_flip(rdev, 1); if (rdev->irq.crtc_vblank_int[1]) { drm_handle_vblank(rdev->ddev, 1); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } + if (rdev->irq.pflip[1]) + radeon_crtc_handle_flip(rdev, 1); } if (status & RADEON_FP_DETECT_STAT) { queue_hotplug = true; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 15b95724c408..7057b392e005 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3294,13 +3294,13 @@ restart_ih: switch (src_data) { case 0: /* D1 vblank */ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) { - if (rdev->irq.pflip[0]) - radeon_crtc_handle_flip(rdev, 0); if (rdev->irq.crtc_vblank_int[0]) { drm_handle_vblank(rdev->ddev, 0); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } + if (rdev->irq.pflip[0]) + radeon_crtc_handle_flip(rdev, 0); rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); } @@ -3320,13 +3320,13 @@ restart_ih: switch (src_data) { case 0: /* D2 vblank */ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) { - if (rdev->irq.pflip[1]) - radeon_crtc_handle_flip(rdev, 1); if (rdev->irq.crtc_vblank_int[1]) { drm_handle_vblank(rdev->ddev, 1); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } + if (rdev->irq.pflip[1]) + radeon_crtc_handle_flip(rdev, 1); rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); } diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 683652bea17c..9a85b1614c86 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -662,22 +662,22 @@ int rs600_irq_process(struct radeon_device *rdev) } /* Vertical blank interrupts */ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { - if (rdev->irq.pflip[0]) - radeon_crtc_handle_flip(rdev, 0); if (rdev->irq.crtc_vblank_int[0]) { drm_handle_vblank(rdev->ddev, 0); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } + if (rdev->irq.pflip[0]) + radeon_crtc_handle_flip(rdev, 0); } if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { - if (rdev->irq.pflip[1]) - radeon_crtc_handle_flip(rdev, 1); if (rdev->irq.crtc_vblank_int[1]) { drm_handle_vblank(rdev->ddev, 1); rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); } + if (rdev->irq.pflip[1]) + radeon_crtc_handle_flip(rdev, 1); } if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) { queue_hotplug = true; -- cgit v1.2.3 From b6724405bc67ef3ae1318002085cde382b12345f Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Sun, 21 Nov 2010 10:59:03 -0500 Subject: drm/kms/radeon: Use high precision timestamps for pageflip completion events. 
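drm_vblank_count_and_time() returns the vblank counter together with the
timestamp recorded when that count was last updated, so both values come
from the same vblank sample. Calling do_gettimeofday() separately, as the
old code did, stamps the moment the irq handler happens to run rather than
the vblank itself. A minimal sketch of the pattern the patch adopts (the
variable names follow radeon_crtc_handle_flip() in the diff below):

	struct timeval now;
	u32 seq;

	/* count and timestamp are taken from the same vblank sample */
	seq = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now);
	e->event.sequence = seq;
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;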
Signed-off-by: Mario Kleiner Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_display.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index f6493f444faa..15f24f2ee04d 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -259,8 +259,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) /* wakeup userspace */ if (work->event) { e = work->event; - do_gettimeofday(&now); - e->event.sequence = drm_vblank_count(rdev->ddev, radeon_crtc->crtc_id); + e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now); e->event.tv_sec = now.tv_sec; e->event.tv_usec = now.tv_usec; list_add_tail(&e->base.link, &e->base.file_priv->event_list); -- cgit v1.2.3 From d6ea88865d3e5b0c62040531310c1f2c6a994f46 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 22 Nov 2010 13:24:40 +1000 Subject: drm/ttm: Add a bo list reserve fastpath (v2) Makes it possible to reserve a list of buffer objects with a single spin lock / unlock if there is no contention. Should improve cpu usage on SMP kernels. v2: Initialize private list members on reserve and don't call ttm_bo_list_ref_sub() with zero put_count. Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 32 ++++----- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 124 ++++++++++++++++++++++++++++++--- include/drm/ttm/ttm_bo_api.h | 38 ++++++++++ include/drm/ttm/ttm_bo_driver.h | 14 ++++ include/drm/ttm/ttm_execbuf_util.h | 6 +- 5 files changed, 186 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 148a322d8f5d..a586378b1b2b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -169,7 +169,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) } EXPORT_SYMBOL(ttm_bo_wait_unreserved); -static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) +void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; struct ttm_mem_type_manager *man; @@ -191,11 +191,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) } } -/** - * Call with the lru_lock held. - */ - -static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) +int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) { int put_count = 0; @@ -267,6 +263,15 @@ static void ttm_bo_ref_bug(struct kref *list_kref) BUG(); } +void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, + bool never_free) +{ + while (count--) + kref_put(&bo->list_kref, + (never_free || (count >= 0)) ? 
ttm_bo_ref_bug : + ttm_bo_release_list); +} + int ttm_bo_reserve(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence) @@ -282,8 +287,7 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo, put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); - while (put_count--) - kref_put(&bo->list_kref, ttm_bo_ref_bug); + ttm_bo_list_ref_sub(bo, put_count, true); return ret; } @@ -496,8 +500,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) spin_unlock(&glob->lru_lock); ttm_bo_cleanup_memtype_use(bo); - while (put_count--) - kref_put(&bo->list_kref, ttm_bo_ref_bug); + ttm_bo_list_ref_sub(bo, put_count, true); return; } else { @@ -580,8 +583,7 @@ retry: spin_unlock(&glob->lru_lock); ttm_bo_cleanup_memtype_use(bo); - while (put_count--) - kref_put(&bo->list_kref, ttm_bo_ref_bug); + ttm_bo_list_ref_sub(bo, put_count, true); return 0; } @@ -802,8 +804,7 @@ retry: BUG_ON(ret != 0); - while (put_count--) - kref_put(&bo->list_kref, ttm_bo_ref_bug); + ttm_bo_list_ref_sub(bo, put_count, true); ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); ttm_bo_unreserve(bo); @@ -1783,8 +1784,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); - while (put_count--) - kref_put(&bo->list_kref, ttm_bo_ref_bug); + ttm_bo_list_ref_sub(bo, put_count, true); /** * Wait for GPU, then move to system cached. diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index c285c2902d15..201a71d111ec 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -32,6 +32,72 @@ #include #include +static void ttm_eu_backoff_reservation_locked(struct list_head *list) +{ + struct ttm_validate_buffer *entry; + + list_for_each_entry(entry, list, head) { + struct ttm_buffer_object *bo = entry->bo; + if (!entry->reserved) + continue; + + if (entry->removed) { + ttm_bo_add_to_lru(bo); + entry->removed = false; + + } + entry->reserved = false; + atomic_set(&bo->reserved, 0); + wake_up_all(&bo->event_queue); + } +} + +static void ttm_eu_del_from_lru_locked(struct list_head *list) +{ + struct ttm_validate_buffer *entry; + + list_for_each_entry(entry, list, head) { + struct ttm_buffer_object *bo = entry->bo; + if (!entry->reserved) + continue; + + if (!entry->removed) { + entry->put_count = ttm_bo_del_from_lru(bo); + entry->removed = true; + } + } +} + +static void ttm_eu_list_ref_sub(struct list_head *list) +{ + struct ttm_validate_buffer *entry; + + list_for_each_entry(entry, list, head) { + struct ttm_buffer_object *bo = entry->bo; + + if (entry->put_count) { + ttm_bo_list_ref_sub(bo, entry->put_count, true); + entry->put_count = 0; + } + } +} + +static int ttm_eu_wait_unreserved_locked(struct list_head *list, + struct ttm_buffer_object *bo) +{ + struct ttm_bo_global *glob = bo->glob; + int ret; + + ttm_eu_del_from_lru_locked(list); + spin_unlock(&glob->lru_lock); + ret = ttm_bo_wait_unreserved(bo, true); + spin_lock(&glob->lru_lock); + if (unlikely(ret != 0)) + ttm_eu_backoff_reservation_locked(list); + return ret; +} + + void ttm_eu_backoff_reservation(struct list_head *list) { struct ttm_validate_buffer *entry; @@ -61,35 +127,71 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation); int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq) { + struct ttm_bo_global *glob; struct ttm_validate_buffer *entry; int ret; + if (list_empty(list)) + return 0; + + list_for_each_entry(entry, list, 
head) { + entry->reserved = false; + entry->put_count = 0; + entry->removed = false; + } + + entry = list_first_entry(list, struct ttm_validate_buffer, head); + glob = entry->bo->glob; + retry: + spin_lock(&glob->lru_lock); list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - entry->reserved = false; - ret = ttm_bo_reserve(bo, true, false, true, val_seq); - if (ret != 0) { - ttm_eu_backoff_reservation(list); - if (ret == -EAGAIN) { - ret = ttm_bo_wait_unreserved(bo, true); - if (unlikely(ret != 0)) - return ret; - goto retry; - } else +retry_this_bo: + ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq); + switch (ret) { + case 0: + break; + case -EBUSY: + ret = ttm_eu_wait_unreserved_locked(list, bo); + if (unlikely(ret != 0)) { + spin_unlock(&glob->lru_lock); + ttm_eu_list_ref_sub(list); + return ret; + } + goto retry_this_bo; + case -EAGAIN: + ttm_eu_backoff_reservation_locked(list); + spin_unlock(&glob->lru_lock); + ttm_eu_list_ref_sub(list); + ret = ttm_bo_wait_unreserved(bo, true); + if (unlikely(ret != 0)) return ret; + goto retry; + default: + ttm_eu_backoff_reservation_locked(list); + spin_unlock(&glob->lru_lock); + ttm_eu_list_ref_sub(list); + return ret; } entry->reserved = true; if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { - ttm_eu_backoff_reservation(list); + ttm_eu_backoff_reservation_locked(list); + spin_unlock(&glob->lru_lock); + ttm_eu_list_ref_sub(list); ret = ttm_bo_wait_cpu(bo, false); if (ret) return ret; goto retry; } } + + ttm_eu_del_from_lru_locked(list); + spin_unlock(&glob->lru_lock); + ttm_eu_list_ref_sub(list); + return 0; } EXPORT_SYMBOL(ttm_eu_reserve_buffers); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index beafc156a535..b0fc9c12554b 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -364,6 +364,44 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo, */ extern void ttm_bo_unref(struct ttm_buffer_object **bo); + +/** + * ttm_bo_list_ref_sub + * + * @bo: The buffer object. + * @count: The number of references with which to decrease @bo::list_kref; + * @never_free: The refcount should not reach zero with this operation. + * + * Release @count lru list references to this buffer object. + */ +extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, + bool never_free); + +/** + * ttm_bo_add_to_lru + * + * @bo: The buffer object. + * + * Add this bo to the relevant mem type lru and, if it's backed by + * system pages (ttms) to the swap list. + * This function must be called with struct ttm_bo_global::lru_lock held, and + * is typically called immediately prior to unreserving a bo. + */ +extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); + +/** + * ttm_bo_del_from_lru + * + * @bo: The buffer object. + * + * Remove this bo from all lru lists used to lookup and reserve an object. + * This function must be called with struct ttm_bo_global::lru_lock held, + * and is usually called just immediately after the bo has been reserved to + * avoid recursive reservation from lru lists. 
+ */ +extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo); + + /** * ttm_bo_lock_delayed_workqueue * diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 8e0c848326b6..95068e6024db 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -864,6 +864,20 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, uint32_t sequence); + +/** + * ttm_bo_reserve_locked: + * + * Similar to ttm_bo_reserve, but must be called with the glob::lru_lock + * spinlock held, and will not remove reserved buffers from the lru lists. + * The function may release the LRU spinlock if it needs to sleep. + */ + +extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, + bool interruptible, + bool no_wait, bool use_sequence, + uint32_t sequence); + /** * ttm_bo_unreserve * diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index cd2c475da9ea..fd09b8438977 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -41,7 +41,9 @@ * @bo: refcounted buffer object pointer. * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once * adding a new sync object. - * @reservied: Indicates whether @bo has been reserved for validation. + * @reserved: Indicates whether @bo has been reserved for validation. + * @removed: Indicates whether @bo has been removed from lru lists. + * @put_count: Number of outstanding references on bo::list_kref. */ struct ttm_validate_buffer { @@ -49,6 +51,8 @@ struct ttm_validate_buffer { struct ttm_buffer_object *bo; void *new_sync_obj_arg; bool reserved; + bool removed; + int put_count; }; /** -- cgit v1.2.3 From ecf7ace9a8450303a987aa8364e53860cd50e554 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 16 Nov 2010 15:21:07 +0100 Subject: kref: Add a kref_sub function Makes it possible to optimize batched multiple unrefs. Initial user will be drivers/gpu/ttm which accumulates unrefs to be processed outside of atomic code. Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- include/linux/kref.h | 2 ++ lib/kref.c | 30 ++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/include/linux/kref.h b/include/linux/kref.h index 6cc38fc07ab7..d4a62ab2ee5e 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -24,5 +24,7 @@ struct kref { void kref_init(struct kref *kref); void kref_get(struct kref *kref); int kref_put(struct kref *kref, void (*release) (struct kref *kref)); +int kref_sub(struct kref *kref, unsigned int count, + void (*release) (struct kref *kref)); #endif /* _KREF_H_ */ diff --git a/lib/kref.c b/lib/kref.c index d3d227a08a4b..3efb882b11db 100644 --- a/lib/kref.c +++ b/lib/kref.c @@ -62,6 +62,36 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref)) return 0; } + +/** + * kref_sub - subtract a number of refcounts for object. + * @kref: object. + * @count: Number of recounts to subtract. + * @release: pointer to the function that will clean up the object when the + * last reference to the object is released. + * This pointer is required, and it is not acceptable to pass kfree + * in as this function. + * + * Subtract @count from the refcount, and if 0, call release(). + * Return 1 if the object was removed, otherwise return 0. Beware, if this + * function returns 0, you still can not count on the kref from remaining in + * memory. Only use the return value if you want to see if the kref is now + * gone, not present. 
+ */ +int kref_sub(struct kref *kref, unsigned int count, + void (*release)(struct kref *kref)) +{ + WARN_ON(release == NULL); + WARN_ON(release == (void (*)(struct kref *))kfree); + + if (atomic_sub_and_test((int) count, &kref->refcount)) { + release(kref); + return 1; + } + return 0; +} + EXPORT_SYMBOL(kref_init); EXPORT_SYMBOL(kref_get); EXPORT_SYMBOL(kref_put); +EXPORT_SYMBOL(kref_sub); -- cgit v1.2.3 From 2357cbe5f4ca8a52329c2c2a26b68839870d5d43 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Tue, 16 Nov 2010 15:21:08 +0100 Subject: drm/ttm: Use kref_sub instead of repeatedly calling kref_put Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a586378b1b2b..9ef893d5da88 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -266,10 +266,8 @@ static void ttm_bo_ref_bug(struct kref *list_kref) void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, bool never_free) { - while (count--) - kref_put(&bo->list_kref, - (never_free || (count >= 0)) ? ttm_bo_ref_bug : - ttm_bo_release_list); + kref_sub(&bo->list_kref, count, + (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list); } int ttm_bo_reserve(struct ttm_buffer_object *bo, -- cgit v1.2.3 From 68c4fa31aa52765314b4285a7835368ea35b509c Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 17 Nov 2010 12:28:27 +0000 Subject: drm/ttm: Optimize ttm_eu_backoff_reservation Avoid the ttm_bo_unreserve() spinlocks by calling ttm_eu_backoff_reservation_locked under the lru spinlock. Signed-off-by: Thomas Hellstrom Reviewed-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 201a71d111ec..7dcc6470e2f5 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -101,15 +101,16 @@ static int ttm_eu_wait_unreserved_locked(struct list_head *list, void ttm_eu_backoff_reservation(struct list_head *list) { struct ttm_validate_buffer *entry; + struct ttm_bo_global *glob; - list_for_each_entry(entry, list, head) { - struct ttm_buffer_object *bo = entry->bo; - if (!entry->reserved) - continue; + if (list_empty(list)) + return; - entry->reserved = false; - ttm_bo_unreserve(bo); - } + entry = list_first_entry(list, struct ttm_validate_buffer, head); + glob = entry->bo->glob; + spin_lock(&glob->lru_lock); + ttm_eu_backoff_reservation_locked(list); + spin_unlock(&glob->lru_lock); } EXPORT_SYMBOL(ttm_eu_backoff_reservation); -- cgit v1.2.3 From 96726fe50feae74812a2ccf5d5da23cb01c0a413 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 17 Nov 2010 12:28:28 +0000 Subject: drm/ttm: Don't deadlock on recursive multi-bo reservations Add an aid for the driver to detect deadlocks on multi-bo reservations Update documentation. 
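With this change ttm_bo_reserve_locked() distinguishes two failure modes
when a sequence is in use: reserving a buffer this thread already reserved
under the same sequence returns -EDEADLK, while losing to a thread that
will not back off still returns -EAGAIN. A hedged sketch of how a multi-bo
reservation loop can tell the two apart (list handling is elided, and the
backoff_and_retry label is a stand-in for the caller's own retry path):

	ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
	switch (ret) {
	case 0:
		break;
	case -EDEADLK:
		/* We already reserved this bo under the same sequence:
		 * the buffer appears twice in our own list, so backing
		 * off and retrying would loop forever. Fail instead. */
		return -EDEADLK;
	case -EAGAIN:
		/* Reserved by a thread that will not back off for us:
		 * release all reservations held so far and retry. */
		goto backoff_and_retry;
	default:
		return ret;
	}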
Signed-off-by: Thomas Hellstrom Reviewed-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 15 ++++++++++++--- include/drm/ttm/ttm_bo_driver.h | 25 ++++++++++++++++++++++--- 2 files changed, 34 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 9ef893d5da88..5d8750830dc3 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -223,9 +223,18 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, /** * Deadlock avoidance for multi-bo reserving. */ - if (use_sequence && bo->seq_valid && - (sequence - bo->val_seq < (1 << 31))) { - return -EAGAIN; + if (use_sequence && bo->seq_valid) { + /** + * We've already reserved this one. + */ + if (unlikely(sequence == bo->val_seq)) + return -EDEADLK; + /** + * Already reserved by a thread that will not back + * off for us. We need to back off. + */ + if (unlikely(sequence - bo->val_seq < (1 << 31))) + return -EAGAIN; } if (no_wait) diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 95068e6024db..1e25a40c688e 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -859,6 +859,9 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); * try again. (only if use_sequence == 1). * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by * a signal. Release all buffer reservations and return to user-space. + * -EBUSY: The function needed to sleep, but @no_wait was true + * -EDEADLK: Bo already reserved using @sequence. This error code will only + * be returned if @use_sequence is set to true. */ extern int ttm_bo_reserve(struct ttm_buffer_object *bo, bool interruptible, @@ -868,11 +871,27 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo, /** * ttm_bo_reserve_locked: * - * Similar to ttm_bo_reserve, but must be called with the glob::lru_lock - * spinlock held, and will not remove reserved buffers from the lru lists. + * @bo: A pointer to a struct ttm_buffer_object. + * @interruptible: Sleep interruptible if waiting. + * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY. + * @use_sequence: If @bo is already reserved, Only sleep waiting for + * it to become unreserved if @sequence < (@bo)->sequence. + * + * Must be called with struct ttm_bo_global::lru_lock held, + * and will not remove reserved buffers from the lru lists. * The function may release the LRU spinlock if it needs to sleep. + * Otherwise identical to ttm_bo_reserve. + * + * Returns: + * -EAGAIN: The reservation may cause a deadlock. + * Release all buffer reservations, wait for @bo to become unreserved and + * try again. (only if use_sequence == 1). + * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by + * a signal. Release all buffer reservations and return to user-space. + * -EBUSY: The function needed to sleep, but @no_wait was true + * -EDEADLK: Bo already reserved using @sequence. This error code will only + * be returned if @use_sequence is set to true. 
*/ - extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, bool interruptible, bool no_wait, bool use_sequence, -- cgit v1.2.3 From 702adba22433c175e8429a47760f35ca16caf1cd Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 17 Nov 2010 12:28:29 +0000 Subject: drm/ttm/radeon/nouveau: Kill the bo lock in favour of a bo device fence_lock The bo lock used only to protect the bo sync object members, and since it is a per bo lock, fencing a buffer list will see a lot of locks and unlocks. Replace it with a per-device lock that protects the sync object members on *all* bos. Reading and setting these members will always be very quick, so the risc of heavy lock contention is microscopic. Note that waiting for sync objects will always take place outside of this lock. The bo device fence lock will eventually be replaced with a seqlock / rcu mechanism so we can determine that a bo is idle under a rcu / read seqlock. However this change will allow us to batch fencing and unreserving of buffers with a minimal amount of locking. Signed-off-by: Thomas Hellstrom Reviewed-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nouveau_gem.c | 12 ++++---- drivers/gpu/drm/radeon/radeon_object.c | 4 +-- drivers/gpu/drm/radeon/radeon_object.h | 4 +-- drivers/gpu/drm/ttm/ttm_bo.c | 55 +++++++++++++++++----------------- drivers/gpu/drm/ttm/ttm_bo_util.c | 7 ++--- drivers/gpu/drm/ttm/ttm_bo_vm.c | 6 ++-- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 7 +++-- include/drm/ttm/ttm_bo_api.h | 6 ++-- include/drm/ttm/ttm_bo_driver.h | 3 ++ 9 files changed, 53 insertions(+), 51 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 9a1fdcf400c2..1f2301d26c0a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -234,10 +234,10 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence) if (likely(fence)) { struct nouveau_fence *prev_fence; - spin_lock(&nvbo->bo.lock); + spin_lock(&nvbo->bo.bdev->fence_lock); prev_fence = nvbo->bo.sync_obj; nvbo->bo.sync_obj = nouveau_fence_ref(fence); - spin_unlock(&nvbo->bo.lock); + spin_unlock(&nvbo->bo.bdev->fence_lock); nouveau_fence_unref((void *)&prev_fence); } @@ -557,9 +557,9 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev, data |= r->vor; } - spin_lock(&nvbo->bo.lock); + spin_lock(&nvbo->bo.bdev->fence_lock); ret = ttm_bo_wait(&nvbo->bo, false, false, false); - spin_unlock(&nvbo->bo.lock); + spin_unlock(&nvbo->bo.bdev->fence_lock); if (ret) { NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret); break; @@ -791,9 +791,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, } if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { - spin_lock(&nvbo->bo.lock); + spin_lock(&nvbo->bo.bdev->fence_lock); ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); - spin_unlock(&nvbo->bo.lock); + spin_unlock(&nvbo->bo.bdev->fence_lock); } else { ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); if (ret == 0) diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 1d067743fee0..e939cb6a91cc 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -369,11 +369,11 @@ void radeon_bo_list_fence(struct list_head *head, void *fence) list_for_each_entry(lobj, head, list) { bo = lobj->bo; - spin_lock(&bo->tbo.lock); + spin_lock(&bo->tbo.bdev->fence_lock); old_fence = (struct radeon_fence *)bo->tbo.sync_obj; bo->tbo.sync_obj = radeon_fence_ref(fence); 
bo->tbo.sync_obj_arg = NULL; - spin_unlock(&bo->tbo.lock); + spin_unlock(&bo->tbo.bdev->fence_lock); if (old_fence) { radeon_fence_unref(&old_fence); } diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index d143702b244a..fd536751f865 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -126,12 +126,12 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); if (unlikely(r != 0)) return r; - spin_lock(&bo->tbo.lock); + spin_lock(&bo->tbo.bdev->fence_lock); if (mem_type) *mem_type = bo->tbo.mem.mem_type; if (bo->tbo.sync_obj) r = ttm_bo_wait(&bo->tbo, true, true, no_wait); - spin_unlock(&bo->tbo.lock); + spin_unlock(&bo->tbo.bdev->fence_lock); ttm_bo_unreserve(&bo->tbo); return r; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 5d8750830dc3..d93c73b1c471 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -427,11 +427,9 @@ moved: } if (bo->mem.mm_node) { - spin_lock(&bo->lock); bo->offset = (bo->mem.start << PAGE_SHIFT) + bdev->man[bo->mem.mem_type].gpu_offset; bo->cur_placement = bo->mem.placement; - spin_unlock(&bo->lock); } else bo->offset = 0; @@ -485,14 +483,14 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) int put_count; int ret; - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); (void) ttm_bo_wait(bo, false, false, true); if (!bo->sync_obj) { spin_lock(&glob->lru_lock); /** - * Lock inversion between bo::reserve and bo::lock here, + * Lock inversion between bo:reserve and bdev::fence_lock here, * but that's OK, since we're only trylocking. */ @@ -501,7 +499,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) if (unlikely(ret == -EBUSY)) goto queue; - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); put_count = ttm_bo_del_from_lru(bo); spin_unlock(&glob->lru_lock); @@ -522,7 +520,7 @@ queue: kref_get(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); spin_unlock(&glob->lru_lock); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (sync_obj) { driver->sync_obj_flush(sync_obj, sync_obj_arg); @@ -547,14 +545,15 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool no_wait_reserve, bool no_wait_gpu) { + struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; int put_count; int ret = 0; retry: - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (unlikely(ret != 0)) return ret; @@ -707,9 +706,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, struct ttm_placement placement; int ret = 0; - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (unlikely(ret != 0)) { if (ret != -ERESTARTSYS) { @@ -1044,6 +1043,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, { int ret = 0; struct ttm_mem_reg mem; + struct ttm_bo_device *bdev = bo->bdev; BUG_ON(!atomic_read(&bo->reserved)); @@ -1052,9 +1052,9 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, * Have the driver move function wait for idle when necessary, * instead of doing it here. 
*/ - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (ret) return ret; mem.num_pages = bo->num_pages; @@ -1171,7 +1171,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev, } bo->destroy = destroy; - spin_lock_init(&bo->lock); kref_init(&bo->kref); kref_init(&bo->list_kref); atomic_set(&bo->cpu_writers, 0); @@ -1535,7 +1534,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, bdev->dev_mapping = NULL; bdev->glob = glob; bdev->need_dma32 = need_dma32; - + spin_lock_init(&bdev->fence_lock); mutex_lock(&glob->device_list_mutex); list_add_tail(&bdev->device_list, &glob->device_list); mutex_unlock(&glob->device_list_mutex); @@ -1659,6 +1658,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, bool interruptible, bool no_wait) { struct ttm_bo_driver *driver = bo->bdev->driver; + struct ttm_bo_device *bdev = bo->bdev; void *sync_obj; void *sync_obj_arg; int ret = 0; @@ -1672,9 +1672,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, void *tmp_obj = bo->sync_obj; bo->sync_obj = NULL; clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); driver->sync_obj_unref(&tmp_obj); - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); continue; } @@ -1683,29 +1683,29 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, sync_obj = driver->sync_obj_ref(bo->sync_obj); sync_obj_arg = bo->sync_obj_arg; - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); ret = driver->sync_obj_wait(sync_obj, sync_obj_arg, lazy, interruptible); if (unlikely(ret != 0)) { driver->sync_obj_unref(&sync_obj); - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); return ret; } - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); if (likely(bo->sync_obj == sync_obj && bo->sync_obj_arg == sync_obj_arg)) { void *tmp_obj = bo->sync_obj; bo->sync_obj = NULL; clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); driver->sync_obj_unref(&sync_obj); driver->sync_obj_unref(&tmp_obj); - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); } else { - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); driver->sync_obj_unref(&sync_obj); - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); } } return 0; @@ -1714,6 +1714,7 @@ EXPORT_SYMBOL(ttm_bo_wait); int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) { + struct ttm_bo_device *bdev = bo->bdev; int ret = 0; /* @@ -1723,9 +1724,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) ret = ttm_bo_reserve(bo, true, no_wait, false, 0); if (unlikely(ret != 0)) return ret; - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); ret = ttm_bo_wait(bo, false, true, no_wait); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (likely(ret == 0)) atomic_inc(&bo->cpu_writers); ttm_bo_unreserve(bo); @@ -1797,9 +1798,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) * Wait for GPU, then move to system cached. 
*/ - spin_lock(&bo->lock); + spin_lock(&bo->bdev->fence_lock); ret = ttm_bo_wait(bo, false, false, false); - spin_unlock(&bo->lock); + spin_unlock(&bo->bdev->fence_lock); if (unlikely(ret != 0)) goto out; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 3106d5bcce32..4b75133d6606 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -337,7 +337,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, * TODO: Explicit member copy would probably be better here. */ - spin_lock_init(&fbo->lock); init_waitqueue_head(&fbo->event_queue); INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); @@ -520,7 +519,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct ttm_buffer_object *ghost_obj; void *tmp_obj = NULL; - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); if (bo->sync_obj) { tmp_obj = bo->sync_obj; bo->sync_obj = NULL; @@ -529,7 +528,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, bo->sync_obj_arg = sync_obj_arg; if (evict) { ret = ttm_bo_wait(bo, false, false, false); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (tmp_obj) driver->sync_obj_unref(&tmp_obj); if (ret) @@ -552,7 +551,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, */ set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (tmp_obj) driver->sync_obj_unref(&tmp_obj); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index fe6cb77899f4..8dd446cb778e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -118,17 +118,17 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) * move. */ - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) { ret = ttm_bo_wait(bo, false, true, false); - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); if (unlikely(ret != 0)) { retval = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; goto out_unlock; } } else - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); ret = ttm_mem_io_reserve(bdev, &bo->mem); diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 7dcc6470e2f5..c3a2100bace6 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -203,14 +203,15 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - struct ttm_bo_driver *driver = bo->bdev->driver; + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_bo_driver *driver = bdev->driver; void *old_sync_obj; - spin_lock(&bo->lock); + spin_lock(&bdev->fence_lock); old_sync_obj = bo->sync_obj; bo->sync_obj = driver->sync_obj_ref(sync_obj); bo->sync_obj_arg = entry->new_sync_obj_arg; - spin_unlock(&bo->lock); + spin_unlock(&bdev->fence_lock); ttm_bo_unreserve(bo); entry->reserved = false; if (old_sync_obj) diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index b0fc9c12554b..edacd483c59c 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -154,7 +154,6 @@ struct ttm_tt; * keeps one refcount. When this refcount reaches zero, * the object is destroyed. * @event_queue: Queue for processes waiting on buffer object status change. - * @lock: spinlock protecting mostly synchronization members. 
* @mem: structure describing current placement. * @persistant_swap_storage: Usually the swap storage is deleted for buffers * pinned in physical memory. If this behaviour is not desired, this member @@ -213,7 +212,6 @@ struct ttm_buffer_object { struct kref kref; struct kref list_kref; wait_queue_head_t event_queue; - spinlock_t lock; /** * Members protected by the bo::reserved lock. @@ -248,10 +246,10 @@ struct ttm_buffer_object { atomic_t reserved; /** - * Members protected by the bo::lock + * Members protected by struct buffer_object_device::fence_lock * In addition, setting sync_obj to anything else * than NULL requires bo::reserved to be held. This allows for - * checking NULL while reserved but not holding bo::lock. + * checking NULL while reserved but not holding the mentioned lock. */ void *sync_obj_arg; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 1e25a40c688e..ca8131e98300 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -510,6 +510,8 @@ struct ttm_bo_global { * * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. * @man: An array of mem_type_managers. + * @fence_lock: Protects the synchronizing members on *all* bos belonging + * to this device. * @addr_space_mm: Range manager for the device address space. * lru_lock: Spinlock that protects the buffer+device lru lists and * ddestroy lists. @@ -531,6 +533,7 @@ struct ttm_bo_device { struct ttm_bo_driver *driver; rwlock_t vm_lock; struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; + spinlock_t fence_lock; /* * Protected by the vm lock. */ -- cgit v1.2.3 From 95762c2b34069bf4adb7929969f1f5f5fc8a38df Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 17 Nov 2010 12:28:30 +0000 Subject: drm/ttm: Improved fencing of buffer object lists Drastically reduce the number of spin lock / unlock operations by performing unreserving and fencing under global locks. 
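The saving can be sketched outside the kernel. Below is a minimal user-space model, not the patch itself: pthread spinlocks stand in for bdev::fence_lock and the global lru_lock, and sync_obj_unref() is a stub for the driver's fence unreference. The batched variant mirrors the new ttm_eu_fence_buffer_objects(): both locks are taken once around the whole list, old fences are stashed in the entries, and the unrefs run after the locks are dropped, instead of paying lock/unlock pairs per buffer.

    #include <pthread.h>
    #include <stdio.h>

    #define NBUF 4

    /* Stand-ins for bdev::fence_lock and ttm_bo_global::lru_lock. */
    static pthread_spinlock_t fence_lock, lru_lock;

    struct buf {
        void *sync_obj;
        void *old_sync_obj;  /* plays the role of ttm_validate_buffer::old_sync_obj */
    };

    static void sync_obj_unref(void **obj)  /* stub for driver->sync_obj_unref() */
    {
        *obj = NULL;
    }

    /* Old scheme: lock traffic scales with the list length. */
    static void fence_per_buffer(struct buf *bufs, void *fence)
    {
        for (int i = 0; i < NBUF; i++) {
            void *old;

            pthread_spin_lock(&fence_lock);
            old = bufs[i].sync_obj;
            bufs[i].sync_obj = fence;
            pthread_spin_unlock(&fence_lock);

            pthread_spin_lock(&lru_lock);   /* ttm_bo_unreserve() */
            pthread_spin_unlock(&lru_lock);

            if (old)
                sync_obj_unref(&old);
        }
    }

    /* New scheme: two lock/unlock pairs total, unrefs deferred. */
    static void fence_batched(struct buf *bufs, void *fence)
    {
        pthread_spin_lock(&fence_lock);
        pthread_spin_lock(&lru_lock);
        for (int i = 0; i < NBUF; i++) {
            bufs[i].old_sync_obj = bufs[i].sync_obj;  /* stash, unref later */
            bufs[i].sync_obj = fence;  /* ttm_bo_unreserve_locked() would run here */
        }
        pthread_spin_unlock(&lru_lock);
        pthread_spin_unlock(&fence_lock);

        for (int i = 0; i < NBUF; i++)
            if (bufs[i].old_sync_obj)
                sync_obj_unref(&bufs[i].old_sync_obj);
    }

    int main(void)
    {
        struct buf bufs[NBUF] = { { 0 } };
        int token;

        pthread_spin_init(&fence_lock, PTHREAD_PROCESS_PRIVATE);
        pthread_spin_init(&lru_lock, PTHREAD_PROCESS_PRIVATE);
        fence_per_buffer(bufs, &token);
        fence_batched(bufs, &token);
        printf("per-buffer: %d lock/unlock pairs; batched: 2\n", 2 * NBUF);
        return 0;
    }

The diff below implements the same idea in TTM proper, via the new ttm_bo_unreserve_locked() helper and the old_sync_obj member added to struct ttm_validate_buffer.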
Signed-off-by: Thomas Hellstrom Reviewed-by: Jerome Glisse Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 11 ++++++++--- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 36 +++++++++++++++++++++++----------- include/drm/ttm/ttm_bo_driver.h | 10 ++++++++++ include/drm/ttm/ttm_execbuf_util.h | 2 ++ 4 files changed, 45 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index d93c73b1c471..551a5d31cadf 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -299,14 +299,19 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo, return ret; } +void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo) +{ + ttm_bo_add_to_lru(bo); + atomic_set(&bo->reserved, 0); + wake_up_all(&bo->event_queue); +} + void ttm_bo_unreserve(struct ttm_buffer_object *bo) { struct ttm_bo_global *glob = bo->glob; spin_lock(&glob->lru_lock); - ttm_bo_add_to_lru(bo); - atomic_set(&bo->reserved, 0); - wake_up_all(&bo->event_queue); + ttm_bo_unreserve_locked(bo); spin_unlock(&glob->lru_lock); } EXPORT_SYMBOL(ttm_bo_unreserve); diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index c3a2100bace6..b6da65cc502a 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -200,22 +200,36 @@ EXPORT_SYMBOL(ttm_eu_reserve_buffers); void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) { struct ttm_validate_buffer *entry; + struct ttm_buffer_object *bo; + struct ttm_bo_global *glob; + struct ttm_bo_device *bdev; + struct ttm_bo_driver *driver; - list_for_each_entry(entry, list, head) { - struct ttm_buffer_object *bo = entry->bo; - struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_driver *driver = bdev->driver; - void *old_sync_obj; + if (list_empty(list)) + return; + + bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo; + bdev = bo->bdev; + driver = bdev->driver; + glob = bo->glob; - spin_lock(&bdev->fence_lock); - old_sync_obj = bo->sync_obj; + spin_lock(&bdev->fence_lock); + spin_lock(&glob->lru_lock); + + list_for_each_entry(entry, list, head) { + bo = entry->bo; + entry->old_sync_obj = bo->sync_obj; bo->sync_obj = driver->sync_obj_ref(sync_obj); bo->sync_obj_arg = entry->new_sync_obj_arg; - spin_unlock(&bdev->fence_lock); - ttm_bo_unreserve(bo); + ttm_bo_unreserve_locked(bo); entry->reserved = false; - if (old_sync_obj) - driver->sync_obj_unref(&old_sync_obj); + } + spin_unlock(&glob->lru_lock); + spin_unlock(&bdev->fence_lock); + + list_for_each_entry(entry, list, head) { + if (entry->old_sync_obj) + driver->sync_obj_unref(&entry->old_sync_obj); } } EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index ca8131e98300..cfb9ca4ec1c4 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -909,6 +909,16 @@ extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, */ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo); +/** + * ttm_bo_unreserve_locked + * + * @bo: A pointer to a struct ttm_buffer_object. + * + * Unreserve a previous reservation of @bo. + * Needs to be called with struct ttm_bo_global::lru_lock held. 
+ */ +extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo); + /** * ttm_bo_wait_unreserved * diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index fd09b8438977..535ab00407e0 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -44,6 +44,7 @@ * @reserved: Indicates whether @bo has been reserved for validation. * @removed: Indicates whether @bo has been removed from lru lists. * @put_count: Number of outstanding references on bo::list_kref. + * @old_sync_obj: Pointer to a sync object about to be unreferenced */ struct ttm_validate_buffer { @@ -53,6 +54,7 @@ struct ttm_validate_buffer { bool reserved; bool removed; int put_count; + void *old_sync_obj; }; /** -- cgit v1.2.3 From 65705962025df490d13df59ec57c5329d1bd0a16 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 17 Nov 2010 12:28:31 +0000 Subject: drm/ttm/vmwgfx: Have TTM manage the validation sequence. Rather than having the driver supply the validation sequence, leave that responsibility to TTM. This saves some confusion and a function argument. Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 1 + drivers/gpu/drm/ttm/ttm_execbuf_util.c | 5 ++++- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1 - drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 3 +-- include/drm/ttm/ttm_bo_driver.h | 2 ++ include/drm/ttm/ttm_execbuf_util.h | 3 +-- 6 files changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 551a5d31cadf..25e4c2a1d1d8 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1539,6 +1539,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, bdev->dev_mapping = NULL; bdev->glob = glob; bdev->need_dma32 = need_dma32; + bdev->val_seq = 0; spin_lock_init(&bdev->fence_lock); mutex_lock(&glob->device_list_mutex); list_add_tail(&bdev->device_list, &glob->device_list); diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index b6da65cc502a..3832fe10b4df 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -126,11 +126,12 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation); * buffers in different orders. 
*/ -int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq) +int ttm_eu_reserve_buffers(struct list_head *list) { struct ttm_bo_global *glob; struct ttm_validate_buffer *entry; int ret; + uint32_t val_seq; if (list_empty(list)) return 0; @@ -146,6 +147,8 @@ retry: spin_lock(&glob->lru_lock); + val_seq = entry->bo->bdev->val_seq++; + list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index e7a58d055041..10fc01f69c40 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -264,7 +264,6 @@ struct vmw_private { */ struct vmw_sw_context ctx; - uint32_t val_seq; struct mutex cmdbuf_mutex; /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 76954e3528c1..41b95ed6dbcd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -653,8 +653,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size); if (unlikely(ret != 0)) goto out_err; - ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes, - dev_priv->val_seq++); + ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); if (unlikely(ret != 0)) goto out_err; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index cfb9ca4ec1c4..e3b2e245db1b 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -515,6 +515,7 @@ struct ttm_bo_global { * @addr_space_mm: Range manager for the device address space. * lru_lock: Spinlock that protects the buffer+device lru lists and * ddestroy lists. + * @val_seq: Current validation sequence. * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager. * If a GPU lockup has been detected, this is forced to 0. * @dev_mapping: A pointer to the struct address_space representing the @@ -544,6 +545,7 @@ struct ttm_bo_device { * Protected by the global:lru lock. */ struct list_head ddestroy; + uint32_t val_seq; /* * Protected by load / firstopen / lastclose /unload sync. diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h index 535ab00407e0..26cc7f9ffa41 100644 --- a/include/drm/ttm/ttm_execbuf_util.h +++ b/include/drm/ttm/ttm_execbuf_util.h @@ -72,7 +72,6 @@ extern void ttm_eu_backoff_reservation(struct list_head *list); * function ttm_eu_reserve_buffers * * @list: thread private list of ttm_validate_buffer structs. - * @val_seq: A unique sequence number. * * Tries to reserve bos pointed to by the list entries for validation. * If the function returns 0, all buffers are marked as "unfenced", @@ -94,7 +93,7 @@ extern void ttm_eu_backoff_reservation(struct list_head *list); * has failed. */ -extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq); +extern int ttm_eu_reserve_buffers(struct list_head *list); /** * function ttm_eu_fence_buffer_objects. -- cgit v1.2.3 From eba67093f535322cb4f1c4b737319c0907a0c81d Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 11 Nov 2010 09:41:57 +0100 Subject: drm/ttm: Fix up io_mem_reserve / io_mem_free calling This patch attempts to fix up shortcomings with the current calling sequences. 1) There's a fastpath where no locking occurs and only io_mem_reserve is called to obtain needed info for mapping. The fastpath is set per memory type manager.
2) If the fastpath is disabled, io_mem_reserve and io_mem_free will be exactly balanced and not called recursively for the same struct ttm_mem_reg. 3) Optionally the driver can choose to enable a per memory type manager LRU eviction mechanism that, when io_mem_reserve returns -EAGAIN, will attempt to kill user-space mappings of memory in that manager to free up needed resources. Signed-off-by: Thomas Hellstrom Reviewed-by: Ben Skeggs Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo.c | 44 ++++++++++--- drivers/gpu/drm/ttm/ttm_bo_util.c | 129 +++++++++++++++++++++++++++++++++----- drivers/gpu/drm/ttm/ttm_bo_vm.c | 23 ++++--- include/drm/ttm/ttm_bo_api.h | 6 +- include/drm/ttm/ttm_bo_driver.h | 104 +++++++++++++++--------------- 5 files changed, 226 insertions(+), 80 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 25e4c2a1d1d8..cf2ec562550e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -378,8 +378,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, int ret = 0; if (old_is_pci || new_is_pci || - ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) - ttm_bo_unmap_virtual(bo); + ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) { + ret = ttm_mem_io_lock(old_man, true); + if (unlikely(ret != 0)) + goto out_err; + ttm_bo_unmap_virtual_locked(bo); + ttm_mem_io_unlock(old_man); + } /* * Create and bind a ttm if required. @@ -466,7 +471,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } - ttm_bo_mem_put(bo, &bo->mem); atomic_set(&bo->reserved, 0); @@ -665,6 +669,7 @@ static void ttm_bo_release(struct kref *kref) struct ttm_buffer_object *bo = container_of(kref, struct ttm_buffer_object, kref); struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; if (likely(bo->vm_node != NULL)) { rb_erase(&bo->vm_rb, &bdev->addr_space_rb); @@ -672,6 +677,9 @@ static void ttm_bo_release(struct kref *kref) bo->vm_node = NULL; } write_unlock(&bdev->vm_lock); + ttm_mem_io_lock(man, false); + ttm_mem_io_free_vm(bo); + ttm_mem_io_unlock(man); ttm_bo_cleanup_refs_or_queue(bo); kref_put(&bo->list_kref, ttm_bo_release_list); write_lock(&bdev->vm_lock); @@ -728,7 +736,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, evict_mem = bo->mem; evict_mem.mm_node = NULL; - evict_mem.bus.io_reserved = false; + evict_mem.bus.io_reserved_vm = false; + evict_mem.bus.io_reserved_count = 0; placement.fpfn = 0; placement.lpfn = 0; @@ -1065,7 +1074,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.page_alignment = bo->mem.page_alignment; - mem.bus.io_reserved = false; + mem.bus.io_reserved_vm = false; + mem.bus.io_reserved_count = 0; /* * Determine where to move the buffer.
*/ @@ -1184,6 +1194,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->swap); + INIT_LIST_HEAD(&bo->io_reserve_lru); bo->bdev = bdev; bo->glob = bdev->glob; bo->type = type; @@ -1193,7 +1204,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev, bo->mem.num_pages = bo->num_pages; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; - bo->mem.bus.io_reserved = false; + bo->mem.bus.io_reserved_vm = false; + bo->mem.bus.io_reserved_count = 0; bo->buffer_start = buffer_start & PAGE_MASK; bo->priv_flags = 0; bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); @@ -1367,6 +1379,10 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, BUG_ON(type >= TTM_NUM_MEM_TYPES); man = &bdev->man[type]; BUG_ON(man->has_type); + man->io_reserve_fastpath = true; + man->use_io_reserve_lru = false; + mutex_init(&man->io_reserve_mutex); + INIT_LIST_HEAD(&man->io_reserve_lru); ret = bdev->driver->init_mem_type(bdev, type, man); if (ret) @@ -1574,7 +1590,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) return true; } -void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) +void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; loff_t offset = (loff_t) bo->addr_space_offset; @@ -1583,8 +1599,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) if (!bdev->dev_mapping) return; unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); - ttm_mem_io_free(bdev, &bo->mem); + ttm_mem_io_free_vm(bo); } + +void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; + + ttm_mem_io_lock(man, false); + ttm_bo_unmap_virtual_locked(bo); + ttm_mem_io_unlock(man); +} + + EXPORT_SYMBOL(ttm_bo_unmap_virtual); static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 4b75133d6606..a89839f83f6c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -75,37 +75,123 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_move_ttm); -int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) { - int ret; + if (likely(man->io_reserve_fastpath)) + return 0; + + if (interruptible) + return mutex_lock_interruptible(&man->io_reserve_mutex); + + mutex_lock(&man->io_reserve_mutex); + return 0; +} - if (!mem->bus.io_reserved) { - mem->bus.io_reserved = true; +void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) +{ + if (likely(man->io_reserve_fastpath)) + return; + + mutex_unlock(&man->io_reserve_mutex); +} + +static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) +{ + struct ttm_buffer_object *bo; + + if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru)) + return -EAGAIN; + + bo = list_first_entry(&man->io_reserve_lru, + struct ttm_buffer_object, + io_reserve_lru); + list_del_init(&bo->io_reserve_lru); + ttm_bo_unmap_virtual_locked(bo); + + return 0; +} + +static int ttm_mem_io_reserve(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + int ret = 0; + + if (!bdev->driver->io_mem_reserve) + return 0; + if (likely(man->io_reserve_fastpath)) + return bdev->driver->io_mem_reserve(bdev, mem); + + if 
(bdev->driver->io_mem_reserve && + mem->bus.io_reserved_count++ == 0) { +retry: ret = bdev->driver->io_mem_reserve(bdev, mem); + if (ret == -EAGAIN) { + ret = ttm_mem_io_evict(man); + if (ret == 0) + goto retry; + } + } + return ret; +} + +static void ttm_mem_io_free(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + + if (likely(man->io_reserve_fastpath)) + return; + + if (bdev->driver->io_mem_reserve && + --mem->bus.io_reserved_count == 0 && + bdev->driver->io_mem_free) + bdev->driver->io_mem_free(bdev, mem); + +} + +int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) +{ + struct ttm_mem_reg *mem = &bo->mem; + int ret; + + if (!mem->bus.io_reserved_vm) { + struct ttm_mem_type_manager *man = + &bo->bdev->man[mem->mem_type]; + + ret = ttm_mem_io_reserve(bo->bdev, mem); if (unlikely(ret != 0)) return ret; + mem->bus.io_reserved_vm = true; + if (man->use_io_reserve_lru) + list_add_tail(&bo->io_reserve_lru, + &man->io_reserve_lru); } return 0; } -void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) { - if (bdev->driver->io_mem_reserve) { - if (mem->bus.io_reserved) { - mem->bus.io_reserved = false; - bdev->driver->io_mem_free(bdev, mem); - } + struct ttm_mem_reg *mem = &bo->mem; + + if (mem->bus.io_reserved_vm) { + mem->bus.io_reserved_vm = false; + list_del_init(&bo->io_reserve_lru); + ttm_mem_io_free(bo->bdev, mem); } } int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, void **virtual) { + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; int ret; void *addr; *virtual = NULL; + (void) ttm_mem_io_lock(man, false); ret = ttm_mem_io_reserve(bdev, mem); + ttm_mem_io_unlock(man); if (ret || !mem->bus.is_iomem) return ret; @@ -117,7 +203,9 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, else addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); if (!addr) { + (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(bdev, mem); + ttm_mem_io_unlock(man); return -ENOMEM; } } @@ -134,7 +222,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, if (virtual && mem->bus.addr == NULL) iounmap(virtual); + (void) ttm_mem_io_lock(man, false); ttm_mem_io_free(bdev, mem); + ttm_mem_io_unlock(man); } static int ttm_copy_io_page(void *dst, void *src, unsigned long page) @@ -231,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; struct ttm_tt *ttm = bo->ttm; struct ttm_mem_reg *old_mem = &bo->mem; - struct ttm_mem_reg old_copy = *old_mem; + struct ttm_mem_reg old_copy; void *old_iomap; void *new_iomap; int ret; @@ -281,7 +371,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, mb(); out2: ttm_bo_free_old_node(bo); - + old_copy = *old_mem; *old_mem = *new_mem; new_mem->mm_node = NULL; @@ -292,7 +382,7 @@ out2: } out1: - ttm_mem_reg_iounmap(bdev, new_mem, new_iomap); + ttm_mem_reg_iounmap(bdev, old_mem, new_iomap); out: ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); return ret; @@ -341,6 +431,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, INIT_LIST_HEAD(&fbo->ddestroy); INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); + INIT_LIST_HEAD(&fbo->io_reserve_lru); fbo->vm_node = NULL; atomic_set(&fbo->cpu_writers, 0); @@ -452,6 +543,8 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page, unsigned long num_pages, struct 
ttm_bo_kmap_obj *map) { + struct ttm_mem_type_manager *man = + &bo->bdev->man[bo->mem.mem_type]; unsigned long offset, size; int ret; @@ -466,7 +559,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) return -EPERM; #endif + (void) ttm_mem_io_lock(man, false); ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); + ttm_mem_io_unlock(man); if (ret) return ret; if (!bo->mem.bus.is_iomem) { @@ -481,12 +576,15 @@ EXPORT_SYMBOL(ttm_bo_kmap); void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) { + struct ttm_buffer_object *bo = map->bo; + struct ttm_mem_type_manager *man = + &bo->bdev->man[bo->mem.mem_type]; + if (!map->virtual) return; switch (map->bo_kmap_type) { case ttm_bo_map_iomap: iounmap(map->virtual); - ttm_mem_io_free(map->bo->bdev, &map->bo->mem); break; case ttm_bo_map_vmap: vunmap(map->virtual); @@ -499,6 +597,9 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) default: BUG(); } + (void) ttm_mem_io_lock(man, false); + ttm_mem_io_free(map->bo->bdev, &map->bo->mem); + ttm_mem_io_unlock(man); map->virtual = NULL; map->page = NULL; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 8dd446cb778e..221b924acebe 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -83,6 +83,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) int i; unsigned long address = (unsigned long)vmf->virtual_address; int retval = VM_FAULT_NOPAGE; + struct ttm_mem_type_manager *man = + &bdev->man[bo->mem.mem_type]; /* * Work around locking order reversal in fault / nopfn @@ -130,12 +132,16 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } else spin_unlock(&bdev->fence_lock); - - ret = ttm_mem_io_reserve(bdev, &bo->mem); - if (ret) { - retval = VM_FAULT_SIGBUS; + ret = ttm_mem_io_lock(man, true); + if (unlikely(ret != 0)) { + retval = VM_FAULT_NOPAGE; goto out_unlock; } + ret = ttm_mem_io_reserve_vm(bo); + if (unlikely(ret != 0)) { + retval = VM_FAULT_SIGBUS; + goto out_io_unlock; + } page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + bo->vm_node->start - vma->vm_pgoff; @@ -144,7 +150,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (unlikely(page_offset >= bo->num_pages)) { retval = VM_FAULT_SIGBUS; - goto out_unlock; + goto out_io_unlock; } /* @@ -182,7 +188,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) page = ttm_tt_get_page(ttm, page_offset); if (unlikely(!page && i == 0)) { retval = VM_FAULT_OOM; - goto out_unlock; + goto out_io_unlock; } else if (unlikely(!page)) { break; } @@ -200,14 +206,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) else if (unlikely(ret != 0)) { retval = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; - goto out_unlock; + goto out_io_unlock; } address += PAGE_SIZE; if (unlikely(++page_offset >= page_last)) break; } - +out_io_unlock: + ttm_mem_io_unlock(man); out_unlock: ttm_bo_unreserve(bo); return retval; diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index edacd483c59c..50852aad260a 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -74,6 +74,8 @@ struct ttm_placement { * @is_iomem: is this io memory ? 
* @size: size in byte * @offset: offset from the base address + * @io_reserved_vm: The VM system has a refcount in @io_reserved_count + * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve * * Structure indicating the bus placement of an object. */ @@ -83,7 +85,8 @@ struct ttm_bus_placement { unsigned long size; unsigned long offset; bool is_iomem; - bool io_reserved; + bool io_reserved_vm; + uint64_t io_reserved_count; }; @@ -235,6 +238,7 @@ struct ttm_buffer_object { struct list_head lru; struct list_head ddestroy; struct list_head swap; + struct list_head io_reserve_lru; uint32_t val_seq; bool seq_valid; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index e3b2e245db1b..1da8af6ac884 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -179,30 +179,6 @@ struct ttm_tt { #define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ #define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */ -/** - * struct ttm_mem_type_manager - * - * @has_type: The memory type has been initialized. - * @use_type: The memory type is enabled. - * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory - * managed by this memory type. - * @gpu_offset: If used, the GPU offset of the first managed page of - * fixed memory or the first managed location in an aperture. - * @size: Size of the managed region. - * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX, - * as defined in ttm_placement_common.h - * @default_caching: The default caching policy used for a buffer object - * placed in this memory type if the user doesn't provide one. - * @manager: The range manager used for this memory type. FIXME: If the aperture - * has a page size different from the underlying system, the granularity - * of this manager should take care of this. But the range allocating code - * in ttm_bo.c needs to be modified for this. - * @lru: The lru list for this memory type. - * - * This structure is used to identify and manage memory types for a device. - * It's set up by the ttm_bo_driver::init_mem_type method. - */ - struct ttm_mem_type_manager; struct ttm_mem_type_manager_func { @@ -287,6 +263,36 @@ struct ttm_mem_type_manager_func { void (*debug)(struct ttm_mem_type_manager *man, const char *prefix); }; +/** + * struct ttm_mem_type_manager + * + * @has_type: The memory type has been initialized. + * @use_type: The memory type is enabled. + * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory + * managed by this memory type. + * @gpu_offset: If used, the GPU offset of the first managed page of + * fixed memory or the first managed location in an aperture. + * @size: Size of the managed region. + * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX, + * as defined in ttm_placement_common.h + * @default_caching: The default caching policy used for a buffer object + * placed in this memory type if the user doesn't provide one. + * @func: structure pointer implementing the range manager. See above + * @priv: Driver private closure for @func. + * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures + * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions + * reserved by the TTM vm system. + * @io_reserve_lru: Optional lru list for unreserving io mem regions. + * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain + * static information. bdev::driver::io_mem_free is never used. 
+ * @lru: The lru list for this memory type. + * + * This structure is used to identify and manage memory types for a device. + * It's set up by the ttm_bo_driver::init_mem_type method. + */ + + + struct ttm_mem_type_manager { struct ttm_bo_device *bdev; @@ -303,6 +309,15 @@ struct ttm_mem_type_manager { uint32_t default_caching; const struct ttm_mem_type_manager_func *func; void *priv; + struct mutex io_reserve_mutex; + bool use_io_reserve_lru; + bool io_reserve_fastpath; + + /* + * Protected by @io_reserve_mutex: + */ + + struct list_head io_reserve_lru; /* * Protected by the global->lru_lock. @@ -758,31 +773,6 @@ extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo, extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); -/** - * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory. - * - * @bo Pointer to a struct ttm_buffer_object. - * @bus_base On return the base of the PCI region - * @bus_offset On return the byte offset into the PCI region - * @bus_size On return the byte size of the buffer object or zero if - * the buffer object memory is not accessible through a PCI region. - * - * Returns: - * -EINVAL if the buffer object is currently not mappable. - * 0 otherwise. - */ - -extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem, - unsigned long *bus_base, - unsigned long *bus_offset, - unsigned long *bus_size); - -extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); -extern void ttm_mem_io_free(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem); - extern void ttm_bo_global_release(struct drm_global_reference *ref); extern int ttm_bo_global_init(struct drm_global_reference *ref); @@ -814,6 +804,22 @@ extern int ttm_bo_device_init(struct ttm_bo_device *bdev, */ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo); +/** + * ttm_bo_unmap_virtual + * + * @bo: tear down the virtual mappings for this BO + * + * The caller must take ttm_mem_io_lock before calling this function. + */ +extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo); + +extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo); +extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo); +extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man, + bool interruptible); +extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man); + + /** * ttm_bo_reserve: * -- cgit v1.2.3 From 147666fb3b93b8c484f562da33a37f886ddff768 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Wed, 17 Nov 2010 12:38:32 +0000 Subject: drm/radeon: Use the ttm execbuf utilities Rather than re-implementing in the Radeon driver, use the execbuf / cs / pushbuf utilities that come with TTM. This comes with an even greater benefit now that many spinlocks have been optimized away...
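The conversion reduces radeon's submission path to three TTM calls: reserve the whole validation list, do the driver work, then either fence-and-unreserve everything or back off. The stand-alone sketch below shows that control flow; the ttm_eu_* names match the API used in the patch, but here they are local no-op stubs, and submit_cs()/validate_and_emit() are hypothetical driver hooks, so the flow can compile outside the kernel.

    #include <stdio.h>

    struct list_head { int unused; };  /* stub for the kernel list */

    /* No-op stand-ins for the TTM execbuf utilities exercised below. */
    static int ttm_eu_reserve_buffers(struct list_head *list)
    {
        (void)list;
        return 0;  /* all buffers reserved, deadlock-safe via val_seq */
    }

    static void ttm_eu_fence_buffer_objects(struct list_head *list, void *fence)
    {
        (void)list;
        (void)fence;  /* fence every bo and unreserve under the global locks */
    }

    static void ttm_eu_backoff_reservation(struct list_head *list)
    {
        (void)list;  /* drop reservations without fencing */
    }

    static int validate_and_emit(struct list_head *list, void **fence)
    {
        static int f;

        (void)list;
        *fence = &f;  /* hypothetical: validate placement, emit CS, create fence */
        return 0;
    }

    /* Typical driver submission flow after the conversion. */
    static int submit_cs(struct list_head *validated)
    {
        void *fence = NULL;
        int r;

        r = ttm_eu_reserve_buffers(validated);
        if (r)
            return r;

        r = validate_and_emit(validated, &fence);
        if (r || !fence) {
            ttm_eu_backoff_reservation(validated);
            return r;
        }

        ttm_eu_fence_buffer_objects(validated, fence);
        return 0;
    }

    int main(void)
    {
        struct list_head validated = { 0 };

        printf("submit_cs() -> %d\n", submit_cs(&validated));
        return 0;
    }

The radeon_cs_parser_fini() hunk below follows exactly this shape: fence the list on success, back off the reservation on error.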
Signed-off-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon.h | 4 +-- drivers/gpu/drm/radeon/radeon_cs.c | 17 ++++++----- drivers/gpu/drm/radeon/radeon_object.c | 55 +++------------------------------- drivers/gpu/drm/radeon/radeon_object.h | 3 -- 4 files changed, 16 insertions(+), 63 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3a7095743d44..b1e073b7381f 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -69,6 +69,7 @@ #include #include #include +#include #include "radeon_family.h" #include "radeon_mode.h" @@ -259,13 +260,12 @@ struct radeon_bo { }; struct radeon_bo_list { - struct list_head list; + struct ttm_validate_buffer tv; struct radeon_bo *bo; uint64_t gpu_offset; unsigned rdomain; unsigned wdomain; u32 tiling_flags; - bool reserved; }; /* diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 6d64a2705f12..35b5eb8fbe2a 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -77,13 +77,13 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p) p->relocs_ptr[i] = &p->relocs[i]; p->relocs[i].robj = p->relocs[i].gobj->driver_private; p->relocs[i].lobj.bo = p->relocs[i].robj; - p->relocs[i].lobj.rdomain = r->read_domains; p->relocs[i].lobj.wdomain = r->write_domain; + p->relocs[i].lobj.rdomain = r->read_domains; + p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo; p->relocs[i].handle = r->handle; p->relocs[i].flags = r->flags; - INIT_LIST_HEAD(&p->relocs[i].lobj.list); radeon_bo_list_add_object(&p->relocs[i].lobj, - &p->validated); + &p->validated); } } return radeon_bo_list_validate(&p->validated); @@ -189,10 +189,13 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) { unsigned i; - if (!error && parser->ib) { - radeon_bo_list_fence(&parser->validated, parser->ib->fence); - } - radeon_bo_list_unreserve(&parser->validated); + + if (!error && parser->ib) + ttm_eu_fence_buffer_objects(&parser->validated, + parser->ib->fence); + else + ttm_eu_backoff_reservation(&parser->validated); + if (parser->relocs != NULL) { for (i = 0; i < parser->nrelocs; i++) { if (parser->relocs[i].gobj) diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index e939cb6a91cc..a8594d289bcf 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -293,34 +293,9 @@ void radeon_bo_list_add_object(struct radeon_bo_list *lobj, struct list_head *head) { if (lobj->wdomain) { - list_add(&lobj->list, head); + list_add(&lobj->tv.head, head); } else { - list_add_tail(&lobj->list, head); - } -} - -int radeon_bo_list_reserve(struct list_head *head) -{ - struct radeon_bo_list *lobj; - int r; - - list_for_each_entry(lobj, head, list){ - r = radeon_bo_reserve(lobj->bo, false); - if (unlikely(r != 0)) - return r; - lobj->reserved = true; - } - return 0; -} - -void radeon_bo_list_unreserve(struct list_head *head) -{ - struct radeon_bo_list *lobj; - - list_for_each_entry(lobj, head, list) { - /* only unreserve object we successfully reserved */ - if (lobj->reserved && radeon_bo_is_reserved(lobj->bo)) - radeon_bo_unreserve(lobj->bo); + list_add_tail(&lobj->tv.head, head); } } @@ -331,14 +306,11 @@ int radeon_bo_list_validate(struct list_head *head) u32 domain; int r; - list_for_each_entry(lobj, head, list) { - lobj->reserved = false; - } - r = radeon_bo_list_reserve(head); + r = ttm_eu_reserve_buffers(head); if (unlikely(r != 0)) 
{ return r; } - list_for_each_entry(lobj, head, list) { + list_for_each_entry(lobj, head, tv.head) { bo = lobj->bo; if (!bo->pin_count) { domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain; @@ -361,25 +333,6 @@ int radeon_bo_list_validate(struct list_head *head) return 0; } -void radeon_bo_list_fence(struct list_head *head, void *fence) -{ - struct radeon_bo_list *lobj; - struct radeon_bo *bo; - struct radeon_fence *old_fence = NULL; - - list_for_each_entry(lobj, head, list) { - bo = lobj->bo; - spin_lock(&bo->tbo.bdev->fence_lock); - old_fence = (struct radeon_fence *)bo->tbo.sync_obj; - bo->tbo.sync_obj = radeon_fence_ref(fence); - bo->tbo.sync_obj_arg = NULL; - spin_unlock(&bo->tbo.bdev->fence_lock); - if (old_fence) { - radeon_fence_unref(&old_fence); - } - } -} - int radeon_bo_fbdev_mmap(struct radeon_bo *bo, struct vm_area_struct *vma) { diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index fd536751f865..22d4c237dea5 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -152,10 +152,7 @@ extern int radeon_bo_init(struct radeon_device *rdev); extern void radeon_bo_fini(struct radeon_device *rdev); extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, struct list_head *head); -extern int radeon_bo_list_reserve(struct list_head *head); -extern void radeon_bo_list_unreserve(struct list_head *head); extern int radeon_bo_list_validate(struct list_head *head); -extern void radeon_bo_list_fence(struct list_head *head, void *fence); extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, struct vm_area_struct *vma); extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, -- cgit v1.2.3 From c724e8a9407683a8a2ee8eb00b972badf237bbe1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 22 Nov 2010 08:07:02 +0000 Subject: drm/i915: Capture pinned buffers on error The pinned buffers are useful for diagnosing errors in setting up state for the chipset, which may not necessarily be 'active' at the time of the error, e.g. the cursor buffer object. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 63 +++++++++++++++++---------- drivers/gpu/drm/i915/i915_drv.h | 4 +- drivers/gpu/drm/i915/i915_irq.c | 87 ++++++++++++++++++++++++------------- 3 files changed, 98 insertions(+), 56 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4c8fae9baaad..24a88ac63212 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -609,6 +609,36 @@ static const char *purgeable_flag(int purgeable) return purgeable ? 
" purgeable" : ""; } +static void print_error_buffers(struct seq_file *m, + const char *name, + struct drm_i915_error_buffer *err, + int count) +{ + seq_printf(m, "%s [%d]:\n", name, count); + + while (count--) { + seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s", + err->gtt_offset, + err->size, + err->read_domains, + err->write_domain, + err->seqno, + pin_flag(err->pinned), + tiling_flag(err->tiling), + dirty_flag(err->dirty), + purgeable_flag(err->purgeable), + ring_str(err->ring)); + + if (err->name) + seq_printf(m, " (name: %d)", err->name); + if (err->fence_reg != I915_FENCE_REG_NONE) + seq_printf(m, " (fence: %d)", err->fence_reg); + + seq_printf(m, "\n"); + err++; + } +} + static int i915_error_state(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -658,30 +688,15 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); seq_printf(m, " seqno: 0x%08x\n", error->seqno); - if (error->active_bo_count) { - seq_printf(m, "Buffers [%d]:\n", error->active_bo_count); - - for (i = 0; i < error->active_bo_count; i++) { - seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s %s", - error->active_bo[i].gtt_offset, - error->active_bo[i].size, - error->active_bo[i].read_domains, - error->active_bo[i].write_domain, - error->active_bo[i].seqno, - pin_flag(error->active_bo[i].pinned), - tiling_flag(error->active_bo[i].tiling), - dirty_flag(error->active_bo[i].dirty), - purgeable_flag(error->active_bo[i].purgeable), - ring_str(error->active_bo[i].ring)); - - if (error->active_bo[i].name) - seq_printf(m, " (name: %d)", error->active_bo[i].name); - if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE) - seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg); - - seq_printf(m, "\n"); - } - } + if (error->active_bo) + print_error_buffers(m, "Active", + error->active_bo, + error->active_bo_count); + + if (error->pinned_bo) + print_error_buffers(m, "Pinned", + error->pinned_bo, + error->pinned_bo_count); for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { if (error->batchbuffer[i]) { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 73a41f7ab8c6..826c7237409b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -184,8 +184,8 @@ struct drm_i915_error_state { u32 dirty:1; u32 purgeable:1; u32 ring:4; - } *active_bo; - u32 active_bo_count; + } *active_bo, *pinned_bo; + u32 active_bo_count, pinned_bo_count; struct intel_overlay_error_state *overlay; }; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index ef3503733ebb..bbcd5da89ba5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -552,6 +552,40 @@ i915_ringbuffer_last_batch(struct drm_device *dev, return bbaddr; } +static u32 capture_bo_list(struct drm_i915_error_buffer *err, + int count, + struct list_head *head) +{ + struct drm_i915_gem_object *obj; + int i = 0; + + list_for_each_entry(obj, head, mm_list) { + err->size = obj->base.size; + err->name = obj->base.name; + err->seqno = obj->last_rendering_seqno; + err->gtt_offset = obj->gtt_offset; + err->read_domains = obj->base.read_domains; + err->write_domain = obj->base.write_domain; + err->fence_reg = obj->fence_reg; + err->pinned = 0; + if (obj->pin_count > 0) + err->pinned = 1; + if (obj->user_pin_count > 0) + err->pinned = -1; + err->tiling = obj->tiling_mode; + err->dirty = obj->dirty; + err->purgeable = obj->madv != I915_MADV_WILLNEED; + 
err->ring = obj->ring->id; + + if (++i == count) + break; + + err++; + } + + return i; +} + /** * i915_capture_error_state - capture an error record for later analysis * @dev: drm device @@ -700,42 +734,35 @@ static void i915_capture_error_state(struct drm_device *dev) error->ringbuffer = i915_error_object_create(dev, dev_priv->render_ring.gem_object); - /* Record buffers on the active list. */ + /* Record buffers on the active and pinned lists. */ error->active_bo = NULL; - error->active_bo_count = 0; + error->pinned_bo = NULL; - if (count) + error->active_bo_count = count; + list_for_each_entry(obj_priv, &dev_priv->mm.pinned_list, mm_list) + count++; + error->pinned_bo_count = count - error->active_bo_count; + + if (count) { error->active_bo = kmalloc(sizeof(*error->active_bo)*count, GFP_ATOMIC); - - if (error->active_bo) { - int i = 0; - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { - struct drm_gem_object *obj = &obj_priv->base; - - error->active_bo[i].size = obj->size; - error->active_bo[i].name = obj->name; - error->active_bo[i].seqno = obj_priv->last_rendering_seqno; - error->active_bo[i].gtt_offset = obj_priv->gtt_offset; - error->active_bo[i].read_domains = obj->read_domains; - error->active_bo[i].write_domain = obj->write_domain; - error->active_bo[i].fence_reg = obj_priv->fence_reg; - error->active_bo[i].pinned = 0; - if (obj_priv->pin_count > 0) - error->active_bo[i].pinned = 1; - if (obj_priv->user_pin_count > 0) - error->active_bo[i].pinned = -1; - error->active_bo[i].tiling = obj_priv->tiling_mode; - error->active_bo[i].dirty = obj_priv->dirty; - error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED; - error->active_bo[i].ring = obj_priv->ring->id; - - if (++i == count) - break; - } - error->active_bo_count = i; + if (error->active_bo) + error->pinned_bo = + error->active_bo + error->active_bo_count; } + if (error->active_bo) + error->active_bo_count = + capture_bo_list(error->active_bo, + error->active_bo_count, + &dev_priv->mm.active_list); + + if (error->pinned_bo) + error->pinned_bo_count = + capture_bo_list(error->pinned_bo, + error->pinned_bo_count, + &dev_priv->mm.pinned_list); + do_gettimeofday(&error->time); error->overlay = intel_overlay_capture_error_state(dev); -- cgit v1.2.3 From c4a1d9e4dc5d5313cfec2cc0c9d630efe8a6f287 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 21 Nov 2010 13:12:35 +0000 Subject: drm/i915: Capture interesting display registers on error When trying to diagnose mysterious errors on resume, capture the display register contents as well. 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 3 + drivers/gpu/drm/i915/i915_drv.h | 8 +++ drivers/gpu/drm/i915/i915_irq.c | 1 + drivers/gpu/drm/i915/i915_reg.h | 6 +- drivers/gpu/drm/i915/intel_display.c | 110 +++++++++++++++++++++++++++++++++++ 5 files changed, 127 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 24a88ac63212..421b8414b577 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -729,6 +729,9 @@ static int i915_error_state(struct seq_file *m, void *unused) if (error->overlay) intel_overlay_print_error_state(m, error->overlay); + if (error->display) + intel_display_print_error_state(m, dev, error->display); + out: spin_unlock_irqrestore(&dev_priv->error_lock, flags); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 826c7237409b..4c20ad92c0f3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -140,6 +140,8 @@ struct sdvo_device_mapping { u8 ddc_pin; }; +struct intel_display_error_state; + struct drm_i915_error_state { u32 eir; u32 pgtbl_er; @@ -187,6 +189,7 @@ struct drm_i915_error_state { } *active_bo, *pinned_bo; u32 active_bo_count, pinned_bo_count; struct intel_overlay_error_state *overlay; + struct intel_display_error_state *display; }; struct drm_i915_display_funcs { @@ -1223,6 +1226,11 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); #ifdef CONFIG_DEBUG_FS extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); + +extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); +extern void intel_display_print_error_state(struct seq_file *m, + struct drm_device *dev, + struct intel_display_error_state *error); #endif /** diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index bbcd5da89ba5..0b6052abedd1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -766,6 +766,7 @@ static void i915_capture_error_state(struct drm_device *dev) do_gettimeofday(&error->time); error->overlay = intel_overlay_capture_error_state(dev); + error->display = intel_display_capture_error_state(dev); spin_lock_irqsave(&dev_priv->error_lock, flags); if (dev_priv->first_error == NULL) { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 886c0e072490..ec2a8b07ba5b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1193,7 +1193,6 @@ #define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B) #define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B) #define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B) -#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) #define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B) /* VGA port control */ @@ -2207,6 +2206,7 @@ #define PIPE_6BPC (2 << 5) #define PIPE_12BPC (3 << 5) +#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) #define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF) #define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL) @@ -2376,6 +2376,10 @@ #define CURBBASE 0x700c4 #define CURBPOS 0x700c8 +#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR) +#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE) +#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS) + /* Display A control */ #define DSPACNTR 0x70180 #define DISPLAY_PLANE_ENABLE (1<<31) 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3fa5aaa941d2..d4bc443f43fc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6269,3 +6269,113 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); return 0; } + +#ifdef CONFIG_DEBUG_FS +#include + +struct intel_display_error_state { + struct intel_cursor_error_state { + u32 control; + u32 position; + u32 base; + u32 size; + } cursor[2]; + + struct intel_pipe_error_state { + u32 conf; + u32 source; + + u32 htotal; + u32 hblank; + u32 hsync; + u32 vtotal; + u32 vblank; + u32 vsync; + } pipe[2]; + + struct intel_plane_error_state { + u32 control; + u32 stride; + u32 size; + u32 pos; + u32 addr; + u32 surface; + u32 tile_offset; + } plane[2]; +}; + +struct intel_display_error_state * +intel_display_capture_error_state(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_display_error_state *error; + int i; + + error = kmalloc(sizeof(*error), GFP_ATOMIC); + if (error == NULL) + return NULL; + + for (i = 0; i < 2; i++) { + error->cursor[i].control = I915_READ(CURCNTR(i)); + error->cursor[i].position = I915_READ(CURPOS(i)); + error->cursor[i].base = I915_READ(CURBASE(i)); + + error->plane[i].control = I915_READ(DSPCNTR(i)); + error->plane[i].stride = I915_READ(DSPSTRIDE(i)); + error->plane[i].size = I915_READ(DSPSIZE(i)); + error->plane[i].pos= I915_READ(DSPPOS(i)); + error->plane[i].addr = I915_READ(DSPADDR(i)); + if (INTEL_INFO(dev)->gen >= 4) { + error->plane[i].surface = I915_READ(DSPSURF(i)); + error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); + } + + error->pipe[i].conf = I915_READ(PIPECONF(i)); + error->pipe[i].source = I915_READ(PIPESRC(i)); + error->pipe[i].htotal = I915_READ(HTOTAL(i)); + error->pipe[i].hblank = I915_READ(HBLANK(i)); + error->pipe[i].hsync = I915_READ(HSYNC(i)); + error->pipe[i].vtotal = I915_READ(VTOTAL(i)); + error->pipe[i].vblank = I915_READ(VBLANK(i)); + error->pipe[i].vsync = I915_READ(VSYNC(i)); + } + + return error; +} + +void +intel_display_print_error_state(struct seq_file *m, + struct drm_device *dev, + struct intel_display_error_state *error) +{ + int i; + + for (i = 0; i < 2; i++) { + seq_printf(m, "Pipe [%d]:\n", i); + seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); + seq_printf(m, " SRC: %08x\n", error->pipe[i].source); + seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); + seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); + seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); + seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); + seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); + seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); + + seq_printf(m, "Plane [%d]:\n", i); + seq_printf(m, " CNTR: %08x\n", error->plane[i].control); + seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); + seq_printf(m, " SIZE: %08x\n", error->plane[i].size); + seq_printf(m, " POS: %08x\n", error->plane[i].pos); + seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); + if (INTEL_INFO(dev)->gen >= 4) { + seq_printf(m, " SURF: %08x\n", error->plane[i].surface); + seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); + } + + seq_printf(m, "Cursor [%d]:\n", i); + seq_printf(m, " CNTR: %08x\n", error->cursor[i].control); + seq_printf(m, " POS: %08x\n", error->cursor[i].position); + seq_printf(m, " BASE: %08x\n", error->cursor[i].base); + } +} +#endif -- cgit 
v1.2.3 From df9c2042858e85ab46731c13e708a5b0799db848 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 18 Nov 2010 09:31:12 +0800 Subject: drm/i915: Correct a comment about the use of the workqueue. It isn't used for the hangcheck, which does its work right from the timer trigger, but hangcheck can lead to error state recording, which is run off of the workqueue. Signed-off-by: Eric Anholt Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 4cd04917868d..cf4e06a9417a 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1962,7 +1962,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) * bo. * * It is also used for periodic low-priority events, such as - * idle-timers and hangcheck. + * idle-timers and recording error state. * * All tasks on the workqueue are expected to acquire the dev mutex * so there is no point in running more than one instance of the -- cgit v1.2.3 From 75a6898ffd20a13aba8d03f005eb4ab940134fcb Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 18 Nov 2010 09:31:13 +0800 Subject: drm/i915: Also reinit the BSD and BLT rings after a GPU reset. Signed-off-by: Eric Anholt Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f0e6bd06fc0e..99b574050fde 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -472,9 +472,14 @@ int i915_reset(struct drm_device *dev, u8 flags) */ if (drm_core_check_feature(dev, DRIVER_MODESET) || !dev_priv->mm.suspended) { - struct intel_ring_buffer *ring = &dev_priv->render_ring; dev_priv->mm.suspended = 0; - ring->init(ring); + + dev_priv->render_ring.init(&dev_priv->render_ring); + if (HAS_BSD(dev)) + dev_priv->bsd_ring.init(&dev_priv->bsd_ring); + if (HAS_BLT(dev)) + dev_priv->blt_ring.init(&dev_priv->blt_ring); + mutex_unlock(&dev->struct_mutex); drm_irq_uninstall(dev); drm_irq_install(dev); -- cgit v1.2.3 From cff458c21063de960bde0e39770a0f4cd0477d95 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 18 Nov 2010 09:31:14 +0800 Subject: drm/i915: Add support for GPU reset on gen6. This has proven sufficient to recover from a hang of the GPU using the gem_bad_blit test while at the KMS console then starting X. When attempting the same during an X session, the timer doesn't appear to trigger. 
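The mechanism in the diff below is a single register write followed by a timed poll: set GEN6_GRDOM_FULL in GEN6_GDRST and wait up to 500 ms for the hardware to clear the bit. A user-space model of that wait_for() pattern, with a plain variable playing the register and the self-clear simulated after a few reads, looks like this:

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>

    #define GEN6_GRDOM_FULL (1 << 0)

    static uint32_t fake_gdrst;  /* stand-in for the GEN6_GDRST register */
    static int reads;

    static void write_reg(uint32_t val)
    {
        fake_gdrst = val;
    }

    static uint32_t read_reg(void)
    {
        if (++reads >= 3)  /* model: hw clears the bit once the reset completes */
            fake_gdrst &= ~GEN6_GRDOM_FULL;
        return fake_gdrst;
    }

    /* Models the kernel's wait_for(): poll until the mask clears or we time out. */
    static int wait_for_clear(uint32_t mask, int timeout_ms)
    {
        for (int elapsed = 0; elapsed < timeout_ms; elapsed++) {
            if ((read_reg() & mask) == 0)
                return 0;
            usleep(1000);  /* 1 ms poll interval */
        }
        return -1;  /* the kernel helper reports a timeout here */
    }

    int main(void)
    {
        write_reg(GEN6_GRDOM_FULL);  /* kick off a full-domain reset */
        printf("reset %s\n",
               wait_for_clear(GEN6_GRDOM_FULL, 500) ? "timed out" : "completed");
        return 0;
    }

The same write-then-poll shape covers the per-domain reset bits (render, media, blt) defined alongside GEN6_GRDOM_FULL in the patch.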
Signed-off-by: Eric Anholt Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 11 +++++++++++ drivers/gpu/drm/i915/i915_reg.h | 6 ++++++ 2 files changed, 17 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 99b574050fde..8c5541950b8d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -405,6 +405,14 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags) return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); } +static int gen6_do_reset(struct drm_device *dev, u8 flags) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL); + return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); +} + /** * i965_reset - reset chip after a hang * @dev: drm device to reset @@ -439,6 +447,9 @@ int i915_reset(struct drm_device *dev, u8 flags) if (get_seconds() - dev_priv->last_gpu_reset < 5) { DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); } else switch (INTEL_INFO(dev)->gen) { + case 6: + ret = gen6_do_reset(dev, flags); + break; case 5: ret = ironlake_do_reset(dev, flags); break; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ec2a8b07ba5b..c668b2fb7e3d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -78,6 +78,12 @@ #define GRDOM_RENDER (1<<2) #define GRDOM_MEDIA (3<<2) +#define GEN6_GDRST 0x941c +#define GEN6_GRDOM_FULL (1 << 0) +#define GEN6_GRDOM_RENDER (1 << 1) +#define GEN6_GRDOM_MEDIA (1 << 2) +#define GEN6_GRDOM_BLT (1 << 3) + /* VGA stuff */ #define VGA_ST01_MDA 0x3ba -- cgit v1.2.3 From 5f75377db4d8d81ca4465b54d3c339c70c6a0fa2 Mon Sep 17 00:00:00 2001 From: Keith Packard Date: Mon, 22 Nov 2010 09:24:22 +0000 Subject: drm/i915: Fix restore of 965 fence regs since the register tracing change. We were reading our 64-bit value in I915_READ64 and returning 32 bits of it. The restoration of fence regs at resume then had a zero end value, and the fence had no effect. Version 2: Split register access functions into per-size versions Sharing code between different sizes seemed reasonable when we only needed a single copy, but as 64-bit access requires its own version, it makes sense to just split them out for each size. 
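Both halves of this change are easy to demonstrate in ordinary C. In the stand-alone sketch below, which operates on a plain byte array rather than MMIO space and uses names invented for the example, bad_read64() reproduces the old bug: a 64-bit read funnelled through a 32-bit return type silently drops the high half, which for a 965 fence register is exactly the end address. The __mem_read macro mirrors the token-pasting scheme the patch uses to stamp out one correctly-typed accessor per width.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static uint8_t regs[16];  /* stand-in for dev_priv->regs */

    /* The old bug in miniature: a 32-bit return type truncates 64-bit reads. */
    static uint32_t bad_read64(uint32_t reg)
    {
        uint64_t val;

        memcpy(&val, regs + reg, sizeof(val));
        return val;  /* high 32 bits (the fence end address) are lost */
    }

    /* The fix: generate one accessor per size, each returning its full width. */
    #define __mem_read(x) \
    static uint##x##_t read##x(uint32_t reg) \
    { \
        uint##x##_t val; \
        memcpy(&val, regs + reg, sizeof(val)); \
        return val; \
    }
    __mem_read(8)
    __mem_read(16)
    __mem_read(32)
    __mem_read(64)
    #undef __mem_read

    int main(void)
    {
        uint64_t fence = 0xdeadbeef00000004ull;  /* end address in the high dword */

        memcpy(regs, &fence, sizeof(fence));
        printf("32-bit path: 0x%08x (end address gone)\n", bad_read64(0));
        printf("64-bit path: 0x%016llx\n", (unsigned long long)read64(0));
        printf("8/16/32:     0x%02x 0x%04x 0x%08x\n",
               (unsigned)read8(0), (unsigned)read16(0), (unsigned)read32(0));
        return 0;
    }

Built with something like gcc demo.c, the 32-bit path prints 0x00000004: the end value is gone, matching the zero fence end observed after resume.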
Reported-by: Peter Clifton Signed-off-by: Eric Anholt Signed-off-by: Keith Packard [ickle: use a macro to create the various read/write routines] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 68 ++++++++++++++++++++++------------------- 1 file changed, 36 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4c20ad92c0f3..db79df376b86 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1245,45 +1245,49 @@ extern void intel_display_print_error_state(struct seq_file *m, LOCK_TEST_WITH_RETURN(dev, file_priv); \ } while (0) -#define I915_READ(reg) i915_read(dev_priv, (reg), 4) -#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val), 4) -#define I915_READ16(reg) i915_read(dev_priv, (reg), 2) -#define I915_WRITE16(reg, val) i915_write(dev_priv, (reg), (val), 2) -#define I915_READ8(reg) i915_read(dev_priv, (reg), 1) -#define I915_WRITE8(reg, val) i915_write(dev_priv, (reg), (val), 1) -#define I915_WRITE64(reg, val) i915_write(dev_priv, (reg), (val), 8) -#define I915_READ64(reg) i915_read(dev_priv, (reg), 8) +#define __i915_read(x, y) \ +static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ + u##x val = read##y(dev_priv->regs + reg); \ + trace_i915_reg_rw('R', reg, val, sizeof(val)); \ + return val; \ +} +__i915_read(8, b) +__i915_read(16, w) +__i915_read(32, l) +__i915_read(64, q) +#undef __i915_read + +#define __i915_write(x, y) \ +static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ + trace_i915_reg_rw('W', reg, val, sizeof(val)); \ + write##y(val, dev_priv->regs + reg); \ +} +__i915_write(8, b) +__i915_write(16, w) +__i915_write(32, l) +__i915_write(64, q) +#undef __i915_write + +#define I915_READ8(reg) i915_read8(dev_priv, (reg)) +#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val)) + +#define I915_READ16(reg) i915_read16(dev_priv, (reg)) +#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val)) +#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) +#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) + +#define I915_READ(reg) i915_read32(dev_priv, (reg)) +#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val)) #define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg)) #define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg)) -#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg)) -#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg)) + +#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val)) +#define I915_READ64(reg) i915_read64(dev_priv, (reg)) #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) -static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg, int len) -{ - u64 val = 0; - - switch (len) { - case 8: - val = readq(dev_priv->regs + reg); - break; - case 4: - val = readl(dev_priv->regs + reg); - break; - case 2: - val = readw(dev_priv->regs + reg); - break; - case 1: - val = readb(dev_priv->regs + reg); - break; - } - trace_i915_reg_rw('R', reg, val, len); - - return val; -} /* On SNB platform, before reading ring registers forcewake bit * must be set to prevent GT core from power down and stale values being -- cgit v1.2.3 From 9535ab7323351bacf02d82af79921df1d6594969 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:18 -0500 Subject: drm/radeon/kms: setup mc chremap properly on 
r7xx/evergreen Should improve performance slightly and possibly fix some issues. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 38 +++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/evergreend.h | 4 ++++ drivers/gpu/drm/radeon/rv770.c | 45 +++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/rv770d.h | 2 ++ 4 files changed, 89 insertions(+) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4dc5b4714c5a..728358e6b798 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1382,6 +1382,42 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, return backend_map; } +static void evergreen_program_channel_remap(struct radeon_device *rdev) +{ + u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; + + tmp = RREG32(MC_SHARED_CHMAP); + switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { + case 0: + case 1: + case 2: + case 3: + default: + /* default mapping */ + mc_shared_chremap = 0x00fac688; + break; + } + + switch (rdev->family) { + case CHIP_HEMLOCK: + case CHIP_CYPRESS: + tcp_chan_steer_lo = 0x54763210; + tcp_chan_steer_hi = 0x0000ba98; + break; + case CHIP_JUNIPER: + case CHIP_REDWOOD: + case CHIP_CEDAR: + default: + tcp_chan_steer_lo = 0x76543210; + tcp_chan_steer_hi = 0x0000ba98; + break; + } + + WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); + WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); + WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); +} + static void evergreen_gpu_init(struct radeon_device *rdev) { u32 cc_rb_backend_disable = 0; @@ -1685,6 +1721,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(DMIF_ADDR_CONFIG, gb_addr_config); WREG32(HDP_ADDR_CONFIG, gb_addr_config); + evergreen_program_channel_remap(rdev); + num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; grbm_gfx_index = INSTANCE_BROADCAST_WRITES; diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 113c70cc8b39..9644b1cbfb09 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -180,6 +180,7 @@ #define MC_SHARED_CHMAP 0x2004 #define NOOFCHAN_SHIFT 12 #define NOOFCHAN_MASK 0x00003000 +#define MC_SHARED_CHREMAP 0x2008 #define MC_ARB_RAMCFG 0x2760 #define NOOFBANK_SHIFT 0 @@ -348,6 +349,9 @@ #define SYNC_WALKER (1 << 25) #define SYNC_ALIGNER (1 << 26) +#define TCP_CHAN_STEER_LO 0x960c +#define TCP_CHAN_STEER_HI 0x9610 + #define VGT_CACHE_INVALIDATION 0x88C4 #define CACHE_INVALIDATION(x) ((x) << 0) #define VC_ONLY 0 diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4dfead8cee33..24ebd0879c4b 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -489,6 +489,49 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, return backend_map; } +static void rv770_program_channel_remap(struct radeon_device *rdev) +{ + u32 tcp_chan_steer, mc_shared_chremap, tmp; + bool force_no_swizzle; + + switch (rdev->family) { + case CHIP_RV770: + case CHIP_RV730: + force_no_swizzle = false; + break; + case CHIP_RV710: + case CHIP_RV740: + default: + force_no_swizzle = true; + break; + } + + tmp = RREG32(MC_SHARED_CHMAP); + switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { + case 0: + case 1: + default: + /* default mapping */ + mc_shared_chremap = 0x00fac688; + break; + case 2: + case 3: + if (force_no_swizzle) + mc_shared_chremap = 0x00fac688; + else + mc_shared_chremap 
= 0x00bbc298; + break; + } + + if (rdev->family == CHIP_RV740) + tcp_chan_steer = 0x00ef2a60; + else + tcp_chan_steer = 0x00fac688; + + WREG32(TCP_CHAN_STEER, tcp_chan_steer); + WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); +} + static void rv770_gpu_init(struct radeon_device *rdev) { int i, j, num_qd_pipes; @@ -688,6 +731,8 @@ static void rv770_gpu_init(struct radeon_device *rdev) WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); + rv770_program_channel_remap(rdev); + WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index b7a5a20e81dc..7b1c8f8f4074 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -138,6 +138,7 @@ #define MC_SHARED_CHMAP 0x2004 #define NOOFCHAN_SHIFT 12 #define NOOFCHAN_MASK 0x00003000 +#define MC_SHARED_CHREMAP 0x2008 #define MC_ARB_RAMCFG 0x2760 #define NOOFBANK_SHIFT 0 @@ -303,6 +304,7 @@ #define BILINEAR_PRECISION_8_BIT (1 << 31) #define TCP_CNTL 0x9610 +#define TCP_CHAN_STEER 0x9614 #define VGT_CACHE_INVALIDATION 0x88C4 #define CACHE_INVALIDATION(x) ((x)<<0) -- cgit v1.2.3 From 5d68e501bf000b8b1696875455c7a556ce2e9c43 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:19 -0500 Subject: drm/radeon/kms: upstream ObjectID.h updates Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/ObjectID.h | 48 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h index c714179d1bfa..c61c3fe9fb98 100644 --- a/drivers/gpu/drm/radeon/ObjectID.h +++ b/drivers/gpu/drm/radeon/ObjectID.h @@ -37,6 +37,8 @@ #define GRAPH_OBJECT_TYPE_CONNECTOR 0x3 #define GRAPH_OBJECT_TYPE_ROUTER 0x4 /* deleted */ +#define GRAPH_OBJECT_TYPE_DISPLAY_PATH 0x6 +#define GRAPH_OBJECT_TYPE_GENERIC 0x7 /****************************************************/ /* Encoder Object ID Definition */ @@ -64,6 +66,9 @@ #define ENCODER_OBJECT_ID_VT1623 0x10 #define ENCODER_OBJECT_ID_HDMI_SI1930 0x11 #define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12 +#define ENCODER_OBJECT_ID_ALMOND 0x22 +#define ENCODER_OBJECT_ID_TRAVIS 0x23 +#define ENCODER_OBJECT_ID_NUTMEG 0x22 /* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */ #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14 @@ -108,6 +113,7 @@ #define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13 #define CONNECTOR_OBJECT_ID_eDP 0x14 #define CONNECTOR_OBJECT_ID_MXM 0x15 +#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16 /* deleted */ @@ -124,6 +130,7 @@ #define GENERIC_OBJECT_ID_GLSYNC 0x01 #define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02 #define GENERIC_OBJECT_ID_MXM_OPM 0x03 +#define GENERIC_OBJECT_ID_STEREO_PIN 0x04 //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin /****************************************************/ /* Graphics Object ENUM ID Definition */ @@ -360,6 +367,26 @@ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT) +#define ENCODER_ALMOND_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT) + +#define ENCODER_ALMOND_ENUM_ID2 ( 
GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT) + +#define ENCODER_TRAVIS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT) + +#define ENCODER_TRAVIS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT) + +#define ENCODER_NUTMEG_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT) + /****************************************************/ /* Connector Object ID definition - Shared with BIOS */ /****************************************************/ @@ -421,6 +448,14 @@ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) +#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) + +#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT) + #define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT) @@ -512,6 +547,7 @@ #define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) + #define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT) @@ -593,6 +629,14 @@ GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC +#define CONNECTOR_LVDS_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT) + +#define CONNECTOR_LVDS_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\ + CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT) + /****************************************************/ /* Router Object ID definition - Shared with BIOS */ /****************************************************/ @@ -621,6 +665,10 @@ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT) +#define GENERICOBJECT_STEREO_PIN_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\ + GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\ + GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT) + /****************************************************/ /* Object Cap definition - Shared with BIOS */ /****************************************************/ -- cgit v1.2.3 From 1422ef52efa2b9abcf8e9d4d641f3a12579f1027 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:20 -0500 Subject: drm/radeon/kms: upstream atombios.h updates Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atombios.h | 853 +++++++++++++++++++++++++++++++++++--- 1 file changed, 792 insertions(+), 61 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 
fe359a239df3..6a9baa2443f8 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -73,8 +73,18 @@ #define ATOM_PPLL1 0 #define ATOM_PPLL2 1 #define ATOM_DCPLL 2 +#define ATOM_PPLL0 2 +#define ATOM_EXT_PLL1 8 +#define ATOM_EXT_PLL2 9 +#define ATOM_EXT_CLOCK 10 #define ATOM_PPLL_INVALID 0xFF +#define ENCODER_REFCLK_SRC_P1PLL 0 +#define ENCODER_REFCLK_SRC_P2PLL 1 +#define ENCODER_REFCLK_SRC_DCPLL 2 +#define ENCODER_REFCLK_SRC_EXTCLK 3 +#define ENCODER_REFCLK_SRC_INVALID 0xFF + #define ATOM_SCALER1 0 #define ATOM_SCALER2 1 @@ -192,6 +202,9 @@ typedef struct _ATOM_COMMON_TABLE_HEADER /*Image can't be updated, while Driver needs to carry the new table! */ }ATOM_COMMON_TABLE_HEADER; +/****************************************************************************/ +// Structure stores the ROM header. +/****************************************************************************/ typedef struct _ATOM_ROM_HEADER { ATOM_COMMON_TABLE_HEADER sHeader; @@ -221,6 +234,9 @@ typedef struct _ATOM_ROM_HEADER #define USHORT void* #endif +/****************************************************************************/ +// Structures used in Command.mtb +/****************************************************************************/ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{ USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1 USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON @@ -312,6 +328,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{ #define SetUniphyInstance ASIC_StaticPwrMgtStatusChange #define HPDInterruptService ReadHWAssistedI2CStatus #define EnableVGA_Access GetSCLKOverMCLKRatio +#define GetDispObjectInfo EnableYUV typedef struct _ATOM_MASTER_COMMAND_TABLE { @@ -357,6 +374,24 @@ typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER /****************************************************************************/ #define COMPUTE_MEMORY_PLL_PARAM 1 #define COMPUTE_ENGINE_PLL_PARAM 2 +#define ADJUST_MC_SETTING_PARAM 3 + +/****************************************************************************/ +// Structures used by AdjustMemoryControllerTable +/****************************************************************************/ +typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ +{ +#if ATOM_BIG_ENDIAN + ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block + ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0] + ULONG ulClockFreq:24; +#else + ULONG ulClockFreq:24; + ULONG ulMemoryModuleNumber:7; // BYTE_3[6:0] + ULONG ulPointerReturnFlag:1; // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block +#endif +}ATOM_ADJUST_MEMORY_CLOCK_FREQ; +#define POINTER_RETURN_FLAG 0x80 typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS { @@ -440,6 +475,26 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 #endif }COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4; +typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 +{ + union + { + ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter + ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter + }; + UCHAR ucRefDiv; //Output Parameter + UCHAR ucPostDiv; //Output Parameter + union + { + UCHAR ucCntlFlag; //Output Flags + UCHAR ucInputFlag; //Input Flags. 
ucInputFlag[0] - Strobe(1)/Performance(0) mode + }; + UCHAR ucReserved; +}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5; + +// ucInputFlag +#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode + typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER { ATOM_COMPUTE_CLOCK_FREQ ulClock; @@ -583,6 +638,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS #define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01 #define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00 #define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01 +#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ 0x02 #define ATOM_ENCODER_CONFIG_LINK_SEL_MASK 0x04 #define ATOM_ENCODER_CONFIG_LINKA 0x00 #define ATOM_ENCODER_CONFIG_LINKB 0x04 @@ -608,6 +664,9 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS #define ATOM_ENCODER_MODE_TV 13 #define ATOM_ENCODER_MODE_CV 14 #define ATOM_ENCODER_MODE_CRT 15 +#define ATOM_ENCODER_MODE_DVO 16 +#define ATOM_ENCODER_MODE_DP_SST ATOM_ENCODER_MODE_DP // For DP1.2 +#define ATOM_ENCODER_MODE_DP_MST 5 // For DP1.2 typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 { @@ -661,6 +720,7 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a +#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3 0x13 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b #define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c #define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d @@ -671,24 +731,34 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 #define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10 #define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00 +//ucTableFormatRevision=1 +//ucTableContentRevision=3 // Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver typedef struct _ATOM_DIG_ENCODER_CONFIG_V3 { #if ATOM_BIG_ENDIAN UCHAR ucReserved1:1; - UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F + UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also refered as DIGA/B/C/D/E/F) UCHAR ucReserved:3; UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz #else UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz UCHAR ucReserved:3; - UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F + UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also refered as DIGA/B/C/D/E/F) UCHAR ucReserved1:1; #endif }ATOM_DIG_ENCODER_CONFIG_V3; +#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03 +#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00 +#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01 #define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70 - +#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER 0x00 +#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER 0x10 +#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER 0x20 +#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER 0x30 +#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER 0x40 +#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER 0x50 typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3 { @@ -707,6 +777,56 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3 UCHAR ucReserved; }DIG_ENCODER_CONTROL_PARAMETERS_V3; +//ucTableFormatRevision=1 +//ucTableContentRevision=4 +// start from NI +// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver +typedef struct _ATOM_DIG_ENCODER_CONFIG_V4 +{ +#if ATOM_BIG_ENDIAN + UCHAR ucReserved1:1; + UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also refered as DIGA/B/C/D/E/F) + UCHAR 
ucReserved:2; + UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz <= Changed comparing to previous version +#else + UCHAR ucDPLinkRate:2; // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz <= Changed comparing to previous version + UCHAR ucReserved:2; + UCHAR ucDigSel:3; // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also refered as DIGA/B/C/D/E/F) + UCHAR ucReserved1:1; +#endif +}ATOM_DIG_ENCODER_CONFIG_V4; + +#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK 0x03 +#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00 +#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01 +#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02 +#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70 +#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00 +#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10 +#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER 0x20 +#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30 +#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40 +#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50 + +typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4 +{ + USHORT usPixelClock; // in 10KHz; for bios convenient + union{ + ATOM_DIG_ENCODER_CONFIG_V4 acConfig; + UCHAR ucConfig; + }; + UCHAR ucAction; + UCHAR ucEncoderMode; + // =0: DP encoder + // =1: LVDS encoder + // =2: DVI encoder + // =3: HDMI encoder + // =4: SDVO encoder + // =5: DP audio + UCHAR ucLaneNum; // how many lanes to enable + UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP + UCHAR ucHPD_ID; // HPD ID (1-6). =0 means to skip HDP programming. New comparing to previous version +}DIG_ENCODER_CONTROL_PARAMETERS_V4; // define ucBitPerColor: #define PANEL_BPC_UNDEFINE 0x00 @@ -893,6 +1013,7 @@ typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3 #endif }ATOM_DIG_TRANSMITTER_CONFIG_V3; + typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 { union @@ -936,6 +1057,149 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF + +/****************************************************************************/ +// Structures used by UNIPHYTransmitterControlTable V1.4 +// ASIC Families: NI +// ucTableFormatRevision=1 +// ucTableContentRevision=4 +/****************************************************************************/ +typedef struct _ATOM_DP_VS_MODE_V4 +{ + UCHAR ucLaneSel; + union + { + UCHAR ucLaneSet; + struct { +#if ATOM_BIG_ENDIAN + UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4 + UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level + UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level +#else + UCHAR ucVOLTAGE_SWING:3; //Bit[2:0] Voltage Swing Level + UCHAR ucPRE_EMPHASIS:3; //Bit[5:3] Pre-emphasis Level + UCHAR ucPOST_CURSOR2:2; //Bit[7:6] Post Cursor2 Level <= New in V4 +#endif + }; + }; +}ATOM_DP_VS_MODE_V4; + +typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4 +{ +#if ATOM_BIG_ENDIAN + UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) + // =1 Dig Transmitter 2 ( Uniphy CD ) + // =2 Dig Transmitter 3 ( Uniphy EF ) + UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New + UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F + UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E + // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. 
when fDualLinkConnector=1, it means master link of dual link is B or D or F + UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) + UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector +#else + UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector + UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) + UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E + // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F + UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F + UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3 <= New + UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) + // =1 Dig Transmitter 2 ( Uniphy CD ) + // =2 Dig Transmitter 3 ( Uniphy EF ) +#endif +}ATOM_DIG_TRANSMITTER_CONFIG_V4; + +typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 +{ + union + { + USHORT usPixelClock; // in 10KHz; for bios convenient + USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h + ATOM_DP_VS_MODE_V4 asMode; // DP Voltage swing mode Redefined comparing to previous version + }; + union + { + ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig; + UCHAR ucConfig; + }; + UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX + UCHAR ucLaneNum; + UCHAR ucReserved[3]; +}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4; + +//ucConfig +//Bit0 +#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR 0x01 +//Bit1 +#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT 0x02 +//Bit2 +#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK 0x04 +#define ATOM_TRANSMITTER_CONFIG_V4_LINKA 0x00 +#define ATOM_TRANSMITTER_CONFIG_V4_LINKB 0x04 +// Bit3 +#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK 0x08 +#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER 0x00 +#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER 0x08 +// Bit5:4 +#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 0x30 +#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL 0x00 +#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL 0x10 +#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL 0x20 // New in _V4 +#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT 0x30 // Changed comparing to V3 +// Bit7:6 +#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK 0xC0 +#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1 0x00 //AB +#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2 0x40 //CD +#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF + + +/****************************************************************************/ +// Structures used by ExternalEncoderControlTable V1.3 +// ASIC Families: Evergreen, Llano, NI +// ucTableFormatRevision=1 +// ucTableContentRevision=3 +/****************************************************************************/ + +typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 +{ + union{ + USHORT usPixelClock; // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT + USHORT usConnectorId; // connector id, valid when ucAction = INIT + }; + UCHAR ucConfig; // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT + UCHAR ucAction; // + UCHAR ucEncoderMode; // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT + UCHAR ucLaneNum; // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT + UCHAR ucBitPerColor; // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= 
DP + UCHAR ucReserved; +}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3; + +// ucAction +#define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT 0x00 +#define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT 0x01 +#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT 0x07 +#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP 0x0f +#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10 +#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11 +#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12 + +// ucConfig +#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03 +#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ 0x00 +#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ 0x01 +#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ 0x02 +#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK 0x70 +#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1 0x00 +#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2 0x10 +#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3 0x20 + +typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 +{ + EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder; + ULONG ulReserved[2]; +}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3; + + /****************************************************************************/ // Structures used by DAC1OuputControlTable // DAC2OuputControlTable @@ -1142,6 +1406,7 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V2 #define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10 #define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20 + typedef struct _PIXEL_CLOCK_PARAMETERS_V3 { USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) @@ -1202,6 +1467,55 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V5 #define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08 #define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10 +typedef struct _CRTC_PIXEL_CLOCK_FREQ +{ +#if ATOM_BIG_ENDIAN + ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to + // drive the pixel clock. not used for DCPLL case. + ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing. + // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version. +#else + ULONG ulPixelClock:24; // target the pixel clock to drive the CRTC timing. + // 0 means disable PPLL/DCPLL. Expanded to 24 bits comparing to previous version. + ULONG ucCRTC:8; // ATOM_CRTC1~6, indicate the CRTC controller to + // drive the pixel clock. not used for DCPLL case. +#endif +}CRTC_PIXEL_CLOCK_FREQ; + +typedef struct _PIXEL_CLOCK_PARAMETERS_V6 +{ + union{ + CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; // pixel clock and CRTC id frequency + ULONG ulDispEngClkFreq; // dispclk frequency + }; + USHORT usFbDiv; // feedback divider integer part. + UCHAR ucPostDiv; // post divider. + UCHAR ucRefDiv; // Reference divider + UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL + UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h, + // indicate which graphic encoder will be used. + UCHAR ucEncoderMode; // Encoder mode: + UCHAR ucMiscInfo; // bit[0]= Force program PPLL + // bit[1]= when VGA timing is used. + // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp + // bit[4]= RefClock source for PPLL. + // =0: XTLAIN( default mode ) + // =1: other external clock source, which is pre-defined + // by VBIOS depend on the feature required. + // bit[7:5]: reserved. 
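// Worked example (illustrative numbers, not from any real VBIOS): combining
// the divider formula quoted for V3 above,
//   PixelClock = RefClk * FB_Div / (Ref_Div * Post_Div),
// with the fractional feedback divider below, a 27 MHz reference
// (RefClk = 2700 in 10 kHz units) with usFbDiv = 100, ulFbDivDecFrac = 500000
// (a fractional part of 0.5), ucRefDiv = 2 and ucPostDiv = 5 yields
//   2700 * (100 + 500000/1000000) / (2 * 5) = 27135, i.e. 271.35 MHz.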
+ ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 ) + +}PIXEL_CLOCK_PARAMETERS_V6; + +#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL 0x01 +#define PIXEL_CLOCK_V6_MISC_VGA_MODE 0x02 +#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c +#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00 +#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04 +#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08 +#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c +#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10 + typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2 { PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput; @@ -1241,10 +1555,11 @@ typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 { USHORT usPixelClock; // target pixel clock - UCHAR ucTransmitterID; // transmitter id defined in objectid.h + UCHAR ucTransmitterID; // GPU transmitter id defined in objectid.h UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX - UCHAR ucReserved[3]; + UCHAR ucExtTransmitterID; // external encoder id. + UCHAR ucReserved[2]; }ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3; // usDispPllConfig v1.2 for RoadRunner @@ -1358,6 +1673,7 @@ typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS /**************************************************************************/ #define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS + /****************************************************************************/ // Structures used by PowerConnectorDetectionTable /****************************************************************************/ @@ -1438,6 +1754,31 @@ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00 #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8 +// Used by DCE5.0 + typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 +{ + USHORT usSpreadSpectrumAmountFrac; // SS_AMOUNT_DSFRAC New in DCE5.0 + UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread. + // Bit[1]: 1-Ext. 0-Int. 
+ // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL + // Bits[7:4] reserved + UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE + USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8] + USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC +}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3; + +#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD 0x00 +#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD 0x01 +#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD 0x02 +#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK 0x0c +#define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00 +#define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04 +#define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08 +#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF +#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0 +#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00 +#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT 8 + #define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL /**************************************************************************/ @@ -1706,7 +2047,7 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES USHORT StandardVESA_Timing; // Only used by Bios USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4 USHORT DAC_Info; // Will be obsolete from R600 - USHORT LVDS_Info; // Shared by various SW components,latest version 1.1 + USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info USHORT TMDS_Info; // Will be obsolete from R600 USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1 USHORT SupportedDevicesInfo; // Will be obsolete from R600 @@ -1736,12 +2077,16 @@ typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1 }ATOM_MASTER_LIST_OF_DATA_TABLES; +// For backward compatible +#define LVDS_Info LCD_Info + typedef struct _ATOM_MASTER_DATA_TABLE { ATOM_COMMON_TABLE_HEADER sHeader; ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables; }ATOM_MASTER_DATA_TABLE; + /****************************************************************************/ // Structure used in MultimediaCapabilityInfoTable /****************************************************************************/ @@ -1776,6 +2121,7 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) }ATOM_MULTIMEDIA_CONFIG_INFO; + /****************************************************************************/ // Structures used in FirmwareInfoTable /****************************************************************************/ @@ -2031,8 +2377,47 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_1 UCHAR ucReserved4[3]; }ATOM_FIRMWARE_INFO_V2_1; +//the structure below to be used from NI +//ucTableFormatRevision=2 +//ucTableContentRevision=2 +typedef struct _ATOM_FIRMWARE_INFO_V2_2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulFirmwareRevision; + ULONG ulDefaultEngineClock; //In 10Khz unit + ULONG ulDefaultMemoryClock; //In 10Khz unit + ULONG ulReserved[2]; + ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit* + ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit* + ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit + ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock ? + ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage. 
+ UCHAR ucReserved3; //Was ucASICMaxTemperature; + UCHAR ucMinAllowedBL_Level; + USHORT usBootUpVDDCVoltage; //In MV unit + USHORT usLcdMinPixelClockPLL_Output; // In MHz unit + USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit + ULONG ulReserved4; //Was ulAsicMaximumVoltage + ULONG ulMinPixelClockPLL_Output; //In 10Khz unit + ULONG ulReserved5; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input + ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input + ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output + USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC + USHORT usMinPixelClockPLL_Input; //In 10Khz unit + USHORT usMaxPixelClockPLL_Input; //In 10Khz unit + USHORT usBootUpVDDCIVoltage; //In unit of mv; Was usMinPixelClockPLL_Output; + ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; + USHORT usCoreReferenceClock; //In 10Khz unit + USHORT usMemoryReferenceClock; //In 10Khz unit + USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock + UCHAR ucMemoryModule_ID; //Indicate what is the board design + UCHAR ucReserved9[3]; + USHORT usBootUpMVDDCVoltage; //In unit of mv; Was usMinPixelClockPLL_Output; + USHORT usReserved12; + ULONG ulReserved10[3]; // New added comparing to previous version +}ATOM_FIRMWARE_INFO_V2_2; -#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1 +#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2 /****************************************************************************/ // Structures used in IntegratedSystemInfoTable @@ -2212,7 +2597,7 @@ ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pi ucDockingPinBit: which bit in this register to read the pin status; ucDockingPinPolarity:Polarity of the pin when docked; -ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0 +ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0 usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%. @@ -2250,6 +2635,14 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep usMinDownStreamHTLinkWidth: same as above. 
*/ +// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo - CPU type definition +#define INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU 0 +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN 1 +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2 +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3 +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4 + +#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH // this deff reflects max defined CPU code #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001 #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002 @@ -2778,8 +3171,88 @@ typedef struct _ATOM_LVDS_INFO_V12 #define PANEL_RANDOM_DITHER 0x80 #define PANEL_RANDOM_DITHER_MASK 0x80 +#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 // no need to change this + +/****************************************************************************/ +// Structures used by LCD_InfoTable V1.3 Note: previous version was called ATOM_LVDS_INFO_V12 +// ASIC Families: NI +// ucTableFormatRevision=1 +// ucTableContentRevision=3 +/****************************************************************************/ +typedef struct _ATOM_LCD_INFO_V13 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_DTD_FORMAT sLCDTiming; + USHORT usExtInfoTableOffset; + USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec. + ULONG ulReserved0; + UCHAR ucLCD_Misc; // Reorganized in V13 + // Bit0: {=0:single, =1:dual}, + // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888} // was {=0:666RGB, =1:888RGB}, + // Bit3:2: {Grey level} + // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h) + // Bit7 Reserved. was for ATOM_PANEL_MISC_API_ENABLED, still need it? + UCHAR ucPanelDefaultRefreshRate; + UCHAR ucPanelIdentification; + UCHAR ucSS_Id; + USHORT usLCDVenderID; + USHORT usLCDProductID; + UCHAR ucLCDPanel_SpecialHandlingCap; // Reorganized in V13 + // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own + // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED + // Bit2: a quick reference whether an embadded panel (LCD1 ) is LVDS (0) or eDP (1) + // Bit7-3: Reserved + UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable + USHORT usBacklightPWM; // Backlight PWM in Hz. 
New in _V13 + + UCHAR ucPowerSequenceDIGONtoDE_in4Ms; + UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms; + UCHAR ucPowerSequenceDEtoDIGON_in4Ms; + UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms; + + UCHAR ucOffDelay_in4Ms; + UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms; + UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms; + UCHAR ucReserved1; + + ULONG ulReserved[4]; +}ATOM_LCD_INFO_V13; + +#define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13 + +//Definitions for ucLCD_Misc +#define ATOM_PANEL_MISC_V13_DUAL 0x00000001 +#define ATOM_PANEL_MISC_V13_FPDI 0x00000002 +#define ATOM_PANEL_MISC_V13_GREY_LEVEL 0x0000000C +#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT 2 +#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK 0x70 +#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR 0x10 +#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR 0x20 + +//Color Bit Depth definition in EDID V1.4 @BYTE 14h +//Bit 6 5 4 + // 0 0 0 - Color bit depth is undefined + // 0 0 1 - 6 Bits per Primary Color + // 0 1 0 - 8 Bits per Primary Color + // 0 1 1 - 10 Bits per Primary Color + // 1 0 0 - 12 Bits per Primary Color + // 1 0 1 - 14 Bits per Primary Color + // 1 1 0 - 16 Bits per Primary Color + // 1 1 1 - Reserved + +//Definitions for ucLCDPanel_SpecialHandlingCap: + +//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. +//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL +#define LCDPANEL_CAP_V13_READ_EDID 0x1 // = LCDPANEL_CAP_READ_EDID no change comparing to previous version -#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 +//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together +//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static +//refresh rate switch by SW. This is only valid from ATOM_LVDS_INFO_V12 +#define LCDPANEL_CAP_V13_DRR_SUPPORTED 0x2 // = LCDPANEL_CAP_DRR_SUPPORTED no change comparing to previous version + +//Use this cap bit for a quick reference whether an embadded panel (LCD1 ) is LVDS or eDP. 
+#define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP no change comparing to previous version typedef struct _ATOM_PATCH_RECORD_MODE { @@ -2944,9 +3417,9 @@ typedef struct _ATOM_DPCD_INFO #define MAX_DTD_MODE_IN_VRAM 6 #define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT) #define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) -#define DFP_ENCODER_TYPE_OFFSET 0x80 -#define DP_ENCODER_LANE_NUM_OFFSET 0x84 -#define DP_ENCODER_LINK_RATE_OFFSET 0x88 +//20 bytes for Encoder Type and DPCD in STD EDID area +#define DFP_ENCODER_TYPE_OFFSET (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20) +#define ATOM_DP_DPCD_OFFSET (DFP_ENCODER_TYPE_OFFSET + 4 ) #define ATOM_HWICON1_SURFACE_ADDR 0 #define ATOM_HWICON2_SURFACE_ADDR (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE) @@ -2997,14 +3470,16 @@ typedef struct _ATOM_DPCD_INFO #define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) #define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256) -#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512 +#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 1024) +#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START + 512 //The size below is in Kb! #define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) +#define ATOM_VRAM_RESERVE_V2_SIZE 32 + #define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L #define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 #define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 @@ -3206,6 +3681,15 @@ typedef struct _ATOM_DISPLAY_OBJECT_PATH USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. 
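 // Note: the [1] above is the pre-C99 flexible-array idiom; the real number
 // of trailing USHORT IDs is implied by usSize (assuming usSize counts the
 // whole path, that is (usSize - 8) / 2 entries), not by the declared bound.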
}ATOM_DISPLAY_OBJECT_PATH; +typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH +{ + USHORT usDeviceTag; //supported device + USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH + USHORT usConnObjectId; //Connector Object ID + USHORT usGPUObjectId; //GPU ID + USHORT usGraphicObjIds[2]; //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder +}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH; + typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE { UCHAR ucNumOfDispPath; @@ -3261,6 +3745,47 @@ typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset #define EXT_AUXDDC_LUTINDEX_7 7 #define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1) +//ucChannelMapping are defined as following +//for DP connector, eDP, DP to VGA/LVDS +//Bit[1:0]: Define which pin connect to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +//Bit[3:2]: Define which pin connect to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +//Bit[5:4]: Define which pin connect to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +//Bit[7:6]: Define which pin connect to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING +{ +#if ATOM_BIG_ENDIAN + UCHAR ucDP_Lane3_Source:2; + UCHAR ucDP_Lane2_Source:2; + UCHAR ucDP_Lane1_Source:2; + UCHAR ucDP_Lane0_Source:2; +#else + UCHAR ucDP_Lane0_Source:2; + UCHAR ucDP_Lane1_Source:2; + UCHAR ucDP_Lane2_Source:2; + UCHAR ucDP_Lane3_Source:2; +#endif +}ATOM_DP_CONN_CHANNEL_MAPPING; + +//for DVI/HDMI, in dual link case, both links have to have same mapping. 
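//Worked example for the DP mapping above (hypothetical value): ucChannelMapping = 0xE4
//spells the one-to-one routing out explicitly -- bits[1:0]=0, bits[3:2]=1,
//bits[5:4]=2, bits[7:6]=3, binary 11 10 01 00 -- feeding each DP lane from the
//like-numbered GPU TX pin, the same routing that ucChannelMapping = 0 selects by default.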
+//Bit[1:0]: Define which pin connect to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +//Bit[3:2]: Define which pin connect to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +//Bit[5:4]: Define which pin connect to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +//Bit[7:6]: Define which pin connect to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3 from GPU pin TX3 +typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING +{ +#if ATOM_BIG_ENDIAN + UCHAR ucDVI_CLK_Source:2; + UCHAR ucDVI_DATA0_Source:2; + UCHAR ucDVI_DATA1_Source:2; + UCHAR ucDVI_DATA2_Source:2; +#else + UCHAR ucDVI_DATA2_Source:2; + UCHAR ucDVI_DATA1_Source:2; + UCHAR ucDVI_DATA0_Source:2; + UCHAR ucDVI_CLK_Source:2; +#endif +}ATOM_DVI_CONN_CHANNEL_MAPPING; + typedef struct _EXT_DISPLAY_PATH { USHORT usDeviceTag; //A bit vector to show what devices are supported @@ -3269,7 +3794,13 @@ typedef struct _EXT_DISPLAY_PATH UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT USHORT usExtEncoderObjId; //external encoder object id - USHORT usReserved[3]; + union{ + UCHAR ucChannelMapping; // if ucChannelMapping=0, using default one to one mapping + ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping; + ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping; + }; + UCHAR ucReserved; + USHORT usReserved[2]; }EXT_DISPLAY_PATH; #define NUMBER_OF_UCHAR_FOR_GUID 16 @@ -3281,7 +3812,8 @@ typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries. UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0. - UCHAR Reserved [7]; // for potential expansion + UCHAR uc3DStereoPinId; // use for eDP panel + UCHAR Reserved [6]; // for potential expansion }ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO; //Related definitions, all records are differnt but they have a commond header @@ -3311,10 +3843,11 @@ typedef struct _ATOM_COMMON_RECORD_HEADER #define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table #define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicats the oobject is linked to another obj described by the record #define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19 +#define ATOM_ENCODER_CAP_RECORD_TYPE 20 //Must be updated when new record type is added,equal to that record definition! -#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE +#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE typedef struct _ATOM_I2C_RECORD { @@ -3441,6 +3974,26 @@ typedef struct _ATOM_ENCODER_DVO_CF_RECORD UCHAR ucPadding[2]; }ATOM_ENCODER_DVO_CF_RECORD; +// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap +#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by this path + +typedef struct _ATOM_ENCODER_CAP_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + union { + USHORT usEncoderCap; + struct { +#if ATOM_BIG_ENDIAN + USHORT usReserved:15; // Bit1-15 may be defined for other capability in future + USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability. +#else + USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability. 
+ USHORT usReserved:15; // Bit1-15 may be defined for other capability in future +#endif + }; + }; +}ATOM_ENCODER_CAP_RECORD; + // value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1 #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2 @@ -3580,6 +4133,11 @@ typedef struct _ATOM_VOLTAGE_CONTROL #define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI #define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage #define VOLTAGE_CONTROL_ID_DS4402 0x04 +#define VOLTAGE_CONTROL_ID_UP6266 0x05 +#define VOLTAGE_CONTROL_ID_SCORPIO 0x06 +#define VOLTAGE_CONTROL_ID_VT1556M 0x07 +#define VOLTAGE_CONTROL_ID_CHL822x 0x08 +#define VOLTAGE_CONTROL_ID_VT1586M 0x09 typedef struct _ATOM_VOLTAGE_OBJECT { @@ -3670,66 +4228,157 @@ typedef struct _ATOM_POWER_SOURCE_INFO #define POWER_SENSOR_GPIO 0x01 #define POWER_SENSOR_I2C 0x02 +typedef struct _ATOM_CLK_VOLT_CAPABILITY +{ + ULONG ulVoltageIndex; // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table + ULONG ulMaximumSupportedCLK; // Maximum clock supported with specified voltage index, unit in 10kHz +}ATOM_CLK_VOLT_CAPABILITY; + +typedef struct _ATOM_AVAILABLE_SCLK_LIST +{ + ULONG ulSupportedSCLK; // Maximum clock supported with specified voltage index, unit in 10kHz + USHORT usVoltageIndex; // The Voltage Index indicated by FUSE for specified SCLK + USHORT usVoltageID; // The Voltage ID indicated by FUSE for specified SCLK +}ATOM_AVAILABLE_SCLK_LIST; + +// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition +#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE 1 // refer to ulSystemConfig bit[0] + +// this IntegrateSystemInfoTable is used for Liano/Ontario APU typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 { ATOM_COMMON_TABLE_HEADER sHeader; ULONG ulBootUpEngineClock; ULONG ulDentistVCOFreq; ULONG ulBootUpUMAClock; - ULONG ulReserved1[8]; + ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4]; ULONG ulBootUpReqDisplayVector; ULONG ulOtherDisplayMisc; ULONG ulGPUCapInfo; - ULONG ulReserved2[3]; + ULONG ulSB_MMIO_Base_Addr; + USHORT usRequestedPWMFreqInHz; + UCHAR ucHtcTmpLmt; + UCHAR ucHtcHystLmt; + ULONG ulMinEngineClock; ULONG ulSystemConfig; ULONG ulCPUCapInfo; - USHORT usMaxNBVoltage; - USHORT usMinNBVoltage; - USHORT usBootUpNBVoltage; - USHORT usExtDispConnInfoOffset; - UCHAR ucHtcTmpLmt; - UCHAR ucTjOffset; + USHORT usNBP0Voltage; + USHORT usNBP1Voltage; + USHORT usBootUpNBVoltage; + USHORT usExtDispConnInfoOffset; + USHORT usPanelRefreshRateRange; UCHAR ucMemoryType; UCHAR ucUMAChannelNumber; ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10]; ULONG ulCSR_M3_ARB_CNTL_UVD[10]; ULONG ulCSR_M3_ARB_CNTL_FS3D[10]; - ULONG ulReserved3[42]; + ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5]; + ULONG ulGMCRestoreResetTime; + ULONG ulMinimumNClk; + ULONG ulIdleNClk; + ULONG ulDDR_DLL_PowerUpTime; + ULONG ulDDR_PLL_PowerUpTime; + USHORT usPCIEClkSSPercentage; + USHORT usPCIEClkSSType; + USHORT usLvdsSSPercentage; + USHORT usLvdsSSpreadRateIn10Hz; + USHORT usHDMISSPercentage; + USHORT usHDMISSpreadRateIn10Hz; + USHORT usDVISSPercentage; + USHORT usDVISSpreadRateIn10Hz; + ULONG ulReserved3[21]; ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo; }ATOM_INTEGRATED_SYSTEM_INFO_V6; +// ulGPUCapInfo +#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01 +#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08 + +// ulOtherDisplayMisc +#define 
INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01 + + /********************************************************************************************************************** -// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description -//ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. -//ulDentistVCOFreq: Dentist VCO clock in 10kHz unit. -//ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit. -//ulReserved1[8] Reserved by now, must be 0x0. -//ulBootUpReqDisplayVector VBIOS boot up display IDs -// ATOM_DEVICE_CRT1_SUPPORT 0x0001 -// ATOM_DEVICE_CRT2_SUPPORT 0x0010 -// ATOM_DEVICE_DFP1_SUPPORT 0x0008 -// ATOM_DEVICE_DFP6_SUPPORT 0x0040 -// ATOM_DEVICE_DFP2_SUPPORT 0x0080 -// ATOM_DEVICE_DFP3_SUPPORT 0x0200 -// ATOM_DEVICE_DFP4_SUPPORT 0x0400 -// ATOM_DEVICE_DFP5_SUPPORT 0x0800 -// ATOM_DEVICE_LCD1_SUPPORT 0x0002 -//ulOtherDisplayMisc Other display related flags, not defined yet. -//ulGPUCapInfo TBD -//ulReserved2[3] must be 0x0 for the reserved. -//ulSystemConfig TBD -//ulCPUCapInfo TBD -//usMaxNBVoltage High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. -//usMinNBVoltage Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. -//usBootUpNBVoltage Boot up NB voltage in unit of mv. -//ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register. -//ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed. -//ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved. -//ucUMAChannelNumber System memory channel numbers. -//usExtDispConnectionInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table. -//ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for CSR M3 arbiter for default -//ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback. -//ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications. + ATOM_INTEGRATED_SYSTEM_INFO_V6 Description +ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. if it is equal 0, then VBIOS use pre-defined bootup engine clock +ulDentistVCOFreq: Dentist VCO clock in 10kHz unit. +ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit. +sDISPCLK_Voltage: Report Display clock voltage requirement. + +ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Liano/Ontaio projects: + ATOM_DEVICE_CRT1_SUPPORT 0x0001 + ATOM_DEVICE_CRT2_SUPPORT 0x0010 + ATOM_DEVICE_DFP1_SUPPORT 0x0008 + ATOM_DEVICE_DFP6_SUPPORT 0x0040 + ATOM_DEVICE_DFP2_SUPPORT 0x0080 + ATOM_DEVICE_DFP3_SUPPORT 0x0200 + ATOM_DEVICE_DFP4_SUPPORT 0x0400 + ATOM_DEVICE_DFP5_SUPPORT 0x0800 + ATOM_DEVICE_LCD1_SUPPORT 0x0002 +ulOtherDisplayMisc: Other display related flags, not defined yet. +ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode use cascade PLL mode. + =1: TMDS/HDMI Coherent Mode use signel PLL mode. + bit[3]=0: Enable HW AUX mode detection logic + =1: Disable HW AUX mode dettion logic +ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage. + +usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW). + Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0; + + When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below: + 1. 
SW uses the GPU BL PWM output to control the BL, in chis case, this non-zero frequency determines what freq GPU should use; + VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result, + Changing BL using VBIOS function is functional in both driver and non-driver present environment; + and enabling VariBri under the driver environment from PP table is optional. + + 2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating + that BL control from GPU is expected. + VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1 + Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but + it's per platform + and enabling VariBri under the driver environment from PP table is optional. + +ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt. + Threshold on value to enter HTC_active state. +ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt. + To calculate threshold off value to exit HTC_active state, which is Threshold on vlaue minus ucHtcHystLmt. +ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings. +ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled + =1: PCIE Power Gating Enabled + Bit[1]=0: DDR-DLL shut-down feature disabled. + 1: DDR-DLL shut-down feature enabled. + Bit[2]=0: DDR-PLL Power down feature disabled. + 1: DDR-PLL Power down feature enabled. +ulCPUCapInfo: TBD +usNBP0Voltage: VID for voltage on NB P0 State +usNBP1Voltage: VID for voltage on NB P1 State +usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is suffcient to support VBIOS DISPCLK requirement. +usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure +usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requestd by the platform, at least two bits need to be set + to indicate a range. + SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004 + SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008 + SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010 + SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020 +ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved. +ucUMAChannelNumber: System memory channel numbers. +ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default +ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback. +ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications. +sAvail_SCLK[5]: Arrays to provide availabe list of SLCK and corresponding voltage, order from low to high +ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns. +ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calcualte data reconnection latency. Unit in 10kHz. +ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz. +ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns. +ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns. +usPCIEClkSSPercentage: PCIE Clock Spred Spectrum Percentage in unit 0.01%; 100 mean 1%. +usPCIEClkSSType: PCIE Clock Spred Spectrum Type. 0 for Down spread(default); 1 for Center spread. +usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting. +usLvdsSSpreadRateIn10Hz: LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting. 
+usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 means 1%, =0, use VBIOS default setting.
+usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
+usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 means 1%, =0, use VBIOS default setting.
+usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
 **********************************************************************************************************************/
 
 /**************************************************************************/
@@ -3790,6 +4439,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
 #define ASIC_INTERNAL_SS_ON_LVDS 6
 #define ASIC_INTERNAL_SS_ON_DP 7
 #define ASIC_INTERNAL_SS_ON_DCPLL 8
+#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
 
 typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
 {
@@ -3903,6 +4553,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
 
 //Byte aligned defintion for BIOS usage
 #define ATOM_S0_CRT1_MONOb0 0x01
@@ -4529,7 +5180,8 @@ typedef struct _ATOM_INIT_REG_BLOCK{
 #define INDEX_ACCESS_RANGE_BEGIN (VALUE_DWORD + 1)
 #define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
 #define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
-
+//#define ACCESS_MCIODEBUGIND 0x40 //defined in BIOS code
+#define ACCESS_PLACEHOLDER 0x80
 
 typedef struct _ATOM_MC_INIT_PARAM_TABLE
 {
@@ -4554,6 +5206,10 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
 #define _32Mx32 0x33
 #define _64Mx8 0x41
 #define _64Mx16 0x42
+#define _64Mx32 0x43
+#define _128Mx8 0x51
+#define _128Mx16 0x52
+#define _256Mx8 0x61
 
 #define SAMSUNG 0x1
 #define INFINEON 0x2
@@ -4569,10 +5225,11 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
 #define QIMONDA INFINEON
 #define PROMOS MOSEL
 #define KRETON INFINEON
+#define ELIXIR NANYA
 
 /////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
 
-#define UCODE_ROM_START_ADDRESS 0x1c000
+#define UCODE_ROM_START_ADDRESS 0x1b800
 #define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode
 
 //uCode block header for reference
@@ -4903,7 +5560,34 @@ typedef struct _ATOM_VRAM_MODULE_V6
 	ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
 }ATOM_VRAM_MODULE_V6;
-
+typedef struct _ATOM_VRAM_MODULE_V7
+{
+// Design Specific Values
+ ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP
+ USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7
+ USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // Current memory module ID
+ UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+ UCHAR ucChannelNum; // Number of mem. channels supported in this module
+ UCHAR ucChannelWidth; // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucReserve; // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now.
+ UCHAR ucMisc; // RANK_OF_THISMEMORY etc.
+ UCHAR ucVREFI; // Not used.
+ UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+// Memory Module specific values
+ USHORT usEMRS2Value; // EMRS2/MR2 Value.
+ USHORT usEMRS3Value; // EMRS3/MR3 Value.
+ UCHAR ucMemoryVenderID; // [7:4] Revision, [3:0] Vendor code
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucFIFODepth; // FIFO depth can be detected during vendor detection, here it is hardcoded per memory
+ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+ char strMemPNString[20]; // part number ends with '0'.
+}ATOM_VRAM_MODULE_V7;
 
 typedef struct _ATOM_VRAM_INFO_V2
 {
@@ -4942,6 +5626,20 @@ typedef struct _ATOM_VRAM_INFO_V4
 // ATOM_INIT_REG_BLOCK aMemAdjust;
 }ATOM_VRAM_INFO_V4;
 
+typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+ USHORT usReserved[4];
+ UCHAR ucNumOfVRAMModule; // indicate number of VRAM modules
+ UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list
+ UCHAR ucVramModuleVer; // indicate ATOM_VRAM_MODULE version
+ UCHAR ucReserved;
+ ATOM_VRAM_MODULE_V7 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_HEADER_V2_1;
+
+
 typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
 {
 ATOM_COMMON_TABLE_HEADER sHeader;
@@ -5182,6 +5880,16 @@ typedef struct _ASIC_TRANSMITTER_INFO
 UCHAR ucReserved;
 }ASIC_TRANSMITTER_INFO;
 
+#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE 0x01
+#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE 0x02
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK 0xc4
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A 0x00
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B 0x04
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C 0x40
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D 0x44
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E 0x80
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F 0x84
+
 typedef struct _ASIC_ENCODER_INFO
 {
 UCHAR ucEncoderID;
@@ -5284,6 +5992,28 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS
 /* /obselete */
 #define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
+{
+ USHORT usExtEncoderObjId; // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+ UCHAR ucAuxId;
+ UCHAR ucAction;
+ UCHAR ucSinkType; // Input and Output parameters.
+ UCHAR ucHPDId; // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+ UCHAR ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS_V2;
+
+typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2
+{
+ DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam;
+ PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam;
+}DP_ENCODER_SERVICE_PS_ALLOCATION_V2;
+
+// ucAction
+#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE 0x01
+#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION 0x02
+
+
 // DP_TRAINING_TABLE
 #define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
 #define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 )
@@ -5339,6 +6069,7 @@ typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
 #define SELECT_DCIO_IMPCAL 4
 #define SELECT_DCIO_DIG 6
 #define SELECT_CRTC_PIXEL_RATE 7
+#define SELECT_VGA_BLK 8
 
 /****************************************************************************/
 //Portion VI: Definitinos for vbios MC scratch registers that driver used
-- 
cgit v1.2.3


From 603a9da37b83c2b4cc4495ecab7916f45cf5837e Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Mon, 22 Nov 2010 17:56:21 -0500
Subject: drm/radeon/kms: upstream power table updates

Signed-off-by: Alex Deucher
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/radeon/atombios.h | 144 ++++++++++++++++++++++++++++++++++++--
 1 file changed, 140 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 6a9baa2443f8..58a0cd02c0a2 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -6475,7 +6475,17 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
 #define ATOM_PP_THERMALCONTROLLER_ADT7473 9
 #define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
 #define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
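
A quick aside on the combo-type convention described in the comment above: the two combo values defined just below keep the external fan controller's id in the low bits and flag the combination with bit 0x80. A minimal decoding sketch (illustration only, not part of the patch; it assumes nothing beyond the ATOM_PP_THERMALCONTROLLER_* values in this header):

static inline int pp_thermal_is_combo(unsigned char uctype)
{
	/* combo types set bit 0x80 per the comment above */
	return (uctype & 0x80) != 0;
}

static inline unsigned char pp_thermal_fan_controller(unsigned char uctype)
{
	/* 0x89 -> 9 (adt7473), 0x8D -> 13 (EMC2103); the thermal sensor itself is internal */
	return uctype & 0x7f;
}
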
+ #define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller +#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller typedef struct _ATOM_PPLIB_STATE { @@ -6572,6 +6582,29 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 USHORT usExtendendedHeaderOffset; } ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3; +typedef struct _ATOM_PPLIB_POWERPLAYTABLE4 +{ + ATOM_PPLIB_POWERPLAYTABLE3 basicTable3; + ULONG ulGoldenPPID; // PPGen use only + ULONG ulGoldenRevision; // PPGen use only + USHORT usVddcDependencyOnSCLKOffset; + USHORT usVddciDependencyOnMCLKOffset; + USHORT usVddcDependencyOnMCLKOffset; + USHORT usMaxClockVoltageOnDCOffset; + USHORT usReserved[2]; +} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4; + +typedef struct _ATOM_PPLIB_POWERPLAYTABLE5 +{ + ATOM_PPLIB_POWERPLAYTABLE4 basicTable4; + ULONG ulTDPLimit; + ULONG ulNearTDPLimit; + ULONG ulSQRampingThreshold; + USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table + ULONG ulCACLeakage; // TBD, this parameter is still under discussion. Change to ulReserved if not needed. + ULONG ulReserved; +} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5; + //// ATOM_PPLIB_NONCLOCK_INFO::usClassification #define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 @@ -6595,6 +6628,10 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 #define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000 #define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000 +//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2 +#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001 +#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002 + //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 #define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002 @@ -6627,9 +6664,21 @@ typedef struct _ATOM_PPLIB_POWERPLAYTABLE3 #define ATOM_PPLIB_M3ARB_MASK 0x00060000 #define ATOM_PPLIB_M3ARB_SHIFT 17 +#define ATOM_PPLIB_ENABLE_DRR 0x00080000 + +// remaining 16 bits are reserved +typedef struct _ATOM_PPLIB_THERMAL_STATE +{ + UCHAR ucMinTemperature; + UCHAR ucMaxTemperature; + UCHAR ucThermalAction; +}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE; + // Contained in an array starting at the offset // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. // referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex +#define ATOM_PPLIB_NONCLOCKINFO_VER1 12 +#define ATOM_PPLIB_NONCLOCKINFO_VER2 24 typedef struct _ATOM_PPLIB_NONCLOCK_INFO { USHORT usClassification; @@ -6637,15 +6686,15 @@ typedef struct _ATOM_PPLIB_NONCLOCK_INFO UCHAR ucMaxTemperature; ULONG ulCapsAndSettings; UCHAR ucRequiredPower; - UCHAR ucUnused1[3]; + USHORT usClassification2; + ULONG ulVCLK; + ULONG ulDCLK; + UCHAR ucUnused[5]; } ATOM_PPLIB_NONCLOCK_INFO; // Contained in an array starting at the offset // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. // referenced from ATOM_PPLIB_STATE::ucClockStateIndices -#define ATOM_PPLIB_NONCLOCKINFO_VER1 12 -#define ATOM_PPLIB_NONCLOCKINFO_VER2 24 - typedef struct _ATOM_PPLIB_R600_CLOCK_INFO { USHORT usEngineClockLow; @@ -6716,6 +6765,93 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO #define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 #define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 +typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{ + USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10khz + UCHAR ucEngineClockHigh; //clockfrequency >> 16. 
+ UCHAR vddcIndex; //2-bit vddc index; + UCHAR leakage; //please use 8-bit absolute value, not the 6-bit % value + //please initalize to 0 + UCHAR rsv; + //please initalize to 0 + USHORT rsv1; + //please initialize to 0s + ULONG rsv2[2]; +}ATOM_PPLIB_SUMO_CLOCK_INFO; + + + +typedef struct _ATOM_PPLIB_STATE_V2 +{ + //number of valid dpm levels in this state; Driver uses it to calculate the whole + //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR) + UCHAR ucNumDPMLevels; + + //a index to the array of nonClockInfos + UCHAR nonClockInfoIndex; + /** + * Driver will read the first ucNumDPMLevels in this array + */ + UCHAR clockInfoIndex[1]; +} ATOM_PPLIB_STATE_V2; + +typedef struct StateArray{ + //how many states we have + UCHAR ucNumEntries; + + ATOM_PPLIB_STATE_V2 states[1]; +}StateArray; + + +typedef struct ClockInfoArray{ + //how many clock levels we have + UCHAR ucNumEntries; + + //sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO) + UCHAR ucEntrySize; + + //this is for Sumo + ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1]; +}ClockInfoArray; + +typedef struct NonClockInfoArray{ + + //how many non-clock levels we have. normally should be same as number of states + UCHAR ucNumEntries; + //sizeof(ATOM_PPLIB_NONCLOCK_INFO) + UCHAR ucEntrySize; + + ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1]; +}NonClockInfoArray; + +typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record +{ + USHORT usClockLow; + UCHAR ucClockHigh; + USHORT usVoltage; +}ATOM_PPLIB_Clock_Voltage_Dependency_Record; + +typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table +{ + UCHAR ucNumEntries; // Number of entries. + ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries. +}ATOM_PPLIB_Clock_Voltage_Dependency_Table; + +typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record +{ + USHORT usSclkLow; + UCHAR ucSclkHigh; + USHORT usMclkLow; + UCHAR ucMclkHigh; + USHORT usVddc; + USHORT usVddci; +}ATOM_PPLIB_Clock_Voltage_Limit_Record; + +typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table +{ + UCHAR ucNumEntries; // Number of entries. + ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries. +}ATOM_PPLIB_Clock_Voltage_Limit_Table; + /**************************************************************************/ -- cgit v1.2.3 From 2f062fda4f83932b87b3b6c903eeade43422245a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:22 -0500 Subject: drm/radeon/kms: add new family id for AMD Ontario APUs Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon.h | 1 + drivers/gpu/drm/radeon/radeon_family.h | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3a7095743d44..cfaf71323491 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1269,6 +1269,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) +#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM)) /* * BIOS helpers. 
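
One point worth calling out about the pplib tables introduced in the previous patch: ATOM_PPLIB_STATE_V2 is variable-sized (its footprint is sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR), per the comment on the struct), so a StateArray cannot be indexed with a fixed stride. A hedged sketch of how a consumer might walk it; the helper name is hypothetical:

static ATOM_PPLIB_STATE_V2 *pplib_state_nth(StateArray *sa, int n)
{
	UCHAR *p = (UCHAR *)sa->states;
	int i;

	/* step over the first n states, each sized by its own level count */
	for (i = 0; i < n && i < sa->ucNumEntries; i++) {
		ATOM_PPLIB_STATE_V2 *s = (ATOM_PPLIB_STATE_V2 *)p;

		p += sizeof(ATOM_PPLIB_STATE_V2) +
		     (s->ucNumDPMLevels - 1) * sizeof(UCHAR);
	}
	return (ATOM_PPLIB_STATE_V2 *)p;
}

ClockInfoArray and NonClockInfoArray avoid this by carrying an explicit ucEntrySize, so their entries can be reached directly at index * ucEntrySize.
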
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index e329066dcabd..4c222d5437d1 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h @@ -80,6 +80,7 @@ enum radeon_family { CHIP_JUNIPER, CHIP_CYPRESS, CHIP_HEMLOCK, + CHIP_PALM, CHIP_LAST, }; -- cgit v1.2.3 From 4e8c65a1f9ee1a976d95c1d5d2027761aba2f1c3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:23 -0500 Subject: drm/radeon/kms: atom changes for DCE4.1 devices Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_asic.c | 4 +++- drivers/gpu/drm/radeon/radeon_encoders.c | 25 +++++++++++++++++++++---- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 64fb89ecbf74..bf7ec0441e63 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -849,7 +849,9 @@ int radeon_asic_init(struct radeon_device *rdev) if (rdev->flags & RADEON_SINGLE_CRTC) rdev->num_crtc = 1; else { - if (ASIC_IS_DCE4(rdev)) + if (ASIC_IS_DCE41(rdev)) + rdev->num_crtc = 2; + else if (ASIC_IS_DCE4(rdev)) rdev->num_crtc = 6; else rdev->num_crtc = 2; diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 041943df966b..806d552299bf 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -713,7 +713,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) * DIG1/2 can drive UNIPHY0/1/2 link A or link B * * DCE 4.0 - * - 3 DIG transmitter blocks UNPHY0/1/2 (links A and B). + * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). * Supports up to 6 digital outputs * - 6 DIG encoder blocks. * - DIG to PHY mapping is hardcoded @@ -724,6 +724,12 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) * DIG5 drives UNIPHY2 link A, A+B * DIG6 drives UNIPHY2 link B * + * DCE 4.1 + * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). + * Supports up to 6 digital outputs + * - 2 DIG encoder blocks. 
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
 * Routing
 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
 * Examples:
@@ -904,9 +910,15 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 		else
 			args.v3.ucLaneNum = 4;
 
-		if (dig->linkb) {
-			args.v3.acConfig.ucLinkSel = 1;
-			args.v3.acConfig.ucEncoderSel = 1;
+		if (ASIC_IS_DCE41(rdev)) {
+			args.v3.acConfig.ucEncoderSel = dig->dig_encoder;
+			if (dig->linkb)
+				args.v3.acConfig.ucLinkSel = 1;
+		} else {
+			if (dig->linkb) {
+				args.v3.acConfig.ucLinkSel = 1;
+				args.v3.acConfig.ucEncoderSel = 1;
+			}
 		}
 
 		/* Select the PLL for the PHY
@@ -1483,6 +1495,11 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
 	struct radeon_encoder_atom_dig *dig;
 	uint32_t dig_enc_in_use = 0;
 
+	/* on DCE41 an encoder can drive any phy, so just use the crtc id */
+	if (ASIC_IS_DCE41(rdev)) {
+		return radeon_crtc->crtc_id;
+	}
+
 	if (ASIC_IS_DCE4(rdev)) {
 		dig = radeon_encoder->enc_priv;
 		switch (radeon_encoder->encoder_id) {
-- 
cgit v1.2.3


From bf982ebf22d5d84dd4a2a8d8f1a11f75920020a7 Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Mon, 22 Nov 2010 17:56:24 -0500
Subject: drm/radeon/kms: Add support for external encoders on fusion APUs

Signed-off-by: Alex Deucher
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/radeon/radeon_display.c  |  4 ++-
 drivers/gpu/drm/radeon/radeon_encoders.c | 54 ++++++++++++++++++++++++++++++--
 2 files changed, 54 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1df4dc6c063c..2697801e36e7 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -225,7 +225,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
 	radeon_legacy_init_crtc(dev, radeon_crtc);
 }
 
-static const char *encoder_names[34] = {
+static const char *encoder_names[36] = {
 	"NONE",
 	"INTERNAL_LVDS",
 	"INTERNAL_TMDS1",
@@ -260,6 +260,8 @@ static const char *encoder_names[34] = {
 	"INTERNAL_KLDSCP_LVTMA",
 	"INTERNAL_UNIPHY1",
 	"INTERNAL_UNIPHY2",
+	"NUTMEG",
+	"TRAVIS",
 };
 
 static const char *connector_names[15] = {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 806d552299bf..e4e64a80b58d 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1056,6 +1056,7 @@ atombios_set_edp_panel_power(struct drm_connector *connector, int action)
 union external_encoder_control {
 	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
 };
 
 static void
@@ -1066,6 +1067,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
 	union external_encoder_control args;
 	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 	int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
@@ -1073,6 +1075,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
 	int dp_clock = 0;
 	int dp_lane_count = 0;
 	int connector_object_id = 0;
+	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
 
 	if (connector) {
 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1111,6 +1114,37 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
 		else
 			args.v1.sDigEncoder.ucLaneNum =
4; break; + case 3: + args.v3.sExtEncoder.ucAction = action; + if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) + args.v3.sExtEncoder.usConnectorId = connector_object_id; + else + args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); + + if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) { + if (dp_clock == 270000) + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; + else if (dp_clock == 540000) + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ; + args.v3.sExtEncoder.ucLaneNum = dp_lane_count; + } else if (radeon_encoder->pixel_clock > 165000) + args.v3.sExtEncoder.ucLaneNum = 8; + else + args.v3.sExtEncoder.ucLaneNum = 4; + switch (ext_enum) { + case GRAPH_OBJECT_ENUM_ID1: + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1; + break; + case GRAPH_OBJECT_ENUM_ID2: + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2; + break; + case GRAPH_OBJECT_ENUM_ID3: + args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3; + break; + } + args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR; + break; default: DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); return; @@ -1301,12 +1335,18 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) switch (mode) { case DRM_MODE_DPMS_ON: default: - action = ATOM_ENABLE; + if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP)) + action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT; + else + action = ATOM_ENABLE; break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - action = ATOM_DISABLE; + if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP)) + action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT; + else + action = ATOM_DISABLE; break; } atombios_external_encoder_setup(encoder, ext_encoder, action); @@ -1627,7 +1667,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, } if (ext_encoder) { - atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); + if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP)) { + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT); + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); + } else + atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); } atombios_apply_encoder_quirks(encoder, adjusted_mode); @@ -2046,6 +2092,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t case ENCODER_OBJECT_ID_TITFP513: case ENCODER_OBJECT_ID_VT1623: case ENCODER_OBJECT_ID_HDMI_SI1930: + case ENCODER_OBJECT_ID_TRAVIS: + case ENCODER_OBJECT_ID_NUTMEG: /* these are handled by the primary encoders */ radeon_encoder->is_ext_encoder = true; if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) -- cgit v1.2.3 From 4339c442c0736db42329b68602308e95bcc75a30 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:25 -0500 Subject: drm/radeon/kms: add support for ss overrides on Fusion APUs System specific spread spectrum overrides can be specified in the integrated system info table for Fusion APUs. This adds support for using those overrides. 
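
For scale, the override fields use the fixed-point units documented in the integrated system info header earlier in this series (0.01% and 10Hz). A hypothetical decode, with field names as in that header and the byte-order helper the diff below also uses:

	/* e.g. usLvdsSSPercentage = 250 and usLvdsSSpreadRateIn10Hz = 3000
	 * request a 2.50% spread at 30 kHz */
	u32 spread_pct_x100 = le16_to_cpu(igp_info->usLvdsSSPercentage);
	u32 spread_hz = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz) * 10;
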
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_atombios.c | 39 ++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 87ead090c7d5..4d37b2d1ba74 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1321,6 +1321,43 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev, return false; } +static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev, + struct radeon_atom_ss *ss, + int id) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); + u16 data_offset, size; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info; + u8 frev, crev; + u16 percentage = 0, rate = 0; + + /* get any igp specific overrides */ + if (atom_parse_data_header(mode_info->atom_context, index, &size, + &frev, &crev, &data_offset)) { + igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *) + (mode_info->atom_context->bios + data_offset); + switch (id) { + case ASIC_INTERNAL_SS_ON_TMDS: + percentage = le16_to_cpu(igp_info->usDVISSPercentage); + rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_HDMI: + percentage = le16_to_cpu(igp_info->usHDMISSPercentage); + rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_LVDS: + percentage = le16_to_cpu(igp_info->usLvdsSSPercentage); + rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz); + break; + } + if (percentage) + ss->percentage = percentage; + if (rate) + ss->rate = rate; + } +} + union asic_ss_info { struct _ATOM_ASIC_INTERNAL_SS_INFO info; struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2; @@ -1385,6 +1422,8 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz); + if (rdev->flags & RADEON_IS_IGP) + radeon_atombios_get_igp_ss_overrides(rdev, ss, id); return true; } } -- cgit v1.2.3 From 0ef0c1f7349e782f6c79cb7e4bf8a4c3ce3371c6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:26 -0500 Subject: drm/radeon/kms: move r7xx/evergreen to its own vram_gtt setup function MC_VM_FB_LOCATION is at a different offset between r6xx and r7xx/evergreen. The location is needed for vram setup on fusion chips. 
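
The decode this enables is compact enough to sketch here (illustration only, mirroring the base computation visible in the diff below): MC_VM_FB_LOCATION packs the framebuffer start into its low 16 bits in 16 MB (1 << 24) units, so recovering the base is a mask and a shift:

static u64 fb_location_to_base(u32 fb_location)
{
	/* low 16 bits hold vram_start >> 24, i.e. the start in 16 MB units */
	return (u64)(fb_location & 0xffff) << 24;
}
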
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 2 +- drivers/gpu/drm/radeon/r600.c | 2 +- drivers/gpu/drm/radeon/radeon.h | 2 +- drivers/gpu/drm/radeon/rv770.c | 44 +++++++++++++++++++++++++++++++++++++- 4 files changed, 46 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 728358e6b798..e9b262fdaa2c 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1945,7 +1945,7 @@ int evergreen_mc_init(struct radeon_device *rdev) rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; rdev->mc.visible_vram_size = rdev->mc.aper_size; rdev->mc.active_vram_size = rdev->mc.visible_vram_size; - r600_vram_gtt_location(rdev, &rdev->mc); + r700_vram_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); return 0; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index a3552594ccc4..93b1c687328b 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1161,7 +1161,7 @@ static void r600_mc_program(struct radeon_device *rdev) * Note: GTT start, end, size should be initialized before calling this * function on AGP platform. */ -void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) +static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) { u64 size_bf, size_af; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index cfaf71323491..bf3a4fc82964 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1433,7 +1433,6 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev, struct drm_display_mode *mode2); /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ -extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern bool r600_card_posted(struct radeon_device *rdev); extern void r600_cp_stop(struct radeon_device *rdev); extern int r600_cp_start(struct radeon_device *rdev); @@ -1479,6 +1478,7 @@ extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mo extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder); +extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern void r700_cp_stop(struct radeon_device *rdev); extern void r700_cp_fini(struct radeon_device *rdev); extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 24ebd0879c4b..c23349a46fd2 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1001,6 +1001,48 @@ static void rv770_vram_scratch_fini(struct radeon_device *rdev) radeon_bo_unref(&rdev->vram_scratch.robj); } +void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) +{ + u64 size_bf, size_af; + + if (mc->mc_vram_size > 0xE0000000) { + /* leave room for at least 512M GTT */ + dev_warn(rdev->dev, "limiting VRAM\n"); + mc->real_vram_size = 0xE0000000; + mc->mc_vram_size = 0xE0000000; + } + if (rdev->flags & RADEON_IS_AGP) { + size_bf = mc->gtt_start; + size_af = 0xFFFFFFFF - mc->gtt_end + 1; + if (size_bf > size_af) { + if (mc->mc_vram_size > size_bf) { + dev_warn(rdev->dev, "limiting VRAM\n"); + mc->real_vram_size = size_bf; + mc->mc_vram_size = size_bf; + } + mc->vram_start = mc->gtt_start - mc->mc_vram_size; + } else { + 
if (mc->mc_vram_size > size_af) { + dev_warn(rdev->dev, "limiting VRAM\n"); + mc->real_vram_size = size_af; + mc->mc_vram_size = size_af; + } + mc->vram_start = mc->gtt_end; + } + mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; + dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", + mc->mc_vram_size >> 20, mc->vram_start, + mc->vram_end, mc->real_vram_size >> 20); + } else { + u64 base = 0; + if (rdev->flags & RADEON_IS_IGP) + base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; + radeon_vram_location(rdev, &rdev->mc, base); + rdev->mc.gtt_base_align = 0; + radeon_gtt_location(rdev, mc); + } +} + int rv770_mc_init(struct radeon_device *rdev) { u32 tmp; @@ -1041,7 +1083,7 @@ int rv770_mc_init(struct radeon_device *rdev) rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.visible_vram_size = rdev->mc.aper_size; rdev->mc.active_vram_size = rdev->mc.visible_vram_size; - r600_vram_gtt_location(rdev, &rdev->mc); + r700_vram_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); return 0; -- cgit v1.2.3 From 6eb18f8b6006b0e869a8484b1daaa63adce1b73e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:27 -0500 Subject: drm/radeon/kms: MC setup changes for fusion APUs - CONFIG_MEMSIZE is in bytes on fusion. - FB_BASE and FB_TOP are finer grained. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 12 +++++++++--- drivers/gpu/drm/radeon/rv770.c | 10 +++++++++- drivers/gpu/drm/radeon/rv770d.h | 1 + 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index e9b262fdaa2c..090f74700081 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1940,9 +1940,15 @@ int evergreen_mc_init(struct radeon_device *rdev) rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); /* Setup GPU memory space */ - /* size in MB on evergreen */ - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + if (rdev->flags & RADEON_IS_IGP) { + /* size in bytes on fusion */ + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); + } else { + /* size in MB on evergreen */ + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + } rdev->mc.visible_vram_size = rdev->mc.aper_size; rdev->mc.active_vram_size = rdev->mc.visible_vram_size; r700_vram_gtt_location(rdev, &rdev->mc); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index c23349a46fd2..2b66af9066b2 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -237,6 +237,12 @@ static void rv770_mc_program(struct radeon_device *rdev) rdev->mc.vram_end >> 12); } WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); + if (rdev->flags & RADEON_IS_IGP) { + tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; + tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; + tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20; + WREG32(MC_FUS_VM_FB_OFFSET, tmp); + } tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); WREG32(MC_VM_FB_LOCATION, tmp); @@ -1035,8 +1041,10 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) mc->vram_end, mc->real_vram_size >> 20); } else { u64 base = 0; - if (rdev->flags & RADEON_IS_IGP) + if 
(rdev->flags & RADEON_IS_IGP) { base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; + base |= RREG32(MC_FUS_VM_FB_OFFSET) & 0x00F00000; + } radeon_vram_location(rdev, &rdev->mc, base); rdev->mc.gtt_base_align = 0; radeon_gtt_location(rdev, mc); diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 7b1c8f8f4074..e09a403f1c64 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -158,6 +158,7 @@ #define MC_VM_AGP_BOT 0x202C #define MC_VM_AGP_BASE 0x2030 #define MC_VM_FB_LOCATION 0x2024 +#define MC_FUS_VM_FB_OFFSET 0x2898 #define MC_VM_MB_L1_TLB0_CNTL 0x2234 #define MC_VM_MB_L1_TLB1_CNTL 0x2238 #define MC_VM_MB_L1_TLB2_CNTL 0x223C -- cgit v1.2.3 From 180074010c6a8d1948638b4a136d2bd8111f4459 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:28 -0500 Subject: drm/radeon/kms: evergreen.c updates for fusion fusion chips only have 2 crtcs. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 188 +++++++++++++++++++-------------- drivers/gpu/drm/radeon/radeon_device.c | 7 +- 2 files changed, 113 insertions(+), 82 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 090f74700081..07c4c6f216f0 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -888,31 +888,39 @@ static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_sa save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); - save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); - save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); - save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); - save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + if (!(rdev->flags & RADEON_IS_IGP)) { + save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); + save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); + save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); + save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + } /* Stop all video */ WREG32(VGA_RENDER_CONTROL, 0); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); + } WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - 
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } WREG32(D1VGA_CONTROL, 0); WREG32(D2VGA_CONTROL, 0); @@ -942,41 +950,43 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, - upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, - (u32)rdev->mc.vram_start); + if (!(rdev->flags & 
RADEON_IS_IGP)) { + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + } WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); @@ -992,22 +1002,28 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_ WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); + } WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]); WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); - WREG32(EVERGREEN_CRTC_CONTROL + 
EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); + } WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } WREG32(VGA_RENDER_CONTROL, save->vga_render_control); } @@ -2068,17 +2084,21 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev) WREG32(GRBM_INT_CNTL, 0); WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } WREG32(DACA_AUTODETECT_INT_CONTROL, 0); WREG32(DACB_AUTODETECT_INT_CONTROL, 0); @@ -2187,10 +2207,12 @@ int evergreen_irq_set(struct radeon_device *rdev) WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); - WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); - WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); - WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); - WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); + if (!(rdev->flags & RADEON_IS_IGP)) { + WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); + WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); + WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5); + WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); + } 
WREG32(DC_HPD1_INT_CONTROL, hpd1); WREG32(DC_HPD2_INT_CONTROL, hpd2); @@ -2710,12 +2732,16 @@ static bool evergreen_card_posted(struct radeon_device *rdev) u32 reg; /* first check CRTCs */ - reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + if (rdev->flags & RADEON_IS_IGP) + reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); + else + reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); if (reg & EVERGREEN_CRTC_MASTER_EN) return true; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index d8ac1849180d..dd93c9c94144 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -335,7 +335,12 @@ bool radeon_card_posted(struct radeon_device *rdev) uint32_t reg; /* first check CRTCs */ - if (ASIC_IS_DCE4(rdev)) { + if (ASIC_IS_DCE41(rdev)) { + reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); + if (reg & EVERGREEN_CRTC_MASTER_EN) + return true; + } else if (ASIC_IS_DCE4(rdev)) { reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | -- cgit v1.2.3 From 958261d1e8755d1423beb0951ed0b9552c96f638 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:30 -0500 Subject: drm/radeon/kms: add radeon_asic struct for AMD Ontario fusion APUs Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_asic.c | 46 ++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index bf7ec0441e63..4e487cc16e7f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -751,6 +751,49 @@ static struct radeon_asic evergreen_asic = { .pm_get_dynpm_state = &r600_pm_get_dynpm_state, }; +static struct radeon_asic sumo_asic = { + .init = &evergreen_init, + .fini = &evergreen_fini, + .suspend = &evergreen_suspend, + .resume = &evergreen_resume, + .cp_commit = &r600_cp_commit, + .gpu_is_lockup = &evergreen_gpu_is_lockup, + .asic_reset = &evergreen_asic_reset, + .vga_set_state = &r600_vga_set_state, + .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, + .gart_set_page = &rs600_gart_set_page, + .ring_test = &r600_ring_test, + .ring_ib_execute = &r600_ring_ib_execute, + .irq_set = &evergreen_irq_set, + .irq_process = &evergreen_irq_process, + .get_vblank_counter = &evergreen_get_vblank_counter, + .fence_ring_emit = &r600_fence_ring_emit, + .cs_parse = &evergreen_cs_parse, + .copy_blit = NULL, + .copy_dma = NULL, + .copy = 
NULL, + .get_engine_clock = &radeon_atom_get_engine_clock, + .set_engine_clock = &radeon_atom_set_engine_clock, + .get_memory_clock = NULL, + .set_memory_clock = NULL, + .get_pcie_lanes = NULL, + .set_pcie_lanes = NULL, + .set_clock_gating = NULL, + .set_surface_reg = r600_set_surface_reg, + .clear_surface_reg = r600_clear_surface_reg, + .bandwidth_update = &evergreen_bandwidth_update, + .hpd_init = &evergreen_hpd_init, + .hpd_fini = &evergreen_hpd_fini, + .hpd_sense = &evergreen_hpd_sense, + .hpd_set_polarity = &evergreen_hpd_set_polarity, + .gui_idle = &r600_gui_idle, + .pm_misc = &evergreen_pm_misc, + .pm_prepare = &evergreen_pm_prepare, + .pm_finish = &evergreen_pm_finish, + .pm_init_profile = &rs780_pm_init_profile, + .pm_get_dynpm_state = &r600_pm_get_dynpm_state, +}; + int radeon_asic_init(struct radeon_device *rdev) { radeon_register_accessor_init(rdev); @@ -835,6 +878,9 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_HEMLOCK: rdev->asic = &evergreen_asic; break; + case CHIP_PALM: + rdev->asic = &sumo_asic; + break; default: /* FIXME: not supported yet */ return -EINVAL; -- cgit v1.2.3 From d5e455e48b095df7f5381dff028f8523aaf565a9 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:29 -0500 Subject: drm/radeon/kms: fill in GPU init for AMD Ontario Fusion APUs Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 49 ++++++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 07c4c6f216f0..7f859ffe99ce 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1299,6 +1299,7 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, switch (rdev->family) { case CHIP_CEDAR: case CHIP_REDWOOD: + case CHIP_PALM: force_no_swizzle = false; break; case CHIP_CYPRESS: @@ -1423,6 +1424,7 @@ static void evergreen_program_channel_remap(struct radeon_device *rdev) case CHIP_JUNIPER: case CHIP_REDWOOD: case CHIP_CEDAR: + case CHIP_PALM: default: tcp_chan_steer_lo = 0x76543210; tcp_chan_steer_hi = 0x0000ba98; @@ -1541,6 +1543,27 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.max_hw_contexts = 4; rdev->config.evergreen.sq_num_cf_insts = 1; + rdev->config.evergreen.sc_prim_fifo_size = 0x40; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + case CHIP_PALM: + rdev->config.evergreen.num_ses = 1; + rdev->config.evergreen.max_pipes = 2; + rdev->config.evergreen.max_tile_pipes = 2; + rdev->config.evergreen.max_simds = 2; + rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 192; + rdev->config.evergreen.max_gs_threads = 16; + rdev->config.evergreen.max_stack_entries = 256; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 128; + rdev->config.evergreen.sx_max_export_pos_size = 32; + rdev->config.evergreen.sx_max_export_smx_size = 96; + rdev->config.evergreen.max_hw_contexts = 4; + rdev->config.evergreen.sq_num_cf_insts = 1; + rdev->config.evergreen.sc_prim_fifo_size = 0x40; rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; @@ -1821,9 +1844,15 @@ static void evergreen_gpu_init(struct radeon_device *rdev) GS_PRIO(2) | ES_PRIO(3)); - if (rdev->family 
== CHIP_CEDAR) + switch (rdev->family) { + case CHIP_CEDAR: + case CHIP_PALM: /* no vertex cache */ sq_config &= ~VC_ENABLE; + break; + default: + break; + } sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT); @@ -1835,10 +1864,15 @@ static void evergreen_gpu_init(struct radeon_device *rdev) sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32); - if (rdev->family == CHIP_CEDAR) + switch (rdev->family) { + case CHIP_CEDAR: + case CHIP_PALM: ps_thread_count = 96; - else + break; + default: ps_thread_count = 128; + break; + } sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count); sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8); @@ -1869,10 +1903,15 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | FORCE_EOV_MAX_REZ_CNT(255))); - if (rdev->family == CHIP_CEDAR) + switch (rdev->family) { + case CHIP_CEDAR: + case CHIP_PALM: vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); - else + break; + default: vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC); + break; + } vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO); WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation); -- cgit v1.2.3 From e33df25fecd31be889a878bc75313817bc292bac Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:32 -0500 Subject: drm/radeon/kms: add thermal sensor support for fusion APUs Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 8 ++++++++ drivers/gpu/drm/radeon/evergreend.h | 4 +++- drivers/gpu/drm/radeon/radeon.h | 2 ++ drivers/gpu/drm/radeon/radeon_pm.c | 4 ++++ 4 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 7f859ffe99ce..075d6c172595 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -57,6 +57,14 @@ u32 evergreen_get_temp(struct radeon_device *rdev) return actual_temp * 1000; } +u32 sumo_get_temp(struct radeon_device *rdev) +{ + u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff; + u32 actual_temp = (temp >> 1) & 0xff; + + return actual_temp * 1000; +} + void evergreen_pm_misc(struct radeon_device *rdev) { int req_ps_idx = rdev->pm.requested_power_state_index; diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 9644b1cbfb09..87fcaba76695 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -164,11 +164,13 @@ #define SE_SC_BUSY (1 << 29) #define SE_DB_BUSY (1 << 30) #define SE_CB_BUSY (1 << 31) - +/* evergreen */ #define CG_MULT_THERMAL_STATUS 0x740 #define ASIC_T(x) ((x) << 16) #define ASIC_T_MASK 0x7FF0000 #define ASIC_T_SHIFT 16 +/* APU */ +#define CG_THERMAL_STATUS 0x678 #define HDP_HOST_PATH_CNTL 0x2C00 #define HDP_NONSURFACE_BASE 0x2C04 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index bf3a4fc82964..0507ee7e16eb 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -180,6 +180,7 @@ void rs690_pm_info(struct radeon_device *rdev); extern u32 rv6xx_get_temp(struct radeon_device *rdev); extern u32 rv770_get_temp(struct radeon_device *rdev); extern u32 evergreen_get_temp(struct radeon_device *rdev); +extern u32 sumo_get_temp(struct radeon_device *rdev); /* * Fences. 
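
A quick worked example for the new sensor read above: a CG_THERMAL_STATUS low byte of 0x50 gives actual_temp = (0x50 >> 1) & 0xff = 40, so sumo_get_temp() returns 40000, i.e. 40 degrees C in the millidegree convention the other get_temp helpers already use.
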
@@ -687,6 +688,7 @@ enum radeon_int_thermal_type { THERMAL_TYPE_RV6XX, THERMAL_TYPE_RV770, THERMAL_TYPE_EVERGREEN, + THERMAL_TYPE_SUMO, }; struct radeon_voltage { diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 8c9b2ef32c68..ac4efb0f1cb4 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -449,6 +449,9 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, case THERMAL_TYPE_EVERGREEN: temp = evergreen_get_temp(rdev); break; + case THERMAL_TYPE_SUMO: + temp = sumo_get_temp(rdev); + break; default: temp = 0; break; @@ -487,6 +490,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev) case THERMAL_TYPE_RV6XX: case THERMAL_TYPE_RV770: case THERMAL_TYPE_EVERGREEN: + case THERMAL_TYPE_SUMO: rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); if (IS_ERR(rdev->pm.int_hwmon_dev)) { err = PTR_ERR(rdev->pm.int_hwmon_dev); -- cgit v1.2.3 From e719ebd916c2ecee072affc9e7f0b92aa33c2f94 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:33 -0500 Subject: drm/radeon/kms: add bo blit support for Ontario fusion APUs Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen_blit_kms.c | 27 +++++++++++++++++++++++++-- drivers/gpu/drm/radeon/radeon_asic.c | 6 +++--- 2 files changed, 28 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index e0e590110dd4..2ccd1f0545fe 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -147,7 +147,8 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) radeon_ring_write(rdev, 0); radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30); - if (rdev->family == CHIP_CEDAR) + if ((rdev->family == CHIP_CEDAR) || + (rdev->family == CHIP_PALM)) cp_set_surface_sync(rdev, PACKET3_TC_ACTION_ENA, 48, gpu_addr); else @@ -331,9 +332,31 @@ set_default_state(struct radeon_device *rdev) num_hs_stack_entries = 85; num_ls_stack_entries = 85; break; + case CHIP_PALM: + num_ps_gprs = 93; + num_vs_gprs = 46; + num_temp_gprs = 4; + num_gs_gprs = 31; + num_es_gprs = 31; + num_hs_gprs = 23; + num_ls_gprs = 23; + num_ps_threads = 96; + num_vs_threads = 16; + num_gs_threads = 16; + num_es_threads = 16; + num_hs_threads = 16; + num_ls_threads = 16; + num_ps_stack_entries = 42; + num_vs_stack_entries = 42; + num_gs_stack_entries = 42; + num_es_stack_entries = 42; + num_hs_stack_entries = 42; + num_ls_stack_entries = 42; + break; } - if (rdev->family == CHIP_CEDAR) + if ((rdev->family == CHIP_CEDAR) || + (rdev->family == CHIP_PALM)) sq_config = 0; else sq_config = VC_ENABLE; diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 4e487cc16e7f..de7bfbcd09c9 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -769,9 +769,9 @@ static struct radeon_asic sumo_asic = { .get_vblank_counter = &evergreen_get_vblank_counter, .fence_ring_emit = &r600_fence_ring_emit, .cs_parse = &evergreen_cs_parse, - .copy_blit = NULL, - .copy_dma = NULL, - .copy = NULL, + .copy_blit = &evergreen_copy_blit, + .copy_dma = &evergreen_copy_blit, + .copy = &evergreen_copy_blit, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = NULL, -- cgit v1.2.3 From 560154e9a27f2f260fcb2dd18c488203246f257e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:34 -0500 Subject: 
drm/radeon/kms: refactor atombios power state fetching The function was getting too large. Rework it to share more state and better handle new power table formats. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_atombios.c | 941 ++++++++++++++++--------------- 1 file changed, 475 insertions(+), 466 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 4d37b2d1ba74..7056e20d329d 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1763,495 +1763,504 @@ static const char *pp_lib_thermal_controller_names[] = { "RV6xx", "RV770", "adt7473", + "NONE", "External GPIO", "Evergreen", "adt7473 with internal", - }; union power_info { struct _ATOM_POWERPLAY_INFO info; struct _ATOM_POWERPLAY_INFO_V2 info_2; struct _ATOM_POWERPLAY_INFO_V3 info_3; - struct _ATOM_PPLIB_POWERPLAYTABLE info_4; + struct _ATOM_PPLIB_POWERPLAYTABLE pplib; }; -void radeon_atombios_get_power_modes(struct radeon_device *rdev) +union pplib_clock_info { + struct _ATOM_PPLIB_R600_CLOCK_INFO r600; + struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; + struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; +}; + +union pplib_power_state { + struct _ATOM_PPLIB_STATE v1; + struct _ATOM_PPLIB_STATE_V2 v2; +}; + +static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev, + int state_index, + u32 misc, u32 misc2) +{ + rdev->pm.power_state[state_index].misc = misc; + rdev->pm.power_state[state_index].misc2 = misc2; + /* order matters! */ + if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_POWERSAVE; + if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BATTERY; + if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BATTERY; + if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; + if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_PERFORMANCE; + rdev->pm.power_state[state_index].flags &= + ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + } + if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; + if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state_index = state_index; + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[0]; + } else if (state_index == 0) { + rdev->pm.power_state[state_index].clock_info[0].flags |= + RADEON_PM_MODE_NO_DISPLAY; + } +} + +static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) { struct radeon_mode_info *mode_info = &rdev->mode_info; + u32 misc, misc2 = 0; + int num_modes = 0, i; + int state_index = 0; + struct radeon_i2c_bus_rec i2c_bus; + union power_info *power_info; int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; + u16 data_offset; u8 frev, crev; - u32 misc, misc2 = 0, sclk, mclk; - union power_info *power_info; - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; - struct _ATOM_PPLIB_STATE *power_state; - int num_modes = 0, i, j; - int state_index = 0, mode_index = 0; - struct radeon_i2c_bus_rec i2c_bus; - - rdev->pm.default_power_state_index = -1; - if
(atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) { - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - if (frev < 4) { - /* add the i2c bus for thermal/fan chip */ - if (power_info->info.ucOverdriveThermalController > 0) { - DRM_INFO("Possible %s thermal controller at 0x%02x\n", - thermal_controller_names[power_info->info.ucOverdriveThermalController], - power_info->info.ucOverdriveControllerAddress >> 1); - i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); - rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); - if (rdev->pm.i2c_bus) { - struct i2c_board_info info = { }; - const char *name = thermal_controller_names[power_info->info. - ucOverdriveThermalController]; - info.addr = power_info->info.ucOverdriveControllerAddress >> 1; - strlcpy(info.type, name, sizeof(info.type)); - i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); - } + if (!atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return state_index; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + /* add the i2c bus for thermal/fan chip */ + if (power_info->info.ucOverdriveThermalController > 0) { + DRM_INFO("Possible %s thermal controller at 0x%02x\n", + thermal_controller_names[power_info->info.ucOverdriveThermalController], + power_info->info.ucOverdriveControllerAddress >> 1); + i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine); + rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); + if (rdev->pm.i2c_bus) { + struct i2c_board_info info = { }; + const char *name = thermal_controller_names[power_info->info. + ucOverdriveThermalController]; + info.addr = power_info->info.ucOverdriveControllerAddress >> 1; + strlcpy(info.type, name, sizeof(info.type)); + i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); + } + } + num_modes = power_info->info.ucNumOfPowerModeEntries; + if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) + num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; + /* last mode is usually default, array is low to high */ + for (i = 0; i < num_modes; i++) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; + switch (frev) { + case 1: + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = + le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); + rdev->pm.power_state[state_index].clock_info[0].sclk = + le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock); + /* skip invalid modes */ + if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) + continue; + rdev->pm.power_state[state_index].pcie_lanes = + power_info->info.asPowerPlayInfo[i].ucNumPciELanes; + misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); + if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || + (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_GPIO; + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = + radeon_lookup_gpio(rdev, + power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex); + if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + true; + else + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + false; + } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { + 
rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_VDDC; + rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = + power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; } - num_modes = power_info->info.ucNumOfPowerModeEntries; - if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) - num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; - /* last mode is usually default, array is low to high */ - for (i = 0; i < num_modes; i++) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; - switch (frev) { - case 1: - rdev->pm.power_state[state_index].num_clock_modes = 1; - rdev->pm.power_state[state_index].clock_info[0].mclk = - le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); - rdev->pm.power_state[state_index].clock_info[0].sclk = - le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock); - /* skip invalid modes */ - if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) - continue; - rdev->pm.power_state[state_index].pcie_lanes = - power_info->info.asPowerPlayInfo[i].ucNumPciELanes; - misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); - if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || - (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_GPIO; - rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = - radeon_lookup_gpio(rdev, - power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex); - if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) - rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = - true; - else - rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = - false; - } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_VDDC; - rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = - power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; - } - rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - rdev->pm.power_state[state_index].misc = misc; - /* order matters! 
*/ - if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_POWERSAVE; - if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; - if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_PERFORMANCE; - rdev->pm.power_state[state_index].flags &= - ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - } - if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[0]; - rdev->pm.power_state[state_index].flags &= - ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - } else if (state_index == 0) { - rdev->pm.power_state[state_index].clock_info[0].flags |= - RADEON_PM_MODE_NO_DISPLAY; - } - state_index++; - break; - case 2: - rdev->pm.power_state[state_index].num_clock_modes = 1; - rdev->pm.power_state[state_index].clock_info[0].mclk = - le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); - rdev->pm.power_state[state_index].clock_info[0].sclk = - le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock); - /* skip invalid modes */ - if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) - continue; - rdev->pm.power_state[state_index].pcie_lanes = - power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; - misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); - misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); - if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || - (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_GPIO; - rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = - radeon_lookup_gpio(rdev, - power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex); - if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) - rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = - true; - else - rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = - false; - } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_VDDC; - rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = - power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; - } - rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - rdev->pm.power_state[state_index].misc = misc; - rdev->pm.power_state[state_index].misc2 = misc2; - /* order matters! 
*/ - if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_POWERSAVE; - if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; - if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_PERFORMANCE; - rdev->pm.power_state[state_index].flags &= - ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - } - if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; - if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT) - rdev->pm.power_state[state_index].flags &= - ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[0]; - rdev->pm.power_state[state_index].flags &= - ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - } else if (state_index == 0) { - rdev->pm.power_state[state_index].clock_info[0].flags |= - RADEON_PM_MODE_NO_DISPLAY; - } - state_index++; - break; - case 3: - rdev->pm.power_state[state_index].num_clock_modes = 1; - rdev->pm.power_state[state_index].clock_info[0].mclk = - le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); - rdev->pm.power_state[state_index].clock_info[0].sclk = - le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock); - /* skip invalid modes */ - if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) - continue; - rdev->pm.power_state[state_index].pcie_lanes = - power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; - misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); - misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); - if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || - (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_GPIO; - rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = - radeon_lookup_gpio(rdev, - power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex); - if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) - rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = - true; - else - rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = - false; - } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { - rdev->pm.power_state[state_index].clock_info[0].voltage.type = - VOLTAGE_VDDC; - rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = - power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex; - if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) { - rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled = - true; - rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id = - power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; - } - } - rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - rdev->pm.power_state[state_index].misc = misc; - rdev->pm.power_state[state_index].misc2 = misc2; - /* 
order matters! */ - if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_POWERSAVE; - if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; - if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_PERFORMANCE; - rdev->pm.power_state[state_index].flags &= - ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - } - if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; - if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[0]; - } else if (state_index == 0) { - rdev->pm.power_state[state_index].clock_info[0].flags |= - RADEON_PM_MODE_NO_DISPLAY; - } - state_index++; - break; - } + rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0); + state_index++; + break; + case 2: + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = + le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); + rdev->pm.power_state[state_index].clock_info[0].sclk = + le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock); + /* skip invalid modes */ + if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) + continue; + rdev->pm.power_state[state_index].pcie_lanes = + power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; + misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); + misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); + if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || + (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_GPIO; + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = + radeon_lookup_gpio(rdev, + power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex); + if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + true; + else + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + false; + } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_VDDC; + rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = + power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; } - /* last mode is usually default */ - if (rdev->pm.default_power_state_index == -1) { - rdev->pm.power_state[state_index - 1].type = - POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state_index = state_index - 1; - rdev->pm.power_state[state_index - 1].default_clock_mode = - &rdev->pm.power_state[state_index - 1].clock_info[0]; - rdev->pm.power_state[state_index].flags &= - ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - rdev->pm.power_state[state_index].misc = 0; - 
rdev->pm.power_state[state_index].misc2 = 0; + rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2); + state_index++; + break; + case 3: + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = + le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); + rdev->pm.power_state[state_index].clock_info[0].sclk = + le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock); + /* skip invalid modes */ + if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) + continue; + rdev->pm.power_state[state_index].pcie_lanes = + power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; + misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); + misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); + if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) || + (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_GPIO; + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = + radeon_lookup_gpio(rdev, + power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex); + if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + true; + else + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + false; + } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_VDDC; + rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = + power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex; + if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) { + rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled = + true; + rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id = + power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; + } } + rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2); + state_index++; + break; + } + } + /* last mode is usually default */ + if (rdev->pm.default_power_state_index == -1) { + rdev->pm.power_state[state_index - 1].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state_index = state_index - 1; + rdev->pm.power_state[state_index - 1].default_clock_mode = + &rdev->pm.power_state[state_index - 1].clock_info[0]; + rdev->pm.power_state[state_index].flags &= + ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + rdev->pm.power_state[state_index].misc = 0; + rdev->pm.power_state[state_index].misc2 = 0; + } + return state_index; +} + +static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev, + ATOM_PPLIB_THERMALCONTROLLER *controller) +{ + struct radeon_i2c_bus_rec i2c_bus; + + /* add the i2c bus for thermal/fan chip */ + if (controller->ucType > 0) { + if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_RV770; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; + } else if ((controller->ucType == + ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || + (controller->ucType == + ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) { + DRM_INFO("Special thermal controller config\n"); } else { - int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo); - uint8_t fw_frev, fw_crev; - uint16_t fw_data_offset, vddc = 0; - union firmware_info *firmware_info; - ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController; - - if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL, - &fw_frev, &fw_crev, &fw_data_offset)) { - firmware_info = - (union firmware_info *)(mode_info->atom_context->bios + - fw_data_offset); - vddc = firmware_info->info_14.usBootUpVDDCVoltage; + DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", + pp_lib_thermal_controller_names[controller->ucType], + controller->ucI2cAddress >> 1, + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); + rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); + if (rdev->pm.i2c_bus) { + struct i2c_board_info info = { }; + const char *name = pp_lib_thermal_controller_names[controller->ucType]; + info.addr = controller->ucI2cAddress >> 1; + strlcpy(info.type, name, sizeof(info.type)); + i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); } + } + } +} - /* add the i2c bus for thermal/fan chip */ - if (controller->ucType > 0) { - if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - rdev->pm.int_thermal_type = THERMAL_TYPE_RV770; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; - } else if ((controller->ucType == - ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || - (controller->ucType == - ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) { - DRM_INFO("Special thermal controller config\n"); - } else { - DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", - pp_lib_thermal_controller_names[controller->ucType], - controller->ucI2cAddress >> 1, - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); - i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); - rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); - if (rdev->pm.i2c_bus) { - struct i2c_board_info info = { }; - const char *name = pp_lib_thermal_controller_names[controller->ucType]; - info.addr = controller->ucI2cAddress >> 1; - strlcpy(info.type, name, sizeof(info.type)); - i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); - } +static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); + u8 frev, crev; + u16 data_offset; + union firmware_info *firmware_info; + u16 vddc = 0; - } - } - /* first mode is usually default, followed by low to high */ - for (i = 0; i < power_info->info_4.ucNumStates; i++) { - mode_index = 0; - power_state = (struct _ATOM_PPLIB_STATE *) - (mode_info->atom_context->bios + - data_offset + - le16_to_cpu(power_info->info_4.usStateArrayOffset) + - i * power_info->info_4.ucStateEntrySize); - non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) - (mode_info->atom_context->bios + - data_offset + - le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) + - (power_state->ucNonClockStateIndex * - power_info->info_4.ucNonClockSize)); - for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) { - if (rdev->flags & RADEON_IS_IGP) { - struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info = - (struct _ATOM_PPLIB_RS780_CLOCK_INFO *) - (mode_info->atom_context->bios + - data_offset + - le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + - (power_state->ucClockStateIndices[j] * - power_info->info_4.ucClockInfoSize)); - sclk = le16_to_cpu(clock_info->usLowEngineClockLow); - sclk |= clock_info->ucLowEngineClockHigh << 16; - rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; - /* skip invalid modes */ - if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) - continue; - /* voltage works differently on IGPs */ - mode_index++; - } else if (ASIC_IS_DCE4(rdev)) { - struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info = - (struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *) - (mode_info->atom_context->bios + - data_offset + - le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + - (power_state->ucClockStateIndices[j] * - power_info->info_4.ucClockInfoSize)); - sclk = le16_to_cpu(clock_info->usEngineClockLow); - sclk |= clock_info->ucEngineClockHigh << 16; - mclk = le16_to_cpu(clock_info->usMemoryClockLow); - mclk |= clock_info->ucMemoryClockHigh << 16; - rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; - rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; - /* skip invalid modes */ - if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) - continue; - rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = - VOLTAGE_SW; - rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = - clock_info->usVDDC; - /* XXX usVDDCI */ - mode_index++; - } else { - struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info = - (struct _ATOM_PPLIB_R600_CLOCK_INFO *) - (mode_info->atom_context->bios + - data_offset + - le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + - (power_state->ucClockStateIndices[j] * - power_info->info_4.ucClockInfoSize)); - sclk = le16_to_cpu(clock_info->usEngineClockLow); - sclk |= clock_info->ucEngineClockHigh << 16; - mclk = 
le16_to_cpu(clock_info->usMemoryClockLow); - mclk |= clock_info->ucMemoryClockHigh << 16; - rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; - rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; - /* skip invalid modes */ - if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || - (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) - continue; - rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = - VOLTAGE_SW; - rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = - clock_info->usVDDC; - mode_index++; - } - } - rdev->pm.power_state[state_index].num_clock_modes = mode_index; - if (mode_index) { - misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); - misc2 = le16_to_cpu(non_clock_info->usClassification); - rdev->pm.power_state[state_index].misc = misc; - rdev->pm.power_state[state_index].misc2 = misc2; - rdev->pm.power_state[state_index].pcie_lanes = - ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> - ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; - switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { - case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BATTERY; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_BALANCED; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_PERFORMANCE; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_NONE: - if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_PERFORMANCE; - break; - } - rdev->pm.power_state[state_index].flags = 0; - if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) - rdev->pm.power_state[state_index].flags |= - RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; - if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) { - rdev->pm.power_state[state_index].type = - POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state_index = state_index; - rdev->pm.power_state[state_index].default_clock_mode = - &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; - /* patch the table values with the default slck/mclk from firmware info */ - for (j = 0; j < mode_index; j++) { - rdev->pm.power_state[state_index].clock_info[j].mclk = - rdev->clock.default_mclk; - rdev->pm.power_state[state_index].clock_info[j].sclk = - rdev->clock.default_sclk; - if (vddc) - rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = - vddc; - } - } - state_index++; - } - } - /* if multiple clock modes, mark the lowest as no display */ - for (i = 0; i < state_index; i++) { - if (rdev->pm.power_state[i].num_clock_modes > 1) - rdev->pm.power_state[i].clock_info[0].flags |= - RADEON_PM_MODE_NO_DISPLAY; - } - /* first mode is usually default */ - if (rdev->pm.default_power_state_index == -1) { - rdev->pm.power_state[0].type = - POWER_STATE_TYPE_DEFAULT; - rdev->pm.default_power_state_index = 0; - rdev->pm.power_state[0].default_clock_mode = - &rdev->pm.power_state[0].clock_info[0]; - } + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + firmware_info = + (union firmware_info *)(mode_info->atom_context->bios + + data_offset); + vddc = firmware_info->info_14.usBootUpVDDCVoltage; + } + + return vddc; +} + +static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev, + int state_index, int mode_index, + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info) +{ + int j; + u32 misc = 
le32_to_cpu(non_clock_info->ulCapsAndSettings); + u32 misc2 = le16_to_cpu(non_clock_info->usClassification); + u16 vddc = radeon_atombios_get_default_vddc(rdev); + + rdev->pm.power_state[state_index].misc = misc; + rdev->pm.power_state[state_index].misc2 = misc2; + rdev->pm.power_state[state_index].pcie_lanes = + ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> + ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; + switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { + case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BATTERY; + break; + case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; + break; + case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_PERFORMANCE; + break; + case ATOM_PPLIB_CLASSIFICATION_UI_NONE: + if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_PERFORMANCE; + break; + } + rdev->pm.power_state[state_index].flags = 0; + if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) + rdev->pm.power_state[state_index].flags |= + RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; + if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state_index = state_index; + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; + /* patch the table values with the default slck/mclk from firmware info */ + for (j = 0; j < mode_index; j++) { + rdev->pm.power_state[state_index].clock_info[j].mclk = + rdev->clock.default_mclk; + rdev->pm.power_state[state_index].clock_info[j].sclk = + rdev->clock.default_sclk; + if (vddc) + rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = + vddc; + } + } +} + +static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, + int state_index, int mode_index, + union pplib_clock_info *clock_info) +{ + u32 sclk, mclk; + + if (rdev->flags & RADEON_IS_IGP) { + sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow); + sclk |= clock_info->rs780.ucLowEngineClockHigh << 16; + rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; + } else if (ASIC_IS_DCE4(rdev)) { + sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow); + sclk |= clock_info->evergreen.ucEngineClockHigh << 16; + mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow); + mclk |= clock_info->evergreen.ucMemoryClockHigh << 16; + rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; + rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = + VOLTAGE_SW; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = + clock_info->evergreen.usVDDC; + } else { + sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); + sclk |= clock_info->r600.ucEngineClockHigh << 16; + mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow); + mclk |= clock_info->r600.ucMemoryClockHigh << 16; + rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; + rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = + VOLTAGE_SW; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = + clock_info->r600.usVDDC; + } + + if (rdev->flags & RADEON_IS_IGP) { + /* skip invalid modes */ + if 
(rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) + return false; + } else { + /* skip invalid modes */ + if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) + return false; + } + return true; +} + +static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; + union pplib_power_state *power_state; + int i, j; + int state_index = 0, mode_index = 0; + union pplib_clock_info *clock_info; + bool valid; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + + if (!atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return state_index; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); + /* first mode is usually default, followed by low to high */ + for (i = 0; i < power_info->pplib.ucNumStates; i++) { + mode_index = 0; + power_state = (union pplib_power_state *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usStateArrayOffset) + + i * power_info->pplib.ucStateEntrySize); + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) + + (power_state->v1.ucNonClockStateIndex * + power_info->pplib.ucNonClockSize)); + for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) { + clock_info = (union pplib_clock_info *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) + + (power_state->v1.ucClockStateIndices[j] * + power_info->pplib.ucClockInfoSize)); + valid = radeon_atombios_parse_pplib_clock_info(rdev, + state_index, mode_index, + clock_info); + if (valid) + mode_index++; + } + rdev->pm.power_state[state_index].num_clock_modes = mode_index; + if (mode_index) { + radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index, + non_clock_info); + state_index++; + } + } + /* if multiple clock modes, mark the lowest as no display */ + for (i = 0; i < state_index; i++) { + if (rdev->pm.power_state[i].num_clock_modes > 1) + rdev->pm.power_state[i].clock_info[0].flags |= + RADEON_PM_MODE_NO_DISPLAY; + } + /* first mode is usually default */ + if (rdev->pm.default_power_state_index == -1) { + rdev->pm.power_state[0].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state_index = 0; + rdev->pm.power_state[0].default_clock_mode = + &rdev->pm.power_state[0].clock_info[0]; + } + return state_index; +} + +void radeon_atombios_get_power_modes(struct radeon_device *rdev) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + int state_index = 0; + + rdev->pm.default_power_state_index = -1; + + if (atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + switch (frev) { + case 1: + case 2: + case 3: + state_index = radeon_atombios_parse_power_table_1_3(rdev); + break; + case 4: + case 5: + state_index = radeon_atombios_parse_power_table_4_5(rdev); + break; + default: + break; } } else { /* add the default mode */ -- cgit v1.2.3 From b0e664140a170382531a8c8b5396bf7e6903d5e3 Mon Sep 
17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:35 -0500 Subject: drm/radeon/kms: add power table parsing support for Ontario fusion APUs The vbios power tables on my inagua board seem a bit funky... Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_atombios.c | 106 +++++++++++++++++++++++++++++-- 1 file changed, 101 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 7056e20d329d..ac882639b3ed 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1766,7 +1766,8 @@ static const char *pp_lib_thermal_controller_names[] = { "NONE", "External GPIO", "Evergreen", - "adt7473 with internal", + "emc2103", + "Sumo", }; union power_info { @@ -1774,12 +1775,15 @@ union power_info { struct _ATOM_POWERPLAY_INFO_V2 info_2; struct _ATOM_POWERPLAY_INFO_V3 info_3; struct _ATOM_PPLIB_POWERPLAYTABLE pplib; + struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; + struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; }; union pplib_clock_info { struct _ATOM_PPLIB_R600_CLOCK_INFO r600; struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; + struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; }; union pplib_power_state { @@ -2022,10 +2026,17 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO; } else if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || (controller->ucType == - ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) { + ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) || + (controller->ucType == + ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) { DRM_INFO("Special thermal controller config\n"); } else { DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", @@ -2129,9 +2140,15 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev, u32 sclk, mclk; if (rdev->flags & RADEON_IS_IGP) { - sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow); - sclk |= clock_info->rs780.ucLowEngineClockHigh << 16; - rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; + if (rdev->family >= CHIP_PALM) { + sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); + sclk |= clock_info->sumo.ucEngineClockHigh << 16; + rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; + } else { + sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow); + sclk |= clock_info->rs780.ucLowEngineClockHigh << 16; + rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; + } } else if (ASIC_IS_DCE4(rdev)) { sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow); sclk |= clock_info->evergreen.ucEngineClockHigh << 16; @@ -2237,6 +2254,82 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) return state_index; } +static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) +{ + struct radeon_mode_info *mode_info = &rdev->mode_info; + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; + union pplib_power_state *power_state; + int i, j, non_clock_array_index, clock_array_index; + int 
state_index = 0, mode_index = 0; + union pplib_clock_info *clock_info; + struct StateArray *state_array; + struct ClockInfoArray *clock_info_array; + struct NonClockInfoArray *non_clock_info_array; + bool valid; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + + if (!atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return state_index; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); + state_array = (struct StateArray *) + (mode_info->atom_context->bios + data_offset + + power_info->pplib.usStateArrayOffset); + clock_info_array = (struct ClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + power_info->pplib.usClockInfoArrayOffset); + non_clock_info_array = (struct NonClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + power_info->pplib.usNonClockInfoArrayOffset); + for (i = 0; i < state_array->ucNumEntries; i++) { + mode_index = 0; + power_state = (union pplib_power_state *)&state_array->states[i]; + /* XXX this might be an inagua bug... */ + non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) + &non_clock_info_array->nonClockInfo[non_clock_array_index]; + for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { + clock_array_index = power_state->v2.clockInfoIndex[j]; + /* XXX this might be an inagua bug... */ + if (clock_array_index >= clock_info_array->ucNumEntries) + continue; + clock_info = (union pplib_clock_info *) + &clock_info_array->clockInfo[clock_array_index]; + valid = radeon_atombios_parse_pplib_clock_info(rdev, + state_index, mode_index, + clock_info); + if (valid) + mode_index++; + } + rdev->pm.power_state[state_index].num_clock_modes = mode_index; + if (mode_index) { + radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index, + non_clock_info); + state_index++; + } + } + /* if multiple clock modes, mark the lowest as no display */ + for (i = 0; i < state_index; i++) { + if (rdev->pm.power_state[i].num_clock_modes > 1) + rdev->pm.power_state[i].clock_info[0].flags |= + RADEON_PM_MODE_NO_DISPLAY; + } + /* first mode is usually default */ + if (rdev->pm.default_power_state_index == -1) { + rdev->pm.power_state[0].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state_index = 0; + rdev->pm.power_state[0].default_clock_mode = + &rdev->pm.power_state[0].clock_info[0]; + } + return state_index; +} + void radeon_atombios_get_power_modes(struct radeon_device *rdev) { struct radeon_mode_info *mode_info = &rdev->mode_info; @@ -2259,6 +2352,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) case 5: state_index = radeon_atombios_parse_power_table_4_5(rdev); break; + case 6: + state_index = radeon_atombios_parse_power_table_6(rdev); + break; default: break; } -- cgit v1.2.3 From bbbf9b7b02e0090d91c422398089c9f22361dc94 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:36 -0500 Subject: drm/radeon/kms: enable MSIs on fusion APUs Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_irq_kms.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index a108c7ed14f5..b492b7dc63ac 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ 
b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -121,7 +121,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) * chips. Disable MSI on them for now. */ if ((rdev->family >= CHIP_RV380) && - (!(rdev->flags & RADEON_IS_IGP)) && + ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) && (!(rdev->flags & RADEON_IS_AGP))) { int ret = pci_enable_msi(rdev->pdev); if (!ret) { -- cgit v1.2.3 From 11fa1618e327af7f08085e134b3325de9caf7a94 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:37 -0500 Subject: drm/radeon/kms: add Ontario Fusion APU pci ids Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- include/drm/drm_pciids.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 883c1d439899..e6b28a39942f 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -419,6 +419,10 @@ {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ {0, 0, 0} #define r128_PCI_IDS \ -- cgit v1.2.3 From 439bd6cde45d0bff4f97513b01b976b9f72ae1f0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 22 Nov 2010 17:56:31 -0500 Subject: drm/radeon/kms: add Ontario APU ucode loading support Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r600.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 93b1c687328b..b37361b69ad5 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -83,6 +83,9 @@ MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin"); MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); +MODULE_FIRMWARE("radeon/PALM_pfp.bin"); +MODULE_FIRMWARE("radeon/PALM_me.bin"); +MODULE_FIRMWARE("radeon/SUMO_rlc.bin"); int r600_debugfs_mc_info_init(struct radeon_device *rdev); @@ -1998,6 +2001,10 @@ int r600_init_microcode(struct radeon_device *rdev) chip_name = "CYPRESS"; rlc_chip_name = "CYPRESS"; break; + case CHIP_PALM: + chip_name = "PALM"; + rlc_chip_name = "SUMO"; + break; default: BUG(); } -- cgit v1.2.3 From 3685092b717882bb9b6801bf3e4b02a106e3b129 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 23 Nov 2010 08:49:38 +0000 Subject: drm/i915: Avoid oops when capturing NULL ring for inactive pinned buffers Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 6 +++--- drivers/gpu/drm/i915/i915_irq.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 421b8414b577..4fe49e0228ef 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -572,9 +572,9 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) static const char *ring_str(int ring) { switch (ring) { - case RING_RENDER: return "render"; - case RING_BSD: return "bsd"; - case 
RING_BLT: return "blt"; + case RING_RENDER: return " render"; + case RING_BSD: return " bsd"; + case RING_BLT: return " blt"; default: return ""; } } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 0b6052abedd1..a8f55f061f6d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -575,7 +575,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err, err->tiling = obj->tiling_mode; err->dirty = obj->dirty; err->purgeable = obj->madv != I915_MADV_WILLNEED; - err->ring = obj->ring->id; + err->ring = obj->ring ? obj->ring->id : 0; if (++i == count) break; -- cgit v1.2.3 From 0b0b053a3949f5c467c3b3ba135d4c161f9fbd00 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 23 Nov 2010 09:45:50 +0000 Subject: drm/i915/panel: Restore saved value of BLC_PWM_CTL After a GPU reset, the backlight controller registers may also be reset to 0. In that case we should restore those to the original values programmed by the BIOS. Note that we still lack the code to handle the case where the BIOS failed to program those registers at all... Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_panel.c | 52 ++++++++++++++++++++++++++++++-------- 1 file changed, 42 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 92ff8f385278..7350ec2515c6 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -125,15 +125,55 @@ static int is_backlight_combination_mode(struct drm_device *dev) return 0; } +static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) +{ + u32 val; + + /* Restore the CTL value if it was lost, e.g. GPU reset */ + + if (HAS_PCH_SPLIT(dev_priv->dev)) { + val = I915_READ(BLC_PWM_PCH_CTL2); + if (dev_priv->saveBLC_PWM_CTL2 == 0) { + dev_priv->saveBLC_PWM_CTL2 = val; + } else if (val == 0) { + I915_WRITE(BLC_PWM_PCH_CTL2, + dev_priv->saveBLC_PWM_CTL2); + val = dev_priv->saveBLC_PWM_CTL2; + } + } else { + val = I915_READ(BLC_PWM_CTL); + if (dev_priv->saveBLC_PWM_CTL == 0) { + dev_priv->saveBLC_PWM_CTL = val; + dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); + } else if (val == 0) { + I915_WRITE(BLC_PWM_CTL, + dev_priv->saveBLC_PWM_CTL); + I915_WRITE(BLC_PWM_CTL2, + dev_priv->saveBLC_PWM_CTL2); + val = dev_priv->saveBLC_PWM_CTL; + } + } + + return val; +} + u32 intel_panel_get_max_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 max; + max = i915_read_blc_pwm_ctl(dev_priv); + if (max == 0) { + /* XXX add code here to query mode clock or hardware clock + * and program max PWM appropriately. + */ + printk_once(KERN_WARNING "fixme: max PWM is zero.\n"); + return 1; + } + if (HAS_PCH_SPLIT(dev)) { - max = I915_READ(BLC_PWM_PCH_CTL2) >> 16; + max >>= 16; } else { - max = I915_READ(BLC_PWM_CTL); if (IS_PINEVIEW(dev)) { max >>= 17; } else { @@ -146,14 +186,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev) max *= 0xff; } - if (max == 0) { - /* XXX add code here to query mode clock or hardware clock - * and program max PWM appropriately.
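The save/restore policy in i915_read_blc_pwm_ctl() above has three cases: the first read seeds the saved copy with the BIOS-programmed value, a later readback of zero (e.g. after a GPU reset) is repaired from that copy, and any other value passes straight through. A standalone model of that policy, with the register I/O replaced by plain variables and an illustrative value (not the driver code itself):

#include <stdio.h>
#include <stdint.h>

static uint32_t hw_reg;		/* stands in for the BLC_PWM_CTL readback */
static uint32_t save_slot;	/* stands in for dev_priv->saveBLC_PWM_CTL */

static uint32_t read_blc_pwm_ctl(void)
{
	uint32_t val = hw_reg;

	if (save_slot == 0)
		save_slot = val;	/* first read: remember the BIOS value */
	else if (val == 0) {
		hw_reg = save_slot;	/* register content lost: restore it */
		val = save_slot;
	}
	return val;
}

int main(void)
{
	hw_reg = 0x12345678;	/* illustrative BIOS-programmed value */
	printf("boot:  0x%08x\n", read_blc_pwm_ctl());

	hw_reg = 0;		/* simulate the register clearing on reset */
	printf("reset: 0x%08x\n", read_blc_pwm_ctl());
	return 0;
}

Both reads return 0x12345678. As the commit message concedes, the scheme cannot help when the BIOS left the register at zero to begin with: the saved copy is then seeded with zero as well.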
From fe669bf88e9108b96a847385df08c9b1e98c1420 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 23 Nov 2010 12:09:30 +0000 Subject: drm/i915: Compute physical addresses from base of stolen memory The GATT is a write-only set of registers; reading from them in the manner of i915_gtt_to_phys() is supposed to be undefined. However, a simple solution exists: as we allocate linear memory from the stolen area, we can simply add the block offset to the base register. As a side-effect we recover all the unused stolen GTT entries and so enlarge our aperture. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 197 ++++++++++++++++------------------------ drivers/gpu/drm/i915/i915_drv.h | 2 +- 2 files changed, 77 insertions(+), 122 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index cf4e06a9417a..3df271215ab9 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1007,73 +1007,47 @@ intel_teardown_mchbar(struct drm_device *dev) #define PTE_VALID (1 << 0) /** - * i915_gtt_to_phys - take a GTT address and turn it into a physical one + * i915_stolen_to_phys - take an offset into stolen memory and turn it into + * a physical one * @dev: drm device - * @gtt_addr: address to translate + * @offset: address to translate * - * Some chip functions require allocations from stolen space but need the - * physical address of the memory in question. We use this routine - * to get a physical address suitable for register programming from a given - * GTT address. + * Some chip functions require allocations from stolen space and need the + * physical address of the memory in question. */ -static unsigned long i915_gtt_to_phys(struct drm_device *dev, - unsigned long gtt_addr) +static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset) { - unsigned long *gtt; - unsigned long entry, phys; - int gtt_bar = IS_GEN2(dev) ? 1 : 0; - int gtt_offset, gtt_size; - - if (INTEL_INFO(dev)->gen >= 4) { - if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) { - gtt_offset = 2*1024*1024; - gtt_size = 2*1024*1024; - } else { - gtt_offset = 512*1024; - gtt_size = 512*1024; - } + struct drm_i915_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = dev_priv->bridge_dev; + u32 base; + +#if 0 + /* On the machines I have tested the Graphics Base of Stolen Memory + * is unreliable, so compute the base by subtracting the stolen memory + * from the Top of Low Usable DRAM which is where the BIOS places + * the graphics stolen memory. + */ + if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { + /* top 32bits are reserved = 0 */ + pci_read_config_dword(pdev, 0xA4, &base); } else { - gtt_bar = 3; - gtt_offset = 0; - gtt_size = pci_resource_len(dev->pdev, gtt_bar); - } - - gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset, - gtt_size); - if (!gtt) { - DRM_ERROR("ioremap of GTT failed\n"); - return 0; - } - - entry = *(volatile u32 *)(gtt + (gtt_addr / 1024)); - - DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry); - - /* Mask out these reserved bits on this hardware. */ - if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev)) - entry &= ~PTE_ADDRESS_MASK_HIGH; - - /* If it's not a mapping type we know, then bail.
*/ - if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED && - (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) { - iounmap(gtt); - return 0; - } - - if (!(entry & PTE_VALID)) { - DRM_ERROR("bad GTT entry in stolen space\n"); - iounmap(gtt); - return 0; + /* XXX presume 8xx is the same as i915 */ + pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base); + } +#else + if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { + u16 val; + pci_read_config_word(pdev, 0xb0, &val); + base = val >> 4 << 20; + } else { + u8 val; + pci_read_config_byte(pdev, 0x9c, &val); + base = val >> 3 << 27; } + base -= dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; +#endif - iounmap(gtt); - - phys =(entry & PTE_ADDRESS_MASK) | - ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4)); - - DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys); - - return phys; + return base + offset; } static void i915_warn_stolen(struct drm_device *dev) @@ -1089,47 +1063,28 @@ static void i915_setup_compression(struct drm_device *dev, int size) unsigned long cfb_base; unsigned long ll_base = 0; - /* Leave 1M for line length buffer & misc. */ - compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0); - if (!compressed_fb) { - dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; - i915_warn_stolen(dev); - return; - } - - compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); - if (!compressed_fb) { - i915_warn_stolen(dev); - dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; - return; - } + compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); + if (compressed_fb) + compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); + if (!compressed_fb) + goto err; - cfb_base = i915_gtt_to_phys(dev, compressed_fb->start); - if (!cfb_base) { - DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); - drm_mm_put_block(compressed_fb); - } + cfb_base = i915_stolen_to_phys(dev, compressed_fb->start); + if (!cfb_base) + goto err_fb; if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) { - compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096, - 4096, 0); - if (!compressed_llb) { - i915_warn_stolen(dev); - return; - } - - compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096); - if (!compressed_llb) { - i915_warn_stolen(dev); - return; - } + compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, + 4096, 4096, 0); + if (compressed_llb) + compressed_llb = drm_mm_get_block(compressed_llb, + 4096, 4096); + if (!compressed_llb) + goto err_fb; - ll_base = i915_gtt_to_phys(dev, compressed_llb->start); - if (!ll_base) { - DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); - drm_mm_put_block(compressed_fb); - drm_mm_put_block(compressed_llb); - } + ll_base = i915_stolen_to_phys(dev, compressed_llb->start); + if (!ll_base) + goto err_llb; } dev_priv->cfb_size = size; @@ -1146,8 +1101,17 @@ static void i915_setup_compression(struct drm_device *dev, int size) dev_priv->compressed_llb = compressed_llb; } - DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, - ll_base, size >> 20); + DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", + cfb_base, ll_base, size >> 20); + return; + +err_llb: + drm_mm_put_block(compressed_llb); +err_fb: + drm_mm_put_block(compressed_fb); +err: + dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; + i915_warn_stolen(dev); } static void i915_cleanup_compression(struct drm_device *dev) @@ -1207,12 +1171,11 @@ static int i915_load_modeset_init(struct drm_device *dev) prealloc_size = 
dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; - gtt_size -= PAGE_SIZE; - /* Basic memrange allocator for stolen space (aka mm.vram) */ - drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size); + /* Basic memrange allocator for stolen space */ + drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); - /* Let GEM Manage from end of prealloc space to end of aperture. + /* Let GEM Manage all of the aperture. * * However, leave one page at the end still bound to the scratch page. * There are a number of places where the hardware apparently @@ -1221,7 +1184,7 @@ static int i915_load_modeset_init(struct drm_device *dev) * at the last page of the aperture. One page should be enough to * keep any prefetching inside of the aperture. */ - i915_gem_do_init(dev, prealloc_size, mappable_size, gtt_size); + i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE); mutex_lock(&dev->struct_mutex); ret = i915_gem_init_ringbuffer(dev); @@ -1233,16 +1196,17 @@ static int i915_load_modeset_init(struct drm_device *dev) if (I915_HAS_FBC(dev) && i915_powersave) { int cfb_size; - /* Try to get an 8M buffer... */ - if (prealloc_size > (9*1024*1024)) - cfb_size = 8*1024*1024; + /* Leave 1M for line length buffer & misc. */ + + /* Try to get a 32M buffer... */ + if (prealloc_size > (36*1024*1024)) + cfb_size = 32*1024*1024; else /* fall back to 7/8 of the stolen space */ cfb_size = prealloc_size * 7 / 8; i915_setup_compression(dev, cfb_size); } - /* Allow hardware batchbuffers unless told otherwise. - */ + /* Allow hardware batchbuffers unless told otherwise. */ dev_priv->allow_batchbuffer = 1; ret = intel_parse_bios(dev); @@ -1892,7 +1856,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) { struct drm_i915_private *dev_priv; int ret = 0, mmio_bar; - uint32_t agp_size, prealloc_size; + uint32_t agp_size; + /* i915 has 4 more counters */ dev->counters += 4; dev->types[6] = _DRM_STAT_IRQ; @@ -1932,7 +1897,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_iomapfree; } - prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; dev_priv->mm.gtt_mapping = @@ -1980,15 +1944,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) /* enable GEM by default */ dev_priv->has_gem = 1; - if (prealloc_size > agp_size * 3 / 4) { - DRM_ERROR("Detected broken video BIOS with %d/%dkB of video " - "memory stolen.\n", - prealloc_size / 1024, agp_size / 1024); - DRM_ERROR("Disabling GEM. 
(try reducing stolen memory or " - "updating the BIOS to fix).\n"); - dev_priv->has_gem = 0; - } - if (dev_priv->has_gem == 0 && drm_core_check_feature(dev, DRIVER_MODESET)) { DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); @@ -2162,7 +2117,7 @@ int i915_driver_unload(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); if (I915_HAS_FBC(dev) && i915_powersave) i915_cleanup_compression(dev); - drm_mm_takedown(&dev_priv->mm.vram); + drm_mm_takedown(&dev_priv->mm.stolen); intel_cleanup_overlay(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index db79df376b86..745e46b0673f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -544,7 +544,7 @@ typedef struct drm_i915_private { /** Bridge to intel-gtt-ko */ struct intel_gtt *gtt; /** Memory allocator for GTT stolen memory */ - struct drm_mm vram; + struct drm_mm stolen; /** Memory allocator for GTT */ struct drm_mm gtt_space; /** End of mappable part of GTT */ -- cgit v1.2.3 From 1b6064d79b9a1c5e5aa6fcc6855f3da5e639ff73 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 23 Nov 2010 12:33:54 +0000 Subject: agp/intel: Remove the artificial cap on stolen size Now that the stolen memory does not also steal entries from the GTT, we can use all the memory the BIOS set aside for the GPU. Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index fc1637c32cb1..19919ef9d661 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -39,9 +39,6 @@ #define USE_PCI_DMA_API 0 #endif -/* Max amount of stolen space, anything above will be returned to Linux */ -int intel_max_stolen = 32 * 1024 * 1024; - static const struct aper_size_info_fixed intel_i810_sizes[] = { {64, 16384, 4}, @@ -486,7 +483,7 @@ static unsigned int intel_gtt_stolen_entries(void) u8 rdct; int local = 0; static const int ddt[4] = { 0, 16, 32, 64 }; - unsigned int overhead_entries, stolen_entries; + unsigned int overhead_entries; unsigned int stolen_size = 0; pci_read_config_word(intel_private.bridge_dev, @@ -625,12 +622,7 @@ static unsigned int intel_gtt_stolen_entries(void) } } - if (!local && stolen_size > intel_max_stolen) { - dev_info(&intel_private.bridge_dev->dev, - "detected %dK stolen memory, trimming to %dK\n", - stolen_size / KB(1), intel_max_stolen / KB(1)); - stolen_size = intel_max_stolen; - } else if (stolen_size > 0) { + if (stolen_size > 0) { dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n", stolen_size / KB(1), local ? 
"local" : "stolen"); } else { @@ -639,9 +631,7 @@ static unsigned int intel_gtt_stolen_entries(void) stolen_size = 0; } - stolen_entries = stolen_size/KB(4) - overhead_entries; - - return stolen_entries; + return stolen_size/KB(4) - overhead_entries; } static void i965_adjust_pgetbl_size(unsigned int size_flag) -- cgit v1.2.3 From c64f7ba5f1006d8c20eacafecf98d4d00a6902a0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 23 Nov 2010 14:24:24 +0000 Subject: agp/intel: Remove confusion of stolen entries not stolen memory Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 40 +++++++--------------------------------- drivers/gpu/drm/i915/i915_dma.c | 4 ++-- drivers/gpu/drm/i915/i915_drv.h | 2 +- include/drm/intel-gtt.h | 11 +++++------ 4 files changed, 15 insertions(+), 42 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 19919ef9d661..d2733e526a68 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -477,26 +477,17 @@ static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = { {512, 131072, 7}, }; -static unsigned int intel_gtt_stolen_entries(void) +static unsigned int intel_gtt_stolen_size(void) { u16 gmch_ctrl; u8 rdct; int local = 0; static const int ddt[4] = { 0, 16, 32, 64 }; - unsigned int overhead_entries; unsigned int stolen_size = 0; pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); - if (INTEL_GTT_GEN > 4 || IS_PINEVIEW) - overhead_entries = 0; - else - overhead_entries = intel_private.base.gtt_mappable_entries - / 1024; - - overhead_entries += 1; /* BIOS popup */ - if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { switch (gmch_ctrl & I830_GMCH_GMS_MASK) { @@ -631,7 +622,7 @@ static unsigned int intel_gtt_stolen_entries(void) stolen_size = 0; } - return stolen_size/KB(4) - overhead_entries; + return stolen_size; } static void i965_adjust_pgetbl_size(unsigned int size_flag) @@ -817,8 +808,8 @@ static int intel_gtt_init(void) global_cache_flush(); /* FIXME: ? 
*/ /* we have to call this as early as possible after the MMIO base address is known */ - intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries(); - if (intel_private.base.gtt_stolen_entries == 0) { + intel_private.base.stolen_size = intel_gtt_stolen_size(); + if (intel_private.base.stolen_size == 0) { intel_private.driver->cleanup(); iounmap(intel_private.registers); iounmap(intel_private.gtt); @@ -1006,8 +997,7 @@ static int intel_fake_agp_configure(void) agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; - for (i = intel_private.base.gtt_stolen_entries; - i < intel_private.base.gtt_total_entries; i++) { + for (i = 0; i < intel_private.base.gtt_total_entries; i++) { intel_private.driver->write_entry(intel_private.scratch_page_dma, i, 0); } @@ -1065,17 +1055,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem, if (mem->page_count == 0) goto out; - if (pg_start < intel_private.base.gtt_stolen_entries) { - dev_printk(KERN_DEBUG, &intel_private.pcidev->dev, - "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n", - pg_start, intel_private.base.gtt_stolen_entries); - - dev_info(&intel_private.pcidev->dev, - "trying to insert into local/stolen memory\n"); - goto out_err; - } - - if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries) + if (pg_start + mem->page_count > intel_private.base.gtt_total_entries) goto out_err; if (type != mem->type) @@ -1118,12 +1098,6 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem, if (mem->page_count == 0) return 0; - if (pg_start < intel_private.base.gtt_stolen_entries) { - dev_info(&intel_private.pcidev->dev, - "trying to disable local/stolen memory\n"); - return -EINVAL; - } - if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) intel_agp_unmap_memory(mem); @@ -1629,7 +1603,7 @@ int intel_gmch_probe(struct pci_dev *pdev, } EXPORT_SYMBOL(intel_gmch_probe); -struct intel_gtt *intel_gtt_get(void) +const struct intel_gtt *intel_gtt_get(void) { return &intel_private.base; } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 3df271215ab9..86b0e8052ec9 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1044,7 +1044,7 @@ static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset) pci_read_config_byte(pdev, 0x9c, &val); base = val >> 3 << 27; } - base -= dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; + base -= dev_priv->mm.gtt->stolen_size; #endif return base + offset; @@ -1168,7 +1168,7 @@ static int i915_load_modeset_init(struct drm_device *dev) unsigned long prealloc_size, gtt_size, mappable_size; int ret = 0; - prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT; + prealloc_size = dev_priv->mm.gtt->stolen_size; gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 745e46b0673f..b62ff5d6fa7e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -542,7 +542,7 @@ typedef struct drm_i915_private { struct { /** Bridge to intel-gtt-ko */ - struct intel_gtt *gtt; + const struct intel_gtt *gtt; /** Memory allocator for GTT stolen memory */ struct drm_mm stolen; /** Memory allocator for GTT */ diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index d3c81946f613..020f8aab7e5b 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -2,17 +2,16 @@ #ifndef _DRM_INTEL_GTT_H #define _DRM_INTEL_GTT_H 
-struct intel_gtt { - /* Number of stolen gtt entries at the beginning. */ - unsigned int gtt_stolen_entries; + +const struct intel_gtt { + /* Size of memory reserved for graphics by the BIOS */ + unsigned int stolen_size; /* Total number of gtt entries. */ unsigned int gtt_total_entries; /* Part of the gtt that is mappable by the cpu, for those chips where * this is not the full gtt. */ unsigned int gtt_mappable_entries; -}; - -struct intel_gtt *intel_gtt_get(void); +} *intel_gtt_get(void); #endif -- cgit v1.2.3 From faa60c4174d943696b9478c5de6438c4f0fecba6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 23 Nov 2010 13:50:14 +0000 Subject: drm/i915: Contract the magic IPS constants into a direct LUT ... and no need to perform a linear search for the index. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 304 +++++++++++++++++++--------------------- 1 file changed, 144 insertions(+), 160 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 86b0e8052ec9..7084de7c4c55 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1396,152 +1396,12 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev) } } -struct v_table { - u8 vid; - unsigned long vd; /* in .1 mil */ - unsigned long vm; /* in .1 mil */ - u8 pvid; -}; - -static struct v_table v_table[] = { - { 0, 16125, 15000, 0x7f, }, - { 1, 16000, 14875, 0x7e, }, - { 2, 15875, 14750, 0x7d, }, - { 3, 15750, 14625, 0x7c, }, - { 4, 15625, 14500, 0x7b, }, - { 5, 15500, 14375, 0x7a, }, - { 6, 15375, 14250, 0x79, }, - { 7, 15250, 14125, 0x78, }, - { 8, 15125, 14000, 0x77, }, - { 9, 15000, 13875, 0x76, }, - { 10, 14875, 13750, 0x75, }, - { 11, 14750, 13625, 0x74, }, - { 12, 14625, 13500, 0x73, }, - { 13, 14500, 13375, 0x72, }, - { 14, 14375, 13250, 0x71, }, - { 15, 14250, 13125, 0x70, }, - { 16, 14125, 13000, 0x6f, }, - { 17, 14000, 12875, 0x6e, }, - { 18, 13875, 12750, 0x6d, }, - { 19, 13750, 12625, 0x6c, }, - { 20, 13625, 12500, 0x6b, }, - { 21, 13500, 12375, 0x6a, }, - { 22, 13375, 12250, 0x69, }, - { 23, 13250, 12125, 0x68, }, - { 24, 13125, 12000, 0x67, }, - { 25, 13000, 11875, 0x66, }, - { 26, 12875, 11750, 0x65, }, - { 27, 12750, 11625, 0x64, }, - { 28, 12625, 11500, 0x63, }, - { 29, 12500, 11375, 0x62, }, - { 30, 12375, 11250, 0x61, }, - { 31, 12250, 11125, 0x60, }, - { 32, 12125, 11000, 0x5f, }, - { 33, 12000, 10875, 0x5e, }, - { 34, 11875, 10750, 0x5d, }, - { 35, 11750, 10625, 0x5c, }, - { 36, 11625, 10500, 0x5b, }, - { 37, 11500, 10375, 0x5a, }, - { 38, 11375, 10250, 0x59, }, - { 39, 11250, 10125, 0x58, }, - { 40, 11125, 10000, 0x57, }, - { 41, 11000, 9875, 0x56, }, - { 42, 10875, 9750, 0x55, }, - { 43, 10750, 9625, 0x54, }, - { 44, 10625, 9500, 0x53, }, - { 45, 10500, 9375, 0x52, }, - { 46, 10375, 9250, 0x51, }, - { 47, 10250, 9125, 0x50, }, - { 48, 10125, 9000, 0x4f, }, - { 49, 10000, 8875, 0x4e, }, - { 50, 9875, 8750, 0x4d, }, - { 51, 9750, 8625, 0x4c, }, - { 52, 9625, 8500, 0x4b, }, - { 53, 9500, 8375, 0x4a, }, - { 54, 9375, 8250, 0x49, }, - { 55, 9250, 8125, 0x48, }, - { 56, 9125, 8000, 0x47, }, - { 57, 9000, 7875, 0x46, }, - { 58, 8875, 7750, 0x45, }, - { 59, 8750, 7625, 0x44, }, - { 60, 8625, 7500, 0x43, }, - { 61, 8500, 7375, 0x42, }, - { 62, 8375, 7250, 0x41, }, - { 63, 8250, 7125, 0x40, }, - { 64, 8125, 7000, 0x3f, }, - { 65, 8000, 6875, 0x3e, }, - { 66, 7875, 6750, 0x3d, }, - { 67, 7750, 6625, 0x3c, }, - { 68, 7625, 6500, 0x3b, }, - { 69, 7500, 6375, 0x3a, }, - { 70, 7375, 6250, 0x39, }, - { 71, 7250, 6125, 0x38, }, - { 
72, 7125, 6000, 0x37, }, - { 73, 7000, 5875, 0x36, }, - { 74, 6875, 5750, 0x35, }, - { 75, 6750, 5625, 0x34, }, - { 76, 6625, 5500, 0x33, }, - { 77, 6500, 5375, 0x32, }, - { 78, 6375, 5250, 0x31, }, - { 79, 6250, 5125, 0x30, }, - { 80, 6125, 5000, 0x2f, }, - { 81, 6000, 4875, 0x2e, }, - { 82, 5875, 4750, 0x2d, }, - { 83, 5750, 4625, 0x2c, }, - { 84, 5625, 4500, 0x2b, }, - { 85, 5500, 4375, 0x2a, }, - { 86, 5375, 4250, 0x29, }, - { 87, 5250, 4125, 0x28, }, - { 88, 5125, 4000, 0x27, }, - { 89, 5000, 3875, 0x26, }, - { 90, 4875, 3750, 0x25, }, - { 91, 4750, 3625, 0x24, }, - { 92, 4625, 3500, 0x23, }, - { 93, 4500, 3375, 0x22, }, - { 94, 4375, 3250, 0x21, }, - { 95, 4250, 3125, 0x20, }, - { 96, 4125, 3000, 0x1f, }, - { 97, 4125, 3000, 0x1e, }, - { 98, 4125, 3000, 0x1d, }, - { 99, 4125, 3000, 0x1c, }, - { 100, 4125, 3000, 0x1b, }, - { 101, 4125, 3000, 0x1a, }, - { 102, 4125, 3000, 0x19, }, - { 103, 4125, 3000, 0x18, }, - { 104, 4125, 3000, 0x17, }, - { 105, 4125, 3000, 0x16, }, - { 106, 4125, 3000, 0x15, }, - { 107, 4125, 3000, 0x14, }, - { 108, 4125, 3000, 0x13, }, - { 109, 4125, 3000, 0x12, }, - { 110, 4125, 3000, 0x11, }, - { 111, 4125, 3000, 0x10, }, - { 112, 4125, 3000, 0x0f, }, - { 113, 4125, 3000, 0x0e, }, - { 114, 4125, 3000, 0x0d, }, - { 115, 4125, 3000, 0x0c, }, - { 116, 4125, 3000, 0x0b, }, - { 117, 4125, 3000, 0x0a, }, - { 118, 4125, 3000, 0x09, }, - { 119, 4125, 3000, 0x08, }, - { 120, 1125, 0, 0x07, }, - { 121, 1000, 0, 0x06, }, - { 122, 875, 0, 0x05, }, - { 123, 750, 0, 0x04, }, - { 124, 625, 0, 0x03, }, - { 125, 500, 0, 0x02, }, - { 126, 375, 0, 0x01, }, - { 127, 0, 0, 0x00, }, -}; - -struct cparams { - int i; - int t; - int m; - int c; -}; - -static struct cparams cparams[] = { +static const struct cparams { + u16 i; + u16 t; + u16 m; + u16 c; +} cparams[] = { { 1, 1333, 301, 28664 }, { 1, 1066, 294, 24460 }, { 1, 800, 294, 25192 }, @@ -1607,21 +1467,145 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv) return ((m * x) / 127) - b; } -static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) +static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) { - unsigned long val = 0; - int i; - - for (i = 0; i < ARRAY_SIZE(v_table); i++) { - if (v_table[i].pvid == pxvid) { - if (IS_MOBILE(dev_priv->dev)) - val = v_table[i].vm; - else - val = v_table[i].vd; - } - } - - return val; + static const struct v_table { + u16 vd; /* in .1 mil */ + u16 vm; /* in .1 mil */ + } v_table[] = { + { 0, 0, }, + { 375, 0, }, + { 500, 0, }, + { 625, 0, }, + { 750, 0, }, + { 875, 0, }, + { 1000, 0, }, + { 1125, 0, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4125, 3000, }, + { 4250, 3125, }, + { 4375, 3250, }, + { 4500, 3375, }, + { 4625, 3500, }, + { 4750, 3625, }, + { 4875, 3750, }, + { 5000, 3875, }, + { 5125, 4000, }, + { 5250, 4125, }, + { 5375, 4250, }, + { 5500, 4375, }, + { 5625, 4500, }, + { 5750, 4625, }, + { 5875, 4750, }, + { 6000, 4875, }, + { 6125, 5000, }, + { 6250, 5125, }, + { 6375, 5250, }, + { 6500, 5375, }, + { 6625, 5500, }, + { 6750, 5625, }, + { 6875, 5750, }, + { 7000, 5875, }, + { 7125, 6000, }, + { 7250, 6125, }, + { 7375, 6250, }, + { 
7500, 6375, }, + { 7625, 6500, }, + { 7750, 6625, }, + { 7875, 6750, }, + { 8000, 6875, }, + { 8125, 7000, }, + { 8250, 7125, }, + { 8375, 7250, }, + { 8500, 7375, }, + { 8625, 7500, }, + { 8750, 7625, }, + { 8875, 7750, }, + { 9000, 7875, }, + { 9125, 8000, }, + { 9250, 8125, }, + { 9375, 8250, }, + { 9500, 8375, }, + { 9625, 8500, }, + { 9750, 8625, }, + { 9875, 8750, }, + { 10000, 8875, }, + { 10125, 9000, }, + { 10250, 9125, }, + { 10375, 9250, }, + { 10500, 9375, }, + { 10625, 9500, }, + { 10750, 9625, }, + { 10875, 9750, }, + { 11000, 9875, }, + { 11125, 10000, }, + { 11250, 10125, }, + { 11375, 10250, }, + { 11500, 10375, }, + { 11625, 10500, }, + { 11750, 10625, }, + { 11875, 10750, }, + { 12000, 10875, }, + { 12125, 11000, }, + { 12250, 11125, }, + { 12375, 11250, }, + { 12500, 11375, }, + { 12625, 11500, }, + { 12750, 11625, }, + { 12875, 11750, }, + { 13000, 11875, }, + { 13125, 12000, }, + { 13250, 12125, }, + { 13375, 12250, }, + { 13500, 12375, }, + { 13625, 12500, }, + { 13750, 12625, }, + { 13875, 12750, }, + { 14000, 12875, }, + { 14125, 13000, }, + { 14250, 13125, }, + { 14375, 13250, }, + { 14500, 13375, }, + { 14625, 13500, }, + { 14750, 13625, }, + { 14875, 13750, }, + { 15000, 13875, }, + { 15125, 14000, }, + { 15250, 14125, }, + { 15375, 14250, }, + { 15500, 14375, }, + { 15625, 14500, }, + { 15750, 14625, }, + { 15875, 14750, }, + { 16000, 14875, }, + { 16125, 15000, }, + }; + if (dev_priv->info->is_mobile) + return v_table[pxvid].vm; + else + return v_table[pxvid].vd; } void i915_update_gfx_val(struct drm_i915_private *dev_priv) -- cgit v1.2.3 From b47cf66f315a258c458ed4345c443dba396fb787 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Nov 2010 18:41:50 +0100 Subject: intel-gtt: drop dcache support for i830 and later i830_check_flags already disallows it, so no need to implement it in the write_entry function. Seems to be a remnant from i810 support. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index d2733e526a68..8a30a16c5706 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -896,14 +896,8 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry, { u32 pte_flags = I810_PTE_VALID; - switch (flags) { - case AGP_DCACHE_MEMORY: - pte_flags |= I810_PTE_LOCAL; - break; - case AGP_USER_CACHED_MEMORY: + if (flags == AGP_USER_CACHED_MEMORY) pte_flags |= I830_PTE_SYSTEM_CACHED; - break; - } writel(addr | pte_flags, intel_private.gtt + entry); } -- cgit v1.2.3 From 24a6b387af7cd5d1e0e5d15b15104644a5105de7 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Nov 2010 20:14:15 +0100 Subject: intel-gtt: kill unneeded sandybridge memory types Used for the now dead agp type_to_mask stuff. 
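The removed INTEL_AGP_* constants were internal mask-type indices consumed only by the dead type_to_mask path. The user-visible caching attributes are unaffected by this cleanup; for reference, they survive as follows (quoted from the intel-gtt header as it appears later in this series):

    /* New caching attributes for gen6/sandybridge */
    #define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
    #define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)

    /* flag for GFDT type */
    #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)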
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 8a30a16c5706..2470040023d3 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -59,12 +59,6 @@ static struct gatt_mask intel_i810_masks[] = .type = INTEL_AGP_CACHED_MEMORY} }; -#define INTEL_AGP_UNCACHED_MEMORY 0 -#define INTEL_AGP_CACHED_MEMORY_LLC 1 -#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2 -#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3 -#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4 - struct intel_gtt_driver { unsigned int gen : 8; unsigned int is_g33 : 1; -- cgit v1.2.3 From 625dd9d331d8a1ce5ee4e9924a22f3e55b7ac615 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 4 Nov 2010 20:07:57 +0100 Subject: intel-gtt: switch i81x to the write_entry helpers Initialization is still done with the old code with a few added things sprinkled in to make the intel_fake_agp helper functions work. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 152 +++++++++++++++++-------------------------- 1 file changed, 60 insertions(+), 92 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 2470040023d3..9d17a6d51640 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -101,6 +101,9 @@ static struct _intel_private { dma_addr_t scratch_page_dma; } intel_private; +static int intel_fake_agp_insert_entries(struct agp_memory *mem, + off_t pg_start, int type); + #define INTEL_GTT_GEN intel_private.driver->gen #define IS_G33 intel_private.driver->is_g33 #define IS_PINEVIEW intel_private.driver->is_pineview @@ -176,10 +179,12 @@ static int intel_i810_fetch_size(void) if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) { agp_bridge->current_size = (void *) (values + 1); agp_bridge->aperture_size_idx = 1; + intel_private.base.gtt_total_entries = KB(32) / 4; return values[1].size; } else { agp_bridge->current_size = (void *) (values); agp_bridge->aperture_size_idx = 0; + intel_private.base.gtt_total_entries = KB(64) / 4; return values[0].size; } @@ -206,6 +211,9 @@ static int intel_i810_configure(void) } } + intel_private.gtt = intel_private.registers + I810_PTE_BASE; + intel_private.scratch_page_dma = agp_bridge->scratch_page & PAGE_MASK; + if ((readl(intel_private.registers+I810_DRAM_CTL) & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { /* This will need to be dynamically assigned */ @@ -273,79 +281,27 @@ static void i8xx_destroy_pages(struct page *page) static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, int type) { - int i, j, num_entries; - void *temp; - int ret = -EINVAL; - int mask_type; - - if (mem->page_count == 0) - goto out; - - temp = agp_bridge->current_size; - num_entries = A_SIZE_FIX(temp)->num_entries; - - if ((pg_start + mem->page_count) > num_entries) - goto out_err; - - - for (j = pg_start; j < (pg_start + mem->page_count); j++) { - if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) { - ret = -EBUSY; - goto out_err; - } - } - - if (type != mem->type) - goto out_err; + int i; - mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); + if (type == AGP_DCACHE_MEMORY) { + if ((pg_start + mem->page_count) + > intel_private.num_dcache_entries) + return -EINVAL; - switch (mask_type) { - case AGP_DCACHE_MEMORY: if (!mem->is_flushed) global_cache_flush(); + for (i = pg_start; i < (pg_start + 
mem->page_count); i++) { - writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, - intel_private.registers+I810_PTE_BASE+(i*4)); - } - readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); - break; - case AGP_PHYS_MEMORY: - case AGP_NORMAL_MEMORY: - if (!mem->is_flushed) - global_cache_flush(); - for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - writel(agp_bridge->driver->mask_memory(agp_bridge, - page_to_phys(mem->pages[i]), mask_type), - intel_private.registers+I810_PTE_BASE+(j*4)); + dma_addr_t addr = i << PAGE_SHIFT; + intel_private.driver->write_entry(addr, + i, type); } - readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); - break; - default: - goto out_err; - } - -out: - ret = 0; -out_err: - mem->is_flushed = true; - return ret; -} + readl(intel_private.gtt+i-1); -static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, - int type) -{ - int i; - - if (mem->page_count == 0) return 0; - - for (i = pg_start; i < (mem->page_count + pg_start); i++) { - writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); } - readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); - return 0; + return intel_fake_agp_insert_entries(mem, pg_start, type); } /* @@ -390,29 +346,6 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) return new; } -static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type) -{ - struct agp_memory *new; - - if (type == AGP_DCACHE_MEMORY) { - if (pg_count != intel_private.num_dcache_entries) - return NULL; - - new = agp_create_memory(1); - if (new == NULL) - return NULL; - - new->type = AGP_DCACHE_MEMORY; - new->page_count = pg_count; - new->num_scratch_pages = 0; - agp_free_page_array(new); - return new; - } - if (type == AGP_PHYS_MEMORY) - return alloc_agpphysmem_i8xx(pg_count, type); - return NULL; -} - static void intel_i810_free_by_type(struct agp_memory *curr) { agp_free_key(curr->key); @@ -463,6 +396,23 @@ static int intel_gtt_setup_scratch_page(void) return 0; } +static void i810_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + u32 pte_flags = I810_PTE_VALID; + + switch (flags) { + case AGP_DCACHE_MEMORY: + pte_flags |= I810_PTE_LOCAL; + break; + case AGP_USER_CACHED_MEMORY: + pte_flags |= I830_PTE_SYSTEM_CACHED; + break; + } + + writel(addr | pte_flags, intel_private.gtt + entry); +} + static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = { {128, 32768, 5}, /* The 64M mode still requires a 128k gatt */ @@ -760,7 +710,7 @@ static void intel_gtt_cleanup(void) iounmap(intel_private.gtt); iounmap(intel_private.registers); - + intel_gtt_teardown_scratch_page(); } @@ -889,7 +839,7 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry, unsigned int flags) { u32 pte_flags = I810_PTE_VALID; - + if (flags == AGP_USER_CACHED_MEMORY) pte_flags |= I830_PTE_SYSTEM_CACHED; @@ -1106,6 +1056,22 @@ static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge) static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count, int type) { + struct agp_memory *new; + + if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) { + if (pg_count != intel_private.num_dcache_entries) + return NULL; + + new = agp_create_memory(1); + if (new == NULL) + return NULL; + + new->type = AGP_DCACHE_MEMORY; + new->page_count = pg_count; + new->num_scratch_pages = 0; + agp_free_page_array(new); + return new; + } if (type == AGP_PHYS_MEMORY) return alloc_agpphysmem_i8xx(pg_count, type); /* always return NULL for other allocation types for now 
*/ @@ -1316,8 +1282,8 @@ static const struct agp_bridge_driver intel_810_driver = { .create_gatt_table = agp_generic_create_gatt_table, .free_gatt_table = agp_generic_free_gatt_table, .insert_memory = intel_i810_insert_entries, - .remove_memory = intel_i810_remove_entries, - .alloc_by_type = intel_i810_alloc_by_type, + .remove_memory = intel_fake_agp_remove_entries, + .alloc_by_type = intel_fake_agp_alloc_by_type, .free_by_type = intel_i810_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, @@ -1352,6 +1318,8 @@ static const struct agp_bridge_driver intel_fake_agp_driver = { static const struct intel_gtt_driver i81x_gtt_driver = { .gen = 1, .dma_mask_size = 32, + .check_flags = i830_check_flags, + .write_entry = i810_write_entry, }; static const struct intel_gtt_driver i8xx_gtt_driver = { .gen = 2, @@ -1369,7 +1337,7 @@ static const struct intel_gtt_driver i915_gtt_driver = { .setup = i9xx_setup, .cleanup = i9xx_cleanup, /* i945 is the last gpu to need phys mem (for overlay and cursors). */ - .write_entry = i830_write_entry, + .write_entry = i830_write_entry, .dma_mask_size = 32, .check_flags = i830_check_flags, .chipset_flush = i9xx_chipset_flush, @@ -1557,7 +1525,7 @@ int intel_gmch_probe(struct pci_dev *pdev, if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { bridge->driver = intel_gtt_chipsets[i].gmch_driver; - intel_private.driver = + intel_private.driver = intel_gtt_chipsets[i].gtt_driver; break; } -- cgit v1.2.3 From 820647b97a9cbdd976c7177f1b6047fc1f6dd5c0 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 5 Nov 2010 13:30:14 +0100 Subject: intel-gtt: switch i81x to the common initialization helpers Still a separate agp_bridge_driver because of the i81x-only dedicated vram support. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 198 ++++++++++++++++--------------------------- 1 file changed, 71 insertions(+), 127 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 9d17a6d51640..4b79a7b47522 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -39,26 +39,10 @@ #define USE_PCI_DMA_API 0 #endif -static const struct aper_size_info_fixed intel_i810_sizes[] = -{ - {64, 16384, 4}, - /* The 32M mode still requires a 64k gatt */ - {32, 8192, 4} -}; - #define AGP_DCACHE_MEMORY 1 #define AGP_PHYS_MEMORY 2 #define INTEL_AGP_CACHED_MEMORY 3 -static struct gatt_mask intel_i810_masks[] = -{ - {.mask = I810_PTE_VALID, .type = 0}, - {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY}, - {.mask = I810_PTE_VALID, .type = 0}, - {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED, - .type = INTEL_AGP_CACHED_MEMORY} -}; - struct intel_gtt_driver { unsigned int gen : 8; unsigned int is_g33 : 1; @@ -94,6 +78,7 @@ static struct _intel_private { void __iomem *i9xx_flush_page; void *i8xx_flush_page; }; + char *i81x_gtt_table; struct page *i8xx_page; struct resource ifp_resource; int resource_valid; @@ -163,86 +148,6 @@ static void intel_agp_unmap_memory(struct agp_memory *mem) intel_agp_free_sglist(mem); } -static int intel_i810_fetch_size(void) -{ - u32 smram_miscc; - struct aper_size_info_fixed *values; - - pci_read_config_dword(intel_private.bridge_dev, - I810_SMRAM_MISCC, &smram_miscc); - values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes); - - if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) { - dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n"); - return 0; - } - if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) 
== I810_GFX_MEM_WIN_32M) { - agp_bridge->current_size = (void *) (values + 1); - agp_bridge->aperture_size_idx = 1; - intel_private.base.gtt_total_entries = KB(32) / 4; - return values[1].size; - } else { - agp_bridge->current_size = (void *) (values); - agp_bridge->aperture_size_idx = 0; - intel_private.base.gtt_total_entries = KB(64) / 4; - return values[0].size; - } - - return 0; -} - -static int intel_i810_configure(void) -{ - struct aper_size_info_fixed *current_size; - u32 temp; - int i; - - current_size = A_SIZE_FIX(agp_bridge->current_size); - - if (!intel_private.registers) { - pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp); - temp &= 0xfff80000; - - intel_private.registers = ioremap(temp, 128 * 4096); - if (!intel_private.registers) { - dev_err(&intel_private.pcidev->dev, - "can't remap memory\n"); - return -ENOMEM; - } - } - - intel_private.gtt = intel_private.registers + I810_PTE_BASE; - intel_private.scratch_page_dma = agp_bridge->scratch_page & PAGE_MASK; - - if ((readl(intel_private.registers+I810_DRAM_CTL) - & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { - /* This will need to be dynamically assigned */ - dev_info(&intel_private.pcidev->dev, - "detected 4MB dedicated video ram\n"); - intel_private.num_dcache_entries = 1024; - } - pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp); - agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK); - writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL); - readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ - - if (agp_bridge->driver->needs_scratch_page) { - for (i = 0; i < current_size->num_entries; i++) { - writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); - } - readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */ - } - global_cache_flush(); - return 0; -} - -static void intel_i810_cleanup(void) -{ - writel(0, intel_private.registers+I810_PGETBL_CTL); - readl(intel_private.registers); /* PCI Posting. */ - iounmap(intel_private.registers); -} - static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode) { return; @@ -278,6 +183,46 @@ static void i8xx_destroy_pages(struct page *page) atomic_dec(&agp_bridge->current_memory_agp); } +#define I810_GTT_ORDER 4 +static int i810_setup(void) +{ + u32 reg_addr; + char *gtt_table; + + /* i81x does not preallocate the gtt. It's always 64kb in size. 
*/ + gtt_table = alloc_gatt_pages(I810_GTT_ORDER); + if (gtt_table == NULL) + return -ENOMEM; + intel_private.i81x_gtt_table = gtt_table; + + pci_read_config_dword(intel_private.pcidev, I810_MMADDR, ®_addr); + reg_addr &= 0xfff80000; + + intel_private.registers = ioremap(reg_addr, KB(64)); + if (!intel_private.registers) + return -ENOMEM; + + writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED, + intel_private.registers+I810_PGETBL_CTL); + + intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; + + if ((readl(intel_private.registers+I810_DRAM_CTL) + & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) { + dev_info(&intel_private.pcidev->dev, + "detected 4MB dedicated video ram\n"); + intel_private.num_dcache_entries = 1024; + } + + return 0; +} + +static void i810_cleanup(void) +{ + writel(0, intel_private.registers+I810_PGETBL_CTL); + free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER); +} + static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, int type) { @@ -363,13 +308,6 @@ static void intel_i810_free_by_type(struct agp_memory *curr) kfree(curr); } -static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge, - dma_addr_t addr, int type) -{ - /* Type checking must be done elsewhere */ - return addr | bridge->driver->masks[type].mask; -} - static int intel_gtt_setup_scratch_page(void) { struct page *page; @@ -414,9 +352,9 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry, } static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = { + {32, 8192, 3}, + {64, 16384, 4}, {128, 32768, 5}, - /* The 64M mode still requires a 128k gatt */ - {64, 16384, 5}, {256, 65536, 6}, {512, 131072, 7}, }; @@ -429,6 +367,9 @@ static unsigned int intel_gtt_stolen_size(void) static const int ddt[4] = { 0, 16, 32, 64 }; unsigned int stolen_size = 0; + if (INTEL_GTT_GEN == 1) + return 0; /* no stolen mem on i81x */ + pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); @@ -677,7 +618,18 @@ static unsigned int intel_gtt_mappable_entries(void) { unsigned int aperture_size; - if (INTEL_GTT_GEN == 2) { + if (INTEL_GTT_GEN == 1) { + u32 smram_miscc; + + pci_read_config_dword(intel_private.bridge_dev, + I810_SMRAM_MISCC, &smram_miscc); + + if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) + == I810_GFX_MEM_WIN_32M) + aperture_size = MB(32); + else + aperture_size = MB(64); + } else if (INTEL_GTT_GEN == 2) { u16 gmch_ctrl; pci_read_config_word(intel_private.bridge_dev, @@ -751,14 +703,7 @@ static int intel_gtt_init(void) global_cache_flush(); /* FIXME: ? 
*/ - /* we have to call this as early as possible after the MMIO base address is known */ intel_private.base.stolen_size = intel_gtt_stolen_size(); - if (intel_private.base.stolen_size == 0) { - intel_private.driver->cleanup(); - iounmap(intel_private.registers); - iounmap(intel_private.gtt); - return -ENOMEM; - } ret = intel_gtt_setup_scratch_page(); if (ret != 0) { @@ -851,7 +796,7 @@ static bool intel_enable_gtt(void) u32 gma_addr; u8 __iomem *reg; - if (INTEL_GTT_GEN == 2) + if (INTEL_GTT_GEN <= 2) pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &gma_addr); else @@ -1268,19 +1213,16 @@ static int i9xx_setup(void) static const struct agp_bridge_driver intel_810_driver = { .owner = THIS_MODULE, - .aperture_sizes = intel_i810_sizes, .size_type = FIXED_APER_SIZE, - .num_aperture_sizes = 2, - .needs_scratch_page = true, - .configure = intel_i810_configure, - .fetch_size = intel_i810_fetch_size, - .cleanup = intel_i810_cleanup, - .mask_memory = intel_i810_mask_memory, - .masks = intel_i810_masks, + .aperture_sizes = intel_fake_agp_sizes, + .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), + .configure = intel_fake_agp_configure, + .fetch_size = intel_fake_agp_fetch_size, + .cleanup = intel_gtt_cleanup, .agp_enable = intel_fake_agp_enable, .cache_flush = global_cache_flush, - .create_gatt_table = agp_generic_create_gatt_table, - .free_gatt_table = agp_generic_free_gatt_table, + .create_gatt_table = intel_fake_agp_create_gatt_table, + .free_gatt_table = intel_fake_agp_free_gatt_table, .insert_memory = intel_i810_insert_entries, .remove_memory = intel_fake_agp_remove_entries, .alloc_by_type = intel_fake_agp_alloc_by_type, @@ -1289,7 +1231,6 @@ static const struct agp_bridge_driver intel_810_driver = { .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, - .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; static const struct agp_bridge_driver intel_fake_agp_driver = { @@ -1317,7 +1258,10 @@ static const struct agp_bridge_driver intel_fake_agp_driver = { static const struct intel_gtt_driver i81x_gtt_driver = { .gen = 1, + .has_pgtbl_enable = 1, .dma_mask_size = 32, + .setup = i810_setup, + .cleanup = i810_cleanup, .check_flags = i830_check_flags, .write_entry = i810_write_entry, }; @@ -1549,8 +1493,8 @@ int intel_gmch_probe(struct pci_dev *pdev, pci_set_consistent_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)); - if (bridge->driver == &intel_810_driver) - return 1; + /*if (bridge->driver == &intel_810_driver) + return 1;*/ if (intel_gtt_init() != 0) return 0; -- cgit v1.2.3 From ff26860fb53f2dcfaaaf1bf017b09dbdfddff5ee Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 5 Nov 2010 15:43:35 +0100 Subject: intel-gtt: fold i81x-only dcache support into the generic driver Now the intel-gtt.c rewrite is complete! 
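Reduced to its shape, the consolidated insert path now looks like this (a condensed sketch of the code in the diff below, not a verbatim excerpt):

    static int intel_fake_agp_insert_entries(struct agp_memory *mem,
                                             off_t pg_start, int type)
    {
            /* i81x dedicated video ram ("dcache") is the one remaining
             * special case; everything else goes through the common
             * check_flags/write_entry machinery. */
            if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
                    return i810_insert_dcache_entries(mem, pg_start, type);

            /* ... common checks and write_entry loop ... */
    }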
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 150 ++++++++++++++++++------------------------- 1 file changed, 61 insertions(+), 89 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 4b79a7b47522..72267c801637 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -86,9 +86,6 @@ static struct _intel_private { dma_addr_t scratch_page_dma; } intel_private; -static int intel_fake_agp_insert_entries(struct agp_memory *mem, - off_t pg_start, int type); - #define INTEL_GTT_GEN intel_private.driver->gen #define IS_G33 intel_private.driver->is_g33 #define IS_PINEVIEW intel_private.driver->is_pineview @@ -223,30 +220,26 @@ static void i810_cleanup(void) free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER); } -static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, - int type) +static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start, + int type) { int i; - if (type == AGP_DCACHE_MEMORY) { - if ((pg_start + mem->page_count) - > intel_private.num_dcache_entries) - return -EINVAL; - - if (!mem->is_flushed) - global_cache_flush(); + if ((pg_start + mem->page_count) + > intel_private.num_dcache_entries) + return -EINVAL; - for (i = pg_start; i < (pg_start + mem->page_count); i++) { - dma_addr_t addr = i << PAGE_SHIFT; - intel_private.driver->write_entry(addr, - i, type); - } - readl(intel_private.gtt+i-1); + if (!mem->is_flushed) + global_cache_flush(); - return 0; + for (i = pg_start; i < (pg_start + mem->page_count); i++) { + dma_addr_t addr = i << PAGE_SHIFT; + intel_private.driver->write_entry(addr, + i, type); } + readl(intel_private.gtt+i-1); - return intel_fake_agp_insert_entries(mem, pg_start, type); + return 0; } /* @@ -935,6 +928,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem, int i, j; int ret = -EINVAL; + if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY) + return i810_insert_dcache_entries(mem, pg_start, type); + if (mem->page_count == 0) goto out; @@ -1211,28 +1207,6 @@ static int i9xx_setup(void) return 0; } -static const struct agp_bridge_driver intel_810_driver = { - .owner = THIS_MODULE, - .size_type = FIXED_APER_SIZE, - .aperture_sizes = intel_fake_agp_sizes, - .num_aperture_sizes = ARRAY_SIZE(intel_fake_agp_sizes), - .configure = intel_fake_agp_configure, - .fetch_size = intel_fake_agp_fetch_size, - .cleanup = intel_gtt_cleanup, - .agp_enable = intel_fake_agp_enable, - .cache_flush = global_cache_flush, - .create_gatt_table = intel_fake_agp_create_gatt_table, - .free_gatt_table = intel_fake_agp_free_gatt_table, - .insert_memory = intel_i810_insert_entries, - .remove_memory = intel_fake_agp_remove_entries, - .alloc_by_type = intel_fake_agp_alloc_by_type, - .free_by_type = intel_i810_free_by_type, - .agp_alloc_page = agp_generic_alloc_page, - .agp_alloc_pages = agp_generic_alloc_pages, - .agp_destroy_page = agp_generic_destroy_page, - .agp_destroy_pages = agp_generic_destroy_pages, -}; - static const struct agp_bridge_driver intel_fake_agp_driver = { .owner = THIS_MODULE, .size_type = FIXED_APER_SIZE, @@ -1352,93 +1326,92 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = { static const struct intel_gtt_driver_description { unsigned int gmch_chip_id; char *name; - const struct agp_bridge_driver *gmch_driver; const struct intel_gtt_driver *gtt_driver; } intel_gtt_chipsets[] = { - { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver, + { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", 
&i81x_gtt_driver}, - { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver, + { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &i81x_gtt_driver}, - { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver, + { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &i81x_gtt_driver}, - { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver, + { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &i81x_gtt_driver}, { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", - &intel_fake_agp_driver, &i8xx_gtt_driver}, + &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_82845G_IG, "830M", - &intel_fake_agp_driver, &i8xx_gtt_driver}, + &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_82854_IG, "854", - &intel_fake_agp_driver, &i8xx_gtt_driver}, + &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", - &intel_fake_agp_driver, &i8xx_gtt_driver}, + &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_82865_IG, "865", - &intel_fake_agp_driver, &i8xx_gtt_driver}, + &i8xx_gtt_driver}, { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", - &intel_fake_agp_driver, &i915_gtt_driver }, + &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", - &intel_fake_agp_driver, &i915_gtt_driver }, + &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", - &intel_fake_agp_driver, &i915_gtt_driver }, + &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", - &intel_fake_agp_driver, &i915_gtt_driver }, + &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", - &intel_fake_agp_driver, &i915_gtt_driver }, + &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", - &intel_fake_agp_driver, &i915_gtt_driver }, + &i915_gtt_driver }, { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", - &intel_fake_agp_driver, &i965_gtt_driver }, + &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", - &intel_fake_agp_driver, &i965_gtt_driver }, + &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", - &intel_fake_agp_driver, &i965_gtt_driver }, + &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", - &intel_fake_agp_driver, &i965_gtt_driver }, + &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", - &intel_fake_agp_driver, &i965_gtt_driver }, + &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", - &intel_fake_agp_driver, &i965_gtt_driver }, + &i965_gtt_driver }, { PCI_DEVICE_ID_INTEL_G33_IG, "G33", - &intel_fake_agp_driver, &g33_gtt_driver }, + &g33_gtt_driver }, { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", - &intel_fake_agp_driver, &g33_gtt_driver }, + &g33_gtt_driver }, { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", - &intel_fake_agp_driver, &g33_gtt_driver }, + &g33_gtt_driver }, { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", - &intel_fake_agp_driver, &pineview_gtt_driver }, + &pineview_gtt_driver }, { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", - &intel_fake_agp_driver, &pineview_gtt_driver }, + &pineview_gtt_driver }, { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", - &intel_fake_agp_driver, &g4x_gtt_driver }, + &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", - &intel_fake_agp_driver, &g4x_gtt_driver }, + &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", - &intel_fake_agp_driver, &g4x_gtt_driver }, + &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", - &intel_fake_agp_driver, &g4x_gtt_driver }, + &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_B43_IG, "B43", - &intel_fake_agp_driver, &g4x_gtt_driver }, + &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43", - &intel_fake_agp_driver, &g4x_gtt_driver }, + &g4x_gtt_driver }, { PCI_DEVICE_ID_INTEL_G41_IG, "G41", - &intel_fake_agp_driver, &g4x_gtt_driver }, + &g4x_gtt_driver 
}, { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, - "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver }, + "HD Graphics", &ironlake_gtt_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, - "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver }, + "HD Graphics", &ironlake_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, - "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + "Sandybridge", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, - "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + "Sandybridge", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, - "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + "Sandybridge", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, - "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + "Sandybridge", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, - "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + "Sandybridge", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, - "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + "Sandybridge", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, - "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, + "Sandybridge", &sandybridge_gtt_driver }, { 0, NULL, NULL } }; @@ -1463,21 +1436,20 @@ int intel_gmch_probe(struct pci_dev *pdev, struct agp_bridge_data *bridge) { int i, mask; - bridge->driver = NULL; + intel_private.driver = NULL; for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { - bridge->driver = - intel_gtt_chipsets[i].gmch_driver; intel_private.driver = intel_gtt_chipsets[i].gtt_driver; break; } } - if (!bridge->driver) + if (!intel_private.driver) return 0; + bridge->driver = &intel_fake_agp_driver; bridge->dev_private_data = &intel_private; bridge->dev = pdev; -- cgit v1.2.3 From 23ed992a5ebe6964ebe312b54142fbc5e8185cdc Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 5 Nov 2010 18:04:52 +0100 Subject: drm/i915|intel-gtt: consolidate intel-gtt.h headers ... and a few other defines. 
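After this consolidation a GTT consumer needs only the single drm header. A minimal usage sketch, assuming the usual kernel definitions of PAGE_SHIFT (the example function itself is hypothetical):

    #include <drm/intel-gtt.h>

    /* Hypothetical example: size of the CPU-mappable aperture in bytes,
     * computed from the consolidated intel_gtt descriptor. */
    static unsigned long example_mappable_bytes(void)
    {
            const struct intel_gtt *gtt = intel_gtt_get();

            return (unsigned long)gtt->gtt_mappable_entries << PAGE_SHIFT;
    }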
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 5 ----- drivers/gpu/drm/i915/i915_gem.c | 1 - include/drm/intel-gtt.h | 12 ++++++++++++ include/linux/intel-gtt.h | 20 -------------------- 4 files changed, 12 insertions(+), 26 deletions(-) delete mode 100644 include/linux/intel-gtt.h diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 72267c801637..291ac5113576 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -24,7 +24,6 @@ #include #include "agp.h" #include "intel-agp.h" -#include #include /* @@ -39,10 +38,6 @@ #define USE_PCI_DMA_API 0 #endif -#define AGP_DCACHE_MEMORY 1 -#define AGP_PHYS_MEMORY 2 -#define INTEL_AGP_CACHED_MEMORY 3 - struct intel_gtt_driver { unsigned int gen : 8; unsigned int is_g33 : 1; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index bf05ac414b1f..68492357658c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -34,7 +34,6 @@ #include #include #include -#include struct change_domains { uint32_t invalidate_domains; diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index 020f8aab7e5b..9f91cbe35157 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -13,5 +13,17 @@ const struct intel_gtt { unsigned int gtt_mappable_entries; } *intel_gtt_get(void); + +/* Special gtt memory types */ +#define AGP_DCACHE_MEMORY 1 +#define AGP_PHYS_MEMORY 2 + +/* New caching attributes for gen6/sandybridge */ +#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2) +#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4) + +/* flag for GFDT type */ +#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) + #endif diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h deleted file mode 100644 index 1d19ab2afa39..000000000000 --- a/include/linux/intel-gtt.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Common Intel AGPGART and GTT definitions. - */ -#ifndef _INTEL_GTT_H -#define _INTEL_GTT_H - -#include - -/* This is for Intel only GTT controls. 
- * - * Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only - */ - -#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2) -#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4) - -/* flag for GFDT type */ -#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) - -#endif -- cgit v1.2.3 From 40ce6575102b23e432932b5ce41c44bf7cc5023b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 5 Nov 2010 18:12:18 +0100 Subject: drm/i915/gtt: call chipset flush directly Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 7 +++++++ drivers/gpu/drm/i915/i915_gem.c | 9 ++++----- include/drm/intel-gtt.h | 2 +- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 291ac5113576..8e2e208c925b 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1476,6 +1476,13 @@ const struct intel_gtt *intel_gtt_get(void) } EXPORT_SYMBOL(intel_gtt_get); +void intel_gtt_chipset_flush(void) +{ + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); +} +EXPORT_SYMBOL(intel_gtt_chipset_flush); + void intel_gmch_remove(struct pci_dev *pdev) { if (intel_private.pcidev) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 68492357658c..b663d2da1db3 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2192,7 +2192,7 @@ i915_gem_flush(struct drm_device *dev, drm_i915_private_t *dev_priv = dev->dev_private; if (flush_domains & I915_GEM_DOMAIN_CPU) - drm_agp_chipset_flush(dev); + intel_gtt_chipset_flush(); if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { if (flush_rings & RING_RENDER) @@ -2920,14 +2920,13 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) { - struct drm_device *dev = obj->dev; uint32_t old_write_domain; if (obj->write_domain != I915_GEM_DOMAIN_CPU) return; i915_gem_clflush_object(obj); - drm_agp_chipset_flush(dev); + intel_gtt_chipset_flush(); old_write_domain = obj->write_domain; obj->write_domain = 0; @@ -5069,7 +5068,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, page_cache_release(page); } } - drm_agp_chipset_flush(dev); + intel_gtt_chipset_flush(); obj_priv->phys_obj->cur_obj = NULL; obj_priv->phys_obj = NULL; @@ -5161,7 +5160,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, return -EFAULT; } - drm_agp_chipset_flush(dev); + intel_gtt_chipset_flush(); return 0; } diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index 9f91cbe35157..c35817a11690 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -13,6 +13,7 @@ const struct intel_gtt { unsigned int gtt_mappable_entries; } *intel_gtt_get(void); +void intel_gtt_chipset_flush(void); /* Special gtt memory types */ #define AGP_DCACHE_MEMORY 1 @@ -26,4 +27,3 @@ const struct intel_gtt { #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) #endif - -- cgit v1.2.3 From 4af72e2865a23ac090884a421bd1a8b19e247a22 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 5 Nov 2010 18:13:43 +0100 Subject: drm: kill drm_agp_chipset_flush No longer used. 
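For reference, the conversion in the preceding patch swaps the agp bridge indirection for a direct call into intel-gtt; schematically (an illustrative summary of the hunks above, not an additional change):

	/* before: bounce through the agp bridge vfunc */
	drm_agp_chipset_flush(dev);

	/* after: call the gtt driver directly */
	intel_gtt_chipset_flush();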
Cc: Dave Airlie Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/drm_agpsupport.c | 6 ------ include/drm/drmP.h | 1 - 2 files changed, 7 deletions(-) diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c index 252fdb98b73a..0cb2ba50af53 100644 --- a/drivers/gpu/drm/drm_agpsupport.c +++ b/drivers/gpu/drm/drm_agpsupport.c @@ -466,10 +466,4 @@ drm_agp_bind_pages(struct drm_device *dev, } EXPORT_SYMBOL(drm_agp_bind_pages); -void drm_agp_chipset_flush(struct drm_device *dev) -{ - agp_flush_chipset(dev->agp->bridge); -} -EXPORT_SYMBOL(drm_agp_chipset_flush); - #endif /* __OS_HAS_AGP */ diff --git a/include/drm/drmP.h b/include/drm/drmP.h index d4bc0f5cab8f..628f76772d22 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1319,7 +1319,6 @@ extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern void drm_agp_chipset_flush(struct drm_device *dev); /* Stub support (drm_stub.h) */ extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, -- cgit v1.2.3 From f050a8abbda0efcd597c6b1825e3b9ce4d613383 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 5 Nov 2010 18:40:56 +0100 Subject: agp: kill agp_flush_chipset and corresponding ioctl The intel drm calls the chipset functions now directly. Userspace never called the corresponding ioctl, hence it can be killed, too. Cc: Dave Airlie Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/agp.h | 1 - drivers/char/agp/compat_ioctl.c | 1 - drivers/char/agp/compat_ioctl.h | 1 - drivers/char/agp/frontend.c | 8 -------- drivers/char/agp/generic.c | 7 ------- drivers/char/agp/intel-gtt.c | 6 ------ include/linux/agp_backend.h | 1 - 7 files changed, 25 deletions(-) diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h index 5259065f3c79..3e67ddde9e16 100644 --- a/drivers/char/agp/agp.h +++ b/drivers/char/agp/agp.h @@ -120,7 +120,6 @@ struct agp_bridge_driver { void (*agp_destroy_page)(struct page *, int flags); void (*agp_destroy_pages)(struct agp_memory *); int (*agp_type_to_mask_type) (struct agp_bridge_data *, int); - void (*chipset_flush)(struct agp_bridge_data *); }; struct agp_bridge_data { diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c index 9d2c97a69cdd..a48e05b31593 100644 --- a/drivers/char/agp/compat_ioctl.c +++ b/drivers/char/agp/compat_ioctl.c @@ -276,7 +276,6 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; case AGPIOC_CHIPSET_FLUSH32: - ret_val = agpioc_chipset_flush_wrap(curr_priv); break; } diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h index 0c9678ac0371..f30e0fd97963 100644 --- a/drivers/char/agp/compat_ioctl.h +++ b/drivers/char/agp/compat_ioctl.h @@ -102,6 +102,5 @@ void agp_free_memory_wrap(struct agp_memory *memory); struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type); struct agp_memory *agp_find_mem_by_key(int key); struct agp_client *agp_find_client_by_pid(pid_t id); -int agpioc_chipset_flush_wrap(struct agp_file_private *priv); #endif /* _AGP_COMPAT_H */ diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c index 3cb4539a98b2..2e044338753c 100644 --- a/drivers/char/agp/frontend.c +++ b/drivers/char/agp/frontend.c @@ -957,13 +957,6 @@ static int agpioc_unbind_wrap(struct agp_file_private *priv, void 
__user *arg) return agp_unbind_memory(memory); } -int agpioc_chipset_flush_wrap(struct agp_file_private *priv) -{ - DBG(""); - agp_flush_chipset(agp_bridge); - return 0; -} - static long agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -1039,7 +1032,6 @@ static long agp_ioctl(struct file *file, break; case AGPIOC_CHIPSET_FLUSH: - ret_val = agpioc_chipset_flush_wrap(curr_priv); break; } diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index 4956f1c8f9d5..78bc8de0f234 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -81,13 +81,6 @@ static int agp_get_key(void) return -1; } -void agp_flush_chipset(struct agp_bridge_data *bridge) -{ - if (bridge->driver->chipset_flush) - bridge->driver->chipset_flush(bridge); -} -EXPORT_SYMBOL(agp_flush_chipset); - /* * Use kmalloc if possible for the page list. Otherwise fall back to * vmalloc. This speeds things up and also saves memory for small AGP diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 8e2e208c925b..1603e4f8ae73 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -984,11 +984,6 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem, return 0; } -static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge) -{ - intel_private.driver->chipset_flush(); -} - static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count, int type) { @@ -1222,7 +1217,6 @@ static const struct agp_bridge_driver intel_fake_agp_driver = { .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, - .chipset_flush = intel_fake_agp_chipset_flush, }; static const struct intel_gtt_driver i81x_gtt_driver = { diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h index 09ea4a1e9505..a479b4885d25 100644 --- a/include/linux/agp_backend.h +++ b/include/linux/agp_backend.h @@ -106,6 +106,5 @@ extern int agp_rebind_memory(void); extern void agp_enable(struct agp_bridge_data *, u32); extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); extern void agp_backend_release(struct agp_bridge_data *); -extern void agp_flush_chipset(struct agp_bridge_data *); #endif /* _AGP_BACKEND_H */ -- cgit v1.2.3 From 93a37f20eabeea4039130527b07453038c07f471 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 5 Nov 2010 20:24:53 +0100 Subject: drm/i915: track objects in the gtt This is required to restore gtt mappings on resume when agp is gone. The right way to do this would be to make struct drm_mm_node embeddable and use the allocation list maintained by the drm memory manager. But that's a bigger project. Getting rid of the per bo agp_mem will save more memory than this wastes, anyway. Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 4 ++++ drivers/gpu/drm/i915/i915_gem.c | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b62ff5d6fa7e..67ec046aeb90 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -547,6 +547,9 @@ typedef struct drm_i915_private { struct drm_mm stolen; /** Memory allocator for GTT */ struct drm_mm gtt_space; + /** List of all objects in gtt_space. 
Used to restore gtt + * mappings on resume */ + struct list_head gtt_list; /** End of mappable part of GTT */ unsigned long gtt_mappable_end; @@ -714,6 +717,7 @@ struct drm_i915_gem_object { /** Current space allocated to this object in the GTT, if any. */ struct drm_mm_node *gtt_space; + struct list_head gtt_list; /** This object's place on the active/flushing/inactive lists */ struct list_head ring_list; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b663d2da1db3..07e1b25f4a8a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -95,6 +95,7 @@ static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv, min_t(size_t, obj->gtt_space->size, dev_priv->mm.gtt_mappable_end - obj->gtt_offset); } + list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); } static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv, @@ -107,6 +108,7 @@ static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv, min_t(size_t, obj->gtt_space->size, dev_priv->mm.gtt_mappable_end - obj->gtt_offset); } + list_del_init(&obj->gtt_list); } /** @@ -4604,6 +4606,7 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, obj->base.driver_private = NULL; obj->fence_reg = I915_FENCE_REG_NONE; INIT_LIST_HEAD(&obj->mm_list); + INIT_LIST_HEAD(&obj->gtt_list); INIT_LIST_HEAD(&obj->ring_list); INIT_LIST_HEAD(&obj->gpu_write_list); obj->madv = I915_MADV_WILLNEED; @@ -4916,6 +4919,7 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.pinned_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); + INIT_LIST_HEAD(&dev_priv->mm.gtt_list); init_ring_lists(&dev_priv->render_ring); init_ring_lists(&dev_priv->bsd_ring); init_ring_lists(&dev_priv->blt_ring); -- cgit v1.2.3 From 76aaf22016caa7764f40e792aaca7b4918312b22 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 5 Nov 2010 22:23:30 +0100 Subject: drm/i915: restore gtt on resume in the drm instead of in intel-gtt.ko This still uses the agp functions to actually reinstate the mappings (with a gross hack to make agp cooperate), but it wires everything up correctly for the switchover. The call to agp_rebind_memory can be dropped because all non-kms drivers do all their rebinding on EnterVT. v2: Be more paranoid and flush the chipset cache after restoring gtt mappings. 
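In outline, the resume path added below boils down to the following sketch (illustrative only; the i915_gem_gtt.c hunk is authoritative, and clearing is_bound is the "gross hack" mentioned above):

	list_for_each_entry(obj_priv, &dev_priv->mm.gtt_list, gtt_list) {
		/* force agp to rewrite the GTT entries for this object */
		obj_priv->agp_mem->is_bound = false;
		agp_bind_memory(obj_priv->agp_mem,
				obj_priv->gtt_space->start / PAGE_SIZE);
	}
	/* v2: be paranoid and flush the chipset cache */
	intel_gtt_chipset_flush();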
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-agp.c | 5 ---- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/i915_drv.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 3 +++ drivers/gpu/drm/i915/i915_gem_gtt.c | 49 +++++++++++++++++++++++++++++++++++++ 5 files changed, 54 insertions(+), 5 deletions(-) create mode 100644 drivers/gpu/drm/i915/i915_gem_gtt.c diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index e72f49d52202..07e9796fead7 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -828,14 +828,9 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev) static int agp_intel_resume(struct pci_dev *pdev) { struct agp_bridge_data *bridge = pci_get_drvdata(pdev); - int ret_val; bridge->driver->configure(); - ret_val = agp_rebind_memory(); - if (ret_val != 0) - return ret_val; - return 0; } #endif diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index fdc833d5cc7b..b3cdb4fe46e0 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -8,6 +8,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ i915_suspend.o \ i915_gem.o \ i915_gem_debug.o \ + i915_gem_gtt.o \ i915_gem_evict.o \ i915_gem_tiling.o \ i915_trace_points.o \ diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 8c5541950b8d..1a15b7886b8c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -310,6 +310,7 @@ static int i915_drm_thaw(struct drm_device *dev) /* KMS EnterVT equivalent */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { mutex_lock(&dev->struct_mutex); + i915_gem_restore_gtt_mappings(dev); dev_priv->mm.suspended = 0; error = i915_gem_init_ringbuffer(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 67ec046aeb90..b6b5d08c9956 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1141,6 +1141,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev, void i915_gem_free_all_phys_object(struct drm_device *dev); void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); +/* i915_gem_gtt.c */ +void i915_gem_restore_gtt_mappings(struct drm_device *dev); + /* i915_gem_evict.c */ int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment, bool mappable); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c new file mode 100644 index 000000000000..5f21d0a962c4 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -0,0 +1,49 @@ +/* + * Copyright © 2010 Daniel Vetter + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_drv.h" + +void i915_gem_restore_gtt_mappings(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + int ret; + + list_for_each_entry(obj_priv, + &dev_priv->mm.gtt_list, + gtt_list) { + /* Hack to force agp to reinsert buffer object. */ + obj_priv->agp_mem->is_bound = false; + ret = agp_bind_memory(obj_priv->agp_mem, obj_priv->gtt_space->start / PAGE_SIZE); + BUG_ON(ret != 0); + } + + /* Be paranoid and flush the chipset cache. */ + intel_gtt_chipset_flush(); +} -- cgit v1.2.3 From cb16b67b5cb33b7d6732e0c416d29d933eea13ce Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 5 Nov 2010 22:27:10 +0100 Subject: agp: kill agp_rebind_memory Its only user, intel-gtt.c is now gone. Cc: Dave Airlie Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/generic.c | 20 -------------------- include/linux/agp_backend.h | 1 - 2 files changed, 21 deletions(-) diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index 78bc8de0f234..012cba0d6d96 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -480,26 +480,6 @@ int agp_unbind_memory(struct agp_memory *curr) } EXPORT_SYMBOL(agp_unbind_memory); -/** - * agp_rebind_emmory - Rewrite the entire GATT, useful on resume - */ -int agp_rebind_memory(void) -{ - struct agp_memory *curr; - int ret_val = 0; - - spin_lock(&agp_bridge->mapped_lock); - list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) { - ret_val = curr->bridge->driver->insert_memory(curr, - curr->pg_start, - curr->type); - if (ret_val != 0) - break; - } - spin_unlock(&agp_bridge->mapped_lock); - return ret_val; -} -EXPORT_SYMBOL(agp_rebind_memory); /* End - Routines for handling swapping of agp_memory into the GATT */ diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h index a479b4885d25..eaf6cd75a1b1 100644 --- a/include/linux/agp_backend.h +++ b/include/linux/agp_backend.h @@ -102,7 +102,6 @@ extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); extern int agp_bind_memory(struct agp_memory *, off_t); extern int agp_unbind_memory(struct agp_memory *); -extern int agp_rebind_memory(void); extern void agp_enable(struct agp_bridge_data *, u32); extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); extern void agp_backend_release(struct agp_bridge_data *); -- cgit v1.2.3 From 7c2e6fdf452cddeff6a8ee5156edba39e53246fc Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 6 Nov 2010 10:10:47 +0100 Subject: drm/i915: move gtt handling to i915_gem_gtt.c No more drm_*_agp in i915_gem.c! 
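The whole interface i915_gem.c now uses is the pair of entry points below (prototypes exactly as added to i915_drv.h in this patch):

	/* i915_gem_gtt.c */
	int i915_gem_gtt_bind_object(struct drm_gem_object *obj);
	void i915_gem_gtt_unbind_object(struct drm_gem_object *obj);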
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_gem.c | 14 +++----------- drivers/gpu/drm/i915/i915_gem_gtt.c | 28 ++++++++++++++++++++++++++++ 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b6b5d08c9956..b3bfab93e14c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1143,6 +1143,8 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); /* i915_gem_gtt.c */ void i915_gem_restore_gtt_mappings(struct drm_device *dev); +int i915_gem_gtt_bind_object(struct drm_gem_object *obj); +void i915_gem_gtt_unbind_object(struct drm_gem_object *obj); /* i915_gem_evict.c */ int i915_gem_evict_something(struct drm_device *dev, int min_size, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 07e1b25f4a8a..3cac366b3053 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2288,8 +2288,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) if (obj_priv->fence_reg != I915_FENCE_REG_NONE) i915_gem_clear_fence_reg(obj); - drm_unbind_agp(obj_priv->agp_mem); - drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); + i915_gem_gtt_unbind_object(obj); i915_gem_object_put_pages_gtt(obj); @@ -2808,15 +2807,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, return ret; } - /* Create an AGP memory structure pointing at our pages, and bind it - * into the GTT. - */ - obj_priv->agp_mem = drm_agp_bind_pages(dev, - obj_priv->pages, - obj->size >> PAGE_SHIFT, - obj_priv->gtt_space->start, - obj_priv->agp_type); - if (obj_priv->agp_mem == NULL) { + ret = i915_gem_gtt_bind_object(obj); + if (ret) { i915_gem_object_put_pages_gtt(obj); drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 5f21d0a962c4..f78c15f9e7f2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -47,3 +47,31 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) /* Be paranoid and flush the chipset cache. */ intel_gtt_chipset_flush(); } + +int i915_gem_gtt_bind_object(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + /* Create an AGP memory structure pointing at our pages, and bind it + * into the GTT. + */ + obj_priv->agp_mem = drm_agp_bind_pages(dev, + obj_priv->pages, + obj->size >> PAGE_SHIFT, + obj_priv->gtt_space->start, + obj_priv->agp_type); + + if (obj_priv->agp_mem) + return 0; + else + return -ENOMEM; +} + +void i915_gem_gtt_unbind_object(struct drm_gem_object *obj) +{ + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + drm_unbind_agp(obj_priv->agp_mem); + drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); +} -- cgit v1.2.3 From 4080775b60cc26044e7c4aba5e76e5041b0d7004 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 6 Nov 2010 11:18:58 +0100 Subject: intel-gtt: export api for drm/i915 Just some minor shuffling to get rid of any agp traces in the exported functions. 
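Taken together, the exported entry points let a buffer object be bound without any agp_memory structure. A minimal usage sketch (error handling elided; the real consumer is wired up in the "no more agp for gem" patch later in this series):

	struct scatterlist *sg_list;
	int num_sg;

	/* dma-remapped path (needs_dmar); otherwise intel_gtt_insert_pages()
	 * writes the entries from struct page pointers directly */
	if (intel_gtt_map_memory(pages, num_entries, &sg_list, &num_sg) == 0)
		intel_gtt_insert_sg_entries(sg_list, num_sg, pg_start, flags);

	/* unbind: point the range back at the scratch page, then unmap */
	intel_gtt_clear_range(pg_start, num_entries);
	intel_gtt_unmap_memory(sg_list, num_sg);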
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 120 ++++++++++++++++++++++++------------------- include/drm/intel-gtt.h | 12 +++++ 2 files changed, 80 insertions(+), 52 deletions(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 1603e4f8ae73..5a2b7360f5be 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -87,41 +87,29 @@ static struct _intel_private { #define IS_IRONLAKE intel_private.driver->is_ironlake #define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable -static void intel_agp_free_sglist(struct agp_memory *mem) -{ - struct sg_table st; - - st.sgl = mem->sg_list; - st.orig_nents = st.nents = mem->page_count; - - sg_free_table(&st); - - mem->sg_list = NULL; - mem->num_sg = 0; -} - -static int intel_agp_map_memory(struct agp_memory *mem) +int intel_gtt_map_memory(struct page **pages, unsigned int num_entries, + struct scatterlist **sg_list, int *num_sg) { struct sg_table st; struct scatterlist *sg; int i; - if (mem->sg_list) + if (*sg_list) return 0; /* already mapped (for e.g. resume */ - DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); + DBG("try mapping %lu pages\n", (unsigned long)num_entries); - if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) + if (sg_alloc_table(&st, num_entries, GFP_KERNEL)) goto err; - mem->sg_list = sg = st.sgl; + *sg_list = sg = st.sgl; - for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg)) - sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0); + for (i = 0 ; i < num_entries; i++, sg = sg_next(sg)) + sg_set_page(sg, pages[i], PAGE_SIZE, 0); - mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, - mem->page_count, PCI_DMA_BIDIRECTIONAL); - if (unlikely(!mem->num_sg)) + *num_sg = pci_map_sg(intel_private.pcidev, *sg_list, + num_entries, PCI_DMA_BIDIRECTIONAL); + if (unlikely(!*num_sg)) goto err; return 0; @@ -130,15 +118,22 @@ err: sg_free_table(&st); return -ENOMEM; } +EXPORT_SYMBOL(intel_gtt_map_memory); -static void intel_agp_unmap_memory(struct agp_memory *mem) +void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg) { + struct sg_table st; DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); - pci_unmap_sg(intel_private.pcidev, mem->sg_list, - mem->page_count, PCI_DMA_BIDIRECTIONAL); - intel_agp_free_sglist(mem); + pci_unmap_sg(intel_private.pcidev, sg_list, + num_sg, PCI_DMA_BIDIRECTIONAL); + + st.sgl = sg_list; + st.orig_nents = st.nents = num_sg; + + sg_free_table(&st); } +EXPORT_SYMBOL(intel_gtt_unmap_memory); static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode) { @@ -307,7 +302,7 @@ static int intel_gtt_setup_scratch_page(void) get_page(page); set_pages_uc(page, 1); - if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) { + if (intel_private.base.needs_dmar) { dma_addr = pci_map_page(intel_private.pcidev, page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) @@ -699,6 +694,8 @@ static int intel_gtt_init(void) return ret; } + intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; + return 0; } @@ -892,10 +889,10 @@ static bool i830_check_flags(unsigned int flags) return false; } -static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list, - unsigned int sg_len, - unsigned int pg_start, - unsigned int flags) +void intel_gtt_insert_sg_entries(struct scatterlist *sg_list, + unsigned int sg_len, + unsigned int pg_start, + unsigned int flags) { struct scatterlist *sg; unsigned int len, m; @@ -916,11 +913,25 @@ 
static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list, } readl(intel_private.gtt+j-1); } +EXPORT_SYMBOL(intel_gtt_insert_sg_entries); + +void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries, + struct page **pages, unsigned int flags) +{ + int i, j; + + for (i = 0, j = first_entry; i < num_entries; i++, j++) { + dma_addr_t addr = page_to_phys(pages[i]); + intel_private.driver->write_entry(addr, + j, flags); + } + readl(intel_private.gtt+j-1); +} +EXPORT_SYMBOL(intel_gtt_insert_pages); static int intel_fake_agp_insert_entries(struct agp_memory *mem, off_t pg_start, int type) { - int i, j; int ret = -EINVAL; if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY) @@ -941,21 +952,17 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem, if (!mem->is_flushed) global_cache_flush(); - if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) { - ret = intel_agp_map_memory(mem); + if (intel_private.base.needs_dmar) { + ret = intel_gtt_map_memory(mem->pages, mem->page_count, + &mem->sg_list, &mem->num_sg); if (ret != 0) return ret; intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg, pg_start, type); - } else { - for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { - dma_addr_t addr = page_to_phys(mem->pages[i]); - intel_private.driver->write_entry(addr, - j, type); - } - readl(intel_private.gtt+j-1); - } + } else + intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages, + type); out: ret = 0; @@ -964,22 +971,31 @@ out_err: return ret; } +void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries) +{ + unsigned int i; + + for (i = first_entry; i < (first_entry + num_entries); i++) { + intel_private.driver->write_entry(intel_private.scratch_page_dma, + i, 0); + } + readl(intel_private.gtt+i-1); +} +EXPORT_SYMBOL(intel_gtt_clear_range); + static int intel_fake_agp_remove_entries(struct agp_memory *mem, off_t pg_start, int type) { - int i; - if (mem->page_count == 0) return 0; - if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) - intel_agp_unmap_memory(mem); - - for (i = pg_start; i < (mem->page_count + pg_start); i++) { - intel_private.driver->write_entry(intel_private.scratch_page_dma, - i, 0); + if (intel_private.base.needs_dmar) { + intel_gtt_unmap_memory(mem->sg_list, mem->num_sg); + mem->sg_list = NULL; + mem->num_sg = 0; } - readl(intel_private.gtt+i-1); + + intel_gtt_clear_range(pg_start, mem->page_count); return 0; } diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index c35817a11690..9e343c0998b4 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -11,9 +11,21 @@ const struct intel_gtt { /* Part of the gtt that is mappable by the cpu, for those chips where * this is not the full gtt. */ unsigned int gtt_mappable_entries; + /* Whether i915 needs to use the dmar apis or not. 
*/ + unsigned int needs_dmar : 1; } *intel_gtt_get(void); void intel_gtt_chipset_flush(void); +void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg); +void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries); +int intel_gtt_map_memory(struct page **pages, unsigned int num_entries, + struct scatterlist **sg_list, int *num_sg); +void intel_gtt_insert_sg_entries(struct scatterlist *sg_list, + unsigned int sg_len, + unsigned int pg_start, + unsigned int flags); +void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries, + struct page **pages, unsigned int flags); /* Special gtt memory types */ #define AGP_DCACHE_MEMORY 1 -- cgit v1.2.3 From 185cbcb304ba4dee55e39593fd86dcd7813f62ec Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sat, 6 Nov 2010 12:12:35 +0100 Subject: drm/i915: no more agp for gem Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 9 ++++-- drivers/gpu/drm/i915/i915_gem_gtt.c | 63 ++++++++++++++++++++++++++----------- 2 files changed, 50 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b3bfab93e14c..dc371d987aa7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -791,11 +791,14 @@ struct drm_i915_gem_object { unsigned int fault_mappable : 1; unsigned int pin_mappable : 1; - /** AGP memory structure for our GTT binding. */ - DRM_AGP_MEM *agp_mem; - struct page **pages; + /** + * DMAR support + */ + struct scatterlist *sg_list; + int num_sg; + /** * Current offset of the object in GTT space. * diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index f78c15f9e7f2..0b34a1aee9b6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -33,15 +33,24 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv; - int ret; list_for_each_entry(obj_priv, &dev_priv->mm.gtt_list, gtt_list) { - /* Hack to force agp to reinsert buffer object. */ - obj_priv->agp_mem->is_bound = false; - ret = agp_bind_memory(obj_priv->agp_mem, obj_priv->gtt_space->start / PAGE_SIZE); - BUG_ON(ret != 0); + if (dev_priv->mm.gtt->needs_dmar) { + BUG_ON(!obj_priv->sg_list); + + intel_gtt_insert_sg_entries(obj_priv->sg_list, + obj_priv->num_sg, + obj_priv->gtt_space->start + >> PAGE_SHIFT, + obj_priv->agp_type); + } else + intel_gtt_insert_pages(obj_priv->gtt_space->start + >> PAGE_SHIFT, + obj_priv->base.size >> PAGE_SHIFT, + obj_priv->pages, + obj_priv->agp_type); } /* Be paranoid and flush the chipset cache. */ @@ -51,27 +60,43 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) int i915_gem_gtt_bind_object(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int ret; - /* Create an AGP memory structure pointing at our pages, and bind it - * into the GTT. 
- */ - obj_priv->agp_mem = drm_agp_bind_pages(dev, - obj_priv->pages, - obj->size >> PAGE_SHIFT, - obj_priv->gtt_space->start, - obj_priv->agp_type); + if (dev_priv->mm.gtt->needs_dmar) { + ret = intel_gtt_map_memory(obj_priv->pages, + obj->size >> PAGE_SHIFT, + &obj_priv->sg_list, + &obj_priv->num_sg); + if (ret != 0) + return ret; + + intel_gtt_insert_sg_entries(obj_priv->sg_list, obj_priv->num_sg, + obj_priv->gtt_space->start + >> PAGE_SHIFT, + obj_priv->agp_type); + } else + intel_gtt_insert_pages(obj_priv->gtt_space->start >> PAGE_SHIFT, + obj->size >> PAGE_SHIFT, + obj_priv->pages, + obj_priv->agp_type); - if (obj_priv->agp_mem) - return 0; - else - return -ENOMEM; + return 0; } void i915_gem_gtt_unbind_object(struct drm_gem_object *obj) { + struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - drm_unbind_agp(obj_priv->agp_mem); - drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); + if (dev_priv->mm.gtt->needs_dmar) { + intel_gtt_unmap_memory(obj_priv->sg_list, obj_priv->num_sg); + obj_priv->sg_list = NULL; + obj_priv->num_sg = 0; + } + + intel_gtt_clear_range(obj_priv->gtt_space->start >> PAGE_SHIFT, + obj->size >> PAGE_SHIFT); } -- cgit v1.2.3 From 05394f3975dceb107a5e1393e2244946e5b43660 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 8 Nov 2010 19:18:58 +0000 Subject: drm/i915: Use drm_i915_gem_object as the preferred type A glorified s/obj_priv/obj/ with a net reduction of over 100 lines and many characters! Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 64 +- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 84 +- drivers/gpu/drm/i915/i915_gem.c | 1268 +++++++++++++++---------------- drivers/gpu/drm/i915/i915_gem_debug.c | 23 +- drivers/gpu/drm/i915/i915_gem_evict.c | 67 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 68 +- drivers/gpu/drm/i915/i915_gem_tiling.c | 104 ++- drivers/gpu/drm/i915/i915_irq.c | 67 +- drivers/gpu/drm/i915/i915_trace.h | 41 +- drivers/gpu/drm/i915/intel_display.c | 242 +++--- drivers/gpu/drm/i915/intel_drv.h | 15 +- drivers/gpu/drm/i915/intel_fb.c | 25 +- drivers/gpu/drm/i915/intel_overlay.c | 48 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 54 +- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 +- 16 files changed, 1019 insertions(+), 1157 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 4fe49e0228ef..1e8cd74d18d5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -87,19 +87,19 @@ static int i915_capabilities(struct seq_file *m, void *data) return 0; } -static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv) +static const char *get_pin_flag(struct drm_i915_gem_object *obj) { - if (obj_priv->user_pin_count > 0) + if (obj->user_pin_count > 0) return "P"; - else if (obj_priv->pin_count > 0) + else if (obj->pin_count > 0) return "p"; else return " "; } -static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv) +static const char *get_tiling_flag(struct drm_i915_gem_object *obj) { - switch (obj_priv->tiling_mode) { + switch (obj->tiling_mode) { default: case I915_TILING_NONE: return " "; case I915_TILING_X: return "X"; @@ -140,7 +140,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) struct list_head *head; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object 
*obj; size_t total_obj_size, total_gtt_size; int count, ret; @@ -175,12 +175,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) } total_obj_size = total_gtt_size = count = 0; - list_for_each_entry(obj_priv, head, mm_list) { + list_for_each_entry(obj, head, mm_list) { seq_printf(m, " "); - describe_obj(m, obj_priv); + describe_obj(m, obj); seq_printf(m, "\n"); - total_obj_size += obj_priv->base.size; - total_gtt_size += obj_priv->gtt_space->size; + total_obj_size += obj->base.size; + total_gtt_size += obj->gtt_space->size; count++; } mutex_unlock(&dev->struct_mutex); @@ -251,14 +251,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) seq_printf(m, "%d prepares\n", work->pending); if (work->old_fb_obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj); - if(obj_priv) - seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); + struct drm_i915_gem_object *obj = work->old_fb_obj; + if (obj) + seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); } if (work->pending_flip_obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj); - if(obj_priv) - seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); + struct drm_i915_gem_object *obj = work->pending_flip_obj; + if (obj) + seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset); } } spin_unlock_irqrestore(&dev->event_lock, flags); @@ -421,17 +421,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data) seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); for (i = 0; i < dev_priv->num_fence_regs; i++) { - struct drm_gem_object *obj = dev_priv->fence_regs[i].obj; + struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj; seq_printf(m, "Fenced object[%2d] = ", i); if (obj == NULL) seq_printf(m, "unused"); else - describe_obj(m, to_intel_bo(obj)); + describe_obj(m, obj); seq_printf(m, "\n"); } - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -465,14 +465,14 @@ static int i915_hws_info(struct seq_file *m, void *data) static void i915_dump_object(struct seq_file *m, struct io_mapping *mapping, - struct drm_i915_gem_object *obj_priv) + struct drm_i915_gem_object *obj) { int page, page_count, i; - page_count = obj_priv->base.size / PAGE_SIZE; + page_count = obj->base.size / PAGE_SIZE; for (page = 0; page < page_count; page++) { u32 *mem = io_mapping_map_wc(mapping, - obj_priv->gtt_offset + page * PAGE_SIZE); + obj->gtt_offset + page * PAGE_SIZE); for (i = 0; i < PAGE_SIZE; i += 4) seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); io_mapping_unmap(mem); @@ -484,25 +484,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { - obj = &obj_priv->base; - if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { - seq_printf(m, "--- gtt_offset = 0x%08x\n", - obj_priv->gtt_offset); - i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv); + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { + if (obj->base.read_domains & 
I915_GEM_DOMAIN_COMMAND) { + seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); + i915_dump_object(m, dev_priv->mm.gtt_mapping, obj); } } mutex_unlock(&dev->struct_mutex); - return 0; } @@ -525,7 +521,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) if (ret) return ret; - if (!ring->gem_object) { + if (!ring->obj) { seq_printf(m, "No ringbuffer setup\n"); } else { u8 *virt = ring->virtual_start; @@ -983,7 +979,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fb->base.height, fb->base.depth, fb->base.bits_per_pixel); - describe_obj(m, to_intel_bo(fb->obj)); + describe_obj(m, fb->obj); seq_printf(m, "\n"); list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { @@ -995,7 +991,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) fb->base.height, fb->base.depth, fb->base.bits_per_pixel); - describe_obj(m, to_intel_bo(fb->obj)); + describe_obj(m, fb->obj); seq_printf(m, "\n"); } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 7084de7c4c55..7960fd63ecb1 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -157,7 +157,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) } if (init->ring_size != 0) { - if (dev_priv->render_ring.gem_object != NULL) { + if (dev_priv->render_ring.obj != NULL) { i915_dma_cleanup(dev); DRM_ERROR("Client tried to initialize ringbuffer in " "GEM mode\n"); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index dc371d987aa7..22d6388b331f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -32,7 +32,6 @@ #include "i915_reg.h" #include "intel_bios.h" -#include "i915_trace.h" #include "intel_ringbuffer.h" #include #include @@ -90,7 +89,7 @@ struct drm_i915_gem_phys_object { int id; struct page **page_list; drm_dma_handle_t *handle; - struct drm_gem_object *cur_obj; + struct drm_i915_gem_object *cur_obj; }; struct mem_block { @@ -125,7 +124,7 @@ struct drm_i915_master_private { #define I915_FENCE_REG_NONE -1 struct drm_i915_fence_reg { - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; struct list_head lru_list; bool gpu; }; @@ -280,9 +279,9 @@ typedef struct drm_i915_private { uint32_t counter; unsigned int seqno_gfx_addr; drm_local_map_t hws_map; - struct drm_gem_object *seqno_obj; - struct drm_gem_object *pwrctx; - struct drm_gem_object *renderctx; + struct drm_i915_gem_object *seqno_obj; + struct drm_i915_gem_object *pwrctx; + struct drm_i915_gem_object *renderctx; struct resource mch_res; @@ -690,14 +689,14 @@ typedef struct drm_i915_private { u8 fmax; u8 fstart; - u64 last_count1; - unsigned long last_time1; - u64 last_count2; - struct timespec last_time2; - unsigned long gfx_power; - int c_m; - int r_t; - u8 corr; + u64 last_count1; + unsigned long last_time1; + u64 last_count2; + struct timespec last_time2; + unsigned long gfx_power; + int c_m; + int r_t; + u8 corr; spinlock_t *mchdev_lock; enum no_fbc_reason no_fbc_reason; @@ -711,7 +710,6 @@ typedef struct drm_i915_private { struct intel_fbdev *fbdev; } drm_i915_private_t; -/** driver private structure attached to each drm_gem_object */ struct drm_i915_gem_object { struct drm_gem_object base; @@ -918,7 +916,7 @@ enum intel_chip_family { #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) -#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) +#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte @@ -947,6 +945,8 @@ enum intel_chip_family { #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) +#include "i915_trace.h" + extern struct drm_ioctl_desc i915_ioctls[]; extern int i915_max_ioctl; extern unsigned int i915_fbpercrtc; @@ -1085,14 +1085,15 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); int i915_gem_init_object(struct drm_gem_object *obj); -struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, - size_t size); +struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, + size_t size); void i915_gem_free_object(struct drm_gem_object *obj); -int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, +int i915_gem_object_pin(struct drm_i915_gem_object *obj, + uint32_t alignment, bool map_and_fenceable); -void i915_gem_object_unpin(struct drm_gem_object *obj); -int i915_gem_object_unbind(struct drm_gem_object *obj); -void i915_gem_release_mmap(struct drm_gem_object *obj); +void i915_gem_object_unpin(struct drm_i915_gem_object *obj); +int i915_gem_object_unbind(struct drm_i915_gem_object *obj); +void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); /** @@ -1104,14 +1105,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) return (int32_t)(seq1 - seq2) >= 0; } -int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, +int i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, bool interruptible); -int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, +int i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, bool interruptible); void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_reset(struct drm_device *dev); -void i915_gem_clflush_object(struct drm_gem_object *obj); -int i915_gem_object_set_domain(struct drm_gem_object *obj, +void i915_gem_clflush_object(struct drm_i915_gem_object *obj); +int i915_gem_object_set_domain(struct drm_i915_gem_object *obj, uint32_t read_domains, uint32_t write_domain); int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, @@ -1131,23 +1132,23 @@ int i915_do_wait_request(struct drm_device *dev, bool interruptible, struct intel_ring_buffer *ring); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); -int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, +int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write); -int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, +int i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, bool pipelined); int i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj, + struct drm_i915_gem_object *obj, int id, int align); void i915_gem_detach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj); + struct drm_i915_gem_object *obj); void i915_gem_free_all_phys_object(struct drm_device *dev); -void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); +void i915_gem_release(struct drm_device *dev, struct drm_file *file); /* i915_gem_gtt.c */ void i915_gem_restore_gtt_mappings(struct drm_device *dev); -int i915_gem_gtt_bind_object(struct drm_gem_object *obj); -void i915_gem_gtt_unbind_object(struct drm_gem_object *obj); +int 
i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); +void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); /* i915_gem_evict.c */ int i915_gem_evict_something(struct drm_device *dev, int min_size, @@ -1157,19 +1158,20 @@ int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only); /* i915_gem_tiling.c */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); -void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); -void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); +void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); +void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); /* i915_gem_debug.c */ -void i915_gem_dump_object(struct drm_gem_object *obj, int len, +void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, const char *where, uint32_t mark); #if WATCH_LISTS int i915_verify_lists(struct drm_device *dev); #else #define i915_verify_lists(dev) 0 #endif -void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); -void i915_gem_dump_object(struct drm_gem_object *obj, int len, +void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, + int handle); +void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, const char *where, uint32_t mark); /* i915_debugfs.c */ @@ -1251,10 +1253,10 @@ extern void intel_display_print_error_state(struct seq_file *m, * In that case, we don't need to do it when GEM is initialized as nobody else * has access to the ring. */ -#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ - if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \ +#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ + if (((drm_i915_private_t *)dev->dev_private)->render_ring.obj \ == NULL) \ - LOCK_TEST_WITH_RETURN(dev, file_priv); \ + LOCK_TEST_WITH_RETURN(dev, file); \ } while (0) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3cac366b3053..d196895527a6 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -41,29 +41,30 @@ struct change_domains { uint32_t flush_rings; }; -static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv); -static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv); +static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj); +static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj); -static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, +static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj, bool pipelined); -static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); -static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); -static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, +static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); +static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); +static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, int write); -static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, +static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, uint64_t offset, uint64_t size); -static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); -static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, +static void 
i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj); +static int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, bool interruptible); -static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, +static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unsigned alignment, bool map_and_fenceable); -static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); -static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, +static void i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj); +static int i915_gem_phys_pwrite(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv); -static void i915_gem_free_object_tail(struct drm_gem_object *obj); + struct drm_file *file); +static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); static int i915_gem_inactive_shrink(struct shrinker *shrinker, int nr_to_scan, @@ -212,11 +213,9 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev) } static inline bool -i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) +i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) { - return obj_priv->gtt_space && - !obj_priv->active && - obj_priv->pin_count == 0; + return obj->gtt_space && !obj->active && obj->pin_count == 0; } int i915_gem_do_init(struct drm_device *dev, @@ -244,7 +243,7 @@ int i915_gem_do_init(struct drm_device *dev, int i915_gem_init_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_init *args = data; int ret; @@ -258,7 +257,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data, int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_get_aperture *args = data; @@ -280,10 +279,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, */ int i915_gem_create_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_create *args = data; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; int ret; u32 handle; @@ -294,29 +293,28 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, if (obj == NULL) return -ENOMEM; - ret = drm_gem_handle_create(file_priv, obj, &handle); + ret = drm_gem_handle_create(file, &obj->base, &handle); if (ret) { - drm_gem_object_release(obj); - i915_gem_info_remove_obj(dev->dev_private, obj->size); + drm_gem_object_release(&obj->base); + i915_gem_info_remove_obj(dev->dev_private, obj->base.size); kfree(obj); return ret; } /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); trace_i915_gem_object_create(obj); args->handle = handle; return 0; } -static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) +static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { - drm_i915_private_t *dev_priv = obj->dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + drm_i915_private_t *dev_priv = obj->base.dev->dev_private; return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && - obj_priv->tiling_mode != I915_TILING_NONE; + obj->tiling_mode != I915_TILING_NONE; } static inline void @@ -392,12 +390,12 @@ slow_shmem_bit17_copy(struct page *gpu_page, * fault, it fails so we can fall 
back to i915_gem_shmem_pwrite_slow(). */ static int -i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_shmem_pread_fast(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pread *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; ssize_t remain; loff_t offset; char __user *user_data; @@ -406,7 +404,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - obj_priv = to_intel_bo(obj); offset = args->offset; while (remain > 0) { @@ -455,12 +452,12 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, * and not take page faults. */ static int -i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_shmem_pread_slow(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pread *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; struct mm_struct *mm = current->mm; struct page **user_pages; ssize_t remain; @@ -506,7 +503,6 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - obj_priv = to_intel_bo(obj); offset = args->offset; while (remain > 0) { @@ -575,11 +571,10 @@ out: */ int i915_gem_pread_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_pread *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret = 0; if (args->size == 0) @@ -599,15 +594,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); /* Bounds check source. */ - if (args->offset > obj->size || args->size > obj->size - args->offset) { + if (args->offset > obj->base.size || + args->size > obj->base.size - args->offset) { ret = -EINVAL; goto out; } @@ -620,12 +615,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, ret = -EFAULT; if (!i915_gem_object_needs_bit17_swizzle(obj)) - ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); + ret = i915_gem_shmem_pread_fast(dev, obj, args, file); if (ret == -EFAULT) - ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); + ret = i915_gem_shmem_pread_slow(dev, obj, args, file); out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -680,11 +675,11 @@ slow_kernel_write(struct io_mapping *mapping, * user into the GTT, uncached. 
*/ static int -i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_gtt_pwrite_fast(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); drm_i915_private_t *dev_priv = dev->dev_private; ssize_t remain; loff_t offset, page_base; @@ -694,8 +689,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - obj_priv = to_intel_bo(obj); - offset = obj_priv->gtt_offset + args->offset; + offset = obj->gtt_offset + args->offset; while (remain > 0) { /* Operation in this page @@ -735,11 +729,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). */ static int -i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_gtt_pwrite_slow(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); drm_i915_private_t *dev_priv = dev->dev_private; ssize_t remain; loff_t gtt_page_base, offset; @@ -780,8 +774,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, if (ret) goto out_unpin_pages; - obj_priv = to_intel_bo(obj); - offset = obj_priv->gtt_offset + args->offset; + offset = obj->gtt_offset + args->offset; while (remain > 0) { /* Operation in this page @@ -827,12 +820,12 @@ out_unpin_pages: * copy_from_user into the kmapped pages backing the object. */ static int -i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_shmem_pwrite_fast(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; ssize_t remain; loff_t offset; char __user *user_data; @@ -841,9 +834,8 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - obj_priv = to_intel_bo(obj); offset = args->offset; - obj_priv->dirty = 1; + obj->dirty = 1; while (remain > 0) { struct page *page; @@ -898,12 +890,12 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, * struct_mutex is held. 
*/ static int -i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_shmem_pwrite_slow(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) + struct drm_file *file) { - struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; struct mm_struct *mm = current->mm; struct page **user_pages; ssize_t remain; @@ -947,9 +939,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - obj_priv = to_intel_bo(obj); offset = args->offset; - obj_priv->dirty = 1; + obj->dirty = 1; while (remain > 0) { struct page *page; @@ -1020,8 +1011,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_pwrite *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; if (args->size == 0) @@ -1041,15 +1031,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); /* Bounds check destination. */ - if (args->offset > obj->size || args->size > obj->size - args->offset) { + if (args->offset > obj->base.size || + args->size > obj->base.size - args->offset) { ret = -EINVAL; goto out; } @@ -1060,11 +1050,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * pread/pwrite currently are reading and writing from the CPU * perspective, requiring manual detiling by the client. */ - if (obj_priv->phys_obj) + if (obj->phys_obj) ret = i915_gem_phys_pwrite(dev, obj, args, file); - else if (obj_priv->tiling_mode == I915_TILING_NONE && - obj_priv->gtt_space && - obj->write_domain != I915_GEM_DOMAIN_CPU) { + else if (obj->tiling_mode == I915_TILING_NONE && + obj->gtt_space && + obj->base.write_domain != I915_GEM_DOMAIN_CPU) { ret = i915_gem_object_pin(obj, 0, true); if (ret) goto out; @@ -1092,7 +1082,7 @@ out_unpin: } out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -1104,12 +1094,11 @@ unlock: */ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_set_domain *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; uint32_t read_domains = args->read_domains; uint32_t write_domain = args->write_domain; int ret; @@ -1134,12 +1123,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); intel_mark_busy(dev, obj); @@ -1149,9 +1137,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, /* Update the LRU on the fence for the CPU access that's * about to occur. 
*/ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { + if (obj->fence_reg != I915_FENCE_REG_NONE) { struct drm_i915_fence_reg *reg = - &dev_priv->fence_regs[obj_priv->fence_reg]; + &dev_priv->fence_regs[obj->fence_reg]; list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); } @@ -1167,10 +1155,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, } /* Maintain LRU order of "inactive" objects */ - if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); + if (ret == 0 && i915_gem_object_is_inactive(obj)) + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -1181,10 +1169,10 @@ unlock: */ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_sw_finish *args = data; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; int ret = 0; if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } /* Pinned buffers may be scanout, so flush the cache */ - if (to_intel_bo(obj)->pin_count) + if (obj->pin_count) i915_gem_object_flush_cpu_write_domain(obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -1219,7 +1207,7 @@ unlock: */ int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_mmap *args = data; @@ -1230,7 +1218,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = drm_gem_object_lookup(dev, file, args->handle); if (obj == NULL) return -ENOENT; @@ -1273,10 +1261,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, */ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { - struct drm_gem_object *obj = vma->vm_private_data; - struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); pgoff_t page_offset; unsigned long pfn; int ret = 0; @@ -1288,17 +1275,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Now bind it into the GTT if needed */ mutex_lock(&dev->struct_mutex); - BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable); + BUG_ON(obj->pin_count && !obj->pin_mappable); - if (obj_priv->gtt_space) { - if (!obj_priv->map_and_fenceable) { + if (obj->gtt_space) { + if (!obj->map_and_fenceable) { ret = i915_gem_object_unbind(obj); if (ret) goto unlock; } } - if (!obj_priv->gtt_space) { + if (!obj->gtt_space) { ret = i915_gem_object_bind_to_gtt(obj, 0, true); if (ret) goto unlock; @@ -1308,22 +1295,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (ret) goto unlock; - if (!obj_priv->fault_mappable) { - obj_priv->fault_mappable = true; - i915_gem_info_update_mappable(dev_priv,
obj_priv, true); + if (!obj->fault_mappable) { + obj->fault_mappable = true; + i915_gem_info_update_mappable(dev_priv, obj, true); } /* Need a new fence register? */ - if (obj_priv->tiling_mode != I915_TILING_NONE) { + if (obj->tiling_mode != I915_TILING_NONE) { ret = i915_gem_object_get_fence_reg(obj, true); if (ret) goto unlock; } - if (i915_gem_object_is_inactive(obj_priv)) - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); + if (i915_gem_object_is_inactive(obj)) - list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + + pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) + page_offset; /* Finally, remap it using the new GTT offset */ @@ -1356,36 +1343,39 @@ unlock: * This routine allocates and attaches a fake offset for @obj. */ static int -i915_gem_create_mmap_offset(struct drm_gem_object *obj) +i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_gem_mm *mm = dev->mm_private; struct drm_map_list *list; struct drm_local_map *map; int ret = 0; /* Set the object up for mmap'ing */ - list = &obj->map_list; + list = &obj->base.map_list; list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); if (!list->map) return -ENOMEM; map = list->map; map->type = _DRM_GEM; - map->size = obj->size; + map->size = obj->base.size; map->handle = obj; /* Get a DRM GEM mmap offset allocated... */ list->file_offset_node = drm_mm_search_free(&mm->offset_manager, - obj->size / PAGE_SIZE, 0, 0); + obj->base.size / PAGE_SIZE, + 0, 0); if (!list->file_offset_node) { - DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); + DRM_ERROR("failed to allocate offset for bo %d\n", + obj->base.name); ret = -ENOSPC; goto out_free_list; } list->file_offset_node = drm_mm_get_block(list->file_offset_node, - obj->size / PAGE_SIZE, 0); + obj->base.size / PAGE_SIZE, + 0); if (!list->file_offset_node) { ret = -ENOMEM; goto out_free_list; @@ -1424,29 +1414,28 @@ out_free_list: * fixup by i915_gem_fault(). */ void -i915_gem_release_mmap(struct drm_gem_object *obj) +i915_gem_release_mmap(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - if (unlikely(obj->map_list.map && dev->dev_mapping)) + if (unlikely(obj->base.map_list.map && dev->dev_mapping)) unmap_mapping_range(dev->dev_mapping, - (loff_t)obj->map_list.hash.key<<PAGE_SHIFT, obj->size, 1); + (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT, obj->base.size, 1); - if (obj_priv->fault_mappable) { - obj_priv->fault_mappable = false; - i915_gem_info_update_mappable(dev_priv, obj_priv, false); + if (obj->fault_mappable) { + obj->fault_mappable = false; + i915_gem_info_update_mappable(dev_priv, obj, false); } } static void -i915_gem_free_mmap_offset(struct drm_gem_object *obj) +i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_gem_mm *mm = dev->mm_private; - struct drm_map_list *list = &obj->map_list; + struct drm_map_list *list = &obj->base.map_list; drm_ht_remove_item(&mm->offset_hash, &list->hash); drm_mm_put_block(list->file_offset_node); @@ -1462,23 +1451,23 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj) * potential fence register mapping.
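For readers skimming the conversion, the three helpers that follow (i915_gem_get_gtt_alignment(), i915_gem_get_unfenced_gtt_alignment(), i915_gem_get_gtt_size()) encode the pre-965 fence constraint: a fenced region must be a power of two, at least 512KiB (1MiB on gen3), and the object has to be aligned to that size, which is why the alignment helper simply returns the size on those chipsets. A standalone sketch of the rounding rule (illustrative helper, not the kernel function):

#include <stdint.h>
#include <stdio.h>

/* Round an object size up to a legal pre-965 fence region size:
 * the next power of two, starting from 1MiB on gen3, 512KiB earlier. */
static uint32_t fence_region_size(uint32_t obj_size, int gen)
{
        uint32_t size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

        while (size < obj_size)
                size <<= 1;
        return size;
}

int main(void)
{
        printf("%u\n", fence_region_size(600 * 1024, 3));       /* 1048576 */
        printf("%u\n", fence_region_size(3 * 1024 * 1024, 3));  /* 4194304 */
        return 0;
}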
*/ static uint32_t -i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv) +i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj_priv->base.dev; + struct drm_device *dev = obj->base.dev; /* * Minimum alignment is 4k (GTT page size), but might be greater * if a fence register is needed for the object. */ if (INTEL_INFO(dev)->gen >= 4 || - obj_priv->tiling_mode == I915_TILING_NONE) + obj->tiling_mode == I915_TILING_NONE) return 4096; /* * Previous chips need to be aligned to the size of the smallest * fence register that can contain the object. */ - return i915_gem_get_gtt_size(obj_priv); + return i915_gem_get_gtt_size(obj); } /** @@ -1490,16 +1479,16 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv) * unfenced tiled surface requirements. */ static uint32_t -i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv) +i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj_priv->base.dev; + struct drm_device *dev = obj->base.dev; int tile_height; /* * Minimum alignment is 4k (GTT page size) for sane hw. */ if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || - obj_priv->tiling_mode == I915_TILING_NONE) + obj->tiling_mode == I915_TILING_NONE) return 4096; /* @@ -1508,18 +1497,18 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv) * placed in a fenced gtt region). */ if (IS_GEN2(dev) || - (obj_priv->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) + (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) tile_height = 32; else tile_height = 8; - return tile_height * obj_priv->stride * 2; + return tile_height * obj->stride * 2; } static uint32_t -i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) +i915_gem_get_gtt_size(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj_priv->base.dev; + struct drm_device *dev = obj->base.dev; uint32_t size; /* @@ -1527,7 +1516,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) * if a fence register is needed for the object. */ if (INTEL_INFO(dev)->gen >= 4) - return obj_priv->base.size; + return obj->base.size; /* * Previous chips need to be aligned to the size of the smallest @@ -1538,7 +1527,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) else size = 512*1024; - while (size < obj_priv->base.size) + while (size < obj->base.size) size <<= 1; return size; @@ -1548,7 +1537,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing * @dev: DRM device * @data: GTT mapping ioctl data - * @file_priv: GEM object info + * @file: GEM object info * * Simply returns the fake offset to userspace so it can mmap it. 
* The mmap call will end up in drm_gem_mmap(), which will set things @@ -1561,12 +1550,11 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) */ int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_mmap_gtt *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; if (!(dev->driver->driver_features & DRIVER_GEM)) @@ -1576,44 +1564,42 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); - if (obj->size > dev_priv->mm.gtt_mappable_end) { + if (obj->base.size > dev_priv->mm.gtt_mappable_end) { ret = -E2BIG; goto unlock; } - if (obj_priv->madv != I915_MADV_WILLNEED) { + if (obj->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to mmap a purgeable buffer\n"); ret = -EINVAL; goto out; } - if (!obj->map_list.map) { + if (!obj->base.map_list.map) { ret = i915_gem_create_mmap_offset(obj); if (ret) goto out; } - args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; + args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT; out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; } static int -i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, +i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, gfp_t gfpmask) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page_count, i; struct address_space *mapping; struct inode *inode; @@ -1622,13 +1608,13 @@ i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, /* Get the list of pages out of our struct file. They'll be pinned * at this point until we release them. 
*/ - page_count = obj->size / PAGE_SIZE; - BUG_ON(obj_priv->pages != NULL); - obj_priv->pages = drm_malloc_ab(page_count, sizeof(struct page *)); - if (obj_priv->pages == NULL) + page_count = obj->base.size / PAGE_SIZE; + BUG_ON(obj->pages != NULL); + obj->pages = drm_malloc_ab(page_count, sizeof(struct page *)); + if (obj->pages == NULL) return -ENOMEM; - inode = obj->filp->f_path.dentry->d_inode; + inode = obj->base.filp->f_path.dentry->d_inode; mapping = inode->i_mapping; for (i = 0; i < page_count; i++) { page = read_cache_page_gfp(mapping, i, @@ -1639,51 +1625,50 @@ i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, if (IS_ERR(page)) goto err_pages; - obj_priv->pages[i] = page; + obj->pages[i] = page; } - if (obj_priv->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) i915_gem_object_do_bit_17_swizzle(obj); return 0; err_pages: while (i--) - page_cache_release(obj_priv->pages[i]); + page_cache_release(obj->pages[i]); - drm_free_large(obj_priv->pages); - obj_priv->pages = NULL; + drm_free_large(obj->pages); + obj->pages = NULL; return PTR_ERR(page); } static void -i915_gem_object_put_pages_gtt(struct drm_gem_object *obj) +i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int page_count = obj->size / PAGE_SIZE; + int page_count = obj->base.size / PAGE_SIZE; int i; - BUG_ON(obj_priv->madv == __I915_MADV_PURGED); + BUG_ON(obj->madv == __I915_MADV_PURGED); - if (obj_priv->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) i915_gem_object_save_bit_17_swizzle(obj); - if (obj_priv->madv == I915_MADV_DONTNEED) - obj_priv->dirty = 0; + if (obj->madv == I915_MADV_DONTNEED) + obj->dirty = 0; for (i = 0; i < page_count; i++) { - if (obj_priv->dirty) - set_page_dirty(obj_priv->pages[i]); + if (obj->dirty) + set_page_dirty(obj->pages[i]); - if (obj_priv->madv == I915_MADV_WILLNEED) - mark_page_accessed(obj_priv->pages[i]); + if (obj->madv == I915_MADV_WILLNEED) + mark_page_accessed(obj->pages[i]); - page_cache_release(obj_priv->pages[i]); + page_cache_release(obj->pages[i]); } - obj_priv->dirty = 0; + obj->dirty = 0; - drm_free_large(obj_priv->pages); - obj_priv->pages = NULL; + drm_free_large(obj->pages); + obj->pages = NULL; } static uint32_t @@ -1695,47 +1680,44 @@ i915_gem_next_request_seqno(struct drm_device *dev, } static void -i915_gem_object_move_to_active(struct drm_gem_object *obj, +i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t seqno = i915_gem_next_request_seqno(dev, ring); BUG_ON(ring == NULL); - obj_priv->ring = ring; + obj->ring = ring; /* Add a reference if we're newly entering the active list. */ - if (!obj_priv->active) { - drm_gem_object_reference(obj); - obj_priv->active = 1; + if (!obj->active) { + drm_gem_object_reference(&obj->base); + obj->active = 1; } /* Move from whatever list we were on to the tail of execution. 
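A note on the err_pages: unwind in i915_gem_object_get_pages_gtt() above: it is the canonical partial-failure pattern, walking the loop index back down on error so that exactly the pages acquired so far are released. A standalone sketch of the idiom, with hypothetical acquire/release stand-ins in place of read_cache_page_gfp()/page_cache_release():

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins; the fourth acquisition is made to fail. */
static void *acquire_page(int i) { return i == 3 ? NULL : malloc(1); }
static void release_page(void *p) { free(p); }

static int get_pages(void **pages, int page_count)
{
        int i;

        for (i = 0; i < page_count; i++) {
                pages[i] = acquire_page(i);
                if (pages[i] == NULL)
                        goto err_pages;
        }
        return 0;

err_pages:
        /* i is the index that failed; release 0..i-1 in reverse. */
        while (i--)
                release_page(pages[i]);
        return -1;
}

int main(void)
{
        void *pages[8];

        printf("%d\n", get_pages(pages, 8));    /* -1: failed at i == 3 */
        return 0;
}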
*/ - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list); - list_move_tail(&obj_priv->ring_list, &ring->active_list); - obj_priv->last_rendering_seqno = seqno; + list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); + list_move_tail(&obj->ring_list, &ring->active_list); + obj->last_rendering_seqno = seqno; } static void -i915_gem_object_move_to_flushing(struct drm_gem_object *obj) +i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - BUG_ON(!obj_priv->active); - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list); - list_del_init(&obj_priv->ring_list); - obj_priv->last_rendering_seqno = 0; + BUG_ON(!obj->active); + list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); + list_del_init(&obj->ring_list); + obj->last_rendering_seqno = 0; } /* Immediately discard the backing storage */ static void -i915_gem_object_truncate(struct drm_gem_object *obj) +i915_gem_object_truncate(struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct inode *inode; /* Our goal here is to return as much of the memory as @@ -1744,40 +1726,39 @@ i915_gem_object_truncate(struct drm_gem_object *obj) * backing pages, *now*. Here we mirror the actions taken * when by shmem_delete_inode() to release the backing store. */ - inode = obj->filp->f_path.dentry->d_inode; + inode = obj->base.filp->f_path.dentry->d_inode; truncate_inode_pages(inode->i_mapping, 0); if (inode->i_op->truncate_range) inode->i_op->truncate_range(inode, 0, (loff_t)-1); - obj_priv->madv = __I915_MADV_PURGED; + obj->madv = __I915_MADV_PURGED; } static inline int -i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) +i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) { - return obj_priv->madv == I915_MADV_DONTNEED; + return obj->madv == I915_MADV_DONTNEED; } static void -i915_gem_object_move_to_inactive(struct drm_gem_object *obj) +i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - if (obj_priv->pin_count != 0) - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); + if (obj->pin_count != 0) + list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); else - list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); - list_del_init(&obj_priv->ring_list); + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + list_del_init(&obj->ring_list); - BUG_ON(!list_empty(&obj_priv->gpu_write_list)); + BUG_ON(!list_empty(&obj->gpu_write_list)); - obj_priv->last_rendering_seqno = 0; - obj_priv->ring = NULL; - if (obj_priv->active) { - obj_priv->active = 0; - drm_gem_object_unreference(obj); + obj->last_rendering_seqno = 0; + obj->ring = NULL; + if (obj->active) { + obj->active = 0; + drm_gem_object_unreference(&obj->base); } WARN_ON(i915_verify_lists(dev)); } @@ -1788,30 +1769,28 @@ i915_gem_process_flushing_list(struct drm_device *dev, struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv, *next; + struct drm_i915_gem_object *obj, *next; - list_for_each_entry_safe(obj_priv, next, + list_for_each_entry_safe(obj, next, &ring->gpu_write_list, gpu_write_list) { - struct 
drm_gem_object *obj = &obj_priv->base; + if (obj->base.write_domain & flush_domains) { + uint32_t old_write_domain = obj->base.write_domain; - if (obj->write_domain & flush_domains) { - uint32_t old_write_domain = obj->write_domain; - - obj->write_domain = 0; - list_del_init(&obj_priv->gpu_write_list); + obj->base.write_domain = 0; + list_del_init(&obj->gpu_write_list); i915_gem_object_move_to_active(obj, ring); /* update the fence lru list */ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { + if (obj->fence_reg != I915_FENCE_REG_NONE) { struct drm_i915_fence_reg *reg = - &dev_priv->fence_regs[obj_priv->fence_reg]; + &dev_priv->fence_regs[obj->fence_reg]; list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); } trace_i915_gem_object_change_domain(obj, - obj->read_domains, + obj->base.read_domains, old_write_domain); } } @@ -1912,22 +1891,22 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, } while (!list_empty(&ring->active_list)) { - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; - obj_priv = list_first_entry(&ring->active_list, - struct drm_i915_gem_object, - ring_list); + obj = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + ring_list); - obj_priv->base.write_domain = 0; - list_del_init(&obj_priv->gpu_write_list); - i915_gem_object_move_to_inactive(&obj_priv->base); + obj->base.write_domain = 0; + list_del_init(&obj->gpu_write_list); + i915_gem_object_move_to_inactive(obj); } } void i915_gem_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int i; i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); @@ -1939,23 +1918,23 @@ void i915_gem_reset(struct drm_device *dev) * lost bo to the inactive list. */ while (!list_empty(&dev_priv->mm.flushing_list)) { - obj_priv = list_first_entry(&dev_priv->mm.flushing_list, - struct drm_i915_gem_object, - mm_list); + obj = list_first_entry(&dev_priv->mm.flushing_list, + struct drm_i915_gem_object, + mm_list); - obj_priv->base.write_domain = 0; - list_del_init(&obj_priv->gpu_write_list); - i915_gem_object_move_to_inactive(&obj_priv->base); + obj->base.write_domain = 0; + list_del_init(&obj->gpu_write_list); + i915_gem_object_move_to_inactive(obj); } /* Move everything out of the GPU domains to ensure we do any * necessary invalidation upon reuse. */ - list_for_each_entry(obj_priv, + list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { - obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; + obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; } /* The fence registers are invalidated so clear them out */ @@ -2008,18 +1987,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev, * by the ringbuffer to the flushing/inactive lists as appropriate.
*/ while (!list_empty(&ring->active_list)) { - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; - obj_priv = list_first_entry(&ring->active_list, - struct drm_i915_gem_object, - ring_list); + obj = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + ring_list); - if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) + if (!i915_seqno_passed(seqno, obj->last_rendering_seqno)) break; - obj = &obj_priv->base; - if (obj->write_domain != 0) + if (obj->base.write_domain != 0) i915_gem_object_move_to_flushing(obj); else i915_gem_object_move_to_inactive(obj); @@ -2040,17 +2017,17 @@ i915_gem_retire_requests(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; if (!list_empty(&dev_priv->mm.deferred_free_list)) { - struct drm_i915_gem_object *obj_priv, *tmp; + struct drm_i915_gem_object *obj, *next; /* We must be careful that during unbind() we do not * accidentally infinitely recurse into retire requests. * Currently: * retire -> free -> unbind -> wait -> retire_ring */ - list_for_each_entry_safe(obj_priv, tmp, + list_for_each_entry_safe(obj, next, &dev_priv->mm.deferred_free_list, mm_list) - i915_gem_free_object_tail(&obj_priv->base); + i915_gem_free_object_tail(obj); } i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); @@ -2175,7 +2152,6 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno, static void i915_gem_flush_ring(struct drm_device *dev, - struct drm_file *file_priv, struct intel_ring_buffer *ring, uint32_t invalidate_domains, uint32_t flush_domains) @@ -2186,7 +2162,6 @@ i915_gem_flush_ring(struct drm_device *dev, static void i915_gem_flush(struct drm_device *dev, - struct drm_file *file_priv, uint32_t invalidate_domains, uint32_t flush_domains, uint32_t flush_rings) @@ -2198,16 +2173,13 @@ i915_gem_flush(struct drm_device *dev, if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { if (flush_rings & RING_RENDER) - i915_gem_flush_ring(dev, file_priv, - &dev_priv->render_ring, + i915_gem_flush_ring(dev, &dev_priv->render_ring, invalidate_domains, flush_domains); if (flush_rings & RING_BSD) - i915_gem_flush_ring(dev, file_priv, - &dev_priv->bsd_ring, + i915_gem_flush_ring(dev, &dev_priv->bsd_ring, invalidate_domains, flush_domains); if (flush_rings & RING_BLT) - i915_gem_flush_ring(dev, file_priv, - &dev_priv->blt_ring, + i915_gem_flush_ring(dev, &dev_priv->blt_ring, invalidate_domains, flush_domains); } } @@ -2217,26 +2189,25 @@ i915_gem_flush(struct drm_device *dev, * safe to unbind from the GTT or access from the CPU. */ static int -i915_gem_object_wait_rendering(struct drm_gem_object *obj, +i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, bool interruptible) { - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_device *dev = obj->base.dev; int ret; /* This function only exists to support waiting for existing rendering, * not for emitting required flushes. */ - BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); + BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0); /* If there is rendering queued on the buffer being evicted, wait for * it. */ - if (obj_priv->active) { + if (obj->active) { ret = i915_do_wait_request(dev, - obj_priv->last_rendering_seqno, + obj->last_rendering_seqno, interruptible, - obj_priv->ring); + obj->ring); if (ret) return ret; } @@ -2248,17 +2219,16 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj, * Unbinds an object from the GTT aperture.
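The retirement loop above stops at the first object whose last_rendering_seqno has not yet been reached; i915_seqno_passed() makes that comparison wraparound-safe by testing the signed difference of the two unsigned counters. A standalone sketch of the idiom (the i915 helper is equivalent to this test):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* "Has seq1 passed seq2?": correct for counters less than 2^31 apart,
 * even across a 32-bit wrap. */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        printf("%d\n", seqno_passed(10, 5));            /* 1 */
        printf("%d\n", seqno_passed(5, 10));            /* 0 */
        printf("%d\n", seqno_passed(3, 0xfffffffeu));   /* 1: 3 is after the wrap */
        return 0;
}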
*/ int -i915_gem_object_unbind(struct drm_gem_object *obj) +i915_gem_object_unbind(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret = 0; - if (obj_priv->gtt_space == NULL) + if (obj->gtt_space == NULL) return 0; - if (obj_priv->pin_count != 0) { + if (obj->pin_count != 0) { DRM_ERROR("Attempting to unbind pinned buffer\n"); return -EINVAL; } @@ -2281,27 +2251,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj) */ if (ret) { i915_gem_clflush_object(obj); - obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU; + obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; } /* release the fence reg _after_ flushing */ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) + if (obj->fence_reg != I915_FENCE_REG_NONE) i915_gem_clear_fence_reg(obj); i915_gem_gtt_unbind_object(obj); i915_gem_object_put_pages_gtt(obj); - i915_gem_info_remove_gtt(dev_priv, obj_priv); - list_del_init(&obj_priv->mm_list); + i915_gem_info_remove_gtt(dev_priv, obj); + list_del_init(&obj->mm_list); /* Avoid an unnecessary call to unbind on rebind. */ - obj_priv->map_and_fenceable = true; + obj->map_and_fenceable = true; - drm_mm_put_block(obj_priv->gtt_space); - obj_priv->gtt_space = NULL; - obj_priv->gtt_offset = 0; + drm_mm_put_block(obj->gtt_space); + obj->gtt_space = NULL; + obj->gtt_offset = 0; - if (i915_gem_object_is_purgeable(obj_priv)) + if (i915_gem_object_is_purgeable(obj)) i915_gem_object_truncate(obj); trace_i915_gem_object_unbind(obj); @@ -2315,7 +2285,7 @@ static int i915_ring_idle(struct drm_device *dev, if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) return 0; - i915_gem_flush_ring(dev, NULL, ring, + i915_gem_flush_ring(dev, ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); return i915_wait_request(dev, i915_gem_next_request_seqno(dev, ring), @@ -2350,89 +2320,86 @@ i915_gpu_idle(struct drm_device *dev) return 0; } -static void sandybridge_write_fence_reg(struct drm_gem_object *obj) +static void sandybridge_write_fence_reg(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - u32 size = i915_gem_get_gtt_size(obj_priv); - int regnum = obj_priv->fence_reg; + u32 size = obj->gtt_space->size; + int regnum = obj->fence_reg; uint64_t val; - val = (uint64_t)((obj_priv->gtt_offset + size - 4096) & + val = (uint64_t)((obj->gtt_offset + size - 4096) & 0xfffff000) << 32; - val |= obj_priv->gtt_offset & 0xfffff000; - val |= (uint64_t)((obj_priv->stride / 128) - 1) << + val |= obj->gtt_offset & 0xfffff000; + val |= (uint64_t)((obj->stride / 128) - 1) << SANDYBRIDGE_FENCE_PITCH_SHIFT; - if (obj_priv->tiling_mode == I915_TILING_Y) + if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= I965_FENCE_REG_VALID; I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); } -static void i965_write_fence_reg(struct drm_gem_object *obj) +static void i965_write_fence_reg(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - u32 size = i915_gem_get_gtt_size(obj_priv); - int regnum = obj_priv->fence_reg; + u32 size = obj->gtt_space->size; + 
int regnum = obj->fence_reg; uint64_t val; - val = (uint64_t)((obj_priv->gtt_offset + size - 4096) & + val = (uint64_t)((obj->gtt_offset + size - 4096) & 0xfffff000) << 32; - val |= obj_priv->gtt_offset & 0xfffff000; - val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; - if (obj_priv->tiling_mode == I915_TILING_Y) + val |= obj->gtt_offset & 0xfffff000; + val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; + if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= I965_FENCE_REG_VALID; I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); } -static void i915_write_fence_reg(struct drm_gem_object *obj) +static void i915_write_fence_reg(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - u32 size = i915_gem_get_gtt_size(obj_priv); + u32 size = obj->gtt_space->size; uint32_t fence_reg, val, pitch_val; int tile_width; - if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || - (obj_priv->gtt_offset & (size - 1))) { + if ((obj->gtt_offset & ~I915_FENCE_START_MASK) || + (obj->gtt_offset & (size - 1))) { WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n", - __func__, obj_priv->gtt_offset, obj_priv->map_and_fenceable, size, - obj_priv->gtt_space->start, obj_priv->gtt_space->size); + __func__, obj->gtt_offset, obj->map_and_fenceable, size, + obj->gtt_space->start, obj->gtt_space->size); return; } - if (obj_priv->tiling_mode == I915_TILING_Y && + if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) tile_width = 128; else tile_width = 512; /* Note: pitch better be a power of two tile widths */ - pitch_val = obj_priv->stride / tile_width; + pitch_val = obj->stride / tile_width; pitch_val = ffs(pitch_val) - 1; - if (obj_priv->tiling_mode == I915_TILING_Y && + if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); else WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); - val = obj_priv->gtt_offset; - if (obj_priv->tiling_mode == I915_TILING_Y) + val = obj->gtt_offset; + if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; val |= I915_FENCE_SIZE_BITS(size); val |= pitch_val << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; - fence_reg = obj_priv->fence_reg; + fence_reg = obj->fence_reg; if (fence_reg < 8) fence_reg = FENCE_REG_830_0 + fence_reg * 4; else @@ -2440,30 +2407,29 @@ static void i915_write_fence_reg(struct drm_gem_object *obj) I915_WRITE(fence_reg, val); } -static void i830_write_fence_reg(struct drm_gem_object *obj) +static void i830_write_fence_reg(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - u32 size = i915_gem_get_gtt_size(obj_priv); - int regnum = obj_priv->fence_reg; + u32 size = obj->gtt_space->size; + int regnum = obj->fence_reg; uint32_t val; uint32_t pitch_val; uint32_t fence_size_bits; - if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) || - (obj_priv->gtt_offset & (obj->size - 1))) { + if ((obj->gtt_offset & ~I830_FENCE_START_MASK) || + (obj->gtt_offset & (obj->base.size - 1))) { WARN(1, "%s: object 0x%08x not 512K or size aligned\n", - __func__, obj_priv->gtt_offset); + __func__, obj->gtt_offset); return; } - pitch_val = 
obj_priv->stride / 128; + pitch_val = obj->stride / 128; pitch_val = ffs(pitch_val) - 1; WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); - val = obj_priv->gtt_offset; - if (obj_priv->tiling_mode == I915_TILING_Y) + val = obj->gtt_offset; + if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; fence_size_bits = I830_FENCE_SIZE_BITS(size); WARN_ON(fence_size_bits & ~0x00000f00); @@ -2479,7 +2445,7 @@ static int i915_find_fence_reg(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_fence_reg *reg; - struct drm_i915_gem_object *obj_priv = NULL; + struct drm_i915_gem_object *obj = NULL; int i, avail, ret; /* First try to find a free reg */ @@ -2489,9 +2455,8 @@ static int i915_find_fence_reg(struct drm_device *dev, if (!reg->obj) return i; - obj_priv = to_intel_bo(reg->obj); - if (!obj_priv->pin_count) - avail++; + if (!reg->obj->pin_count) + avail++; } if (avail == 0) @@ -2501,12 +2466,12 @@ static int i915_find_fence_reg(struct drm_device *dev, avail = I915_FENCE_REG_NONE; list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { - obj_priv = to_intel_bo(reg->obj); - if (obj_priv->pin_count) + obj = reg->obj; + if (obj->pin_count) continue; /* found one! */ - avail = obj_priv->fence_reg; + avail = obj->fence_reg; break; } @@ -2516,9 +2481,9 @@ static int i915_find_fence_reg(struct drm_device *dev, * might drop that one, causing a use-after-free in it. So hold a * private reference to obj like the other callers of put_fence_reg * (set_tiling ioctl) do. */ - drm_gem_object_reference(&obj_priv->base); - ret = i915_gem_object_put_fence_reg(&obj_priv->base, interruptible); - drm_gem_object_unreference(&obj_priv->base); + drm_gem_object_reference(&obj->base); + ret = i915_gem_object_put_fence_reg(obj, interruptible); + drm_gem_object_unreference(&obj->base); if (ret != 0) return ret; @@ -2539,39 +2504,38 @@ static int i915_find_fence_reg(struct drm_device *dev, * and tiling format. */ int -i915_gem_object_get_fence_reg(struct drm_gem_object *obj, +i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, bool interruptible) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_i915_fence_reg *reg = NULL; int ret; /* Just update our place in the LRU if our fence is getting used. 
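The four write_fence_reg variants above all do the same job: pack the region start, end (or size), pitch and tiling mode into a single register value that the hardware consults when detiling CPU accesses. A standalone sketch of the 965-style packing, with the shift/bit values as defined in i915_reg.h at the time (reproduced here only for illustration):

#include <stdint.h>
#include <stdio.h>

/* Shifts/bits for the 965 fence registers, as in i915_reg.h. */
#define I965_FENCE_PITCH_SHIFT     2
#define I965_FENCE_TILING_Y_SHIFT  1
#define I965_FENCE_REG_VALID       (1 << 0)

/* Pack a 965 fence value: upper 32 bits hold the last page of the
 * region, lower 32 bits the first page, plus the pitch in units of
 * 128-byte tile rows, the Y-tiling flag and the valid bit. */
static uint64_t i965_fence_value(uint32_t gtt_offset, uint32_t size,
                                 uint32_t stride, int tiling_y)
{
        uint64_t val;

        val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
        val |= gtt_offset & 0xfffff000;
        val |= (uint64_t)((stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
        if (tiling_y)
                val |= 1 << I965_FENCE_TILING_Y_SHIFT;
        val |= I965_FENCE_REG_VALID;
        return val;
}

int main(void)
{
        /* 1MiB X-tiled object at GTT offset 2MiB with a 512-byte stride. */
        printf("0x%016llx\n",
               (unsigned long long)i965_fence_value(2 << 20, 1 << 20, 512, 0));
        return 0;
}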
*/ - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { - reg = &dev_priv->fence_regs[obj_priv->fence_reg]; + if (obj->fence_reg != I915_FENCE_REG_NONE) { + reg = &dev_priv->fence_regs[obj->fence_reg]; list_move_tail(®->lru_list, &dev_priv->mm.fence_list); return 0; } - switch (obj_priv->tiling_mode) { + switch (obj->tiling_mode) { case I915_TILING_NONE: WARN(1, "allocating a fence for non-tiled object?\n"); break; case I915_TILING_X: - if (!obj_priv->stride) + if (!obj->stride) return -EINVAL; - WARN((obj_priv->stride & (512 - 1)), + WARN((obj->stride & (512 - 1)), "object 0x%08x is X tiled but has non-512B pitch\n", - obj_priv->gtt_offset); + obj->gtt_offset); break; case I915_TILING_Y: - if (!obj_priv->stride) + if (!obj->stride) return -EINVAL; - WARN((obj_priv->stride & (128 - 1)), + WARN((obj->stride & (128 - 1)), "object 0x%08x is Y tiled but has non-128B pitch\n", - obj_priv->gtt_offset); + obj->gtt_offset); break; } @@ -2579,8 +2543,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, if (ret < 0) return ret; - obj_priv->fence_reg = ret; - reg = &dev_priv->fence_regs[obj_priv->fence_reg]; + obj->fence_reg = ret; + reg = &dev_priv->fence_regs[obj->fence_reg]; list_add_tail(®->lru_list, &dev_priv->mm.fence_list); reg->obj = obj; @@ -2602,8 +2566,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, } trace_i915_gem_object_get_fence(obj, - obj_priv->fence_reg, - obj_priv->tiling_mode); + obj->fence_reg, + obj->tiling_mode); return 0; } @@ -2613,40 +2577,38 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, * @obj: object to clear * * Zeroes out the fence register itself and clears out the associated - * data structures in dev_priv and obj_priv. + * data structures in dev_priv and obj. */ static void -i915_gem_clear_fence_reg(struct drm_gem_object *obj) +i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - struct drm_i915_fence_reg *reg = - &dev_priv->fence_regs[obj_priv->fence_reg]; + struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[obj->fence_reg]; uint32_t fence_reg; switch (INTEL_INFO(dev)->gen) { case 6: I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + - (obj_priv->fence_reg * 8), 0); + (obj->fence_reg * 8), 0); break; case 5: case 4: - I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); + I915_WRITE64(FENCE_REG_965_0 + (obj->fence_reg * 8), 0); break; case 3: - if (obj_priv->fence_reg >= 8) - fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; + if (obj->fence_reg >= 8) + fence_reg = FENCE_REG_945_8 + (obj->fence_reg - 8) * 4; else case 2: - fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; + fence_reg = FENCE_REG_830_0 + obj->fence_reg * 4; I915_WRITE(fence_reg, 0); break; } reg->obj = NULL; - obj_priv->fence_reg = I915_FENCE_REG_NONE; + obj->fence_reg = I915_FENCE_REG_NONE; list_del_init(®->lru_list); } @@ -2657,18 +2619,17 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) * @bool: whether the wait upon the fence is interruptible * * Zeroes out the fence register itself and clears out the associated - * data structures in dev_priv and obj_priv. + * data structures in dev_priv and obj. 
*/ int -i915_gem_object_put_fence_reg(struct drm_gem_object *obj, +i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, bool interruptible) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_i915_fence_reg *reg; - if (obj_priv->fence_reg == I915_FENCE_REG_NONE) + if (obj->fence_reg == I915_FENCE_REG_NONE) return 0; /* If we've changed tiling, GTT-mappings of the object @@ -2681,7 +2642,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, * therefore we must wait for any outstanding access to complete * before clearing the fence. */ - reg = &dev_priv->fence_regs[obj_priv->fence_reg]; + reg = &dev_priv->fence_regs[obj->fence_reg]; if (reg->gpu) { int ret; @@ -2706,27 +2667,26 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, * Finds free space in the GTT aperture and binds the object there. */ static int -i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, +i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unsigned alignment, bool map_and_fenceable) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_mm_node *free_space; gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; u32 size, fence_size, fence_alignment, unfenced_alignment; bool mappable, fenceable; int ret; - if (obj_priv->madv != I915_MADV_WILLNEED) { + if (obj->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to bind a purgeable object\n"); return -EINVAL; } - fence_size = i915_gem_get_gtt_size(obj_priv); - fence_alignment = i915_gem_get_gtt_alignment(obj_priv); - unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj_priv); + fence_size = i915_gem_get_gtt_size(obj); + fence_alignment = i915_gem_get_gtt_alignment(obj); + unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj); if (alignment == 0) alignment = map_and_fenceable ? fence_alignment : @@ -2736,12 +2696,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, return -EINVAL; } - size = map_and_fenceable ? fence_size : obj->size; + size = map_and_fenceable ? fence_size : obj->base.size; /* If the object is bigger than the entire aperture, reject it early * before evicting everything in a vain attempt to find space. */ - if (obj->size > + if (obj->base.size > (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { DRM_ERROR("Attempting to bind an object larger than the aperture\n"); return -E2BIG; @@ -2760,16 +2720,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, if (free_space != NULL) { if (map_and_fenceable) - obj_priv->gtt_space = + obj->gtt_space = drm_mm_get_block_range_generic(free_space, size, alignment, 0, dev_priv->mm.gtt_mappable_end, 0); else - obj_priv->gtt_space = + obj->gtt_space = drm_mm_get_block(free_space, size, alignment); } - if (obj_priv->gtt_space == NULL) { + if (obj->gtt_space == NULL) { /* If the gtt is empty and we're still having trouble * fitting our object in, we're out of memory. 
*/ @@ -2783,8 +2743,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ret = i915_gem_object_get_pages_gtt(obj, gfpmask); if (ret) { - drm_mm_put_block(obj_priv->gtt_space); - obj_priv->gtt_space = NULL; + drm_mm_put_block(obj->gtt_space); + obj->gtt_space = NULL; if (ret == -ENOMEM) { /* first try to clear up some space from the GTT */ @@ -2810,8 +2770,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ret = i915_gem_gtt_bind_object(obj); if (ret) { i915_gem_object_put_pages_gtt(obj); - drm_mm_put_block(obj_priv->gtt_space); - obj_priv->gtt_space = NULL; + drm_mm_put_block(obj->gtt_space); + obj->gtt_space = NULL; ret = i915_gem_evict_something(dev, size, alignment, map_and_fenceable); @@ -2821,65 +2781,61 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, goto search_free; } - obj_priv->gtt_offset = obj_priv->gtt_space->start; + obj->gtt_offset = obj->gtt_space->start; /* keep track of bounds object by adding it to the inactive list */ - list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); - i915_gem_info_add_gtt(dev_priv, obj_priv); + list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + i915_gem_info_add_gtt(dev_priv, obj); /* Assert that the object is not currently in any GPU domain. As it * wasn't in the GTT, there shouldn't be any way it could have been in * a GPU cache */ - BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); - BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); + BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); + BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); - trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, map_and_fenceable); + trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable); fenceable = - obj_priv->gtt_space->size == fence_size && - (obj_priv->gtt_space->start & (fence_alignment - 1)) == 0; + obj->gtt_space->size == fence_size && + (obj->gtt_space->start & (fence_alignment - 1)) == 0; mappable = - obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end; + obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; - obj_priv->map_and_fenceable = mappable && fenceable; + obj->map_and_fenceable = mappable && fenceable; return 0; } void -i915_gem_clflush_object(struct drm_gem_object *obj) +i915_gem_clflush_object(struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - /* If we don't have a page list set up, then we're not pinned * to GPU, and we can ignore the cache flush because it'll happen * again at bind time. */ - if (obj_priv->pages == NULL) + if (obj->pages == NULL) return; trace_i915_gem_object_clflush(obj); - drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); + drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE); } /** Flushes any GPU write domain for the object if it's dirty. */ static int -i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, +i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj, bool pipelined) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; - if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) + if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) return 0; /* Queue the GPU write cache flushing we need.
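The tail of i915_gem_object_bind_to_gtt() above condenses the old mappable/fenceable bookkeeping into one flag: a binding can be used for CPU access through a fence only if the node is sized and aligned like a fence region and lies entirely inside the mappable aperture. A standalone restatement of that predicate (illustrative only; sizes are plain integers here):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Restates the two conditions combined into obj->map_and_fenceable:
 * the node must look like a fence region (exact size, aligned start)
 * and must end below the CPU-mappable part of the aperture. */
static bool map_and_fenceable(uint64_t start, uint64_t node_size,
                              uint64_t obj_size,
                              uint64_t fence_size, uint64_t fence_alignment,
                              uint64_t mappable_end)
{
        bool fenceable = node_size == fence_size &&
                         (start & (fence_alignment - 1)) == 0;
        bool mappable = start + obj_size <= mappable_end;

        return fenceable && mappable;
}

int main(void)
{
        /* 1MiB object in a 1MiB fence node at 2MiB, 256MiB aperture. */
        printf("%d\n", map_and_fenceable(2 << 20, 1 << 20, 1 << 20,
                                         1 << 20, 1 << 20, 256 << 20));
        return 0;
}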
*/ - i915_gem_flush_ring(dev, NULL, - to_intel_bo(obj)->ring, - 0, obj->write_domain); - BUG_ON(obj->write_domain); + i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain); + BUG_ON(obj->base.write_domain); if (pipelined) return 0; @@ -2889,11 +2845,11 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, /** Flushes the GTT write domain for the object if it's dirty. */ static void -i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) +i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj) { uint32_t old_write_domain; - if (obj->write_domain != I915_GEM_DOMAIN_GTT) + if (obj->base.write_domain != I915_GEM_DOMAIN_GTT) return; /* No actual flushing is required for the GTT write domain. Writes @@ -2902,30 +2858,30 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) */ i915_gem_release_mmap(obj); - old_write_domain = obj->write_domain; - obj->write_domain = 0; + old_write_domain = obj->base.write_domain; + obj->base.write_domain = 0; trace_i915_gem_object_change_domain(obj, - obj->read_domains, + obj->base.read_domains, old_write_domain); } /** Flushes the CPU write domain for the object if it's dirty. */ static void -i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) +i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) { uint32_t old_write_domain; - if (obj->write_domain != I915_GEM_DOMAIN_CPU) + if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) return; i915_gem_clflush_object(obj); intel_gtt_chipset_flush(); - old_write_domain = obj->write_domain; - obj->write_domain = 0; + old_write_domain = obj->base.write_domain; + obj->base.write_domain = 0; trace_i915_gem_object_change_domain(obj, - obj->read_domains, + obj->base.read_domains, old_write_domain); } @@ -2936,14 +2892,13 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) * flushes to occur. */ int -i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) +i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_write_domain, old_read_domains; int ret; /* Not valid to be called on unbound objects. */ - if (obj_priv->gtt_space == NULL) + if (obj->gtt_space == NULL) return -EINVAL; ret = i915_gem_object_flush_gpu_write_domain(obj, false); @@ -2958,18 +2913,18 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) return ret; } - old_write_domain = obj->write_domain; - old_read_domains = obj->read_domains; + old_write_domain = obj->base.write_domain; + old_read_domains = obj->base.read_domains; /* It should now be out of any other write domains, and we can update * the domain values for our changes. */ - BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); - obj->read_domains |= I915_GEM_DOMAIN_GTT; + BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); + obj->base.read_domains |= I915_GEM_DOMAIN_GTT; if (write) { - obj->read_domains = I915_GEM_DOMAIN_GTT; - obj->write_domain = I915_GEM_DOMAIN_GTT; - obj_priv->dirty = 1; + obj->base.read_domains = I915_GEM_DOMAIN_GTT; + obj->base.write_domain = I915_GEM_DOMAIN_GTT; + obj->dirty = 1; } trace_i915_gem_object_change_domain(obj, @@ -2984,15 +2939,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) * wait, as in modesetting process we're not supposed to be interrupted. 
*/ int -i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, +i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, bool pipelined) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_read_domains; int ret; /* Not valid to be called on unbound objects. */ - if (obj_priv->gtt_space == NULL) + if (obj->gtt_space == NULL) return -EINVAL; ret = i915_gem_object_flush_gpu_write_domain(obj, true); @@ -3008,12 +2962,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, i915_gem_object_flush_cpu_write_domain(obj); - old_read_domains = obj->read_domains; - obj->read_domains |= I915_GEM_DOMAIN_GTT; + old_read_domains = obj->base.read_domains; + obj->base.read_domains |= I915_GEM_DOMAIN_GTT; trace_i915_gem_object_change_domain(obj, old_read_domains, - obj->write_domain); + obj->base.write_domain); return 0; } @@ -3026,10 +2980,10 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, return 0; if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) - i915_gem_flush_ring(obj->base.dev, NULL, obj->ring, + i915_gem_flush_ring(obj->base.dev, obj->ring, 0, obj->base.write_domain); - return i915_gem_object_wait_rendering(&obj->base, interruptible); + return i915_gem_object_wait_rendering(obj, interruptible); } /** @@ -3039,7 +2993,7 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, * flushes to occur. */ static int -i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) +i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, int write) { uint32_t old_write_domain, old_read_domains; int ret; @@ -3061,27 +3015,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) return ret; } - old_write_domain = obj->write_domain; - old_read_domains = obj->read_domains; + old_write_domain = obj->base.write_domain; + old_read_domains = obj->base.read_domains; /* Flush the CPU cache if it's still invalid. */ - if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { + if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { i915_gem_clflush_object(obj); - obj->read_domains |= I915_GEM_DOMAIN_CPU; + obj->base.read_domains |= I915_GEM_DOMAIN_CPU; } /* It should now be out of any other write domains, and we can update * the domain values for our changes. */ - BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); + BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); /* If we're writing through the CPU, then the GPU read domains will * need to be invalidated at next use. 
*/ if (write) { - obj->read_domains = I915_GEM_DOMAIN_CPU; - obj->write_domain = I915_GEM_DOMAIN_CPU; + obj->base.read_domains = I915_GEM_DOMAIN_CPU; + obj->base.write_domain = I915_GEM_DOMAIN_CPU; } trace_i915_gem_object_change_domain(obj, @@ -3203,20 +3157,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) * drm_agp_chipset_flush */ static void -i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, +i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring, struct change_domains *cd) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - uint32_t invalidate_domains = 0; - uint32_t flush_domains = 0; + uint32_t invalidate_domains = 0, flush_domains = 0; /* * If the object isn't moving to a new write domain, * let the object stay in multiple read domains */ - if (obj->pending_write_domain == 0) - obj->pending_read_domains |= obj->read_domains; + if (obj->base.pending_write_domain == 0) + obj->base.pending_read_domains |= obj->base.read_domains; /* * Flush the current write domain if @@ -3224,18 +3176,18 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, * any read domains which differ from the old * write domain */ - if (obj->write_domain && - (obj->write_domain != obj->pending_read_domains || - obj_priv->ring != ring)) { - flush_domains |= obj->write_domain; + if (obj->base.write_domain && + (obj->base.write_domain != obj->base.pending_read_domains || + obj->ring != ring)) { + flush_domains |= obj->base.write_domain; invalidate_domains |= - obj->pending_read_domains & ~obj->write_domain; + obj->base.pending_read_domains & ~obj->base.write_domain; } /* * Invalidate any read caches which may have * stale data. That is, any new read domains. */ - invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; + invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains; if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) i915_gem_clflush_object(obj); @@ -3249,13 +3201,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, * write_domains). So if we have a current write domain that we * aren't changing, set pending_write_domain to that. */ - if (flush_domains == 0 && obj->pending_write_domain == 0) - obj->pending_write_domain = obj->write_domain; + if (flush_domains == 0 && obj->base.pending_write_domain == 0) + obj->base.pending_write_domain = obj->base.write_domain; cd->invalidate_domains |= invalidate_domains; cd->flush_domains |= flush_domains; if (flush_domains & I915_GEM_GPU_DOMAINS) - cd->flush_rings |= obj_priv->ring->id; + cd->flush_rings |= obj->ring->id; if (invalidate_domains & I915_GEM_GPU_DOMAINS) cd->flush_rings |= ring->id; } @@ -3267,30 +3219,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). */ static void -i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) +i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - - if (!obj_priv->page_cpu_valid) + if (!obj->page_cpu_valid) return; /* If we're partially in the CPU read domain, finish moving it in. 
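i915_gem_object_set_to_gpu_domain() above accumulates, per object, which caches must be flushed (the old write domain, when it is being dropped or read through another domain) and which must be invalidated (any read domain being newly entered). A standalone restatement of that bookkeeping, simplified in that the ring-switch case is omitted (illustrative bitmask arithmetic only):

#include <stdint.h>
#include <stdio.h>

struct change_domains {
        uint32_t invalidate_domains;
        uint32_t flush_domains;
};

/* Mirror of the domain bookkeeping: flush a write domain that is not
 * simply being read back, invalidate read domains being entered. */
static void accumulate_domains(uint32_t read, uint32_t write,
                               uint32_t pending_read,
                               struct change_domains *cd)
{
        uint32_t invalidate = 0, flush = 0;

        if (write && write != pending_read) {
                flush |= write;
                invalidate |= pending_read & ~write;
        }
        invalidate |= pending_read & ~read;

        cd->invalidate_domains |= invalidate;
        cd->flush_domains |= flush;
}

int main(void)
{
        struct change_domains cd = { 0, 0 };

        /* Object last written through domain 0x2, about to be read
         * through domain 0x8: flush 0x2, invalidate 0x8. */
        accumulate_domains(0x2, 0x2, 0x8, &cd);
        printf("invalidate=0x%x flush=0x%x\n",
               cd.invalidate_domains, cd.flush_domains);
        return 0;
}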
*/ - if (obj->read_domains & I915_GEM_DOMAIN_CPU) { + if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) { int i; - for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { - if (obj_priv->page_cpu_valid[i]) + for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) { + if (obj->page_cpu_valid[i]) continue; - drm_clflush_pages(obj_priv->pages + i, 1); + drm_clflush_pages(obj->pages + i, 1); } } /* Free the page_cpu_valid mappings which are now stale, whether * or not we've got I915_GEM_DOMAIN_CPU. */ - kfree(obj_priv->page_cpu_valid); - obj_priv->page_cpu_valid = NULL; + kfree(obj->page_cpu_valid); + obj->page_cpu_valid = NULL; } /** @@ -3306,14 +3256,13 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) * flushes to occur. */ static int -i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, +i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, uint64_t offset, uint64_t size) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_read_domains; int i, ret; - if (offset == 0 && size == obj->size) + if (offset == 0 && size == obj->base.size) return i915_gem_object_set_to_cpu_domain(obj, 0); ret = i915_gem_object_flush_gpu_write_domain(obj, false); @@ -3322,45 +3271,45 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, i915_gem_object_flush_gtt_write_domain(obj); /* If we're already fully in the CPU read domain, we're done. */ - if (obj_priv->page_cpu_valid == NULL && - (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0) + if (obj->page_cpu_valid == NULL && + (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0) return 0; /* Otherwise, create/clear the per-page CPU read domain flag if we're * newly adding I915_GEM_DOMAIN_CPU */ - if (obj_priv->page_cpu_valid == NULL) { - obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE, - GFP_KERNEL); - if (obj_priv->page_cpu_valid == NULL) + if (obj->page_cpu_valid == NULL) { + obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE, + GFP_KERNEL); + if (obj->page_cpu_valid == NULL) return -ENOMEM; - } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) - memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); + } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) + memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE); /* Flush the cache on any pages that are still invalid from the CPU's * perspective. */ for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) { - if (obj_priv->page_cpu_valid[i]) + if (obj->page_cpu_valid[i]) continue; - drm_clflush_pages(obj_priv->pages + i, 1); + drm_clflush_pages(obj->pages + i, 1); - obj_priv->page_cpu_valid[i] = 1; + obj->page_cpu_valid[i] = 1; } /* It should now be out of any other write domains, and we can update * the domain values for our changes. 
*/ - BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); + BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0); - old_read_domains = obj->read_domains; - obj->read_domains |= I915_GEM_DOMAIN_CPU; + old_read_domains = obj->base.read_domains; + obj->base.read_domains |= I915_GEM_DOMAIN_CPU; trace_i915_gem_object_change_domain(obj, old_read_domains, - obj->write_domain); + obj->base.write_domain); return 0; } @@ -3490,7 +3439,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, uint32_t __iomem *reloc_entry; void __iomem *reloc_page; - ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1); + ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret) goto err; @@ -3564,14 +3513,14 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, static int i915_gem_execbuffer_relocate(struct drm_device *dev, struct drm_file *file, - struct drm_gem_object **object_list, + struct drm_i915_gem_object **object_list, struct drm_i915_gem_exec_object2 *exec_list, int count) { int i, ret; for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); + struct drm_i915_gem_object *obj = object_list[i]; obj->base.pending_read_domains = 0; obj->base.pending_write_domain = 0; ret = i915_gem_execbuffer_relocate_object(obj, file, @@ -3586,7 +3535,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, static int i915_gem_execbuffer_reserve(struct drm_device *dev, struct drm_file *file, - struct drm_gem_object **object_list, + struct drm_i915_gem_object **object_list, struct drm_i915_gem_exec_object2 *exec_list, int count) { @@ -3599,7 +3548,7 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, ret = 0; for (i = 0; i < count; i++) { struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; - struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); + struct drm_i915_gem_object *obj = object_list[i]; bool need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; @@ -3610,12 +3559,12 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, /* Check fence reg constraints and rebind if necessary */ if (need_mappable && !obj->map_and_fenceable) { - ret = i915_gem_object_unbind(&obj->base); + ret = i915_gem_object_unbind(obj); if (ret) break; } - ret = i915_gem_object_pin(&obj->base, + ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); if (ret) @@ -3626,9 +3575,9 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, * to properly handle blits to/from tiled surfaces. 
*/ if (need_fence) { - ret = i915_gem_object_get_fence_reg(&obj->base, true); + ret = i915_gem_object_get_fence_reg(obj, true); if (ret) { - i915_gem_object_unpin(&obj->base); + i915_gem_object_unpin(obj); break; } @@ -3658,17 +3607,15 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, static int i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_file *file, - struct drm_gem_object **object_list, + struct drm_i915_gem_object **object_list, struct drm_i915_gem_exec_object2 *exec_list, int count) { struct drm_i915_gem_relocation_entry *reloc; int i, total, ret; - for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); - obj->in_execbuffer = false; - } + for (i = 0; i < count; i++) + object_list[i]->in_execbuffer = false; mutex_unlock(&dev->struct_mutex); @@ -3713,7 +3660,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, total = 0; for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); + struct drm_i915_gem_object *obj = object_list[i]; obj->base.pending_read_domains = 0; obj->base.pending_write_domain = 0; ret = i915_gem_execbuffer_relocate_object_slow(obj, file, @@ -3740,7 +3687,7 @@ static int i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, struct drm_file *file, struct intel_ring_buffer *ring, - struct drm_gem_object **objects, + struct drm_i915_gem_object **objects, int count) { struct change_domains cd; @@ -3759,17 +3706,17 @@ i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, cd.invalidate_domains, cd.flush_domains); #endif - i915_gem_flush(dev, file, + i915_gem_flush(dev, cd.invalidate_domains, cd.flush_domains, cd.flush_rings); } for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = to_intel_bo(objects[i]); + struct drm_i915_gem_object *obj = objects[i]; /* XXX replace with semaphores */ if (obj->ring && ring != obj->ring) { - ret = i915_gem_object_wait_rendering(&obj->base, true); + ret = i915_gem_object_wait_rendering(obj, true); if (ret) return ret; } @@ -3891,8 +3838,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_exec_object2 *exec_list) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object **object_list = NULL; - struct drm_gem_object *batch_obj; + struct drm_i915_gem_object **object_list = NULL; + struct drm_i915_gem_object *batch_obj; struct drm_clip_rect *cliprects = NULL; struct drm_i915_gem_request *request = NULL; int ret, i, flips; @@ -3987,29 +3934,29 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, /* Look up object handles */ for (i = 0; i < args->buffer_count; i++) { - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; - object_list[i] = drm_gem_object_lookup(dev, file, - exec_list[i].handle); - if (object_list[i] == NULL) { + obj = to_intel_bo(drm_gem_object_lookup(dev, file, + exec_list[i].handle)); + if (obj == NULL) { DRM_ERROR("Invalid object handle %d at index %d\n", exec_list[i].handle, i); /* prevent error path from reading uninitialized data */ - args->buffer_count = i + 1; + args->buffer_count = i; ret = -ENOENT; goto err; } + object_list[i] = obj; - obj_priv = to_intel_bo(object_list[i]); - if (obj_priv->in_execbuffer) { + if (obj->in_execbuffer) { DRM_ERROR("Object %p appears more than once in object list\n", - object_list[i]); + obj); /* prevent error path from reading uninitialized data */ args->buffer_count = i + 1; ret = -EINVAL; goto err; } - obj_priv->in_execbuffer = true; + obj->in_execbuffer = true; } /* Move the
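Two details of the lookup loop above are easy to miss: the in_execbuffer flag is what rejects a handle listed twice, and args->buffer_count is deliberately trimmed on failure so the unwind path only touches slots that were actually initialised (i on a failed lookup, i + 1 once the slot has been written). A toy userspace model of that shape, with a plain array standing in for the real handle lookup:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_bo { bool in_execbuffer; };

static int reserve_all(struct toy_bo **table, size_t nhandles,
		       const unsigned *handles, struct toy_bo **list,
		       unsigned *count)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < *count; i++) {
		struct toy_bo *bo =
			handles[i] < nhandles ? table[handles[i]] : NULL;

		if (bo == NULL) {
			*count = i;		/* slot i never written */
			ret = -ENOENT;
			goto err;
		}
		list[i] = bo;

		if (bo->in_execbuffer) {	/* duplicate handle */
			*count = i + 1;		/* slot i was written */
			ret = -EINVAL;
			goto err;
		}
		bo->in_execbuffer = true;
	}
	return 0;

err:
	for (i = 0; i < *count; i++)		/* only initialised slots */
		list[i]->in_execbuffer = false;
	return ret;
}

int main(void)
{
	struct toy_bo a = { false }, b = { false };
	struct toy_bo *table[] = { &a, &b };
	unsigned handles[] = { 0, 1, 0 };	/* handle 0 repeated */
	struct toy_bo *list[3];
	unsigned count = 3;

	printf("%d (count %u)\n",
	       reserve_all(table, 2, handles, list, &count),
	       count);	/* -EINVAL, count 3 */
	return 0;
}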
objects en-masse into the GTT, evicting if necessary. */ @@ -4037,15 +3984,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, /* Set the pending read domains for the batch buffer to COMMAND */ batch_obj = object_list[args->buffer_count-1]; - if (batch_obj->pending_write_domain) { + if (batch_obj->base.pending_write_domain) { DRM_ERROR("Attempting to use self-modifying batch buffer\n"); ret = -EINVAL; goto err; } - batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; + batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; /* Sanity check the batch buffer */ - exec_offset = to_intel_bo(batch_obj)->gtt_offset; + exec_offset = batch_obj->gtt_offset; ret = i915_gem_check_execbuffer(args, exec_offset); if (ret != 0) { DRM_ERROR("execbuf with invalid offset/length\n"); @@ -4077,8 +4024,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, */ flips = 0; for (i = 0; i < args->buffer_count; i++) { - if (object_list[i]->write_domain) - flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip); + if (object_list[i]->base.write_domain) + flips |= atomic_read(&object_list[i]->pending_flip); } if (flips) { int plane, flip_mask; @@ -4110,23 +4057,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } for (i = 0; i < args->buffer_count; i++) { - struct drm_gem_object *obj = object_list[i]; + struct drm_i915_gem_object *obj = object_list[i]; - obj->read_domains = obj->pending_read_domains; - obj->write_domain = obj->pending_write_domain; + obj->base.read_domains = obj->base.pending_read_domains; + obj->base.write_domain = obj->base.pending_write_domain; i915_gem_object_move_to_active(obj, ring); - if (obj->write_domain) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - obj_priv->dirty = 1; - list_move_tail(&obj_priv->gpu_write_list, + if (obj->base.write_domain) { + obj->dirty = 1; + list_move_tail(&obj->gpu_write_list, &ring->gpu_write_list); intel_mark_busy(dev, obj); } trace_i915_gem_object_change_domain(obj, - obj->read_domains, - obj->write_domain); + obj->base.read_domains, + obj->base.write_domain); } /* @@ -4142,11 +4088,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, err: for (i = 0; i < args->buffer_count; i++) { - if (object_list[i] == NULL) - break; - - to_intel_bo(object_list[i])->in_execbuffer = false; - drm_gem_object_unreference(object_list[i]); + object_list[i]->in_execbuffer = false; + drm_gem_object_unreference(&object_list[i]->base); } mutex_unlock(&dev->struct_mutex); @@ -4165,7 +4108,7 @@ pre_mutex_err: */ int i915_gem_execbuffer(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_execbuffer *args = data; struct drm_i915_gem_execbuffer2 exec2; @@ -4227,7 +4170,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, exec2.cliprects_ptr = args->cliprects_ptr; exec2.flags = I915_EXEC_RENDER; - ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); + ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. 
*/ for (i = 0; i < args->buffer_count; i++) @@ -4252,7 +4195,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, int i915_gem_execbuffer2(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_execbuffer2 *args = data; struct drm_i915_gem_exec_object2 *exec2_list = NULL; @@ -4285,7 +4228,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, return -EFAULT; } - ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list); + ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ ret = copy_to_user((struct drm_i915_relocation_entry __user *) @@ -4305,109 +4248,106 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, } int -i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, +i915_gem_object_pin(struct drm_i915_gem_object *obj, + uint32_t alignment, bool map_and_fenceable) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; - BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); + BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); BUG_ON(map_and_fenceable && !map_and_fenceable); WARN_ON(i915_verify_lists(dev)); - if (obj_priv->gtt_space != NULL) { - if ((alignment && obj_priv->gtt_offset & (alignment - 1)) || - (map_and_fenceable && !obj_priv->map_and_fenceable)) { - WARN(obj_priv->pin_count, + if (obj->gtt_space != NULL) { + if ((alignment && obj->gtt_offset & (alignment - 1)) || + (map_and_fenceable && !obj->map_and_fenceable)) { + WARN(obj->pin_count, "bo is already pinned with incorrect alignment:" " offset=%x, req.alignment=%x, req.map_and_fenceable=%d," " obj->map_and_fenceable=%d\n", - obj_priv->gtt_offset, alignment, + obj->gtt_offset, alignment, map_and_fenceable, - obj_priv->map_and_fenceable); + obj->map_and_fenceable); ret = i915_gem_object_unbind(obj); if (ret) return ret; } } - if (obj_priv->gtt_space == NULL) { + if (obj->gtt_space == NULL) { ret = i915_gem_object_bind_to_gtt(obj, alignment, map_and_fenceable); if (ret) return ret; } - if (obj_priv->pin_count++ == 0) { - i915_gem_info_add_pin(dev_priv, obj_priv, map_and_fenceable); - if (!obj_priv->active) - list_move_tail(&obj_priv->mm_list, + if (obj->pin_count++ == 0) { + i915_gem_info_add_pin(dev_priv, obj, map_and_fenceable); + if (!obj->active) + list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); } - BUG_ON(!obj_priv->pin_mappable && map_and_fenceable); + BUG_ON(!obj->pin_mappable && map_and_fenceable); WARN_ON(i915_verify_lists(dev)); return 0; } void -i915_gem_object_unpin(struct drm_gem_object *obj) +i915_gem_object_unpin(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); WARN_ON(i915_verify_lists(dev)); - BUG_ON(obj_priv->pin_count == 0); - BUG_ON(obj_priv->gtt_space == NULL); + BUG_ON(obj->pin_count == 0); + BUG_ON(obj->gtt_space == NULL); - if (--obj_priv->pin_count == 0) { - if (!obj_priv->active) - list_move_tail(&obj_priv->mm_list, + if (--obj->pin_count == 0) { + if (!obj->active) + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - i915_gem_info_remove_pin(dev_priv, obj_priv); + i915_gem_info_remove_pin(dev_priv, obj); } WARN_ON(i915_verify_lists(dev)); } int 
i915_gem_pin_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_pin *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); - if (obj_priv->madv != I915_MADV_WILLNEED) { + if (obj->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to pin a purgeable buffer\n"); ret = -EINVAL; goto out; } - if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { + if (obj->pin_filp != NULL && obj->pin_filp != file) { DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", args->handle); ret = -EINVAL; goto out; } - obj_priv->user_pin_count++; - obj_priv->pin_filp = file_priv; - if (obj_priv->user_pin_count == 1) { + obj->user_pin_count++; + obj->pin_filp = file; + if (obj->user_pin_count == 1) { ret = i915_gem_object_pin(obj, args->alignment, true); if (ret) goto out; @@ -4417,9 +4357,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, * as the X server doesn't manage domains yet */ i915_gem_object_flush_cpu_write_domain(obj); - args->offset = obj_priv->gtt_offset; + args->offset = obj->gtt_offset; out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -4427,38 +4367,36 @@ unlock: int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_pin *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); - if (obj_priv->pin_filp != file_priv) { + if (obj->pin_filp != file) { DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", args->handle); ret = -EINVAL; goto out; } - obj_priv->user_pin_count--; - if (obj_priv->user_pin_count == 0) { - obj_priv->pin_filp = NULL; + obj->user_pin_count--; + if (obj->user_pin_count == 0) { + obj->pin_filp = NULL; i915_gem_object_unpin(obj); } out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -4466,52 +4404,49 @@ unlock: int i915_gem_busy_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_busy *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); /* Count all active objects as busy, even if they are currently not used * by the gpu. Users of this interface expect objects to eventually * become non-busy without any further actions, therefore emit any * necessary flushes here. 
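The pin/unpin ioctls above layer a userspace pin count on top of the kernel pin count: the first pin from a file records that file as the owner and takes the real pin, further pins from another file are rejected, and the last user unpin drops both the ownership and the real pin. A compact sketch of that ownership rule, with stand-in types rather than the DRM structs:

#include <errno.h>
#include <stdio.h>

struct toy_file { int id; };
struct toy_bo {
	int pin_count, user_pin_count;
	struct toy_file *pin_filp;
};

static int toy_pin_ioctl(struct toy_bo *bo, struct toy_file *file)
{
	if (bo->pin_filp != NULL && bo->pin_filp != file)
		return -EINVAL;		/* already owned by another file */

	bo->user_pin_count++;
	bo->pin_filp = file;
	if (bo->user_pin_count == 1)
		bo->pin_count++;	/* stands in for i915_gem_object_pin() */
	return 0;
}

static void toy_unpin_ioctl(struct toy_bo *bo)
{
	if (--bo->user_pin_count == 0) {
		bo->pin_filp = NULL;
		bo->pin_count--;	/* stands in for i915_gem_object_unpin() */
	}
}

int main(void)
{
	struct toy_file a = { 1 }, b = { 2 };
	struct toy_bo bo = { 0, 0, NULL };

	toy_pin_ioctl(&bo, &a);
	printf("pin by b: %d\n", toy_pin_ioctl(&bo, &b));	/* rejected: -EINVAL */
	toy_unpin_ioctl(&bo);
	printf("pin_count %d\n", bo.pin_count);			/* 0 */
	return 0;
}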
*/ - args->busy = obj_priv->active; + args->busy = obj->active; if (args->busy) { /* Unconditionally flush objects, even when the gpu still uses this * object. Userspace calling this function indicates that it wants to * use this buffer rather sooner than later, so issuing the required * flush earlier is beneficial. */ - if (obj->write_domain & I915_GEM_GPU_DOMAINS) - i915_gem_flush_ring(dev, file_priv, - obj_priv->ring, - 0, obj->write_domain); + if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) + i915_gem_flush_ring(dev, obj->ring, + 0, obj->base.write_domain); /* Update the active list for the hardware's current position. * Otherwise this only updates on a delayed timer or when irqs * are actually unmasked, and our working set ends up being * larger than required. */ - i915_gem_retire_requests_ring(dev, obj_priv->ring); + i915_gem_retire_requests_ring(dev, obj->ring); - args->busy = obj_priv->active; + args->busy = obj->active; } - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; @@ -4529,8 +4464,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_i915_gem_madvise *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; switch (args->madv) { @@ -4545,37 +4479,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle)); if (obj == NULL) { ret = -ENOENT; goto unlock; } - obj_priv = to_intel_bo(obj); - if (obj_priv->pin_count) { + if (obj->pin_count) { ret = -EINVAL; goto out; } - if (obj_priv->madv != __I915_MADV_PURGED) - obj_priv->madv = args->madv; + if (obj->madv != __I915_MADV_PURGED) + obj->madv = args->madv; /* if the object is no longer bound, discard its backing storage */ - if (i915_gem_object_is_purgeable(obj_priv) && - obj_priv->gtt_space == NULL) + if (i915_gem_object_is_purgeable(obj) && + obj->gtt_space == NULL) i915_gem_object_truncate(obj); - args->retained = obj_priv->madv != __I915_MADV_PURGED; + args->retained = obj->madv != __I915_MADV_PURGED; out: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); return ret; } -struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, - size_t size) +struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, + size_t size) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; @@ -4605,7 +4538,7 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, /* Avoid an unnecessary call to unbind on the first bind. 
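The whole conversion running through this patch rests on the embedded-base idiom: the generic GEM object lives inside the driver object as the base member, so the two pointers are interconvertible without any extra allocation or lookup. A self-contained model of the pattern (toy types, not the DRM structs; to_drv plays the role of to_intel_bo):

#include <stddef.h>
#include <stdio.h>

struct base_obj { size_t size; };

struct drv_obj {
	struct base_obj base;	/* the embedded generic object */
	int pin_count;
};

/* Recover the container from a pointer to its embedded base. */
#define to_drv(p) \
	((struct drv_obj *)((char *)(p) - offsetof(struct drv_obj, base)))

int main(void)
{
	struct drv_obj obj = { .base = { 4096 }, .pin_count = 1 };
	struct base_obj *base = &obj.base;	/* upcast: just take &obj->base */

	printf("%d\n", to_drv(base)->pin_count);	/* downcast: prints 1 */
	return 0;
}

This is why the rest of the patch can pass struct drm_i915_gem_object around directly and still hand &obj->base to the generic drm_gem_* helpers at the boundaries.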
*/ obj->map_and_fenceable = true; - return &obj->base; + return obj; } int i915_gem_init_object(struct drm_gem_object *obj) @@ -4615,42 +4548,41 @@ int i915_gem_init_object(struct drm_gem_object *obj) return 0; } -static void i915_gem_free_object_tail(struct drm_gem_object *obj) +static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; ret = i915_gem_object_unbind(obj); if (ret == -ERESTARTSYS) { - list_move(&obj_priv->mm_list, + list_move(&obj->mm_list, &dev_priv->mm.deferred_free_list); return; } - if (obj->map_list.map) + if (obj->base.map_list.map) i915_gem_free_mmap_offset(obj); - drm_gem_object_release(obj); - i915_gem_info_remove_obj(dev_priv, obj->size); + drm_gem_object_release(&obj->base); + i915_gem_info_remove_obj(dev_priv, obj->base.size); - kfree(obj_priv->page_cpu_valid); - kfree(obj_priv->bit_17); - kfree(obj_priv); + kfree(obj->page_cpu_valid); + kfree(obj->bit_17); + kfree(obj); } -void i915_gem_free_object(struct drm_gem_object *obj) +void i915_gem_free_object(struct drm_gem_object *gem_obj) { - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + struct drm_device *dev = obj->base.dev; trace_i915_gem_object_destroy(obj); - while (obj_priv->pin_count > 0) + while (obj->pin_count > 0) i915_gem_object_unpin(obj); - if (obj_priv->phys_obj) + if (obj->phys_obj) i915_gem_detach_phys_object(dev, obj); i915_gem_free_object_tail(obj); @@ -4710,8 +4642,7 @@ static int i915_gem_init_pipe_control(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; obj = i915_gem_alloc_object(dev, 4096); @@ -4720,15 +4651,14 @@ i915_gem_init_pipe_control(struct drm_device *dev) ret = -ENOMEM; goto err; } - obj_priv = to_intel_bo(obj); - obj_priv->agp_type = AGP_USER_CACHED_MEMORY; + obj->agp_type = AGP_USER_CACHED_MEMORY; ret = i915_gem_object_pin(obj, 4096, true); if (ret) goto err_unref; - dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; - dev_priv->seqno_page = kmap(obj_priv->pages[0]); + dev_priv->seqno_gfx_addr = obj->gtt_offset; + dev_priv->seqno_page = kmap(obj->pages[0]); if (dev_priv->seqno_page == NULL) goto err_unpin; @@ -4740,7 +4670,7 @@ i915_gem_init_pipe_control(struct drm_device *dev) err_unpin: i915_gem_object_unpin(obj); err_unref: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); err: return ret; } @@ -4750,14 +4680,12 @@ static void i915_gem_cleanup_pipe_control(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; obj = dev_priv->seqno_obj; - obj_priv = to_intel_bo(obj); - kunmap(obj_priv->pages[0]); + kunmap(obj->pages[0]); i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); dev_priv->seqno_obj = NULL; dev_priv->seqno_page = NULL; @@ -5035,20 +4963,18 @@ void i915_gem_free_all_phys_object(struct drm_device *dev) } void i915_gem_detach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj) + struct drm_i915_gem_object *obj) { - struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; - struct 
drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; char *vaddr; int i; int page_count; - if (!obj_priv->phys_obj) + if (!obj->phys_obj) return; - vaddr = obj_priv->phys_obj->handle->vaddr; - - page_count = obj->size / PAGE_SIZE; + vaddr = obj->phys_obj->handle->vaddr; + page_count = obj->base.size / PAGE_SIZE; for (i = 0; i < page_count; i++) { struct page *page = read_cache_page_gfp(mapping, i, GFP_HIGHUSER | __GFP_RECLAIMABLE); @@ -5066,19 +4992,18 @@ void i915_gem_detach_phys_object(struct drm_device *dev, } intel_gtt_chipset_flush(); - obj_priv->phys_obj->cur_obj = NULL; - obj_priv->phys_obj = NULL; + obj->phys_obj->cur_obj = NULL; + obj->phys_obj = NULL; } int i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_gem_object *obj, + struct drm_i915_gem_object *obj, int id, int align) { - struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; + struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; int ret = 0; int page_count; int i; @@ -5086,10 +5011,8 @@ i915_gem_attach_phys_object(struct drm_device *dev, if (id > I915_MAX_PHYS_OBJECT) return -EINVAL; - obj_priv = to_intel_bo(obj); - - if (obj_priv->phys_obj) { - if (obj_priv->phys_obj->id == id) + if (obj->phys_obj) { + if (obj->phys_obj->id == id) return 0; i915_gem_detach_phys_object(dev, obj); } @@ -5097,18 +5020,19 @@ i915_gem_attach_phys_object(struct drm_device *dev, /* create a new object */ if (!dev_priv->mm.phys_objs[id - 1]) { ret = i915_gem_init_phys_object(dev, id, - obj->size, align); + obj->base.size, align); if (ret) { - DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); + DRM_ERROR("failed to init phys object %d size: %zu\n", + id, obj->base.size); return ret; } } /* bind to the object */ - obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; - obj_priv->phys_obj->cur_obj = obj; + obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; + obj->phys_obj->cur_obj = obj; - page_count = obj->size / PAGE_SIZE; + page_count = obj->base.size / PAGE_SIZE; for (i = 0; i < page_count; i++) { struct page *page; @@ -5120,7 +5044,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, return PTR_ERR(page); src = kmap_atomic(page); - dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); + dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE); memcpy(dst, src, PAGE_SIZE); kunmap_atomic(src); @@ -5132,16 +5056,14 @@ i915_gem_attach_phys_object(struct drm_device *dev, } static int -i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, +i915_gem_phys_pwrite(struct drm_device *dev, + struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset; + void *vaddr = obj->phys_obj->handle->vaddr + args->offset; char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; - DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size); - if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { unsigned long unwritten; @@ -5228,7 +5150,7 @@ rescan: &dev_priv->mm.inactive_list, mm_list) { if (i915_gem_object_is_purgeable(obj)) { - i915_gem_object_unbind(&obj->base); + i915_gem_object_unbind(obj); if (--nr_to_scan == 0) break; } @@ -5240,7 +5162,7 @@ rescan: &dev_priv->mm.inactive_list, mm_list) { if 
(nr_to_scan) { - i915_gem_object_unbind(&obj->base); + i915_gem_object_unbind(obj); nr_to_scan--; } else cnt++; diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 48644b840a8d..29d014c48ca2 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, } void -i915_gem_dump_object(struct drm_gem_object *obj, int len, +i915_gem_dump_object(struct drm_i915_gem_object *obj, int len, const char *where, uint32_t mark) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page; - DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); + DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset); for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { int page_len, chunk, chunk_len; @@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, chunk_len = page_len - chunk; if (chunk_len > 128) chunk_len = 128; - i915_gem_dump_page(obj_priv->pages[page], + i915_gem_dump_page(obj->pages[page], chunk, chunk + chunk_len, - obj_priv->gtt_offset + + obj->gtt_offset + page * PAGE_SIZE, mark); } @@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, #if WATCH_COHERENCY void -i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) +i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) { - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_device *dev = obj->base.dev; int page; uint32_t *gtt_mapping; uint32_t *backing_map = NULL; int bad_count = 0; DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", - __func__, obj, obj_priv->gtt_offset, handle, - obj->size / 1024); + __func__, obj, obj->gtt_offset, handle, + obj->base.size / 1024); - gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, - obj->size); + gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size); if (gtt_mapping == NULL) { DRM_ERROR("failed to map GTT space\n"); return; @@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) - for (page = 0; page < obj->size / PAGE_SIZE; page++) { + for (page = 0; page < obj->base.size / PAGE_SIZE; page++) { int i; - backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0); + backing_map = kmap_atomic(obj->pages[page], KM_USER0); if (backing_map == NULL) { DRM_ERROR("failed to map backing page\n"); @@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) if (cpuval != gttval) { DRM_INFO("incoherent CPU vs GPU at 0x%08x: " "0x%08x vs 0x%08x\n", - (int)(obj_priv->gtt_offset + + (int)(obj->gtt_offset + page * PAGE_SIZE + i * 4), cpuval, gttval); if (bad_count++ >= 8) { diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 3f6f336bbb4d..03e15d37b550 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -32,12 +32,11 @@ #include "i915_drm.h" static bool -mark_free(struct drm_i915_gem_object *obj_priv, - struct list_head *unwind) +mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) { - list_add(&obj_priv->evict_list, unwind); - drm_gem_object_reference(&obj_priv->base); - return drm_mm_scan_add_block(obj_priv->gtt_space); + list_add(&obj->evict_list, unwind); + drm_gem_object_reference(&obj->base); + return drm_mm_scan_add_block(obj->gtt_space); } int @@ -46,7 +45,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, { drm_i915_private_t
*dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret = 0; i915_gem_retire_requests(dev); @@ -96,42 +95,42 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); /* First see if there is a large enough contiguous idle region... */ - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { - if (mark_free(obj_priv, &unwind_list)) + list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { + if (mark_free(obj, &unwind_list)) goto found; } /* Now merge in the soon-to-be-expired objects... */ - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { /* Does the object require an outstanding flush? */ - if (obj_priv->base.write_domain || obj_priv->pin_count) + if (obj->base.write_domain || obj->pin_count) continue; - if (mark_free(obj_priv, &unwind_list)) + if (mark_free(obj, &unwind_list)) goto found; } /* Finally add anything with a pending flush (in order of retirement) */ - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { - if (obj_priv->pin_count) + list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { + if (obj->pin_count) continue; - if (mark_free(obj_priv, &unwind_list)) + if (mark_free(obj, &unwind_list)) goto found; } - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { - if (! obj_priv->base.write_domain || obj_priv->pin_count) + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { + if (! obj->base.write_domain || obj->pin_count) continue; - if (mark_free(obj_priv, &unwind_list)) + if (mark_free(obj, &unwind_list)) goto found; } /* Nothing found, clean up and bail out! */ - list_for_each_entry(obj_priv, &unwind_list, evict_list) { - ret = drm_mm_scan_remove_block(obj_priv->gtt_space); + list_for_each_entry(obj, &unwind_list, evict_list) { + ret = drm_mm_scan_remove_block(obj->gtt_space); BUG_ON(ret); - drm_gem_object_unreference(&obj_priv->base); + drm_gem_object_unreference(&obj->base); } /* We expect the caller to unpin, evict all and try again, or give up. @@ -145,26 +144,26 @@ found: * temporary list. 
*/ INIT_LIST_HEAD(&eviction_list); while (!list_empty(&unwind_list)) { - obj_priv = list_first_entry(&unwind_list, - struct drm_i915_gem_object, - evict_list); - if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { - list_move(&obj_priv->evict_list, &eviction_list); + obj = list_first_entry(&unwind_list, + struct drm_i915_gem_object, + evict_list); + if (drm_mm_scan_remove_block(obj->gtt_space)) { + list_move(&obj->evict_list, &eviction_list); continue; } - list_del(&obj_priv->evict_list); - drm_gem_object_unreference(&obj_priv->base); + list_del(&obj->evict_list); + drm_gem_object_unreference(&obj->base); } /* Unbinding will emit any required flushes */ while (!list_empty(&eviction_list)) { - obj_priv = list_first_entry(&eviction_list, - struct drm_i915_gem_object, - evict_list); + obj = list_first_entry(&eviction_list, + struct drm_i915_gem_object, + evict_list); if (ret == 0) - ret = i915_gem_object_unbind(&obj_priv->base); - list_del(&obj_priv->evict_list); - drm_gem_object_unreference(&obj_priv->base); + ret = i915_gem_object_unbind(obj); + list_del(&obj->evict_list); + drm_gem_object_unreference(&obj->base); } return ret; @@ -203,7 +202,7 @@ i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only) list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list) { if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { - int ret = i915_gem_object_unbind(&obj->base); + int ret = i915_gem_object_unbind(obj); if (ret) return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0b34a1aee9b6..71c2b0f3747b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -32,71 +32,67 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; - list_for_each_entry(obj_priv, - &dev_priv->mm.gtt_list, - gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { if (dev_priv->mm.gtt->needs_dmar) { - BUG_ON(!obj_priv->sg_list); + BUG_ON(!obj->sg_list); - intel_gtt_insert_sg_entries(obj_priv->sg_list, - obj_priv->num_sg, - obj_priv->gtt_space->start + intel_gtt_insert_sg_entries(obj->sg_list, + obj->num_sg, + obj->gtt_space->start >> PAGE_SHIFT, - obj_priv->agp_type); + obj->agp_type); } else - intel_gtt_insert_pages(obj_priv->gtt_space->start + intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, - obj_priv->base.size >> PAGE_SHIFT, - obj_priv->pages, - obj_priv->agp_type); + obj->base.size >> PAGE_SHIFT, + obj->pages, + obj->agp_type); } /* Be paranoid and flush the chipset cache. 
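The eviction rework above keeps two lists: every object the scan touched goes on an unwind list, and only the blocks drm_mm actually selected migrate to the eviction list while the rest are rolled back. The real fitting logic lives in drm_mm; the following is a deliberately simplified first-fit model (made-up sizes and policy) that only illustrates the mark-then-unwind-or-evict shape:

#include <stdbool.h>
#include <stdio.h>

struct toy_bo { int size; bool picked, evicted; };

static int evict_something(struct toy_bo *bos, int n, int min_size)
{
	int i, found = 0;

	/* Phase 1: mark candidates until enough space is covered
	 * (mark_free() + drm_mm_scan_add_block() in the driver). */
	for (i = 0; i < n && found < min_size; i++) {
		bos[i].picked = true;
		found += bos[i].size;
	}
	if (found < min_size) {
		while (i-- > 0)		/* nothing fits: full rollback */
			bos[i].picked = false;
		return -1;
	}

	/* Phase 2: evict what was picked, unwind everything else. */
	for (i = 0; i < n; i++) {
		if (bos[i].picked)
			bos[i].evicted = true;	/* i915_gem_object_unbind() */
		bos[i].picked = false;
	}
	return 0;
}

int main(void)
{
	struct toy_bo bos[3] = { { 8 }, { 16 }, { 32 } };

	if (evict_something(bos, 3, 20) == 0)
		printf("evicted: %d %d %d\n",
		       bos[0].evicted, bos[1].evicted, bos[2].evicted);
	return 0;	/* prints "evicted: 1 1 0" */
}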
*/ intel_gtt_chipset_flush(); } -int i915_gem_gtt_bind_object(struct drm_gem_object *obj) +int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; if (dev_priv->mm.gtt->needs_dmar) { - ret = intel_gtt_map_memory(obj_priv->pages, - obj->size >> PAGE_SHIFT, - &obj_priv->sg_list, - &obj_priv->num_sg); + ret = intel_gtt_map_memory(obj->pages, + obj->base.size >> PAGE_SHIFT, + &obj->sg_list, + &obj->num_sg); if (ret != 0) return ret; - intel_gtt_insert_sg_entries(obj_priv->sg_list, obj_priv->num_sg, - obj_priv->gtt_space->start - >> PAGE_SHIFT, - obj_priv->agp_type); + intel_gtt_insert_sg_entries(obj->sg_list, + obj->num_sg, + obj->gtt_space->start >> PAGE_SHIFT, + obj->agp_type); } else - intel_gtt_insert_pages(obj_priv->gtt_space->start >> PAGE_SHIFT, - obj->size >> PAGE_SHIFT, - obj_priv->pages, - obj_priv->agp_type); + intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT, + obj->pages, + obj->agp_type); return 0; } -void i915_gem_gtt_unbind_object(struct drm_gem_object *obj) +void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if (dev_priv->mm.gtt->needs_dmar) { - intel_gtt_unmap_memory(obj_priv->sg_list, obj_priv->num_sg); - obj_priv->sg_list = NULL; - obj_priv->num_sg = 0; + intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); + obj->sg_list = NULL; + obj->num_sg = 0; } - intel_gtt_clear_range(obj_priv->gtt_space->start >> PAGE_SHIFT, - obj->size >> PAGE_SHIFT); + intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, + obj->base.size >> PAGE_SHIFT); } diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index a517b48d441d..1c5fdb30f272 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -234,25 +234,24 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) /* Is the current GTT allocation valid for the change in tiling? */ static bool -i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) +i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); u32 size; if (tiling_mode == I915_TILING_NONE) return true; - if (INTEL_INFO(obj->dev)->gen >= 4) + if (INTEL_INFO(obj->base.dev)->gen >= 4) return true; - if (!obj_priv->gtt_space) + if (!obj->gtt_space) return true; - if (INTEL_INFO(obj->dev)->gen == 3) { - if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) + if (INTEL_INFO(obj->base.dev)->gen == 3) { + if (obj->gtt_offset & ~I915_FENCE_START_MASK) return false; } else { - if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) + if (obj->gtt_offset & ~I830_FENCE_START_MASK) return false; } @@ -260,18 +259,18 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) * Previous chips need to be aligned to the size of the smallest * fence register that can contain the object. 
*/ - if (INTEL_INFO(obj->dev)->gen == 3) + if (INTEL_INFO(obj->base.dev)->gen == 3) size = 1024*1024; else size = 512*1024; - while (size < obj_priv->base.size) + while (size < obj->base.size) size <<= 1; - if (obj_priv->gtt_space->size != size) + if (obj->gtt_space->size != size) return false; - if (obj_priv->gtt_offset & (size - 1)) + if (obj->gtt_offset & (size - 1)) return false; return true; @@ -283,30 +282,29 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) */ int i915_gem_set_tiling(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_set_tiling *args = data; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; ret = i915_gem_check_is_wedged(dev); if (ret) return ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) return -ENOENT; - obj_priv = to_intel_bo(obj); - if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { - drm_gem_object_unreference_unlocked(obj); + if (!i915_tiling_ok(dev, + args->stride, obj->base.size, args->tiling_mode)) { + drm_gem_object_unreference_unlocked(&obj->base); return -EINVAL; } - if (obj_priv->pin_count) { - drm_gem_object_unreference_unlocked(obj); + if (obj->pin_count) { + drm_gem_object_unreference_unlocked(&obj->base); return -EBUSY; } @@ -340,8 +338,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, } mutex_lock(&dev->struct_mutex); - if (args->tiling_mode != obj_priv->tiling_mode || - args->stride != obj_priv->stride) { + if (args->tiling_mode != obj->tiling_mode || + args->stride != obj->stride) { /* We need to rebind the object if its current allocation * no longer meets the alignment restrictions for its new * tiling mode. 
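The pre-965 fence constraint checked above is compact enough to lift out: the fence region is the smallest power of two, at least the hardware minimum (1MiB on gen3, 512KiB earlier), that covers the object, the GTT allocation must be exactly that size, and the object must sit at a multiple of it. A standalone version of the check:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool fence_ok(uint32_t gtt_offset, uint32_t gtt_size,
		     uint32_t obj_size, uint32_t min_fence)
{
	uint32_t size = min_fence;

	while (size < obj_size)		/* round up to a power of two */
		size <<= 1;

	if (gtt_size != size)		/* allocation must be fence-sized */
		return false;
	if (gtt_offset & (size - 1))	/* ...and fence-aligned */
		return false;
	return true;
}

int main(void)
{
	/* 640KiB object on gen3: the fence region rounds up to 1MiB. */
	printf("%d\n", fence_ok(0x100000, 0x100000, 640 * 1024, 1024 * 1024));
	printf("%d\n", fence_ok(0x180000, 0x100000, 640 * 1024, 1024 * 1024));
	return 0;	/* 1 then 0: second offset is not 1MiB-aligned */
}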
Otherwise we can just leave it alone, but @@ -349,22 +347,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, */ if (!i915_gem_object_fence_ok(obj, args->tiling_mode)) ret = i915_gem_object_unbind(obj); - else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) + else if (obj->fence_reg != I915_FENCE_REG_NONE) ret = i915_gem_object_put_fence_reg(obj, true); else i915_gem_release_mmap(obj); if (ret != 0) { - args->tiling_mode = obj_priv->tiling_mode; - args->stride = obj_priv->stride; + args->tiling_mode = obj->tiling_mode; + args->stride = obj->stride; goto err; } - obj_priv->tiling_mode = args->tiling_mode; - obj_priv->stride = args->stride; + obj->tiling_mode = args->tiling_mode; + obj->stride = args->stride; } err: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); return ret; @@ -375,22 +373,20 @@ err: */ int i915_gem_get_tiling(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_get_tiling *args = data; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); if (obj == NULL) return -ENOENT; - obj_priv = to_intel_bo(obj); mutex_lock(&dev->struct_mutex); - args->tiling_mode = obj_priv->tiling_mode; - switch (obj_priv->tiling_mode) { + args->tiling_mode = obj->tiling_mode; + switch (obj->tiling_mode) { case I915_TILING_X: args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; break; @@ -410,7 +406,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); return 0; @@ -440,46 +436,44 @@ i915_gem_swizzle_page(struct page *page) } void -i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) +i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int page_count = obj->size >> PAGE_SHIFT; + int page_count = obj->base.size >> PAGE_SHIFT; int i; if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) return; - if (obj_priv->bit_17 == NULL) + if (obj->bit_17 == NULL) return; for (i = 0; i < page_count; i++) { - char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; + char new_bit_17 = page_to_phys(obj->pages[i]) >> 17; if ((new_bit_17 & 0x1) != - (test_bit(i, obj_priv->bit_17) != 0)) { - i915_gem_swizzle_page(obj_priv->pages[i]); - set_page_dirty(obj_priv->pages[i]); + (test_bit(i, obj->bit_17) != 0)) { + i915_gem_swizzle_page(obj->pages[i]); + set_page_dirty(obj->pages[i]); } } } void -i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) +i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int page_count = obj->size >> PAGE_SHIFT; + int page_count = obj->base.size >> PAGE_SHIFT; int i; if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) return; - if (obj_priv->bit_17 == NULL) { - obj_priv->bit_17 
= kmalloc(BITS_TO_LONGS(page_count) * + if (obj->bit_17 == NULL) { + obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * sizeof(long), GFP_KERNEL); - if (obj_priv->bit_17 == NULL) { + if (obj->bit_17 == NULL) { DRM_ERROR("Failed to allocate memory for bit 17 " "record\n"); return; @@ -487,9 +481,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) } for (i = 0; i < page_count; i++) { - if (page_to_phys(obj_priv->pages[i]) & (1 << 17)) - __set_bit(i, obj_priv->bit_17); + if (page_to_phys(obj->pages[i]) & (1 << 17)) + __set_bit(i, obj->bit_17); else - __clear_bit(i, obj_priv->bit_17); + __clear_bit(i, obj->bit_17); } } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a8f55f061f6d..09ac3bbd8165 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -423,28 +423,23 @@ static void i915_error_work_func(struct work_struct *work) #ifdef CONFIG_DEBUG_FS static struct drm_i915_error_object * i915_error_object_create(struct drm_device *dev, - struct drm_gem_object *src) + struct drm_i915_gem_object *src) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_error_object *dst; - struct drm_i915_gem_object *src_priv; int page, page_count; u32 reloc_offset; - if (src == NULL) + if (src == NULL || src->pages == NULL) return NULL; - src_priv = to_intel_bo(src); - if (src_priv->pages == NULL) - return NULL; - - page_count = src->size / PAGE_SIZE; + page_count = src->base.size / PAGE_SIZE; dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); if (dst == NULL) return NULL; - reloc_offset = src_priv->gtt_offset; + reloc_offset = src->gtt_offset; for (page = 0; page < page_count; page++) { unsigned long flags; void __iomem *s; @@ -466,7 +461,7 @@ i915_error_object_create(struct drm_device *dev, reloc_offset += PAGE_SIZE; } dst->page_count = page_count; - dst->gtt_offset = src_priv->gtt_offset; + dst->gtt_offset = src->gtt_offset; return dst; @@ -598,9 +593,9 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err, static void i915_capture_error_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; struct drm_i915_error_state *error; - struct drm_gem_object *batchbuffer[2]; + struct drm_i915_gem_object *batchbuffer[2]; unsigned long flags; u32 bbaddr; int count; @@ -668,34 +663,30 @@ static void i915_capture_error_state(struct drm_device *dev) batchbuffer[0] = NULL; batchbuffer[1] = NULL; count = 0; - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { - struct drm_gem_object *obj = &obj_priv->base; - + list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { if (batchbuffer[0] == NULL && - bbaddr >= obj_priv->gtt_offset && - bbaddr < obj_priv->gtt_offset + obj->size) + bbaddr >= obj->gtt_offset && + bbaddr < obj->gtt_offset + obj->base.size) batchbuffer[0] = obj; if (batchbuffer[1] == NULL && - error->acthd >= obj_priv->gtt_offset && - error->acthd < obj_priv->gtt_offset + obj->size) + error->acthd >= obj->gtt_offset && + error->acthd < obj->gtt_offset + obj->base.size) batchbuffer[1] = obj; count++; } /* Scan the other lists for completeness for those bizarre errors. 
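The error-state capture below walks the bo lists looking for the buffer whose GTT range contains the batch address (bbaddr) or the hardware's current head (ACTHD). The containment test itself is trivial; here it is as a standalone helper with toy types:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_bo { uint32_t gtt_offset, size; };

static struct toy_bo *find_containing(struct toy_bo *bos, int n,
				      uint32_t addr)
{
	int i;

	for (i = 0; i < n; i++)
		if (addr >= bos[i].gtt_offset &&
		    addr < bos[i].gtt_offset + bos[i].size)
			return &bos[i];
	return NULL;	/* address not backed by any scanned object */
}

int main(void)
{
	struct toy_bo bos[] = { { 0x0, 0x1000 }, { 0x10000, 0x4000 } };
	struct toy_bo *hit = find_containing(bos, 2, 0x12345);

	printf("%#x\n", hit ? hit->gtt_offset : 0u);	/* 0x10000 */
	return 0;
}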
*/ if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { - struct drm_gem_object *obj = &obj_priv->base; - + list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) { if (batchbuffer[0] == NULL && - bbaddr >= obj_priv->gtt_offset && - bbaddr < obj_priv->gtt_offset + obj->size) + bbaddr >= obj->gtt_offset && + bbaddr < obj->gtt_offset + obj->base.size) batchbuffer[0] = obj; if (batchbuffer[1] == NULL && - error->acthd >= obj_priv->gtt_offset && - error->acthd < obj_priv->gtt_offset + obj->size) + error->acthd >= obj->gtt_offset && + error->acthd < obj->gtt_offset + obj->base.size) batchbuffer[1] = obj; if (batchbuffer[0] && batchbuffer[1]) @@ -703,17 +694,15 @@ static void i915_capture_error_state(struct drm_device *dev) } } if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { - struct drm_gem_object *obj = &obj_priv->base; - + list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) { if (batchbuffer[0] == NULL && - bbaddr >= obj_priv->gtt_offset && - bbaddr < obj_priv->gtt_offset + obj->size) + bbaddr >= obj->gtt_offset && + bbaddr < obj->gtt_offset + obj->base.size) batchbuffer[0] = obj; if (batchbuffer[1] == NULL && - error->acthd >= obj_priv->gtt_offset && - error->acthd < obj_priv->gtt_offset + obj->size) + error->acthd >= obj->gtt_offset && + error->acthd < obj->gtt_offset + obj->base.size) batchbuffer[1] = obj; if (batchbuffer[0] && batchbuffer[1]) @@ -732,14 +721,14 @@ static void i915_capture_error_state(struct drm_device *dev) /* Record the ringbuffer */ error->ringbuffer = i915_error_object_create(dev, - dev_priv->render_ring.gem_object); + dev_priv->render_ring.obj); /* Record buffers on the active and pinned lists. */ error->active_bo = NULL; error->pinned_bo = NULL; error->active_bo_count = count; - list_for_each_entry(obj_priv, &dev_priv->mm.pinned_list, mm_list) + list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) count++; error->pinned_bo_count = count - error->active_bo_count; @@ -948,7 +937,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; struct intel_unpin_work *work; unsigned long flags; bool stall_detected; @@ -967,13 +956,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) } /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ - obj_priv = to_intel_bo(work->pending_flip_obj); + obj = work->pending_flip_obj; if (INTEL_INFO(dev)->gen >= 4) { int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; - stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; + stall_detected = I915_READ(dspsurf) == obj->gtt_offset; } else { int dspaddr = intel_crtc->plane == 0 ? 
DSPAADDR : DSPBADDR; - stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset + + stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + crtc->y * crtc->fb->pitch + crtc->x * crtc->fb->bits_per_pixel/8); } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 34ef49fd0377..1df7262ae077 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -6,6 +6,7 @@ #include #include +#include "i915_drv.h" #undef TRACE_SYSTEM #define TRACE_SYSTEM i915 @@ -16,18 +17,18 @@ TRACE_EVENT(i915_gem_object_create, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj), TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) __field(u32, size) ), TP_fast_assign( __entry->obj = obj; - __entry->size = obj->size; + __entry->size = obj->base.size; ), TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) @@ -35,12 +36,12 @@ TRACE_EVENT(i915_gem_object_create, TRACE_EVENT(i915_gem_object_bind, - TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset, bool mappable), + TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable), TP_ARGS(obj, gtt_offset, mappable), TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) __field(u32, gtt_offset) __field(bool, mappable) ), @@ -58,20 +59,20 @@ TRACE_EVENT(i915_gem_object_bind, TRACE_EVENT(i915_gem_object_change_domain, - TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), + TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), TP_ARGS(obj, old_read_domains, old_write_domain), TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) __field(u32, read_domains) __field(u32, write_domain) ), TP_fast_assign( __entry->obj = obj; - __entry->read_domains = obj->read_domains | (old_read_domains << 16); - __entry->write_domain = obj->write_domain | (old_write_domain << 16); + __entry->read_domains = obj->base.read_domains | (old_read_domains << 16); + __entry->write_domain = obj->base.write_domain | (old_write_domain << 16); ), TP_printk("obj=%p, read=%04x, write=%04x", @@ -81,12 +82,12 @@ TRACE_EVENT(i915_gem_object_change_domain, TRACE_EVENT(i915_gem_object_get_fence, - TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode), + TP_PROTO(struct drm_i915_gem_object *obj, int fence, int tiling_mode), TP_ARGS(obj, fence, tiling_mode), TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) __field(int, fence) __field(int, tiling_mode) ), @@ -103,12 +104,12 @@ TRACE_EVENT(i915_gem_object_get_fence, DECLARE_EVENT_CLASS(i915_gem_object, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj), TP_STRUCT__entry( - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) ), TP_fast_assign( @@ -120,21 +121,21 @@ DECLARE_EVENT_CLASS(i915_gem_object, DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj) ); DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj) ); DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, - TP_PROTO(struct drm_gem_object *obj), + TP_PROTO(struct drm_i915_gem_object *obj), TP_ARGS(obj) ); @@ -266,13 
+267,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end, ); TRACE_EVENT(i915_flip_request, - TP_PROTO(int plane, struct drm_gem_object *obj), + TP_PROTO(int plane, struct drm_i915_gem_object *obj), TP_ARGS(plane, obj), TP_STRUCT__entry( __field(int, plane) - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) ), TP_fast_assign( @@ -284,13 +285,13 @@ TRACE_EVENT(i915_flip_request, ); TRACE_EVENT(i915_flip_complete, - TP_PROTO(int plane, struct drm_gem_object *obj), + TP_PROTO(int plane, struct drm_i915_gem_object *obj), TP_ARGS(plane, obj), TP_STRUCT__entry( __field(int, plane) - __field(struct drm_gem_object *, obj) + __field(struct drm_i915_gem_object *, obj) ), TP_fast_assign( diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d4bc443f43fc..ae7d4f55ce07 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1066,13 +1066,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); + struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int plane, i; u32 fbc_ctl, fbc_ctl2; if (fb->pitch == dev_priv->cfb_pitch && - obj_priv->fence_reg == dev_priv->cfb_fence && + obj->fence_reg == dev_priv->cfb_fence && intel_crtc->plane == dev_priv->cfb_plane && I915_READ(FBC_CONTROL) & FBC_CTL_EN) return; @@ -1086,7 +1086,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) /* FBC_CTL wants 64B units */ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; - dev_priv->cfb_fence = obj_priv->fence_reg; + dev_priv->cfb_fence = obj->fence_reg; dev_priv->cfb_plane = intel_crtc->plane; plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; @@ -1096,7 +1096,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) /* Set it up... */ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; - if (obj_priv->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) fbc_ctl2 |= FBC_CTL_CPU_FENCE; I915_WRITE(FBC_CONTROL2, fbc_ctl2); I915_WRITE(FBC_FENCE_OFF, crtc->y); @@ -1107,7 +1107,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; - if (obj_priv->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) fbc_ctl |= dev_priv->cfb_fence; I915_WRITE(FBC_CONTROL, fbc_ctl); @@ -1150,7 +1150,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); + struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int plane = intel_crtc->plane == 0 ? 
DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; unsigned long stall_watermark = 200; @@ -1159,7 +1159,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) dpfc_ctl = I915_READ(DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && - dev_priv->cfb_fence == obj_priv->fence_reg && + dev_priv->cfb_fence == obj->fence_reg && dev_priv->cfb_plane == intel_crtc->plane && dev_priv->cfb_y == crtc->y) return; @@ -1170,12 +1170,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) } dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; - dev_priv->cfb_fence = obj_priv->fence_reg; + dev_priv->cfb_fence = obj->fence_reg; dev_priv->cfb_plane = intel_crtc->plane; dev_priv->cfb_y = crtc->y; dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; - if (obj_priv->tiling_mode != I915_TILING_NONE) { + if (obj->tiling_mode != I915_TILING_NONE) { dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); } else { @@ -1221,7 +1221,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_framebuffer *fb = crtc->fb; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); + struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; unsigned long stall_watermark = 200; @@ -1230,9 +1230,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && - dev_priv->cfb_fence == obj_priv->fence_reg && + dev_priv->cfb_fence == obj->fence_reg && dev_priv->cfb_plane == intel_crtc->plane && - dev_priv->cfb_offset == obj_priv->gtt_offset && + dev_priv->cfb_offset == obj->gtt_offset && dev_priv->cfb_y == crtc->y) return; @@ -1242,14 +1242,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) } dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; - dev_priv->cfb_fence = obj_priv->fence_reg; + dev_priv->cfb_fence = obj->fence_reg; dev_priv->cfb_plane = intel_crtc->plane; - dev_priv->cfb_offset = obj_priv->gtt_offset; + dev_priv->cfb_offset = obj->gtt_offset; dev_priv->cfb_y = crtc->y; dpfc_ctl &= DPFC_RESERVED; dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); - if (obj_priv->tiling_mode != I915_TILING_NONE) { + if (obj->tiling_mode != I915_TILING_NONE) { dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); } else { @@ -1260,7 +1260,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); - I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); + I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); /* enable it... 
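Each enable_fbc() variant above opens by comparing the new scanout parameters against the cached cfb_* copies and bailing out when nothing changed, so the costly disable/reprogram cycle only happens on a real change. The shape of that memoisation, with invented fields standing in for pitch, fence, plane and offset (the driver compares field by field; memcmp here is just shorthand):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fbc_params { int pitch, fence, plane, y; };

static struct fbc_params cached;
static bool enabled;

static bool fbc_update(const struct fbc_params *p)
{
	if (enabled && memcmp(p, &cached, sizeof(*p)) == 0)
		return false;		/* already configured: skip */

	cached = *p;			/* (re)program the hardware here */
	enabled = true;
	return true;
}

int main(void)
{
	struct fbc_params p = { 2048, 1, 0, 0 };

	printf("%d %d\n", fbc_update(&p), fbc_update(&p));	/* 1 0 */
	return 0;
}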
*/ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -1345,7 +1345,7 @@ static void intel_update_fbc(struct drm_device *dev) struct intel_crtc *intel_crtc; struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; DRM_DEBUG_KMS("\n"); @@ -1384,9 +1384,9 @@ static void intel_update_fbc(struct drm_device *dev) intel_crtc = to_intel_crtc(crtc); fb = crtc->fb; intel_fb = to_intel_framebuffer(fb); - obj_priv = to_intel_bo(intel_fb->obj); + obj = intel_fb->obj; - if (intel_fb->obj->size > dev_priv->cfb_size) { + if (intel_fb->obj->base.size > dev_priv->cfb_size) { DRM_DEBUG_KMS("framebuffer too large, disabling " "compression\n"); dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; @@ -1410,7 +1410,7 @@ static void intel_update_fbc(struct drm_device *dev) dev_priv->no_fbc_reason = FBC_BAD_PLANE; goto out_disable; } - if (obj_priv->tiling_mode != I915_TILING_X) { + if (obj->tiling_mode != I915_TILING_X) { DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); dev_priv->no_fbc_reason = FBC_NOT_TILED; goto out_disable; @@ -1433,14 +1433,13 @@ out_disable: int intel_pin_and_fence_fb_obj(struct drm_device *dev, - struct drm_gem_object *obj, + struct drm_i915_gem_object *obj, bool pipelined) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); u32 alignment; int ret; - switch (obj_priv->tiling_mode) { + switch (obj->tiling_mode) { case I915_TILING_NONE: if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) alignment = 128 * 1024; @@ -1474,7 +1473,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, * framebuffer compression. For simplicity, we always install * a fence as the cost is not that onerous. */ - if (obj_priv->tiling_mode != I915_TILING_NONE) { + if (obj->tiling_mode != I915_TILING_NONE) { ret = i915_gem_object_get_fence_reg(obj, false); if (ret) goto err_unpin; @@ -1496,8 +1495,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj_priv; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; int plane = intel_crtc->plane; unsigned long Start, Offset; u32 dspcntr; @@ -1514,7 +1512,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, intel_fb = to_intel_framebuffer(fb); obj = intel_fb->obj; - obj_priv = to_intel_bo(obj); reg = DSPCNTR(plane); dspcntr = I915_READ(reg); @@ -1539,7 +1536,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, return -EINVAL; } if (INTEL_INFO(dev)->gen >= 4) { - if (obj_priv->tiling_mode != I915_TILING_NONE) + if (obj->tiling_mode != I915_TILING_NONE) dspcntr |= DISPPLANE_TILED; else dspcntr &= ~DISPPLANE_TILED; @@ -1551,7 +1548,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, I915_WRITE(reg, dspcntr); - Start = obj_priv->gtt_offset; + Start = obj->gtt_offset; Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", @@ -1605,18 +1602,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, if (old_fb) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; wait_event(dev_priv->pending_flip_queue, - 
atomic_read(&obj_priv->pending_flip) == 0); + atomic_read(&obj->pending_flip) == 0); /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the * current scanout is retired before unpinning the old * framebuffer. */ - ret = i915_gem_object_flush_gpu(obj_priv, false); + ret = i915_gem_object_flush_gpu(obj, false); if (ret) { i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); mutex_unlock(&dev->struct_mutex); @@ -2010,16 +2006,16 @@ static void intel_clear_scanline_wait(struct drm_device *dev) static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) { - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; struct drm_i915_private *dev_priv; if (crtc->fb == NULL) return; - obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj); + obj = to_intel_framebuffer(crtc->fb)->obj; dev_priv = crtc->dev->dev_private; wait_event(dev_priv->pending_flip_queue, - atomic_read(&obj_priv->pending_flip) == 0); + atomic_read(&obj->pending_flip) == 0); } static void ironlake_crtc_enable(struct drm_crtc *crtc) @@ -4333,15 +4329,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, } static int intel_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file_priv, + struct drm_file *file, uint32_t handle, uint32_t width, uint32_t height) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_gem_object *bo; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; uint32_t addr; int ret; @@ -4351,7 +4346,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, if (!handle) { DRM_DEBUG_KMS("cursor off\n"); addr = 0; - bo = NULL; + obj = NULL; mutex_lock(&dev->struct_mutex); goto finish; } @@ -4362,13 +4357,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, return -EINVAL; } - bo = drm_gem_object_lookup(dev, file_priv, handle); - if (!bo) + obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); + if (!obj) return -ENOENT; - obj_priv = to_intel_bo(bo); - - if (bo->size < width * height * 4) { + if (obj->base.size < width * height * 4) { DRM_ERROR("buffer is to small\n"); ret = -ENOMEM; goto fail; @@ -4377,29 +4370,29 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, /* we only need to pin inside GTT if cursor is non-phy */ mutex_lock(&dev->struct_mutex); if (!dev_priv->info->cursor_needs_physical) { - ret = i915_gem_object_pin(bo, PAGE_SIZE, true); + ret = i915_gem_object_pin(obj, PAGE_SIZE, true); if (ret) { DRM_ERROR("failed to pin cursor bo\n"); goto fail_locked; } - ret = i915_gem_object_set_to_gtt_domain(bo, 0); + ret = i915_gem_object_set_to_gtt_domain(obj, 0); if (ret) { DRM_ERROR("failed to move cursor bo into the GTT\n"); goto fail_unpin; } - addr = obj_priv->gtt_offset; + addr = obj->gtt_offset; } else { int align = IS_I830(dev) ? 16 * 1024 : 256; - ret = i915_gem_attach_phys_object(dev, bo, + ret = i915_gem_attach_phys_object(dev, obj, (intel_crtc->pipe == 0) ? 
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, align); if (ret) { DRM_ERROR("failed to attach phys object\n"); goto fail_locked; } - addr = obj_priv->phys_obj->handle->busaddr; + addr = obj->phys_obj->handle->busaddr; } if (IS_GEN2(dev)) @@ -4408,17 +4401,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, finish: if (intel_crtc->cursor_bo) { if (dev_priv->info->cursor_needs_physical) { - if (intel_crtc->cursor_bo != bo) + if (intel_crtc->cursor_bo != obj) i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); } else i915_gem_object_unpin(intel_crtc->cursor_bo); - drm_gem_object_unreference(intel_crtc->cursor_bo); + drm_gem_object_unreference(&intel_crtc->cursor_bo->base); } mutex_unlock(&dev->struct_mutex); intel_crtc->cursor_addr = addr; - intel_crtc->cursor_bo = bo; + intel_crtc->cursor_bo = obj; intel_crtc->cursor_width = width; intel_crtc->cursor_height = height; @@ -4426,11 +4419,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, return 0; fail_unpin: - i915_gem_object_unpin(bo); + i915_gem_object_unpin(obj); fail_locked: mutex_unlock(&dev->struct_mutex); fail: - drm_gem_object_unreference_unlocked(bo); + drm_gem_object_unreference_unlocked(&obj->base); return ret; } @@ -4890,7 +4883,7 @@ static void intel_idle_update(struct work_struct *work) * buffer), we'll also mark the display as busy, so we know to increase its * clock frequency. */ -void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) +void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = NULL; @@ -4971,8 +4964,8 @@ static void intel_unpin_work_fn(struct work_struct *__work) mutex_lock(&work->dev->struct_mutex); i915_gem_object_unpin(work->old_fb_obj); - drm_gem_object_unreference(work->pending_flip_obj); - drm_gem_object_unreference(work->old_fb_obj); + drm_gem_object_unreference(&work->pending_flip_obj->base); + drm_gem_object_unreference(&work->old_fb_obj->base); mutex_unlock(&work->dev->struct_mutex); kfree(work); } @@ -4983,7 +4976,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev, drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; struct drm_pending_vblank_event *e; struct timeval now; unsigned long flags; @@ -5015,10 +5008,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev, spin_unlock_irqrestore(&dev->event_lock, flags); - obj_priv = to_intel_bo(work->old_fb_obj); + obj = work->old_fb_obj; atomic_clear_mask(1 << intel_crtc->plane, - &obj_priv->pending_flip.counter); - if (atomic_read(&obj_priv->pending_flip) == 0) + &obj->pending_flip.counter); + if (atomic_read(&obj->pending_flip) == 0) wake_up(&dev_priv->pending_flip_queue); schedule_work(&work->work); @@ -5065,8 +5058,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_framebuffer *intel_fb; - struct drm_i915_gem_object *obj_priv; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; unsigned long flags, offset; @@ -5105,8 +5097,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, goto cleanup_work; /* Reference the objects for the scheduled work. 
*/ - drm_gem_object_reference(work->old_fb_obj); - drm_gem_object_reference(obj); + drm_gem_object_reference(&work->old_fb_obj->base); + drm_gem_object_reference(&obj->base); crtc->fb = fb; @@ -5134,7 +5126,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, } work->pending_flip_obj = obj; - obj_priv = to_intel_bo(obj); work->enable_stall_check = true; @@ -5148,15 +5139,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, /* Block clients from rendering to the new back buffer until * the flip occurs and the object is no longer visible. */ - atomic_add(1 << intel_crtc->plane, - &to_intel_bo(work->old_fb_obj)->pending_flip); + atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); switch (INTEL_INFO(dev)->gen) { case 2: OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); OUT_RING(fb->pitch); - OUT_RING(obj_priv->gtt_offset + offset); + OUT_RING(obj->gtt_offset + offset); OUT_RING(MI_NOOP); break; @@ -5164,7 +5154,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, OUT_RING(MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); OUT_RING(fb->pitch); - OUT_RING(obj_priv->gtt_offset + offset); + OUT_RING(obj->gtt_offset + offset); OUT_RING(MI_NOOP); break; @@ -5177,7 +5167,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); OUT_RING(fb->pitch); - OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); + OUT_RING(obj->gtt_offset | obj->tiling_mode); /* XXX Enabling the panel-fitter across page-flip is so far * untested on non-native modes, so ignore it for now. @@ -5191,8 +5181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, case 6: OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - OUT_RING(fb->pitch | obj_priv->tiling_mode); - OUT_RING(obj_priv->gtt_offset); + OUT_RING(fb->pitch | obj->tiling_mode); + OUT_RING(obj->gtt_offset); pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; pipesrc = I915_READ(pipe == 0 ? 
PIPEASRC : PIPEBSRC) & 0x0fff0fff; @@ -5208,8 +5198,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return 0; cleanup_objs: - drm_gem_object_unreference(work->old_fb_obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&work->old_fb_obj->base); + drm_gem_object_unreference(&obj->base); cleanup_work: mutex_unlock(&dev->struct_mutex); @@ -5295,7 +5285,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) } int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; @@ -5440,19 +5430,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); drm_framebuffer_cleanup(fb); - drm_gem_object_unreference_unlocked(intel_fb->obj); + drm_gem_object_unreference_unlocked(&intel_fb->obj->base); kfree(intel_fb); } static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, - struct drm_file *file_priv, + struct drm_file *file, unsigned int *handle) { struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); - struct drm_gem_object *object = intel_fb->obj; + struct drm_i915_gem_object *obj = intel_fb->obj; - return drm_gem_handle_create(file_priv, object, handle); + return drm_gem_handle_create(file, &obj->base, handle); } static const struct drm_framebuffer_funcs intel_fb_funcs = { @@ -5463,12 +5453,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *intel_fb, struct drm_mode_fb_cmd *mode_cmd, - struct drm_gem_object *obj) + struct drm_i915_gem_object *obj) { - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int ret; - if (obj_priv->tiling_mode == I915_TILING_Y) + if (obj->tiling_mode == I915_TILING_Y) return -EINVAL; if (mode_cmd->pitch & 63) @@ -5500,11 +5489,11 @@ intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, struct drm_mode_fb_cmd *mode_cmd) { - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; struct intel_framebuffer *intel_fb; int ret; - obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); + obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); if (!obj) return ERR_PTR(-ENOENT); @@ -5512,10 +5501,9 @@ intel_user_framebuffer_create(struct drm_device *dev, if (!intel_fb) return ERR_PTR(-ENOMEM); - ret = intel_framebuffer_init(dev, intel_fb, - mode_cmd, obj); + ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); if (ret) { - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_unreference_unlocked(&obj->base); kfree(intel_fb); return ERR_PTR(ret); } @@ -5528,10 +5516,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { .output_poll_changed = intel_fb_output_poll_changed, }; -static struct drm_gem_object * +static struct drm_i915_gem_object * intel_alloc_context_page(struct drm_device *dev) { - struct drm_gem_object *ctx; + struct drm_i915_gem_object *ctx; int ret; ctx = i915_gem_alloc_object(dev, 4096); @@ -5559,7 +5547,7 @@ intel_alloc_context_page(struct drm_device *dev) err_unpin: i915_gem_object_unpin(ctx); err_unref: - drm_gem_object_unreference(ctx); + drm_gem_object_unreference(&ctx->base); mutex_unlock(&dev->struct_mutex); return NULL; } @@ -5886,20 +5874,17 @@ void intel_init_clock_gating(struct drm_device *dev) if (dev_priv->renderctx == NULL) dev_priv->renderctx = 
intel_alloc_context_page(dev); if (dev_priv->renderctx) { - struct drm_i915_gem_object *obj_priv; - obj_priv = to_intel_bo(dev_priv->renderctx); - if (obj_priv) { - if (BEGIN_LP_RING(4) == 0) { - OUT_RING(MI_SET_CONTEXT); - OUT_RING(obj_priv->gtt_offset | - MI_MM_SPACE_GTT | - MI_SAVE_EXT_STATE_EN | - MI_RESTORE_EXT_STATE_EN | - MI_RESTORE_INHIBIT); - OUT_RING(MI_NOOP); - OUT_RING(MI_FLUSH); - ADVANCE_LP_RING(); - } + struct drm_i915_gem_object *obj = dev_priv->renderctx; + if (BEGIN_LP_RING(4) == 0) { + OUT_RING(MI_SET_CONTEXT); + OUT_RING(obj->gtt_offset | + MI_MM_SPACE_GTT | + MI_SAVE_EXT_STATE_EN | + MI_RESTORE_EXT_STATE_EN | + MI_RESTORE_INHIBIT); + OUT_RING(MI_NOOP); + OUT_RING(MI_FLUSH); + ADVANCE_LP_RING(); } } else DRM_DEBUG_KMS("Failed to allocate render context." @@ -5907,22 +5892,11 @@ void intel_init_clock_gating(struct drm_device *dev) } if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { - struct drm_i915_gem_object *obj_priv = NULL; - + if (dev_priv->pwrctx == NULL) + dev_priv->pwrctx = intel_alloc_context_page(dev); if (dev_priv->pwrctx) { - obj_priv = to_intel_bo(dev_priv->pwrctx); - } else { - struct drm_gem_object *pwrctx; - - pwrctx = intel_alloc_context_page(dev); - if (pwrctx) { - dev_priv->pwrctx = pwrctx; - obj_priv = to_intel_bo(pwrctx); - } - } - - if (obj_priv) { - I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); + struct drm_i915_gem_object *obj = dev_priv->pwrctx; + I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN); I915_WRITE(MCHBAR_RENDER_STANDBY, I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); } @@ -6197,23 +6171,25 @@ void intel_modeset_cleanup(struct drm_device *dev) dev_priv->display.disable_fbc(dev); if (dev_priv->renderctx) { - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj = dev_priv->renderctx; + + I915_WRITE(CCID, obj->gtt_offset &~ CCID_EN); + POSTING_READ(CCID); - obj_priv = to_intel_bo(dev_priv->renderctx); - I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN); - I915_READ(CCID); - i915_gem_object_unpin(dev_priv->renderctx); - drm_gem_object_unreference(dev_priv->renderctx); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + dev_priv->renderctx = NULL; } if (dev_priv->pwrctx) { - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj = dev_priv->pwrctx; + + I915_WRITE(PWRCTXA, obj->gtt_offset &~ PWRCTX_EN); + POSTING_READ(PWRCTXA); - obj_priv = to_intel_bo(dev_priv->pwrctx); - I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); - I915_READ(PWRCTXA); - i915_gem_object_unpin(dev_priv->pwrctx); - drm_gem_object_unreference(dev_priv->pwrctx); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + dev_priv->pwrctx = NULL; } if (IS_IRONLAKE_M(dev)) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 21551fe74541..5a4f14e36d6c 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) struct intel_framebuffer { struct drm_framebuffer base; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; }; struct intel_fbdev { @@ -166,7 +166,7 @@ struct intel_crtc { struct intel_unpin_work *unpin_work; int fdi_lanes; - struct drm_gem_object *cursor_bo; + struct drm_i915_gem_object *cursor_bo; uint32_t cursor_addr; int16_t cursor_x, cursor_y; int16_t cursor_width, cursor_height; @@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) struct intel_unpin_work { struct 
work_struct work; struct drm_device *dev; - struct drm_gem_object *old_fb_obj; - struct drm_gem_object *pending_flip_obj; + struct drm_i915_gem_object *old_fb_obj; + struct drm_i915_gem_object *pending_flip_obj; struct drm_pending_vblank_event *event; int pending; bool enable_stall_check; @@ -236,7 +236,8 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern bool intel_sdvo_init(struct drm_device *dev, int output_device); extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); -extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj); +extern void intel_mark_busy(struct drm_device *dev, + struct drm_i915_gem_object *obj); extern void intel_lvds_init(struct drm_device *dev); extern void intel_dp_init(struct drm_device *dev, int dp_reg); void @@ -299,13 +300,13 @@ extern void ironlake_disable_drps(struct drm_device *dev); extern void intel_init_emon(struct drm_device *dev); extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, - struct drm_gem_object *obj, + struct drm_i915_gem_object *obj, bool pipelined); extern int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *ifb, struct drm_mode_fb_cmd *mode_cmd, - struct drm_gem_object *obj); + struct drm_i915_gem_object *obj); extern int intel_fbdev_init(struct drm_device *dev); extern void intel_fbdev_fini(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index af2a1dddc28e..c2cffeb4fe89 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -65,8 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, struct fb_info *info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd mode_cmd; - struct drm_gem_object *fbo = NULL; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; struct device *device = &dev->pdev->dev; int size, ret, mmio_bar = IS_GEN2(dev) ? 
1 : 0; @@ -83,18 +82,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev, size = mode_cmd.pitch * mode_cmd.height; size = ALIGN(size, PAGE_SIZE); - fbo = i915_gem_alloc_object(dev, size); - if (!fbo) { + obj = i915_gem_alloc_object(dev, size); + if (!obj) { DRM_ERROR("failed to allocate framebuffer\n"); ret = -ENOMEM; goto out; } - obj_priv = to_intel_bo(fbo); mutex_lock(&dev->struct_mutex); /* Flush everything out, we'll be doing GTT only from now on */ - ret = intel_pin_and_fence_fb_obj(dev, fbo, false); + ret = intel_pin_and_fence_fb_obj(dev, obj, false); if (ret) { DRM_ERROR("failed to pin fb: %d\n", ret); goto out_unref; @@ -108,7 +106,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, info->par = ifbdev; - ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo); + ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); if (ret) goto out_unpin; @@ -134,11 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev, else info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); - info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; + info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; info->fix.smem_len = size; - info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset, - size); + info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size); if (!info->screen_base) { ret = -ENOSPC; goto out_unpin; @@ -168,7 +165,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", fb->width, fb->height, - obj_priv->gtt_offset, fbo); + obj->gtt_offset, obj); mutex_unlock(&dev->struct_mutex); @@ -176,9 +173,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev, return 0; out_unpin: - i915_gem_object_unpin(fbo); + i915_gem_object_unpin(obj); out_unref: - drm_gem_object_unreference(fbo); + drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); out: return ret; @@ -225,7 +222,7 @@ static void intel_fbdev_destroy(struct drm_device *dev, drm_framebuffer_cleanup(&ifb->base); if (ifb->obj) { - drm_gem_object_unreference_unlocked(ifb->obj); + drm_gem_object_unreference_unlocked(&ifb->obj->base); ifb->obj = NULL; } } diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index ec8ffaccbbdb..af715cc03ee0 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -376,24 +376,23 @@ static int intel_overlay_continue(struct intel_overlay *overlay, static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) { - struct drm_gem_object *obj = &overlay->old_vid_bo->base; + struct drm_i915_gem_object *obj = overlay->old_vid_bo; i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); overlay->old_vid_bo = NULL; } static void intel_overlay_off_tail(struct intel_overlay *overlay) { - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj = overlay->vid_bo; /* never have the overlay hw on without showing a frame */ BUG_ON(!overlay->vid_bo); - obj = &overlay->vid_bo->base; i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); overlay->vid_bo = NULL; overlay->crtc->overlay = NULL; @@ -764,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params) } static int intel_overlay_do_put_image(struct intel_overlay *overlay, - struct drm_gem_object *new_bo, + struct drm_i915_gem_object *new_bo, struct put_image_params *params) { int ret, tmp_width; 
struct overlay_registers *regs; bool scale_changed = false; - struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo); struct drm_device *dev = overlay->dev; BUG_ON(!mutex_is_locked(&dev->struct_mutex)); @@ -825,7 +823,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, regs->SWIDTHSW = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); regs->SHEIGHT = params->src_h; - regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y; + regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y; regs->OSTRIDE = params->stride_Y; if (params->format & I915_OVERLAY_YUV_PLANAR) { @@ -839,8 +837,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, params->src_w/uv_hscale); regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; - regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U; - regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V; + regs->OBUF_0U = new_bo->gtt_offset + params->offset_U; + regs->OBUF_0V = new_bo->gtt_offset + params->offset_V; regs->OSTRIDE |= params->stride_UV << 16; } @@ -857,7 +855,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, goto out_unpin; overlay->old_vid_bo = overlay->vid_bo; - overlay->vid_bo = to_intel_bo(new_bo); + overlay->vid_bo = new_bo; return 0; @@ -970,7 +968,7 @@ static int check_overlay_scaling(struct put_image_params *rec) static int check_overlay_src(struct drm_device *dev, struct drm_intel_overlay_put_image *rec, - struct drm_gem_object *new_bo) + struct drm_i915_gem_object *new_bo) { int uv_hscale = uv_hsubsampling(rec->flags); int uv_vscale = uv_vsubsampling(rec->flags); @@ -1055,7 +1053,7 @@ static int check_overlay_src(struct drm_device *dev, return -EINVAL; tmp = rec->stride_Y*rec->src_height; - if (rec->offset_Y + tmp > new_bo->size) + if (rec->offset_Y + tmp > new_bo->base.size) return -EINVAL; break; @@ -1066,12 +1064,12 @@ static int check_overlay_src(struct drm_device *dev, return -EINVAL; tmp = rec->stride_Y * rec->src_height; - if (rec->offset_Y + tmp > new_bo->size) + if (rec->offset_Y + tmp > new_bo->base.size) return -EINVAL; tmp = rec->stride_UV * (rec->src_height / uv_vscale); - if (rec->offset_U + tmp > new_bo->size || - rec->offset_V + tmp > new_bo->size) + if (rec->offset_U + tmp > new_bo->base.size || + rec->offset_V + tmp > new_bo->base.size) return -EINVAL; break; } @@ -1114,7 +1112,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, struct intel_overlay *overlay; struct drm_mode_object *drmmode_obj; struct intel_crtc *crtc; - struct drm_gem_object *new_bo; + struct drm_i915_gem_object *new_bo; struct put_image_params *params; int ret; @@ -1153,8 +1151,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, } crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); - new_bo = drm_gem_object_lookup(dev, file_priv, - put_image_rec->bo_handle); + new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv, + put_image_rec->bo_handle)); if (!new_bo) { ret = -ENOENT; goto out_free; @@ -1245,7 +1243,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, out_unlock: mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->mode_config.mutex); - drm_gem_object_unreference_unlocked(new_bo); + drm_gem_object_unreference_unlocked(&new_bo->base); out_free: kfree(params); @@ -1398,7 +1396,7 @@ void intel_setup_overlay(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_overlay *overlay; - struct drm_gem_object *reg_bo; + struct drm_i915_gem_object *reg_bo; struct 
overlay_registers *regs; int ret; @@ -1413,7 +1411,7 @@ void intel_setup_overlay(struct drm_device *dev) reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); if (!reg_bo) goto out_free; - overlay->reg_bo = to_intel_bo(reg_bo); + overlay->reg_bo = reg_bo; if (OVERLAY_NEEDS_PHYSICAL(dev)) { ret = i915_gem_attach_phys_object(dev, reg_bo, @@ -1423,14 +1421,14 @@ void intel_setup_overlay(struct drm_device *dev) DRM_ERROR("failed to attach phys overlay regs\n"); goto out_free_bo; } - overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; + overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; } else { ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); if (ret) { DRM_ERROR("failed to pin overlay register bo\n"); goto out_free_bo; } - overlay->flip_addr = overlay->reg_bo->gtt_offset; + overlay->flip_addr = reg_bo->gtt_offset; ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); if (ret) { @@ -1462,7 +1460,7 @@ void intel_setup_overlay(struct drm_device *dev) out_unpin_bo: i915_gem_object_unpin(reg_bo); out_free_bo: - drm_gem_object_unreference(reg_bo); + drm_gem_object_unreference(®_bo->base); out_free: kfree(overlay); return; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 1db860d7989a..181aad31125d 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -139,7 +139,7 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) static int init_ring_common(struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = ring->dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(ring->gem_object); + struct drm_i915_gem_object *obj = ring->obj; u32 head; /* Stop the ring if it's running. */ @@ -148,7 +148,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) ring->write_tail(ring, 0); /* Initialize the ring. 
*/ - I915_WRITE_START(ring, obj_priv->gtt_offset); + I915_WRITE_START(ring, obj->gtt_offset); head = I915_READ_HEAD(ring) & HEAD_ADDR; /* G45 ring initialization fails to reset head to zero */ @@ -178,7 +178,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) /* If the head is still not zero, the ring is dead */ if ((I915_READ_CTL(ring) & RING_VALID) == 0 || - I915_READ_START(ring) != obj_priv->gtt_offset || + I915_READ_START(ring) != obj->gtt_offset || (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { DRM_ERROR("%s initialization failed " "ctl %08x head %08x tail %08x start %08x\n", @@ -514,17 +514,15 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, static void cleanup_status_page(struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = ring->dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; obj = ring->status_page.obj; if (obj == NULL) return; - obj_priv = to_intel_bo(obj); - kunmap(obj_priv->pages[0]); + kunmap(obj->pages[0]); i915_gem_object_unpin(obj); - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); ring->status_page.obj = NULL; memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); @@ -534,8 +532,7 @@ static int init_status_page(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj; int ret; obj = i915_gem_alloc_object(dev, 4096); @@ -544,16 +541,15 @@ static int init_status_page(struct intel_ring_buffer *ring) ret = -ENOMEM; goto err; } - obj_priv = to_intel_bo(obj); - obj_priv->agp_type = AGP_USER_CACHED_MEMORY; + obj->agp_type = AGP_USER_CACHED_MEMORY; ret = i915_gem_object_pin(obj, 4096, true); if (ret != 0) { goto err_unref; } - ring->status_page.gfx_addr = obj_priv->gtt_offset; - ring->status_page.page_addr = kmap(obj_priv->pages[0]); + ring->status_page.gfx_addr = obj->gtt_offset; + ring->status_page.page_addr = kmap(obj->pages[0]); if (ring->status_page.page_addr == NULL) { memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); goto err_unpin; @@ -570,7 +566,7 @@ static int init_status_page(struct intel_ring_buffer *ring) err_unpin: i915_gem_object_unpin(obj); err_unref: - drm_gem_object_unreference(obj); + drm_gem_object_unreference(&obj->base); err: return ret; } @@ -578,8 +574,7 @@ err: int intel_init_ring_buffer(struct drm_device *dev, struct intel_ring_buffer *ring) { - struct drm_i915_gem_object *obj_priv; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; int ret; ring->dev = dev; @@ -600,15 +595,14 @@ int intel_init_ring_buffer(struct drm_device *dev, goto err_hws; } - ring->gem_object = obj; + ring->obj = obj; ret = i915_gem_object_pin(obj, PAGE_SIZE, true); if (ret) goto err_unref; - obj_priv = to_intel_bo(obj); ring->map.size = ring->size; - ring->map.offset = dev->agp->base + obj_priv->gtt_offset; + ring->map.offset = dev->agp->base + obj->gtt_offset; ring->map.type = 0; ring->map.flags = 0; ring->map.mtrr = 0; @@ -632,8 +626,8 @@ err_unmap: err_unpin: i915_gem_object_unpin(obj); err_unref: - drm_gem_object_unreference(obj); - ring->gem_object = NULL; + drm_gem_object_unreference(&obj->base); + ring->obj = NULL; err_hws: cleanup_status_page(ring); return ret; @@ -644,7 +638,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) struct drm_i915_private *dev_priv; int ret; - if (ring->gem_object == NULL) + if (ring->obj == NULL) return; 
/* Disable the ring buffer. The ring must be idle at this point */ @@ -654,9 +648,9 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) drm_core_ioremapfree(&ring->map, ring->dev); - i915_gem_object_unpin(ring->gem_object); - drm_gem_object_unreference(ring->gem_object); - ring->gem_object = NULL; + i915_gem_object_unpin(ring->obj); + drm_gem_object_unreference(&ring->obj->base); + ring->obj = NULL; if (ring->cleanup) ring->cleanup(ring); @@ -902,11 +896,11 @@ static int blt_ring_init(struct intel_ring_buffer *ring) u32 *ptr; int ret; - obj = to_intel_bo(i915_gem_alloc_object(ring->dev, 4096)); + obj = i915_gem_alloc_object(ring->dev, 4096); if (obj == NULL) return -ENOMEM; - ret = i915_gem_object_pin(&obj->base, 4096, true); + ret = i915_gem_object_pin(obj, 4096, true); if (ret) { drm_gem_object_unreference(&obj->base); return ret; @@ -917,9 +911,9 @@ static int blt_ring_init(struct intel_ring_buffer *ring) *ptr++ = MI_NOOP; kunmap(obj->pages[0]); - ret = i915_gem_object_set_to_gtt_domain(&obj->base, false); + ret = i915_gem_object_set_to_gtt_domain(obj, false); if (ret) { - i915_gem_object_unpin(&obj->base); + i915_gem_object_unpin(obj); drm_gem_object_unreference(&obj->base); return ret; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 2565d65a625b..1747e329ee94 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -4,7 +4,7 @@ struct intel_hw_status_page { u32 __iomem *page_addr; unsigned int gfx_addr; - struct drm_gem_object *obj; + struct drm_i915_gem_object *obj; }; #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) @@ -32,7 +32,7 @@ struct intel_ring_buffer { u32 mmio_base; void *virtual_start; struct drm_device *dev; - struct drm_gem_object *gem_object; + struct drm_i915_gem_object *obj; unsigned int head; unsigned int tail; -- cgit v1.2.3 From 92b88aeb1ad67417c002fdd77409771ca7e5433a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 9 Nov 2010 11:47:32 +0000 Subject: drm/i915: Not all mappable regions require GTT fence regions Combining map_and_fenceable revealed a bug in i915_gem_object_gtt_size() in that it always computed the appropriate fence size for the object regardless of tiling state which caused us to over-allocate linear buffers when binding to the GTT. 
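As a back-of-the-envelope illustration of the over-allocation: a 640KB linear buffer on gen3 was previously rounded up to a 1MB power-of-two fence region even though no fence would ever cover it; with the tiling check it binds at its natural size. A minimal standalone sketch of the corrected sizing rule, distilled from the i915_gem_get_gtt_size() hunk below (the gen cutoffs and constants match the patch, everything else is simplified for illustration):

#include <stdbool.h>
#include <stdint.h>

/* Sketch only, not the kernel code: gen4+ hardware and linear (untiled)
 * objects bind at their natural size; older chips need a power-of-two
 * fence region, but only when the object is actually tiled. The bug was
 * applying the power-of-two rounding to linear buffers as well. */
static uint32_t gtt_size_sketch(uint32_t obj_size, int gen, bool tiled)
{
	uint32_t size;

	if (gen >= 4 || !tiled)
		return obj_size;

	size = (gen == 3) ? 1024*1024 : 512*1024;
	while (size < obj_size)
		size <<= 1;

	return size;
}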
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 53 +++++++++++++++++------------------------ 1 file changed, 22 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d196895527a6..8e3f1de681ed 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -41,9 +41,6 @@ struct change_domains { uint32_t flush_rings; }; -static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj); -static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj); - static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj, bool pipelined); static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); @@ -1443,6 +1440,28 @@ i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj) list->map = NULL; } +static uint32_t +i915_gem_get_gtt_size(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + uint32_t size; + + if (INTEL_INFO(dev)->gen >= 4 || + obj->tiling_mode == I915_TILING_NONE) + return obj->base.size; + + /* Previous chips need a power-of-two fence region when tiling */ + if (INTEL_INFO(dev)->gen == 3) + size = 1024*1024; + else + size = 512*1024; + + while (size < obj->base.size) + size <<= 1; + + return size; +} + /** * i915_gem_get_gtt_alignment - return required GTT alignment for an object * @obj: object to check @@ -1505,34 +1524,6 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) return tile_height * obj->stride * 2; } -static uint32_t -i915_gem_get_gtt_size(struct drm_i915_gem_object *obj) -{ - struct drm_device *dev = obj->base.dev; - uint32_t size; - - /* - * Minimum alignment is 4k (GTT page size), but might be greater - * if a fence register is needed for the object. - */ - if (INTEL_INFO(dev)->gen >= 4) - return obj->base.size; - - /* - * Previous chips need to be aligned to the size of the smallest - * fence register that can contain the object. - */ - if (INTEL_INFO(dev)->gen == 3) - size = 1024*1024; - else - size = 512*1024; - - while (size < obj->base.size) - size <<= 1; - - return size; -} - /** * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing * @dev: DRM device -- cgit v1.2.3 From 7bdc9ab00b1b0fdbb490f41c5c7c2fbc66fed9ee Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 9 Nov 2010 17:53:20 +0000 Subject: agp/intel: Remove duplicate const drivers/char/agp/intel-gtt.c:340:48: warning: duplicate const Signed-off-by: Chris Wilson --- drivers/char/agp/intel-gtt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 5a2b7360f5be..8cf600cdac06 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -334,7 +334,7 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry, writel(addr | pte_flags, intel_private.gtt + entry); } -static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = { +static const struct aper_size_info_fixed intel_fake_agp_sizes[] = { {32, 8192, 3}, {64, 16384, 4}, {128, 32768, 5}, -- cgit v1.2.3 From 748ebc6017a943ec065e653e975a5e8dace77ac6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 24 Oct 2010 10:28:47 +0100 Subject: drm/i915: Record fence registers on error. Having seen the effects of erroneous fencing on the batchbuffer, a useful sanity check is to record the fence registers at the time of an error. 
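For reference, each captured error state in debugfs now carries one line per fence register; the layout follows the seq_printf() format added below, though the register values shown here are invented purely for illustration:

 fence[0] = 00000000
 fence[1] = 04a01001
 ...
 fence[15] = 00000000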
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 3 +++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_irq.c | 30 ++++++++++++++++++++++++++++++ 3 files changed, 34 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1e8cd74d18d5..addb9392e1c4 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -684,6 +684,9 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); seq_printf(m, " seqno: 0x%08x\n", error->seqno); + for (i = 0; i < 16; i++) + seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); + if (error->active_bo) print_error_buffers(m, "Active", error->active_bo, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 22d6388b331f..699e71a0dab7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -166,6 +166,7 @@ struct drm_i915_error_state { u32 instdone1; u32 seqno; u64 bbaddr; + u64 fence[16]; struct timeval time; struct drm_i915_error_object { int page_count; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 09ac3bbd8165..de95c7bb6dba 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -581,6 +581,35 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err, return i; } +static void i915_gem_record_fences(struct drm_device *dev, + struct drm_i915_error_state *error) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + /* Fences */ + switch (INTEL_INFO(dev)->gen) { + case 6: + for (i = 0; i < 16; i++) + error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); + break; + case 5: + case 4: + for (i = 0; i < 16; i++) + error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + break; + case 3: + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + case 2: + for (i = 0; i < 8; i++) + error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + break; + + } +} + /** * i915_capture_error_state - capture an error record for later analysis * @dev: drm device @@ -656,6 +685,7 @@ static void i915_capture_error_state(struct drm_device *dev) error->acthd = I915_READ(ACTHD); error->bbaddr = 0; } + i915_gem_record_fences(dev, error); bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->render_ring); -- cgit v1.2.3 From b6913e4bdb09134dbdccd613e880d413b5911591 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Nov 2010 10:46:37 +0000 Subject: drm/i915: Move the implementation details of PIPE_CONTROL to the ringbuffer The pipe control object is allocated by the device for the sole use of the render ringbuffer. Move this detail from the general code to the render ring buffer initialisation. 
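The net effect is that the scratch page becomes per-ring private state behind ring->private rather than device-global fields. A minimal sketch of that ownership pattern, with the GEM pinning and kmap stubbed out (the struct and function names follow the patch below, the rest is simplified):

#include <stdlib.h>

struct ring { void *private; };

struct pipe_control {
	unsigned int gtt_offset;	 /* GPU address the seqno lands at */
	volatile unsigned int *cpu_page; /* CPU mapping used to read it */
};

static int init_pipe_control_sketch(struct ring *ring)
{
	struct pipe_control *pc;

	if (ring->private)	/* idempotent, as in the patch */
		return 0;

	pc = calloc(1, sizeof(*pc));
	if (!pc)
		return -1;	/* -ENOMEM in the kernel */

	/* the real code pins a 4096-byte cached GEM object and kmaps it */
	ring->private = pc;
	return 0;
}

static void cleanup_pipe_control_sketch(struct ring *ring)
{
	/* the real code kunmaps, unpins and unreferences the object */
	free(ring->private);
	ring->private = NULL;
}

Only the render ring's own init and cleanup vfuncs ever touch this state, so the other engines pay nothing for it.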
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 3 - drivers/gpu/drm/i915/i915_gem.c | 70 +-------------------- drivers/gpu/drm/i915/intel_ringbuffer.c | 105 +++++++++++++++++++++++++++++--- 3 files changed, 97 insertions(+), 81 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 699e71a0dab7..5afcf2a07cfc 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -275,12 +275,9 @@ typedef struct drm_i915_private { uint32_t next_seqno; drm_dma_handle_t *status_page_dmah; - void *seqno_page; dma_addr_t dma_status_page; uint32_t counter; - unsigned int seqno_gfx_addr; drm_local_map_t hws_map; - struct drm_i915_gem_object *seqno_obj; struct drm_i915_gem_object *pwrctx; struct drm_i915_gem_object *renderctx; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8e3f1de681ed..027212e5c34a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4625,78 +4625,15 @@ i915_gem_idle(struct drm_device *dev) return 0; } -/* - * 965+ support PIPE_CONTROL commands, which provide finer grained control - * over cache flushing. - */ -static int -i915_gem_init_pipe_control(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj; - int ret; - - obj = i915_gem_alloc_object(dev, 4096); - if (obj == NULL) { - DRM_ERROR("Failed to allocate seqno page\n"); - ret = -ENOMEM; - goto err; - } - obj->agp_type = AGP_USER_CACHED_MEMORY; - - ret = i915_gem_object_pin(obj, 4096, true); - if (ret) - goto err_unref; - - dev_priv->seqno_gfx_addr = obj->gtt_offset; - dev_priv->seqno_page = kmap(obj->pages[0]); - if (dev_priv->seqno_page == NULL) - goto err_unpin; - - dev_priv->seqno_obj = obj; - memset(dev_priv->seqno_page, 0, PAGE_SIZE); - - return 0; - -err_unpin: - i915_gem_object_unpin(obj); -err_unref: - drm_gem_object_unreference(&obj->base); -err: - return ret; -} - - -static void -i915_gem_cleanup_pipe_control(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj; - - obj = dev_priv->seqno_obj; - kunmap(obj->pages[0]); - i915_gem_object_unpin(obj); - drm_gem_object_unreference(&obj->base); - dev_priv->seqno_obj = NULL; - - dev_priv->seqno_page = NULL; -} - int i915_gem_init_ringbuffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; int ret; - if (HAS_PIPE_CONTROL(dev)) { - ret = i915_gem_init_pipe_control(dev); - if (ret) - return ret; - } - ret = intel_init_render_ring_buffer(dev); if (ret) - goto cleanup_pipe_control; + return ret; if (HAS_BSD(dev)) { ret = intel_init_bsd_ring_buffer(dev); @@ -4718,9 +4655,6 @@ cleanup_bsd_ring: intel_cleanup_ring_buffer(&dev_priv->bsd_ring); cleanup_render_ring: intel_cleanup_ring_buffer(&dev_priv->render_ring); -cleanup_pipe_control: - if (HAS_PIPE_CONTROL(dev)) - i915_gem_cleanup_pipe_control(dev); return ret; } @@ -4732,8 +4666,6 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) intel_cleanup_ring_buffer(&dev_priv->render_ring); intel_cleanup_ring_buffer(&dev_priv->bsd_ring); intel_cleanup_ring_buffer(&dev_priv->blt_ring); - if (HAS_PIPE_CONTROL(dev)) - i915_gem_cleanup_pipe_control(dev); } int diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 181aad31125d..b12578558268 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -202,6 +202,78 @@ static int init_ring_common(struct 
intel_ring_buffer *ring) return 0; } +/* + * 965+ support PIPE_CONTROL commands, which provide finer grained control + * over cache flushing. + */ +struct pipe_control { + struct drm_i915_gem_object *obj; + volatile u32 *cpu_page; + u32 gtt_offset; +}; + +static int +init_pipe_control(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc; + struct drm_i915_gem_object *obj; + int ret; + + if (ring->private) + return 0; + + pc = kmalloc(sizeof(*pc), GFP_KERNEL); + if (!pc) + return -ENOMEM; + + obj = i915_gem_alloc_object(ring->dev, 4096); + if (obj == NULL) { + DRM_ERROR("Failed to allocate seqno page\n"); + ret = -ENOMEM; + goto err; + } + obj->agp_type = AGP_USER_CACHED_MEMORY; + + ret = i915_gem_object_pin(obj, 4096, true); + if (ret) + goto err_unref; + + pc->gtt_offset = obj->gtt_offset; + pc->cpu_page = kmap(obj->pages[0]); + if (pc->cpu_page == NULL) + goto err_unpin; + + pc->obj = obj; + ring->private = pc; + return 0; + +err_unpin: + i915_gem_object_unpin(obj); +err_unref: + drm_gem_object_unreference(&obj->base); +err: + kfree(pc); + return ret; +} + +static void +cleanup_pipe_control(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc = ring->private; + struct drm_i915_gem_object *obj; + + if (!ring->private) + return; + + obj = pc->obj; + kunmap(obj->pages[0]); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + + kfree(pc); + ring->private = NULL; +} + static int init_render_ring(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; @@ -215,9 +287,23 @@ static int init_render_ring(struct intel_ring_buffer *ring) I915_WRITE(MI_MODE, mode); } + if (HAS_PIPE_CONTROL(dev)) { + ret = init_pipe_control(ring); + if (ret) + return ret; + } + return ret; } +static void render_ring_cleanup(struct intel_ring_buffer *ring) +{ + if (!ring->private) + return; + + cleanup_pipe_control(ring); +} + #define PIPE_CONTROL_FLUSH(ring__, addr__) \ do { \ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ @@ -240,8 +326,8 @@ render_ring_add_request(struct intel_ring_buffer *ring, u32 *result) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = dev->dev_private; u32 seqno = i915_gem_get_seqno(dev); + struct pipe_control *pc = ring->private; int ret; if (IS_GEN6(dev)) { @@ -253,12 +339,12 @@ render_ring_add_request(struct intel_ring_buffer *ring, intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); intel_ring_emit(ring, seqno); intel_ring_emit(ring, 0); intel_ring_emit(ring, 0); } else if (HAS_PIPE_CONTROL(dev)) { - u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; + u32 scratch_addr = pc->gtt_offset + 128; /* * Workaround qword write incoherence by flushing the @@ -271,7 +357,7 @@ render_ring_add_request(struct intel_ring_buffer *ring, intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); - intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); intel_ring_emit(ring, seqno); intel_ring_emit(ring, 0); PIPE_CONTROL_FLUSH(ring, scratch_addr); @@ -288,7 +374,7 @@ render_ring_add_request(struct intel_ring_buffer *ring, intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | PIPE_CONTROL_NOTIFY); - 
intel_ring_emit(ring, dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); intel_ring_emit(ring, seqno); intel_ring_emit(ring, 0); } else { @@ -312,10 +398,10 @@ static u32 render_ring_get_seqno(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - if (HAS_PIPE_CONTROL(dev)) - return ((volatile u32 *)(dev_priv->seqno_page))[0]; - else + if (HAS_PIPE_CONTROL(dev)) { + struct pipe_control *pc = ring->private; + return pc->cpu_page[0]; + } else return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } @@ -767,6 +853,7 @@ static const struct intel_ring_buffer render_ring = { .user_irq_get = render_ring_get_user_irq, .user_irq_put = render_ring_put_user_irq, .dispatch_execbuffer = render_ring_dispatch_execbuffer, + .cleanup = render_ring_cleanup, }; /* ring buffer for bit-stream decoder */ -- cgit v1.2.3 From dddbc0e5257572079602654258adc8d117e168ad Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Nov 2010 13:56:11 +0000 Subject: drm/i915: Remove a defunct BUG_ON This used to check the precondition that all fences were to be located in a mappable area, redundant now as those two parameters are combined into one. After pinning, we assert that the buffer is bound into the desired region. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 027212e5c34a..7d6ce34789ca 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4248,7 +4248,6 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, int ret; BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); - BUG_ON(map_and_fenceable && !map_and_fenceable); WARN_ON(i915_verify_lists(dev)); if (obj->gtt_space != NULL) { -- cgit v1.2.3 From 576ae4b8e46b4cb9d5390f4348c265329793d9bf Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Nov 2010 13:36:26 +0000 Subject: drm/i915: Extend hangcheck timeout ... reduce the frequency of checking to further reduce the wakeups and CPU overhead. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5afcf2a07cfc..eb3f1f756e81 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -315,7 +315,7 @@ typedef struct drm_i915_private { int num_pipe; /* For hangcheck timer */ -#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */ +#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ struct timer_list hangcheck_timer; int hangcheck_count; uint32_t last_acthd; -- cgit v1.2.3 From 919926aeb3e89825093c743dd54f04e42e7d9150 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Nov 2010 13:42:53 +0000 Subject: drm/i915: Thread the pipelining ring through the callers. 
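The reason for threading the ring through rather than a bool is visible in the flush hunk below: a bool cannot say which ring the caller is pipelining on. A standalone sketch of the resulting check, with the structures reduced to what the comparison needs:

#include <stdbool.h>
#include <stdint.h>

struct ring { int id; };
struct object { struct ring *ring; uint32_t gpu_write_domain; };

/* Sketch only: a pipelined operation may skip the wait exactly when the
 * pending writes were queued on the same ring that will consume the
 * buffer, since ordering within a single ring is implicit. Writes on a
 * different ring still force a wait. */
static bool must_wait_for_rendering(struct object *obj, struct ring *pipelined)
{
	if (!obj->gpu_write_domain)
		return false;

	return !(pipelined && pipelined == obj->ring);
}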
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 28 +++++++++++++--------------- drivers/gpu/drm/i915/intel_display.c | 6 +++--- drivers/gpu/drm/i915/intel_drv.h | 2 +- 4 files changed, 18 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index eb3f1f756e81..42d3e901619d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1133,7 +1133,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write); int i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, - bool pipelined); + struct intel_ring_buffer *pipelined); int i915_gem_attach_phys_object(struct drm_device *dev, struct drm_i915_gem_object *obj, int id, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7d6ce34789ca..465e07abf5d0 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -42,11 +42,11 @@ struct change_domains { }; static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj, - bool pipelined); + struct intel_ring_buffer *pipelined); static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, - int write); + bool write); static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, uint64_t offset, uint64_t size); @@ -1274,12 +1274,10 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) mutex_lock(&dev->struct_mutex); BUG_ON(obj->pin_count && !obj->pin_mappable); - if (obj->gtt_space) { - if (!obj->map_and_fenceable) { - ret = i915_gem_object_unbind(obj); - if (ret) - goto unlock; - } + if (!obj->map_and_fenceable) { + ret = i915_gem_object_unbind(obj); + if (ret) + goto unlock; } if (!obj->gtt_space) { @@ -2637,7 +2635,7 @@ i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, if (reg->gpu) { int ret; - ret = i915_gem_object_flush_gpu_write_domain(obj, true); + ret = i915_gem_object_flush_gpu_write_domain(obj, NULL); if (ret) return ret; @@ -2817,7 +2815,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj) /** Flushes any GPU write domain for the object if it's dirty. 
*/ static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj, - bool pipelined) + struct intel_ring_buffer *pipelined) { struct drm_device *dev = obj->base.dev; @@ -2828,7 +2826,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj, i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain); BUG_ON(obj->base.write_domain); - if (pipelined) + if (pipelined && pipelined == obj->ring) return 0; return i915_gem_object_wait_rendering(obj, true); @@ -2892,7 +2890,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write) if (obj->gtt_space == NULL) return -EINVAL; - ret = i915_gem_object_flush_gpu_write_domain(obj, false); + ret = i915_gem_object_flush_gpu_write_domain(obj, NULL); if (ret != 0) return ret; @@ -2931,7 +2929,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write) */ int i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, - bool pipelined) + struct intel_ring_buffer *pipelined) { uint32_t old_read_domains; int ret; @@ -2940,7 +2938,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, if (obj->gtt_space == NULL) return -EINVAL; - ret = i915_gem_object_flush_gpu_write_domain(obj, true); + ret = i915_gem_object_flush_gpu_write_domain(obj, pipelined); if (ret) return ret; @@ -2984,7 +2982,7 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, * flushes to occur. */ static int -i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, int write) +i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) { uint32_t old_write_domain, old_read_domains; int ret; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index ae7d4f55ce07..c2c94a26f92e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1434,7 +1434,7 @@ out_disable: int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, - bool pipelined) + struct intel_ring_buffer *pipelined) { u32 alignment; int ret; @@ -1594,7 +1594,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, mutex_lock(&dev->struct_mutex); ret = intel_pin_and_fence_fb_obj(dev, to_intel_framebuffer(crtc->fb)->obj, - false); + NULL); if (ret != 0) { mutex_unlock(&dev->struct_mutex); return ret; @@ -5092,7 +5092,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, obj = intel_fb->obj; mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, obj, true); + ret = intel_pin_and_fence_fb_obj(dev, obj, &dev_priv->render_ring); if (ret) goto cleanup_work; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 5a4f14e36d6c..5154e315300d 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -301,7 +301,7 @@ extern void intel_init_emon(struct drm_device *dev); extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, - bool pipelined); + struct intel_ring_buffer *pipelined); extern int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *ifb, -- cgit v1.2.3 From a7a09aebe8c0dd2b76c7b97018a9c614ddb483a5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Nov 2010 13:49:09 +0000 Subject: drm/i915: Rework execbuffer pinning Avoid evicting buffers that will be used later in the batch in order to make room for the initial buffers by pinning all bound buffers in a single pass before binding (and evicting for) fresh buffer. 
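In outline, the reworked reservation loop proceeds as below (a simplified rendering of the three phases the patch's own comment describes; see the hunks that follow for the real flow):

	/* phase 1: for each object
	 *	bound but violating alignment/mappable needs -> unbind
	 *	bound and still suitable -> pin (protect from eviction)
	 * phase 2: for each object
	 *	still unbound -> pin (binds, may evict, but never a
	 *	buffer pinned in phase 1)
	 * phase 3: for each bound object
	 *	unpin (drop the temporary pin count)
	 */

Pinning the surviving bindings up front is what stops an eviction in phase 2 from pushing out a buffer that a later entry in the same batch still needs.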
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 81 +++++++++++++++++++++++++++------------ 1 file changed, 58 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 465e07abf5d0..061426e1bbf4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3531,44 +3531,75 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; int ret, i, retry; - /* attempt to pin all of the buffers into the GTT */ + /* Attempt to pin all of the buffers into the GTT. + * This is done in 3 phases: + * + * 1a. Unbind all objects that do not match the GTT constraints for + * the execbuffer (fenceable, mappable, alignment etc). + * 1b. Increment pin count for already bound objects. + * 2. Bind new objects. + * 3. Decrement pin count. + * + * This avoids unnecessary unbinding of later objects in order to make + * room for the earlier objects *unless* we need to defragment. + */ retry = 0; do { ret = 0; + + /* Unbind any ill-fitting objects or pin. */ for (i = 0; i < count; i++) { - struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; struct drm_i915_gem_object *obj = object_list[i]; - bool need_fence = + struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; + bool need_fence, need_mappable; + + if (!obj->gtt_space) + continue; + + need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; - - /* g33/pnv can't fence buffers in the unmappable part */ - bool need_mappable = + need_mappable = entry->relocation_count ? true : need_fence; - /* Check fence reg constraints and rebind if necessary */ - if (need_mappable && !obj->map_and_fenceable) { + if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || + (need_mappable && !obj->map_and_fenceable)) ret = i915_gem_object_unbind(obj); + else + ret = i915_gem_object_pin(obj, + entry->alignment, + need_mappable); + if (ret) { + count = i; + goto err; + } + } + + /* Bind fresh objects */ + for (i = 0; i < count; i++) { + struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; + struct drm_i915_gem_object *obj = object_list[i]; + bool need_fence; + + need_fence = + entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode != I915_TILING_NONE; + + if (!obj->gtt_space) { + bool need_mappable = + entry->relocation_count ? true : need_fence; + + ret = i915_gem_object_pin(obj, + entry->alignment, + need_mappable); if (ret) break; } - ret = i915_gem_object_pin(obj, - entry->alignment, - need_mappable); - if (ret) - break; - - /* - * Pre-965 chips need a fence register set up in order - * to properly handle blits to/from tiled surfaces. - */ if (need_fence) { ret = i915_gem_object_get_fence_reg(obj, true); - if (ret) { - i915_gem_object_unpin(obj); + if (ret) break; - } dev_priv->fence_regs[obj->fence_reg].gpu = true; } @@ -3576,8 +3607,12 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, entry->offset = obj->gtt_offset; } - while (i--) - i915_gem_object_unpin(object_list[i]); +err: /* Decrement pin count for bound objects */ + for (i = 0; i < count; i++) { + struct drm_i915_gem_object *obj = object_list[i]; + if (obj->gtt_space) + i915_gem_object_unpin(obj); + } if (ret != -ENOSPC || retry > 1) return ret; -- cgit v1.2.3 From caea7476d48e5f401f2d18b1738827748fb56c12 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Nov 2010 13:53:37 +0000 Subject: drm/i915: More accurately track last fence usage by the GPU Based on a patch by Daniel Vetter.
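(For reference, the breadcrumb comparison that the new last_fenced_seqno relies on is the existing i915_seqno_passed() from i915_drv.h, unchanged by this patch; the signed difference keeps the ordering correct across u32 wraparound:

	static inline bool
	i915_seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		/* e.g. seq1 == 0x00000002, seq2 == 0xfffffffe:
		 * (int32_t)(seq1 - seq2) == 4 >= 0, so seq1 is treated
		 * as later than the wrapped seq2. */
		return (int32_t)(seq1 - seq2) >= 0;
	}
)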
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 3 +- drivers/gpu/drm/i915/i915_drv.h | 17 ++++-- drivers/gpu/drm/i915/i915_gem.c | 110 +++++++++++++++++++++--------------- 3 files changed, 78 insertions(+), 52 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index addb9392e1c4..5faae476954c 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -110,7 +110,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj) static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { - seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s", + seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s", &obj->base, get_pin_flag(obj), get_tiling_flag(obj), @@ -118,6 +118,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) obj->base.read_domains, obj->base.write_domain, obj->last_rendering_seqno, + obj->last_fenced_seqno, obj->dirty ? " dirty" : "", obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); if (obj->base.name) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 42d3e901619d..ee7df1d2b8c8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -124,9 +124,8 @@ struct drm_i915_master_private { #define I915_FENCE_REG_NONE -1 struct drm_i915_fence_reg { - struct drm_i915_gem_object *obj; struct list_head lru_list; - bool gpu; + struct drm_i915_gem_object *obj; }; struct sdvo_device_mapping { @@ -787,6 +786,12 @@ struct drm_i915_gem_object { unsigned int fault_mappable : 1; unsigned int pin_mappable : 1; + /* + * Is the GPU currently using a fence to access this buffer? + */ + unsigned int pending_fenced_gpu_access:1; + unsigned int fenced_gpu_access:1; + struct page **pages; /** @@ -802,11 +807,13 @@ struct drm_i915_gem_object { */ uint32_t gtt_offset; - /* Which ring is refering to is this object */ - struct intel_ring_buffer *ring; - /** Breadcrumb of last rendering to the buffer. */ uint32_t last_rendering_seqno; + struct intel_ring_buffer *ring; + + /** Breadcrumb of last fenced GPU access to the buffer. */ + uint32_t last_fenced_seqno; + struct intel_ring_buffer *last_fenced_ring; /** Current tiling stride for the object, if it's tiled. */ uint32_t stride; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 061426e1bbf4..2cfdee8811c4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1688,7 +1688,27 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, /* Move from whatever list we were on to the tail of execution.
*/ list_move_tail(&obj->mm_list, &dev_priv->mm.active_list); list_move_tail(&obj->ring_list, &ring->active_list); + obj->last_rendering_seqno = seqno; + if (obj->fenced_gpu_access) { + struct drm_i915_fence_reg *reg; + + BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE); + + obj->last_fenced_seqno = seqno; + obj->last_fenced_ring = ring; + + reg = &dev_priv->fence_regs[obj->fence_reg]; + list_move_tail(®->lru_list, &dev_priv->mm.fence_list); + } +} + +static void +i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) +{ + list_del_init(&obj->ring_list); + obj->last_rendering_seqno = 0; + obj->last_fenced_seqno = 0; } static void @@ -1699,8 +1719,33 @@ i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) BUG_ON(!obj->active); list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); - list_del_init(&obj->ring_list); - obj->last_rendering_seqno = 0; + + i915_gem_object_move_off_active(obj); +} + +static void +i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (obj->pin_count != 0) + list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); + else + list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + + BUG_ON(!list_empty(&obj->gpu_write_list)); + BUG_ON(!obj->active); + obj->ring = NULL; + + i915_gem_object_move_off_active(obj); + obj->fenced_gpu_access = false; + obj->last_fenced_ring = NULL; + + obj->active = 0; + drm_gem_object_unreference(&obj->base); + + WARN_ON(i915_verify_lists(dev)); } /* Immediately discard the backing storage */ @@ -1729,35 +1774,11 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) return obj->madv == I915_MADV_DONTNEED; } -static void -i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) -{ - struct drm_device *dev = obj->base.dev; - drm_i915_private_t *dev_priv = dev->dev_private; - - if (obj->pin_count != 0) - list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); - else - list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - list_del_init(&obj->ring_list); - - BUG_ON(!list_empty(&obj->gpu_write_list)); - - obj->last_rendering_seqno = 0; - obj->ring = NULL; - if (obj->active) { - obj->active = 0; - drm_gem_object_unreference(&obj->base); - } - WARN_ON(i915_verify_lists(dev)); -} - static void i915_gem_process_flushing_list(struct drm_device *dev, uint32_t flush_domains, struct intel_ring_buffer *ring) { - drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj, *next; list_for_each_entry_safe(obj, next, @@ -1770,14 +1791,6 @@ i915_gem_process_flushing_list(struct drm_device *dev, list_del_init(&obj->gpu_write_list); i915_gem_object_move_to_active(obj, ring); - /* update the fence lru list */ - if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_fence_reg *reg = - &dev_priv->fence_regs[obj->fence_reg]; - list_move_tail(®->lru_list, - &dev_priv->mm.fence_list); - } - trace_i915_gem_object_change_domain(obj, obj->base.read_domains, old_write_domain); @@ -2615,8 +2628,7 @@ i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, bool interruptible) { struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_fence_reg *reg; + int ret; if (obj->fence_reg == I915_FENCE_REG_NONE) return 0; @@ -2631,19 +2643,23 @@ i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, * therefore we must wait for any outstanding access to complete * before clearing the fence. 
*/ - reg = &dev_priv->fence_regs[obj->fence_reg]; - if (reg->gpu) { - int ret; - + if (obj->fenced_gpu_access) { ret = i915_gem_object_flush_gpu_write_domain(obj, NULL); if (ret) return ret; - ret = i915_gem_object_wait_rendering(obj, interruptible); + obj->fenced_gpu_access = false; + } + + if (obj->last_fenced_seqno) { + ret = i915_do_wait_request(dev, + obj->last_fenced_seqno, + interruptible, + obj->last_fenced_ring); if (ret) return ret; - reg->gpu = false; + obj->last_fenced_seqno = 0; } i915_gem_object_flush_gtt_write_domain(obj); @@ -3166,8 +3182,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, * write domain */ if (obj->base.write_domain && - (obj->base.write_domain != obj->base.pending_read_domains || - obj->ring != ring)) { + (((obj->base.write_domain != obj->base.pending_read_domains || + obj->ring != ring)) || + (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) { flush_domains |= obj->base.write_domain; invalidate_domains |= obj->base.pending_read_domains & ~obj->base.write_domain; @@ -3528,7 +3545,6 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, struct drm_i915_gem_exec_object2 *exec_list, int count) { - struct drm_i915_private *dev_priv = dev->dev_private; int ret, i, retry; /* Attempt to pin all of the buffers into the GTT. @@ -3601,7 +3617,7 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, if (ret) break; - dev_priv->fence_regs[obj->fence_reg].gpu = true; + obj->pending_fenced_gpu_access = true; } entry->offset = obj->gtt_offset; @@ -3981,6 +3997,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } obj->in_execbuffer = true; + obj->pending_fenced_gpu_access = false; } /* Move the objects en-masse into the GTT, evicting if necessary. */ @@ -4085,6 +4102,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, obj->base.read_domains = obj->base.pending_read_domains; obj->base.write_domain = obj->base.pending_write_domain; + obj->fenced_gpu_access = obj->pending_fenced_gpu_access; i915_gem_object_move_to_active(obj, ring); if (obj->base.write_domain) { -- cgit v1.2.3 From c6642782b988e907bb50767eab50042f4947e163 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 12 Nov 2010 13:46:18 +0000 Subject: drm/i915: Add a mechanism for pipelining fence register updates Not employed just yet...
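The idea: a fence register is ordinary MMIO state, so rather than stalling the CPU and using I915_WRITE64, the update can be emitted into a ring as a load-register-immediate packet and thereby ordered with the commands that depend on it. A minimal sketch of the packet for one 64-bit fence (i.e. two 32-bit registers), mirroring the gen6/i965 paths in the diff below:

	ret = intel_ring_begin(pipelined, 6);
	if (ret)
		return ret;

	intel_ring_emit(pipelined, MI_NOOP); /* hw quirk, see the i915_reg.h comment */
	intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit(pipelined, fence_reg);     /* low dword address */
	intel_ring_emit(pipelined, (u32)val);
	intel_ring_emit(pipelined, fence_reg + 4); /* high dword address */
	intel_ring_emit(pipelined, (u32)(val >> 32));
	intel_ring_advance(pipelined);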
Signed-off-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 133 +++++++++++++++++++++++++++------------- drivers/gpu/drm/i915/i915_reg.h | 8 ++- 2 files changed, 98 insertions(+), 43 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2cfdee8811c4..1e9cf2bf9ba4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2322,7 +2322,8 @@ i915_gpu_idle(struct drm_device *dev) return 0; } -static void sandybridge_write_fence_reg(struct drm_i915_gem_object *obj) +static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined) { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -2331,7 +2332,7 @@ static void sandybridge_write_fence_reg(struct drm_i915_gem_object *obj) uint64_t val; val = (uint64_t)((obj->gtt_offset + size - 4096) & - 0xfffff000) << 32; + 0xfffff000) << 32; val |= obj->gtt_offset & 0xfffff000; val |= (uint64_t)((obj->stride / 128) - 1) << SANDYBRIDGE_FENCE_PITCH_SHIFT; @@ -2340,10 +2341,26 @@ static void sandybridge_write_fence_reg(struct drm_i915_gem_object *obj) val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= I965_FENCE_REG_VALID; - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); + if (pipelined) { + int ret = intel_ring_begin(pipelined, 6); + if (ret) + return ret; + + intel_ring_emit(pipelined, MI_NOOP); + intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); + intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8); + intel_ring_emit(pipelined, (u32)val); + intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4); + intel_ring_emit(pipelined, (u32)(val >> 32)); + intel_ring_advance(pipelined); + } else + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val); + + return 0; } -static void i965_write_fence_reg(struct drm_i915_gem_object *obj) +static int i965_write_fence_reg(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined) { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -2359,27 +2376,41 @@ static void i965_write_fence_reg(struct drm_i915_gem_object *obj) val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= I965_FENCE_REG_VALID; - I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); + if (pipelined) { + int ret = intel_ring_begin(pipelined, 6); + if (ret) + return ret; + + intel_ring_emit(pipelined, MI_NOOP); + intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); + intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8); + intel_ring_emit(pipelined, (u32)val); + intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4); + intel_ring_emit(pipelined, (u32)(val >> 32)); + intel_ring_advance(pipelined); + } else + I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val); + + return 0; } -static void i915_write_fence_reg(struct drm_i915_gem_object *obj) +static int i915_write_fence_reg(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined) { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; u32 size = obj->gtt_space->size; - uint32_t fence_reg, val, pitch_val; + u32 fence_reg, val, pitch_val; int tile_width; - if ((obj->gtt_offset & ~I915_FENCE_START_MASK) || - (obj->gtt_offset & (size - 1))) { - WARN(1, "%s: object 0x%08x [fenceable? 
%d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n", - __func__, obj->gtt_offset, obj->map_and_fenceable, size, - obj->gtt_space->start, obj->gtt_space->size); - return; - } + if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) || + (size & -size) != size || + (obj->gtt_offset & (size - 1)), + "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", + obj->gtt_offset, obj->map_and_fenceable, size)) + return -EINVAL; - if (obj->tiling_mode == I915_TILING_Y && - HAS_128_BYTE_Y_TILING(dev)) + if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) tile_width = 128; else tile_width = 512; @@ -2388,12 +2419,6 @@ static void i915_write_fence_reg(struct drm_i915_gem_object *obj) pitch_val = obj->stride / tile_width; pitch_val = ffs(pitch_val) - 1; - if (obj->tiling_mode == I915_TILING_Y && - HAS_128_BYTE_Y_TILING(dev)) - WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); - else - WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); - val = obj->gtt_offset; if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; @@ -2406,10 +2431,25 @@ static void i915_write_fence_reg(struct drm_i915_gem_object *obj) fence_reg = FENCE_REG_830_0 + fence_reg * 4; else fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; - I915_WRITE(fence_reg, val); + + if (pipelined) { + int ret = intel_ring_begin(pipelined, 4); + if (ret) + return ret; + + intel_ring_emit(pipelined, MI_NOOP); + intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(pipelined, fence_reg); + intel_ring_emit(pipelined, val); + intel_ring_advance(pipelined); + } else + I915_WRITE(fence_reg, val); + + return 0; } -static void i830_write_fence_reg(struct drm_i915_gem_object *obj) +static int i830_write_fence_reg(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined) { struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; @@ -2417,29 +2457,38 @@ static void i830_write_fence_reg(struct drm_i915_gem_object *obj) int regnum = obj->fence_reg; uint32_t val; uint32_t pitch_val; - uint32_t fence_size_bits; - if ((obj->gtt_offset & ~I830_FENCE_START_MASK) || - (obj->gtt_offset & (obj->base.size - 1))) { - WARN(1, "%s: object 0x%08x not 512K or size aligned\n", - __func__, obj->gtt_offset); - return; - } + if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) || + (size & -size) != size || + (obj->gtt_offset & (size - 1)), + "object 0x%08x not 512K or pot-size 0x%08x aligned\n", + obj->gtt_offset, size)) + return -EINVAL; pitch_val = obj->stride / 128; pitch_val = ffs(pitch_val) - 1; - WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); val = obj->gtt_offset; if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; - fence_size_bits = I830_FENCE_SIZE_BITS(size); - WARN_ON(fence_size_bits & ~0x00000f00); - val |= fence_size_bits; + val |= I830_FENCE_SIZE_BITS(size); val |= pitch_val << I830_FENCE_PITCH_SHIFT; val |= I830_FENCE_REG_VALID; - I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); + if (pipelined) { + int ret = intel_ring_begin(pipelined, 4); + if (ret) + return ret; + + intel_ring_emit(pipelined, MI_NOOP); + intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4); + intel_ring_emit(pipelined, val); + intel_ring_advance(pipelined); + } else + I915_WRITE(FENCE_REG_830_0 + regnum * 4, val); + + return 0; } static int i915_find_fence_reg(struct drm_device *dev, @@ -2512,6 +2561,7 @@ i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, struct drm_device *dev = 
obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_fence_reg *reg = NULL; + struct intel_ring_buffer *pipelined = NULL; int ret; /* Just update our place in the LRU if our fence is getting used. */ @@ -2553,25 +2603,24 @@ i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, switch (INTEL_INFO(dev)->gen) { case 6: - sandybridge_write_fence_reg(obj); + ret = sandybridge_write_fence_reg(obj, pipelined); break; case 5: case 4: - i965_write_fence_reg(obj); + ret = i965_write_fence_reg(obj, pipelined); break; case 3: - i915_write_fence_reg(obj); + ret = i915_write_fence_reg(obj, pipelined); break; case 2: - i830_write_fence_reg(obj); + ret = i830_write_fence_reg(obj, pipelined); break; } trace_i915_gem_object_get_fence(obj, obj->fence_reg, obj->tiling_mode); - - return 0; + return ret; } /** diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c668b2fb7e3d..ce97471d9c40 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -164,7 +164,13 @@ #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) #define MI_STORE_DWORD_INDEX_SHIFT 2 -#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) +/* Official Intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM: + * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw + * simply ignores the register load under certain conditions. + * - One can actually load arbitrarily many arbitrary registers: Simply issue x + * address/value pairs. Don't overdo it, though, x <= 2^4 must hold! + */ +#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) #define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */ #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) #define MI_BATCH_NON_SECURE (1) -- cgit v1.2.3 From 312817a39f17dbb4de000165b5b724e3728cd91c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 22 Nov 2010 11:50:11 +0000 Subject: drm/i915: Only save and restore fences for UMS With KMS, we can simply relinquish the fence when we idle the GPU and reassign it upon first use.
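(One subtlety worth calling out in the save path below: the gen-3 case deliberately falls through into the gen-2 case, because 945G/945GM/G33 have sixteen fence registers, the upper eight at FENCE_REG_945_8 and the lower eight at the gen-2 FENCE_REG_830_0 offsets. A sketch with the fallthrough made explicit, using save[] as a stand-in for dev_priv->saveFENCE:

	switch (INTEL_INFO(dev)->gen) {
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				save[i + 8] = I915_READ(FENCE_REG_945_8 + i * 4);
		/* fallthrough - the low eight registers are shared with gen2 */
	case 2:
		for (i = 0; i < 8; i++)
			save[i] = I915_READ(FENCE_REG_830_0 + i * 4);
		break;
	}
)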
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 25 ++++++----- drivers/gpu/drm/i915/i915_suspend.c | 89 +++++++++++++++++++------------------ 2 files changed, 61 insertions(+), 53 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1e9cf2bf9ba4..939c9e34ce96 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1905,11 +1905,22 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, } } +static void i915_gem_reset_fences(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + for (i = 0; i < 16; i++) { + struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; + if (reg->obj) + i915_gem_clear_fence_reg(reg->obj); + } +} + void i915_gem_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; - int i; i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); @@ -1940,15 +1951,7 @@ void i915_gem_reset(struct drm_device *dev) } /* The fence registers are invalidated so clear them out */ - for (i = 0; i < 16; i++) { - struct drm_i915_fence_reg *reg; - - reg = &dev_priv->fence_regs[i]; - if (!reg->obj) - continue; - - i915_gem_clear_fence_reg(reg->obj); - } + i915_gem_reset_fences(dev); } /** @@ -4706,6 +4709,8 @@ i915_gem_idle(struct drm_device *dev) } } + i915_gem_reset_fences(dev); + /* Hack! Don't let anybody do execbuf while we don't control the chip. * We need to replace this with a semaphore, or something. * And not confound mm.suspended! diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 42729d25da58..011325e51e3a 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -235,6 +235,7 @@ static void i915_restore_vga(struct drm_device *dev) static void i915_save_modeset_reg(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + int i; if (drm_core_check_feature(dev, DRIVER_MODESET)) return; @@ -367,6 +368,28 @@ static void i915_save_modeset_reg(struct drm_device *dev) } i915_save_palette(dev, PIPE_B); dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); + + /* Fences */ + switch (INTEL_INFO(dev)->gen) { + case 6: + for (i = 0; i < 16; i++) + dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); + break; + case 5: + case 4: + for (i = 0; i < 16; i++) + dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); + break; + case 3: + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + case 2: + for (i = 0; i < 8; i++) + dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + break; + } + return; } @@ -375,10 +398,33 @@ static void i915_restore_modeset_reg(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int dpll_a_reg, fpa0_reg, fpa1_reg; int dpll_b_reg, fpb0_reg, fpb1_reg; + int i; if (drm_core_check_feature(dev, DRIVER_MODESET)) return; + /* Fences */ + switch (INTEL_INFO(dev)->gen) { + case 6: + for (i = 0; i < 16; i++) + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); + break; + case 5: + case 4: + for (i = 0; i < 16; i++) + I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); + break; + case 3: + case 2: + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_945_8 + (i * 
4), dev_priv->saveFENCE[i+8]); + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); + break; + } + + if (HAS_PCH_SPLIT(dev)) { dpll_a_reg = PCH_DPLL_A; dpll_b_reg = PCH_DPLL_B; @@ -788,28 +834,6 @@ int i915_save_state(struct drm_device *dev) for (i = 0; i < 3; i++) dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); - /* Fences */ - switch (INTEL_INFO(dev)->gen) { - case 6: - for (i = 0; i < 16; i++) - dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); - break; - case 5: - case 4: - for (i = 0; i < 16; i++) - dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); - break; - case 3: - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - for (i = 0; i < 8; i++) - dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); - case 2: - for (i = 0; i < 8; i++) - dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); - break; - - } - return 0; } @@ -823,27 +847,6 @@ int i915_restore_state(struct drm_device *dev) /* Hardware status page */ I915_WRITE(HWS_PGA, dev_priv->saveHWS); - /* Fences */ - switch (INTEL_INFO(dev)->gen) { - case 6: - for (i = 0; i < 16; i++) - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); - break; - case 5: - case 4: - for (i = 0; i < 16; i++) - I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); - break; - case 3: - case 2: - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); - for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); - break; - } - i915_restore_display(dev); /* Interrupt state */ -- cgit v1.2.3 From ab5793ad3ae11a5cbe2194b449e5fdd80b19f14f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 22 Nov 2010 13:24:13 +0000 Subject: drm/i915: Tweak on-error bbaddr parsing for clarity Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index de95c7bb6dba..9aa1e1dc5fd5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -525,26 +525,23 @@ i915_ringbuffer_last_batch(struct drm_device *dev, /* Locate the current position in the ringbuffer and walk back * to find the most recently dispatched batch buffer. */ - bbaddr = 0; head = I915_READ_HEAD(ring) & HEAD_ADDR; - val = (u32 *)(ring->virtual_start + head); + val = (u32 *)(ring->virtual_start + head); while (--val >= (u32 *)ring->virtual_start) { bbaddr = i915_get_bbaddr(dev, val); if (bbaddr) - break; + return bbaddr; } - if (bbaddr == 0) { - val = (u32 *)(ring->virtual_start + ring->size); - while (--val >= (u32 *)ring->virtual_start) { - bbaddr = i915_get_bbaddr(dev, val); - if (bbaddr) - break; - } + val = (u32 *)(ring->virtual_start + ring->size); + while (--val >= (u32 *)ring->virtual_start) { + bbaddr = i915_get_bbaddr(dev, val); + if (bbaddr) + return bbaddr; } - return bbaddr; + return 0; } static u32 capture_bo_list(struct drm_i915_error_buffer *err, -- cgit v1.2.3 From 2021746e1d5ad1e3b51e24480c566acbb833c7c1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 23 Nov 2010 15:26:33 +0000 Subject: drm/i915: Mark a few functions as __must_check ... to benefit from the compiler checking that we remember to handle and propagate errors. 
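In the kernel, __must_check expands to __attribute__((warn_unused_result)) (see include/linux/compiler.h), so a caller that silently drops the return value now triggers a compile-time diagnostic. A minimal sketch of the effect, with a hypothetical careless caller:

	int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);

	static void careless(struct drm_i915_gem_object *obj)
	{
		/* gcc: warning: ignoring return value of
		 * 'i915_gem_object_unbind', declared with attribute
		 * warn_unused_result */
		i915_gem_object_unbind(obj);
	}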
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 77 ++++++++++++++++++++++------------------- drivers/gpu/drm/i915/i915_gem.c | 37 +++++++++----------- 2 files changed, 58 insertions(+), 56 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ee7df1d2b8c8..b6ca10ade426 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1093,11 +1093,11 @@ int i915_gem_init_object(struct drm_gem_object *obj); struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, size_t size); void i915_gem_free_object(struct drm_gem_object *obj); -int i915_gem_object_pin(struct drm_i915_gem_object *obj, - uint32_t alignment, - bool map_and_fenceable); +int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, + uint32_t alignment, + bool map_and_fenceable); void i915_gem_object_unpin(struct drm_i915_gem_object *obj); -int i915_gem_object_unbind(struct drm_i915_gem_object *obj); +int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); @@ -1110,37 +1110,42 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) return (int32_t)(seq1 - seq2) >= 0; } -int i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, - bool interruptible); -int i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, - bool interruptible); +int __must_check i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, + bool interruptible); +int __must_check i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, + bool interruptible); + void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_reset(struct drm_device *dev); void i915_gem_clflush_object(struct drm_i915_gem_object *obj); -int i915_gem_object_set_domain(struct drm_i915_gem_object *obj, - uint32_t read_domains, - uint32_t write_domain); -int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, - bool interruptible); -int i915_gem_init_ringbuffer(struct drm_device *dev); +int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, + uint32_t read_domains, + uint32_t write_domain); +int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, + bool interruptible); +int __must_check i915_gem_init_ringbuffer(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); -int i915_gem_do_init(struct drm_device *dev, unsigned long start, - unsigned long mappable_end, unsigned long end); -int i915_gpu_idle(struct drm_device *dev); -int i915_gem_idle(struct drm_device *dev); -int i915_add_request(struct drm_device *dev, - struct drm_file *file_priv, - struct drm_i915_gem_request *request, - struct intel_ring_buffer *ring); -int i915_do_wait_request(struct drm_device *dev, - uint32_t seqno, - bool interruptible, - struct intel_ring_buffer *ring); +void i915_gem_do_init(struct drm_device *dev, + unsigned long start, + unsigned long mappable_end, + unsigned long end); +int __must_check i915_gpu_idle(struct drm_device *dev); +int __must_check i915_gem_idle(struct drm_device *dev); +int __must_check i915_add_request(struct drm_device *dev, + struct drm_file *file_priv, + struct drm_i915_gem_request *request, + struct intel_ring_buffer *ring); +int __must_check i915_do_wait_request(struct drm_device *dev, + uint32_t seqno, + bool interruptible, + struct intel_ring_buffer *ring); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); -int 
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, - int write); -int i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *pipelined); +int __must_check +i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, + bool write); +int __must_check +i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined); int i915_gem_attach_phys_object(struct drm_device *dev, struct drm_i915_gem_object *obj, int id, @@ -1152,14 +1157,16 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file); /* i915_gem_gtt.c */ void i915_gem_restore_gtt_mappings(struct drm_device *dev); -int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); +int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); /* i915_gem_evict.c */ -int i915_gem_evict_something(struct drm_device *dev, int min_size, - unsigned alignment, bool mappable); -int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only); -int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only); +int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, + unsigned alignment, bool mappable); +int __must_check i915_gem_evict_everything(struct drm_device *dev, + bool purgeable_only); +int __must_check i915_gem_evict_inactive(struct drm_device *dev, + bool purgeable_only); /* i915_gem_tiling.c */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 939c9e34ce96..f6167c55a649 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -215,27 +215,19 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) return obj->gtt_space && !obj->active && obj->pin_count == 0; } -int i915_gem_do_init(struct drm_device *dev, - unsigned long start, - unsigned long mappable_end, - unsigned long end) +void i915_gem_do_init(struct drm_device *dev, + unsigned long start, + unsigned long mappable_end, + unsigned long end) { drm_i915_private_t *dev_priv = dev->dev_private; - if (start >= end || - (start & (PAGE_SIZE - 1)) != 0 || - (end & (PAGE_SIZE - 1)) != 0) { - return -EINVAL; - } - drm_mm_init(&dev_priv->mm.gtt_space, start, end - start); dev_priv->mm.gtt_total = end - start; dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; dev_priv->mm.gtt_mappable_end = mappable_end; - - return 0; } int @@ -243,13 +235,16 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_gem_init *args = data; - int ret; + + if (args->gtt_start >= args->gtt_end || + (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1)) + return -EINVAL; mutex_lock(&dev->struct_mutex); - ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end); + i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end); mutex_unlock(&dev->struct_mutex); - return ret; + return 0; } int @@ -2949,7 +2944,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) * flushes to occur. 
*/ int -i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write) +i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) { uint32_t old_write_domain, old_read_domains; int ret; @@ -5177,8 +5172,8 @@ rescan: &dev_priv->mm.inactive_list, mm_list) { if (i915_gem_object_is_purgeable(obj)) { - i915_gem_object_unbind(obj); - if (--nr_to_scan == 0) + if (i915_gem_object_unbind(obj) == 0 && + --nr_to_scan == 0) break; } } @@ -5188,10 +5183,10 @@ rescan: list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list) { - if (nr_to_scan) { - i915_gem_object_unbind(obj); + if (nr_to_scan && + i915_gem_object_unbind(obj) == 0) nr_to_scan--; - } else + else cnt++; } -- cgit v1.2.3 From 6299f992c0491232f008028a1f40bc9d86c4c76c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 24 Nov 2010 12:23:44 +0000 Subject: drm/i915: Defer accounting until read from debugfs Simply remove our accounting of objects inside the aperture, keeping track only of what is in the aperture and its current usage. This removes the over-complication of BUGs that were attempting to keep the accounting correct and also removes the overhead of the accounting on the hot-paths. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 84 +++++++++++++++++++---- drivers/gpu/drm/i915/i915_drv.h | 11 +--- drivers/gpu/drm/i915/i915_gem.c | 128 +++++++----------------------------- 3 files changed, 95 insertions(+), 128 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 5faae476954c..3c9d4b876865 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -128,8 +128,15 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) if (obj->gtt_space != NULL) seq_printf(m, " (gtt offset: %08x, size: %08x)", obj->gtt_offset, (unsigned int)obj->gtt_space->size); - if (obj->pin_mappable || obj->fault_mappable) - seq_printf(m, " (mappable)"); + if (obj->pin_mappable || obj->fault_mappable) { + char s[3], *t = s; + if (obj->pin_mappable) + *t++ = 'p'; + if (obj->fault_mappable) + *t++ = 'f'; + *t = '\0'; + seq_printf(m, " (%s mappable)", s); + } if (obj->ring != NULL) seq_printf(m, " (%s)", obj->ring->name); } @@ -191,28 +198,79 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) return 0; } +#define count_objects(list, member) do { \ + list_for_each_entry(obj, list, member) { \ + size += obj->gtt_space->size; \ + ++count; \ + if (obj->map_and_fenceable) { \ + mappable_size += obj->gtt_space->size; \ + ++mappable_count; \ + } \ + } \ +} while(0) + static int i915_gem_object_info(struct seq_file *m, void* data) { struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; + u32 count, mappable_count; + size_t size, mappable_size; + struct drm_i915_gem_object *obj; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - seq_printf(m, "%u objects\n", dev_priv->mm.object_count); - seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory); - seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count); - seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory); - seq_printf(m, "%u mappable objects in gtt\n", dev_priv->mm.gtt_mappable_count); - seq_printf(m, "%zu mappable gtt bytes\n", dev_priv->mm.gtt_mappable_memory); - seq_printf(m, "%zu mappable gtt used bytes\n", dev_priv->mm.mappable_gtt_used); - seq_printf(m, "%zu mappable gtt
total\n", dev_priv->mm.mappable_gtt_total); - seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count); - seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory); - seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total); + seq_printf(m, "%u objects, %zu bytes\n", + dev_priv->mm.object_count, + dev_priv->mm.object_memory); + + size = count = mappable_size = mappable_count = 0; + count_objects(&dev_priv->mm.gtt_list, gtt_list); + seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", + count, mappable_count, size, mappable_size); + + size = count = mappable_size = mappable_count = 0; + count_objects(&dev_priv->mm.active_list, mm_list); + count_objects(&dev_priv->mm.flushing_list, mm_list); + seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", + count, mappable_count, size, mappable_size); + + size = count = mappable_size = mappable_count = 0; + count_objects(&dev_priv->mm.pinned_list, mm_list); + seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n", + count, mappable_count, size, mappable_size); + + size = count = mappable_size = mappable_count = 0; + count_objects(&dev_priv->mm.inactive_list, mm_list); + seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", + count, mappable_count, size, mappable_size); + + size = count = mappable_size = mappable_count = 0; + count_objects(&dev_priv->mm.deferred_free_list, mm_list); + seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n", + count, mappable_count, size, mappable_size); + + size = count = mappable_size = mappable_count = 0; + list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { + if (obj->fault_mappable) { + size += obj->gtt_space->size; + ++count; + } + if (obj->pin_mappable) { + mappable_size += obj->gtt_space->size; + ++mappable_count; + } + } + seq_printf(m, "%u pinned mappable objects, %zu bytes\n", + mappable_count, mappable_size); + seq_printf(m, "%u fault mappable objects, %zu bytes\n", + count, size); + + seq_printf(m, "%zu [%zu] gtt total\n", + dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b6ca10ade426..4ad34f9c55a2 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -642,17 +642,10 @@ typedef struct drm_i915_private { struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; /* accounting, useful for userland debugging */ - size_t object_memory; - size_t pin_memory; - size_t gtt_memory; - size_t gtt_mappable_memory; - size_t mappable_gtt_used; - size_t mappable_gtt_total; size_t gtt_total; + size_t mappable_gtt_total; + size_t object_memory; u32 object_count; - u32 pin_count; - u32 gtt_mappable_count; - u32 gtt_count; } mm; struct sdvo_device_mapping sdvo_mappings[2]; /* indicate whether the LVDS_BORDER should be enabled or not */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f6167c55a649..7507a86c3feb 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -83,80 +83,6 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, dev_priv->mm.object_memory -= size; } -static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv, - struct drm_i915_gem_object *obj) -{ - dev_priv->mm.gtt_count++; - dev_priv->mm.gtt_memory += obj->gtt_space->size; - if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) { - dev_priv->mm.mappable_gtt_used += - min_t(size_t, obj->gtt_space->size, - dev_priv->mm.gtt_mappable_end - obj->gtt_offset); - } - 
list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); -} - -static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv, - struct drm_i915_gem_object *obj) -{ - dev_priv->mm.gtt_count--; - dev_priv->mm.gtt_memory -= obj->gtt_space->size; - if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) { - dev_priv->mm.mappable_gtt_used -= - min_t(size_t, obj->gtt_space->size, - dev_priv->mm.gtt_mappable_end - obj->gtt_offset); - } - list_del_init(&obj->gtt_list); -} - -/** - * Update the mappable working set counters. Call _only_ when there is a change - * in one of (pin|fault)_mappable and update *_mappable _before_ calling. - * @mappable: new state the changed mappable flag (either pin_ or fault_). - */ -static void -i915_gem_info_update_mappable(struct drm_i915_private *dev_priv, - struct drm_i915_gem_object *obj, - bool mappable) -{ - if (mappable) { - if (obj->pin_mappable && obj->fault_mappable) - /* Combined state was already mappable. */ - return; - dev_priv->mm.gtt_mappable_count++; - dev_priv->mm.gtt_mappable_memory += obj->gtt_space->size; - } else { - if (obj->pin_mappable || obj->fault_mappable) - /* Combined state still mappable. */ - return; - dev_priv->mm.gtt_mappable_count--; - dev_priv->mm.gtt_mappable_memory -= obj->gtt_space->size; - } -} - -static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv, - struct drm_i915_gem_object *obj, - bool mappable) -{ - dev_priv->mm.pin_count++; - dev_priv->mm.pin_memory += obj->gtt_space->size; - if (mappable) { - obj->pin_mappable = true; - i915_gem_info_update_mappable(dev_priv, obj, true); - } -} - -static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv, - struct drm_i915_gem_object *obj) -{ - dev_priv->mm.pin_count--; - dev_priv->mm.pin_memory -= obj->gtt_space->size; - if (obj->pin_mappable) { - obj->pin_mappable = false; - i915_gem_info_update_mappable(dev_priv, obj, false); - } -} - int i915_gem_check_is_wedged(struct drm_device *dev) { @@ -253,19 +179,24 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_get_aperture *args = data; + struct drm_i915_gem_object *obj; + size_t pinned; if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; + pinned = 0; mutex_lock(&dev->struct_mutex); - args->aper_size = dev_priv->mm.gtt_total; - args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory; + list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) + pinned += obj->gtt_space->size; mutex_unlock(&dev->struct_mutex); + args->aper_size = dev_priv->mm.gtt_total; + args->aper_available_size = args->aper_size - pinned; + return 0; } - /** * Creates a new mm object and returns a handle to it. */ @@ -1267,14 +1198,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) /* Now bind it into the GTT if needed */ mutex_lock(&dev->struct_mutex); - BUG_ON(obj->pin_count && !obj->pin_mappable); if (!obj->map_and_fenceable) { ret = i915_gem_object_unbind(obj); if (ret) goto unlock; } - if (!obj->gtt_space) { ret = i915_gem_object_bind_to_gtt(obj, 0, true); if (ret) @@ -1285,11 +1214,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (ret) goto unlock; - if (!obj->fault_mappable) { - obj->fault_mappable = true; - i915_gem_info_update_mappable(dev_priv, obj, true); - } - /* Need a new fence register?
*/ if (obj->tiling_mode != I915_TILING_NONE) { ret = i915_gem_object_get_fence_reg(obj, true); if (ret) goto unlock; } if (i915_gem_object_is_inactive(obj)) list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); + obj->fault_mappable = true; + pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) + page_offset; @@ -1406,18 +1332,14 @@ out_free_list: void i915_gem_release_mmap(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + if (!obj->fault_mappable) + return; - if (unlikely(obj->base.map_list.map && dev->dev_mapping)) - unmap_mapping_range(dev->dev_mapping, - (loff_t)obj->base.map_list.hash.key << PAGE_SHIFT, - obj->base.size, 1); + unmap_mapping_range(obj->base.dev->dev_mapping, + (loff_t)obj->base.map_list.hash.key << PAGE_SHIFT, + obj->base.size, 1); - if (obj->fault_mappable) { - obj->fault_mappable = false; - i915_gem_info_update_mappable(dev_priv, obj, false); - } + obj->fault_mappable = false; } static void @@ -2221,8 +2143,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, int i915_gem_object_unbind(struct drm_i915_gem_object *obj) { - struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; int ret = 0; if (obj->gtt_space == NULL) @@ -2259,10 +2179,9 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) i915_gem_clear_fence_reg(obj); i915_gem_gtt_unbind_object(obj); - i915_gem_object_put_pages_gtt(obj); - i915_gem_info_remove_gtt(dev_priv, obj); + list_del_init(&obj->gtt_list); list_del_init(&obj->mm_list); /* Avoid an unnecessary call to unbind on rebind. */ obj->map_and_fenceable = true; @@ -2833,11 +2752,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, goto search_free; } - obj->gtt_offset = obj->gtt_space->start; - - /* keep track of bounds object by adding it to the inactive list */ + list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list); list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - i915_gem_info_add_gtt(dev_priv, obj); /* Assert that the object is not currently in any GPU domain.
As it * wasn't in the GTT, there shouldn't be any way it could have been in @@ -2846,7 +2762,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); - trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable); + obj->gtt_offset = obj->gtt_space->start; fenceable = obj->gtt_space->size == fence_size && @@ -2857,6 +2773,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, obj->map_and_fenceable = mappable && fenceable; + trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable); return 0; } @@ -4372,12 +4289,11 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, } if (obj->pin_count++ == 0) { - i915_gem_info_add_pin(dev_priv, obj, map_and_fenceable); if (!obj->active) list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list); } - BUG_ON(!obj->pin_mappable && map_and_fenceable); + obj->pin_mappable |= map_and_fenceable; WARN_ON(i915_verify_lists(dev)); return 0; @@ -4397,7 +4313,7 @@ i915_gem_object_unpin(struct drm_i915_gem_object *obj) if (!obj->active) list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - i915_gem_info_remove_pin(dev_priv, obj); + obj->pin_mappable = false; } WARN_ON(i915_verify_lists(dev)); } -- cgit v1.2.3 From 54cf91dc4e51fd5070a9a2346377493cc38a1ca9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 25 Nov 2010 18:00:26 +0000 Subject: drm/i915: Split i915_gem_execbuffer into its own file. A number of dragons have been seen lurking within the execbuffer code. The first step is then to isolate them from the rest and begin to scrutinise them in depth. Suggested by Daniel Vetter. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/Makefile | 3 +- drivers/gpu/drm/i915/i915_drv.h | 18 + drivers/gpu/drm/i915/i915_gem.c | 1164 +--------------------------- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 1155 +++++++++++++++++++++++++++ 4 files changed, 1188 insertions(+), 1152 deletions(-) create mode 100644 drivers/gpu/drm/i915/i915_gem_execbuffer.c diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index b3cdb4fe46e0..0ae6a7c5020f 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -8,8 +8,9 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ i915_suspend.o \ i915_gem.o \ i915_gem_debug.o \ - i915_gem_gtt.o \ i915_gem_evict.o \ + i915_gem_execbuffer.o \ + i915_gem_gtt.o \ i915_gem_tiling.o \ i915_trace_points.o \ intel_display.o \ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4ad34f9c55a2..6c10b645dde9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1083,6 +1083,10 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); int i915_gem_init_object(struct drm_gem_object *obj); +void i915_gem_flush_ring(struct drm_device *dev, + struct intel_ring_buffer *ring, + uint32_t invalidate_domains, + uint32_t flush_domains); struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, size_t size); void i915_gem_free_object(struct drm_gem_object *obj); @@ -1094,6 +1098,12 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_lastclose(struct drm_device *dev); +int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); +int __must_check i915_gem_object_wait_rendering(struct 
drm_i915_gem_object *obj, + bool interruptible); +void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *ring); + /** * Returns true if seq1 is later than seq2. */ @@ -1103,6 +1113,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) return (int32_t)(seq1 - seq2) >= 0; } +static inline u32 +i915_gem_next_request_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + return ring->outstanding_lazy_request = dev_priv->next_seqno; +} + int __must_check i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, bool interruptible); int __must_check i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7507a86c3feb..b30c6c167048 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -35,12 +35,6 @@ #include #include -struct change_domains { - uint32_t invalidate_domains; - uint32_t flush_domains; - uint32_t flush_rings; -}; - static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj, struct intel_ring_buffer *pipelined); static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); @@ -51,8 +45,6 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object uint64_t offset, uint64_t size); static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj); -static int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, - bool interruptible); static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unsigned alignment, bool map_and_fenceable); @@ -113,7 +105,7 @@ i915_gem_check_is_wedged(struct drm_device *dev) return -EIO; } -static int i915_mutex_lock_interruptible(struct drm_device *dev) +int i915_mutex_lock_interruptible(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; @@ -1577,15 +1569,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) obj->pages = NULL; } -static uint32_t -i915_gem_next_request_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - return ring->outstanding_lazy_request = dev_priv->next_seqno; -} - -static void +void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring) { @@ -1762,24 +1746,6 @@ i915_add_request(struct drm_device *dev, return 0; } -/** - * Command execution barrier - * - * Ensures that all commands in the ring are finished - * before signalling the CPU - */ -static void -i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring) -{ - uint32_t flush_domains = 0; - - /* The sampler always gets flushed on i965 (sigh) */ - if (INTEL_INFO(dev)->gen >= 4) - flush_domains |= I915_GEM_DOMAIN_SAMPLER; - - ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains); -} - static inline void i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) { @@ -2072,45 +2038,11 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno, return i915_do_wait_request(dev, seqno, 1, ring); } -static void -i915_gem_flush_ring(struct drm_device *dev, - struct intel_ring_buffer *ring, - uint32_t invalidate_domains, - uint32_t flush_domains) -{ - ring->flush(ring, invalidate_domains, flush_domains); - i915_gem_process_flushing_list(dev, flush_domains, ring); -} - -static void -i915_gem_flush(struct drm_device *dev, - uint32_t invalidate_domains, 
- uint32_t flush_domains, - uint32_t flush_rings) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - - if (flush_domains & I915_GEM_DOMAIN_CPU) - intel_gtt_chipset_flush(); - - if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { - if (flush_rings & RING_RENDER) - i915_gem_flush_ring(dev, &dev_priv->render_ring, - invalidate_domains, flush_domains); - if (flush_rings & RING_BSD) - i915_gem_flush_ring(dev, &dev_priv->bsd_ring, - invalidate_domains, flush_domains); - if (flush_rings & RING_BLT) - i915_gem_flush_ring(dev, &dev_priv->blt_ring, - invalidate_domains, flush_domains); - } -} - /** * Ensures that all rendering to the object has completed and the object is * safe to unbind from the GTT or access from the CPU. */ -static int +int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, bool interruptible) { @@ -2198,6 +2130,16 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) return ret; } +void +i915_gem_flush_ring(struct drm_device *dev, + struct intel_ring_buffer *ring, + uint32_t invalidate_domains, + uint32_t flush_domains) +{ + ring->flush(ring, invalidate_domains, flush_domains); + i915_gem_process_flushing_list(dev, flush_domains, ring); +} + static int i915_ring_idle(struct drm_device *dev, struct intel_ring_buffer *ring) { @@ -3014,174 +2956,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) return 0; } -/* - * Set the next domain for the specified object. This - * may not actually perform the necessary flushing/invaliding though, - * as that may want to be batched with other set_domain operations - * - * This is (we hope) the only really tricky part of gem. The goal - * is fairly simple -- track which caches hold bits of the object - * and make sure they remain coherent. A few concrete examples may - * help to explain how it works. For shorthand, we use the notation - * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the - * a pair of read and write domain masks. - * - * Case 1: the batch buffer - * - * 1. Allocated - * 2. Written by CPU - * 3. Mapped to GTT - * 4. Read by GPU - * 5. Unmapped from GTT - * 6. Freed - * - * Let's take these a step at a time - * - * 1. Allocated - * Pages allocated from the kernel may still have - * cache contents, so we set them to (CPU, CPU) always. - * 2. Written by CPU (using pwrite) - * The pwrite function calls set_domain (CPU, CPU) and - * this function does nothing (as nothing changes) - * 3. Mapped by GTT - * This function asserts that the object is not - * currently in any GPU-based read or write domains - * 4. Read by GPU - * i915_gem_execbuffer calls set_domain (COMMAND, 0). - * As write_domain is zero, this function adds in the - * current read domains (CPU+COMMAND, 0). - * flush_domains is set to CPU. - * invalidate_domains is set to COMMAND - * clflush is run to get data out of the CPU caches - * then i915_dev_set_domain calls i915_gem_flush to - * emit an MI_FLUSH and drm_agp_chipset_flush - * 5. Unmapped from GTT - * i915_gem_object_unbind calls set_domain (CPU, CPU) - * flush_domains and invalidate_domains end up both zero - * so no flushing/invalidating happens - * 6. Freed - * yay, done - * - * Case 2: The shared render buffer - * - * 1. Allocated - * 2. Mapped to GTT - * 3. Read/written by GPU - * 4. set_domain to (CPU,CPU) - * 5. Read/written by CPU - * 6. Read/written by GPU - * - * 1. Allocated - * Same as last example, (CPU, CPU) - * 2. Mapped to GTT - * Nothing changes (assertions find that it is not in the GPU) - * 3. 
Read/written by GPU - * execbuffer calls set_domain (RENDER, RENDER) - * flush_domains gets CPU - * invalidate_domains gets GPU - * clflush (obj) - * MI_FLUSH and drm_agp_chipset_flush - * 4. set_domain (CPU, CPU) - * flush_domains gets GPU - * invalidate_domains gets CPU - * wait_rendering (obj) to make sure all drawing is complete. - * This will include an MI_FLUSH to get the data from GPU - * to memory - * clflush (obj) to invalidate the CPU cache - * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) - * 5. Read/written by CPU - * cache lines are loaded and dirtied - * 6. Read written by GPU - * Same as last GPU access - * - * Case 3: The constant buffer - * - * 1. Allocated - * 2. Written by CPU - * 3. Read by GPU - * 4. Updated (written) by CPU again - * 5. Read by GPU - * - * 1. Allocated - * (CPU, CPU) - * 2. Written by CPU - * (CPU, CPU) - * 3. Read by GPU - * (CPU+RENDER, 0) - * flush_domains = CPU - * invalidate_domains = RENDER - * clflush (obj) - * MI_FLUSH - * drm_agp_chipset_flush - * 4. Updated (written) by CPU again - * (CPU, CPU) - * flush_domains = 0 (no previous write domain) - * invalidate_domains = 0 (no new read domains) - * 5. Read by GPU - * (CPU+RENDER, 0) - * flush_domains = CPU - * invalidate_domains = RENDER - * clflush (obj) - * MI_FLUSH - * drm_agp_chipset_flush - */ -static void -i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *ring, - struct change_domains *cd) -{ - uint32_t invalidate_domains = 0, flush_domains = 0; - - /* - * If the object isn't moving to a new write domain, - * let the object stay in multiple read domains - */ - if (obj->base.pending_write_domain == 0) - obj->base.pending_read_domains |= obj->base.read_domains; - - /* - * Flush the current write domain if - * the new read domains don't match. Invalidate - * any read domains which differ from the old - * write domain - */ - if (obj->base.write_domain && - (((obj->base.write_domain != obj->base.pending_read_domains || - obj->ring != ring)) || - (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) { - flush_domains |= obj->base.write_domain; - invalidate_domains |= - obj->base.pending_read_domains & ~obj->base.write_domain; - } - /* - * Invalidate any read caches which may have - * stale data. That is, any new read domains. - */ - invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains; - if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) - i915_gem_clflush_object(obj); - - /* blow away mappings if mapped through GTT */ - if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT) - i915_gem_release_mmap(obj); - - /* The actual obj->write_domain will be updated with - * pending_write_domain after we emit the accumulated flush for all - * of our domain changes in execbuffers (which clears objects' - * write_domains). So if we have a current write domain that we - * aren't changing, set pending_write_domain to that. - */ - if (flush_domains == 0 && obj->base.pending_write_domain == 0) - obj->base.pending_write_domain = obj->base.write_domain; - - cd->invalidate_domains |= invalidate_domains; - cd->flush_domains |= flush_domains; - if (flush_domains & I915_GEM_GPU_DOMAINS) - cd->flush_rings |= obj->ring->id; - if (invalidate_domains & I915_GEM_GPU_DOMAINS) - cd->flush_rings |= ring->id; -} - /** * Moves the object from a partially CPU read to a full one. 
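
An aside on the change_domains bookkeeping removed above (it reappears unchanged in i915_gem_execbuffer.c further down): the accumulation reduces to two mask operations. A minimal standalone sketch, using stand-in domain masks rather than the driver's I915_GEM_DOMAIN_* values:

#include <stdint.h>

/* Stand-in masks for illustration; the real ones live in i915_drv.h. */
#define EX_DOMAIN_CPU    (1u << 0)
#define EX_DOMAIN_RENDER (1u << 1)

struct cd_example { uint32_t invalidate, flush; };

/* Core of the accumulation: flush the old write domain, invalidate any
 * newly requested read domains that the writer did not already cover. */
static void accumulate(struct cd_example *cd,
                       uint32_t write_domain, uint32_t pending_reads)
{
        cd->flush |= write_domain;
        cd->invalidate |= pending_reads & ~write_domain;
}

/* e.g. accumulate(&cd, EX_DOMAIN_CPU, EX_DOMAIN_RENDER) leaves
 * cd.flush == CPU (write back dirty lines) and cd.invalidate == RENDER. */
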
* @@ -3284,451 +3058,6 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj, return 0; } -static int -i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, - struct drm_file *file_priv, - struct drm_i915_gem_exec_object2 *entry, - struct drm_i915_gem_relocation_entry *reloc) -{ - struct drm_device *dev = obj->base.dev; - struct drm_gem_object *target_obj; - uint32_t target_offset; - int ret = -EINVAL; - - target_obj = drm_gem_object_lookup(dev, file_priv, - reloc->target_handle); - if (target_obj == NULL) - return -ENOENT; - - target_offset = to_intel_bo(target_obj)->gtt_offset; - -#if WATCH_RELOC - DRM_INFO("%s: obj %p offset %08x target %d " - "read %08x write %08x gtt %08x " - "presumed %08x delta %08x\n", - __func__, - obj, - (int) reloc->offset, - (int) reloc->target_handle, - (int) reloc->read_domains, - (int) reloc->write_domain, - (int) target_offset, - (int) reloc->presumed_offset, - reloc->delta); -#endif - - /* The target buffer should have appeared before us in the - * exec_object list, so it should have a GTT space bound by now. - */ - if (target_offset == 0) { - DRM_ERROR("No GTT space found for object %d\n", - reloc->target_handle); - goto err; - } - - /* Validate that the target is in a valid r/w GPU domain */ - if (reloc->write_domain & (reloc->write_domain - 1)) { - DRM_ERROR("reloc with multiple write domains: " - "obj %p target %d offset %d " - "read %08x write %08x", - obj, reloc->target_handle, - (int) reloc->offset, - reloc->read_domains, - reloc->write_domain); - goto err; - } - if (reloc->write_domain & I915_GEM_DOMAIN_CPU || - reloc->read_domains & I915_GEM_DOMAIN_CPU) { - DRM_ERROR("reloc with read/write CPU domains: " - "obj %p target %d offset %d " - "read %08x write %08x", - obj, reloc->target_handle, - (int) reloc->offset, - reloc->read_domains, - reloc->write_domain); - goto err; - } - if (reloc->write_domain && target_obj->pending_write_domain && - reloc->write_domain != target_obj->pending_write_domain) { - DRM_ERROR("Write domain conflict: " - "obj %p target %d offset %d " - "new %08x old %08x\n", - obj, reloc->target_handle, - (int) reloc->offset, - reloc->write_domain, - target_obj->pending_write_domain); - goto err; - } - - target_obj->pending_read_domains |= reloc->read_domains; - target_obj->pending_write_domain |= reloc->write_domain; - - /* If the relocation already has the right value in it, no - * more work needs to be done. - */ - if (target_offset == reloc->presumed_offset) - goto out; - - /* Check that the relocation address is valid... */ - if (reloc->offset > obj->base.size - 4) { - DRM_ERROR("Relocation beyond object bounds: " - "obj %p target %d offset %d size %d.\n", - obj, reloc->target_handle, - (int) reloc->offset, - (int) obj->base.size); - goto err; - } - if (reloc->offset & 3) { - DRM_ERROR("Relocation not 4-byte aligned: " - "obj %p target %d offset %d.\n", - obj, reloc->target_handle, - (int) reloc->offset); - goto err; - } - - /* and points to somewhere within the target object. 
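
For readers new to this code, the heart of a relocation is small: the batch carries (offset, delta) pairs naming a target buffer, and once the target's final GTT address is known the kernel rewrites the word at that offset. A toy flat-array version (illustrative only; the driver must go through kmap_atomic or the GTT aperture as the code above does):

#include <stdint.h>

/* batch[offset] becomes the target's GTT address plus a constant delta.
 * If the kernel placed the target where userspace presumed it would be,
 * the stored value is already correct and the write is skipped. */
static void apply_reloc(uint32_t *batch, uint32_t offset_in_dwords,
                        uint32_t target_gtt_offset, uint32_t delta)
{
        batch[offset_in_dwords] = target_gtt_offset + delta;
}
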
*/ - if (reloc->delta >= target_obj->size) { - DRM_ERROR("Relocation beyond target object bounds: " - "obj %p target %d delta %d size %d.\n", - obj, reloc->target_handle, - (int) reloc->delta, - (int) target_obj->size); - goto err; - } - - reloc->delta += target_offset; - if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { - uint32_t page_offset = reloc->offset & ~PAGE_MASK; - char *vaddr; - - vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]); - *(uint32_t *)(vaddr + page_offset) = reloc->delta; - kunmap_atomic(vaddr); - } else { - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t __iomem *reloc_entry; - void __iomem *reloc_page; - - ret = i915_gem_object_set_to_gtt_domain(obj, 1); - if (ret) - goto err; - - /* Map the page containing the relocation we're going to perform. */ - reloc->offset += obj->gtt_offset; - reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, - reloc->offset & PAGE_MASK); - reloc_entry = (uint32_t __iomem *) - (reloc_page + (reloc->offset & ~PAGE_MASK)); - iowrite32(reloc->delta, reloc_entry); - io_mapping_unmap_atomic(reloc_page); - } - - /* and update the user's relocation entry */ - reloc->presumed_offset = target_offset; - -out: - ret = 0; -err: - drm_gem_object_unreference(target_obj); - return ret; -} - -static int -i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, - struct drm_file *file_priv, - struct drm_i915_gem_exec_object2 *entry) -{ - struct drm_i915_gem_relocation_entry __user *user_relocs; - int i, ret; - - user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; - for (i = 0; i < entry->relocation_count; i++) { - struct drm_i915_gem_relocation_entry reloc; - - if (__copy_from_user_inatomic(&reloc, - user_relocs+i, - sizeof(reloc))) - return -EFAULT; - - ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc); - if (ret) - return ret; - - if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, - &reloc.presumed_offset, - sizeof(reloc.presumed_offset))) - return -EFAULT; - } - - return 0; -} - -static int -i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, - struct drm_file *file_priv, - struct drm_i915_gem_exec_object2 *entry, - struct drm_i915_gem_relocation_entry *relocs) -{ - int i, ret; - - for (i = 0; i < entry->relocation_count; i++) { - ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]); - if (ret) - return ret; - } - - return 0; -} - -static int -i915_gem_execbuffer_relocate(struct drm_device *dev, - struct drm_file *file, - struct drm_i915_gem_object **object_list, - struct drm_i915_gem_exec_object2 *exec_list, - int count) -{ - int i, ret; - - for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = object_list[i]; - obj->base.pending_read_domains = 0; - obj->base.pending_write_domain = 0; - ret = i915_gem_execbuffer_relocate_object(obj, file, - &exec_list[i]); - if (ret) - return ret; - } - - return 0; -} - -static int -i915_gem_execbuffer_reserve(struct drm_device *dev, - struct drm_file *file, - struct drm_i915_gem_object **object_list, - struct drm_i915_gem_exec_object2 *exec_list, - int count) -{ - int ret, i, retry; - - /* Attempt to pin all of the buffers into the GTT. - * This is done in 3 phases: - * - * 1a. Unbind all objects that do not match the GTT constraints for - * the execbuffer (fenceable, mappable, alignment etc). - * 1b. Increment pin count for already bound objects. - * 2. Bind new objects. - * 3. Decrement pin count. 
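
The per-object decision in phase 1 ("unbind or pin") comes down to a small predicate. A simplified standalone sketch, with stand-in types in place of the driver's structs:

/* Sketch: can the object's current binding satisfy this execbuffer? */
struct binding_ex {
        unsigned long gtt_offset;
        int tiled;               /* tiling_mode != I915_TILING_NONE */
        int map_and_fenceable;
};

static int binding_ok(const struct binding_ex *b, unsigned long alignment,
                      int wants_fence, int has_relocs)
{
        int need_fence = wants_fence && b->tiled;
        int need_mappable = has_relocs ? 1 : need_fence;

        if (alignment && (b->gtt_offset & (alignment - 1)))
                return 0;       /* misaligned for this request: unbind */
        if (need_mappable && !b->map_and_fenceable)
                return 0;       /* must live in the mappable aperture */
        return 1;               /* keep the binding, just pin it */
}
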
- * - * This avoid unnecessary unbinding of later objects in order to makr - * room for the earlier objects *unless* we need to defragment. - */ - retry = 0; - do { - ret = 0; - - /* Unbind any ill-fitting objects or pin. */ - for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = object_list[i]; - struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; - bool need_fence, need_mappable; - - if (!obj->gtt_space) - continue; - - need_fence = - entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj->tiling_mode != I915_TILING_NONE; - need_mappable = - entry->relocation_count ? true : need_fence; - - if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || - (need_mappable && !obj->map_and_fenceable)) - ret = i915_gem_object_unbind(obj); - else - ret = i915_gem_object_pin(obj, - entry->alignment, - need_mappable); - if (ret) { - count = i; - goto err; - } - } - - /* Bind fresh objects */ - for (i = 0; i < count; i++) { - struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; - struct drm_i915_gem_object *obj = object_list[i]; - bool need_fence; - - need_fence = - entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj->tiling_mode != I915_TILING_NONE; - - if (!obj->gtt_space) { - bool need_mappable = - entry->relocation_count ? true : need_fence; - - ret = i915_gem_object_pin(obj, - entry->alignment, - need_mappable); - if (ret) - break; - } - - if (need_fence) { - ret = i915_gem_object_get_fence_reg(obj, true); - if (ret) - break; - - obj->pending_fenced_gpu_access = true; - } - - entry->offset = obj->gtt_offset; - } - -err: /* Decrement pin count for bound objects */ - for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = object_list[i]; - if (obj->gtt_space) - i915_gem_object_unpin(obj); - } - - if (ret != -ENOSPC || retry > 1) - return ret; - - /* First attempt, just clear anything that is purgeable. - * Second attempt, clear the entire GTT. 
- */ - ret = i915_gem_evict_everything(dev, retry == 0); - if (ret) - return ret; - - retry++; - } while (1); -} - -static int -i915_gem_execbuffer_relocate_slow(struct drm_device *dev, - struct drm_file *file, - struct drm_i915_gem_object **object_list, - struct drm_i915_gem_exec_object2 *exec_list, - int count) -{ - struct drm_i915_gem_relocation_entry *reloc; - int i, total, ret; - - for (i = 0; i < count; i++) - object_list[i]->in_execbuffer = false; - - mutex_unlock(&dev->struct_mutex); - - total = 0; - for (i = 0; i < count; i++) - total += exec_list[i].relocation_count; - - reloc = drm_malloc_ab(total, sizeof(*reloc)); - if (reloc == NULL) { - mutex_lock(&dev->struct_mutex); - return -ENOMEM; - } - - total = 0; - for (i = 0; i < count; i++) { - struct drm_i915_gem_relocation_entry __user *user_relocs; - - user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; - - if (copy_from_user(reloc+total, user_relocs, - exec_list[i].relocation_count * - sizeof(*reloc))) { - ret = -EFAULT; - mutex_lock(&dev->struct_mutex); - goto err; - } - - total += exec_list[i].relocation_count; - } - - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - mutex_lock(&dev->struct_mutex); - goto err; - } - - ret = i915_gem_execbuffer_reserve(dev, file, - object_list, exec_list, - count); - if (ret) - goto err; - - total = 0; - for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = object_list[i]; - obj->base.pending_read_domains = 0; - obj->base.pending_write_domain = 0; - ret = i915_gem_execbuffer_relocate_object_slow(obj, file, - &exec_list[i], - reloc + total); - if (ret) - goto err; - - total += exec_list[i].relocation_count; - } - - /* Leave the user relocations as are, this is the painfully slow path, - * and we want to avoid the complication of dropping the lock whilst - * having buffers reserved in the aperture and so causing spurious - * ENOSPC for random operations. - */ - -err: - drm_free_large(reloc); - return ret; -} - -static int -i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, - struct drm_file *file, - struct intel_ring_buffer *ring, - struct drm_i915_gem_object **objects, - int count) -{ - struct change_domains cd; - int ret, i; - - cd.invalidate_domains = 0; - cd.flush_domains = 0; - cd.flush_rings = 0; - for (i = 0; i < count; i++) - i915_gem_object_set_to_gpu_domain(objects[i], ring, &cd); - - if (cd.invalidate_domains | cd.flush_domains) { -#if WATCH_EXEC - DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", - __func__, - cd.invalidate_domains, - cd.flush_domains); -#endif - i915_gem_flush(dev, - cd.invalidate_domains, - cd.flush_domains, - cd.flush_rings); - } - - for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = objects[i]; - /* XXX replace with semaphores */ - if (obj->ring && ring != obj->ring) { - ret = i915_gem_object_wait_rendering(obj, true); - if (ret) - return ret; - } - } - - return 0; -} - /* Throttle our rendering by waiting until the ring has completed our requests * emitted over 20 msec ago. 
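
The throttle referred to here is time-based rather than count-based: walk the client's request list, oldest first, and wait on the newest request emitted more than 20 msec ago. Roughly, with simplified types standing in for the driver's jiffies-based request list:

struct req_ex {
        unsigned int seqno;
        unsigned long emitted;  /* timestamp; list is oldest first */
        struct req_ex *next;
};

/* Returns the seqno to wait on, or 0 if nothing is old enough. */
static unsigned int throttle_seqno(const struct req_ex *list,
                                   unsigned long now, unsigned long window)
{
        unsigned int seqno = 0;
        const struct req_ex *r;

        for (r = list; r != NULL; r = r->next) {
                if (now - r->emitted < window)
                        break;          /* newer than the window: stop */
                seqno = r->seqno;       /* newest request outside the window */
        }
        return seqno;
}
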
* @@ -3786,473 +3115,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) return ret; } -static int -i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec, - uint64_t exec_offset) -{ - uint32_t exec_start, exec_len; - - exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - exec_len = (uint32_t) exec->batch_len; - - if ((exec_start | exec_len) & 0x7) - return -EINVAL; - - if (!exec_start) - return -EINVAL; - - return 0; -} - -static int -validate_exec_list(struct drm_i915_gem_exec_object2 *exec, - int count) -{ - int i; - - for (i = 0; i < count; i++) { - char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; - int length; /* limited by fault_in_pages_readable() */ - - /* First check for malicious input causing overflow */ - if (exec[i].relocation_count > - INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) - return -EINVAL; - - length = exec[i].relocation_count * - sizeof(struct drm_i915_gem_relocation_entry); - if (!access_ok(VERIFY_READ, ptr, length)) - return -EFAULT; - - /* we may also need to update the presumed offsets */ - if (!access_ok(VERIFY_WRITE, ptr, length)) - return -EFAULT; - - if (fault_in_pages_readable(ptr, length)) - return -EFAULT; - } - - return 0; -} - -static int -i915_gem_do_execbuffer(struct drm_device *dev, void *data, - struct drm_file *file, - struct drm_i915_gem_execbuffer2 *args, - struct drm_i915_gem_exec_object2 *exec_list) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object **object_list = NULL; - struct drm_i915_gem_object *batch_obj; - struct drm_clip_rect *cliprects = NULL; - struct drm_i915_gem_request *request = NULL; - int ret, i, flips; - uint64_t exec_offset; - - struct intel_ring_buffer *ring = NULL; - - ret = i915_gem_check_is_wedged(dev); - if (ret) - return ret; - - ret = validate_exec_list(exec_list, args->buffer_count); - if (ret) - return ret; - -#if WATCH_EXEC - DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", - (int) args->buffers_ptr, args->buffer_count, args->batch_len); -#endif - switch (args->flags & I915_EXEC_RING_MASK) { - case I915_EXEC_DEFAULT: - case I915_EXEC_RENDER: - ring = &dev_priv->render_ring; - break; - case I915_EXEC_BSD: - if (!HAS_BSD(dev)) { - DRM_ERROR("execbuf with invalid ring (BSD)\n"); - return -EINVAL; - } - ring = &dev_priv->bsd_ring; - break; - case I915_EXEC_BLT: - if (!HAS_BLT(dev)) { - DRM_ERROR("execbuf with invalid ring (BLT)\n"); - return -EINVAL; - } - ring = &dev_priv->blt_ring; - break; - default: - DRM_ERROR("execbuf with unknown ring: %d\n", - (int)(args->flags & I915_EXEC_RING_MASK)); - return -EINVAL; - } - - if (args->buffer_count < 1) { - DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); - return -EINVAL; - } - object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); - if (object_list == NULL) { - DRM_ERROR("Failed to allocate object list for %d buffers\n", - args->buffer_count); - ret = -ENOMEM; - goto pre_mutex_err; - } - - if (args->num_cliprects != 0) { - cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), - GFP_KERNEL); - if (cliprects == NULL) { - ret = -ENOMEM; - goto pre_mutex_err; - } - - ret = copy_from_user(cliprects, - (struct drm_clip_rect __user *) - (uintptr_t) args->cliprects_ptr, - sizeof(*cliprects) * args->num_cliprects); - if (ret != 0) { - DRM_ERROR("copy %d cliprects failed: %d\n", - args->num_cliprects, ret); - ret = -EFAULT; - goto pre_mutex_err; - } - } - - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) { - ret = -ENOMEM; 
- goto pre_mutex_err; - } - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto pre_mutex_err; - - if (dev_priv->mm.suspended) { - mutex_unlock(&dev->struct_mutex); - ret = -EBUSY; - goto pre_mutex_err; - } - - /* Look up object handles */ - for (i = 0; i < args->buffer_count; i++) { - struct drm_i915_gem_object *obj; - - obj = to_intel_bo (drm_gem_object_lookup(dev, file, - exec_list[i].handle)); - if (obj == NULL) { - DRM_ERROR("Invalid object handle %d at index %d\n", - exec_list[i].handle, i); - /* prevent error path from reading uninitialized data */ - args->buffer_count = i; - ret = -ENOENT; - goto err; - } - object_list[i] = obj; - - if (obj->in_execbuffer) { - DRM_ERROR("Object %p appears more than once in object list\n", - obj); - /* prevent error path from reading uninitialized data */ - args->buffer_count = i + 1; - ret = -EINVAL; - goto err; - } - obj->in_execbuffer = true; - obj->pending_fenced_gpu_access = false; - } - - /* Move the objects en-masse into the GTT, evicting if necessary. */ - ret = i915_gem_execbuffer_reserve(dev, file, - object_list, exec_list, - args->buffer_count); - if (ret) - goto err; - - /* The objects are in their final locations, apply the relocations. */ - ret = i915_gem_execbuffer_relocate(dev, file, - object_list, exec_list, - args->buffer_count); - if (ret) { - if (ret == -EFAULT) { - ret = i915_gem_execbuffer_relocate_slow(dev, file, - object_list, - exec_list, - args->buffer_count); - BUG_ON(!mutex_is_locked(&dev->struct_mutex)); - } - if (ret) - goto err; - } - - /* Set the pending read domains for the batch buffer to COMMAND */ - batch_obj = object_list[args->buffer_count-1]; - if (batch_obj->base.pending_write_domain) { - DRM_ERROR("Attempting to use self-modifying batch buffer\n"); - ret = -EINVAL; - goto err; - } - batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; - - /* Sanity check the batch buffer */ - exec_offset = batch_obj->gtt_offset; - ret = i915_gem_check_execbuffer(args, exec_offset); - if (ret != 0) { - DRM_ERROR("execbuf with invalid offset/length\n"); - goto err; - } - - ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring, - object_list, args->buffer_count); - if (ret) - goto err; - -#if WATCH_COHERENCY - for (i = 0; i < args->buffer_count; i++) { - i915_gem_object_check_coherency(object_list[i], - exec_list[i].handle); - } -#endif - -#if WATCH_EXEC - i915_gem_dump_object(batch_obj, - args->batch_len, - __func__, - ~0); -#endif - - /* Check for any pending flips. As we only maintain a flip queue depth - * of 1, we can simply insert a WAIT for the next display flip prior - * to executing the batch and avoid stalling the CPU. 
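
The WAIT insertion described in this comment scans a per-plane bitmask; the loop condition "flips >> plane" below simply means "continue while any higher bit is still set". A tiny runnable illustration of the idiom:

#include <stdio.h>

int main(void)
{
        unsigned flips = 0x5;   /* planes 0 and 2 have flips pending */
        int plane;

        for (plane = 0; flips >> plane; plane++) {
                if ((flips >> plane) & 1)
                        printf("emit MI_WAIT_FOR_EVENT for plane %d\n", plane);
        }
        return 0;
}
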
- */ - flips = 0; - for (i = 0; i < args->buffer_count; i++) { - if (object_list[i]->base.write_domain) - flips |= atomic_read(&object_list[i]->pending_flip); - } - if (flips) { - int plane, flip_mask; - - for (plane = 0; flips >> plane; plane++) { - if (((flips >> plane) & 1) == 0) - continue; - - if (plane) - flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; - else - flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - - ret = intel_ring_begin(ring, 2); - if (ret) - goto err; - - intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); - } - } - - /* Exec the batchbuffer */ - ret = ring->dispatch_execbuffer(ring, args, cliprects, exec_offset); - if (ret) { - DRM_ERROR("dispatch failed %d\n", ret); - goto err; - } - - for (i = 0; i < args->buffer_count; i++) { - struct drm_i915_gem_object *obj = object_list[i]; - - obj->base.read_domains = obj->base.pending_read_domains; - obj->base.write_domain = obj->base.pending_write_domain; - obj->fenced_gpu_access = obj->pending_fenced_gpu_access; - - i915_gem_object_move_to_active(obj, ring); - if (obj->base.write_domain) { - obj->dirty = 1; - list_move_tail(&obj->gpu_write_list, - &ring->gpu_write_list); - intel_mark_busy(dev, obj); - } - - trace_i915_gem_object_change_domain(obj, - obj->base.read_domains, - obj->base.write_domain); - } - - /* - * Ensure that the commands in the batch buffer are - * finished before the interrupt fires - */ - i915_retire_commands(dev, ring); - - if (i915_add_request(dev, file, request, ring)) - i915_gem_next_request_seqno(dev, ring); - else - request = NULL; - -err: - for (i = 0; i < args->buffer_count; i++) { - object_list[i]->in_execbuffer = false; - drm_gem_object_unreference(&object_list[i]->base); - } - - mutex_unlock(&dev->struct_mutex); - -pre_mutex_err: - drm_free_large(object_list); - kfree(cliprects); - kfree(request); - - return ret; -} - -/* - * Legacy execbuffer just creates an exec2 list from the original exec object - * list array and passes it to the real function. 
- */ -int -i915_gem_execbuffer(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_gem_execbuffer *args = data; - struct drm_i915_gem_execbuffer2 exec2; - struct drm_i915_gem_exec_object *exec_list = NULL; - struct drm_i915_gem_exec_object2 *exec2_list = NULL; - int ret, i; - -#if WATCH_EXEC - DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", - (int) args->buffers_ptr, args->buffer_count, args->batch_len); -#endif - - if (args->buffer_count < 1) { - DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); - return -EINVAL; - } - - /* Copy in the exec list from userland */ - exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); - exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); - if (exec_list == NULL || exec2_list == NULL) { - DRM_ERROR("Failed to allocate exec list for %d buffers\n", - args->buffer_count); - drm_free_large(exec_list); - drm_free_large(exec2_list); - return -ENOMEM; - } - ret = copy_from_user(exec_list, - (struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, - sizeof(*exec_list) * args->buffer_count); - if (ret != 0) { - DRM_ERROR("copy %d exec entries failed %d\n", - args->buffer_count, ret); - drm_free_large(exec_list); - drm_free_large(exec2_list); - return -EFAULT; - } - - for (i = 0; i < args->buffer_count; i++) { - exec2_list[i].handle = exec_list[i].handle; - exec2_list[i].relocation_count = exec_list[i].relocation_count; - exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; - exec2_list[i].alignment = exec_list[i].alignment; - exec2_list[i].offset = exec_list[i].offset; - if (INTEL_INFO(dev)->gen < 4) - exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; - else - exec2_list[i].flags = 0; - } - - exec2.buffers_ptr = args->buffers_ptr; - exec2.buffer_count = args->buffer_count; - exec2.batch_start_offset = args->batch_start_offset; - exec2.batch_len = args->batch_len; - exec2.DR1 = args->DR1; - exec2.DR4 = args->DR4; - exec2.num_cliprects = args->num_cliprects; - exec2.cliprects_ptr = args->cliprects_ptr; - exec2.flags = I915_EXEC_RENDER; - - ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); - if (!ret) { - /* Copy the new buffer offsets back to the user's exec list. */ - for (i = 0; i < args->buffer_count; i++) - exec_list[i].offset = exec2_list[i].offset; - /* ... 
and back out to userspace */ - ret = copy_to_user((struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, - exec_list, - sizeof(*exec_list) * args->buffer_count); - if (ret) { - ret = -EFAULT; - DRM_ERROR("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); - } - } - - drm_free_large(exec_list); - drm_free_large(exec2_list); - return ret; -} - -int -i915_gem_execbuffer2(struct drm_device *dev, void *data, - struct drm_file *file) -{ - struct drm_i915_gem_execbuffer2 *args = data; - struct drm_i915_gem_exec_object2 *exec2_list = NULL; - int ret; - -#if WATCH_EXEC - DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", - (int) args->buffers_ptr, args->buffer_count, args->batch_len); -#endif - - if (args->buffer_count < 1) { - DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); - return -EINVAL; - } - - exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); - if (exec2_list == NULL) { - DRM_ERROR("Failed to allocate exec list for %d buffers\n", - args->buffer_count); - return -ENOMEM; - } - ret = copy_from_user(exec2_list, - (struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, - sizeof(*exec2_list) * args->buffer_count); - if (ret != 0) { - DRM_ERROR("copy %d exec entries failed %d\n", - args->buffer_count, ret); - drm_free_large(exec2_list); - return -EFAULT; - } - - ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); - if (!ret) { - /* Copy the new buffer offsets back to the user's exec list. */ - ret = copy_to_user((struct drm_i915_relocation_entry __user *) - (uintptr_t) args->buffers_ptr, - exec2_list, - sizeof(*exec2_list) * args->buffer_count); - if (ret) { - ret = -EFAULT; - DRM_ERROR("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); - } - } - - drm_free_large(exec2_list); - return ret; -} - int i915_gem_object_pin(struct drm_i915_gem_object *obj, uint32_t alignment, diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c new file mode 100644 index 000000000000..bdc613b91af8 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -0,0 +1,1155 @@ +/* + * Copyright © 2008,2010 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Eric Anholt + * Chris Wilson + * + */ + +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_drv.h" + +struct change_domains { + uint32_t invalidate_domains; + uint32_t flush_domains; + uint32_t flush_rings; +}; + +/* + * Set the next domain for the specified object. This + * may not actually perform the necessary flushing/invalidating though, + * as that may want to be batched with other set_domain operations + * + * This is (we hope) the only really tricky part of gem. The goal + * is fairly simple -- track which caches hold bits of the object + * and make sure they remain coherent. A few concrete examples may + * help to explain how it works. For shorthand, we use the notation + * (read_domains, write_domain), e.g. (CPU, CPU) to indicate + * a pair of read and write domain masks. + * + * Case 1: the batch buffer + * + * 1. Allocated + * 2. Written by CPU + * 3. Mapped to GTT + * 4. Read by GPU + * 5. Unmapped from GTT + * 6. Freed + * + * Let's take these a step at a time + * + * 1. Allocated + * Pages allocated from the kernel may still have + * cache contents, so we set them to (CPU, CPU) always. + * 2. Written by CPU (using pwrite) + * The pwrite function calls set_domain (CPU, CPU) and + * this function does nothing (as nothing changes) + * 3. Mapped by GTT + * This function asserts that the object is not + * currently in any GPU-based read or write domains + * 4. Read by GPU + * i915_gem_execbuffer calls set_domain (COMMAND, 0). + * As write_domain is zero, this function adds in the + * current read domains (CPU+COMMAND, 0). + * flush_domains is set to CPU. + * invalidate_domains is set to COMMAND + * clflush is run to get data out of the CPU caches + * then i915_dev_set_domain calls i915_gem_flush to + * emit an MI_FLUSH and drm_agp_chipset_flush + * 5. Unmapped from GTT + * i915_gem_object_unbind calls set_domain (CPU, CPU) + * flush_domains and invalidate_domains end up both zero + * so no flushing/invalidating happens + * 6. Freed + * yay, done + * + * Case 2: The shared render buffer + * + * 1. Allocated + * 2. Mapped to GTT + * 3. Read/written by GPU + * 4. set_domain to (CPU,CPU) + * 5. Read/written by CPU + * 6. Read/written by GPU + * + * 1. Allocated + * Same as last example, (CPU, CPU) + * 2. Mapped to GTT + * Nothing changes (assertions find that it is not in the GPU) + * 3. Read/written by GPU + * execbuffer calls set_domain (RENDER, RENDER) + * flush_domains gets CPU + * invalidate_domains gets GPU + * clflush (obj) + * MI_FLUSH and drm_agp_chipset_flush + * 4. set_domain (CPU, CPU) + * flush_domains gets GPU + * invalidate_domains gets CPU + * wait_rendering (obj) to make sure all drawing is complete. + * This will include an MI_FLUSH to get the data from GPU + * to memory + * clflush (obj) to invalidate the CPU cache + * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) + * 5. Read/written by CPU + * cache lines are loaded and dirtied + * 6. Read/written by GPU + * Same as last GPU access + * + * Case 3: The constant buffer + * + * 1. Allocated + * 2. Written by CPU + * 3. Read by GPU + * 4. Updated (written) by CPU again + * 5. Read by GPU + * + * 1. Allocated + * (CPU, CPU) + * 2. Written by CPU + * (CPU, CPU) + * 3. Read by GPU + * (CPU+RENDER, 0) + * flush_domains = CPU + * invalidate_domains = RENDER + * clflush (obj) + * MI_FLUSH + * drm_agp_chipset_flush + * 4.
Updated (written) by CPU again + * (CPU, CPU) + * flush_domains = 0 (no previous write domain) + * invalidate_domains = 0 (no new read domains) + * 5. Read by GPU + * (CPU+RENDER, 0) + * flush_domains = CPU + * invalidate_domains = RENDER + * clflush (obj) + * MI_FLUSH + * drm_agp_chipset_flush + */ +static void +i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *ring, + struct change_domains *cd) +{ + uint32_t invalidate_domains = 0, flush_domains = 0; + + /* + * If the object isn't moving to a new write domain, + * let the object stay in multiple read domains + */ + if (obj->base.pending_write_domain == 0) + obj->base.pending_read_domains |= obj->base.read_domains; + + /* + * Flush the current write domain if + * the new read domains don't match. Invalidate + * any read domains which differ from the old + * write domain + */ + if (obj->base.write_domain && + (((obj->base.write_domain != obj->base.pending_read_domains || + obj->ring != ring)) || + (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) { + flush_domains |= obj->base.write_domain; + invalidate_domains |= + obj->base.pending_read_domains & ~obj->base.write_domain; + } + /* + * Invalidate any read caches which may have + * stale data. That is, any new read domains. + */ + invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains; + if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) + i915_gem_clflush_object(obj); + + /* blow away mappings if mapped through GTT */ + if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT) + i915_gem_release_mmap(obj); + + /* The actual obj->write_domain will be updated with + * pending_write_domain after we emit the accumulated flush for all + * of our domain changes in execbuffers (which clears objects' + * write_domains). So if we have a current write domain that we + * aren't changing, set pending_write_domain to that. + */ + if (flush_domains == 0 && obj->base.pending_write_domain == 0) + obj->base.pending_write_domain = obj->base.write_domain; + + cd->invalidate_domains |= invalidate_domains; + cd->flush_domains |= flush_domains; + if (flush_domains & I915_GEM_GPU_DOMAINS) + cd->flush_rings |= obj->ring->id; + if (invalidate_domains & I915_GEM_GPU_DOMAINS) + cd->flush_rings |= ring->id; +} + +static int +i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, + struct drm_file *file_priv, + struct drm_i915_gem_exec_object2 *entry, + struct drm_i915_gem_relocation_entry *reloc) +{ + struct drm_device *dev = obj->base.dev; + struct drm_gem_object *target_obj; + uint32_t target_offset; + int ret = -EINVAL; + + target_obj = drm_gem_object_lookup(dev, file_priv, + reloc->target_handle); + if (target_obj == NULL) + return -ENOENT; + + target_offset = to_intel_bo(target_obj)->gtt_offset; + +#if WATCH_RELOC + DRM_INFO("%s: obj %p offset %08x target %d " + "read %08x write %08x gtt %08x " + "presumed %08x delta %08x\n", + __func__, + obj, + (int) reloc->offset, + (int) reloc->target_handle, + (int) reloc->read_domains, + (int) reloc->write_domain, + (int) target_offset, + (int) reloc->presumed_offset, + reloc->delta); +#endif + + /* The target buffer should have appeared before us in the + * exec_object list, so it should have a GTT space bound by now. 
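
The domain validation just below leans on the classic power-of-two test: x & (x - 1) clears the lowest set bit, so the result is non-zero exactly when more than one write domain is named. A runnable illustration:

#include <assert.h>
#include <stdint.h>

static int multiple_bits_set(uint32_t x)
{
        return (x & (x - 1)) != 0;      /* clear lowest bit; anything left? */
}

int main(void)
{
        assert(!multiple_bits_set(0x0));        /* no write domain: fine */
        assert(!multiple_bits_set(0x4));        /* exactly one domain: fine */
        assert(multiple_bits_set(0x4 | 0x8));   /* two domains: rejected */
        return 0;
}
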
+ */ + if (target_offset == 0) { + DRM_ERROR("No GTT space found for object %d\n", + reloc->target_handle); + goto err; + } + + /* Validate that the target is in a valid r/w GPU domain */ + if (reloc->write_domain & (reloc->write_domain - 1)) { + DRM_ERROR("reloc with multiple write domains: " + "obj %p target %d offset %d " + "read %08x write %08x", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); + goto err; + } + if (reloc->write_domain & I915_GEM_DOMAIN_CPU || + reloc->read_domains & I915_GEM_DOMAIN_CPU) { + DRM_ERROR("reloc with read/write CPU domains: " + "obj %p target %d offset %d " + "read %08x write %08x", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); + goto err; + } + if (reloc->write_domain && target_obj->pending_write_domain && + reloc->write_domain != target_obj->pending_write_domain) { + DRM_ERROR("Write domain conflict: " + "obj %p target %d offset %d " + "new %08x old %08x\n", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->write_domain, + target_obj->pending_write_domain); + goto err; + } + + target_obj->pending_read_domains |= reloc->read_domains; + target_obj->pending_write_domain |= reloc->write_domain; + + /* If the relocation already has the right value in it, no + * more work needs to be done. + */ + if (target_offset == reloc->presumed_offset) + goto out; + + /* Check that the relocation address is valid... */ + if (reloc->offset > obj->base.size - 4) { + DRM_ERROR("Relocation beyond object bounds: " + "obj %p target %d offset %d size %d.\n", + obj, reloc->target_handle, + (int) reloc->offset, + (int) obj->base.size); + goto err; + } + if (reloc->offset & 3) { + DRM_ERROR("Relocation not 4-byte aligned: " + "obj %p target %d offset %d.\n", + obj, reloc->target_handle, + (int) reloc->offset); + goto err; + } + + /* and points to somewhere within the target object. */ + if (reloc->delta >= target_obj->size) { + DRM_ERROR("Relocation beyond target object bounds: " + "obj %p target %d delta %d size %d.\n", + obj, reloc->target_handle, + (int) reloc->delta, + (int) target_obj->size); + goto err; + } + + reloc->delta += target_offset; + if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { + uint32_t page_offset = reloc->offset & ~PAGE_MASK; + char *vaddr; + + vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]); + *(uint32_t *)(vaddr + page_offset) = reloc->delta; + kunmap_atomic(vaddr); + } else { + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t __iomem *reloc_entry; + void __iomem *reloc_page; + + ret = i915_gem_object_set_to_gtt_domain(obj, 1); + if (ret) + goto err; + + /* Map the page containing the relocation we're going to perform. 
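
A note on the masking in the mapping code that follows: Linux's PAGE_MASK keeps the high bits (it is ~(PAGE_SIZE - 1)), the opposite of what the name might suggest. So offset & PAGE_MASK yields the page-aligned base to map, and offset & ~PAGE_MASK the byte offset within that page. Checkable in isolation:

#include <assert.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096u
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))      /* high bits, as in the kernel */

int main(void)
{
        uint32_t offset = 0x12345;

        assert((offset & EX_PAGE_MASK) == 0x12000u);    /* page to map */
        assert((offset & ~EX_PAGE_MASK) == 0x345u);     /* slot in the page */
        return 0;
}
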
*/ + reloc->offset += obj->gtt_offset; + reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, + reloc->offset & PAGE_MASK); + reloc_entry = (uint32_t __iomem *) + (reloc_page + (reloc->offset & ~PAGE_MASK)); + iowrite32(reloc->delta, reloc_entry); + io_mapping_unmap_atomic(reloc_page); + } + + /* and update the user's relocation entry */ + reloc->presumed_offset = target_offset; + +out: + ret = 0; +err: + drm_gem_object_unreference(target_obj); + return ret; +} + +static int +i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, + struct drm_file *file_priv, + struct drm_i915_gem_exec_object2 *entry) +{ + struct drm_i915_gem_relocation_entry __user *user_relocs; + int i, ret; + + user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; + for (i = 0; i < entry->relocation_count; i++) { + struct drm_i915_gem_relocation_entry reloc; + + if (__copy_from_user_inatomic(&reloc, + user_relocs+i, + sizeof(reloc))) + return -EFAULT; + + ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc); + if (ret) + return ret; + + if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, + &reloc.presumed_offset, + sizeof(reloc.presumed_offset))) + return -EFAULT; + } + + return 0; +} + +static int +i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, + struct drm_file *file_priv, + struct drm_i915_gem_exec_object2 *entry, + struct drm_i915_gem_relocation_entry *relocs) +{ + int i, ret; + + for (i = 0; i < entry->relocation_count; i++) { + ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]); + if (ret) + return ret; + } + + return 0; +} + +static int +i915_gem_execbuffer_relocate(struct drm_device *dev, + struct drm_file *file, + struct drm_i915_gem_object **object_list, + struct drm_i915_gem_exec_object2 *exec_list, + int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + struct drm_i915_gem_object *obj = object_list[i]; + obj->base.pending_read_domains = 0; + obj->base.pending_write_domain = 0; + ret = i915_gem_execbuffer_relocate_object(obj, file, + &exec_list[i]); + if (ret) + return ret; + } + + return 0; +} + +static int +i915_gem_execbuffer_reserve(struct drm_device *dev, + struct drm_file *file, + struct drm_i915_gem_object **object_list, + struct drm_i915_gem_exec_object2 *exec_list, + int count) +{ + int ret, i, retry; + + /* Attempt to pin all of the buffers into the GTT. + * This is done in 3 phases: + * + * 1a. Unbind all objects that do not match the GTT constraints for + * the execbuffer (fenceable, mappable, alignment etc). + * 1b. Increment pin count for already bound objects. + * 2. Bind new objects. + * 3. Decrement pin count. + * + * This avoids unnecessary unbinding of later objects in order to make + * room for the earlier objects *unless* we need to defragment. + */ + retry = 0; + do { + ret = 0; + + /* Unbind any ill-fitting objects or pin. */ + for (i = 0; i < count; i++) { + struct drm_i915_gem_object *obj = object_list[i]; + struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; + bool need_fence, need_mappable; + + if (!obj->gtt_space) + continue; + + need_fence = + entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode != I915_TILING_NONE; + need_mappable = + entry->relocation_count ?
true : need_fence; + + if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) || + (need_mappable && !obj->map_and_fenceable)) + ret = i915_gem_object_unbind(obj); + else + ret = i915_gem_object_pin(obj, + entry->alignment, + need_mappable); + if (ret) { + count = i; + goto err; + } + } + + /* Bind fresh objects */ + for (i = 0; i < count; i++) { + struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; + struct drm_i915_gem_object *obj = object_list[i]; + bool need_fence; + + need_fence = + entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode != I915_TILING_NONE; + + if (!obj->gtt_space) { + bool need_mappable = + entry->relocation_count ? true : need_fence; + + ret = i915_gem_object_pin(obj, + entry->alignment, + need_mappable); + if (ret) + break; + } + + if (need_fence) { + ret = i915_gem_object_get_fence_reg(obj, true); + if (ret) + break; + + obj->pending_fenced_gpu_access = true; + } + + entry->offset = obj->gtt_offset; + } + +err: /* Decrement pin count for bound objects */ + for (i = 0; i < count; i++) { + struct drm_i915_gem_object *obj = object_list[i]; + if (obj->gtt_space) + i915_gem_object_unpin(obj); + } + + if (ret != -ENOSPC || retry > 1) + return ret; + + /* First attempt, just clear anything that is purgeable. + * Second attempt, clear the entire GTT. + */ + ret = i915_gem_evict_everything(dev, retry == 0); + if (ret) + return ret; + + retry++; + } while (1); +} + +static int +i915_gem_execbuffer_relocate_slow(struct drm_device *dev, + struct drm_file *file, + struct drm_i915_gem_object **object_list, + struct drm_i915_gem_exec_object2 *exec_list, + int count) +{ + struct drm_i915_gem_relocation_entry *reloc; + int i, total, ret; + + for (i = 0; i < count; i++) + object_list[i]->in_execbuffer = false; + + mutex_unlock(&dev->struct_mutex); + + total = 0; + for (i = 0; i < count; i++) + total += exec_list[i].relocation_count; + + reloc = drm_malloc_ab(total, sizeof(*reloc)); + if (reloc == NULL) { + mutex_lock(&dev->struct_mutex); + return -ENOMEM; + } + + total = 0; + for (i = 0; i < count; i++) { + struct drm_i915_gem_relocation_entry __user *user_relocs; + + user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; + + if (copy_from_user(reloc+total, user_relocs, + exec_list[i].relocation_count * + sizeof(*reloc))) { + ret = -EFAULT; + mutex_lock(&dev->struct_mutex); + goto err; + } + + total += exec_list[i].relocation_count; + } + + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + mutex_lock(&dev->struct_mutex); + goto err; + } + + ret = i915_gem_execbuffer_reserve(dev, file, + object_list, exec_list, + count); + if (ret) + goto err; + + total = 0; + for (i = 0; i < count; i++) { + struct drm_i915_gem_object *obj = object_list[i]; + obj->base.pending_read_domains = 0; + obj->base.pending_write_domain = 0; + ret = i915_gem_execbuffer_relocate_object_slow(obj, file, + &exec_list[i], + reloc + total); + if (ret) + goto err; + + total += exec_list[i].relocation_count; + } + + /* Leave the user relocations as are, this is the painfully slow path, + * and we want to avoid the complication of dropping the lock whilst + * having buffers reserved in the aperture and so causing spurious + * ENOSPC for random operations. 
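
The reservation loop above escalates eviction on each -ENOSPC: the first retry evicts only purgeable buffers, the second empties the whole GTT, and a third failure is final. The control flow in isolation (a sketch; the callbacks stand in for the real reserve and evict paths):

#include <errno.h>

static int reserve_with_retry(int (*try_reserve)(void *ctx),
                              int (*evict)(void *ctx, int purgeable_only),
                              void *ctx)
{
        int retry = 0;

        for (;;) {
                int ret = try_reserve(ctx);
                if (ret != -ENOSPC || retry > 1)
                        return ret;

                /* retry 0: drop purgeable buffers; retry 1: evict everything */
                ret = evict(ctx, retry == 0);
                if (ret)
                        return ret;

                retry++;
        }
}
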
+ */ + +err: + drm_free_large(reloc); + return ret; +} + +static void +i915_gem_execbuffer_flush(struct drm_device *dev, + uint32_t invalidate_domains, + uint32_t flush_domains, + uint32_t flush_rings) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + if (flush_domains & I915_GEM_DOMAIN_CPU) + intel_gtt_chipset_flush(); + + if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { + if (flush_rings & RING_RENDER) + i915_gem_flush_ring(dev, &dev_priv->render_ring, + invalidate_domains, flush_domains); + if (flush_rings & RING_BSD) + i915_gem_flush_ring(dev, &dev_priv->bsd_ring, + invalidate_domains, flush_domains); + if (flush_rings & RING_BLT) + i915_gem_flush_ring(dev, &dev_priv->blt_ring, + invalidate_domains, flush_domains); + } +} + + +static int +i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, + struct drm_file *file, + struct intel_ring_buffer *ring, + struct drm_i915_gem_object **objects, + int count) +{ + struct change_domains cd; + int ret, i; + + cd.invalidate_domains = 0; + cd.flush_domains = 0; + cd.flush_rings = 0; + for (i = 0; i < count; i++) + i915_gem_object_set_to_gpu_domain(objects[i], ring, &cd); + + if (cd.invalidate_domains | cd.flush_domains) { +#if WATCH_EXEC + DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", + __func__, + cd.invalidate_domains, + cd.flush_domains); +#endif + i915_gem_execbuffer_flush(dev, + cd.invalidate_domains, + cd.flush_domains, + cd.flush_rings); + } + + for (i = 0; i < count; i++) { + struct drm_i915_gem_object *obj = objects[i]; + /* XXX replace with semaphores */ + if (obj->ring && ring != obj->ring) { + ret = i915_gem_object_wait_rendering(obj, true); + if (ret) + return ret; + } + } + + return 0; +} + +static int +i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec, + uint64_t exec_offset) +{ + uint32_t exec_start, exec_len; + + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; + exec_len = (uint32_t) exec->batch_len; + + if ((exec_start | exec_len) & 0x7) + return -EINVAL; + + if (!exec_start) + return -EINVAL; + + return 0; +} + +static int +validate_exec_list(struct drm_i915_gem_exec_object2 *exec, + int count) +{ + int i; + + for (i = 0; i < count; i++) { + char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; + int length; /* limited by fault_in_pages_readable() */ + + /* First check for malicious input causing overflow */ + if (exec[i].relocation_count > + INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) + return -EINVAL; + + length = exec[i].relocation_count * + sizeof(struct drm_i915_gem_relocation_entry); + if (!access_ok(VERIFY_READ, ptr, length)) + return -EFAULT; + + /* we may also need to update the presumed offsets */ + if (!access_ok(VERIFY_WRITE, ptr, length)) + return -EFAULT; + + if (fault_in_pages_readable(ptr, length)) + return -EFAULT; + } + + return 0; +} + +static void +i915_gem_execbuffer_retire_commands(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + uint32_t flush_domains = 0; + + /* The sampler always gets flushed on i965 (sigh) */ + if (INTEL_INFO(dev)->gen >= 4) + flush_domains |= I915_GEM_DOMAIN_SAMPLER; + + ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains); +} + + +static int +i915_gem_do_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file, + struct drm_i915_gem_execbuffer2 *args, + struct drm_i915_gem_exec_object2 *exec_list) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object **object_list = NULL; + struct drm_i915_gem_object *batch_obj; + struct 
drm_clip_rect *cliprects = NULL; + struct drm_i915_gem_request *request = NULL; + struct intel_ring_buffer *ring; + int ret, i, flips; + uint64_t exec_offset; + + ret = validate_exec_list(exec_list, args->buffer_count); + if (ret) + return ret; + +#if WATCH_EXEC + DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", + (int) args->buffers_ptr, args->buffer_count, args->batch_len); +#endif + switch (args->flags & I915_EXEC_RING_MASK) { + case I915_EXEC_DEFAULT: + case I915_EXEC_RENDER: + ring = &dev_priv->render_ring; + break; + case I915_EXEC_BSD: + if (!HAS_BSD(dev)) { + DRM_ERROR("execbuf with invalid ring (BSD)\n"); + return -EINVAL; + } + ring = &dev_priv->bsd_ring; + break; + case I915_EXEC_BLT: + if (!HAS_BLT(dev)) { + DRM_ERROR("execbuf with invalid ring (BLT)\n"); + return -EINVAL; + } + ring = &dev_priv->blt_ring; + break; + default: + DRM_ERROR("execbuf with unknown ring: %d\n", + (int)(args->flags & I915_EXEC_RING_MASK)); + return -EINVAL; + } + + if (args->buffer_count < 1) { + DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); + return -EINVAL; + } + object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); + if (object_list == NULL) { + DRM_ERROR("Failed to allocate object list for %d buffers\n", + args->buffer_count); + ret = -ENOMEM; + goto pre_mutex_err; + } + + if (args->num_cliprects != 0) { + cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), + GFP_KERNEL); + if (cliprects == NULL) { + ret = -ENOMEM; + goto pre_mutex_err; + } + + ret = copy_from_user(cliprects, + (struct drm_clip_rect __user *) + (uintptr_t) args->cliprects_ptr, + sizeof(*cliprects) * args->num_cliprects); + if (ret != 0) { + DRM_ERROR("copy %d cliprects failed: %d\n", + args->num_cliprects, ret); + ret = -EFAULT; + goto pre_mutex_err; + } + } + + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) { + ret = -ENOMEM; + goto pre_mutex_err; + } + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto pre_mutex_err; + + if (dev_priv->mm.suspended) { + mutex_unlock(&dev->struct_mutex); + ret = -EBUSY; + goto pre_mutex_err; + } + + /* Look up object handles */ + for (i = 0; i < args->buffer_count; i++) { + struct drm_i915_gem_object *obj; + + obj = to_intel_bo (drm_gem_object_lookup(dev, file, + exec_list[i].handle)); + if (obj == NULL) { + DRM_ERROR("Invalid object handle %d at index %d\n", + exec_list[i].handle, i); + /* prevent error path from reading uninitialized data */ + args->buffer_count = i; + ret = -ENOENT; + goto err; + } + object_list[i] = obj; + + if (obj->in_execbuffer) { + DRM_ERROR("Object %p appears more than once in object list\n", + obj); + /* prevent error path from reading uninitialized data */ + args->buffer_count = i + 1; + ret = -EINVAL; + goto err; + } + obj->in_execbuffer = true; + obj->pending_fenced_gpu_access = false; + } + + /* Move the objects en-masse into the GTT, evicting if necessary. */ + ret = i915_gem_execbuffer_reserve(dev, file, + object_list, exec_list, + args->buffer_count); + if (ret) + goto err; + + /* The objects are in their final locations, apply the relocations. 
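
The ring selection switch above is a plain decode of the low bits of args->flags. In outline it looks like this (stand-in flag values for illustration only; the authoritative I915_EXEC_* numbers live in i915_drm.h):

/* Stand-in encodings; see i915_drm.h for the real I915_EXEC_* values. */
enum ring_sel { SEL_RENDER, SEL_BSD, SEL_BLT, SEL_INVALID };

#define EX_RING_MASK    0x7
#define EX_DEFAULT      0
#define EX_RENDER       1
#define EX_BSD          2
#define EX_BLT          3

static enum ring_sel decode_ring(unsigned int flags, int has_bsd, int has_blt)
{
        switch (flags & EX_RING_MASK) {
        case EX_DEFAULT:
        case EX_RENDER:
                return SEL_RENDER;
        case EX_BSD:
                return has_bsd ? SEL_BSD : SEL_INVALID; /* -EINVAL above */
        case EX_BLT:
                return has_blt ? SEL_BLT : SEL_INVALID;
        default:
                return SEL_INVALID;
        }
}
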
*/ + ret = i915_gem_execbuffer_relocate(dev, file, + object_list, exec_list, + args->buffer_count); + if (ret) { + if (ret == -EFAULT) { + ret = i915_gem_execbuffer_relocate_slow(dev, file, + object_list, + exec_list, + args->buffer_count); + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + } + if (ret) + goto err; + } + + /* Set the pending read domains for the batch buffer to COMMAND */ + batch_obj = object_list[args->buffer_count-1]; + if (batch_obj->base.pending_write_domain) { + DRM_ERROR("Attempting to use self-modifying batch buffer\n"); + ret = -EINVAL; + goto err; + } + batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; + + /* Sanity check the batch buffer */ + exec_offset = batch_obj->gtt_offset; + ret = i915_gem_check_execbuffer(args, exec_offset); + if (ret != 0) { + DRM_ERROR("execbuf with invalid offset/length\n"); + goto err; + } + + ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring, + object_list, args->buffer_count); + if (ret) + goto err; + +#if WATCH_COHERENCY + for (i = 0; i < args->buffer_count; i++) { + i915_gem_object_check_coherency(object_list[i], + exec_list[i].handle); + } +#endif + +#if WATCH_EXEC + i915_gem_dump_object(batch_obj, + args->batch_len, + __func__, + ~0); +#endif + + /* Check for any pending flips. As we only maintain a flip queue depth + * of 1, we can simply insert a WAIT for the next display flip prior + * to executing the batch and avoid stalling the CPU. + */ + flips = 0; + for (i = 0; i < args->buffer_count; i++) { + if (object_list[i]->base.write_domain) + flips |= atomic_read(&object_list[i]->pending_flip); + } + if (flips) { + int plane, flip_mask; + + for (plane = 0; flips >> plane; plane++) { + if (((flips >> plane) & 1) == 0) + continue; + + if (plane) + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; + else + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; + + ret = intel_ring_begin(ring, 2); + if (ret) + goto err; + + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + } + } + + /* Exec the batchbuffer */ + ret = ring->dispatch_execbuffer(ring, args, cliprects, exec_offset); + if (ret) { + DRM_ERROR("dispatch failed %d\n", ret); + goto err; + } + + for (i = 0; i < args->buffer_count; i++) { + struct drm_i915_gem_object *obj = object_list[i]; + + obj->base.read_domains = obj->base.pending_read_domains; + obj->base.write_domain = obj->base.pending_write_domain; + obj->fenced_gpu_access = obj->pending_fenced_gpu_access; + + i915_gem_object_move_to_active(obj, ring); + if (obj->base.write_domain) { + obj->dirty = 1; + list_move_tail(&obj->gpu_write_list, + &ring->gpu_write_list); + intel_mark_busy(dev, obj); + } + + trace_i915_gem_object_change_domain(obj, + obj->base.read_domains, + obj->base.write_domain); + } + + /* + * Ensure that the commands in the batch buffer are + * finished before the interrupt fires + */ + i915_gem_execbuffer_retire_commands(dev, ring); + + if (i915_add_request(dev, file, request, ring)) + i915_gem_next_request_seqno(dev, ring); + else + request = NULL; + +err: + for (i = 0; i < args->buffer_count; i++) { + object_list[i]->in_execbuffer = false; + drm_gem_object_unreference(&object_list[i]->base); + } + + mutex_unlock(&dev->struct_mutex); + +pre_mutex_err: + drm_free_large(object_list); + kfree(cliprects); + kfree(request); + + return ret; +} + +/* + * Legacy execbuffer just creates an exec2 list from the original exec object + * list array and passes it to the real function. 
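
One subtlety in the tail of i915_gem_do_execbuffer above, worth spelling out before the legacy wrapper below: when i915_add_request() succeeds, the request belongs to the ring, so the local pointer is set to NULL and the kfree() on the exit path becomes a no-op; when it fails, i915_gem_next_request_seqno() keeps the reserved seqno pending as the ring's outstanding lazy request, and the unused allocation is freed normally. The ownership in outline (a sketch with hypothetical helper names, not the driver's API):

#include <stdlib.h>

struct request_ex { int seqno; };

extern int add_request(struct request_ex *rq); /* 0 on success, takes ownership */
extern void rearm_lazy_seqno(void);            /* hypothetical stand-in */

static void finish_execbuffer(struct request_ex *request)
{
        if (add_request(request) != 0)
                rearm_lazy_seqno();     /* failed: keep the seqno pending */
        else
                request = NULL;         /* succeeded: the ring owns it now */

        free(request);                  /* free(NULL) is a no-op */
}
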
+ */ +int +i915_gem_execbuffer(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_execbuffer *args = data; + struct drm_i915_gem_execbuffer2 exec2; + struct drm_i915_gem_exec_object *exec_list = NULL; + struct drm_i915_gem_exec_object2 *exec2_list = NULL; + int ret, i; + +#if WATCH_EXEC + DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", + (int) args->buffers_ptr, args->buffer_count, args->batch_len); +#endif + + if (args->buffer_count < 1) { + DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); + return -EINVAL; + } + + /* Copy in the exec list from userland */ + exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); + exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); + if (exec_list == NULL || exec2_list == NULL) { + DRM_ERROR("Failed to allocate exec list for %d buffers\n", + args->buffer_count); + drm_free_large(exec_list); + drm_free_large(exec2_list); + return -ENOMEM; + } + ret = copy_from_user(exec_list, + (struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + sizeof(*exec_list) * args->buffer_count); + if (ret != 0) { + DRM_ERROR("copy %d exec entries failed %d\n", + args->buffer_count, ret); + drm_free_large(exec_list); + drm_free_large(exec2_list); + return -EFAULT; + } + + for (i = 0; i < args->buffer_count; i++) { + exec2_list[i].handle = exec_list[i].handle; + exec2_list[i].relocation_count = exec_list[i].relocation_count; + exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; + exec2_list[i].alignment = exec_list[i].alignment; + exec2_list[i].offset = exec_list[i].offset; + if (INTEL_INFO(dev)->gen < 4) + exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; + else + exec2_list[i].flags = 0; + } + + exec2.buffers_ptr = args->buffers_ptr; + exec2.buffer_count = args->buffer_count; + exec2.batch_start_offset = args->batch_start_offset; + exec2.batch_len = args->batch_len; + exec2.DR1 = args->DR1; + exec2.DR4 = args->DR4; + exec2.num_cliprects = args->num_cliprects; + exec2.cliprects_ptr = args->cliprects_ptr; + exec2.flags = I915_EXEC_RENDER; + + ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); + if (!ret) { + /* Copy the new buffer offsets back to the user's exec list. */ + for (i = 0; i < args->buffer_count; i++) + exec_list[i].offset = exec2_list[i].offset; + /* ... 
and back out to userspace */ + ret = copy_to_user((struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + exec_list, + sizeof(*exec_list) * args->buffer_count); + if (ret) { + ret = -EFAULT; + DRM_ERROR("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); + } + } + + drm_free_large(exec_list); + drm_free_large(exec2_list); + return ret; +} + +int +i915_gem_execbuffer2(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_gem_execbuffer2 *args = data; + struct drm_i915_gem_exec_object2 *exec2_list = NULL; + int ret; + +#if WATCH_EXEC + DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", + (int) args->buffers_ptr, args->buffer_count, args->batch_len); +#endif + + if (args->buffer_count < 1) { + DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count); + return -EINVAL; + } + + exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); + if (exec2_list == NULL) { + DRM_ERROR("Failed to allocate exec list for %d buffers\n", + args->buffer_count); + return -ENOMEM; + } + ret = copy_from_user(exec2_list, + (struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + sizeof(*exec2_list) * args->buffer_count); + if (ret != 0) { + DRM_ERROR("copy %d exec entries failed %d\n", + args->buffer_count, ret); + drm_free_large(exec2_list); + return -EFAULT; + } + + ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); + if (!ret) { + /* Copy the new buffer offsets back to the user's exec list. */ + ret = copy_to_user((struct drm_i915_relocation_entry __user *) + (uintptr_t) args->buffers_ptr, + exec2_list, + sizeof(*exec2_list) * args->buffer_count); + if (ret) { + ret = -EFAULT; + DRM_ERROR("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); + } + } + + drm_free_large(exec2_list); + return ret; +} + -- cgit v1.2.3 From 432e58edc9de1d9c3d6a7b444b3c455b8f209a7d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 25 Nov 2010 19:32:06 +0000 Subject: drm/i915: Avoid allocation for execbuffer object list Besides the minimal improvement in reducing the execbuffer overhead, the real benefit is clarifying a few routines. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 10 +- drivers/gpu/drm/i915/i915_gem.c | 1 + drivers/gpu/drm/i915/i915_gem_evict.c | 14 +- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 403 ++++++++++++++--------------- 4 files changed, 199 insertions(+), 229 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6c10b645dde9..e7c4108c94cd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -712,8 +712,8 @@ struct drm_i915_gem_object { struct list_head mm_list; /** This object's place on GPU write list */ struct list_head gpu_write_list; - /** This object's place on eviction list */ - struct list_head evict_list; + /** This object's place in the batchbuffer or on the eviction list */ + struct list_head exec_list; /** * This is set if the object is on the active or flushing lists @@ -737,12 +737,6 @@ struct drm_i915_gem_object { */ signed int fence_reg : 5; - /** - * Used for checking the object doesn't appear more than once - * in an execbuffer object list. - */ - unsigned int in_execbuffer : 1; - /** * Advice: are the backing pages purgeable? 
*/ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index b30c6c167048..d9d81f94a4b8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3399,6 +3399,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, INIT_LIST_HEAD(&obj->mm_list); INIT_LIST_HEAD(&obj->gtt_list); INIT_LIST_HEAD(&obj->ring_list); + INIT_LIST_HEAD(&obj->exec_list); INIT_LIST_HEAD(&obj->gpu_write_list); obj->madv = I915_MADV_WILLNEED; /* Avoid an unnecessary call to unbind on the first bind. */ diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 03e15d37b550..78b8cf90c922 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -34,7 +34,7 @@ static bool mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) { - list_add(&obj->evict_list, unwind); + list_add(&obj->exec_list, unwind); drm_gem_object_reference(&obj->base); return drm_mm_scan_add_block(obj->gtt_space); } @@ -127,7 +127,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, } /* Nothing found, clean up and bail out! */ - list_for_each_entry(obj, &unwind_list, evict_list) { + list_for_each_entry(obj, &unwind_list, exec_list) { ret = drm_mm_scan_remove_block(obj->gtt_space); BUG_ON(ret); drm_gem_object_unreference(&obj->base); @@ -146,12 +146,12 @@ found: while (!list_empty(&unwind_list)) { obj = list_first_entry(&unwind_list, struct drm_i915_gem_object, - evict_list); + exec_list); if (drm_mm_scan_remove_block(obj->gtt_space)) { - list_move(&obj->evict_list, &eviction_list); + list_move(&obj->exec_list, &eviction_list); continue; } - list_del(&obj->evict_list); + list_del_init(&obj->exec_list); drm_gem_object_unreference(&obj->base); } @@ -159,10 +159,10 @@ found: while (!list_empty(&eviction_list)) { obj = list_first_entry(&eviction_list, struct drm_i915_gem_object, - evict_list); + exec_list); if (ret == 0) ret = i915_gem_object_unbind(obj); - list_del(&obj->evict_list); + list_del_init(&obj->exec_list); drm_gem_object_unreference(&obj->base); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index bdc613b91af8..d54070111f9d 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -406,18 +406,16 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, static int i915_gem_execbuffer_relocate(struct drm_device *dev, struct drm_file *file, - struct drm_i915_gem_object **object_list, - struct drm_i915_gem_exec_object2 *exec_list, - int count) + struct list_head *objects, + struct drm_i915_gem_exec_object2 *exec) { - int i, ret; + struct drm_i915_gem_object *obj; + int ret; - for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = object_list[i]; + list_for_each_entry(obj, objects, exec_list) { obj->base.pending_read_domains = 0; obj->base.pending_write_domain = 0; - ret = i915_gem_execbuffer_relocate_object(obj, file, - &exec_list[i]); + ret = i915_gem_execbuffer_relocate_object(obj, file, exec++); if (ret) return ret; } @@ -428,11 +426,12 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, static int i915_gem_execbuffer_reserve(struct drm_device *dev, struct drm_file *file, - struct drm_i915_gem_object **object_list, - struct drm_i915_gem_exec_object2 *exec_list, - int count) + struct list_head *objects, + struct drm_i915_gem_exec_object2 *exec) { - int ret, i, retry; + struct drm_i915_gem_object *obj; + struct 
drm_i915_gem_exec_object2 *entry; + int ret, retry; /* Attempt to pin all of the buffers into the GTT. * This is done in 3 phases: @@ -451,13 +450,14 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, ret = 0; /* Unbind any ill-fitting objects or pin. */ - for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = object_list[i]; - struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; + entry = exec; + list_for_each_entry(obj, objects, exec_list) { bool need_fence, need_mappable; - if (!obj->gtt_space) + if (!obj->gtt_space) { + entry++; continue; + } need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && @@ -472,16 +472,15 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); - if (ret) { - count = i; + if (ret) goto err; - } + + entry++; } /* Bind fresh objects */ - for (i = 0; i < count; i++) { - struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; - struct drm_i915_gem_object *obj = object_list[i]; + entry = exec; + list_for_each_entry(obj, objects, exec_list) { bool need_fence; need_fence = @@ -504,15 +503,15 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, if (ret) break; - obj->pending_fenced_gpu_access = true; } + obj->pending_fenced_gpu_access = need_fence; entry->offset = obj->gtt_offset; + entry++; } -err: /* Decrement pin count for bound objects */ - for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = object_list[i]; + /* Decrement pin count for bound objects */ + list_for_each_entry(obj, objects, exec_list) { if (obj->gtt_space) i915_gem_object_unpin(obj); } @@ -529,26 +528,36 @@ err: /* Decrement pin count for bound objects */ retry++; } while (1); + +err: + while (objects != &obj->exec_list) { + if (obj->gtt_space) + i915_gem_object_unpin(obj); + + obj = list_entry(obj->exec_list.prev, + struct drm_i915_gem_object, + exec_list); + } + + return ret; } static int i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_file *file, - struct drm_i915_gem_object **object_list, - struct drm_i915_gem_exec_object2 *exec_list, + struct list_head *objects, + struct drm_i915_gem_exec_object2 *exec, int count) { struct drm_i915_gem_relocation_entry *reloc; + struct drm_i915_gem_object *obj; int i, total, ret; - for (i = 0; i < count; i++) - object_list[i]->in_execbuffer = false; - mutex_unlock(&dev->struct_mutex); total = 0; for (i = 0; i < count; i++) - total += exec_list[i].relocation_count; + total += exec[i].relocation_count; reloc = drm_malloc_ab(total, sizeof(*reloc)); if (reloc == NULL) { @@ -560,17 +569,16 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, for (i = 0; i < count; i++) { struct drm_i915_gem_relocation_entry __user *user_relocs; - user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; + user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr; if (copy_from_user(reloc+total, user_relocs, - exec_list[i].relocation_count * - sizeof(*reloc))) { + exec[i].relocation_count * sizeof(*reloc))) { ret = -EFAULT; mutex_lock(&dev->struct_mutex); goto err; } - total += exec_list[i].relocation_count; + total += exec[i].relocation_count; } ret = i915_mutex_lock_interruptible(dev); @@ -579,24 +587,22 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, goto err; } - ret = i915_gem_execbuffer_reserve(dev, file, - object_list, exec_list, - count); + ret = i915_gem_execbuffer_reserve(dev, file, objects, exec); if (ret) goto err; total = 0; - for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = object_list[i]; + 
list_for_each_entry(obj, objects, exec_list) { obj->base.pending_read_domains = 0; obj->base.pending_write_domain = 0; ret = i915_gem_execbuffer_relocate_object_slow(obj, file, - &exec_list[i], + exec, reloc + total); if (ret) goto err; - total += exec_list[i].relocation_count; + total += exec->relocation_count; + exec++; } /* Leave the user relocations as are, this is the painfully slow path, @@ -636,20 +642,18 @@ i915_gem_execbuffer_flush(struct drm_device *dev, static int -i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, - struct drm_file *file, - struct intel_ring_buffer *ring, - struct drm_i915_gem_object **objects, - int count) +i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, + struct list_head *objects) { + struct drm_i915_gem_object *obj; struct change_domains cd; - int ret, i; + int ret; cd.invalidate_domains = 0; cd.flush_domains = 0; cd.flush_rings = 0; - for (i = 0; i < count; i++) - i915_gem_object_set_to_gpu_domain(objects[i], ring, &cd); + list_for_each_entry(obj, objects, exec_list) + i915_gem_object_set_to_gpu_domain(obj, ring, &cd); if (cd.invalidate_domains | cd.flush_domains) { #if WATCH_EXEC @@ -658,14 +662,13 @@ i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, cd.invalidate_domains, cd.flush_domains); #endif - i915_gem_execbuffer_flush(dev, + i915_gem_execbuffer_flush(ring->dev, cd.invalidate_domains, cd.flush_domains, cd.flush_rings); } - for (i = 0; i < count; i++) { - struct drm_i915_gem_object *obj = objects[i]; + list_for_each_entry(obj, objects, exec_list) { /* XXX replace with semaphores */ if (obj->ring && ring != obj->ring) { ret = i915_gem_object_wait_rendering(obj, true); @@ -677,22 +680,10 @@ i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, return 0; } -static int -i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec, - uint64_t exec_offset) +static bool +i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) { - uint32_t exec_start, exec_len; - - exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - exec_len = (uint32_t) exec->batch_len; - - if ((exec_start | exec_len) & 0x7) - return -EINVAL; - - if (!exec_start) - return -EINVAL; - - return 0; + return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; } static int @@ -726,36 +717,119 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, return 0; } +static int +i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, + struct list_head *objects) +{ + struct drm_i915_gem_object *obj; + int flips; + + /* Check for any pending flips. As we only maintain a flip queue depth + * of 1, we can simply insert a WAIT for the next display flip prior + * to executing the batch and avoid stalling the CPU. 
+ */ + flips = 0; + list_for_each_entry(obj, objects, exec_list) { + if (obj->base.write_domain) + flips |= atomic_read(&obj->pending_flip); + } + if (flips) { + int plane, flip_mask, ret; + + for (plane = 0; flips >> plane; plane++) { + if (((flips >> plane) & 1) == 0) + continue; + + if (plane) + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; + else + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; + + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; + + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + } + } + + return 0; +} + +static void +i915_gem_execbuffer_move_to_active(struct list_head *objects, + struct intel_ring_buffer *ring) +{ + struct drm_i915_gem_object *obj; + + list_for_each_entry(obj, objects, exec_list) { + obj->base.read_domains = obj->base.pending_read_domains; + obj->base.write_domain = obj->base.pending_write_domain; + obj->fenced_gpu_access = obj->pending_fenced_gpu_access; + + i915_gem_object_move_to_active(obj, ring); + if (obj->base.write_domain) { + obj->dirty = 1; + list_move_tail(&obj->gpu_write_list, + &ring->gpu_write_list); + intel_mark_busy(ring->dev, obj); + } + + trace_i915_gem_object_change_domain(obj, + obj->base.read_domains, + obj->base.write_domain); + } +} + static void i915_gem_execbuffer_retire_commands(struct drm_device *dev, + struct drm_file *file, struct intel_ring_buffer *ring) { - uint32_t flush_domains = 0; + struct drm_i915_gem_request *request; + u32 flush_domains; - /* The sampler always gets flushed on i965 (sigh) */ + /* + * Ensure that the commands in the batch buffer are + * finished before the interrupt fires. + * + * The sampler always gets flushed on i965 (sigh). + */ + flush_domains = 0; if (INTEL_INFO(dev)->gen >= 4) flush_domains |= I915_GEM_DOMAIN_SAMPLER; ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains); -} + /* Add a breadcrumb for the completion of the batch buffer */ + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL || i915_add_request(dev, file, request, ring)) { + i915_gem_next_request_seqno(dev, ring); + kfree(request); + } +} static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file, struct drm_i915_gem_execbuffer2 *args, - struct drm_i915_gem_exec_object2 *exec_list) + struct drm_i915_gem_exec_object2 *exec) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object **object_list = NULL; + struct list_head objects; struct drm_i915_gem_object *batch_obj; struct drm_clip_rect *cliprects = NULL; - struct drm_i915_gem_request *request = NULL; struct intel_ring_buffer *ring; - int ret, i, flips; - uint64_t exec_offset; + int ret, i; - ret = validate_exec_list(exec_list, args->buffer_count); + if (!i915_gem_check_execbuffer(args)) { + DRM_ERROR("execbuf with invalid offset/length\n"); + return -EINVAL; + } + + ret = validate_exec_list(exec, args->buffer_count); if (ret) return ret; @@ -792,40 +866,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); return -EINVAL; } - object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count); - if (object_list == NULL) { - DRM_ERROR("Failed to allocate object list for %d buffers\n", - args->buffer_count); - ret = -ENOMEM; - goto pre_mutex_err; - } if (args->num_cliprects != 0) { - cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects), + cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), GFP_KERNEL); if (cliprects == NULL) { ret = -ENOMEM; goto 
pre_mutex_err; } - ret = copy_from_user(cliprects, - (struct drm_clip_rect __user *) - (uintptr_t) args->cliprects_ptr, - sizeof(*cliprects) * args->num_cliprects); - if (ret != 0) { - DRM_ERROR("copy %d cliprects failed: %d\n", - args->num_cliprects, ret); + if (copy_from_user(cliprects, + (struct drm_clip_rect __user *)(uintptr_t) + args->cliprects_ptr, + sizeof(*cliprects)*args->num_cliprects)) { ret = -EFAULT; goto pre_mutex_err; } } - request = kzalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) { - ret = -ENOMEM; - goto pre_mutex_err; - } - ret = i915_mutex_lock_interruptible(dev); if (ret) goto pre_mutex_err; @@ -837,49 +895,41 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } /* Look up object handles */ + INIT_LIST_HEAD(&objects); for (i = 0; i < args->buffer_count; i++) { struct drm_i915_gem_object *obj; - obj = to_intel_bo (drm_gem_object_lookup(dev, file, - exec_list[i].handle)); + obj = to_intel_bo(drm_gem_object_lookup(dev, file, + exec[i].handle)); if (obj == NULL) { DRM_ERROR("Invalid object handle %d at index %d\n", - exec_list[i].handle, i); + exec[i].handle, i); /* prevent error path from reading uninitialized data */ - args->buffer_count = i; ret = -ENOENT; goto err; } - object_list[i] = obj; - if (obj->in_execbuffer) { - DRM_ERROR("Object %p appears more than once in object list\n", - obj); - /* prevent error path from reading uninitialized data */ - args->buffer_count = i + 1; + if (!list_empty(&obj->exec_list)) { + DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n", + obj, exec[i].handle, i); ret = -EINVAL; goto err; } - obj->in_execbuffer = true; - obj->pending_fenced_gpu_access = false; + + list_add_tail(&obj->exec_list, &objects); } /* Move the objects en-masse into the GTT, evicting if necessary. */ - ret = i915_gem_execbuffer_reserve(dev, file, - object_list, exec_list, - args->buffer_count); + ret = i915_gem_execbuffer_reserve(dev, file, &objects, exec); if (ret) goto err; /* The objects are in their final locations, apply the relocations. 
*/ - ret = i915_gem_execbuffer_relocate(dev, file, - object_list, exec_list, - args->buffer_count); + ret = i915_gem_execbuffer_relocate(dev, file, &objects, exec); if (ret) { if (ret == -EFAULT) { ret = i915_gem_execbuffer_relocate_slow(dev, file, - object_list, - exec_list, + &objects, exec, args->buffer_count); BUG_ON(!mutex_is_locked(&dev->struct_mutex)); } @@ -888,7 +938,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } /* Set the pending read domains for the batch buffer to COMMAND */ - batch_obj = object_list[args->buffer_count-1]; + batch_obj = list_entry(objects.prev, + struct drm_i915_gem_object, + exec_list); if (batch_obj->base.pending_write_domain) { DRM_ERROR("Attempting to use self-modifying batch buffer\n"); ret = -EINVAL; @@ -896,115 +948,38 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; - /* Sanity check the batch buffer */ - exec_offset = batch_obj->gtt_offset; - ret = i915_gem_check_execbuffer(args, exec_offset); - if (ret != 0) { - DRM_ERROR("execbuf with invalid offset/length\n"); + ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); + if (ret) goto err; - } - ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring, - object_list, args->buffer_count); + ret = i915_gem_execbuffer_wait_for_flips(ring, &objects); if (ret) goto err; -#if WATCH_COHERENCY - for (i = 0; i < args->buffer_count; i++) { - i915_gem_object_check_coherency(object_list[i], - exec_list[i].handle); - } -#endif - -#if WATCH_EXEC - i915_gem_dump_object(batch_obj, - args->batch_len, - __func__, - ~0); -#endif - - /* Check for any pending flips. As we only maintain a flip queue depth - * of 1, we can simply insert a WAIT for the next display flip prior - * to executing the batch and avoid stalling the CPU. 
- */ - flips = 0; - for (i = 0; i < args->buffer_count; i++) { - if (object_list[i]->base.write_domain) - flips |= atomic_read(&object_list[i]->pending_flip); - } - if (flips) { - int plane, flip_mask; - - for (plane = 0; flips >> plane; plane++) { - if (((flips >> plane) & 1) == 0) - continue; - - if (plane) - flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; - else - flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - - ret = intel_ring_begin(ring, 2); - if (ret) - goto err; - - intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); - } - } - - /* Exec the batchbuffer */ - ret = ring->dispatch_execbuffer(ring, args, cliprects, exec_offset); - if (ret) { - DRM_ERROR("dispatch failed %d\n", ret); + ret = ring->dispatch_execbuffer(ring, + args, cliprects, + batch_obj->gtt_offset); + if (ret) goto err; - } - for (i = 0; i < args->buffer_count; i++) { - struct drm_i915_gem_object *obj = object_list[i]; - - obj->base.read_domains = obj->base.pending_read_domains; - obj->base.write_domain = obj->base.pending_write_domain; - obj->fenced_gpu_access = obj->pending_fenced_gpu_access; - - i915_gem_object_move_to_active(obj, ring); - if (obj->base.write_domain) { - obj->dirty = 1; - list_move_tail(&obj->gpu_write_list, - &ring->gpu_write_list); - intel_mark_busy(dev, obj); - } - - trace_i915_gem_object_change_domain(obj, - obj->base.read_domains, - obj->base.write_domain); - } - - /* - * Ensure that the commands in the batch buffer are - * finished before the interrupt fires - */ - i915_gem_execbuffer_retire_commands(dev, ring); - - if (i915_add_request(dev, file, request, ring)) - i915_gem_next_request_seqno(dev, ring); - else - request = NULL; + i915_gem_execbuffer_move_to_active(&objects, ring); + i915_gem_execbuffer_retire_commands(dev, file, ring); err: - for (i = 0; i < args->buffer_count; i++) { - object_list[i]->in_execbuffer = false; - drm_gem_object_unreference(&object_list[i]->base); + while (!list_empty(&objects)) { + struct drm_i915_gem_object *obj; + + obj = list_first_entry(&objects, + struct drm_i915_gem_object, + exec_list); + list_del_init(&obj->exec_list); + drm_gem_object_unreference(&obj->base); } mutex_unlock(&dev->struct_mutex); pre_mutex_err: - drm_free_large(object_list); kfree(cliprects); - kfree(request); - return ret; } -- cgit v1.2.3 From acb325062afc09c196f7d3888b81312e6ebcdc35 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 23 Nov 2010 00:41:00 -0500 Subject: drm/radeon/kms: improve pflip precision on r1xx-r4xx The update pending bit has a separate enable bit. Cc: Mario Kleiner Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r100.c | 8 +++++--- drivers/gpu/drm/radeon/radeon_reg.h | 2 ++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 2316f73db6c0..300b4a64d8fe 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -76,6 +76,8 @@ void r100_pre_page_flip(struct radeon_device *rdev, int crtc) /* make sure flip is at vb rather than hb */ tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset); tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL; + /* make sure pending bit is asserted */ + tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN; WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp); /* set pageflip to happen as late as possible in the vblank interval. 
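Taken together, the two hunks of this radeon patch pair an enable bit with a poll: the first asserts RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN so the update_pending flag can latch at all, and the second (below) spins on that flag. As straight-line C the pattern is roughly the following sketch; the iteration cap is an assumption added here for illustration, the patch itself loops unbounded:

	/* Sketch only: enable the pending flag, arm the flip, then wait
	 * for it to latch. Register names are those used in the diff.
	 */
	static void r100_flip_and_wait_sketch(struct radeon_device *rdev,
					      struct radeon_crtc *radeon_crtc,
					      u32 base)
	{
		u32 tmp;
		unsigned i;

		tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
		tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
		WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);

		WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, base);
		for (i = 0; i < 100000; i++)
			if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
			    RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
				break;
	}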
@@ -104,9 +106,9 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) /* update the scanout addresses */ WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); - /* Note: We don't wait for update_pending to assert, as this never - * happens for some reason on R1xx - R4xx. Adds a bit of imprecision. - */ + /* Wait for update_pending to go high. */ + while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)); + DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); /* Unlock the lock, so double-buffering can take place inside vblank */ tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK; diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 26c43e234350..0a310b7f71c3 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h @@ -510,6 +510,8 @@ # define RADEON_CRTC_TILE_EN (1 << 15) # define RADEON_CRTC_OFFSET_FLIP_CNTL (1 << 16) # define RADEON_CRTC_STEREO_OFFSET_EN (1 << 17) +# define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN (1 << 28) +# define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN (1 << 29) #define R300_CRTC_TILE_X0_Y0 0x0350 #define R300_CRTC2_TILE_X0_Y0 0x0358 -- cgit v1.2.3 From 602606a472963a67b234e6b5c99293de4aa9d06b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 28 Nov 2010 15:31:02 +0000 Subject: drm/i915/execbuffer: On error, starting unwinding from the previous object As the error occurred on the current object, it means that its state was not changed and so it should be excluded from the unwind. Reported-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d54070111f9d..66c898c8716d 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -530,6 +530,9 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, } while (1); err: + obj = list_entry(obj->exec_list.prev, + struct drm_i915_gem_object, + exec_list); while (objects != &obj->exec_list) { if (obj->gtt_space) i915_gem_object_unpin(obj); -- cgit v1.2.3 From 7d2cb39c332146b0906651a8567f8b81af4b1584 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 27 Nov 2010 17:38:29 +0000 Subject: drm/i915: Release fenced GTT mapping on suspend ... so that upon first use after resume we will reacquire the fence reg. 
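In miniature, the change is: tear down the GTT mmap of every fenced (tiled) object while clearing the registers, so that the first CPU access after resume takes a fresh fault through i915_gem_fault(), which is where the fence register gets set up again. A condensed sketch with a hypothetical helper name; the patch inlines this into i915_gem_reset_fences(), shown in the diff below:

	/* Sketch: force a post-resume re-fault for fenced objects. */
	static void release_fence_for_suspend(struct drm_i915_gem_object *obj)
	{
		if (obj->tiling_mode)	/* only tiled objects carry a fence */
			i915_gem_release_mmap(obj);
		i915_gem_clear_fence_reg(obj);
	}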
Reported-by: Keith Packard Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a090acdf3bd5..eae52de75a4c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1794,8 +1794,15 @@ static void i915_gem_reset_fences(struct drm_device *dev) for (i = 0; i < 16; i++) { struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; - if (reg->obj) - i915_gem_clear_fence_reg(reg->obj); + struct drm_i915_gem_object *obj = reg->obj; + + if (!obj) + continue; + + if (obj->tiling_mode) + i915_gem_release_mmap(obj); + + i915_gem_clear_fence_reg(obj); } } -- cgit v1.2.3 From 70eac33e7ac370dc137cabff7a4ba3094ca25a8c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 30 Nov 2010 14:07:47 +0000 Subject: drm/i915: Move instruction state invalidation from execbuffer to flush Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_ringbuffer.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b12578558268..7fc55a80be20 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -109,6 +109,10 @@ render_ring_flush(struct intel_ring_buffer *ring, if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) cmd |= MI_EXE_FLUSH; + if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && + (IS_G4X(dev) || IS_GEN5(dev))) + cmd |= MI_INVALIDATE_ISP; + #if WATCH_EXEC DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); #endif @@ -583,17 +587,6 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, intel_ring_advance(ring); } - if (IS_G4X(dev) || IS_GEN5(dev)) { - if (intel_ring_begin(ring, 2) == 0) { - intel_ring_emit(ring, MI_FLUSH | - MI_NO_WRITE_FLUSH | - MI_INVALIDATE_ISP ); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); - } - } - /* XXX breadcrumb */ - return 0; } -- cgit v1.2.3 From c4e7a4146798ce22c229dd21ed31f59f07c4119e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 30 Nov 2010 14:10:25 +0000 Subject: drm/i915/ringbuffer: Handle cliprects in the caller This makes the various rings more consistent by removing the anomalous handing of the rendering ring execbuffer dispatch. 
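After this patch every ring implements one minimal dispatch hook, quoted from the intel_ringbuffer.h hunk below, while the render-ring-only cliprect loop moves into the single caller that needs it:

	int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
				   u32 offset, u32 length);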
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 22 ++++---- drivers/gpu/drm/i915/i915_drv.h | 4 +- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 30 +++++++++-- drivers/gpu/drm/i915/intel_ringbuffer.c | 86 ++++++++++-------------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 +- 5 files changed, 67 insertions(+), 79 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 7960fd63ecb1..9a22da9b2083 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -352,16 +352,16 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) int i915_emit_box(struct drm_device *dev, - struct drm_clip_rect *boxes, - int i, int DR1, int DR4) + struct drm_clip_rect *box, + int DR1, int DR4) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_clip_rect box = boxes[i]; int ret; - if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { + if (box->y2 <= box->y1 || box->x2 <= box->x1 || + box->y2 <= 0 || box->x2 <= 0) { DRM_ERROR("Bad box %d,%d..%d,%d\n", - box.x1, box.y1, box.x2, box.y2); + box->x1, box->y1, box->x2, box->y2); return -EINVAL; } @@ -371,8 +371,8 @@ i915_emit_box(struct drm_device *dev, return ret; OUT_RING(GFX_OP_DRAWRECT_INFO_I965); - OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); - OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); + OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); + OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); OUT_RING(DR4); } else { ret = BEGIN_LP_RING(6); @@ -381,8 +381,8 @@ i915_emit_box(struct drm_device *dev, OUT_RING(GFX_OP_DRAWRECT_INFO); OUT_RING(DR1); - OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); - OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); + OUT_RING((box->x1 & 0xffff) | (box->y1 << 16)); + OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16)); OUT_RING(DR4); OUT_RING(0); } @@ -434,7 +434,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev, for (i = 0; i < count; i++) { if (i < nbox) { - ret = i915_emit_box(dev, cliprects, i, + ret = i915_emit_box(dev, &cliprects[i], cmd->DR1, cmd->DR4); if (ret) return ret; @@ -467,7 +467,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, count = nbox ? 
nbox : 1; for (i = 0; i < count; i++) { if (i < nbox) { - ret = i915_emit_box(dev, cliprects, i, + ret = i915_emit_box(dev, &cliprects[i], batch->DR1, batch->DR4); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e7c4108c94cd..590d8f2d0958 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -966,8 +966,8 @@ extern int i915_driver_device_is_agp(struct drm_device * dev); extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); extern int i915_emit_box(struct drm_device *dev, - struct drm_clip_rect *boxes, - int i, int DR1, int DR4); + struct drm_clip_rect *box, + int DR1, int DR4); extern int i915_reset(struct drm_device *dev, u8 flags); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 66c898c8716d..f57536a70a3a 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -825,6 +825,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_object *batch_obj; struct drm_clip_rect *cliprects = NULL; struct intel_ring_buffer *ring; + u32 exec_start, exec_len; int ret, i; if (!i915_gem_check_execbuffer(args)) { @@ -871,6 +872,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } if (args->num_cliprects != 0) { + if (ring != &dev_priv->render_ring) { + DRM_ERROR("clip rectangles are only valid with the render ring\n"); + return -EINVAL; + } + cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), GFP_KERNEL); if (cliprects == NULL) { @@ -959,11 +965,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) goto err; - ret = ring->dispatch_execbuffer(ring, - args, cliprects, - batch_obj->gtt_offset); - if (ret) - goto err; + exec_start = batch_obj->gtt_offset + args->batch_start_offset; + exec_len = args->batch_len; + if (cliprects) { + for (i = 0; i < args->num_cliprects; i++) { + ret = i915_emit_box(dev, &cliprects[i], + args->DR1, args->DR4); + if (ret) + goto err; + + ret = ring->dispatch_execbuffer(ring, + exec_start, exec_len); + if (ret) + goto err; + } + } else { + ret = ring->dispatch_execbuffer(ring, exec_start, exec_len); + if (ret) + goto err; + } i915_gem_execbuffer_move_to_active(&objects, ring); i915_gem_execbuffer_retire_commands(dev, file, ring); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 7fc55a80be20..21871b0766e2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -508,25 +508,18 @@ ring_status_page_get_seqno(struct intel_ring_buffer *ring) } static int -ring_dispatch_execbuffer(struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) +ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) { - uint32_t exec_start; int ret; - exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - ret = intel_ring_begin(ring, 2); if (ret) return ret; intel_ring_emit(ring, - MI_BATCH_BUFFER_START | - (2 << 6) | + MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); - intel_ring_emit(ring, exec_start); + intel_ring_emit(ring, offset); intel_ring_advance(ring); return 0; @@ -534,58 +527,40 @@ ring_dispatch_execbuffer(struct intel_ring_buffer *ring, 
static int render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) + u32 offset, u32 len) { struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; - int nbox = exec->num_cliprects; - uint32_t exec_start, exec_len; - int i, count, ret; - - exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - exec_len = (uint32_t) exec->batch_len; + int ret; trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); - count = nbox ? nbox : 1; - for (i = 0; i < count; i++) { - if (i < nbox) { - ret = i915_emit_box(dev, cliprects, i, - exec->DR1, exec->DR4); - if (ret) - return ret; - } + if (IS_I830(dev) || IS_845G(dev)) { + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; - if (IS_I830(dev) || IS_845G(dev)) { - ret = intel_ring_begin(ring, 4); - if (ret) - return ret; + intel_ring_emit(ring, MI_BATCH_BUFFER); + intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); + intel_ring_emit(ring, offset + len - 8); + intel_ring_emit(ring, 0); + } else { + ret = intel_ring_begin(ring, 2); + if (ret) + return ret; - intel_ring_emit(ring, MI_BATCH_BUFFER); - intel_ring_emit(ring, exec_start | MI_BATCH_NON_SECURE); - intel_ring_emit(ring, exec_start + exec_len - 4); - intel_ring_emit(ring, 0); + if (INTEL_INFO(dev)->gen >= 4) { + intel_ring_emit(ring, + MI_BATCH_BUFFER_START | (2 << 6) | + MI_BATCH_NON_SECURE_I965); + intel_ring_emit(ring, offset); } else { - ret = intel_ring_begin(ring, 2); - if (ret) - return ret; - - if (INTEL_INFO(dev)->gen >= 4) { - intel_ring_emit(ring, - MI_BATCH_BUFFER_START | (2 << 6) - | MI_BATCH_NON_SECURE_I965); - intel_ring_emit(ring, exec_start); - } else { - intel_ring_emit(ring, MI_BATCH_BUFFER_START - | (2 << 6)); - intel_ring_emit(ring, exec_start | - MI_BATCH_NON_SECURE); - } + intel_ring_emit(ring, + MI_BATCH_BUFFER_START | (2 << 6)); + intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); } - intel_ring_advance(ring); } + intel_ring_advance(ring); return 0; } @@ -904,22 +879,17 @@ static void gen6_ring_flush(struct intel_ring_buffer *ring, static int gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) + u32 offset, u32 len) { - uint32_t exec_start; int ret; - exec_start = (uint32_t) exec_offset + exec->batch_start_offset; - ret = intel_ring_begin(ring, 2); if (ret) return ret; intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); /* bit0-7 is the length on GEN6+ */ - intel_ring_emit(ring, exec_start); + intel_ring_emit(ring, offset); intel_ring_advance(ring); return 0; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 1747e329ee94..8e3526777926 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -57,9 +57,7 @@ struct intel_ring_buffer { u32 *seqno); u32 (*get_seqno)(struct intel_ring_buffer *ring); int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset); + u32 offset, u32 length); void (*cleanup)(struct intel_ring_buffer *ring); /** -- cgit v1.2.3 From 257e48f1474a1f5bfa6fcafb12f77a8479063293 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 29 Nov 2010 16:19:24 +0000 Subject: drm/i915/lvds: Disable panel-fitter on gen4 for 1:1 scale factors Signed-off-by: Chris Wilson --- 
drivers/gpu/drm/i915/intel_lvds.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 4324a326f98e..5b88e145ee79 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -269,14 +269,13 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; - pfit_control |= PFIT_ENABLE; /* 965+ is easy, it does everything in hw */ if (scaled_width > scaled_height) - pfit_control |= PFIT_SCALING_PILLAR; + pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR; else if (scaled_width < scaled_height) - pfit_control |= PFIT_SCALING_LETTER; - else - pfit_control |= PFIT_SCALING_AUTO; + pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER; + else if (adjusted_mode->hdisplay != mode->hdisplay) + pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO; } else { u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; @@ -323,13 +322,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, * Full scaling, even if it changes the aspect ratio. * Fortunately this is all done for us in hw. */ - pfit_control |= PFIT_ENABLE; - if (INTEL_INFO(dev)->gen >= 4) - pfit_control |= PFIT_SCALING_AUTO; - else - pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | - VERT_INTERP_BILINEAR | - HORIZ_INTERP_BILINEAR); + if (mode->vdisplay != adjusted_mode->vdisplay || + mode->hdisplay != adjusted_mode->hdisplay) { + pfit_control |= PFIT_ENABLE; + if (INTEL_INFO(dev)->gen >= 4) + pfit_control |= PFIT_SCALING_AUTO; + else + pfit_control |= (VERT_AUTO_SCALE | + VERT_INTERP_BILINEAR | + HORIZ_AUTO_SCALE | + HORIZ_INTERP_BILINEAR); + } break; default: -- cgit v1.2.3 From 87ca9c8a7ea9c8c7ce1561edaad1aa8570f1a01e Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 2 Dec 2010 09:42:56 +0000 Subject: drm/i915: Prevent stalling for a GTT read back from a read-only GPU target Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 6 ++++++ drivers/gpu/drm/i915/i915_gem.c | 9 ++++++--- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 1 + 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 590d8f2d0958..7b37c198cb19 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -728,6 +728,12 @@ struct drm_i915_gem_object { */ unsigned int dirty : 1; + /** + * This is set if the object has been written to since the last + * GPU flush. + */ + unsigned int pending_gpu_write : 1; + /** * Fence register bits (if any) for this object. Will be set * as needed when mapped into the GTT. 
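The new bit encodes a simple rule, which the i915_gem.c hunk below applies in i915_gem_object_set_to_gtt_domain(): a GTT access only has to stall on rendering when a GPU write may still be outstanding, or when the caller itself intends to write. As a sketch (hypothetical helper, not in the patch):

	/* Stall only for a possible GPU write, or for CPU write access. */
	static bool gtt_domain_change_must_wait(struct drm_i915_gem_object *obj,
						bool write)
	{
		return obj->pending_gpu_write || write;
	}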
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index eae52de75a4c..c3e6d7bda6e1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1643,6 +1643,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) obj->last_fenced_ring = NULL; obj->active = 0; + obj->pending_gpu_write = false; drm_gem_object_unreference(&obj->base); WARN_ON(i915_verify_lists(dev)); @@ -2810,9 +2811,11 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) return -EINVAL; i915_gem_object_flush_gpu_write_domain(obj); - ret = i915_gem_object_wait_rendering(obj, true); - if (ret) - return ret; + if (obj->pending_gpu_write || write) { + ret = i915_gem_object_wait_rendering(obj, true); + if (ret) + return ret; + } i915_gem_object_flush_cpu_write_domain(obj); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index f57536a70a3a..af01a58a643b 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -775,6 +775,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, i915_gem_object_move_to_active(obj, ring); if (obj->base.write_domain) { obj->dirty = 1; + obj->pending_gpu_write = true; list_move_tail(&obj->gpu_write_list, &ring->gpu_write_list); intel_mark_busy(ring->dev, obj); -- cgit v1.2.3 From d9e86c0ee60f323e890484628f351bf50fa9a15d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 10 Nov 2010 16:40:20 +0000 Subject: drm/i915: Pipelined fencing [infrastructure] With this change, every batchbuffer can use all available fences (save pinned and scanout, of course) without ever stalling the gpu! In theory. Currently the actual pipelined update of the register is disabled due to some stability issues. However, just the deferred update is a significant win. Based on a series of patches by Daniel Vetter. The premise is that before every access to a buffer through the GTT we have to declare whether we need a register or not. If the access is by the GPU, a pipelined update to the register is made via the ringbuffer, and we track the last seqno of the batches that access it. If by the CPU we wait for the last GPU access and update the register (either to clear or to set it for the current buffer). One advantage of being able to pipeline changes is that we can defer the actual updating of the fence register until we first need to access the object through the GTT, i.e. we can eliminate the stall on set_tiling. This is important as the userspace bo cache does not track the tiling status of active buffers which generate frequent stalls on gen3 when enabling tiling for an already bound buffer. 
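Condensed from the i915_gem.c diff below (not a verbatim excerpt), the core decision in i915_gem_object_get_fence() reduces to:

	/* GPU access: queue the register update on the ring and remember
	 * the seqno. CPU access: wait out the last fenced GPU access,
	 * then write the register directly.
	 */
	if (pipelined) {
		reg->setup_seqno = i915_gem_next_request_seqno(dev, pipelined);
		obj->last_fenced_seqno = reg->setup_seqno;
		obj->last_fenced_ring = pipelined;
	} else {
		ret = i915_gem_object_flush_fence(obj, NULL, interruptible);
		if (ret)
			return ret;
	}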
Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter --- drivers/gpu/drm/i915/i915_drv.h | 10 +- drivers/gpu/drm/i915/i915_gem.c | 360 ++++++++++++++++++----------- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 20 +- drivers/gpu/drm/i915/i915_gem_tiling.c | 23 +- drivers/gpu/drm/i915/intel_display.c | 17 +- drivers/gpu/drm/i915/intel_overlay.c | 10 + 6 files changed, 274 insertions(+), 166 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7b37c198cb19..af9ff40b135b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -126,6 +126,7 @@ struct drm_i915_master_private { struct drm_i915_fence_reg { struct list_head lru_list; struct drm_i915_gem_object *obj; + uint32_t setup_seqno; }; struct sdvo_device_mapping { @@ -752,6 +753,7 @@ struct drm_i915_gem_object { * Current tiling mode for the object. */ unsigned int tiling_mode : 2; + unsigned int tiling_changed : 1; /** How many users have pinned this object in GTT space. The following * users can each hold at most one reference: pwrite/pread, pin_ioctl @@ -1121,10 +1123,10 @@ i915_gem_next_request_seqno(struct drm_device *dev, return ring->outstanding_lazy_request = dev_priv->next_seqno; } -int __must_check i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, - bool interruptible); -int __must_check i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, - bool interruptible); +int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined, + bool interruptible); +int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_reset(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c3e6d7bda6e1..23d2417a3585 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -47,7 +47,8 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_obje static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, unsigned alignment, bool map_and_fenceable); -static void i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj); +static void i915_gem_clear_fence_reg(struct drm_device *dev, + struct drm_i915_fence_reg *reg); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_i915_gem_object *obj, struct drm_i915_gem_pwrite *args, @@ -684,7 +685,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, goto out_unpin_pages; } - ret = i915_gem_object_set_to_gtt_domain(obj, 1); + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + goto out_unpin_pages; + + ret = i915_gem_object_put_fence(obj); if (ret) goto out_unpin_pages; @@ -966,14 +971,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, */ if (obj->phys_obj) ret = i915_gem_phys_pwrite(dev, obj, args, file); - else if (obj->tiling_mode == I915_TILING_NONE && - obj->gtt_space && + else if (obj->gtt_space && obj->base.write_domain != I915_GEM_DOMAIN_CPU) { ret = i915_gem_object_pin(obj, 0, true); if (ret) goto out; - ret = i915_gem_object_set_to_gtt_domain(obj, 1); + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + goto out_unpin; + + ret = i915_gem_object_put_fence(obj); if (ret) goto out_unpin; @@ -1205,12 +1213,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (ret) goto unlock; - /* Need a new fence register? 
*/ - if (obj->tiling_mode != I915_TILING_NONE) { - ret = i915_gem_object_get_fence_reg(obj, true); - if (ret) - goto unlock; - } + if (obj->tiling_mode == I915_TILING_NONE) + ret = i915_gem_object_put_fence(obj); + else + ret = i915_gem_object_get_fence(obj, NULL, true); + if (ret) + goto unlock; if (i915_gem_object_is_inactive(obj)) list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); @@ -1608,7 +1616,6 @@ i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) { list_del_init(&obj->ring_list); obj->last_rendering_seqno = 0; - obj->last_fenced_seqno = 0; } static void @@ -1640,7 +1647,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) i915_gem_object_move_off_active(obj); obj->fenced_gpu_access = false; - obj->last_fenced_ring = NULL; obj->active = 0; obj->pending_gpu_write = false; @@ -1803,7 +1809,11 @@ static void i915_gem_reset_fences(struct drm_device *dev) if (obj->tiling_mode) i915_gem_release_mmap(obj); - i915_gem_clear_fence_reg(obj); + reg->obj->fence_reg = I915_FENCE_REG_NONE; + reg->obj->fenced_gpu_access = false; + reg->obj->last_fenced_seqno = 0; + reg->obj->last_fenced_ring = NULL; + i915_gem_clear_fence_reg(dev, reg); } } @@ -2114,8 +2124,9 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) } /* release the fence reg _after_ flushing */ - if (obj->fence_reg != I915_FENCE_REG_NONE) - i915_gem_clear_fence_reg(obj); + ret = i915_gem_object_put_fence(obj); + if (ret == -ERESTARTSYS) + return ret; i915_gem_gtt_unbind_object(obj); i915_gem_object_put_pages_gtt(obj); @@ -2357,59 +2368,118 @@ static int i830_write_fence_reg(struct drm_i915_gem_object *obj, return 0; } -static int i915_find_fence_reg(struct drm_device *dev, - bool interruptible) +static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno) +{ + return i915_seqno_passed(ring->get_seqno(ring), seqno); +} + +static int +i915_gem_object_flush_fence(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined, + bool interruptible) +{ + int ret; + + if (obj->fenced_gpu_access) { + if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) + i915_gem_flush_ring(obj->base.dev, + obj->last_fenced_ring, + 0, obj->base.write_domain); + + obj->fenced_gpu_access = false; + } + + if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) { + if (!ring_passed_seqno(obj->last_fenced_ring, + obj->last_fenced_seqno)) { + ret = i915_do_wait_request(obj->base.dev, + obj->last_fenced_seqno, + interruptible, + obj->last_fenced_ring); + if (ret) + return ret; + } + + obj->last_fenced_seqno = 0; + obj->last_fenced_ring = NULL; + } + + return 0; +} + +int +i915_gem_object_put_fence(struct drm_i915_gem_object *obj) +{ + int ret; + + if (obj->tiling_mode) + i915_gem_release_mmap(obj); + + ret = i915_gem_object_flush_fence(obj, NULL, true); + if (ret) + return ret; + + if (obj->fence_reg != I915_FENCE_REG_NONE) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + i915_gem_clear_fence_reg(obj->base.dev, + &dev_priv->fence_regs[obj->fence_reg]); + + obj->fence_reg = I915_FENCE_REG_NONE; + } + + return 0; +} + +static struct drm_i915_fence_reg * +i915_find_fence_reg(struct drm_device *dev, + struct intel_ring_buffer *pipelined) { struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_fence_reg *reg; - struct drm_i915_gem_object *obj = NULL; - int i, avail, ret; + struct drm_i915_fence_reg *reg, *first, *avail; + int i; /* First try to find a free reg */ - avail = 0; + avail = NULL; for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; 
i++) { reg = &dev_priv->fence_regs[i]; if (!reg->obj) - return i; + return reg; if (!reg->obj->pin_count) - avail++; + avail = reg; } - if (avail == 0) - return -ENOSPC; + if (avail == NULL) + return NULL; /* None available, try to steal one or wait for a user to finish */ - avail = I915_FENCE_REG_NONE; - list_for_each_entry(reg, &dev_priv->mm.fence_list, - lru_list) { - obj = reg->obj; - if (obj->pin_count) + avail = first = NULL; + list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) { + if (reg->obj->pin_count) continue; - /* found one! */ - avail = obj->fence_reg; - break; - } + if (first == NULL) + first = reg; - BUG_ON(avail == I915_FENCE_REG_NONE); + if (!pipelined || + !reg->obj->last_fenced_ring || + reg->obj->last_fenced_ring == pipelined) { + avail = reg; + break; + } + } - /* We only have a reference on obj from the active list. put_fence_reg - * might drop that one, causing a use-after-free in it. So hold a - * private reference to obj like the other callers of put_fence_reg - * (set_tiling ioctl) do. */ - drm_gem_object_reference(&obj->base); - ret = i915_gem_object_put_fence_reg(obj, interruptible); - drm_gem_object_unreference(&obj->base); - if (ret != 0) - return ret; + if (avail == NULL) + avail = first; return avail; } /** - * i915_gem_object_get_fence_reg - set up a fence reg for an object + * i915_gem_object_get_fence - set up a fence reg for an object * @obj: object to map through a fence reg + * @pipelined: ring on which to queue the change, or NULL for CPU access + * @interruptible: must we wait uninterruptibly for the register to retire? * * When mapping objects through the GTT, userspace wants to be able to write * to them without having to worry about swizzling if the object is tiled. @@ -2421,52 +2491,119 @@ static int i915_find_fence_reg(struct drm_device *dev, * and tiling format. */ int -i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, - bool interruptible) +i915_gem_object_get_fence(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *pipelined, + bool interruptible) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_fence_reg *reg = NULL; - struct intel_ring_buffer *pipelined = NULL; + struct drm_i915_fence_reg *reg; int ret; - /* Just update our place in the LRU if our fence is getting used. */ + /* Just update our place in the LRU if our fence is getting reused. 
*/ if (obj->fence_reg != I915_FENCE_REG_NONE) { reg = &dev_priv->fence_regs[obj->fence_reg]; list_move_tail(®->lru_list, &dev_priv->mm.fence_list); + + if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) + pipelined = NULL; + + if (!pipelined) { + if (reg->setup_seqno) { + if (!ring_passed_seqno(obj->last_fenced_ring, + reg->setup_seqno)) { + ret = i915_do_wait_request(obj->base.dev, + reg->setup_seqno, + interruptible, + obj->last_fenced_ring); + if (ret) + return ret; + } + + reg->setup_seqno = 0; + } + } else if (obj->last_fenced_ring && + obj->last_fenced_ring != pipelined) { + ret = i915_gem_object_flush_fence(obj, + pipelined, + interruptible); + if (ret) + return ret; + } else if (obj->tiling_changed) { + if (obj->fenced_gpu_access) { + if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) + i915_gem_flush_ring(obj->base.dev, obj->ring, + 0, obj->base.write_domain); + + obj->fenced_gpu_access = false; + } + } + + if (!obj->fenced_gpu_access && !obj->last_fenced_seqno) + pipelined = NULL; + BUG_ON(!pipelined && reg->setup_seqno); + + if (obj->tiling_changed) { + if (pipelined) { + reg->setup_seqno = + i915_gem_next_request_seqno(dev, pipelined); + obj->last_fenced_seqno = reg->setup_seqno; + obj->last_fenced_ring = pipelined; + } + goto update; + } + return 0; } - switch (obj->tiling_mode) { - case I915_TILING_NONE: - WARN(1, "allocating a fence for non-tiled object?\n"); - break; - case I915_TILING_X: - if (!obj->stride) - return -EINVAL; - WARN((obj->stride & (512 - 1)), - "object 0x%08x is X tiled but has non-512B pitch\n", - obj->gtt_offset); - break; - case I915_TILING_Y: - if (!obj->stride) - return -EINVAL; - WARN((obj->stride & (128 - 1)), - "object 0x%08x is Y tiled but has non-128B pitch\n", - obj->gtt_offset); - break; - } + reg = i915_find_fence_reg(dev, pipelined); + if (reg == NULL) + return -ENOSPC; - ret = i915_find_fence_reg(dev, interruptible); - if (ret < 0) + ret = i915_gem_object_flush_fence(obj, pipelined, interruptible); + if (ret) return ret; - obj->fence_reg = ret; - reg = &dev_priv->fence_regs[obj->fence_reg]; - list_add_tail(®->lru_list, &dev_priv->mm.fence_list); + if (reg->obj) { + struct drm_i915_gem_object *old = reg->obj; + + drm_gem_object_reference(&old->base); + + if (old->tiling_mode) + i915_gem_release_mmap(old); + + /* XXX The pipelined change over appears to be incoherent. */ + ret = i915_gem_object_flush_fence(old, + NULL, //pipelined, + interruptible); + if (ret) { + drm_gem_object_unreference(&old->base); + return ret; + } + + if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0) + pipelined = NULL; + + old->fence_reg = I915_FENCE_REG_NONE; + old->last_fenced_ring = pipelined; + old->last_fenced_seqno = + pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0; + + drm_gem_object_unreference(&old->base); + } else if (obj->last_fenced_seqno == 0) + pipelined = NULL; reg->obj = obj; + list_move_tail(®->lru_list, &dev_priv->mm.fence_list); + obj->fence_reg = reg - dev_priv->fence_regs; + obj->last_fenced_ring = pipelined; + reg->setup_seqno = + pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0; + obj->last_fenced_seqno = reg->setup_seqno; + +update: + obj->tiling_changed = false; switch (INTEL_INFO(dev)->gen) { case 6: ret = sandybridge_write_fence_reg(obj, pipelined); @@ -2497,87 +2634,34 @@ i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj, * data structures in dev_priv and obj. 
*/ static void -i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj) +i915_gem_clear_fence_reg(struct drm_device *dev, + struct drm_i915_fence_reg *reg) { - struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[obj->fence_reg]; - uint32_t fence_reg; + uint32_t fence_reg = reg - dev_priv->fence_regs; switch (INTEL_INFO(dev)->gen) { case 6: - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + - (obj->fence_reg * 8), 0); + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0); break; case 5: case 4: - I915_WRITE64(FENCE_REG_965_0 + (obj->fence_reg * 8), 0); + I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0); break; case 3: - if (obj->fence_reg >= 8) - fence_reg = FENCE_REG_945_8 + (obj->fence_reg - 8) * 4; + if (fence_reg >= 8) + fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4; else case 2: - fence_reg = FENCE_REG_830_0 + obj->fence_reg * 4; + fence_reg = FENCE_REG_830_0 + fence_reg * 4; I915_WRITE(fence_reg, 0); break; } - reg->obj = NULL; - obj->fence_reg = I915_FENCE_REG_NONE; list_del_init(®->lru_list); -} - -/** - * i915_gem_object_put_fence_reg - waits on outstanding fenced access - * to the buffer to finish, and then resets the fence register. - * @obj: tiled object holding a fence register. - * @bool: whether the wait upon the fence is interruptible - * - * Zeroes out the fence register itself and clears out the associated - * data structures in dev_priv and obj. - */ -int -i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj, - bool interruptible) -{ - struct drm_device *dev = obj->base.dev; - int ret; - - if (obj->fence_reg == I915_FENCE_REG_NONE) - return 0; - - /* If we've changed tiling, GTT-mappings of the object - * need to re-fault to ensure that the correct fence register - * setup is in place. - */ - i915_gem_release_mmap(obj); - - /* On the i915, GPU access to tiled buffers is via a fence, - * therefore we must wait for any outstanding access to complete - * before clearing the fence. - */ - if (obj->fenced_gpu_access) { - i915_gem_object_flush_gpu_write_domain(obj); - obj->fenced_gpu_access = false; - } - - if (obj->last_fenced_seqno) { - ret = i915_do_wait_request(dev, - obj->last_fenced_seqno, - interruptible, - obj->last_fenced_ring); - if (ret) - return ret; - - obj->last_fenced_seqno = false; - } - - i915_gem_object_flush_gtt_write_domain(obj); - i915_gem_clear_fence_reg(obj); - - return 0; + reg->obj = NULL; + reg->setup_seqno = 0; } /** diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index af01a58a643b..9bdc495e17bb 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -424,7 +424,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, } static int -i915_gem_execbuffer_reserve(struct drm_device *dev, +i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, struct drm_file *file, struct list_head *objects, struct drm_i915_gem_exec_object2 *exec) @@ -499,10 +499,15 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, } if (need_fence) { - ret = i915_gem_object_get_fence_reg(obj, true); + ret = i915_gem_object_get_fence(obj, ring, 1); + if (ret) + break; + } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode == I915_TILING_NONE) { + /* XXX pipelined! 
*/ + ret = i915_gem_object_put_fence(obj); if (ret) break; - } obj->pending_fenced_gpu_access = need_fence; @@ -522,7 +527,7 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, /* First attempt, just clear anything that is purgeable. * Second attempt, clear the entire GTT. */ - ret = i915_gem_evict_everything(dev, retry == 0); + ret = i915_gem_evict_everything(ring->dev, retry == 0); if (ret) return ret; @@ -548,6 +553,7 @@ err: static int i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_file *file, + struct intel_ring_buffer *ring, struct list_head *objects, struct drm_i915_gem_exec_object2 *exec, int count) @@ -590,7 +596,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, goto err; } - ret = i915_gem_execbuffer_reserve(dev, file, objects, exec); + ret = i915_gem_execbuffer_reserve(ring, file, objects, exec); if (ret) goto err; @@ -930,7 +936,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } /* Move the objects en-masse into the GTT, evicting if necessary. */ - ret = i915_gem_execbuffer_reserve(dev, file, &objects, exec); + ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec); if (ret) goto err; @@ -938,7 +944,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ret = i915_gem_execbuffer_relocate(dev, file, &objects, exec); if (ret) { if (ret == -EFAULT) { - ret = i915_gem_execbuffer_relocate_slow(dev, file, + ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, &objects, exec, args->buffer_count); BUG_ON(!mutex_is_locked(&dev->struct_mutex)); diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 1c5fdb30f272..22a32b9932c5 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -244,9 +244,6 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) if (INTEL_INFO(obj->base.dev)->gen >= 4) return true; - if (!obj->gtt_space) - return true; - if (INTEL_INFO(obj->base.dev)->gen == 3) { if (obj->gtt_offset & ~I915_FENCE_START_MASK) return false; @@ -345,27 +342,21 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, * tiling mode. Otherwise we can just leave it alone, but * need to ensure that any fence register is cleared. */ - if (!i915_gem_object_fence_ok(obj, args->tiling_mode)) - ret = i915_gem_object_unbind(obj); - else if (obj->fence_reg != I915_FENCE_REG_NONE) - ret = i915_gem_object_put_fence_reg(obj, true); - else - i915_gem_release_mmap(obj); + i915_gem_release_mmap(obj); - if (ret != 0) { - args->tiling_mode = obj->tiling_mode; - args->stride = obj->stride; - goto err; - } + obj->map_and_fenceable = + obj->gtt_space == NULL || + (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && + i915_gem_object_fence_ok(obj, args->tiling_mode)); + obj->tiling_changed = true; obj->tiling_mode = args->tiling_mode; obj->stride = args->stride; } -err: drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); - return ret; + return 0; } /** diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c2c94a26f92e..e141dd2e46e5 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1474,7 +1474,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, * a fence as the cost is not that onerous. 
*/ if (obj->tiling_mode != I915_TILING_NONE) { - ret = i915_gem_object_get_fence_reg(obj, false); + ret = i915_gem_object_get_fence(obj, pipelined, false); if (ret) goto err_unpin; } @@ -4370,6 +4370,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, /* we only need to pin inside GTT if cursor is non-phy */ mutex_lock(&dev->struct_mutex); if (!dev_priv->info->cursor_needs_physical) { + if (obj->tiling_mode) { + DRM_ERROR("cursor cannot be tiled\n"); + ret = -EINVAL; + goto fail_locked; + } + ret = i915_gem_object_pin(obj, PAGE_SIZE, true); if (ret) { DRM_ERROR("failed to pin cursor bo\n"); @@ -4382,6 +4388,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, goto fail_unpin; } + ret = i915_gem_object_put_fence(obj); + if (ret) { + DRM_ERROR("failed to move cursor bo into the GTT\n"); + goto fail_unpin; + } + addr = obj->gtt_offset; } else { int align = IS_I830(dev) ? 16 * 1024 : 256; @@ -4966,6 +4978,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) i915_gem_object_unpin(work->old_fb_obj); drm_gem_object_unreference(&work->pending_flip_obj->base); drm_gem_object_unreference(&work->old_fb_obj->base); + mutex_unlock(&work->dev->struct_mutex); kfree(work); } @@ -5009,10 +5022,12 @@ static void do_intel_finish_page_flip(struct drm_device *dev, spin_unlock_irqrestore(&dev->event_lock, flags); obj = work->old_fb_obj; + atomic_clear_mask(1 << intel_crtc->plane, &obj->pending_flip.counter); if (atomic_read(&obj->pending_flip) == 0) wake_up(&dev_priv->pending_flip_queue); + schedule_work(&work->work); trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index af715cc03ee0..d0c1add393a3 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -787,6 +787,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret != 0) goto out_unpin; + ret = i915_gem_object_put_fence(new_bo); + if (ret) + goto out_unpin; + if (!overlay->active) { regs = intel_overlay_map_regs(overlay); if (!regs) { @@ -1161,6 +1165,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, mutex_lock(&dev->mode_config.mutex); mutex_lock(&dev->struct_mutex); + if (new_bo->tiling_mode) { + DRM_ERROR("buffer used for overlay image can not be tiled\n"); + ret = -EINVAL; + goto out_unlock; + } + ret = intel_overlay_recover_from_interrupt(overlay, true); if (ret != 0) goto out_unlock; -- cgit v1.2.3 From c6748e09eed072be077fe583976516b76daf42ec Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Nov 2010 16:02:21 +0000 Subject: drm/i915: Remove inactive LRU tracking from set_domain_ioctl As the userspace mappings are torn down on every GPU write, we prefer to track when the buffer is activated (via a fresh i915_gem_fault). This makes the LRU conceptually simpler. With coherent mappings, the remaining use-case for set_domain_ioctl is GPU synchronisation. 
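For illustration only, a minimal sketch of the tracking this message describes, assuming the i915 headers and a hypothetical helper name (the real logic lives inside the driver's fault handler, i915_gem_fault(), rather than in a standalone function):

/* Sketch only: LRU bookkeeping moves from set_domain_ioctl into the
 * fault path.  bump_inactive_lru() is a hypothetical helper name. */
static void bump_inactive_lru(struct drm_i915_private *dev_priv,
			      struct drm_i915_gem_object *obj)
{
	/* A freshly faulted object becomes the most-recently-used
	 * entry on the inactive LRU. */
	if (i915_gem_object_is_inactive(obj))
		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
}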
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 23d2417a3585..9e036010e3fc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1018,7 +1018,6 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_set_domain *args = data; struct drm_i915_gem_object *obj; uint32_t read_domains = args->read_domains; @@ -1051,21 +1050,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, goto unlock; } - intel_mark_busy(dev, obj); - if (read_domains & I915_GEM_DOMAIN_GTT) { ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); - /* Update the LRU on the fence for the CPU access that's - * about to occur. - */ - if (obj->fence_reg != I915_FENCE_REG_NONE) { - struct drm_i915_fence_reg *reg = - &dev_priv->fence_regs[obj->fence_reg]; - list_move_tail(®->lru_list, - &dev_priv->mm.fence_list); - } - /* Silently promote "you're not bound, there was nothing to do" * to success, since the client was just asking us to * make sure everything was done. @@ -1076,10 +1063,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); } - /* Maintain LRU order of "inactive" objects */ - if (ret == 0 && i915_gem_object_is_inactive(obj)) - list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); - drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); -- cgit v1.2.3 From 60de2ba51eaba9eefcc355cb20c8582b1481e755 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 12 Nov 2010 15:53:11 +0000 Subject: drm/i915: Kill the get_fence tracepoint As the tracepoint is now decoupled from when the actual register is assigned and was never complemented by detailing when the object lost its fence, it has outlived its limited usefulness. Profiling the actual stalls is a far more profitable venture anyway. 
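To make the gap concrete: the event removed below only ever recorded fence acquisition. A matching release-side event, which never existed in the tree, would have had to look roughly like this sketch (hypothetical, mirroring the removed definition):

/* Hypothetical complement to the removed tracepoint -- never merged. */
TRACE_EVENT(i915_gem_object_lose_fence,

	    TP_PROTO(struct drm_i915_gem_object *obj, int fence),

	    TP_ARGS(obj, fence),

	    TP_STRUCT__entry(
			     __field(struct drm_i915_gem_object *, obj)
			     __field(int, fence)
			     ),

	    TP_fast_assign(
			   __entry->obj = obj;
			   __entry->fence = fence;
			   ),

	    TP_printk("obj=%p, fence=%d", __entry->obj, __entry->fence)
);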
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 3 --- drivers/gpu/drm/i915/i915_trace.h | 22 ---------------------- 2 files changed, 25 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 9e036010e3fc..d99212fe54ed 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2603,9 +2603,6 @@ update: break; } - trace_i915_gem_object_get_fence(obj, - obj->fence_reg, - obj->tiling_mode); return ret; } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 1df7262ae077..7f0fc3ed61aa 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -80,28 +80,6 @@ TRACE_EVENT(i915_gem_object_change_domain, __entry->read_domains, __entry->write_domain) ); -TRACE_EVENT(i915_gem_object_get_fence, - - TP_PROTO(struct drm_i915_gem_object *obj, int fence, int tiling_mode), - - TP_ARGS(obj, fence, tiling_mode), - - TP_STRUCT__entry( - __field(struct drm_i915_gem_object *, obj) - __field(int, fence) - __field(int, tiling_mode) - ), - - TP_fast_assign( - __entry->obj = obj; - __entry->fence = fence; - __entry->tiling_mode = tiling_mode; - ), - - TP_printk("obj=%p, fence=%d, tiling=%d", - __entry->obj, __entry->fence, __entry->tiling_mode) -); - DECLARE_EVENT_CLASS(i915_gem_object, TP_PROTO(struct drm_i915_gem_object *obj), -- cgit v1.2.3 From 7c74cbd01b2698583fb74ebdfcd7ef4c768e6346 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 23 Sep 2010 11:03:01 +1000 Subject: drm/nouveau: tidy fifo swmthd handler a little Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_irq.c | 56 +++++++++++++++++++---------------- 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 7bfd9e6c9d67..a4fa9e14d66d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -108,36 +108,45 @@ nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data) } static bool -nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data) +nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) { - struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = NULL; + struct nouveau_gpuobj *obj; const int subc = (addr >> 13) & 0x7; const int mthd = addr & 0x1ffc; + bool handled = false; + u32 engine; - if (mthd == 0x0000) { - struct nouveau_gpuobj *gpuobj; - - gpuobj = nouveau_ramht_find(chan, data); - if (!gpuobj) - return false; + if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) + chan = dev_priv->fifos[chid]; + if (unlikely(!chan)) + return false; - if (gpuobj->engine != NVOBJ_ENGINE_SW) - return false; + switch (mthd) { + case 0x0000: /* bind object to subchannel */ + obj = nouveau_ramht_find(chan, data); + if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW)) + break; - chan->sw_subchannel[subc] = gpuobj->class; - nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev, - NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4)); - return true; - } + chan->sw_subchannel[subc] = obj->class; + engine = 0x0000000f << (subc * 4); - /* hw object */ - if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4))) - return false; + nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000); + handled = true; + break; + default: + engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE); + if (unlikely(((engine >> (subc * 4)) & 0xf) != 0)) + 
break; - if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data)) - return false; + if (!nouveau_call_method(chan, chan->sw_subchannel[subc], + mthd, data)) + handled = true; + break; + } - return true; + return handled; } static void @@ -150,14 +159,11 @@ nouveau_fifo_irq_handler(struct drm_device *dev) reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1; while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { - struct nouveau_channel *chan = NULL; uint32_t chid, get; nv_wr32(dev, NV03_PFIFO_CACHES, 0); chid = engine->fifo.channel_id(dev); - if (chid >= 0 && chid < engine->fifo.channels) - chan = dev_priv->fifos[chid]; get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET); if (status & NV_PFIFO_INTR_CACHE_ERROR) { @@ -184,7 +190,7 @@ nouveau_fifo_irq_handler(struct drm_device *dev) NV40_PFIFO_CACHE1_DATA(ptr)); } - if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) { + if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) { NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d " "Mthd 0x%04x Data 0x%08x\n", chid, (mthd >> 13) & 7, mthd & 0x1ffc, -- cgit v1.2.3 From ceed5f30bf0f515b52246230e5faacf89983fd8f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 5 Oct 2010 16:41:29 +1000 Subject: drm/nouveau: disallow fbcon accel if running in interrupt context A future commit will add locking to the DRM's channel, and there are numerous problems that come up if we allow printk from an interrupt context to be accelerated. It seems saner to just disallow it completely. As a nice side-effect, all the "to accel or not to accel" logic gets moved out of the chipset-specific code. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 137 +++++++++++++++++++++++--------- drivers/gpu/drm/nouveau/nouveau_fbcon.h | 12 +-- drivers/gpu/drm/nouveau/nv04_fbcon.c | 68 ++++++---------- drivers/gpu/drm/nouveau/nv50_fbcon.c | 69 ++++++---------- 4 files changed, 153 insertions(+), 133 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 02a4d1fd4845..22e83adcc930 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -49,6 +49,90 @@ #include "nouveau_fbcon.h" #include "nouveau_dma.h" +static void +nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + ret = -ENODEV; + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED)) { + if (dev_priv->card_type < NV_50) + ret = nv04_fbcon_fillrect(info, rect); + else + if (dev_priv->card_type < NV_C0) + ret = nv50_fbcon_fillrect(info, rect); + } + + if (ret == 0) + return; + + if (ret != -ENODEV) + nouveau_fbcon_gpu_lockup(info); + cfb_fillrect(info, rect); +} + +static void +nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + ret = -ENODEV; + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED)) { + if (dev_priv->card_type < NV_50) + ret = nv04_fbcon_copyarea(info, image); + else + if (dev_priv->card_type < NV_C0) + ret = nv50_fbcon_copyarea(info, image); + } + + if (ret == 0) + return; + + if (ret != -ENODEV) + nouveau_fbcon_gpu_lockup(info); +
cfb_copyarea(info, image); +} + +static void +nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + if (info->state != FBINFO_STATE_RUNNING) + return; + + ret = -ENODEV; + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED)) { + if (dev_priv->card_type < NV_50) + ret = nv04_fbcon_imageblit(info, image); + else + if (dev_priv->card_type < NV_C0) + ret = nv50_fbcon_imageblit(info, image); + } + + if (ret == 0) + return; + + if (ret != -ENODEV) + nouveau_fbcon_gpu_lockup(info); + cfb_imageblit(info, image); +} + static int nouveau_fbcon_sync(struct fb_info *info) { @@ -97,24 +181,9 @@ static struct fb_ops nouveau_fbcon_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, - .fb_sync = nouveau_fbcon_sync, - .fb_pan_display = drm_fb_helper_pan_display, - .fb_blank = drm_fb_helper_blank, - .fb_setcmap = drm_fb_helper_setcmap, - .fb_debug_enter = drm_fb_helper_debug_enter, - .fb_debug_leave = drm_fb_helper_debug_leave, -}; - -static struct fb_ops nv04_fbcon_ops = { - .owner = THIS_MODULE, - .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = drm_fb_helper_set_par, - .fb_fillrect = nv04_fbcon_fillrect, - .fb_copyarea = nv04_fbcon_copyarea, - .fb_imageblit = nv04_fbcon_imageblit, + .fb_fillrect = nouveau_fbcon_fillrect, + .fb_copyarea = nouveau_fbcon_copyarea, + .fb_imageblit = nouveau_fbcon_imageblit, .fb_sync = nouveau_fbcon_sync, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, @@ -123,14 +192,13 @@ static struct fb_ops nv04_fbcon_ops = { .fb_debug_leave = drm_fb_helper_debug_leave, }; -static struct fb_ops nv50_fbcon_ops = { +static struct fb_ops nouveau_fbcon_sw_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, - .fb_fillrect = nv50_fbcon_fillrect, - .fb_copyarea = nv50_fbcon_copyarea, - .fb_imageblit = nv50_fbcon_imageblit, - .fb_sync = nouveau_fbcon_sync, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, @@ -257,7 +325,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; info->flags |= FBINFO_CAN_FORCE_OUTPUT; - info->fbops = &nouveau_fbcon_ops; + info->fbops = &nouveau_fbcon_sw_ops; info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - dev_priv->vm_vram_base; info->fix.smem_len = size; @@ -286,18 +354,15 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, info->pixmap.scan_align = 1; if (dev_priv->channel && !nouveau_nofbaccel) { - switch (dev_priv->card_type) { - case NV_C0: - break; - case NV_50: - nv50_fbcon_accel_init(info); - info->fbops = &nv50_fbcon_ops; - break; - default: - nv04_fbcon_accel_init(info); - info->fbops = &nv04_fbcon_ops; - break; - }; + ret = -ENODEV; + if (dev_priv->card_type < NV_50) + ret = nv04_fbcon_accel_init(info); + else + if (dev_priv->card_type < NV_C0) + ret = nv50_fbcon_accel_init(info); + + if (ret == 0) + info->fbops = &nouveau_fbcon_ops; } nouveau_fbcon_zfill(dev, nfbdev); diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h index e7e12684c37e..6b933f2c3a5b 
100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h @@ -40,13 +40,13 @@ struct nouveau_fbdev { void nouveau_fbcon_restore(void); -void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); -void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); -void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); +int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); +int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); int nv04_fbcon_accel_init(struct fb_info *info); -void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); -void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); -void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); +int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect); +int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region); +int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image); int nv50_fbcon_accel_init(struct fb_info *info); void nouveau_fbcon_gpu_lockup(struct fb_info *info); diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 33e4c9388bc1..a32804e7d202 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -28,52 +28,39 @@ #include "nouveau_ramht.h" #include "nouveau_fbcon.h" -void +int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; + int ret; - if (info->state != FBINFO_STATE_RUNNING) - return; - - if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) { - nouveau_fbcon_gpu_lockup(info); - } - - if (info->flags & FBINFO_HWACCEL_DISABLED) { - cfb_copyarea(info, region); - return; - } + ret = RING_SPACE(chan, 4); + if (ret) + return ret; BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3); OUT_RING(chan, (region->sy << 16) | region->sx); OUT_RING(chan, (region->dy << 16) | region->dx); OUT_RING(chan, (region->height << 16) | region->width); FIRE_RING(chan); + return 0; } -void +int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; + int ret; - if (info->state != FBINFO_STATE_RUNNING) - return; - - if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) { - nouveau_fbcon_gpu_lockup(info); - } - - if (info->flags & FBINFO_HWACCEL_DISABLED) { - cfb_fillrect(info, rect); - return; - } + ret = RING_SPACE(chan, 7); + if (ret) + return ret; BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1); OUT_RING(chan, (rect->rop != ROP_COPY) ? 
1 : 3); @@ -87,9 +74,10 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) OUT_RING(chan, (rect->dx << 16) | rect->dy); OUT_RING(chan, (rect->width << 16) | rect->height); FIRE_RING(chan); + return 0; } -void +int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) { struct nouveau_fbdev *nfbdev = info->par; @@ -101,23 +89,14 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) uint32_t dsize; uint32_t width; uint32_t *data = (uint32_t *)image->data; + int ret; - if (info->state != FBINFO_STATE_RUNNING) - return; - - if (image->depth != 1) { - cfb_imageblit(info, image); - return; - } - - if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) { - nouveau_fbcon_gpu_lockup(info); - } + if (image->depth != 1) + return -ENODEV; - if (info->flags & FBINFO_HWACCEL_DISABLED) { - cfb_imageblit(info, image); - return; - } + ret = RING_SPACE(chan, 8); + if (ret) + return ret; width = ALIGN(image->width, 8); dsize = ALIGN(width * image->height, 32) >> 5; @@ -144,11 +123,9 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) while (dsize) { int iter_len = dsize > 128 ? 128 : dsize; - if (RING_SPACE(chan, iter_len + 1)) { - nouveau_fbcon_gpu_lockup(info); - cfb_imageblit(info, image); - return; - } + ret = RING_SPACE(chan, iter_len + 1); + if (ret) + return ret; BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len); OUT_RINGp(chan, data, iter_len); @@ -157,6 +134,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) } FIRE_RING(chan); + return 0; } static int diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 6dcf048eddbc..6edf9dca35ca 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -4,26 +4,18 @@ #include "nouveau_ramht.h" #include "nouveau_fbcon.h" -void +int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; + int ret; - if (info->state != FBINFO_STATE_RUNNING) - return; - - if (!(info->flags & FBINFO_HWACCEL_DISABLED) && - RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) { - nouveau_fbcon_gpu_lockup(info); - } - - if (info->flags & FBINFO_HWACCEL_DISABLED) { - cfb_fillrect(info, rect); - return; - } + ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 
7 : 11); + if (ret) + return ret; if (rect->rop != ROP_COPY) { BEGIN_RING(chan, NvSub2D, 0x02ac, 1); @@ -45,27 +37,21 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) OUT_RING(chan, 3); } FIRE_RING(chan); + return 0; } -void +int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) { struct nouveau_fbdev *nfbdev = info->par; struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; + int ret; - if (info->state != FBINFO_STATE_RUNNING) - return; - - if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) { - nouveau_fbcon_gpu_lockup(info); - } - - if (info->flags & FBINFO_HWACCEL_DISABLED) { - cfb_copyarea(info, region); - return; - } + ret = RING_SPACE(chan, 12); + if (ret) + return ret; BEGIN_RING(chan, NvSub2D, 0x0110, 1); OUT_RING(chan, 0); @@ -80,9 +66,10 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region) OUT_RING(chan, 0); OUT_RING(chan, region->sy); FIRE_RING(chan); + return 0; } -void +int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) { struct nouveau_fbdev *nfbdev = info->par; @@ -92,23 +79,14 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) uint32_t width, dwords, *data = (uint32_t *)image->data; uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); uint32_t *palette = info->pseudo_palette; + int ret; - if (info->state != FBINFO_STATE_RUNNING) - return; - - if (image->depth != 1) { - cfb_imageblit(info, image); - return; - } - - if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) { - nouveau_fbcon_gpu_lockup(info); - } + if (image->depth != 1) + return -ENODEV; - if (info->flags & FBINFO_HWACCEL_DISABLED) { - cfb_imageblit(info, image); - return; - } + ret = RING_SPACE(chan, 11); + if (ret) + return ret; width = ALIGN(image->width, 32); dwords = (width * image->height) >> 5; @@ -134,11 +112,9 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) while (dwords) { int push = dwords > 2047 ? 2047 : dwords; - if (RING_SPACE(chan, push + 1)) { - nouveau_fbcon_gpu_lockup(info); - cfb_imageblit(info, image); - return; - } + ret = RING_SPACE(chan, push + 1); + if (ret) + return ret; dwords -= push; @@ -148,6 +124,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) } FIRE_RING(chan); + return 0; } int -- cgit v1.2.3 From 6a6b73f254123851f7f73ab5e57344a569d6a0ab Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 5 Oct 2010 16:53:48 +1000 Subject: drm/nouveau: add per-channel mutex, use to lock access to drm's channel This fixes a race condition between fbcon acceleration and TTM buffer moves. To reproduce: - start X - switch to vt and "while (true); do dmesg; done" - switch to another vt and "sleep 2 && cat /path/to/debugfs/dri/0/evict_vram" - switch back to vt running dmesg We don't make use of this on any other channel yet, they're currently protected by drm_global_mutex. This will change in the near future. 
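A minimal sketch of the locking rule the diff below applies throughout fbcon and the buffer-move path (the helper name is hypothetical; RING_SPACE/FIRE_RING are the driver's existing DMA macros):

/* Sketch only: every ring emission on the DRM's own channel is
 * bracketed by chan->mutex.  emit_on_drm_channel() is illustrative,
 * not code from this commit. */
static int emit_on_drm_channel(struct drm_nouveau_private *dev_priv)
{
	struct nouveau_channel *chan = dev_priv->channel;
	int ret;

	mutex_lock(&chan->mutex);
	ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		/* ... BEGIN_RING()/OUT_RING() payload goes here ... */
		FIRE_RING(chan);
	}
	mutex_unlock(&chan->mutex);
	return ret;
}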
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 15 +++++++++++---- drivers/gpu/drm/nouveau/nouveau_channel.c | 1 + drivers/gpu/drm/nouveau/nouveau_drv.h | 2 ++ drivers/gpu/drm/nouveau/nouveau_fbcon.c | 17 ++++++++++++++--- 4 files changed, 28 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index c41e1c200ef5..d8817b4bb189 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -683,17 +683,24 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, int ret; chan = nvbo->channel; - if (!chan || nvbo->no_vm) + if (!chan || nvbo->no_vm) { chan = dev_priv->channel; + mutex_lock(&chan->mutex); + } if (dev_priv->card_type < NV_50) ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem); else ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem); - if (ret) - return ret; + if (ret == 0) { + ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, + no_wait_reserve, + no_wait_gpu, new_mem); + } - return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem); + if (chan == dev_priv->channel) + mutex_unlock(&chan->mutex); + return ret; } static int diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 373950e34814..8636478c477a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -145,6 +145,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, chan->file_priv = file_priv; chan->vram_handle = vram_handle; chan->gart_handle = tt_handle; + mutex_init(&chan->mutex); NV_INFO(dev, "Allocating FIFO number %d\n", channel); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 1c7db64c03bf..04bc56cf4f1a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -166,6 +166,8 @@ struct nouveau_channel { struct drm_device *dev; int id; + struct mutex mutex; + /* owner of this fifo */ struct drm_file *file_priv; /* mapping of the fifo itself */ diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 22e83adcc930..bc30dbe11d00 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -62,11 +62,13 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) ret = -ENODEV; if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED)) { + mutex_lock(&dev_priv->channel->mutex); if (dev_priv->card_type < NV_50) ret = nv04_fbcon_fillrect(info, rect); else if (dev_priv->card_type < NV_C0) ret = nv50_fbcon_fillrect(info, rect); + mutex_unlock(&dev_priv->channel->mutex); } if (ret == 0) @@ -90,11 +92,13 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) ret = -ENODEV; if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED)) { + mutex_lock(&dev_priv->channel->mutex); if (dev_priv->card_type < NV_50) ret = nv04_fbcon_copyarea(info, image); else if (dev_priv->card_type < NV_C0) ret = nv50_fbcon_copyarea(info, image); + mutex_unlock(&dev_priv->channel->mutex); } if (ret == 0) @@ -118,11 +122,13 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) ret = -ENODEV; if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED)) { + mutex_lock(&dev_priv->channel->mutex); if (dev_priv->card_type < NV_50) ret = nv04_fbcon_imageblit(info, image); else if 
(dev_priv->card_type < NV_C0) ret = nv50_fbcon_imageblit(info, image); + mutex_unlock(&dev_priv->channel->mutex); } if (ret == 0) @@ -142,12 +148,15 @@ nouveau_fbcon_sync(struct fb_info *info) struct nouveau_channel *chan = dev_priv->channel; int ret, i; - if (!chan || !chan->accel_done || + if (!chan || !chan->accel_done || in_interrupt() || info->state != FBINFO_STATE_RUNNING || info->flags & FBINFO_HWACCEL_DISABLED) return 0; - if (RING_SPACE(chan, 4)) { + mutex_lock(&chan->mutex); + ret = RING_SPACE(chan, 4); + if (ret) { + mutex_unlock(&chan->mutex); nouveau_fbcon_gpu_lockup(info); return 0; } @@ -158,6 +167,7 @@ nouveau_fbcon_sync(struct fb_info *info) OUT_RING(chan, 0); nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff); FIRE_RING(chan); + mutex_unlock(&chan->mutex); ret = -EBUSY; for (i = 0; i < 100000; i++) { @@ -353,6 +363,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, info->pixmap.flags = FB_PIXMAP_SYSTEM; info->pixmap.scan_align = 1; + mutex_unlock(&dev->struct_mutex); + if (dev_priv->channel && !nouveau_nofbaccel) { ret = -ENODEV; if (dev_priv->card_type < NV_50) @@ -373,7 +385,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, nouveau_fb->base.height, nvbo->bo.offset, nvbo); - mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(dev->pdev, info); return 0; -- cgit v1.2.3 From cff5c1332486ced8ff4180e957e04983cb72a39e Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 6 Oct 2010 16:16:59 +1000 Subject: drm/nouveau: add more fine-grained locking to channel list + structures Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 186 +++++++++++++++++------------ drivers/gpu/drm/nouveau/nouveau_drv.c | 7 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 23 ++-- drivers/gpu/drm/nouveau/nouveau_fence.c | 10 ++ drivers/gpu/drm/nouveau/nouveau_gem.c | 28 +++-- drivers/gpu/drm/nouveau/nouveau_irq.c | 42 ++++--- drivers/gpu/drm/nouveau/nouveau_notifier.c | 10 +- drivers/gpu/drm/nouveau/nouveau_object.c | 34 ++++-- drivers/gpu/drm/nouveau/nouveau_state.c | 8 +- drivers/gpu/drm/nouveau/nv04_fifo.c | 4 +- drivers/gpu/drm/nouveau/nv04_graph.c | 4 +- drivers/gpu/drm/nouveau/nv10_fifo.c | 2 +- drivers/gpu/drm/nouveau/nv10_graph.c | 4 +- drivers/gpu/drm/nouveau/nv40_fifo.c | 2 +- drivers/gpu/drm/nouveau/nv40_graph.c | 2 +- drivers/gpu/drm/nouveau/nv50_fb.c | 6 +- drivers/gpu/drm/nouveau/nv50_fifo.c | 9 +- drivers/gpu/drm/nouveau/nv50_graph.c | 2 +- drivers/gpu/drm/nouveau/nv50_instmem.c | 14 +-- 19 files changed, 236 insertions(+), 161 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 8636478c477a..47bf2d3d658c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -107,54 +107,54 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev) int nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, struct drm_file *file_priv, - uint32_t vram_handle, uint32_t tt_handle) + uint32_t vram_handle, uint32_t gart_handle) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_channel *chan; - int channel, user; - int ret; - - /* - * Alright, here is the full story - * Nvidia cards have multiple hw fifo contexts (praise them for that, - * no complicated crash-prone context switches) - * We allocate a new context for each app and let it write to it - * directly 
(woo, full userspace command submission !) - * When there are no more contexts, you lost - */ - for (channel = 0; channel < pfifo->channels; channel++) { - if (dev_priv->fifos[channel] == NULL) - break; - } - - /* no more fifos. you lost. */ - if (channel == pfifo->channels) - return -EINVAL; + unsigned long flags; + int user, ret; - dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel), - GFP_KERNEL); - if (!dev_priv->fifos[channel]) + /* allocate and lock channel structure */ + chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (!chan) return -ENOMEM; - chan = dev_priv->fifos[channel]; - INIT_LIST_HEAD(&chan->nvsw.vbl_wait); - INIT_LIST_HEAD(&chan->fence.pending); chan->dev = dev; - chan->id = channel; chan->file_priv = file_priv; chan->vram_handle = vram_handle; - chan->gart_handle = tt_handle; + chan->gart_handle = gart_handle; + + atomic_set(&chan->refcount, 1); mutex_init(&chan->mutex); + mutex_lock(&chan->mutex); - NV_INFO(dev, "Allocating FIFO number %d\n", channel); + /* allocate hw channel id */ + spin_lock_irqsave(&dev_priv->channels.lock, flags); + for (chan->id = 0; chan->id < pfifo->channels; chan->id++) { + if (!dev_priv->channels.ptr[chan->id]) { + dev_priv->channels.ptr[chan->id] = chan; + break; + } + } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + + if (chan->id == pfifo->channels) { + mutex_unlock(&chan->mutex); + kfree(chan); + return -ENODEV; + } + + NV_DEBUG(dev, "initialising channel %d\n", chan->id); + INIT_LIST_HEAD(&chan->nvsw.vbl_wait); + INIT_LIST_HEAD(&chan->fence.pending); /* Allocate DMA push buffer */ chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev); if (!chan->pushbuf_bo) { ret = -ENOMEM; NV_ERROR(dev, "pushbuf %d\n", ret); - nouveau_channel_free(chan); + nouveau_channel_put(&chan); return ret; } @@ -162,18 +162,18 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, /* Locate channel's user control regs */ if (dev_priv->card_type < NV_40) - user = NV03_USER(channel); + user = NV03_USER(chan->id); else if (dev_priv->card_type < NV_50) - user = NV40_USER(channel); + user = NV40_USER(chan->id); else - user = NV50_USER(channel); + user = NV50_USER(chan->id); chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user, PAGE_SIZE); if (!chan->user) { NV_ERROR(dev, "ioremap of regs failed.\n"); - nouveau_channel_free(chan); + nouveau_channel_put(&chan); return -ENOMEM; } chan->user_put = 0x40; @@ -183,15 +183,15 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, ret = nouveau_notifier_init_channel(chan); if (ret) { NV_ERROR(dev, "ntfy %d\n", ret); - nouveau_channel_free(chan); + nouveau_channel_put(&chan); return ret; } /* Setup channel's default objects */ - ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle); + ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); if (ret) { NV_ERROR(dev, "gpuobj %d\n", ret); - nouveau_channel_free(chan); + nouveau_channel_put(&chan); return ret; } @@ -199,7 +199,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, ret = nouveau_channel_pushbuf_ctxdma_init(chan); if (ret) { NV_ERROR(dev, "pbctxdma %d\n", ret); - nouveau_channel_free(chan); + nouveau_channel_put(&chan); return ret; } @@ -209,14 +209,14 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, /* Create a graphics context for new channel */ ret = pgraph->create_context(chan); if (ret) { - nouveau_channel_free(chan); + nouveau_channel_put(&chan); return ret; } /* Construct inital 
RAMFC for new channel */ ret = pfifo->create_context(chan); if (ret) { - nouveau_channel_free(chan); + nouveau_channel_put(&chan); return ret; } @@ -226,33 +226,70 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, if (!ret) ret = nouveau_fence_channel_init(chan); if (ret) { - nouveau_channel_free(chan); + nouveau_channel_put(&chan); return ret; } nouveau_debugfs_channel_init(chan); - NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel); + NV_DEBUG(dev, "channel %d initialised\n", chan->id); *chan_ret = chan; return 0; } -/* stops a fifo */ +struct nouveau_channel * +nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = ERR_PTR(-ENODEV); + unsigned long flags; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + chan = dev_priv->channels.ptr[id]; + + if (unlikely(!chan || atomic_read(&chan->refcount) == 0)) { + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return ERR_PTR(-EINVAL); + } + + if (unlikely(file_priv && chan->file_priv != file_priv)) { + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return ERR_PTR(-EINVAL); + } + + atomic_inc(&chan->refcount); + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + + mutex_lock(&chan->mutex); + return chan; +} + void -nouveau_channel_free(struct nouveau_channel *chan) +nouveau_channel_put(struct nouveau_channel **pchan) { + struct nouveau_channel *chan = *pchan; struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; unsigned long flags; int ret; - NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id); + /* unlock the channel */ + mutex_unlock(&chan->mutex); + + /* decrement the refcount, and we're done if there's still refs */ + if (likely(!atomic_dec_and_test(&chan->refcount))) { + *pchan = NULL; + return; + } + /* noone wants the channel anymore */ + NV_DEBUG(dev, "freeing channel %d\n", chan->id); nouveau_debugfs_channel_fini(chan); + *pchan = NULL; - /* Give outstanding push buffers a chance to complete */ + /* give it chance to idle */ nouveau_fence_update(chan); if (chan->fence.sequence != chan->fence.sequence_ack) { struct nouveau_fence *fence = NULL; @@ -267,13 +304,13 @@ nouveau_channel_free(struct nouveau_channel *chan) NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); } - /* Ensure all outstanding fences are signaled. They should be if the + /* ensure all outstanding fences are signaled. they should be if the * above attempts at idling were OK, but if we failed this'll tell TTM * we're done with the buffers. */ nouveau_fence_channel_fini(chan); - /* This will prevent pfifo from switching channels. 
*/ + /* boot it off the hardware */ pfifo->reassign(dev, false); /* We want to give pgraph a chance to idle and get rid of all potential @@ -302,7 +339,14 @@ nouveau_channel_free(struct nouveau_channel *chan) spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); - /* Release the channel's resources */ + /* aside from its resources, the channel should now be dead, + * remove it from the channel list + */ + spin_lock_irqsave(&dev_priv->channels.lock, flags); + dev_priv->channels.ptr[chan->id] = NULL; + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + + /* destroy any resources the channel owned */ nouveau_gpuobj_ref(NULL, &chan->pushbuf); if (chan->pushbuf_bo) { nouveau_bo_unmap(chan->pushbuf_bo); @@ -314,7 +358,6 @@ nouveau_channel_free(struct nouveau_channel *chan) if (chan->user) iounmap(chan->user); - dev_priv->fifos[chan->id] = NULL; kfree(chan); } @@ -324,31 +367,20 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine *engine = &dev_priv->engine; + struct nouveau_channel *chan; int i; NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); for (i = 0; i < engine->fifo.channels; i++) { - struct nouveau_channel *chan = dev_priv->fifos[i]; + chan = nouveau_channel_get(dev, file_priv, i); + if (IS_ERR(chan)) + continue; - if (chan && chan->file_priv == file_priv) - nouveau_channel_free(chan); + atomic_dec(&chan->refcount); + nouveau_channel_put(&chan); } } -int -nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv, - int channel) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; - - if (channel >= engine->fifo.channels) - return 0; - if (dev_priv->fifos[channel] == NULL) - return 0; - - return (dev_priv->fifos[channel]->file_priv == file_priv); -} /*********************************** * ioctls wrapping the functions @@ -396,24 +428,26 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, /* Named memory object area */ ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, &init->notifier_handle); - if (ret) { - nouveau_channel_free(chan); - return ret; - } - return 0; + if (ret == 0) + atomic_inc(&chan->refcount); /* userspace reference */ + nouveau_channel_put(&chan); + return ret; } static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_nouveau_channel_free *cfree = data; + struct drm_nouveau_channel_free *req = data; struct nouveau_channel *chan; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan); + chan = nouveau_channel_get(dev, file_priv, req->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); - nouveau_channel_free(chan); + atomic_dec(&chan->refcount); + nouveau_channel_put(&chan); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 90875494a65a..f139aa2cbe5c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -195,9 +195,8 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) for (i = 0; i < pfifo->channels; i++) { struct nouveau_fence *fence = NULL; - chan = dev_priv->fifos[i]; - if (!chan || (dev_priv->card_type >= NV_50 && - chan == dev_priv->fifos[0])) + chan = dev_priv->channels.ptr[i]; + if (!chan || !chan->pushbuf_bo) continue; ret = nouveau_fence_new(chan, &fence, true); @@ -313,7 +312,7 @@ nouveau_pci_resume(struct pci_dev *pdev) int j; for (i = 0; i < 
dev_priv->engine.fifo.channels; i++) { - chan = dev_priv->fifos[i]; + chan = dev_priv->channels.ptr[i]; if (!chan || !chan->pushbuf_bo) continue; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 04bc56cf4f1a..c3f102125083 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -166,6 +166,7 @@ struct nouveau_channel { struct drm_device *dev; int id; + atomic_t refcount; struct mutex mutex; /* owner of this fifo */ @@ -607,8 +608,10 @@ struct drm_nouveau_private { struct nouveau_bo *bo; } fence; - int fifo_alloc_count; - struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; + struct { + spinlock_t lock; + struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR]; + } channels; struct nouveau_engine engine; struct nouveau_channel *channel; @@ -721,16 +724,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) return 0; } -#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \ - struct drm_nouveau_private *nv = dev->dev_private; \ - if (!nouveau_channel_owner(dev, (cl), (id))) { \ - NV_ERROR(dev, "pid %d doesn't own channel %d\n", \ - DRM_CURRENTPID, (id)); \ - return -EPERM; \ - } \ - (ch) = nv->fifos[(id)]; \ -} while (0) - /* nouveau_drv.c */ extern int nouveau_agpmode; extern int nouveau_duallink; @@ -805,13 +798,13 @@ extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data, extern struct drm_ioctl_desc nouveau_ioctls[]; extern int nouveau_max_ioctl; extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *); -extern int nouveau_channel_owner(struct drm_device *, struct drm_file *, - int channel); extern int nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan, struct drm_file *file_priv, uint32_t fb_ctxdma, uint32_t tt_ctxdma); -extern void nouveau_channel_free(struct nouveau_channel *); +extern struct nouveau_channel * +nouveau_channel_get(struct drm_device *, struct drm_file *, int id); +extern void nouveau_channel_put(struct nouveau_channel **); /* nouveau_object.c */ extern int nouveau_gpuobj_early_init(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index ab1bbfbf266e..42694b122eef 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -393,8 +393,18 @@ nouveau_fence_sync(struct nouveau_fence *fence, return nouveau_fence_wait(fence, NULL, false, false); } + /* try to take wchan's mutex, if we can't take it right away + * we have to fallback to software sync to prevent locking + * order issues + */ + if (!mutex_trylock(&wchan->mutex)) { + free_semaphore(&sema->ref); + return nouveau_fence_wait(fence, NULL, false, false); + } + /* Make wchan wait until it gets signalled */ ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema); + mutex_unlock(&wchan->mutex); if (ret) goto out; diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 1f2301d26c0a..454d5ceb28f1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -146,11 +146,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; - if (req->channel_hint) { - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint, - file_priv, chan); - } - if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM) flags |= TTM_PL_FLAG_VRAM; if (req->info.domain & 
NOUVEAU_GEM_DOMAIN_GART) @@ -161,10 +156,18 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags)) return -EINVAL; + if (req->channel_hint) { + chan = nouveau_channel_get(dev, file_priv, req->channel_hint); + if (IS_ERR(chan)) + return PTR_ERR(chan); + } + ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags, req->info.tile_mode, req->info.tile_flags, false, (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE), &nvbo); + if (chan) + nouveau_channel_put(&chan); if (ret) return ret; @@ -341,9 +344,7 @@ retry: return -EINVAL; } - mutex_unlock(&drm_global_mutex); ret = ttm_bo_wait_cpu(&nvbo->bo, false); - mutex_lock(&drm_global_mutex); if (ret) { NV_ERROR(dev, "fail wait_cpu\n"); return ret; @@ -585,7 +586,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, struct nouveau_fence *fence = NULL; int i, j, ret = 0, do_reloc = 0; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); + chan = nouveau_channel_get(dev, file_priv, req->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); req->vram_available = dev_priv->fb_aper_free; req->gart_available = dev_priv->gart_info.aper_free; @@ -595,28 +598,34 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n", req->nr_push, NOUVEAU_GEM_MAX_PUSH); + nouveau_channel_put(&chan); return -EINVAL; } if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n", req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); + nouveau_channel_put(&chan); return -EINVAL; } if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n", req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); + nouveau_channel_put(&chan); return -EINVAL; } push = u_memcpya(req->push, req->nr_push, sizeof(*push)); - if (IS_ERR(push)) + if (IS_ERR(push)) { + nouveau_channel_put(&chan); return PTR_ERR(push); + } bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); if (IS_ERR(bo)) { kfree(push); + nouveau_channel_put(&chan); return PTR_ERR(bo); } @@ -750,6 +759,7 @@ out_next: req->suffix1 = 0x00000000; } + nouveau_channel_put(&chan); return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index a4fa9e14d66d..c5e37bc17192 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -113,15 +113,17 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = NULL; struct nouveau_gpuobj *obj; + unsigned long flags; const int subc = (addr >> 13) & 0x7; const int mthd = addr & 0x1ffc; bool handled = false; u32 engine; + spin_lock_irqsave(&dev_priv->channels.lock, flags); if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) - chan = dev_priv->fifos[chid]; + chan = dev_priv->channels.ptr[chid]; if (unlikely(!chan)) - return false; + goto out; switch (mthd) { case 0x0000: /* bind object to subchannel */ @@ -146,6 +148,8 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) break; } +out: + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); return handled; } @@ -398,6 +402,8 @@ static int nouveau_graph_chid_from_grctx(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan; + unsigned long flags; 
uint32_t inst; int i; @@ -407,27 +413,29 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev) if (dev_priv->card_type < NV_50) { inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4; + spin_lock_irqsave(&dev_priv->channels.lock, flags); for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - struct nouveau_channel *chan = dev_priv->fifos[i]; - + chan = dev_priv->channels.ptr[i]; if (!chan || !chan->ramin_grctx) continue; if (inst == chan->ramin_grctx->pinst) break; } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); } else { inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12; + spin_lock_irqsave(&dev_priv->channels.lock, flags); for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - struct nouveau_channel *chan = dev_priv->fifos[i]; - + chan = dev_priv->channels.ptr[i]; if (!chan || !chan->ramin) continue; if (inst == chan->ramin->vinst) break; } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); } @@ -449,7 +457,8 @@ nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret) else channel = nouveau_graph_chid_from_grctx(dev); - if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) { + if (channel >= engine->fifo.channels || + !dev_priv->channels.ptr[channel]) { NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel); return -EINVAL; } @@ -532,14 +541,19 @@ nouveau_pgraph_intr_swmthd(struct drm_device *dev, struct nouveau_pgraph_trap *trap) { struct drm_nouveau_private *dev_priv = dev->dev_private; + unsigned long flags; + int ret = -EINVAL; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + if (trap->channel > 0 && + trap->channel < dev_priv->engine.fifo.channels && + dev_priv->channels.ptr[trap->channel]) { + ret = nouveau_call_method(dev_priv->channels.ptr[trap->channel], + trap->class, trap->mthd, trap->data); + } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); - if (trap->channel < 0 || - trap->channel >= dev_priv->engine.fifo.channels || - !dev_priv->fifos[trap->channel]) - return -ENODEV; - - return nouveau_call_method(dev_priv->fifos[trap->channel], - trap->class, trap->mthd, trap->data); + return ret; } static inline void diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 2cc59f8c658b..2c5a1f66f7f0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -185,11 +185,11 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, struct nouveau_channel *chan; int ret; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); + chan = nouveau_channel_get(dev, file_priv, na->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); - if (ret) - return ret; - - return 0; + nouveau_channel_put(&chan); + return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index dd572adca02a..068441c4b563 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -876,8 +876,6 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, struct nouveau_channel *chan; int ret; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan); - if (init->handle == ~0) return -EINVAL; @@ -893,8 +891,14 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, return -EPERM; } - if (nouveau_ramht_find(chan, init->handle)) - return -EEXIST; + chan = nouveau_channel_get(dev, file_priv, init->channel); + if (IS_ERR(chan)) + 
return PTR_ERR(chan); + + if (nouveau_ramht_find(chan, init->handle)) { + ret = -EEXIST; + goto out; + } if (!grc->software) ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); @@ -903,7 +907,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, if (ret) { NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", ret, init->channel, init->handle); - return ret; + goto out; } ret = nouveau_ramht_insert(chan, init->handle, gr); @@ -911,10 +915,11 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, if (ret) { NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", ret, init->channel, init->handle); - return ret; } - return 0; +out: + nouveau_channel_put(&chan); + return ret; } int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, @@ -923,15 +928,20 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, struct drm_nouveau_gpuobj_free *objfree = data; struct nouveau_gpuobj *gpuobj; struct nouveau_channel *chan; + int ret = -ENOENT; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); + chan = nouveau_channel_get(dev, file_priv, objfree->channel); + if (IS_ERR(chan)) + return PTR_ERR(chan); gpuobj = nouveau_ramht_find(chan, objfree->handle); - if (!gpuobj) - return -ENOENT; + if (gpuobj) { + nouveau_ramht_remove(chan, objfree->handle); + ret = 0; + } - nouveau_ramht_remove(chan, objfree->handle); - return 0; + nouveau_channel_put(&chan); + return ret; } u32 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 049f755567e5..513c1063fb5e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -516,11 +516,11 @@ nouveau_card_init_channel(struct drm_device *dev) if (ret) goto out_err; + mutex_unlock(&dev_priv->channel->mutex); return 0; out_err: - nouveau_channel_free(dev_priv->channel); - dev_priv->channel = NULL; + nouveau_channel_put(&dev_priv->channel); return ret; } @@ -567,6 +567,7 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out; engine = &dev_priv->engine; + spin_lock_init(&dev_priv->channels.lock); spin_lock_init(&dev_priv->context_switch_lock); /* Make the CRTCs and I2C buses accessible */ @@ -713,8 +714,7 @@ static void nouveau_card_takedown(struct drm_device *dev) if (!engine->graph.accel_blocked) { nouveau_fence_fini(dev); - nouveau_channel_free(dev_priv->channel); - dev_priv->channel = NULL; + nouveau_channel_put(&dev_priv->channel); } if (!nouveau_noaccel) { diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index 708293b7ddcd..25c439dcdfd9 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c @@ -208,7 +208,7 @@ nv04_fifo_unload_context(struct drm_device *dev) if (chid < 0 || chid >= dev_priv->engine.fifo.channels) return 0; - chan = dev_priv->fifos[chid]; + chan = dev_priv->channels.ptr[chid]; if (!chan) { NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); return -EINVAL; @@ -289,7 +289,7 @@ nv04_fifo_init(struct drm_device *dev) pfifo->reassign(dev, true); for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - if (dev_priv->fifos[i]) { + if (dev_priv->channels.ptr[i]) { uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); } diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index c8973421b635..98b9525c1ebd 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c @@ -357,7 +357,7 @@ 
nv04_graph_channel(struct drm_device *dev) if (chid >= dev_priv->engine.fifo.channels) return NULL; - return dev_priv->fifos[chid]; + return dev_priv->channels.ptr[chid]; } void @@ -376,7 +376,7 @@ nv04_graph_context_switch(struct drm_device *dev) /* Load context for next channel */ chid = dev_priv->engine.fifo.channel_id(dev); - chan = dev_priv->fifos[chid]; + chan = dev_priv->channels.ptr[chid]; if (chan) nv04_graph_load_context(chan); diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c index f1b03ad58fd5..39328fcce709 100644 --- a/drivers/gpu/drm/nouveau/nv10_fifo.c +++ b/drivers/gpu/drm/nouveau/nv10_fifo.c @@ -241,7 +241,7 @@ nv10_fifo_init(struct drm_device *dev) pfifo->reassign(dev, true); for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - if (dev_priv->fifos[i]) { + if (dev_priv->channels.ptr[i]) { uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); } diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index 8e68c9731159..cd931b57cf05 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c @@ -802,7 +802,7 @@ nv10_graph_context_switch(struct drm_device *dev) /* Load context for next channel */ chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; - chan = dev_priv->fifos[chid]; + chan = dev_priv->channels.ptr[chid]; if (chan && chan->pgraph_ctx) nv10_graph_load_context(chan); @@ -833,7 +833,7 @@ nv10_graph_channel(struct drm_device *dev) if (chid >= dev_priv->engine.fifo.channels) return NULL; - return dev_priv->fifos[chid]; + return dev_priv->channels.ptr[chid]; } int nv10_graph_create_context(struct nouveau_channel *chan) diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index d337b8b28cdd..3c7be3dc8b88 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c @@ -301,7 +301,7 @@ nv40_fifo_init(struct drm_device *dev) pfifo->reassign(dev, true); for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - if (dev_priv->fifos[i]) { + if (dev_priv->channels.ptr[i]) { uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE); nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i)); } diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 7ee1b91569b8..e0b41a26447f 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -42,7 +42,7 @@ nv40_graph_channel(struct drm_device *dev) inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4; for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - struct nouveau_channel *chan = dev_priv->fifos[i]; + struct nouveau_channel *chan = dev_priv->channels.ptr[i]; if (chan && chan->ramin_grctx && chan->ramin_grctx->pinst == inst) diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c index cd1988b15d2c..d745c952986a 100644 --- a/drivers/gpu/drm/nouveau/nv50_fb.c +++ b/drivers/gpu/drm/nouveau/nv50_fb.c @@ -42,6 +42,7 @@ void nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) { struct drm_nouveau_private *dev_priv = dev->dev_private; + unsigned long flags; u32 trap[6], idx, chinst; int i, ch; @@ -60,8 +61,10 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) return; chinst = (trap[2] << 16) | trap[1]; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) { - struct nouveau_channel *chan = dev_priv->fifos[ch]; + struct nouveau_channel *chan = 
dev_priv->channels.ptr[ch]; if (!chan || !chan->ramin) continue; @@ -69,6 +72,7 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name) if (chinst == chan->ramin->vinst >> 12) break; } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x " "channel %d (0x%08x)\n", diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 1da65bd60c10..815960fe4f43 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -44,7 +44,8 @@ nv50_fifo_playlist_update(struct drm_device *dev) /* We never schedule channel 0 or 127 */ for (i = 1, nr = 0; i < 127; i++) { - if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) { + if (dev_priv->channels.ptr[i] && + dev_priv->channels.ptr[i]->ramfc) { nv_wo32(cur, (nr * 4), i); nr++; } @@ -60,7 +61,7 @@ static void nv50_fifo_channel_enable(struct drm_device *dev, int channel) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->fifos[channel]; + struct nouveau_channel *chan = dev_priv->channels.ptr[channel]; uint32_t inst; NV_DEBUG(dev, "ch%d\n", channel); @@ -118,7 +119,7 @@ nv50_fifo_init_context_table(struct drm_device *dev) NV_DEBUG(dev, "\n"); for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { - if (dev_priv->fifos[i]) + if (dev_priv->channels.ptr[i]) nv50_fifo_channel_enable(dev, i); else nv50_fifo_channel_disable(dev, i); @@ -392,7 +393,7 @@ nv50_fifo_unload_context(struct drm_device *dev) if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1) return 0; - chan = dev_priv->fifos[chid]; + chan = dev_priv->channels.ptr[chid]; if (!chan) { NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid); return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 8b669d0af610..24a3f8487579 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -190,7 +190,7 @@ nv50_graph_channel(struct drm_device *dev) inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12; for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - struct nouveau_channel *chan = dev_priv->fifos[i]; + struct nouveau_channel *chan = dev_priv->channels.ptr[i]; if (chan && chan->ramin && chan->ramin->vinst == inst) return chan; diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index b773229b7647..0651e7629235 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -131,10 +131,10 @@ nv50_instmem_init(struct drm_device *dev) } /* we need a channel to plug into the hw to control the BARs */ - ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]); + ret = nv50_channel_new(dev, 128*1024, &dev_priv->channels.ptr[0]); if (ret) return ret; - chan = dev_priv->fifos[127] = dev_priv->fifos[0]; + chan = dev_priv->channels.ptr[127] = dev_priv->channels.ptr[0]; /* allocate page table for PRAMIN BAR */ ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8, @@ -240,7 +240,7 @@ nv50_instmem_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; - struct nouveau_channel *chan = dev_priv->fifos[0]; + struct nouveau_channel *chan = dev_priv->channels.ptr[0]; int i; NV_DEBUG(dev, "\n"); @@ -264,8 +264,8 @@ nv50_instmem_takedown(struct drm_device *dev) nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); dev_priv->vm_vram_pt_nr = 0; - 
nv50_channel_del(&dev_priv->fifos[0]); - dev_priv->fifos[127] = NULL; + nv50_channel_del(&dev_priv->channels.ptr[0]); + dev_priv->channels.ptr[127] = NULL; } dev_priv->engine.instmem.priv = NULL; @@ -276,7 +276,7 @@ int nv50_instmem_suspend(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->fifos[0]; + struct nouveau_channel *chan = dev_priv->channels.ptr[0]; struct nouveau_gpuobj *ramin = chan->ramin; int i; @@ -294,7 +294,7 @@ nv50_instmem_resume(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; - struct nouveau_channel *chan = dev_priv->fifos[0]; + struct nouveau_channel *chan = dev_priv->channels.ptr[0]; struct nouveau_gpuobj *ramin = chan->ramin; int i; -- cgit v1.2.3 From b12120a58e06cc6b799494c80215e418c14f9f5f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 6 Oct 2010 16:20:17 +1000 Subject: drm/nouveau: switch to unlocked ioctls Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 47bf2d3d658c..0e2414b0ae50 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -456,18 +456,18 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, ***********************************/ struct drm_ioctl_desc nouveau_ioctls[] = { - DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), - DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, 
DRM_UNLOCKED|DRM_AUTH), + DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH), }; int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); -- cgit v1.2.3 From 21e86c1c8a844bf978f8fc431a59c9f5a578812d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 11 Oct 2010 11:48:45 +1000 Subject: drm/nouveau: remove cpu_writers lock No other driver uses this, and userspace should be responsible for handling locking between them if they share BOs. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 - drivers/gpu/drm/nouveau/nouveau_gem.c | 64 +++-------------------------------- include/drm/nouveau_drm.h | 1 - 3 files changed, 4 insertions(+), 62 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index c3f102125083..b78663fc334c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -96,7 +96,6 @@ struct nouveau_bo { struct nouveau_tile_reg *tile; struct drm_gem_object *gem; - struct drm_file *cpu_filp; int pin_refcnt; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 454d5ceb28f1..e14d10e38870 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -48,9 +48,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem) return; nvbo->gem = NULL; - if (unlikely(nvbo->cpu_filp)) - ttm_bo_synccpu_write_release(bo); - if (unlikely(nvbo->pin_refcnt)) { nvbo->pin_refcnt = 1; nouveau_bo_unpin(nvbo); @@ -334,23 +331,6 @@ retry: validate_fini(op, NULL); return -EINVAL; } - - if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) { - validate_fini(op, NULL); - - if (nvbo->cpu_filp == file_priv) { - NV_ERROR(dev, "bo %p mapped by process trying " - "to validate it!\n", nvbo); - return -EINVAL; - } - - ret = ttm_bo_wait_cpu(&nvbo->bo, false); - if (ret) { - NV_ERROR(dev, "fail wait_cpu\n"); - return ret; - } - goto retry; - } } return 0; @@ -791,26 +771,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, return -ENOENT; nvbo = nouveau_gem_object(gem); - if (nvbo->cpu_filp) { - if (nvbo->cpu_filp == file_priv) - goto out; - - ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait); - if (ret) - goto out; - } - - if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) { - spin_lock(&nvbo->bo.bdev->fence_lock); - ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait); - spin_unlock(&nvbo->bo.bdev->fence_lock); - } else { - ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait); - if (ret == 0) - nvbo->cpu_filp = file_priv; - } - -out: + spin_lock(&nvbo->bo.bdev->fence_lock); + ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait); + spin_unlock(&nvbo->bo.bdev->fence_lock); drm_gem_object_unreference_unlocked(gem); return ret; } @@ -819,26 +782,7 @@ int nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_nouveau_gem_cpu_prep *req = data; - struct drm_gem_object *gem; - struct nouveau_bo *nvbo; - int ret = -EINVAL; - - gem = drm_gem_object_lookup(dev, file_priv, req->handle); - if (!gem) - return -ENOENT; - nvbo = nouveau_gem_object(gem); - - if (nvbo->cpu_filp != file_priv) - goto out; - nvbo->cpu_filp = NULL; - - ttm_bo_synccpu_write_release(&nvbo->bo); - ret = 0; - -out: - drm_gem_object_unreference_unlocked(gem); - return ret; + return 0; } int diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h index bc5590b1a1ac..60a7b3e9c2e9 100644 --- a/include/drm/nouveau_drm.h +++ b/include/drm/nouveau_drm.h @@ -171,7 +171,6 @@ struct 
drm_nouveau_gem_pushbuf { }; #define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001 -#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002 #define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004 struct drm_nouveau_gem_cpu_prep { uint32_t handle; -- cgit v1.2.3 From 08cd3d4311fb9c5038bc6fb0c83c250cfb218da2 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 12 Oct 2010 08:01:59 +1000 Subject: drm/nouveau: fix thinko in channel locking in semaphore path Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_fence.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 42694b122eef..5f9f66f35067 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -393,23 +393,23 @@ nouveau_fence_sync(struct nouveau_fence *fence, return nouveau_fence_wait(fence, NULL, false, false); } - /* try to take wchan's mutex, if we can't take it right away + /* try to take chan's mutex, if we can't take it right away * we have to fallback to software sync to prevent locking * order issues */ - if (!mutex_trylock(&wchan->mutex)) { + if (!mutex_trylock(&chan->mutex)) { free_semaphore(&sema->ref); return nouveau_fence_wait(fence, NULL, false, false); } /* Make wchan wait until it gets signalled */ ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema); - mutex_unlock(&wchan->mutex); if (ret) goto out; /* Signal the semaphore from chan */ ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema); + mutex_unlock(&chan->mutex); out: kref_put(&sema->ref, free_semaphore); return ret; -- cgit v1.2.3 From 938c40ed69b2c1eceb72247b5e8975b28afc195f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 12 Oct 2010 09:54:54 +1000 Subject: drm/nouveau: use interruptible waits during pushbuf validation Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_gem.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index e14d10e38870..23b521ea8e04 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -299,14 +299,15 @@ retry: return -EINVAL; } - ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence); + ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence); if (ret) { validate_fini(op, NULL); - if (ret == -EAGAIN) - ret = ttm_bo_wait_unreserved(&nvbo->bo, false); + if (unlikely(ret == -EAGAIN)) + ret = ttm_bo_wait_unreserved(&nvbo->bo, true); drm_gem_object_unreference_unlocked(gem); - if (ret) { - NV_ERROR(dev, "fail reserve\n"); + if (unlikely(ret)) { + if (ret != -ERESTARTSYS) + NV_ERROR(dev, "fail reserve\n"); return ret; } goto retry; @@ -365,10 +366,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, nvbo->channel = (b->read_domains & (1 << 31)) ? 
NULL : chan; ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, - false, false, false); + true, false, false); nvbo->channel = NULL; if (unlikely(ret)) { - NV_ERROR(dev, "fail ttm_validate\n"); + if (ret != -ERESTARTSYS) + NV_ERROR(dev, "fail ttm_validate\n"); return ret; } @@ -420,13 +422,15 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); if (unlikely(ret)) { - NV_ERROR(dev, "validate_init\n"); + if (ret != -ERESTARTSYS) + NV_ERROR(dev, "validate_init\n"); return ret; } ret = validate_list(chan, &op->vram_list, pbbo, user_buffers); if (unlikely(ret < 0)) { - NV_ERROR(dev, "validate vram_list\n"); + if (ret != -ERESTARTSYS) + NV_ERROR(dev, "validate vram_list\n"); validate_fini(op, NULL); return ret; } @@ -434,7 +438,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, ret = validate_list(chan, &op->gart_list, pbbo, user_buffers); if (unlikely(ret < 0)) { - NV_ERROR(dev, "validate gart_list\n"); + if (ret != -ERESTARTSYS) + NV_ERROR(dev, "validate gart_list\n"); validate_fini(op, NULL); return ret; } @@ -442,7 +447,8 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, ret = validate_list(chan, &op->both_list, pbbo, user_buffers); if (unlikely(ret < 0)) { - NV_ERROR(dev, "validate both_list\n"); + if (ret != -ERESTARTSYS) + NV_ERROR(dev, "validate both_list\n"); validate_fini(op, NULL); return ret; } @@ -628,7 +634,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, req->nr_buffers, &op, &do_reloc); if (ret) { - NV_ERROR(dev, "validate: %d\n", ret); + if (ret != -ERESTARTSYS) + NV_ERROR(dev, "validate: %d\n", ret); goto out; } -- cgit v1.2.3 From 18a16a768c3d37f5bfdbb414217b530294d5d442 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 12 Oct 2010 10:11:00 +1000 Subject: drm/nouveau: return error from nouveau_ramht_remove() if not found Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_object.c | 10 ++-------- drivers/gpu/drm/nouveau/nouveau_ramht.c | 5 +++-- drivers/gpu/drm/nouveau/nouveau_ramht.h | 2 +- 3 files changed, 6 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 068441c4b563..ce9958a30175 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -926,20 +926,14 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_nouveau_gpuobj_free *objfree = data; - struct nouveau_gpuobj *gpuobj; struct nouveau_channel *chan; - int ret = -ENOENT; + int ret; chan = nouveau_channel_get(dev, file_priv, objfree->channel); if (IS_ERR(chan)) return PTR_ERR(chan); - gpuobj = nouveau_ramht_find(chan, objfree->handle); - if (gpuobj) { - nouveau_ramht_remove(chan, objfree->handle); - ret = 0; - } - + ret = nouveau_ramht_remove(chan, objfree->handle); nouveau_channel_put(&chan); return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c index 2d8580927ca4..b4c63c058888 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.c +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c @@ -214,18 +214,19 @@ out: spin_unlock_irqrestore(&chan->ramht->lock, flags); } -void +int nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) { struct nouveau_ramht_entry *entry; entry = nouveau_ramht_remove_entry(chan, handle); if (!entry) - return; + return -ENOENT; nouveau_ramht_remove_hash(chan, 
entry->handle); nouveau_gpuobj_ref(NULL, &entry->gpuobj); kfree(entry); + return 0; } struct nouveau_gpuobj * diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h index b79cb5e1a8f1..c82de98fee0e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.h +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h @@ -48,7 +48,7 @@ extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **, extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle, struct nouveau_gpuobj *); -extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle); +extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle); extern struct nouveau_gpuobj * nouveau_ramht_find(struct nouveau_channel *chan, u32 handle); -- cgit v1.2.3 From 6032649df9f456f379be8d51f64488cacbfa8317 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 12 Oct 2010 12:31:32 +1000 Subject: drm/nouveau: hook up acpi power supply change tracking Not used at all yet, but lets hook it up now anyway. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nouveau_pm.c | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index b78663fc334c..699d546623bd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -439,6 +439,7 @@ struct nouveau_pm_engine { struct nouveau_pm_level *cur; struct device *hwmon; + struct notifier_block acpi_nb; int (*clock_get)(struct drm_device *, u32 id); void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *, diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index 9f7b158f5825..d93814160bcf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -27,6 +27,10 @@ #include "nouveau_drv.h" #include "nouveau_pm.h" +#ifdef CONFIG_ACPI +#include +#endif +#include #include #include @@ -446,6 +450,25 @@ nouveau_hwmon_fini(struct drm_device *dev) #endif } +#ifdef CONFIG_ACPI +static int +nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data) +{ + struct drm_nouveau_private *dev_priv = + container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb); + struct drm_device *dev = dev_priv->dev; + struct acpi_bus_event *entry = (struct acpi_bus_event *)data; + + if (strcmp(entry->device_class, "ac_adapter") == 0) { + bool ac = power_supply_is_system_supplied(); + + NV_DEBUG(dev, "power supply changed: %s\n", ac ? 
"AC" : "DC"); + } + + return NOTIFY_OK; +} +#endif + int nouveau_pm_init(struct drm_device *dev) { @@ -485,6 +508,10 @@ nouveau_pm_init(struct drm_device *dev) nouveau_sysfs_init(dev); nouveau_hwmon_init(dev); +#ifdef CONFIG_ACPI + pm->acpi_nb.notifier_call = nouveau_pm_acpi_event; + register_acpi_notifier(&pm->acpi_nb); +#endif return 0; } @@ -503,6 +530,9 @@ nouveau_pm_fini(struct drm_device *dev) nouveau_perf_fini(dev); nouveau_volt_fini(dev); +#ifdef CONFIG_ACPI + unregister_acpi_notifier(&pm->acpi_nb); +#endif nouveau_hwmon_fini(dev); nouveau_sysfs_fini(dev); } -- cgit v1.2.3 From 9acc8100cb14b91d446a482fdd0cf7e3ccbcf930 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 14 Oct 2010 14:55:23 +1000 Subject: drm/nouveau: fallback to sw fbcon if we can't get mutex immediately Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index bc30dbe11d00..0fce4eb914d5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -61,8 +61,8 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) return; ret = -ENODEV; - if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED)) { - mutex_lock(&dev_priv->channel->mutex); + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && + mutex_trylock(&dev_priv->channel->mutex)) { if (dev_priv->card_type < NV_50) ret = nv04_fbcon_fillrect(info, rect); else @@ -91,8 +91,8 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) return; ret = -ENODEV; - if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED)) { - mutex_lock(&dev_priv->channel->mutex); + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && + mutex_trylock(&dev_priv->channel->mutex)) { if (dev_priv->card_type < NV_50) ret = nv04_fbcon_copyarea(info, image); else @@ -121,8 +121,8 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) return; ret = -ENODEV; - if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED)) { - mutex_lock(&dev_priv->channel->mutex); + if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) && + mutex_trylock(&dev_priv->channel->mutex)) { if (dev_priv->card_type < NV_50) ret = nv04_fbcon_imageblit(info, image); else @@ -153,7 +153,9 @@ nouveau_fbcon_sync(struct fb_info *info) info->flags & FBINFO_HWACCEL_DISABLED) return 0; - mutex_lock(&chan->mutex); + if (!mutex_trylock(&chan->mutex)) + return 0; + ret = RING_SPACE(chan, 4); if (ret) { mutex_unlock(&chan->mutex); -- cgit v1.2.3 From cbb4b608b603c7fba4c1cf5b298d1aa23d3fdd22 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 18 Oct 2010 12:34:04 +1000 Subject: drm/nv50: remove some unnecessary PDISPLAY init Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_display.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index f624c611ddea..7ac87efb791f 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -218,9 +218,9 @@ nv50_display_init(struct drm_device *dev) struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; struct nouveau_channel *evo = dev_priv->evo; struct drm_connector *connector; - uint32_t val, ram_amount; - uint64_t start; int ret, i; + u64 start; + u32 val; NV_DEBUG_KMS(dev, "\n"); @@ -262,17 
+262,6 @@ nv50_display_init(struct drm_device *dev) nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001); } - /* This used to be in crtc unblank, but seems out of place there. */ - nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); - /* RAM is clamped to 256 MiB. */ - ram_amount = dev_priv->vram_size; - NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount); - if (ram_amount > 256*1024*1024) - ram_amount = 256*1024*1024; - nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1); - nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000); - nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0); - /* The precise purpose is unknown, i suspect it has something to do * with text mode. */ -- cgit v1.2.3 From aa5ac7358c010dcb5916d1d0c2651ae65a4f7ab7 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 14 Oct 2010 20:37:56 +0200 Subject: drm/nouveau: Leave BO eviction synchronization for later. The pushbuf ioctl syncs after validation, no need for this anymore. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index d8817b4bb189..8442bfbf5d42 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -485,15 +485,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, if (ret) return ret; - if (nvbo->channel) { - ret = nouveau_fence_sync(fence, nvbo->channel); - if (ret) - goto out; - } - ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, no_wait_reserve, no_wait_gpu, new_mem); -out: nouveau_fence_unref((void *)&fence); return ret; } -- cgit v1.2.3 From fcccab2e4eb8d579837481054cc2cb28eea0baef Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 14 Oct 2010 21:55:23 +0200 Subject: drm/nouveau: Use lazy fence waits when doing software interchannel sync. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_fence.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 5f9f66f35067..0a22955998e8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -390,7 +390,7 @@ nouveau_fence_sync(struct nouveau_fence *fence, if (!sema) { /* Early card or broken userspace, fall back to * software sync. */ - return nouveau_fence_wait(fence, NULL, false, false); + return nouveau_fence_wait(fence, NULL, true, false); } /* try to take chan's mutex, if we can't take it right away @@ -399,7 +399,7 @@ nouveau_fence_sync(struct nouveau_fence *fence, */ if (!mutex_trylock(&chan->mutex)) { free_semaphore(&sema->ref); - return nouveau_fence_wait(fence, NULL, false, false); + return nouveau_fence_wait(fence, NULL, true, false); } /* Make wchan wait until it gets signalled */ -- cgit v1.2.3 From 3945e47543863385b54d94c94b023ee7ca9df972 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 18 Oct 2010 03:53:39 +0200 Subject: drm/nouveau: Refactor context destruction to avoid a lock ordering issue. The destroy_context() engine hooks call gpuobj management functions to release the channel resources, these functions use HARDIRQ-unsafe locks whereas destroy_context() is called with the HARDIRQ-safe context_switch_lock held, that's a lock ordering violation. Push the engine-specific channel destruction logic into destroy_context() and let the hardware-specific code lock and unlock when it's actually needed. 
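The resulting shape of each engine hook is easiest to see in a condensed sketch (an illustrative distillation of the nv04-nv50 hunks below; the function name is a placeholder and chipset-specific teardown is omitted):

static void
example_graph_destroy_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        unsigned long flags;

        /* Touch the hardware only while holding the HARDIRQ-safe lock. */
        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        pgraph->fifo_access(dev, false);

        /* Unload the context if it's the currently active one. */
        if (pgraph->channel(dev) == chan)
                pgraph->unload_context(dev);

        pgraph->fifo_access(dev, true);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

        /* Free the context resources with the spinlock dropped, so the
         * HARDIRQ-unsafe gpuobj locks never nest inside it. */
        nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
}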
Change the engine destruction order to avoid a race in the small gap between pgraph and pfifo context uninitialization. Reported-by: Marcin Slusarz Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 24 ++++++------------------ drivers/gpu/drm/nouveau/nouveau_drv.h | 2 -- drivers/gpu/drm/nouveau/nouveau_state.c | 8 ++++---- drivers/gpu/drm/nouveau/nv04_fifo.c | 21 +++++++++++++++++++-- drivers/gpu/drm/nouveau/nv04_graph.c | 15 +++++++++++++++ drivers/gpu/drm/nouveau/nv10_fifo.c | 11 ----------- drivers/gpu/drm/nouveau/nv10_graph.c | 15 +++++++++++++++ drivers/gpu/drm/nouveau/nv20_graph.c | 14 +++++++++++++- drivers/gpu/drm/nouveau/nv40_fifo.c | 11 ----------- drivers/gpu/drm/nouveau/nv40_graph.c | 16 ++++++++++++++++ drivers/gpu/drm/nouveau/nv50_fifo.c | 17 +++++++++++++++++ drivers/gpu/drm/nouveau/nv50_graph.c | 11 +++++++++++ 12 files changed, 116 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 0e2414b0ae50..9a051fafa7c3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -313,32 +313,20 @@ nouveau_channel_put(struct nouveau_channel **pchan) /* boot it off the hardware */ pfifo->reassign(dev, false); - /* We want to give pgraph a chance to idle and get rid of all potential - * errors. We need to do this before the lock, otherwise the irq handler - * is unable to process them. + /* We want to give pgraph a chance to idle and get rid of all + * potential errors. We need to do this without the context + * switch lock held, otherwise the irq handler is unable to + * process them. */ if (pgraph->channel(dev) == chan) nouveau_wait_for_idle(dev); - spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - - pgraph->fifo_access(dev, false); - if (pgraph->channel(dev) == chan) - pgraph->unload_context(dev); - pgraph->destroy_context(chan); - pgraph->fifo_access(dev, true); - - if (pfifo->channel_id(dev) == chan->id) { - pfifo->disable(dev); - pfifo->unload_context(dev); - pfifo->enable(dev); - } + /* destroy the engine specific contexts */ pfifo->destroy_context(chan); + pgraph->destroy_context(chan); pfifo->reassign(dev, true); - spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); - /* aside from its resources, the channel should now be dead, * remove it from the channel list */ diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 699d546623bd..198dabebafb2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -998,14 +998,12 @@ extern int nv04_fifo_unload_context(struct drm_device *); extern int nv10_fifo_init(struct drm_device *); extern int nv10_fifo_channel_id(struct drm_device *); extern int nv10_fifo_create_context(struct nouveau_channel *); -extern void nv10_fifo_destroy_context(struct nouveau_channel *); extern int nv10_fifo_load_context(struct nouveau_channel *); extern int nv10_fifo_unload_context(struct drm_device *); /* nv40_fifo.c */ extern int nv40_fifo_init(struct drm_device *); extern int nv40_fifo_create_context(struct nouveau_channel *); -extern void nv40_fifo_destroy_context(struct nouveau_channel *); extern int nv40_fifo_load_context(struct nouveau_channel *); extern int nv40_fifo_unload_context(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 513c1063fb5e..1a4ba6ccafb1 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -137,7 +137,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv10_fifo_create_context; - engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.destroy_context = nv04_fifo_destroy_context; engine->fifo.load_context = nv10_fifo_load_context; engine->fifo.unload_context = nv10_fifo_unload_context; engine->display.early_init = nv04_display_early_init; @@ -191,7 +191,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv10_fifo_create_context; - engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.destroy_context = nv04_fifo_destroy_context; engine->fifo.load_context = nv10_fifo_load_context; engine->fifo.unload_context = nv10_fifo_unload_context; engine->display.early_init = nv04_display_early_init; @@ -245,7 +245,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv10_fifo_create_context; - engine->fifo.destroy_context = nv10_fifo_destroy_context; + engine->fifo.destroy_context = nv04_fifo_destroy_context; engine->fifo.load_context = nv10_fifo_load_context; engine->fifo.unload_context = nv10_fifo_unload_context; engine->display.early_init = nv04_display_early_init; @@ -302,7 +302,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.cache_pull = nv04_fifo_cache_pull; engine->fifo.channel_id = nv10_fifo_channel_id; engine->fifo.create_context = nv40_fifo_create_context; - engine->fifo.destroy_context = nv40_fifo_destroy_context; + engine->fifo.destroy_context = nv04_fifo_destroy_context; engine->fifo.load_context = nv40_fifo_load_context; engine->fifo.unload_context = nv40_fifo_unload_context; engine->display.early_init = nv04_display_early_init; diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index 25c439dcdfd9..4c0d3a8fca68 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c @@ -151,10 +151,27 @@ void nv04_fifo_destroy_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + unsigned long flags; - nv_wr32(dev, NV04_PFIFO_MODE, - nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pfifo->reassign(dev, false); + + /* Unload the context if it's the currently active one */ + if (pfifo->channel_id(dev) == chan->id) { + pfifo->disable(dev); + pfifo->unload_context(dev); + pfifo->enable(dev); + } + + /* Keep it from being rescheduled */ + nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0); + + pfifo->reassign(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + /* Free the channel resources */ nouveau_gpuobj_ref(NULL, &chan->ramfc); } diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index 98b9525c1ebd..1e2ad3942330 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c @@ -412,10 +412,25 @@ int nv04_graph_create_context(struct nouveau_channel *chan) void 
nv04_graph_destroy_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct graph_state *pgraph_ctx = chan->pgraph_ctx; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pgraph->fifo_access(dev, false); + + /* Unload the context if it's the currently active one */ + if (pgraph->channel(dev) == chan) + pgraph->unload_context(dev); + /* Free the context resources */ kfree(pgraph_ctx); chan->pgraph_ctx = NULL; + + pgraph->fifo_access(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); } int nv04_graph_load_context(struct nouveau_channel *chan) diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c index 39328fcce709..912556f2e33c 100644 --- a/drivers/gpu/drm/nouveau/nv10_fifo.c +++ b/drivers/gpu/drm/nouveau/nv10_fifo.c @@ -73,17 +73,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan) return 0; } -void -nv10_fifo_destroy_context(struct nouveau_channel *chan) -{ - struct drm_device *dev = chan->dev; - - nv_wr32(dev, NV04_PFIFO_MODE, - nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); - - nouveau_gpuobj_ref(NULL, &chan->ramfc); -} - static void nv10_fifo_do_load_context(struct drm_device *dev, int chid) { diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index cd931b57cf05..e3a87a64c164 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c @@ -875,10 +875,25 @@ int nv10_graph_create_context(struct nouveau_channel *chan) void nv10_graph_destroy_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct graph_state *pgraph_ctx = chan->pgraph_ctx; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pgraph->fifo_access(dev, false); + + /* Unload the context if it's the currently active one */ + if (pgraph->channel(dev) == chan) + pgraph->unload_context(dev); + /* Free the context resources */ kfree(pgraph_ctx); chan->pgraph_ctx = NULL; + + pgraph->fifo_access(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); } void diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index 12ab9cd56eca..8a0402012557 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c @@ -425,9 +425,21 @@ nv20_graph_destroy_context(struct nouveau_channel *chan) struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + unsigned long flags; - nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pgraph->fifo_access(dev, false); + + /* Unload the context if it's the currently active one */ + if (pgraph->channel(dev) == chan) + pgraph->unload_context(dev); + + pgraph->fifo_access(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + + /* Free the context resources */ nv_wo32(pgraph->ctx_table, chan->id * 4, 0); + nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); } int diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index 3c7be3dc8b88..311ac9ea5d53 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ 
b/drivers/gpu/drm/nouveau/nv40_fifo.c @@ -70,17 +70,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan) return 0; } -void -nv40_fifo_destroy_context(struct nouveau_channel *chan) -{ - struct drm_device *dev = chan->dev; - - nv_wr32(dev, NV04_PFIFO_MODE, - nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id)); - - nouveau_gpuobj_ref(NULL, &chan->ramfc); -} - static void nv40_fifo_do_load_context(struct drm_device *dev, int chid) { diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index e0b41a26447f..70d97cde49d0 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -79,6 +79,22 @@ nv40_graph_create_context(struct nouveau_channel *chan) void nv40_graph_destroy_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pgraph->fifo_access(dev, false); + + /* Unload the context if it's the currently active one */ + if (pgraph->channel(dev) == chan) + pgraph->unload_context(dev); + + pgraph->fifo_access(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + + /* Free the context resources */ nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); } diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 815960fe4f43..d3295aae0c4e 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -292,10 +292,23 @@ void nv50_fifo_destroy_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_gpuobj *ramfc = NULL; + unsigned long flags; NV_DEBUG(dev, "ch%d\n", chan->id); + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pfifo->reassign(dev, false); + + /* Unload the context if it's the currently active one */ + if (pfifo->channel_id(dev) == chan->id) { + pfifo->disable(dev); + pfifo->unload_context(dev); + pfifo->enable(dev); + } + /* This will ensure the channel is seen as disabled. */ nouveau_gpuobj_ref(chan->ramfc, &ramfc); nouveau_gpuobj_ref(NULL, &chan->ramfc); @@ -306,6 +319,10 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan) nv50_fifo_channel_disable(dev, 127); nv50_fifo_playlist_update(dev); + pfifo->reassign(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + + /* Free the channel resources */ nouveau_gpuobj_ref(NULL, &ramfc); nouveau_gpuobj_ref(NULL, &chan->cache); } diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 24a3f8487579..dcc9175fb794 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -242,17 +242,28 @@ nv50_graph_destroy_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; int i, hdr = (dev_priv->chipset == 0x50) ? 
0x200 : 0x20; + unsigned long flags; NV_DEBUG(dev, "ch%d\n", chan->id); if (!chan->ramin) return; + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pgraph->fifo_access(dev, false); + + if (pgraph->channel(dev) == chan) + pgraph->unload_context(dev); + for (i = hdr; i < hdr + 24; i += 4) nv_wo32(chan->ramin, i, 0); dev_priv->engine.instmem.flush(dev); + pgraph->fifo_access(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); } -- cgit v1.2.3 From f175b745b50c5c5356e8b3b409b7f38aa44de6bb Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 18 Oct 2010 03:54:33 +0200 Subject: drm/nouveau: Fix race condition in channel refcount handling. nouveau_channel_put() can be executed after the 'refcount == 0' check in nouveau_channel_get() and before the channel reference count is incremented. In that case CPU0 will take the context down while CPU1 thinks it owns the channel and 'refcount == 1'. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 9a051fafa7c3..c46a6f641964 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -247,17 +247,16 @@ nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id) spin_lock_irqsave(&dev_priv->channels.lock, flags); chan = dev_priv->channels.ptr[id]; - if (unlikely(!chan || atomic_read(&chan->refcount) == 0)) { + if (unlikely(!chan || (file_priv && chan->file_priv != file_priv))) { spin_unlock_irqrestore(&dev_priv->channels.lock, flags); return ERR_PTR(-EINVAL); } - if (unlikely(file_priv && chan->file_priv != file_priv)) { + if (unlikely(!atomic_inc_not_zero(&chan->refcount))) { spin_unlock_irqrestore(&dev_priv->channels.lock, flags); return ERR_PTR(-EINVAL); } - atomic_inc(&chan->refcount); spin_unlock_irqrestore(&dev_priv->channels.lock, flags); mutex_lock(&chan->mutex); -- cgit v1.2.3 From feeb0aecfb73b5b7699c0a85ba1650e6f9c50be2 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 18 Oct 2010 02:58:04 +0200 Subject: drm/nouveau: Add unlocked variants of nouveau_channel_get/put. 
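These helpers are for callers that need a channel reference but must not (or need not) take chan->mutex, for instance because of lock ordering. A minimal usage sketch, assuming the caller only needs the structure to stay valid rather than exclusive access to the channel ('src' is a hypothetical pointer the caller already holds):

        struct nouveau_channel *chan;

        /* Take a reference without acquiring chan->mutex; returns
         * NULL if the channel is already being torn down. */
        chan = nouveau_channel_get_unlocked(src);
        if (!chan)
                return;

        /* ... inspect channel state; no exclusive access implied ... */

        /* Drop the reference.  Unlike nouveau_channel_put(), this
         * does not unlock chan->mutex, because we never took it. */
        nouveau_channel_put_unlocked(&chan);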
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 36 ++++++++++++++++++++----------- drivers/gpu/drm/nouveau/nouveau_drv.h | 3 +++ 2 files changed, 26 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index c46a6f641964..38929fd3f4e3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -237,34 +237,40 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, return 0; } +struct nouveau_channel * +nouveau_channel_get_unlocked(struct nouveau_channel *ref) +{ + if (likely(ref && atomic_inc_not_zero(&ref->refcount))) + return ref; + + return NULL; +} + struct nouveau_channel * nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = ERR_PTR(-ENODEV); + struct nouveau_channel *chan; unsigned long flags; spin_lock_irqsave(&dev_priv->channels.lock, flags); - chan = dev_priv->channels.ptr[id]; + chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]); + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); - if (unlikely(!chan || (file_priv && chan->file_priv != file_priv))) { - spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + if (unlikely(!chan)) return ERR_PTR(-EINVAL); - } - if (unlikely(!atomic_inc_not_zero(&chan->refcount))) { - spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + if (unlikely(file_priv && chan->file_priv != file_priv)) { + nouveau_channel_put_unlocked(&chan); return ERR_PTR(-EINVAL); } - spin_unlock_irqrestore(&dev_priv->channels.lock, flags); - mutex_lock(&chan->mutex); return chan; } void -nouveau_channel_put(struct nouveau_channel **pchan) +nouveau_channel_put_unlocked(struct nouveau_channel **pchan) { struct nouveau_channel *chan = *pchan; struct drm_device *dev = chan->dev; @@ -274,9 +280,6 @@ nouveau_channel_put(struct nouveau_channel **pchan) unsigned long flags; int ret; - /* unlock the channel */ - mutex_unlock(&chan->mutex); - /* decrement the refcount, and we're done if there's still refs */ if (likely(!atomic_dec_and_test(&chan->refcount))) { *pchan = NULL; @@ -348,6 +351,13 @@ nouveau_channel_put(struct nouveau_channel **pchan) kfree(chan); } +void +nouveau_channel_put(struct nouveau_channel **pchan) +{ + mutex_unlock(&(*pchan)->mutex); + nouveau_channel_put_unlocked(pchan); +} + /* cleans up all the fifos from file_priv */ void nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 198dabebafb2..e6708be58bcf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -803,7 +803,10 @@ extern int nouveau_channel_alloc(struct drm_device *dev, struct drm_file *file_priv, uint32_t fb_ctxdma, uint32_t tt_ctxdma); extern struct nouveau_channel * +nouveau_channel_get_unlocked(struct nouveau_channel *); +extern struct nouveau_channel * nouveau_channel_get(struct drm_device *, struct drm_file *, int id); +extern void nouveau_channel_put_unlocked(struct nouveau_channel **); extern void nouveau_channel_put(struct nouveau_channel **); /* nouveau_object.c */ -- cgit v1.2.3 From 36c952e8b3bad911ef13111058f0a5c4b361027e Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 18 Oct 2010 03:01:34 +0200 Subject: drm/nouveau: Fix lock unbalance on card take down. 
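nouveau_channel_put() pairs the reference drop with a mutex_unlock() of the channel mutex, which nouveau_card_takedown() never acquired (nouveau_card_init_channel() drops it before returning), so calling it from the takedown path unlocks a mutex we do not own. In sketch form the two put variants differ only in that unlock:

void
nouveau_channel_put(struct nouveau_channel **pchan)
{
        /* the caller owns the channel mutex: release it first */
        mutex_unlock(&(*pchan)->mutex);
        nouveau_channel_put_unlocked(pchan);
}

Hence the takedown path, which holds no such lock, must call nouveau_channel_put_unlocked() directly.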
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_state.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 1a4ba6ccafb1..af203cc5d7ad 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -714,7 +714,7 @@ static void nouveau_card_takedown(struct drm_device *dev) if (!engine->graph.accel_blocked) { nouveau_fence_fini(dev); - nouveau_channel_put(&dev_priv->channel); + nouveau_channel_put_unlocked(&dev_priv->channel); } if (!nouveau_noaccel) { -- cgit v1.2.3 From f091a3d403065416b7d27221bbeb956481132ffd Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 18 Oct 2010 03:55:48 +0200 Subject: drm/nouveau: Implement weak channel references. nouveau_channel_ref() takes a "weak" channel reference that doesn't prevent the hardware channel resources from being released, it just keeps the channel data structure alive. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 55 ++++++++++++++++++++++--------- drivers/gpu/drm/nouveau/nouveau_drv.h | 8 ++++- 2 files changed, 47 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 38929fd3f4e3..76033c509d35 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -125,7 +125,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, chan->vram_handle = vram_handle; chan->gart_handle = gart_handle; - atomic_set(&chan->refcount, 1); + kref_init(&chan->ref); + atomic_set(&chan->users, 1); mutex_init(&chan->mutex); mutex_lock(&chan->mutex); @@ -133,7 +134,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, spin_lock_irqsave(&dev_priv->channels.lock, flags); for (chan->id = 0; chan->id < pfifo->channels; chan->id++) { if (!dev_priv->channels.ptr[chan->id]) { - dev_priv->channels.ptr[chan->id] = chan; + nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]); break; } } @@ -240,10 +241,12 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, struct nouveau_channel * nouveau_channel_get_unlocked(struct nouveau_channel *ref) { - if (likely(ref && atomic_inc_not_zero(&ref->refcount))) - return ref; + struct nouveau_channel *chan = NULL; - return NULL; + if (likely(ref && atomic_inc_not_zero(&ref->users))) + nouveau_channel_ref(ref, &chan); + + return chan; } struct nouveau_channel * @@ -281,15 +284,14 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan) int ret; /* decrement the refcount, and we're done if there's still refs */ - if (likely(!atomic_dec_and_test(&chan->refcount))) { - *pchan = NULL; + if (likely(!atomic_dec_and_test(&chan->users))) { + nouveau_channel_ref(NULL, pchan); return; } /* noone wants the channel anymore */ NV_DEBUG(dev, "freeing channel %d\n", chan->id); nouveau_debugfs_channel_fini(chan); - *pchan = NULL; /* give it chance to idle */ nouveau_fence_update(chan); @@ -333,7 +335,7 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan) * remove it from the channel list */ spin_lock_irqsave(&dev_priv->channels.lock, flags); - dev_priv->channels.ptr[chan->id] = NULL; + nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]); spin_unlock_irqrestore(&dev_priv->channels.lock, flags); /* destroy any resources the channel owned */ @@ -345,10 +347,8 @@ 
nouveau_channel_put_unlocked(struct nouveau_channel **pchan) } nouveau_gpuobj_channel_takedown(chan); nouveau_notifier_takedown_channel(chan); - if (chan->user) - iounmap(chan->user); - kfree(chan); + nouveau_channel_ref(NULL, pchan); } void @@ -358,6 +358,31 @@ nouveau_channel_put(struct nouveau_channel **pchan) nouveau_channel_put_unlocked(pchan); } +static void +nouveau_channel_del(struct kref *ref) +{ + struct nouveau_channel *chan = + container_of(ref, struct nouveau_channel, ref); + + if (chan->user) + iounmap(chan->user); + + kfree(chan); +} + +void +nouveau_channel_ref(struct nouveau_channel *chan, + struct nouveau_channel **pchan) +{ + if (chan) + kref_get(&chan->ref); + + if (*pchan) + kref_put(&(*pchan)->ref, nouveau_channel_del); + + *pchan = chan; +} + /* cleans up all the fifos from file_priv */ void nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) @@ -373,7 +398,7 @@ nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) if (IS_ERR(chan)) continue; - atomic_dec(&chan->refcount); + atomic_dec(&chan->users); nouveau_channel_put(&chan); } } @@ -427,7 +452,7 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, &init->notifier_handle); if (ret == 0) - atomic_inc(&chan->refcount); /* userspace reference */ + atomic_inc(&chan->users); /* userspace reference */ nouveau_channel_put(&chan); return ret; } @@ -443,7 +468,7 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, if (IS_ERR(chan)) return PTR_ERR(chan); - atomic_dec(&chan->refcount); + atomic_dec(&chan->users); nouveau_channel_put(&chan); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e6708be58bcf..d152bc334fd6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -165,7 +165,11 @@ struct nouveau_channel { struct drm_device *dev; int id; - atomic_t refcount; + /* references to the channel data structure */ + struct kref ref; + /* users of the hardware channel resources, the hardware + * context will be kicked off when it reaches zero. */ + atomic_t users; struct mutex mutex; /* owner of this fifo */ @@ -808,6 +812,8 @@ extern struct nouveau_channel * nouveau_channel_get(struct drm_device *, struct drm_file *, int id); extern void nouveau_channel_put_unlocked(struct nouveau_channel **); extern void nouveau_channel_put(struct nouveau_channel **); +extern void nouveau_channel_ref(struct nouveau_channel *chan, + struct nouveau_channel **pchan); /* nouveau_object.c */ extern int nouveau_gpuobj_early_init(struct drm_device *); -- cgit v1.2.3 From 2a6789ae5e8a6b25a0835834655205166a757a81 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 18 Oct 2010 03:56:14 +0200 Subject: drm/nouveau: Make fences take a weak channel reference. Fences didn't increment the channel reference count, and the fenced channel could go away at any time. Fixes a potential race in nouveau_fence_update(). 
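With the nouveau_channel_ref() helper from the previous patch this amounts to two lines, sketched here from the hunks below:

        /* fence creation: pin the channel *structure* (chan->ref),
         * not the hardware context (chan->users) */
        nouveau_channel_ref(chan, &fence->channel);

        /* fence destruction: kref_put via the same helper */
        nouveau_channel_ref(NULL, &fence->channel);

The kref keeps struct nouveau_channel readable for as long as any fence points at it, even after chan->users reaches zero and the hardware context is torn down.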
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_fence.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 0a22955998e8..e1ae9bab6e41 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -64,6 +64,7 @@ nouveau_fence_del(struct kref *ref) struct nouveau_fence *fence = container_of(ref, struct nouveau_fence, refcount); + nouveau_channel_ref(NULL, &fence->channel); kfree(fence); } @@ -113,7 +114,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence, if (!fence) return -ENOMEM; kref_init(&fence->refcount); - fence->channel = chan; + nouveau_channel_ref(chan, &fence->channel); if (emit) ret = nouveau_fence_emit(fence); -- cgit v1.2.3 From 2b478addc0bfaaf5031e36ee166c9457ceae628c Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 18 Oct 2010 03:56:40 +0200 Subject: drm/nouveau: Avoid race in the interchannel sync code. It needs a "strong" channel reference because it actually writes to the channel pushbuf, otherwise the corresponding FIFO context could get kicked off in the middle of nouveau_fence_sync(). Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_fence.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index e1ae9bab6e41..29fe03bc4870 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -128,7 +128,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence, struct nouveau_channel * nouveau_fence_channel(struct nouveau_fence *fence) { - return fence ? fence->channel : NULL; + return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL; } int @@ -381,17 +381,18 @@ nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan = nouveau_fence_channel(fence); struct drm_device *dev = wchan->dev; struct nouveau_semaphore *sema; - int ret; + int ret = 0; - if (likely(!fence || chan == wchan || + if (likely(!chan || chan == wchan || nouveau_fence_signalled(fence, NULL))) - return 0; + goto out; sema = alloc_semaphore(dev); if (!sema) { /* Early card or broken userspace, fall back to * software sync. */ - return nouveau_fence_wait(fence, NULL, true, false); + ret = nouveau_fence_wait(fence, NULL, true, false); + goto out; } /* try to take chan's mutex, if we can't take it right away @@ -399,20 +400,25 @@ nouveau_fence_sync(struct nouveau_fence *fence, * order issues */ if (!mutex_trylock(&chan->mutex)) { - free_semaphore(&sema->ref); - return nouveau_fence_wait(fence, NULL, true, false); + ret = nouveau_fence_wait(fence, NULL, true, false); + goto out_unref; } /* Make wchan wait until it gets signalled */ ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema); if (ret) - goto out; + goto out_unlock; /* Signal the semaphore from chan */ ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema); + +out_unlock: mutex_unlock(&chan->mutex); -out: +out_unref: kref_put(&sema->ref, free_semaphore); +out: + if (chan) + nouveau_channel_put_unlocked(&chan); return ret; } -- cgit v1.2.3 From 889fa93dc0ab94b9255e9dd8e2036facfee5485e Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 18 Oct 2010 03:57:19 +0200 Subject: drm/nouveau: Take fence spinlock in nouveau_fence_channel_fini(). 
Without it there's a potential race with nouveau_fence_update(). Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_fence.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 29fe03bc4870..f70bec835f5f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -490,6 +490,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan) { struct nouveau_fence *tmp, *fence; + spin_lock(&chan->fence.lock); + list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { fence->signalled = true; list_del(&fence->entry); @@ -499,6 +501,8 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan) kref_put(&fence->refcount, nouveau_fence_del); } + + spin_unlock(&chan->fence.lock); } int -- cgit v1.2.3 From 0541324abce0225a795222558fdfe35c8dbc5b4f Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 18 Oct 2010 16:15:15 +0200 Subject: drm/nv40: Ignore sync-to-vblank active when waiting for idle. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_reg.h | 1 + drivers/gpu/drm/nouveau/nouveau_state.c | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index 1b42541ca9e5..1bbe7037cf99 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -332,6 +332,7 @@ #define NV04_PGRAPH_BSWIZZLE5 0x004006A0 #define NV03_PGRAPH_STATUS 0x004006B0 #define NV04_PGRAPH_STATUS 0x00400700 +# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000 #define NV04_PGRAPH_TRAPPED_ADDR 0x00400704 #define NV04_PGRAPH_TRAPPED_DATA 0x00400708 #define NV04_PGRAPH_SURFACE 0x0040070C diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index af203cc5d7ad..82b58188398b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -1105,7 +1105,13 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout, /* Waits for PGRAPH to go completely idle */ bool nouveau_wait_for_idle(struct drm_device *dev) { - if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t mask = ~0; + + if (dev_priv->card_type == NV_40) + mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL; + + if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) { NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n", nv_rd32(dev, NV04_PGRAPH_STATUS)); return false; -- cgit v1.2.3 From 9100468d1be26063aa25ecd667ea922c101d203f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 15 Oct 2010 09:15:26 +1000 Subject: drm/nouveau: pass gpuobj alignment request down into backing allocator Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 8 ++++---- drivers/gpu/drm/nouveau/nouveau_object.c | 2 +- drivers/gpu/drm/nouveau/nv04_instmem.c | 2 +- drivers/gpu/drm/nouveau/nv50_instmem.c | 10 +++++----- drivers/gpu/drm/nouveau/nvc0_instmem.c | 6 +++--- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index d152bc334fd6..1d474f526986 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -265,7 +265,7 @@ struct nouveau_instmem_engine { void (*resume)(struct drm_device *dev); int (*populate)(struct drm_device *, struct nouveau_gpuobj *, 
- uint32_t *size); + u32 *size, u32 align); void (*clear)(struct drm_device *, struct nouveau_gpuobj *); int (*bind)(struct drm_device *, struct nouveau_gpuobj *); int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); @@ -1121,7 +1121,7 @@ extern void nv04_instmem_takedown(struct drm_device *); extern int nv04_instmem_suspend(struct drm_device *); extern void nv04_instmem_resume(struct drm_device *); extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, - uint32_t *size); + u32 *size, u32 align); extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); @@ -1133,7 +1133,7 @@ extern void nv50_instmem_takedown(struct drm_device *); extern int nv50_instmem_suspend(struct drm_device *); extern void nv50_instmem_resume(struct drm_device *); extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, - uint32_t *size); + u32 *size, u32 align); extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); @@ -1147,7 +1147,7 @@ extern void nvc0_instmem_takedown(struct drm_device *); extern int nvc0_instmem_suspend(struct drm_device *); extern void nvc0_instmem_resume(struct drm_device *); extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, - uint32_t *size); + u32 *size, u32 align); extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index ce9958a30175..0b8183edfcd7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -112,7 +112,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, NV_DEBUG(dev, "global heap\n"); /* allocate backing pages, sets vinst */ - ret = engine->instmem.populate(dev, gpuobj, &size); + ret = engine->instmem.populate(dev, gpuobj, &size, align); if (ret) { nouveau_gpuobj_ref(NULL, &gpuobj); return ret; diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index 0b5ae297abde..554e55d0ec48 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c @@ -99,7 +99,7 @@ nv04_instmem_takedown(struct drm_device *dev) int nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, - uint32_t *sz) + u32 *size, u32 align) { return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 0651e7629235..2c98eb176d64 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -322,19 +322,19 @@ nv50_instmem_resume(struct drm_device *dev) int nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, - uint32_t *sz) + u32 *size, u32 align) { int ret; if (gpuobj->im_backing) return -EINVAL; - *sz = ALIGN(*sz, 4096); - if (*sz == 0) + *size = ALIGN(*size, 4096); + if (*size == 0) return -EINVAL; - ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000, - true, false, &gpuobj->im_backing); + ret = nouveau_bo_new(dev, NULL, 
*size, align, TTM_PL_FLAG_VRAM, + 0, 0x0000, true, false, &gpuobj->im_backing); if (ret) { NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); return ret; diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 13a0f78a9088..7b4e71f5c274 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c @@ -28,7 +28,7 @@ int nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, - uint32_t *size) + u32 *size, u32 align) { int ret; @@ -36,8 +36,8 @@ nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, if (*size == 0) return -EINVAL; - ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000, - true, false, &gpuobj->im_backing); + ret = nouveau_bo_new(dev, NULL, *size, align, TTM_PL_FLAG_VRAM, + 0, 0x0000, true, false, &gpuobj->im_backing); if (ret) { NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); return ret; -- cgit v1.2.3 From 50536946faaf3d9ac0245838eb09e5eb1065b06c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 19 Oct 2010 19:47:06 +1000 Subject: drm/nouveau: store engine type in gpuobj class structs We will eventually want to address hw engines other than PGRAPH. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 4 +- drivers/gpu/drm/nouveau/nouveau_object.c | 2 +- drivers/gpu/drm/nouveau/nv04_graph.c | 84 ++++++++++++++++---------------- drivers/gpu/drm/nouveau/nv10_graph.c | 40 +++++++-------- drivers/gpu/drm/nouveau/nv20_graph.c | 70 +++++++++++++------------- drivers/gpu/drm/nouveau/nv40_graph.c | 34 ++++++------- drivers/gpu/drm/nouveau/nv50_graph.c | 20 ++++---- 7 files changed, 127 insertions(+), 127 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 1d474f526986..2fdc26655ca1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -132,7 +132,7 @@ enum nouveau_flags { #define NVOBJ_ENGINE_SW 0 #define NVOBJ_ENGINE_GR 1 -#define NVOBJ_ENGINE_DISPLAY 2 +#define NVOBJ_ENGINE_DISPLAY 0xcafe0001 #define NVOBJ_ENGINE_INT 0xdeadbeef #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) @@ -324,7 +324,7 @@ struct nouveau_pgraph_object_method { struct nouveau_pgraph_object_class { int id; - bool software; + u32 engine; struct nouveau_pgraph_object_method *methods; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 0b8183edfcd7..70ffd7530503 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -900,7 +900,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, goto out; } - if (!grc->software) + if (grc->engine != NVOBJ_ENGINE_SW) ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); else ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index 1e2ad3942330..5d8ab1b6b04a 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c @@ -1091,48 +1091,48 @@ static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = { }; struct nouveau_pgraph_object_class nv04_graph_grclass[] = { - { 0x0038, false, NULL }, /* dvd subpicture */ - { 0x0039, false, NULL }, /* m2mf */ - { 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */ - { 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */ - { 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */ - { 
0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */ - { 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */ - { 0x0064, false, NULL }, /* nv05 iifc */ - { 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */ - { 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */ - { 0x0065, false, NULL }, /* nv05 ifc */ - { 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */ - { 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */ - { 0x0066, false, NULL }, /* nv05 sifc */ - { 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */ - { 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */ - { 0x0030, false, NULL }, /* null */ - { 0x0042, false, NULL }, /* surf2d */ - { 0x0043, false, NULL }, /* rop */ - { 0x0012, false, NULL }, /* beta1 */ - { 0x0072, false, NULL }, /* beta4 */ - { 0x0019, false, NULL }, /* cliprect */ - { 0x0018, false, NULL }, /* nv01 pattern */ - { 0x0044, false, NULL }, /* nv04 pattern */ - { 0x0052, false, NULL }, /* swzsurf */ - { 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */ - { 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */ - { 0x0054, false, NULL }, /* tex_tri */ - { 0x0055, false, NULL }, /* multitex_tri */ - { 0x0017, false, NULL }, /* nv01 chroma */ - { 0x0057, false, NULL }, /* nv04 chroma */ - { 0x0058, false, NULL }, /* surf_dst */ - { 0x0059, false, NULL }, /* surf_src */ - { 0x005a, false, NULL }, /* surf_color */ - { 0x005b, false, NULL }, /* surf_zeta */ - { 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */ - { 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */ - { 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */ - { 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */ - { 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */ - { 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */ - { 0x506e, true, nv04_graph_mthds_sw }, + { 0x0038, NVOBJ_ENGINE_GR, NULL }, /* dvd subpicture */ + { 0x0039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ + { 0x004b, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */ + { 0x004a, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */ + { 0x001f, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */ + { 0x005f, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */ + { 0x0060, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */ + { 0x0064, NVOBJ_ENGINE_GR, NULL }, /* nv05 iifc */ + { 0x0021, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */ + { 0x0061, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */ + { 0x0065, NVOBJ_ENGINE_GR, NULL }, /* nv05 ifc */ + { 0x0036, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */ + { 0x0076, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */ + { 0x0066, NVOBJ_ENGINE_GR, NULL }, /* nv05 sifc */ + { 0x0037, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */ + { 0x0077, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */ + { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ + { 0x0042, NVOBJ_ENGINE_GR, NULL }, /* surf2d */ + { 0x0043, NVOBJ_ENGINE_GR, NULL }, /* rop */ + { 0x0012, NVOBJ_ENGINE_GR, NULL }, /* beta1 */ + { 0x0072, NVOBJ_ENGINE_GR, NULL }, /* beta4 */ + { 0x0019, NVOBJ_ENGINE_GR, NULL }, /* cliprect */ + { 0x0018, NVOBJ_ENGINE_GR, NULL }, /* nv01 pattern */ + { 0x0044, NVOBJ_ENGINE_GR, NULL }, /* nv04 pattern */ + { 0x0052, NVOBJ_ENGINE_GR, NULL }, /* swzsurf */ + { 0x0053, 
NVOBJ_ENGINE_GR, nv04_graph_mthds_surf3d }, /* surf3d */ + { 0x0048, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */ + { 0x0054, NVOBJ_ENGINE_GR, NULL }, /* tex_tri */ + { 0x0055, NVOBJ_ENGINE_GR, NULL }, /* multitex_tri */ + { 0x0017, NVOBJ_ENGINE_GR, NULL }, /* nv01 chroma */ + { 0x0057, NVOBJ_ENGINE_GR, NULL }, /* nv04 chroma */ + { 0x0058, NVOBJ_ENGINE_GR, NULL }, /* surf_dst */ + { 0x0059, NVOBJ_ENGINE_GR, NULL }, /* surf_src */ + { 0x005a, NVOBJ_ENGINE_GR, NULL }, /* surf_color */ + { 0x005b, NVOBJ_ENGINE_GR, NULL }, /* surf_zeta */ + { 0x001c, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv01_shape }, /* nv01 line */ + { 0x005c, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_shape }, /* nv04 line */ + { 0x001d, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv01_shape }, /* nv01 tri */ + { 0x005d, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_shape }, /* nv04 tri */ + { 0x001e, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv01_shape }, /* nv01 rect */ + { 0x005e, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_shape }, /* nv04 rect */ + { 0x506e, NVOBJ_ENGINE_SW, nv04_graph_mthds_sw }, {} }; diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index e3a87a64c164..375d63161d12 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c @@ -1075,25 +1075,25 @@ static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = { }; struct nouveau_pgraph_object_class nv10_graph_grclass[] = { - { 0x0030, false, NULL }, /* null */ - { 0x0039, false, NULL }, /* m2mf */ - { 0x004a, false, NULL }, /* gdirect */ - { 0x005f, false, NULL }, /* imageblit */ - { 0x009f, false, NULL }, /* imageblit (nv12) */ - { 0x008a, false, NULL }, /* ifc */ - { 0x0089, false, NULL }, /* sifm */ - { 0x0062, false, NULL }, /* surf2d */ - { 0x0043, false, NULL }, /* rop */ - { 0x0012, false, NULL }, /* beta1 */ - { 0x0072, false, NULL }, /* beta4 */ - { 0x0019, false, NULL }, /* cliprect */ - { 0x0044, false, NULL }, /* pattern */ - { 0x0052, false, NULL }, /* swzsurf */ - { 0x0093, false, NULL }, /* surf3d */ - { 0x0094, false, NULL }, /* tex_tri */ - { 0x0095, false, NULL }, /* multitex_tri */ - { 0x0056, false, NULL }, /* celcius (nv10) */ - { 0x0096, false, NULL }, /* celcius (nv11) */ - { 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */ + { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ + { 0x0039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ + { 0x004a, NVOBJ_ENGINE_GR, NULL }, /* gdirect */ + { 0x005f, NVOBJ_ENGINE_GR, NULL }, /* imageblit */ + { 0x009f, NVOBJ_ENGINE_GR, NULL }, /* imageblit (nv12) */ + { 0x008a, NVOBJ_ENGINE_GR, NULL }, /* ifc */ + { 0x0089, NVOBJ_ENGINE_GR, NULL }, /* sifm */ + { 0x0062, NVOBJ_ENGINE_GR, NULL }, /* surf2d */ + { 0x0043, NVOBJ_ENGINE_GR, NULL }, /* rop */ + { 0x0012, NVOBJ_ENGINE_GR, NULL }, /* beta1 */ + { 0x0072, NVOBJ_ENGINE_GR, NULL }, /* beta4 */ + { 0x0019, NVOBJ_ENGINE_GR, NULL }, /* cliprect */ + { 0x0044, NVOBJ_ENGINE_GR, NULL }, /* pattern */ + { 0x0052, NVOBJ_ENGINE_GR, NULL }, /* swzsurf */ + { 0x0093, NVOBJ_ENGINE_GR, NULL }, /* surf3d */ + { 0x0094, NVOBJ_ENGINE_GR, NULL }, /* tex_tri */ + { 0x0095, NVOBJ_ENGINE_GR, NULL }, /* multitex_tri */ + { 0x0056, NVOBJ_ENGINE_GR, NULL }, /* celcius (nv10) */ + { 0x0096, NVOBJ_ENGINE_GR, NULL }, /* celcius (nv11) */ + { 0x0099, NVOBJ_ENGINE_GR, nv17_graph_celsius_mthds }, /* celcius (nv17) */ {} }; diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index 8a0402012557..109418d72f93 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ 
b/drivers/gpu/drm/nouveau/nv20_graph.c @@ -757,45 +757,45 @@ nv30_graph_init(struct drm_device *dev) } struct nouveau_pgraph_object_class nv20_graph_grclass[] = { - { 0x0030, false, NULL }, /* null */ - { 0x0039, false, NULL }, /* m2mf */ - { 0x004a, false, NULL }, /* gdirect */ - { 0x009f, false, NULL }, /* imageblit (nv12) */ - { 0x008a, false, NULL }, /* ifc */ - { 0x0089, false, NULL }, /* sifm */ - { 0x0062, false, NULL }, /* surf2d */ - { 0x0043, false, NULL }, /* rop */ - { 0x0012, false, NULL }, /* beta1 */ - { 0x0072, false, NULL }, /* beta4 */ - { 0x0019, false, NULL }, /* cliprect */ - { 0x0044, false, NULL }, /* pattern */ - { 0x009e, false, NULL }, /* swzsurf */ - { 0x0096, false, NULL }, /* celcius */ - { 0x0097, false, NULL }, /* kelvin (nv20) */ - { 0x0597, false, NULL }, /* kelvin (nv25) */ + { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ + { 0x0039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ + { 0x004a, NVOBJ_ENGINE_GR, NULL }, /* gdirect */ + { 0x009f, NVOBJ_ENGINE_GR, NULL }, /* imageblit (nv12) */ + { 0x008a, NVOBJ_ENGINE_GR, NULL }, /* ifc */ + { 0x0089, NVOBJ_ENGINE_GR, NULL }, /* sifm */ + { 0x0062, NVOBJ_ENGINE_GR, NULL }, /* surf2d */ + { 0x0043, NVOBJ_ENGINE_GR, NULL }, /* rop */ + { 0x0012, NVOBJ_ENGINE_GR, NULL }, /* beta1 */ + { 0x0072, NVOBJ_ENGINE_GR, NULL }, /* beta4 */ + { 0x0019, NVOBJ_ENGINE_GR, NULL }, /* cliprect */ + { 0x0044, NVOBJ_ENGINE_GR, NULL }, /* pattern */ + { 0x009e, NVOBJ_ENGINE_GR, NULL }, /* swzsurf */ + { 0x0096, NVOBJ_ENGINE_GR, NULL }, /* celcius */ + { 0x0097, NVOBJ_ENGINE_GR, NULL }, /* kelvin (nv20) */ + { 0x0597, NVOBJ_ENGINE_GR, NULL }, /* kelvin (nv25) */ {} }; struct nouveau_pgraph_object_class nv30_graph_grclass[] = { - { 0x0030, false, NULL }, /* null */ - { 0x0039, false, NULL }, /* m2mf */ - { 0x004a, false, NULL }, /* gdirect */ - { 0x009f, false, NULL }, /* imageblit (nv12) */ - { 0x008a, false, NULL }, /* ifc */ - { 0x038a, false, NULL }, /* ifc (nv30) */ - { 0x0089, false, NULL }, /* sifm */ - { 0x0389, false, NULL }, /* sifm (nv30) */ - { 0x0062, false, NULL }, /* surf2d */ - { 0x0362, false, NULL }, /* surf2d (nv30) */ - { 0x0043, false, NULL }, /* rop */ - { 0x0012, false, NULL }, /* beta1 */ - { 0x0072, false, NULL }, /* beta4 */ - { 0x0019, false, NULL }, /* cliprect */ - { 0x0044, false, NULL }, /* pattern */ - { 0x039e, false, NULL }, /* swzsurf */ - { 0x0397, false, NULL }, /* rankine (nv30) */ - { 0x0497, false, NULL }, /* rankine (nv35) */ - { 0x0697, false, NULL }, /* rankine (nv34) */ + { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ + { 0x0039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ + { 0x004a, NVOBJ_ENGINE_GR, NULL }, /* gdirect */ + { 0x009f, NVOBJ_ENGINE_GR, NULL }, /* imageblit (nv12) */ + { 0x008a, NVOBJ_ENGINE_GR, NULL }, /* ifc */ + { 0x038a, NVOBJ_ENGINE_GR, NULL }, /* ifc (nv30) */ + { 0x0089, NVOBJ_ENGINE_GR, NULL }, /* sifm */ + { 0x0389, NVOBJ_ENGINE_GR, NULL }, /* sifm (nv30) */ + { 0x0062, NVOBJ_ENGINE_GR, NULL }, /* surf2d */ + { 0x0362, NVOBJ_ENGINE_GR, NULL }, /* surf2d (nv30) */ + { 0x0043, NVOBJ_ENGINE_GR, NULL }, /* rop */ + { 0x0012, NVOBJ_ENGINE_GR, NULL }, /* beta1 */ + { 0x0072, NVOBJ_ENGINE_GR, NULL }, /* beta4 */ + { 0x0019, NVOBJ_ENGINE_GR, NULL }, /* cliprect */ + { 0x0044, NVOBJ_ENGINE_GR, NULL }, /* pattern */ + { 0x039e, NVOBJ_ENGINE_GR, NULL }, /* swzsurf */ + { 0x0397, NVOBJ_ENGINE_GR, NULL }, /* rankine (nv30) */ + { 0x0497, NVOBJ_ENGINE_GR, NULL }, /* rankine (nv35) */ + { 0x0697, NVOBJ_ENGINE_GR, NULL }, /* rankine (nv34) */ {} }; diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c 
b/drivers/gpu/drm/nouveau/nv40_graph.c index 70d97cde49d0..cd47760b2d0a 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -409,23 +409,23 @@ void nv40_graph_takedown(struct drm_device *dev) } struct nouveau_pgraph_object_class nv40_graph_grclass[] = { - { 0x0030, false, NULL }, /* null */ - { 0x0039, false, NULL }, /* m2mf */ - { 0x004a, false, NULL }, /* gdirect */ - { 0x009f, false, NULL }, /* imageblit (nv12) */ - { 0x008a, false, NULL }, /* ifc */ - { 0x0089, false, NULL }, /* sifm */ - { 0x3089, false, NULL }, /* sifm (nv40) */ - { 0x0062, false, NULL }, /* surf2d */ - { 0x3062, false, NULL }, /* surf2d (nv40) */ - { 0x0043, false, NULL }, /* rop */ - { 0x0012, false, NULL }, /* beta1 */ - { 0x0072, false, NULL }, /* beta4 */ - { 0x0019, false, NULL }, /* cliprect */ - { 0x0044, false, NULL }, /* pattern */ - { 0x309e, false, NULL }, /* swzsurf */ - { 0x4097, false, NULL }, /* curie (nv40) */ - { 0x4497, false, NULL }, /* curie (nv44) */ + { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ + { 0x0039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ + { 0x004a, NVOBJ_ENGINE_GR, NULL }, /* gdirect */ + { 0x009f, NVOBJ_ENGINE_GR, NULL }, /* imageblit (nv12) */ + { 0x008a, NVOBJ_ENGINE_GR, NULL }, /* ifc */ + { 0x0089, NVOBJ_ENGINE_GR, NULL }, /* sifm */ + { 0x3089, NVOBJ_ENGINE_GR, NULL }, /* sifm (nv40) */ + { 0x0062, NVOBJ_ENGINE_GR, NULL }, /* surf2d */ + { 0x3062, NVOBJ_ENGINE_GR, NULL }, /* surf2d (nv40) */ + { 0x0043, NVOBJ_ENGINE_GR, NULL }, /* rop */ + { 0x0012, NVOBJ_ENGINE_GR, NULL }, /* beta1 */ + { 0x0072, NVOBJ_ENGINE_GR, NULL }, /* beta4 */ + { 0x0019, NVOBJ_ENGINE_GR, NULL }, /* cliprect */ + { 0x0044, NVOBJ_ENGINE_GR, NULL }, /* pattern */ + { 0x309e, NVOBJ_ENGINE_GR, NULL }, /* swzsurf */ + { 0x4097, NVOBJ_ENGINE_GR, NULL }, /* curie (nv40) */ + { 0x4497, NVOBJ_ENGINE_GR, NULL }, /* curie (nv44) */ {} }; diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index dcc9175fb794..01a598917e3c 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -401,16 +401,16 @@ static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = { }; struct nouveau_pgraph_object_class nv50_graph_grclass[] = { - { 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */ - { 0x0030, false, NULL }, /* null */ - { 0x5039, false, NULL }, /* m2mf */ - { 0x502d, false, NULL }, /* 2d */ - { 0x50c0, false, NULL }, /* compute */ - { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */ - { 0x5097, false, NULL }, /* tesla (nv50) */ - { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */ - { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */ - { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ + { 0x506e, NVOBJ_ENGINE_SW, nv50_graph_nvsw_methods }, /* nvsw */ + { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ + { 0x5039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ + { 0x502d, NVOBJ_ENGINE_GR, NULL }, /* 2d */ + { 0x50c0, NVOBJ_ENGINE_GR, NULL }, /* compute */ + { 0x85c0, NVOBJ_ENGINE_GR, NULL }, /* compute (nva3, nva5, nva8) */ + { 0x5097, NVOBJ_ENGINE_GR, NULL }, /* tesla (nv50) */ + { 0x8297, NVOBJ_ENGINE_GR, NULL }, /* tesla (nv8x/nv9x) */ + { 0x8397, NVOBJ_ENGINE_GR, NULL }, /* tesla (nva0, nvaa, nvac) */ + { 0x8597, NVOBJ_ENGINE_GR, NULL }, /* tesla (nva3, nva5, nva8) */ {} }; -- cgit v1.2.3 From a6a1a38075661bec189f2bad7912f8861e6ce357 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 19 Oct 2010 19:57:34 +1000 Subject: drm/nouveau: use object class structs more extensively The structs 
themselves, as well as the non-sw object creation function are probably very misnamed now. That's a problem for later :) Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 - drivers/gpu/drm/nouveau/nouveau_fence.c | 2 +- drivers/gpu/drm/nouveau/nouveau_object.c | 97 ++++++++++++++++---------------- drivers/gpu/drm/nouveau/nv10_graph.c | 1 + drivers/gpu/drm/nouveau/nv20_graph.c | 2 + drivers/gpu/drm/nouveau/nv40_graph.c | 1 + 6 files changed, 53 insertions(+), 52 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 2fdc26655ca1..2099f04c0b0a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -842,8 +842,6 @@ extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *, uint32_t *o_ret); extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, struct nouveau_gpuobj **); -extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class, - struct nouveau_gpuobj **); extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, struct drm_file *); extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index f70bec835f5f..75ce1b45d8a4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -437,7 +437,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) int ret; /* Create an NV_SW object for various sync purposes */ - ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj); + ret = nouveau_gpuobj_gr_new(chan, NV_SW, &obj); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 70ffd7530503..9c26da4cdc00 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -495,23 +495,67 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, entry[5]: set to 0? 
*/ +static int +nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, + struct nouveau_gpuobj **gpuobj_ret) +{ + struct drm_nouveau_private *dev_priv; + struct nouveau_gpuobj *gpuobj; + + if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) + return -EINVAL; + dev_priv = chan->dev->dev_private; + + gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); + if (!gpuobj) + return -ENOMEM; + gpuobj->dev = chan->dev; + gpuobj->engine = NVOBJ_ENGINE_SW; + gpuobj->class = class; + kref_init(&gpuobj->refcount); + gpuobj->cinst = 0x40; + + spin_lock(&dev_priv->ramin_lock); + list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); + spin_unlock(&dev_priv->ramin_lock); + *gpuobj_ret = gpuobj; + return 0; +} + int nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, struct nouveau_gpuobj **gpuobj) { + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_pgraph_object_class *grc; struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); + grc = pgraph->grclass; + while (grc->id) { + if (grc->id == class) + break; + grc++; + } + + if (!grc->id) { + NV_ERROR(dev, "illegal object class: 0x%x\n", class); + return -EINVAL; + } + + if (grc->engine == NVOBJ_ENGINE_SW) + return nouveau_gpuobj_sw_new(chan, class, gpuobj); + ret = nouveau_gpuobj_new(dev, chan, nouveau_gpuobj_class_instmem_size(dev, class), 16, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, gpuobj); if (ret) { - NV_ERROR(dev, "Error creating gpuobj: %d\n", ret); + NV_ERROR(dev, "error creating gpuobj: %d\n", ret); return ret; } @@ -541,38 +585,11 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, } dev_priv->engine.instmem.flush(dev); - (*gpuobj)->engine = NVOBJ_ENGINE_GR; + (*gpuobj)->engine = grc->engine; (*gpuobj)->class = class; return 0; } -int -nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, - struct nouveau_gpuobj **gpuobj_ret) -{ - struct drm_nouveau_private *dev_priv; - struct nouveau_gpuobj *gpuobj; - - if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) - return -EINVAL; - dev_priv = chan->dev->dev_private; - - gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); - if (!gpuobj) - return -ENOMEM; - gpuobj->dev = chan->dev; - gpuobj->engine = NVOBJ_ENGINE_SW; - gpuobj->class = class; - kref_init(&gpuobj->refcount); - gpuobj->cinst = 0x40; - - spin_lock(&dev_priv->ramin_lock); - list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); - spin_unlock(&dev_priv->ramin_lock); - *gpuobj_ret = gpuobj; - return 0; -} - static int nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) { @@ -868,10 +885,7 @@ nouveau_gpuobj_resume(struct drm_device *dev) int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_grobj_alloc *init = data; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - struct nouveau_pgraph_object_class *grc; struct nouveau_gpuobj *gr = NULL; struct nouveau_channel *chan; int ret; @@ -879,18 +893,6 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, if (init->handle == ~0) return -EINVAL; - grc = pgraph->grclass; - while (grc->id) { - if (grc->id == init->class) - break; - grc++; - } - - if (!grc->id) { - NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class); - return -EPERM; - } - chan = nouveau_channel_get(dev, file_priv, init->channel); if (IS_ERR(chan)) 
return PTR_ERR(chan); @@ -900,10 +902,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, goto out; } - if (grc->engine != NVOBJ_ENGINE_SW) - ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr); - else - ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr); + ret = nouveau_gpuobj_gr_new(chan, init->class, &gr); if (ret) { NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", ret, init->channel, init->handle); diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index 375d63161d12..ed31a622889e 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c @@ -1075,6 +1075,7 @@ static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = { }; struct nouveau_pgraph_object_class nv10_graph_grclass[] = { + { 0x506e, NVOBJ_ENGINE_SW, NULL }, /* nvsw */ { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ { 0x0039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ { 0x004a, NVOBJ_ENGINE_GR, NULL }, /* gdirect */ diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index 109418d72f93..872f8d059694 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c @@ -757,6 +757,7 @@ nv30_graph_init(struct drm_device *dev) } struct nouveau_pgraph_object_class nv20_graph_grclass[] = { + { 0x506e, NVOBJ_ENGINE_SW, NULL }, /* nvsw */ { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ { 0x0039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ { 0x004a, NVOBJ_ENGINE_GR, NULL }, /* gdirect */ @@ -777,6 +778,7 @@ struct nouveau_pgraph_object_class nv20_graph_grclass[] = { }; struct nouveau_pgraph_object_class nv30_graph_grclass[] = { + { 0x506e, NVOBJ_ENGINE_SW, NULL }, /* nvsw */ { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ { 0x0039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ { 0x004a, NVOBJ_ENGINE_GR, NULL }, /* gdirect */ diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index cd47760b2d0a..70d957895cea 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -409,6 +409,7 @@ void nv40_graph_takedown(struct drm_device *dev) } struct nouveau_pgraph_object_class nv40_graph_grclass[] = { + { 0x506e, NVOBJ_ENGINE_SW, NULL }, /* nvsw */ { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ { 0x0039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ { 0x004a, NVOBJ_ENGINE_GR, NULL }, /* gdirect */ -- cgit v1.2.3 From b8c157d3a9a13871742c8a8d3d4598c3791ed5f5 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 20 Oct 2010 10:39:35 +1000 Subject: drm/nouveau: only expose the object classes that are supported by the chipset We previously added all the available classes for the entire generation, even though the objects wouldn't work on the hardware. 
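The mechanism, visible in the diff below: the per-generation grclass
tables are replaced by a dev_priv->classes list, and each chipset's
graph_register() function calls NVOBJ_CLASS()/NVOBJ_MTHD() for exactly the
objects that chipset implements. As an illustration only (the chipset cut
and class choice here are invented, not taken from the patch), a register
function might look like:

	static int nvxx_graph_register(struct drm_device *dev)
	{
		struct drm_nouveau_private *dev_priv = dev->dev_private;

		if (dev_priv->engine.graph.registered)
			return 0;

		NVOBJ_CLASS(dev, 0x506e, SW);	/* nvsw */
		NVOBJ_CLASS(dev, 0x0030, GR);	/* null */

		/* only chipsets that actually have the object expose it */
		if (dev_priv->chipset >= 0x17)
			NVOBJ_CLASS(dev, 0x0099, GR);	/* celsius (nv17) */

		dev_priv->engine.graph.registered = true;
		return 0;
	}

NVOBJ_CLASS() expands to nouveau_gpuobj_class_new() and returns from the
caller on allocation failure, which is why these register functions
return int.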
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 38 +-- drivers/gpu/drm/nouveau/nouveau_irq.c | 36 +-- drivers/gpu/drm/nouveau/nouveau_object.c | 112 ++++++- drivers/gpu/drm/nouveau/nouveau_state.c | 7 - drivers/gpu/drm/nouveau/nv04_graph.c | 530 ++++++++++++++++++------------- drivers/gpu/drm/nouveau/nv10_graph.c | 92 +++--- drivers/gpu/drm/nouveau/nv20_graph.c | 131 +++++--- drivers/gpu/drm/nouveau/nv40_graph.c | 63 ++-- drivers/gpu/drm/nouveau/nv50_graph.c | 91 ++++-- 9 files changed, 667 insertions(+), 433 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 2099f04c0b0a..3fb87995446b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -316,21 +316,9 @@ struct nouveau_fifo_engine { void (*tlb_flush)(struct drm_device *dev); }; -struct nouveau_pgraph_object_method { - int id; - int (*exec)(struct nouveau_channel *chan, int grclass, int mthd, - uint32_t data); -}; - -struct nouveau_pgraph_object_class { - int id; - u32 engine; - struct nouveau_pgraph_object_method *methods; -}; - struct nouveau_pgraph_engine { - struct nouveau_pgraph_object_class *grclass; bool accel_blocked; + bool registered; int grctx_size; /* NV2x/NV3x context table (0x400780) */ @@ -584,6 +572,7 @@ struct drm_nouveau_private { bool ramin_available; struct drm_mm ramin_heap; struct list_head gpuobj_list; + struct list_head classes; struct nouveau_bo *vga_ram; @@ -816,12 +805,29 @@ extern void nouveau_channel_ref(struct nouveau_channel *chan, struct nouveau_channel **pchan); /* nouveau_object.c */ +#define NVOBJ_CLASS(d,c,e) do { \ + int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \ + if (ret) \ + return ret; \ +} while(0) + +#define NVOBJ_MTHD(d,c,m,e) do { \ + int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \ + if (ret) \ + return ret; \ +} while(0) + extern int nouveau_gpuobj_early_init(struct drm_device *); extern int nouveau_gpuobj_init(struct drm_device *); extern void nouveau_gpuobj_takedown(struct drm_device *); extern int nouveau_gpuobj_suspend(struct drm_device *dev); extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev); extern void nouveau_gpuobj_resume(struct drm_device *dev); +extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng); +extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd, + int (*exec)(struct nouveau_channel *, + u32 class, u32 mthd, u32 data)); +extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32); extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, uint32_t vram_h, uint32_t tt_h); extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); @@ -1038,7 +1044,6 @@ extern int nvc0_fifo_load_context(struct nouveau_channel *); extern int nvc0_fifo_unload_context(struct drm_device *); /* nv04_graph.c */ -extern struct nouveau_pgraph_object_class nv04_graph_grclass[]; extern int nv04_graph_init(struct drm_device *); extern void nv04_graph_takedown(struct drm_device *); extern void nv04_graph_fifo_access(struct drm_device *, bool); @@ -1050,7 +1055,6 @@ extern int nv04_graph_unload_context(struct drm_device *); extern void nv04_graph_context_switch(struct drm_device *); /* nv10_graph.c */ -extern struct nouveau_pgraph_object_class nv10_graph_grclass[]; extern int nv10_graph_init(struct drm_device *); extern void nv10_graph_takedown(struct drm_device *); extern struct nouveau_channel *nv10_graph_channel(struct drm_device *); @@ -1063,8 +1067,6 @@ 
extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t, uint32_t, uint32_t); /* nv20_graph.c */ -extern struct nouveau_pgraph_object_class nv20_graph_grclass[]; -extern struct nouveau_pgraph_object_class nv30_graph_grclass[]; extern int nv20_graph_create_context(struct nouveau_channel *); extern void nv20_graph_destroy_context(struct nouveau_channel *); extern int nv20_graph_load_context(struct nouveau_channel *); @@ -1076,7 +1078,6 @@ extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t, uint32_t, uint32_t); /* nv40_graph.c */ -extern struct nouveau_pgraph_object_class nv40_graph_grclass[]; extern int nv40_graph_init(struct drm_device *); extern void nv40_graph_takedown(struct drm_device *); extern struct nouveau_channel *nv40_graph_channel(struct drm_device *); @@ -1089,7 +1090,6 @@ extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t, uint32_t, uint32_t); /* nv50_graph.c */ -extern struct nouveau_pgraph_object_class nv50_graph_grclass[]; extern int nv50_graph_init(struct drm_device *); extern void nv50_graph_takedown(struct drm_device *); extern void nv50_graph_fifo_access(struct drm_device *, bool); diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index c5e37bc17192..f09151d17297 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -80,33 +80,6 @@ nouveau_irq_uninstall(struct drm_device *dev) nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); } -static int -nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data) -{ - struct drm_nouveau_private *dev_priv = chan->dev->dev_private; - struct nouveau_pgraph_object_method *grm; - struct nouveau_pgraph_object_class *grc; - - grc = dev_priv->engine.graph.grclass; - while (grc->id) { - if (grc->id == class) - break; - grc++; - } - - if (grc->id != class || !grc->methods) - return -ENOENT; - - grm = grc->methods; - while (grm->id) { - if (grm->id == mthd) - return grm->exec(chan, class, mthd, data); - grm++; - } - - return -ENOENT; -} - static bool nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) { @@ -142,8 +115,8 @@ nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) if (unlikely(((engine >> (subc * 4)) & 0xf) != 0)) break; - if (!nouveau_call_method(chan, chan->sw_subchannel[subc], - mthd, data)) + if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc], + mthd, data)) handled = true; break; } @@ -541,6 +514,7 @@ nouveau_pgraph_intr_swmthd(struct drm_device *dev, struct nouveau_pgraph_trap *trap) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan; unsigned long flags; int ret = -EINVAL; @@ -548,8 +522,8 @@ nouveau_pgraph_intr_swmthd(struct drm_device *dev, if (trap->channel > 0 && trap->channel < dev_priv->engine.fifo.channels && dev_priv->channels.ptr[trap->channel]) { - ret = nouveau_call_method(dev_priv->channels.ptr[trap->channel], - trap->class, trap->mthd, trap->data); + chan = dev_priv->channels.ptr[trap->channel]; + ret = nouveau_gpuobj_mthd_call(chan, trap->class, trap->mthd, trap->data); } spin_unlock_irqrestore(&dev_priv->channels.lock, flags); diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 9c26da4cdc00..6226beb3613d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -36,6 +36,83 @@ #include "nouveau_drm.h" #include "nouveau_ramht.h" +struct nouveau_gpuobj_method { + 
struct list_head head; + u32 mthd; + int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data); +}; + +struct nouveau_gpuobj_class { + struct list_head head; + struct list_head methods; + u32 id; + u32 engine; +}; + +int +nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj_class *oc; + + oc = kzalloc(sizeof(*oc), GFP_KERNEL); + if (!oc) + return -ENOMEM; + + INIT_LIST_HEAD(&oc->methods); + oc->id = class; + oc->engine = engine; + list_add(&oc->head, &dev_priv->classes); + return 0; +} + +int +nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd, + int (*exec)(struct nouveau_channel *, u32, u32, u32)) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj_method *om; + struct nouveau_gpuobj_class *oc; + + list_for_each_entry(oc, &dev_priv->classes, head) { + if (oc->id == class) + goto found; + } + + return -EINVAL; + +found: + om = kzalloc(sizeof(*om), GFP_KERNEL); + if (!om) + return -ENOMEM; + + om->mthd = mthd; + om->exec = exec; + list_add(&om->head, &oc->methods); + return 0; +} + +int +nouveau_gpuobj_mthd_call(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct nouveau_gpuobj_method *om; + struct nouveau_gpuobj_class *oc; + + list_for_each_entry(oc, &dev_priv->classes, head) { + if (oc->id != class) + continue; + + list_for_each_entry(om, &oc->methods, head) { + if (om->mthd == mthd) + return om->exec(chan, class, mthd, data); + } + } + + return -ENOENT; +} + /* NVidia uses context objects to drive drawing operations. Context objects can be selected into 8 subchannels in the FIFO, @@ -205,9 +282,20 @@ void nouveau_gpuobj_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj_method *om, *tm; + struct nouveau_gpuobj_class *oc, *tc; NV_DEBUG(dev, "\n"); + list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) { + list_for_each_entry_safe(om, tm, &oc->methods, head) { + list_del(&om->head); + kfree(om); + } + list_del(&oc->head); + kfree(oc); + } + BUG_ON(!list_empty(&dev_priv->gpuobj_list)); } @@ -527,26 +615,22 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, struct nouveau_gpuobj **gpuobj) { struct drm_nouveau_private *dev_priv = chan->dev->dev_private; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - struct nouveau_pgraph_object_class *grc; struct drm_device *dev = chan->dev; + struct nouveau_gpuobj_class *oc; int ret; NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); - grc = pgraph->grclass; - while (grc->id) { - if (grc->id == class) - break; - grc++; + list_for_each_entry(oc, &dev_priv->classes, head) { + if (oc->id == class) + goto found; } - if (!grc->id) { - NV_ERROR(dev, "illegal object class: 0x%x\n", class); - return -EINVAL; - } + NV_ERROR(dev, "illegal object class: 0x%x\n", class); + return -EINVAL; - if (grc->engine == NVOBJ_ENGINE_SW) +found: + if (oc->engine == NVOBJ_ENGINE_SW) return nouveau_gpuobj_sw_new(chan, class, gpuobj); ret = nouveau_gpuobj_new(dev, chan, @@ -585,8 +669,8 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, } dev_priv->engine.instmem.flush(dev); - (*gpuobj)->engine = grc->engine; - (*gpuobj)->class = class; + (*gpuobj)->engine = oc->engine; + (*gpuobj)->class = oc->id; return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 
82b58188398b..be28754ffd50 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -65,7 +65,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv04_fb_init; engine->fb.takedown = nv04_fb_takedown; - engine->graph.grclass = nv04_graph_grclass; engine->graph.init = nv04_graph_init; engine->graph.takedown = nv04_graph_takedown; engine->graph.fifo_access = nv04_graph_fifo_access; @@ -118,7 +117,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fb.init = nv10_fb_init; engine->fb.takedown = nv10_fb_takedown; engine->fb.set_region_tiling = nv10_fb_set_region_tiling; - engine->graph.grclass = nv10_graph_grclass; engine->graph.init = nv10_graph_init; engine->graph.takedown = nv10_graph_takedown; engine->graph.channel = nv10_graph_channel; @@ -172,7 +170,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fb.init = nv10_fb_init; engine->fb.takedown = nv10_fb_takedown; engine->fb.set_region_tiling = nv10_fb_set_region_tiling; - engine->graph.grclass = nv20_graph_grclass; engine->graph.init = nv20_graph_init; engine->graph.takedown = nv20_graph_takedown; engine->graph.channel = nv10_graph_channel; @@ -226,7 +223,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fb.init = nv30_fb_init; engine->fb.takedown = nv30_fb_takedown; engine->fb.set_region_tiling = nv10_fb_set_region_tiling; - engine->graph.grclass = nv30_graph_grclass; engine->graph.init = nv30_graph_init; engine->graph.takedown = nv20_graph_takedown; engine->graph.fifo_access = nv04_graph_fifo_access; @@ -283,7 +279,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fb.init = nv40_fb_init; engine->fb.takedown = nv40_fb_takedown; engine->fb.set_region_tiling = nv40_fb_set_region_tiling; - engine->graph.grclass = nv40_graph_grclass; engine->graph.init = nv40_graph_init; engine->graph.takedown = nv40_graph_takedown; engine->graph.fifo_access = nv04_graph_fifo_access; @@ -345,7 +340,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv50_fb_init; engine->fb.takedown = nv50_fb_takedown; - engine->graph.grclass = nv50_graph_grclass; engine->graph.init = nv50_graph_init; engine->graph.takedown = nv50_graph_takedown; engine->graph.fifo_access = nv50_graph_fifo_access; @@ -424,7 +418,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nvc0_fb_init; engine->fb.takedown = nvc0_fb_takedown; - engine->graph.grclass = NULL; //nvc0_graph_grclass; engine->graph.init = nvc0_graph_init; engine->graph.takedown = nvc0_graph_takedown; engine->graph.fifo_access = nvc0_graph_fifo_access; diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index 5d8ab1b6b04a..81aba097ef95 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c @@ -27,6 +27,8 @@ #include "nouveau_drm.h" #include "nouveau_drv.h" +static int nv04_graph_register(struct drm_device *dev); + static uint32_t nv04_graph_ctx_regs[] = { 0x0040053c, 0x00400544, @@ -483,12 +485,17 @@ int nv04_graph_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t tmp; + int ret; nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); + 
ret = nv04_graph_register(dev); + if (ret) + return ret; + /* Enable PGRAPH interrupts */ nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -539,8 +546,8 @@ nv04_graph_fifo_access(struct drm_device *dev, bool enabled) } static int -nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_set_ref(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { atomic_set(&chan->fence.last_sequence_irq, data); return 0; @@ -621,12 +628,12 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, */ static void -nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value) +nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value) { struct drm_device *dev = chan->dev; - uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; + u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7; - uint32_t tmp; + u32 tmp; tmp = nv_ri32(dev, instance); tmp &= ~mask; @@ -638,11 +645,11 @@ nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value) } static void -nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value) +nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value) { struct drm_device *dev = chan->dev; - uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; - uint32_t tmp, ctx1; + u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4; + u32 tmp, ctx1; int class, op, valid = 1; ctx1 = nv_ri32(dev, instance); @@ -687,13 +694,13 @@ nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t val } static int -nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_set_operation(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { if (data > 5) return 1; /* Old versions of the objects only accept first three operations. 
*/ - if (data > 2 && grclass < 0x40) + if (data > 2 && class < 0x40) return 1; nv04_graph_set_ctx1(chan, 0x00038000, data << 15); /* changing operation changes set of objects needed for validation */ @@ -702,8 +709,8 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { uint32_t min = data & 0xffff, max; uint32_t w = data >> 16; @@ -721,8 +728,8 @@ nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { uint32_t min = data & 0xffff, max; uint32_t w = data >> 16; @@ -740,8 +747,8 @@ nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -757,8 +764,8 @@ nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -778,8 +785,8 @@ nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -793,8 +800,8 @@ nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -808,8 +815,8 @@ nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -823,8 +830,8 @@ nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -838,8 +845,8 @@ nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -853,8 +860,8 @@ nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass, } static int 
-nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -868,8 +875,8 @@ nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -883,8 +890,8 @@ nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -898,8 +905,8 @@ nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -913,8 +920,8 @@ nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -928,8 +935,8 @@ nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass, } static int -nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { switch (nv_ri32(chan->dev, data << 4) & 0xff) { case 0x30: @@ -945,194 +952,259 @@ nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass, return 1; } -static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = { - { 0x0150, nv04_graph_mthd_set_ref }, - {} -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = { - { 0x0184, nv04_graph_mthd_bind_nv01_patt }, - { 0x0188, nv04_graph_mthd_bind_rop }, - { 0x018c, nv04_graph_mthd_bind_beta1 }, - { 0x0190, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = { - { 0x0188, nv04_graph_mthd_bind_nv04_patt }, - { 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = { - { 0x0184, nv04_graph_mthd_bind_chroma }, - { 0x0188, nv04_graph_mthd_bind_clip }, - { 0x018c, nv04_graph_mthd_bind_nv01_patt }, - { 0x0190, nv04_graph_mthd_bind_rop }, - { 0x0194, nv04_graph_mthd_bind_beta1 }, - { 0x0198, nv04_graph_mthd_bind_surf_dst }, - { 0x019c, nv04_graph_mthd_bind_surf_src }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = { - { 0x0184, nv04_graph_mthd_bind_chroma }, - { 0x0188, nv04_graph_mthd_bind_clip 
}, - { 0x018c, nv04_graph_mthd_bind_nv04_patt }, - { 0x0190, nv04_graph_mthd_bind_rop }, - { 0x0194, nv04_graph_mthd_bind_beta1 }, - { 0x0198, nv04_graph_mthd_bind_beta4 }, - { 0x019c, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = { - { 0x0188, nv04_graph_mthd_bind_chroma }, - { 0x018c, nv04_graph_mthd_bind_clip }, - { 0x0190, nv04_graph_mthd_bind_nv04_patt }, - { 0x0194, nv04_graph_mthd_bind_rop }, - { 0x0198, nv04_graph_mthd_bind_beta1 }, - { 0x019c, nv04_graph_mthd_bind_beta4 }, - { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf }, - { 0x03e4, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = { - { 0x0184, nv04_graph_mthd_bind_chroma }, - { 0x0188, nv04_graph_mthd_bind_clip }, - { 0x018c, nv04_graph_mthd_bind_nv01_patt }, - { 0x0190, nv04_graph_mthd_bind_rop }, - { 0x0194, nv04_graph_mthd_bind_beta1 }, - { 0x0198, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = { - { 0x0184, nv04_graph_mthd_bind_chroma }, - { 0x0188, nv04_graph_mthd_bind_nv01_patt }, - { 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = { - { 0x0184, nv04_graph_mthd_bind_chroma }, - { 0x0188, nv04_graph_mthd_bind_nv04_patt }, - { 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = { - { 0x0188, nv04_graph_mthd_bind_nv01_patt }, - { 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, nv04_graph_mthd_bind_surf_dst }, - { 0x0304, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = { - { 0x0188, nv04_graph_mthd_bind_nv04_patt }, - { 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf }, - { 0x0304, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = { - { 0x0184, nv04_graph_mthd_bind_clip }, - { 0x0188, nv04_graph_mthd_bind_nv01_patt }, - { 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, nv04_graph_mthd_bind_surf_dst }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = { - { 0x0184, nv04_graph_mthd_bind_clip }, - { 0x0188, nv04_graph_mthd_bind_nv04_patt }, - { 0x018c, nv04_graph_mthd_bind_rop }, - { 0x0190, nv04_graph_mthd_bind_beta1 }, - { 0x0194, nv04_graph_mthd_bind_beta4 }, - { 0x0198, nv04_graph_mthd_bind_surf2d }, - { 0x02fc, nv04_graph_mthd_set_operation }, - {}, -}; - -static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = { - { 0x0188, nv04_graph_mthd_bind_clip }, - { 0x018c, nv04_graph_mthd_bind_surf_color }, - { 0x0190, nv04_graph_mthd_bind_surf_zeta }, - {}, -}; +static int +nv04_graph_register(struct drm_device *dev) +{ + struct drm_nouveau_private 
*dev_priv = dev->dev_private; -static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = { - { 0x02f8, nv04_graph_mthd_surf3d_clip_h }, - { 0x02fc, nv04_graph_mthd_surf3d_clip_v }, - {}, -}; + if (dev_priv->engine.graph.registered) + return 0; -struct nouveau_pgraph_object_class nv04_graph_grclass[] = { - { 0x0038, NVOBJ_ENGINE_GR, NULL }, /* dvd subpicture */ - { 0x0039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ - { 0x004b, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */ - { 0x004a, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */ - { 0x001f, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */ - { 0x005f, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */ - { 0x0060, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */ - { 0x0064, NVOBJ_ENGINE_GR, NULL }, /* nv05 iifc */ - { 0x0021, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */ - { 0x0061, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */ - { 0x0065, NVOBJ_ENGINE_GR, NULL }, /* nv05 ifc */ - { 0x0036, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */ - { 0x0076, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */ - { 0x0066, NVOBJ_ENGINE_GR, NULL }, /* nv05 sifc */ - { 0x0037, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */ - { 0x0077, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */ - { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ - { 0x0042, NVOBJ_ENGINE_GR, NULL }, /* surf2d */ - { 0x0043, NVOBJ_ENGINE_GR, NULL }, /* rop */ - { 0x0012, NVOBJ_ENGINE_GR, NULL }, /* beta1 */ - { 0x0072, NVOBJ_ENGINE_GR, NULL }, /* beta4 */ - { 0x0019, NVOBJ_ENGINE_GR, NULL }, /* cliprect */ - { 0x0018, NVOBJ_ENGINE_GR, NULL }, /* nv01 pattern */ - { 0x0044, NVOBJ_ENGINE_GR, NULL }, /* nv04 pattern */ - { 0x0052, NVOBJ_ENGINE_GR, NULL }, /* swzsurf */ - { 0x0053, NVOBJ_ENGINE_GR, nv04_graph_mthds_surf3d }, /* surf3d */ - { 0x0048, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */ - { 0x0054, NVOBJ_ENGINE_GR, NULL }, /* tex_tri */ - { 0x0055, NVOBJ_ENGINE_GR, NULL }, /* multitex_tri */ - { 0x0017, NVOBJ_ENGINE_GR, NULL }, /* nv01 chroma */ - { 0x0057, NVOBJ_ENGINE_GR, NULL }, /* nv04 chroma */ - { 0x0058, NVOBJ_ENGINE_GR, NULL }, /* surf_dst */ - { 0x0059, NVOBJ_ENGINE_GR, NULL }, /* surf_src */ - { 0x005a, NVOBJ_ENGINE_GR, NULL }, /* surf_color */ - { 0x005b, NVOBJ_ENGINE_GR, NULL }, /* surf_zeta */ - { 0x001c, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv01_shape }, /* nv01 line */ - { 0x005c, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_shape }, /* nv04 line */ - { 0x001d, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv01_shape }, /* nv01 tri */ - { 0x005d, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_shape }, /* nv04 tri */ - { 0x001e, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv01_shape }, /* nv01 rect */ - { 0x005e, NVOBJ_ENGINE_GR, nv04_graph_mthds_nv04_shape }, /* nv04 rect */ - { 0x506e, NVOBJ_ENGINE_SW, nv04_graph_mthds_sw }, - {} + /* dvd subpicture */ + NVOBJ_CLASS(dev, 0x0038, GR); + + /* m2mf */ + NVOBJ_CLASS(dev, 0x0039, GR); + + /* nv03 gdirect */ + NVOBJ_CLASS(dev, 0x004b, GR); + NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 gdirect */ + NVOBJ_CLASS(dev, 0x004a, GR); + NVOBJ_MTHD (dev, 
0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d); + NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv01 imageblit */ + NVOBJ_CLASS(dev, 0x001f, GR); + NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma); + NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src); + NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 imageblit */ + NVOBJ_CLASS(dev, 0x005f, GR); + NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma); + NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d); + NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 iifc */ + NVOBJ_CLASS(dev, 0x0060, GR); + NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma); + NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf); + NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation); + + /* nv05 iifc */ + NVOBJ_CLASS(dev, 0x0064, GR); + + /* nv01 ifc */ + NVOBJ_CLASS(dev, 0x0021, GR); + NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma); + NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 ifc */ + NVOBJ_CLASS(dev, 0x0061, GR); + NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma); + NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d); + NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv05 ifc */ + NVOBJ_CLASS(dev, 0x0065, GR); + + /* nv03 sifc */ + NVOBJ_CLASS(dev, 0x0036, GR); + NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma); + NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 
0x0036, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 sifc */ + NVOBJ_CLASS(dev, 0x0076, GR); + NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma); + NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d); + NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv05 sifc */ + NVOBJ_CLASS(dev, 0x0066, GR); + + /* nv03 sifm */ + NVOBJ_CLASS(dev, 0x0037, GR); + NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation); + + /* nv04 sifm */ + NVOBJ_CLASS(dev, 0x0077, GR); + NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf); + NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation); + + /* null */ + NVOBJ_CLASS(dev, 0x0030, GR); + + /* surf2d */ + NVOBJ_CLASS(dev, 0x0042, GR); + + /* rop */ + NVOBJ_CLASS(dev, 0x0043, GR); + + /* beta1 */ + NVOBJ_CLASS(dev, 0x0012, GR); + + /* beta4 */ + NVOBJ_CLASS(dev, 0x0072, GR); + + /* cliprect */ + NVOBJ_CLASS(dev, 0x0019, GR); + + /* nv01 pattern */ + NVOBJ_CLASS(dev, 0x0018, GR); + + /* nv04 pattern */ + NVOBJ_CLASS(dev, 0x0044, GR); + + /* swzsurf */ + NVOBJ_CLASS(dev, 0x0052, GR); + + /* surf3d */ + NVOBJ_CLASS(dev, 0x0053, GR); + NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h); + NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v); + + /* nv03 tex_tri */ + NVOBJ_CLASS(dev, 0x0048, GR); + NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color); + NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta); + + /* tex_tri */ + NVOBJ_CLASS(dev, 0x0054, GR); + + /* multitex_tri */ + NVOBJ_CLASS(dev, 0x0055, GR); + + /* nv01 chroma */ + NVOBJ_CLASS(dev, 0x0017, GR); + + /* nv04 chroma */ + NVOBJ_CLASS(dev, 0x0057, GR); + + /* surf_dst */ + NVOBJ_CLASS(dev, 0x0058, GR); + + /* surf_src */ + NVOBJ_CLASS(dev, 0x0059, GR); + + /* surf_color */ + NVOBJ_CLASS(dev, 0x005a, GR); + + /* surf_zeta */ + NVOBJ_CLASS(dev, 0x005b, GR); + + /* nv01 line */ + NVOBJ_CLASS(dev, 0x001c, GR); + NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 line */ + NVOBJ_CLASS(dev, 0x005c, GR); + NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x005c, 0x018c, 
nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d); + NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv01 tri */ + NVOBJ_CLASS(dev, 0x001d, GR); + NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 tri */ + NVOBJ_CLASS(dev, 0x005d, GR); + NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d); + NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv01 rect */ + NVOBJ_CLASS(dev, 0x001e, GR); + NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt); + NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst); + NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation); + + /* nv04 rect */ + NVOBJ_CLASS(dev, 0x005e, GR); + NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip); + NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt); + NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop); + NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1); + NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4); + NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d); + NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation); + + /* nvsw */ + NVOBJ_CLASS(dev, 0x506e, SW); + NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref); + + dev_priv->engine.graph.registered = true; + return 0; }; - diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index ed31a622889e..17c20dbff232 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c @@ -27,6 +27,8 @@ #include "nouveau_drm.h" #include "nouveau_drv.h" +static int nv10_graph_register(struct drm_device *); + #define NV10_FIFO_NUMBER 32 struct pipe_state { @@ -914,13 +916,17 @@ int nv10_graph_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t tmp; - int i; + int ret, i; nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); + ret = nv10_graph_register(dev); + if (ret) + return ret; + nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -966,8 +972,8 @@ void nv10_graph_takedown(struct drm_device *dev) } static int -nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv17_graph_mthd_lma_window(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { struct drm_device *dev = chan->dev; struct graph_state 
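/*
 * Note the handler signature change running through this series: method
 * handlers move from (chan, int grclass, int mthd, uint32_t data) to the
 * fixed-width (chan, u32 class, u32 mthd, u32 data) form.  A minimal
 * handler in the new style (my_mthd is a hypothetical name, not part of
 * the patch):
 *
 *	static int
 *	my_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
 *	{
 *		return 0;
 *	}
 *
 * where a zero return marks the method as handled and a negative errno
 * (e.g. the -ERANGE in the nv50 vblsem handlers later in this series)
 * signals failure.
 */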
*ctx = chan->pgraph_ctx; @@ -1046,8 +1052,8 @@ nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass, } static int -nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -1065,36 +1071,48 @@ nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass, return 0; } -static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = { - { 0x1638, nv17_graph_mthd_lma_window }, - { 0x163c, nv17_graph_mthd_lma_window }, - { 0x1640, nv17_graph_mthd_lma_window }, - { 0x1644, nv17_graph_mthd_lma_window }, - { 0x1658, nv17_graph_mthd_lma_enable }, - {} -}; +static int +nv10_graph_register(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; -struct nouveau_pgraph_object_class nv10_graph_grclass[] = { - { 0x506e, NVOBJ_ENGINE_SW, NULL }, /* nvsw */ - { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ - { 0x0039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ - { 0x004a, NVOBJ_ENGINE_GR, NULL }, /* gdirect */ - { 0x005f, NVOBJ_ENGINE_GR, NULL }, /* imageblit */ - { 0x009f, NVOBJ_ENGINE_GR, NULL }, /* imageblit (nv12) */ - { 0x008a, NVOBJ_ENGINE_GR, NULL }, /* ifc */ - { 0x0089, NVOBJ_ENGINE_GR, NULL }, /* sifm */ - { 0x0062, NVOBJ_ENGINE_GR, NULL }, /* surf2d */ - { 0x0043, NVOBJ_ENGINE_GR, NULL }, /* rop */ - { 0x0012, NVOBJ_ENGINE_GR, NULL }, /* beta1 */ - { 0x0072, NVOBJ_ENGINE_GR, NULL }, /* beta4 */ - { 0x0019, NVOBJ_ENGINE_GR, NULL }, /* cliprect */ - { 0x0044, NVOBJ_ENGINE_GR, NULL }, /* pattern */ - { 0x0052, NVOBJ_ENGINE_GR, NULL }, /* swzsurf */ - { 0x0093, NVOBJ_ENGINE_GR, NULL }, /* surf3d */ - { 0x0094, NVOBJ_ENGINE_GR, NULL }, /* tex_tri */ - { 0x0095, NVOBJ_ENGINE_GR, NULL }, /* multitex_tri */ - { 0x0056, NVOBJ_ENGINE_GR, NULL }, /* celcius (nv10) */ - { 0x0096, NVOBJ_ENGINE_GR, NULL }, /* celcius (nv11) */ - { 0x0099, NVOBJ_ENGINE_GR, nv17_graph_celsius_mthds }, /* celcius (nv17) */ - {} -}; + if (dev_priv->engine.graph.registered) + return 0; + + NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ + NVOBJ_CLASS(dev, 0x0030, GR); /* null */ + NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ + NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ + NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */ + NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ + NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ + NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ + NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ + NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ + NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ + NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ + NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ + NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ + NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */ + NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */ + NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */ + NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */ + + /* celcius */ + if (dev_priv->chipset <= 0x10) { + NVOBJ_CLASS(dev, 0x0056, GR); + } else + if (dev_priv->chipset <= 0x17 || dev_priv->chipset == 0x1a) { + NVOBJ_CLASS(dev, 0x0096, GR); + } else { + NVOBJ_CLASS(dev, 0x0099, GR); + NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window); + NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window); + NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window); + NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window); + NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable); + } + + 
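/*
 * The registered flag set just below pairs with the early return at the
 * top of every *_graph_register() function in this series: graph init is
 * re-run on resume (see the engine->graph.init() call in the PCI resume
 * path further on), and the flag keeps repeated passes from adding the
 * same classes to the list twice.
 */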
dev_priv->engine.graph.registered = true; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index 872f8d059694..7720bccb3c98 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c @@ -32,6 +32,9 @@ #define NV34_GRCTX_SIZE (18140) #define NV35_36_GRCTX_SIZE (22396) +static int nv20_graph_register(struct drm_device *); +static int nv30_graph_register(struct drm_device *); + static void nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) { @@ -572,6 +575,12 @@ nv20_graph_init(struct drm_device *dev) nv20_graph_rdi(dev); + ret = nv20_graph_register(dev); + if (ret) { + nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); + return ret; + } + nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -696,6 +705,12 @@ nv30_graph_init(struct drm_device *dev) return ret; } + ret = nv30_graph_register(dev); + if (ret) { + nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); + return ret; + } + nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctx_table->pinst >> 4); @@ -756,48 +771,76 @@ nv30_graph_init(struct drm_device *dev) return 0; } -struct nouveau_pgraph_object_class nv20_graph_grclass[] = { - { 0x506e, NVOBJ_ENGINE_SW, NULL }, /* nvsw */ - { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ - { 0x0039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ - { 0x004a, NVOBJ_ENGINE_GR, NULL }, /* gdirect */ - { 0x009f, NVOBJ_ENGINE_GR, NULL }, /* imageblit (nv12) */ - { 0x008a, NVOBJ_ENGINE_GR, NULL }, /* ifc */ - { 0x0089, NVOBJ_ENGINE_GR, NULL }, /* sifm */ - { 0x0062, NVOBJ_ENGINE_GR, NULL }, /* surf2d */ - { 0x0043, NVOBJ_ENGINE_GR, NULL }, /* rop */ - { 0x0012, NVOBJ_ENGINE_GR, NULL }, /* beta1 */ - { 0x0072, NVOBJ_ENGINE_GR, NULL }, /* beta4 */ - { 0x0019, NVOBJ_ENGINE_GR, NULL }, /* cliprect */ - { 0x0044, NVOBJ_ENGINE_GR, NULL }, /* pattern */ - { 0x009e, NVOBJ_ENGINE_GR, NULL }, /* swzsurf */ - { 0x0096, NVOBJ_ENGINE_GR, NULL }, /* celcius */ - { 0x0097, NVOBJ_ENGINE_GR, NULL }, /* kelvin (nv20) */ - { 0x0597, NVOBJ_ENGINE_GR, NULL }, /* kelvin (nv25) */ - {} -}; - -struct nouveau_pgraph_object_class nv30_graph_grclass[] = { - { 0x506e, NVOBJ_ENGINE_SW, NULL }, /* nvsw */ - { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ - { 0x0039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ - { 0x004a, NVOBJ_ENGINE_GR, NULL }, /* gdirect */ - { 0x009f, NVOBJ_ENGINE_GR, NULL }, /* imageblit (nv12) */ - { 0x008a, NVOBJ_ENGINE_GR, NULL }, /* ifc */ - { 0x038a, NVOBJ_ENGINE_GR, NULL }, /* ifc (nv30) */ - { 0x0089, NVOBJ_ENGINE_GR, NULL }, /* sifm */ - { 0x0389, NVOBJ_ENGINE_GR, NULL }, /* sifm (nv30) */ - { 0x0062, NVOBJ_ENGINE_GR, NULL }, /* surf2d */ - { 0x0362, NVOBJ_ENGINE_GR, NULL }, /* surf2d (nv30) */ - { 0x0043, NVOBJ_ENGINE_GR, NULL }, /* rop */ - { 0x0012, NVOBJ_ENGINE_GR, NULL }, /* beta1 */ - { 0x0072, NVOBJ_ENGINE_GR, NULL }, /* beta4 */ - { 0x0019, NVOBJ_ENGINE_GR, NULL }, /* cliprect */ - { 0x0044, NVOBJ_ENGINE_GR, NULL }, /* pattern */ - { 0x039e, NVOBJ_ENGINE_GR, NULL }, /* swzsurf */ - { 0x0397, NVOBJ_ENGINE_GR, NULL }, /* rankine (nv30) */ - { 0x0497, NVOBJ_ENGINE_GR, NULL }, /* rankine (nv35) */ - { 0x0697, NVOBJ_ENGINE_GR, NULL }, /* rankine (nv34) */ - {} -}; +static int +nv20_graph_register(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->engine.graph.registered) + return 0; + NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ + NVOBJ_CLASS(dev, 0x0030, GR); /* null */ + NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ + NVOBJ_CLASS(dev, 
0x004a, GR); /* gdirect */ + NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ + NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ + NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ + NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ + NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ + NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ + NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ + NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ + NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ + NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */ + NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */ + + /* kelvin */ + if (dev_priv->chipset < 0x25) + NVOBJ_CLASS(dev, 0x0097, GR); + else + NVOBJ_CLASS(dev, 0x0597, GR); + + dev_priv->engine.graph.registered = true; + return 0; +} + +static int +nv30_graph_register(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->engine.graph.registered) + return 0; + + NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ + NVOBJ_CLASS(dev, 0x0030, GR); /* null */ + NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ + NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ + NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ + NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ + NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */ + NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ + NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */ + NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ + NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */ + NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ + NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ + NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ + NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ + NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ + NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */ + + /* rankine */ + if (0x00000003 & (1 << (dev_priv->chipset & 0x0f))) + NVOBJ_CLASS(dev, 0x0397, GR); + else + if (0x00000010 & (1 << (dev_priv->chipset & 0x0f))) + NVOBJ_CLASS(dev, 0x0697, GR); + else + if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f))) + NVOBJ_CLASS(dev, 0x0497, GR); + + dev_priv->engine.graph.registered = true; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 70d957895cea..b9361e28687c 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -29,6 +29,8 @@ #include "nouveau_drv.h" #include "nouveau_grctx.h" +static int nv40_graph_register(struct drm_device *); + struct nouveau_channel * nv40_graph_channel(struct drm_device *dev) { @@ -248,7 +250,7 @@ nv40_graph_init(struct drm_device *dev) struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; struct nouveau_grctx ctx = {}; uint32_t vramsz, *cp; - int i, j; + int ret, i, j; nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); @@ -272,6 +274,10 @@ nv40_graph_init(struct drm_device *dev) kfree(cp); + ret = nv40_graph_register(dev); + if (ret) + return ret; + /* No context present currently */ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); @@ -408,25 +414,38 @@ void nv40_graph_takedown(struct drm_device *dev) { } -struct nouveau_pgraph_object_class nv40_graph_grclass[] = { - { 0x506e, NVOBJ_ENGINE_SW, NULL }, /* nvsw */ - { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ - { 0x0039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ - { 0x004a, NVOBJ_ENGINE_GR, NULL }, /* gdirect */ - { 0x009f, NVOBJ_ENGINE_GR, NULL }, /* imageblit (nv12) */ - { 0x008a, NVOBJ_ENGINE_GR, NULL }, /* ifc */ - { 0x0089, NVOBJ_ENGINE_GR, NULL }, /* sifm */ - { 0x3089, NVOBJ_ENGINE_GR, NULL }, /* sifm (nv40) */ - { 0x0062, NVOBJ_ENGINE_GR, NULL }, /* surf2d */ - { 0x3062, NVOBJ_ENGINE_GR, NULL 
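/*
 * The chipset checks in these register functions, e.g.
 *
 *	if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
 *
 * use the low nibble of the chipset id as a bit index into a mask: bit n
 * set selects the chipset whose low nibble is n.  So 0x00000003 matches
 * nv30/nv31 (rankine 0x0397), 0x00000010 matches nv34 (0x0697),
 * 0x000001e0 matches nv35-nv38 (0x0497), and the nv40 mask 0x00005450
 * below matches 0x44/0x46/0x4a/0x4c/0x4e (curie 0x4497).  The old static
 * tables simply listed every variant side by side.
 */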
}, /* surf2d (nv40) */ - { 0x0043, NVOBJ_ENGINE_GR, NULL }, /* rop */ - { 0x0012, NVOBJ_ENGINE_GR, NULL }, /* beta1 */ - { 0x0072, NVOBJ_ENGINE_GR, NULL }, /* beta4 */ - { 0x0019, NVOBJ_ENGINE_GR, NULL }, /* cliprect */ - { 0x0044, NVOBJ_ENGINE_GR, NULL }, /* pattern */ - { 0x309e, NVOBJ_ENGINE_GR, NULL }, /* swzsurf */ - { 0x4097, NVOBJ_ENGINE_GR, NULL }, /* curie (nv40) */ - { 0x4497, NVOBJ_ENGINE_GR, NULL }, /* curie (nv44) */ - {} -}; +static int +nv40_graph_register(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->engine.graph.registered) + return 0; + NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ + NVOBJ_CLASS(dev, 0x0030, GR); /* null */ + NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */ + NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */ + NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */ + NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */ + NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */ + NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */ + NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */ + NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */ + NVOBJ_CLASS(dev, 0x0043, GR); /* rop */ + NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */ + NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */ + NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */ + NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */ + NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */ + + /* curie */ + if (dev_priv->chipset >= 0x60 || + 0x00005450 & (1 << (dev_priv->chipset & 0x0f))) + NVOBJ_CLASS(dev, 0x4497, GR); + else + NVOBJ_CLASS(dev, 0x4097, GR); + + dev_priv->engine.graph.registered = true; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 01a598917e3c..84ca90e91811 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -30,6 +30,8 @@ #include "nouveau_ramht.h" #include "nouveau_grctx.h" +static int nv50_graph_register(struct drm_device *); + static void nv50_graph_init_reset(struct drm_device *dev) { @@ -145,12 +147,15 @@ nv50_graph_init(struct drm_device *dev) nv50_graph_init_reset(dev); nv50_graph_init_regs__nv(dev); nv50_graph_init_regs(dev); - nv50_graph_init_intr(dev); ret = nv50_graph_init_ctxctl(dev); if (ret) return ret; + ret = nv50_graph_register(dev); + if (ret) + return ret; + nv50_graph_init_intr(dev); return 0; } @@ -333,8 +338,8 @@ nv50_graph_context_switch(struct drm_device *dev) } static int -nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { struct nouveau_gpuobj *gpuobj; @@ -351,8 +356,8 @@ nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass, } static int -nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { if (nouveau_notifier_offset(chan->nvsw.vblsem, &data)) return -ERANGE; @@ -362,16 +367,16 @@ nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass, } static int -nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) { chan->nvsw.vblsem_rval = data; return 0; } static int -nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass, - int mthd, uint32_t data) +nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 
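/*
 * The four NvSw methods being re-typed here (0x018c dma handle, 0x0400
 * notifier offset, 0x0404 release value, 0x0408 release trigger) form the
 * software vblank-semaphore interface: a client programs where and what
 * to write, then 0x0408 queues the release, presumably for the vblank
 * interrupt handler to complete; the handler bodies are mostly elided by
 * the hunk context here.
 */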
data) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -392,27 +397,53 @@ nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass, return 0; } -static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = { - { 0x018c, nv50_graph_nvsw_dma_vblsem }, - { 0x0400, nv50_graph_nvsw_vblsem_offset }, - { 0x0404, nv50_graph_nvsw_vblsem_release_val }, - { 0x0408, nv50_graph_nvsw_vblsem_release }, - {} -}; - -struct nouveau_pgraph_object_class nv50_graph_grclass[] = { - { 0x506e, NVOBJ_ENGINE_SW, nv50_graph_nvsw_methods }, /* nvsw */ - { 0x0030, NVOBJ_ENGINE_GR, NULL }, /* null */ - { 0x5039, NVOBJ_ENGINE_GR, NULL }, /* m2mf */ - { 0x502d, NVOBJ_ENGINE_GR, NULL }, /* 2d */ - { 0x50c0, NVOBJ_ENGINE_GR, NULL }, /* compute */ - { 0x85c0, NVOBJ_ENGINE_GR, NULL }, /* compute (nva3, nva5, nva8) */ - { 0x5097, NVOBJ_ENGINE_GR, NULL }, /* tesla (nv50) */ - { 0x8297, NVOBJ_ENGINE_GR, NULL }, /* tesla (nv8x/nv9x) */ - { 0x8397, NVOBJ_ENGINE_GR, NULL }, /* tesla (nva0, nvaa, nvac) */ - { 0x8597, NVOBJ_ENGINE_GR, NULL }, /* tesla (nva3, nva5, nva8) */ - {} -}; +static int +nv50_graph_register(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->engine.graph.registered) + return 0; + + NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ + NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem); + NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset); + NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val); + NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release); + + NVOBJ_CLASS(dev, 0x0030, GR); /* null */ + NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */ + NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */ + NVOBJ_CLASS(dev, 0x50c0, GR); /* compute */ + NVOBJ_CLASS(dev, 0x85c0, GR); /* compute (nva3, nva5, nva8) */ + + /* tesla */ + if (dev_priv->chipset == 0x50) + NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */ + else + if (dev_priv->chipset < 0xa0) + NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */ + else { + switch (dev_priv->chipset) { + case 0xa0: + case 0xaa: + case 0xac: + NVOBJ_CLASS(dev, 0x8397, GR); + break; + case 0xa3: + case 0xa5: + case 0xa8: + NVOBJ_CLASS(dev, 0x8597, GR); + break; + case 0xaf: + NVOBJ_CLASS(dev, 0x8697, GR); + break; + } + } + + dev_priv->engine.graph.registered = true; + return 0; +} void nv50_graph_tlb_flush(struct drm_device *dev) -- cgit v1.2.3 From bd2e597de8dbd000a3977871f15cb81e2925d63e Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 19 Oct 2010 20:06:01 +1000 Subject: drm/nv84: add support for the PCRYPT engine Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 1 + drivers/gpu/drm/nouveau/nouveau_channel.c | 12 ++++ drivers/gpu/drm/nouveau/nouveau_drv.c | 1 + drivers/gpu/drm/nouveau/nouveau_drv.h | 24 +++++++ drivers/gpu/drm/nouveau/nouveau_irq.c | 16 +++++ drivers/gpu/drm/nouveau/nouveau_object.c | 3 +- drivers/gpu/drm/nouveau/nouveau_state.c | 62 ++++++++++++++--- drivers/gpu/drm/nouveau/nv84_crypt.c | 110 ++++++++++++++++++++++++++++++ 8 files changed, 218 insertions(+), 11 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nv84_crypt.c diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 23fa82d667d6..a541f5bff75d 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -18,6 +18,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv04_graph.o nv10_graph.o nv20_graph.o \ nv40_graph.o nv50_graph.o nvc0_graph.o 
\ nv40_grctx.o nv50_grctx.o \ + nv84_crypt.o \ nv04_instmem.o nv50_instmem.o nvc0_instmem.o \ nv50_crtc.o nv50_dac.o nv50_sor.o \ nv50_cursor.o nv50_display.o nv50_fbcon.o \ diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 76033c509d35..8f2df6beb893 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -112,6 +112,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; struct nouveau_channel *chan; unsigned long flags; int user, ret; @@ -214,6 +215,14 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, return ret; } + if (pcrypt->create_context) { + ret = pcrypt->create_context(chan); + if (ret) { + nouveau_channel_put(&chan); + return ret; + } + } + /* Construct inital RAMFC for new channel */ ret = pfifo->create_context(chan); if (ret) { @@ -280,6 +289,7 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; unsigned long flags; int ret; @@ -328,6 +338,8 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan) /* destroy the engine specific contexts */ pfifo->destroy_context(chan); pgraph->destroy_context(chan); + if (pcrypt->destroy_context) + pcrypt->destroy_context(chan); pfifo->reassign(dev, true); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index f139aa2cbe5c..6dbb8818c530 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -299,6 +299,7 @@ nouveau_pci_resume(struct pci_dev *pdev) engine->timer.init(dev); engine->fb.init(dev); engine->graph.init(dev); + engine->crypt.init(dev); engine->fifo.init(dev); NV_INFO(dev, "Restoring GPU objects...\n"); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 3fb87995446b..d15bfd427267 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -132,6 +132,11 @@ enum nouveau_flags { #define NVOBJ_ENGINE_SW 0 #define NVOBJ_ENGINE_GR 1 +#define NVOBJ_ENGINE_PPP 2 +#define NVOBJ_ENGINE_COPY 3 +#define NVOBJ_ENGINE_VP 4 +#define NVOBJ_ENGINE_CRYPT 5 +#define NVOBJ_ENGINE_BSP 6 #define NVOBJ_ENGINE_DISPLAY 0xcafe0001 #define NVOBJ_ENGINE_INT 0xdeadbeef @@ -208,6 +213,7 @@ struct nouveau_channel { /* PGRAPH context */ /* XXX may be merge 2 pointers as private data ??? 
*/ struct nouveau_gpuobj *ramin_grctx; + struct nouveau_gpuobj *crypt_ctx; void *pgraph_ctx; /* NV50 VM */ @@ -444,6 +450,16 @@ struct nouveau_pm_engine { int (*temp_get)(struct drm_device *); }; +struct nouveau_crypt_engine { + bool registered; + + int (*init)(struct drm_device *); + void (*takedown)(struct drm_device *); + int (*create_context)(struct nouveau_channel *); + void (*destroy_context)(struct nouveau_channel *); + void (*tlb_flush)(struct drm_device *dev); +}; + struct nouveau_engine { struct nouveau_instmem_engine instmem; struct nouveau_mc_engine mc; @@ -454,6 +470,7 @@ struct nouveau_engine { struct nouveau_display_engine display; struct nouveau_gpio_engine gpio; struct nouveau_pm_engine pm; + struct nouveau_crypt_engine crypt; }; struct nouveau_pll_vals { @@ -1113,6 +1130,13 @@ extern void nvc0_graph_destroy_context(struct nouveau_channel *); extern int nvc0_graph_load_context(struct nouveau_channel *); extern int nvc0_graph_unload_context(struct drm_device *); +/* nv84_crypt.c */ +extern int nv84_crypt_init(struct drm_device *dev); +extern void nv84_crypt_fini(struct drm_device *dev); +extern int nv84_crypt_create_context(struct nouveau_channel *); +extern void nv84_crypt_destroy_context(struct nouveau_channel *); +extern void nv84_crypt_tlb_flush(struct drm_device *dev); + /* nv04_instmem.c */ extern int nv04_instmem_init(struct drm_device *); extern void nv04_instmem_takedown(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index f09151d17297..ca9b969f4f9c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -1230,6 +1230,22 @@ nouveau_irq_handler(DRM_IRQ_ARGS) status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; } + if (status & 0x00004000) { + u32 stat = nv_rd32(dev, 0x102130); + u32 mthd = nv_rd32(dev, 0x102190); + u32 data = nv_rd32(dev, 0x102194); + u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff; + + NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n", + stat, mthd, data, inst); + nv_wr32(dev, 0x102130, stat); + nv_wr32(dev, 0x10200c, 0x10); + + nv50_fb_vm_trap(dev, nouveau_ratelimit(), "PCRYPT"); + status &= ~0x00004000; + + } + if (status & NV_PMC_INTR_0_CRTCn_PENDING) { nouveau_crtc_irq_handler(dev, (status>>24)&3); status &= ~NV_PMC_INTR_0_CRTCn_PENDING; diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 6226beb3613d..ee526534c6f4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -272,6 +272,7 @@ nouveau_gpuobj_init(struct drm_device *dev) NV_DEBUG(dev, "\n"); INIT_LIST_HEAD(&dev_priv->gpuobj_list); + INIT_LIST_HEAD(&dev_priv->classes); spin_lock_init(&dev_priv->ramin_lock); dev_priv->ramin_base = ~0; @@ -686,7 +687,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); /* Base amount for object storage (4KiB enough?) 
*/ - size = 0x1000; + size = 0x2000; base = 0; /* PGRAPH context */ diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index be28754ffd50..f13134aa8c4f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -98,6 +98,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.clock_get = nv04_pm_clock_get; engine->pm.clock_pre = nv04_pm_clock_pre; engine->pm.clock_set = nv04_pm_clock_set; + engine->crypt.init = nouveau_stub_init; + engine->crypt.takedown = nouveau_stub_takedown; break; case 0x10: engine->instmem.init = nv04_instmem_init; @@ -151,6 +153,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.clock_get = nv04_pm_clock_get; engine->pm.clock_pre = nv04_pm_clock_pre; engine->pm.clock_set = nv04_pm_clock_set; + engine->crypt.init = nouveau_stub_init; + engine->crypt.takedown = nouveau_stub_takedown; break; case 0x20: engine->instmem.init = nv04_instmem_init; @@ -204,6 +208,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.clock_get = nv04_pm_clock_get; engine->pm.clock_pre = nv04_pm_clock_pre; engine->pm.clock_set = nv04_pm_clock_set; + engine->crypt.init = nouveau_stub_init; + engine->crypt.takedown = nouveau_stub_takedown; break; case 0x30: engine->instmem.init = nv04_instmem_init; @@ -259,6 +265,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.clock_set = nv04_pm_clock_set; engine->pm.voltage_get = nouveau_voltage_gpio_get; engine->pm.voltage_set = nouveau_voltage_gpio_set; + engine->crypt.init = nouveau_stub_init; + engine->crypt.takedown = nouveau_stub_takedown; break; case 0x40: case 0x60: @@ -316,6 +324,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.voltage_get = nouveau_voltage_gpio_get; engine->pm.voltage_set = nouveau_voltage_gpio_set; engine->pm.temp_get = nv40_temp_get; + engine->crypt.init = nouveau_stub_init; + engine->crypt.takedown = nouveau_stub_takedown; break; case 0x50: case 0x80: /* gotta love NVIDIA's consistency.. 
*/ @@ -380,19 +390,23 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.set = nv50_gpio_set; engine->gpio.irq_enable = nv50_gpio_irq_enable; switch (dev_priv->chipset) { - case 0xa3: - case 0xa5: - case 0xa8: - case 0xaf: - engine->pm.clock_get = nva3_pm_clock_get; - engine->pm.clock_pre = nva3_pm_clock_pre; - engine->pm.clock_set = nva3_pm_clock_set; - break; - default: + case 0x84: + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0x98: + case 0xa0: + case 0x50: engine->pm.clock_get = nv50_pm_clock_get; engine->pm.clock_pre = nv50_pm_clock_pre; engine->pm.clock_set = nv50_pm_clock_set; break; + default: + engine->pm.clock_get = nva3_pm_clock_get; + engine->pm.clock_pre = nva3_pm_clock_pre; + engine->pm.clock_set = nva3_pm_clock_set; + break; } engine->pm.voltage_get = nouveau_voltage_gpio_get; engine->pm.voltage_set = nouveau_voltage_gpio_set; @@ -400,6 +414,23 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.temp_get = nv84_temp_get; else engine->pm.temp_get = nv40_temp_get; + switch (dev_priv->chipset) { + case 0x84: + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0xa0: + engine->crypt.init = nv84_crypt_init; + engine->crypt.takedown = nv84_crypt_fini; + engine->crypt.create_context = nv84_crypt_create_context; + engine->crypt.destroy_context = nv84_crypt_destroy_context; + break; + default: + engine->crypt.init = nouveau_stub_init; + engine->crypt.takedown = nouveau_stub_takedown; + break; + } break; case 0xC0: engine->instmem.init = nvc0_instmem_init; @@ -447,6 +478,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.get = nv50_gpio_get; engine->gpio.set = nv50_gpio_set; engine->gpio.irq_enable = nv50_gpio_irq_enable; + engine->crypt.init = nouveau_stub_init; + engine->crypt.takedown = nouveau_stub_takedown; break; default: NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); @@ -619,10 +652,15 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out_fb; + /* PCRYPT */ + ret = engine->crypt.init(dev); + if (ret) + goto out_graph; + /* PFIFO */ ret = engine->fifo.init(dev); if (ret) - goto out_graph; + goto out_crypt; } ret = engine->display.create(dev); @@ -669,6 +707,9 @@ out_display: out_fifo: if (!nouveau_noaccel) engine->fifo.takedown(dev); +out_crypt: + if (!nouveau_noaccel) + engine->crypt.takedown(dev); out_graph: if (!nouveau_noaccel) engine->graph.takedown(dev); @@ -712,6 +753,7 @@ static void nouveau_card_takedown(struct drm_device *dev) if (!nouveau_noaccel) { engine->fifo.takedown(dev); + engine->crypt.takedown(dev); engine->graph.takedown(dev); } engine->fb.takedown(dev); diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c new file mode 100644 index 000000000000..63bd6bb41e38 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv84_crypt.c @@ -0,0 +1,110 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" + +int +nv84_crypt_create_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramin = chan->ramin; + int ret; + + NV_DEBUG(dev, "ch%d\n", chan->id); + + ret = nouveau_gpuobj_new(dev, chan, 256, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, + &chan->crypt_ctx); + if (ret) + return ret; + + nv_wo32(ramin, 0xa0, 0x00190000); + nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff); + nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst); + nv_wo32(ramin, 0xac, 0); + nv_wo32(ramin, 0xb0, 0); + nv_wo32(ramin, 0xb4, 0); + + dev_priv->engine.instmem.flush(dev); + return 0; +} + +void +nv84_crypt_destroy_context(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + u32 inst; + + if (!chan->ramin) + return; + + inst = (chan->ramin->vinst >> 12); + inst |= 0x80000000; + + /* mark context as invalid if still on the hardware, not + * doing this causes issues the next time PCRYPT is used, + * unsurprisingly :) + */ + nv_wr32(dev, 0x10200c, 0x00000000); + if (nv_rd32(dev, 0x102188) == inst) + nv_mask(dev, 0x102188, 0x80000000, 0x00000000); + if (nv_rd32(dev, 0x10218c) == inst) + nv_mask(dev, 0x10218c, 0x80000000, 0x00000000); + nv_wr32(dev, 0x10200c, 0x00000010); + + nouveau_gpuobj_ref(NULL, &chan->crypt_ctx); +} + +void +nv84_crypt_tlb_flush(struct drm_device *dev) +{ + nv50_vm_flush(dev, 0x0a); +} + +int +nv84_crypt_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; + + if (!pcrypt->registered) { + NVOBJ_CLASS(dev, 0x74c1, CRYPT); + pcrypt->registered = true; + } + + nv_mask(dev, 0x000200, 0x00004000, 0x00000000); + nv_mask(dev, 0x000200, 0x00004000, 0x00004000); + nv_wr32(dev, 0x102130, 0xffffffff); + nv_wr32(dev, 0x102140, 0xffffffbf); + nv_wr32(dev, 0x10200c, 0x00000010); + return 0; +} + +void +nv84_crypt_fini(struct drm_device *dev) +{ + nv_wr32(dev, 0x102140, 0x00000000); +} -- cgit v1.2.3 From 3052be2cea64371300a0338d3ca5d3575fbd109c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 20 Oct 2010 11:46:38 +1000 Subject: drm/nv50: remove excessive alignment of graph/crypt contexts Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_graph.c | 2 +- drivers/gpu/drm/nouveau/nv84_crypt.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 84ca90e91811..a764af52a3ba 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -216,7 +216,7 @@ nv50_graph_create_context(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); - ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000, + ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); if (ret) diff --git 
a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c index 63bd6bb41e38..f988b1a9d1d7 100644 --- a/drivers/gpu/drm/nouveau/nv84_crypt.c +++ b/drivers/gpu/drm/nouveau/nv84_crypt.c @@ -35,7 +35,7 @@ nv84_crypt_create_context(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); - ret = nouveau_gpuobj_new(dev, chan, 256, 0x1000, + ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, &chan->crypt_ctx); if (ret) -- cgit v1.2.3 From f4512e6579ddaa9b1f8ab1d5659131243ffc421f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 20 Oct 2010 11:47:09 +1000 Subject: drm/nv50: create graph and crypt contexts on demand This really needs cleaning up somehow, and probably investigate what's needed to do this on earlier generations. NVIDIA do something similar there too. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 11 ++--------- drivers/gpu/drm/nouveau/nouveau_object.c | 23 +++++++++++++++++++++++ 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 8f2df6beb893..f2d674202369 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -112,7 +112,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; - struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; struct nouveau_channel *chan; unsigned long flags; int user, ret; @@ -209,14 +208,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, pfifo->reassign(dev, false); /* Create a graphics context for new channel */ - ret = pgraph->create_context(chan); - if (ret) { - nouveau_channel_put(&chan); - return ret; - } - - if (pcrypt->create_context) { - ret = pcrypt->create_context(chan); + if (dev_priv->card_type < NV_50) { + ret = pgraph->create_context(chan); if (ret) { nouveau_channel_put(&chan); return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index ee526534c6f4..54078186fe65 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -634,6 +634,29 @@ found: if (oc->engine == NVOBJ_ENGINE_SW) return nouveau_gpuobj_sw_new(chan, class, gpuobj); + switch (oc->engine) { + case NVOBJ_ENGINE_GR: + if (dev_priv->card_type >= NV_50 && !chan->ramin_grctx) { + struct nouveau_pgraph_engine *pgraph = + &dev_priv->engine.graph; + + ret = pgraph->create_context(chan); + if (ret) + return ret; + } + break; + case NVOBJ_ENGINE_CRYPT: + if (!chan->crypt_ctx) { + struct nouveau_crypt_engine *pcrypt = + &dev_priv->engine.crypt; + + ret = pcrypt->create_context(chan); + if (ret) + return ret; + } + break; + } + ret = nouveau_gpuobj_new(dev, chan, nouveau_gpuobj_class_instmem_size(dev, class), 16, -- cgit v1.2.3 From 106ddad5aa8e8e03503cea05f9a64611f849952f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 19 Oct 2010 11:14:17 +1000 Subject: drm/nv50: clearer separation of the stages of evo init Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_reg.h | 14 ++--- drivers/gpu/drm/nouveau/nv50_display.c | 93 ++++++++++++++++++---------------- 2 files changed, 55 insertions(+), 52 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h 
b/drivers/gpu/drm/nouveau/nouveau_reg.h index 1bbe7037cf99..5e28bc63c41e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -747,15 +747,11 @@ #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000 #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000 -#define NV50_PDISPLAY_CTRL_STATE 0x00610300 -#define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000 -#define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc -#define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001 -#define NV50_PDISPLAY_CTRL_VAL 0x00610304 -#define NV50_PDISPLAY_UNK_380 0x00610380 -#define NV50_PDISPLAY_RAM_AMOUNT 0x00610384 -#define NV50_PDISPLAY_UNK_388 0x00610388 -#define NV50_PDISPLAY_UNK_38C 0x0061038c +#define NV50_PDISPLAY_PIO_CTRL 0x00610300 +#define NV50_PDISPLAY_PIO_CTRL_PENDING 0x80000000 +#define NV50_PDISPLAY_PIO_CTRL_MTHD 0x00001ffc +#define NV50_PDISPLAY_PIO_CTRL_ENABLED 0x00000001 +#define NV50_PDISPLAY_PIO_DATA 0x00610304 #define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r) #define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r) diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 7ac87efb791f..7c9c7c5bf22a 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -225,6 +225,7 @@ nv50_display_init(struct drm_device *dev) NV_DEBUG_KMS(dev, "\n"); nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004)); + /* * I think the 0x006101XX range is some kind of main control area * that enables things. @@ -240,16 +241,19 @@ nv50_display_init(struct drm_device *dev) val = nv_rd32(dev, 0x0061610c + (i * 0x800)); nv_wr32(dev, 0x0061019c + (i * 0x10), val); } + /* DAC */ for (i = 0; i < 3; i++) { val = nv_rd32(dev, 0x0061a000 + (i * 0x800)); nv_wr32(dev, 0x006101d0 + (i * 0x04), val); } + /* SOR */ for (i = 0; i < nv50_sor_nr(dev); i++) { val = nv_rd32(dev, 0x0061c000 + (i * 0x800)); nv_wr32(dev, 0x006101e0 + (i * 0x04), val); } + /* EXT */ for (i = 0; i < 3; i++) { val = nv_rd32(dev, 0x0061e000 + (i * 0x800)); @@ -276,6 +280,49 @@ nv50_display_init(struct drm_device *dev) } } + for (i = 0; i < 2; i++) { + nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); + if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { + NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n"); + NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n", + nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); + return -EBUSY; + } + + nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON); + if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, + NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) { + NV_ERROR(dev, "timeout: " + "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i); + NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i, + nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); + return -EBUSY; + } + } + + nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); + nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000); + nv_wr32(dev, 0x610028, 0x00000000); + nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000); + nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000); + nv_wr32(dev, NV50_PDISPLAY_INTR_EN, + NV50_PDISPLAY_INTR_EN_CLK_UNK10 | + NV50_PDISPLAY_INTR_EN_CLK_UNK20 | + NV50_PDISPLAY_INTR_EN_CLK_UNK40); + + /* enable hotplug interrupts */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + struct nouveau_connector 
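/*
 * Part of the clearer staging this patch is after: cursor-channel
 * bring-up, the PIO-channel disable and the interrupt enables (including
 * these hotplug IRQs) now all happen before the EVO master channel is
 * started, rather than being interleaved with and after the FIFO setup
 * as in the removed hunks further down.
 */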
*conn = nouveau_connector(connector); + + if (conn->dcb->gpio_tag == 0xff) + continue; + + pgpio->irq_enable(dev, conn->dcb->gpio_tag, true); + } + /* taken from nv bug #12637, attempts to un-wedge the hw if it's * stuck in some unspecified state */ @@ -297,7 +344,6 @@ nv50_display_init(struct drm_device *dev) } } - nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE); nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03); if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) { @@ -307,31 +353,6 @@ nv50_display_init(struct drm_device *dev) return -EBUSY; } - for (i = 0; i < 2; i++) { - nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000); - if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), - NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) { - NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n"); - NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n", - nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); - return -EBUSY; - } - - nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), - NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON); - if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), - NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, - NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) { - NV_ERROR(dev, "timeout: " - "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i); - NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i, - nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i))); - return -EBUSY; - } - } - - nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); - /* initialise fifo */ nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0), ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) | @@ -350,7 +371,9 @@ nv50_display_init(struct drm_device *dev) nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0); nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 | NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED); - nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1); + + /* enable error reporting on the channel */ + nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << 0); evo->dma.max = (4096/4) - 2; evo->dma.put = 0; @@ -382,21 +405,6 @@ nv50_display_init(struct drm_device *dev) if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2)) NV_ERROR(dev, "evo pushbuf stalled\n"); - /* enable clock change interrupts. 
*/ - nv_wr32(dev, 0x610028, 0x00010001); - nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 | - NV50_PDISPLAY_INTR_EN_CLK_UNK20 | - NV50_PDISPLAY_INTR_EN_CLK_UNK40)); - - /* enable hotplug interrupts */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct nouveau_connector *conn = nouveau_connector(connector); - - if (conn->dcb->gpio_tag == 0xff) - continue; - - pgpio->irq_enable(dev, conn->dcb->gpio_tag, true); - } return 0; } @@ -442,7 +450,6 @@ static int nv50_display_disable(struct drm_device *dev) } nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0); - nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0); if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) { NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n"); NV_ERROR(dev, "0x610200 = 0x%08x\n", -- cgit v1.2.3 From b7bc613a4cc08d867b43189c2af0bb83b1fa1dc6 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 19 Oct 2010 13:05:51 +1000 Subject: drm/nv50: move evo handling to nv50_evo.c Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 2 +- drivers/gpu/drm/nouveau/nouveau_reg.h | 22 +-- drivers/gpu/drm/nouveau/nv50_display.c | 238 +------------------------- drivers/gpu/drm/nouveau/nv50_evo.c | 295 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nv50_evo.h | 10 ++ 5 files changed, 322 insertions(+), 245 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nv50_evo.c diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index a541f5bff75d..c8c8de0bbc77 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -20,7 +20,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv40_grctx.o nv50_grctx.o \ nv84_crypt.o \ nv04_instmem.o nv50_instmem.o nvc0_instmem.o \ - nv50_crtc.o nv50_dac.o nv50_sor.o \ + nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \ nv50_cursor.o nv50_display.o nv50_fbcon.o \ nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index 5e28bc63c41e..d0ce86c24ebf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -729,17 +729,17 @@ #define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000 #define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080 #define NV50_PDISPLAY_TRAPPED_DATA 0x00610084 -#define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200) -#define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010 -#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000 -#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010 -#define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204) -#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002 -#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000 -#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002 -#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001 -#define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208) -#define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c) +#define NV50_PDISPLAY_EVO_CTRL(i) ((i) * 0x10 + 0x00610200) +#define NV50_PDISPLAY_EVO_CTRL_DMA 0x00000010 +#define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED 0x00000000 +#define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED 0x00000010 +#define NV50_PDISPLAY_EVO_DMA_CB(i) ((i) * 0x10 + 0x00610204) +#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION 0x00000002 +#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM 0x00000000 +#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM 
0x00000002 +#define NV50_PDISPLAY_EVO_DMA_CB_VALID 0x00000001 +#define NV50_PDISPLAY_EVO_UNK2(i) ((i) * 0x10 + 0x00610208) +#define NV50_PDISPLAY_EVO_HASH_TAG(i) ((i) * 0x10 + 0x0061020c) #define NV50_PDISPLAY_CURSOR 0x00610270 #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270) diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 7c9c7c5bf22a..db100a8f231e 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -46,159 +46,6 @@ nv50_sor_nr(struct drm_device *dev) return 4; } -static void -nv50_evo_channel_del(struct nouveau_channel **pchan) -{ - struct nouveau_channel *chan = *pchan; - - if (!chan) - return; - *pchan = NULL; - - nouveau_gpuobj_channel_takedown(chan); - nouveau_bo_unmap(chan->pushbuf_bo); - nouveau_bo_ref(NULL, &chan->pushbuf_bo); - - if (chan->user) - iounmap(chan->user); - - kfree(chan); -} - -static int -nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name, - uint32_t tile_flags, uint32_t magic_flags, - uint32_t offset, uint32_t limit) -{ - struct drm_nouveau_private *dev_priv = evo->dev->dev_private; - struct drm_device *dev = evo->dev; - struct nouveau_gpuobj *obj = NULL; - int ret; - - ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj); - if (ret) - return ret; - obj->engine = NVOBJ_ENGINE_DISPLAY; - - nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); - nv_wo32(obj, 4, limit); - nv_wo32(obj, 8, offset); - nv_wo32(obj, 12, 0x00000000); - nv_wo32(obj, 16, 0x00000000); - if (dev_priv->card_type < NV_C0) - nv_wo32(obj, 20, 0x00010000); - else - nv_wo32(obj, 20, 0x00020000); - dev_priv->engine.instmem.flush(dev); - - ret = nouveau_ramht_insert(evo, name, obj); - nouveau_gpuobj_ref(NULL, &obj); - if (ret) { - return ret; - } - - return 0; -} - -static int -nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *ramht = NULL; - struct nouveau_channel *chan; - int ret; - - chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); - if (!chan) - return -ENOMEM; - *pchan = chan; - - chan->id = -1; - chan->dev = dev; - chan->user_get = 4; - chan->user_put = 0; - - ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000, - NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin); - if (ret) { - NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); - nv50_evo_channel_del(pchan); - return ret; - } - - ret = drm_mm_init(&chan->ramin_heap, 0, 32768); - if (ret) { - NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); - nv50_evo_channel_del(pchan); - return ret; - } - - ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht); - if (ret) { - NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); - nv50_evo_channel_del(pchan); - return ret; - } - - ret = nouveau_ramht_new(dev, ramht, &chan->ramht); - nouveau_gpuobj_ref(NULL, &ramht); - if (ret) { - nv50_evo_channel_del(pchan); - return ret; - } - - if (dev_priv->chipset != 0x50) { - ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19, - 0, 0xffffffff); - if (ret) { - nv50_evo_channel_del(pchan); - return ret; - } - - - ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19, - 0, 0xffffffff); - if (ret) { - nv50_evo_channel_del(pchan); - return ret; - } - } - - ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, - 0, dev_priv->vram_size); - if (ret) { - nv50_evo_channel_del(pchan); - return ret; - } - - ret = nouveau_bo_new(dev, NULL, 4096, 0, 
TTM_PL_FLAG_VRAM, 0, 0, - false, true, &chan->pushbuf_bo); - if (ret == 0) - ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM); - if (ret) { - NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); - nv50_evo_channel_del(pchan); - return ret; - } - - ret = nouveau_bo_map(chan->pushbuf_bo); - if (ret) { - NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); - nv50_evo_channel_del(pchan); - return ret; - } - - chan->user = ioremap(pci_resource_start(dev->pdev, 0) + - NV50_PDISPLAY_USER(0), PAGE_SIZE); - if (!chan->user) { - NV_ERROR(dev, "Error mapping EVO control regs.\n"); - nv50_evo_channel_del(pchan); - return -ENOMEM; - } - - return 0; -} - int nv50_display_early_init(struct drm_device *dev) { @@ -214,12 +61,10 @@ int nv50_display_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; - struct nouveau_channel *evo = dev_priv->evo; struct drm_connector *connector; + struct nouveau_channel *evo; int ret, i; - u64 start; u32 val; NV_DEBUG_KMS(dev, "\n"); @@ -303,7 +148,6 @@ nv50_display_init(struct drm_device *dev) } } - nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000); nv_wr32(dev, 0x610028, 0x00000000); nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000); @@ -323,69 +167,12 @@ nv50_display_init(struct drm_device *dev) pgpio->irq_enable(dev, conn->dcb->gpio_tag, true); } - /* taken from nv bug #12637, attempts to un-wedge the hw if it's - * stuck in some unspecified state - */ - start = ptimer->read(dev); - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00); - while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) { - if ((val & 0x9f0000) == 0x20000) - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), - val | 0x800000); - - if ((val & 0x3f0000) == 0x30000) - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), - val | 0x200000); - - if (ptimer->read(dev) - start > 1000000000ULL) { - NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n"); - NV_ERROR(dev, "0x610200 = 0x%08x\n", val); - return -EBUSY; - } - } - - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03); - if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), - 0x40000000, 0x40000000)) { - NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n"); - NV_ERROR(dev, "0x610200 = 0x%08x\n", - nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))); - return -EBUSY; - } - - /* initialise fifo */ - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0), - ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) | - NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM | - NV50_PDISPLAY_CHANNEL_DMA_CB_VALID); - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000); - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002); - if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n"); - NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200)); - return -EBUSY; - } - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), - (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) | - NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED); - nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0); - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 | - NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED); - - /* enable error reporting on the channel */ - nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << 0); - - evo->dma.max = (4096/4) - 2; - evo->dma.put = 0; - evo->dma.cur = evo->dma.put; - 
evo->dma.free = evo->dma.max - evo->dma.cur; - - ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS); + ret = nv50_evo_init(dev); if (ret) return ret; + evo = dev_priv->evo; - for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) - OUT_RING(evo, 0); + nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9); ret = RING_SPACE(evo, 11); if (ret) @@ -449,12 +236,7 @@ static int nv50_display_disable(struct drm_device *dev) } } - nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0); - if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) { - NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n"); - NV_ERROR(dev, "0x610200 = 0x%08x\n", - nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))); - } + nv50_evo_fini(dev); for (i = 0; i < 3; i++) { if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i), @@ -504,13 +286,6 @@ int nv50_display_create(struct drm_device *dev) dev->mode_config.fb_base = dev_priv->fb_phys; - /* Create EVO channel */ - ret = nv50_evo_channel_new(dev, &dev_priv->evo); - if (ret) { - NV_ERROR(dev, "Error creating EVO channel: %d\n", ret); - return ret; - } - /* Create CRTC objects */ for (i = 0; i < 2; i++) nv50_crtc_create(dev, i); @@ -565,14 +340,11 @@ int nv50_display_create(struct drm_device *dev) void nv50_display_destroy(struct drm_device *dev) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - NV_DEBUG_KMS(dev, "\n"); drm_mode_config_cleanup(dev); nv50_display_disable(dev); - nv50_evo_channel_del(&dev_priv->evo); } static u16 diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c new file mode 100644 index 000000000000..211f5fb1dc73 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_evo.c @@ -0,0 +1,295 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_dma.h" +#include "nouveau_ramht.h" + +static void +nv50_evo_channel_del(struct nouveau_channel **pchan) +{ + struct nouveau_channel *chan = *pchan; + + if (!chan) + return; + *pchan = NULL; + + nouveau_gpuobj_channel_takedown(chan); + nouveau_bo_unmap(chan->pushbuf_bo); + nouveau_bo_ref(NULL, &chan->pushbuf_bo); + + if (chan->user) + iounmap(chan->user); + + kfree(chan); +} + +int +nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name, + u32 tile_flags, u32 magic_flags, u32 offset, u32 limit) +{ + struct drm_nouveau_private *dev_priv = evo->dev->dev_private; + struct drm_device *dev = evo->dev; + struct nouveau_gpuobj *obj = NULL; + int ret; + + ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj); + if (ret) + return ret; + obj->engine = NVOBJ_ENGINE_DISPLAY; + + nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); + nv_wo32(obj, 4, limit); + nv_wo32(obj, 8, offset); + nv_wo32(obj, 12, 0x00000000); + nv_wo32(obj, 16, 0x00000000); + if (dev_priv->card_type < NV_C0) + nv_wo32(obj, 20, 0x00010000); + else + nv_wo32(obj, 20, 0x00020000); + dev_priv->engine.instmem.flush(dev); + + ret = nouveau_ramht_insert(evo, name, obj); + nouveau_gpuobj_ref(NULL, &obj); + if (ret) { + return ret; + } + + return 0; +} + +static int +nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramht = NULL; + struct nouveau_channel *chan; + int ret; + + chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); + if (!chan) + return -ENOMEM; + *pchan = chan; + + chan->id = -1; + chan->dev = dev; + chan->user_get = 4; + chan->user_put = 0; + + ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin); + if (ret) { + NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); + nv50_evo_channel_del(pchan); + return ret; + } + + ret = drm_mm_init(&chan->ramin_heap, 0, 32768); + if (ret) { + NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); + nv50_evo_channel_del(pchan); + return ret; + } + + ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht); + if (ret) { + NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); + nv50_evo_channel_del(pchan); + return ret; + } + + ret = nouveau_ramht_new(dev, ramht, &chan->ramht); + nouveau_gpuobj_ref(NULL, &ramht); + if (ret) { + nv50_evo_channel_del(pchan); + return ret; + } + + if (dev_priv->chipset != 0x50) { + ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19, + 0, 0xffffffff); + if (ret) { + nv50_evo_channel_del(pchan); + return ret; + } + + + ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19, + 0, 0xffffffff); + if (ret) { + nv50_evo_channel_del(pchan); + return ret; + } + } + + ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, + 0, dev_priv->vram_size); + if (ret) { + nv50_evo_channel_del(pchan); + return ret; + } + + ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, + false, true, &chan->pushbuf_bo); + if (ret == 0) + ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM); + if (ret) { + NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); + nv50_evo_channel_del(pchan); + return ret; + } + + ret = nouveau_bo_map(chan->pushbuf_bo); + if (ret) { + NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); + nv50_evo_channel_del(pchan); + return ret; + } + + chan->user = ioremap(pci_resource_start(dev->pdev, 0) + + 
NV50_PDISPLAY_USER(0), PAGE_SIZE); + if (!chan->user) { + NV_ERROR(dev, "Error mapping EVO control regs.\n"); + nv50_evo_channel_del(pchan); + return -ENOMEM; + } + + return 0; +} + +static int +nv50_evo_channel_init(struct nouveau_channel *evo) +{ + struct drm_nouveau_private *dev_priv = evo->dev->dev_private; + struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; + struct drm_device *dev = evo->dev; + int ret, i; + u64 start; + u32 tmp; + + /* taken from nv bug #12637, attempts to un-wedge the hw if it's + * stuck in some unspecified state + */ + start = ptimer->read(dev); + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), 0x2b00); + while ((tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(0))) & 0x1e0000) { + if ((tmp & 0x9f0000) == 0x20000) + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), tmp | 0x800000); + + if ((tmp & 0x3f0000) == 0x30000) + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), tmp | 0x200000); + + if (ptimer->read(dev) - start > 1000000000ULL) { + NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n"); + NV_ERROR(dev, "0x610200 = 0x%08x\n", tmp); + return -EBUSY; + } + } + + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), 0x1000b03); + if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(0), + 0x40000000, 0x40000000)) { + NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n"); + NV_ERROR(dev, "0x610200 = 0x%08x\n", + nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(0))); + return -EBUSY; + } + + /* initialise fifo */ + nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(0), + ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) | + NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM | + NV50_PDISPLAY_EVO_DMA_CB_VALID); + nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(0), 0x00010000); + nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(0), 0x00000002); + if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) { + NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n"); + NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200)); + return -EBUSY; + } + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), + (nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(0)) & ~0x00000003) | + NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); + nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0); + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), 0x01000003 | + NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); + + /* enable error reporting on the channel */ + nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << 0); + + evo->dma.max = (4096/4) - 2; + evo->dma.put = 0; + evo->dma.cur = evo->dma.put; + evo->dma.free = evo->dma.max - evo->dma.cur; + + ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS); + if (ret) + return ret; + + for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) + OUT_RING(evo, 0); + + return 0; +} + +static void +nv50_evo_channel_fini(struct nouveau_channel *evo) +{ + struct drm_device *dev = evo->dev; + + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), 0); + if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(0), 0x1e0000, 0)) { + NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n"); + NV_ERROR(dev, "0x610200 = 0x%08x\n", + nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(0))); + } +} + +int +nv50_evo_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + if (!dev_priv->evo) { + ret = nv50_evo_channel_new(dev, &dev_priv->evo); + if (ret) + return ret; + } + + return nv50_evo_channel_init(dev_priv->evo); +} + +void +nv50_evo_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->evo) { + nv50_evo_channel_fini(dev_priv->evo); + nv50_evo_channel_del(&dev_priv->evo); + } +} diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h index 
aae13343bcec..aa4f0d3cea8e 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.h +++ b/drivers/gpu/drm/nouveau/nv50_evo.h @@ -24,6 +24,15 @@ * */ +#ifndef __NV50_EVO_H__ +#define __NV50_EVO_H__ + +int nv50_evo_init(struct drm_device *dev); +void nv50_evo_fini(struct drm_device *dev); +int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name, + u32 tile_flags, u32 magic_flags, + u32 offset, u32 limit); + #define NV50_EVO_UPDATE 0x00000080 #define NV50_EVO_UNK84 0x00000084 #define NV50_EVO_UNK84_NOTIFY 0x40000000 @@ -111,3 +120,4 @@ #define NV50_EVO_CRTC_SCALE_RES1 0x000008d8 #define NV50_EVO_CRTC_SCALE_RES2 0x000008dc +#endif -- cgit v1.2.3 From 1e96268aca1bb40f42bdbc9d2293b123b072f1de Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 19 Oct 2010 14:18:06 +1000 Subject: drm/nv50: initial work to allow multiple evo channels This doesn't work yet for unknown reasons. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nouveau_ramht.c | 2 +- drivers/gpu/drm/nouveau/nv50_evo.c | 246 +++++++++++++++++++------------- 3 files changed, 147 insertions(+), 102 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index d15bfd427267..bf34f25c7a54 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -689,6 +689,7 @@ struct drm_nouveau_private { struct backlight_device *backlight; struct nouveau_channel *evo; + u32 evo_alloc; struct { struct dcb_entry *dcb; u16 script; diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c index b4c63c058888..d4a2adc927d8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.c +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c @@ -114,7 +114,7 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); } else { if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) { - ctx = (gpuobj->cinst << 10) | 2; + ctx = (gpuobj->cinst << 10) | chan->id; } else { ctx = (gpuobj->cinst >> 4) | ((gpuobj->engine << diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c index 211f5fb1dc73..dbad233e750f 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.c +++ b/drivers/gpu/drm/nouveau/nv50_evo.c @@ -29,22 +29,26 @@ #include "nouveau_ramht.h" static void -nv50_evo_channel_del(struct nouveau_channel **pchan) +nv50_evo_channel_del(struct nouveau_channel **pevo) { - struct nouveau_channel *chan = *pchan; + struct drm_nouveau_private *dev_priv; + struct nouveau_channel *evo = *pevo; - if (!chan) + if (!evo) return; - *pchan = NULL; + *pevo = NULL; - nouveau_gpuobj_channel_takedown(chan); - nouveau_bo_unmap(chan->pushbuf_bo); - nouveau_bo_ref(NULL, &chan->pushbuf_bo); + dev_priv = evo->dev->dev_private; + dev_priv->evo_alloc &= ~(1 << evo->id); - if (chan->user) - iounmap(chan->user); + nouveau_gpuobj_channel_takedown(evo); + nouveau_bo_unmap(evo->pushbuf_bo); + nouveau_bo_ref(NULL, &evo->pushbuf_bo); - kfree(chan); + if (evo->user) + iounmap(evo->user); + + kfree(evo); } int @@ -56,7 +60,7 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name, struct nouveau_gpuobj *obj = NULL; int ret; - ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj); + ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj); if (ret) return ret; obj->engine = NVOBJ_ENGINE_DISPLAY; @@ -82,101 +86,63 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name, } static int -nv50_evo_channel_new(struct drm_device *dev, struct 
nouveau_channel **pchan) +nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *ramht = NULL; - struct nouveau_channel *chan; + struct nouveau_channel *evo; int ret; - chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); - if (!chan) + evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL); + if (!evo) return -ENOMEM; - *pchan = chan; - - chan->id = -1; - chan->dev = dev; - chan->user_get = 4; - chan->user_put = 0; - - ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000, - NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin); - if (ret) { - NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); - nv50_evo_channel_del(pchan); - return ret; - } - - ret = drm_mm_init(&chan->ramin_heap, 0, 32768); - if (ret) { - NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); - nv50_evo_channel_del(pchan); - return ret; - } + *pevo = evo; - ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht); - if (ret) { - NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); - nv50_evo_channel_del(pchan); - return ret; - } + for (evo->id = 0; evo->id < 5; evo->id++) { + if (dev_priv->evo_alloc & (1 << evo->id)) + continue; - ret = nouveau_ramht_new(dev, ramht, &chan->ramht); - nouveau_gpuobj_ref(NULL, &ramht); - if (ret) { - nv50_evo_channel_del(pchan); - return ret; + dev_priv->evo_alloc |= (1 << evo->id); + break; } - if (dev_priv->chipset != 0x50) { - ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19, - 0, 0xffffffff); - if (ret) { - nv50_evo_channel_del(pchan); - return ret; - } - - - ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19, - 0, 0xffffffff); - if (ret) { - nv50_evo_channel_del(pchan); - return ret; - } + if (evo->id == 5) { + kfree(evo); + return -ENODEV; } - ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, - 0, dev_priv->vram_size); - if (ret) { - nv50_evo_channel_del(pchan); - return ret; - } + evo->dev = dev; + evo->user_get = 4; + evo->user_put = 0; ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, - false, true, &chan->pushbuf_bo); + false, true, &evo->pushbuf_bo); if (ret == 0) - ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM); + ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM); if (ret) { NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret); - nv50_evo_channel_del(pchan); + nv50_evo_channel_del(pevo); return ret; } - ret = nouveau_bo_map(chan->pushbuf_bo); + ret = nouveau_bo_map(evo->pushbuf_bo); if (ret) { NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret); - nv50_evo_channel_del(pchan); + nv50_evo_channel_del(pevo); return ret; } - chan->user = ioremap(pci_resource_start(dev->pdev, 0) + - NV50_PDISPLAY_USER(0), PAGE_SIZE); - if (!chan->user) { + evo->user = ioremap(pci_resource_start(dev->pdev, 0) + + NV50_PDISPLAY_USER(evo->id), PAGE_SIZE); + if (!evo->user) { NV_ERROR(dev, "Error mapping EVO control regs.\n"); - nv50_evo_channel_del(pchan); + nv50_evo_channel_del(pevo); return -ENOMEM; } + /* bind primary evo channel's ramht to the channel */ + if (dev_priv->evo && evo != dev_priv->evo) + nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL); + return 0; } @@ -186,7 +152,7 @@ nv50_evo_channel_init(struct nouveau_channel *evo) struct drm_nouveau_private *dev_priv = evo->dev->dev_private; struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; struct drm_device *dev = evo->dev; - int ret, i; + int id = evo->id, ret, i; u64 start; u32 tmp; @@ -194,13 +160,13 @@ 
nv50_evo_channel_init(struct nouveau_channel *evo) * stuck in some unspecified state */ start = ptimer->read(dev); - nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), 0x2b00); - while ((tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(0))) & 0x1e0000) { + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x2b00); + while ((tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))) & 0x1e0000) { if ((tmp & 0x9f0000) == 0x20000) - nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), tmp | 0x800000); + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x800000); if ((tmp & 0x3f0000) == 0x30000) - nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), tmp | 0x200000); + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x200000); if (ptimer->read(dev) - start > 1000000000ULL) { NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n"); @@ -209,36 +175,37 @@ nv50_evo_channel_init(struct nouveau_channel *evo) } } - nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), 0x1000b03); - if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(0), + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x1000b03); + if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x40000000, 0x40000000)) { NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n"); NV_ERROR(dev, "0x610200 = 0x%08x\n", - nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(0))); + nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); return -EBUSY; } /* initialise fifo */ - nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(0), + nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) | NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM | NV50_PDISPLAY_EVO_DMA_CB_VALID); - nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(0), 0x00010000); - nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(0), 0x00000002); - if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) { + nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000); + nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id); + if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) { NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n"); - NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200)); + NV_ERROR(dev, "0x610200 = 0x%08x\n", + nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); return -EBUSY; } - nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), - (nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(0)) & ~0x00000003) | + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), + (nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)) & ~0x00000003) | NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); - nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0); - nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), 0x01000003 | - NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); + nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0); + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 | + NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); /* enable error reporting on the channel */ - nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << 0); + nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); evo->dma.max = (4096/4) - 2; evo->dma.put = 0; @@ -260,12 +227,89 @@ nv50_evo_channel_fini(struct nouveau_channel *evo) { struct drm_device *dev = evo->dev; - nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(0), 0); - if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(0), 0x1e0000, 0)) { + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(evo->id), 0); + if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(evo->id), 0x1e0000, 0)) { NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n"); NV_ERROR(dev, "0x610200 = 0x%08x\n", - nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(0))); + nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(evo->id))); + } +} + +static int +nv50_evo_create(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *ramht = NULL; + struct nouveau_channel *evo; + 
int ret; + + /* create primary evo channel, the one we use for modesetting + * purporses + */ + ret = nv50_evo_channel_new(dev, &dev_priv->evo); + if (ret) + return ret; + evo = dev_priv->evo; + + /* setup object management on it, any other evo channel will + * use this also as there's no per-channel support on the + * hardware + */ + ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin); + if (ret) { + NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + + ret = drm_mm_init(&evo->ramin_heap, 0, 32768); + if (ret) { + NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + + ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht); + if (ret) { + NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret); + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + + ret = nouveau_ramht_new(dev, ramht, &evo->ramht); + nouveau_gpuobj_ref(NULL, &ramht); + if (ret) { + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + + /* create some default objects for the scanout memtypes we support */ + if (dev_priv->chipset != 0x50) { + ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19, + 0, 0xffffffff); + if (ret) { + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + + + ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19, + 0, 0xffffffff); + if (ret) { + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + } + + ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19, + 0, dev_priv->vram_size); + if (ret) { + nv50_evo_channel_del(&dev_priv->evo); + return ret; } + + return 0; } int @@ -275,7 +319,7 @@ nv50_evo_init(struct drm_device *dev) int ret; if (!dev_priv->evo) { - ret = nv50_evo_channel_new(dev, &dev_priv->evo); + ret = nv50_evo_create(dev); if (ret) return ret; } -- cgit v1.2.3 From 43ce028ff2b1df68c690f0af14a109288d3e9e86 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 19 Oct 2010 18:01:41 +1000 Subject: drm/nv50: rework evo init to match nvidia more closely Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_evo.c | 73 ++++++++++++++------------------------ 1 file changed, 26 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c index dbad233e750f..d270f17a5310 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.c +++ b/drivers/gpu/drm/nouveau/nv50_evo.c @@ -149,60 +149,36 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo) static int nv50_evo_channel_init(struct nouveau_channel *evo) { - struct drm_nouveau_private *dev_priv = evo->dev->dev_private; - struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; struct drm_device *dev = evo->dev; int id = evo->id, ret, i; - u64 start; + u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT; u32 tmp; - /* taken from nv bug #12637, attempts to un-wedge the hw if it's - * stuck in some unspecified state - */ - start = ptimer->read(dev); - nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x2b00); - while ((tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))) & 0x1e0000) { - if ((tmp & 0x9f0000) == 0x20000) - nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x800000); - - if ((tmp & 0x3f0000) == 0x30000) - nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x200000); - - if (ptimer->read(dev) - start > 1000000000ULL) { - NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n"); - NV_ERROR(dev, "0x610200 = 0x%08x\n", tmp); - return -EBUSY; - } - } + tmp = 
nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); + if ((tmp & 0x009f0000) == 0x00020000) + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000); - nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x1000b03); - if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), - 0x40000000, 0x40000000)) { - NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n"); - NV_ERROR(dev, "0x610200 = 0x%08x\n", - nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); - return -EBUSY; - } + tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)); + if ((tmp & 0x003f0000) == 0x00030000) + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000); /* initialise fifo */ - nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), - ((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) | - NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM | - NV50_PDISPLAY_EVO_DMA_CB_VALID); + nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 | + NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM | + NV50_PDISPLAY_EVO_DMA_CB_VALID); nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000); nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id); + nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA, + NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); + + nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000); + nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 | + NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n"); - NV_ERROR(dev, "0x610200 = 0x%08x\n", + NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id, nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); return -EBUSY; } - nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), - (nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)) & ~0x00000003) | - NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); - nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0); - nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 | - NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED); /* enable error reporting on the channel */ nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id); @@ -226,12 +202,15 @@ static void nv50_evo_channel_fini(struct nouveau_channel *evo) { struct drm_device *dev = evo->dev; - - nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(evo->id), 0); - if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(evo->id), 0x1e0000, 0)) { - NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n"); - NV_ERROR(dev, "0x610200 = 0x%08x\n", - nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(evo->id))); + int id = evo->id; + + nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000); + nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000); + nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id)); + nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000); + if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) { + NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id, + nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id))); } } -- cgit v1.2.3 From 97e2000f757c19bb53e032320669f9a0d0b2a989 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 20 Oct 2010 14:23:29 +1000 Subject: drm/nv50: improve evo error handler when more than just channel 0 active Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_reg.h | 21 +++++++++--------- drivers/gpu/drm/nouveau/nv50_display.c | 40 +++++++++++++++++++--------------- drivers/gpu/drm/nouveau/nv50_graph.c | 11 +++++----- 3 files changed, 39 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index d0ce86c24ebf..b6384d36d5d0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -715,20 +715,21 
@@ #define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010 #define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020 #define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040 -#define NV50_PDISPLAY_INTR_EN 0x0061002c -#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c -#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2)) -#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004 -#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008 -#define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010 -#define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020 -#define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040 +#define NV50_PDISPLAY_INTR_EN_0 0x00610028 +#define NV50_PDISPLAY_INTR_EN_1 0x0061002c +#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC 0x0000000c +#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n) (1 << ((n) + 2)) +#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0 0x00000004 +#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1 0x00000008 +#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 0x00000010 +#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 0x00000020 +#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40 0x00000040 #define NV50_PDISPLAY_UNK30_CTRL 0x00610030 #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200 #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400 #define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000 -#define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080 -#define NV50_PDISPLAY_TRAPPED_DATA 0x00610084 +#define NV50_PDISPLAY_TRAPPED_ADDR(i) ((i) * 0x08 + 0x00610080) +#define NV50_PDISPLAY_TRAPPED_DATA(i) ((i) * 0x08 + 0x00610084) #define NV50_PDISPLAY_EVO_CTRL(i) ((i) * 0x10 + 0x00610200) #define NV50_PDISPLAY_EVO_CTRL_DMA 0x00000010 #define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED 0x00000000 diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index db100a8f231e..99871e304d10 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -149,13 +149,13 @@ nv50_display_init(struct drm_device *dev) } nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000); - nv_wr32(dev, 0x610028, 0x00000000); nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000); + nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000); nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000); - nv_wr32(dev, NV50_PDISPLAY_INTR_EN, - NV50_PDISPLAY_INTR_EN_CLK_UNK10 | - NV50_PDISPLAY_INTR_EN_CLK_UNK20 | - NV50_PDISPLAY_INTR_EN_CLK_UNK40); + nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, + NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 | + NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 | + NV50_PDISPLAY_INTR_EN_1_CLK_UNK40); /* enable hotplug interrupts */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { @@ -248,7 +248,7 @@ static int nv50_display_disable(struct drm_device *dev) } /* disable interrupts. 
*/ - nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000); + nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000); /* disable hotplug interrupts */ nv_wr32(dev, 0xe054, 0xffffffff); @@ -451,8 +451,7 @@ nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr) if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1) nv50_display_vblank_crtc_handler(dev, 1); - nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev, - NV50_PDISPLAY_INTR_EN) & ~intr); + nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, intr, 0x00000000); nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr); } @@ -779,16 +778,23 @@ nv50_display_irq_handler_bh(struct work_struct *work) static void nv50_display_error_handler(struct drm_device *dev) { - uint32_t addr, data; + u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16; + u32 addr, data; + int chid; - nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000); - addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR); - data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA); + for (chid = 0; chid < 5; chid++) { + if (!(channels & (1 << chid))) + continue; - NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n", - 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf); + nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid); + addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid)); + data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid)); + NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x " + "(0x%04x 0x%02x)\n", chid, + addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf); - nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000); + nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000); + } } void @@ -891,9 +897,9 @@ nv50_display_irq_handler(struct drm_device *dev) if (!intr0 && !(intr1 & ~delayed)) break; - if (intr0 & 0x00010000) { + if (intr0 & 0x001f0000) { nv50_display_error_handler(dev); - intr0 &= ~0x00010000; + intr0 &= ~0x001f0000; } if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) { diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index a764af52a3ba..d441308a09cf 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -384,13 +384,12 @@ nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1) return -EINVAL; - if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) & - NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) { + if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN_1) & + NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(data))) { nv_wr32(dev, NV50_PDISPLAY_INTR_1, - NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data)); - nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev, - NV50_PDISPLAY_INTR_EN) | - NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data)); + NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data)); + nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0, + NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(data)); } list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting); -- cgit v1.2.3 From 8888cb18ffd1f32c51e61ca8d3d179c3c3946d1e Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 20 Oct 2010 15:35:28 +1000 Subject: drm/nv50: fix evo instmem alignment Not an issue right now, we're forced to 64k size/alignment by the BO allocator anyway. This won't be the case soon. 
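For reference, nouveau_gpuobj_new(dev, chan, size, align, flags, &obj) takes the required byte alignment as its fourth argument, so the whole fix is raising the EVO instance-memory request from 0x1000 to 65536. A condensed view of the allocation site after the one-line hunk below, with the error handling paraphrased from earlier in this series:

    /* ask for 64KiB alignment explicitly rather than relying on the
     * BO allocator's current 64KiB size/alignment behaviour */
    ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
                             NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
    if (ret) {
            NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
            nv50_evo_channel_del(&dev_priv->evo);
            return ret;
    }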
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_evo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c index d270f17a5310..887b2a97e2a2 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.c +++ b/drivers/gpu/drm/nouveau/nv50_evo.c @@ -234,7 +234,7 @@ nv50_evo_create(struct drm_device *dev) * use this also as there's no per-channel support on the * hardware */ - ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000, + ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536, NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin); if (ret) { NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret); -- cgit v1.2.3 From 01d63187d1aad6236dd229d5824c61a60f1ab42c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 21 Oct 2010 11:37:21 +1000 Subject: drm/nv10: fix thinko and let nv17 do 3d again :) Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv10_graph.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index 17c20dbff232..a571bfd4471b 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c @@ -1102,7 +1102,7 @@ nv10_graph_register(struct drm_device *dev) if (dev_priv->chipset <= 0x10) { NVOBJ_CLASS(dev, 0x0056, GR); } else - if (dev_priv->chipset <= 0x17 || dev_priv->chipset == 0x1a) { + if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) { NVOBJ_CLASS(dev, 0x0096, GR); } else { NVOBJ_CLASS(dev, 0x0099, GR); -- cgit v1.2.3 From 35fa2f2ad161024e735fb0cd571cb92e50171afd Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 21 Oct 2010 14:07:03 +1000 Subject: drm/nouveau: add support for MSI Only supported on NV50+ so far, and disabled by default currently. The module parameter "msi=1" will enable it. There's a kernel bug which will cause this to fail if the module (or the NVIDIA binary driver) has ever been loaded before loading nouveau with MSI enabled. As such, this is only safe to enable if you have nouveau load on boot, and don't wish to ever reload it. The workaround is to "echo 0 > /sys/bus/pci/devices//enable" until the enable count reads 0. Then you should be able to load nouveau with MSI enabled. 
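In practice that means having nouveau load at boot and passing msi=1 (for example, modprobe nouveau msi=1). Driver-side, the opt-in is a small gate; a condensed sketch of what the patch below adds, using the names from the diff:

    /* nouveau_irq_init(): MSI is opt-in and NV50+ only */
    if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
            if (pci_enable_msi(dev->pdev) == 0) {
                    NV_INFO(dev, "enabled MSI\n");
                    dev_priv->msi_enabled = true;
            }
    }
    ret = drm_irq_install(dev);

    /* the interrupt handler must also re-arm the MSI block on every
     * interrupt: nv_wr08(dev, 0x00088068, 0xff) */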
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.c | 4 ++++ drivers/gpu/drm/nouveau/nouveau_drv.h | 5 +++++ drivers/gpu/drm/nouveau/nouveau_irq.c | 35 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_state.c | 9 +++------ 4 files changed, 47 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 6dbb8818c530..db926ecf5b6f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -115,6 +115,10 @@ MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n"); int nouveau_perflvl_wr; module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400); +MODULE_PARM_DESC(msi, "Enable MSI (default: off)\n"); +int nouveau_msi; +module_param_named(msi, nouveau_msi, int, 0400); + int nouveau_fbpercrtc; #if 0 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index bf34f25c7a54..9ab7dc8ede4e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -593,6 +593,8 @@ struct drm_nouveau_private { struct nouveau_bo *vga_ram; + /* interrupt handling */ + bool msi_enabled; struct workqueue_struct *wq; struct work_struct irq_work; struct work_struct hpd_work; @@ -754,6 +756,7 @@ extern int nouveau_force_post; extern int nouveau_override_conntype; extern char *nouveau_perflvl; extern int nouveau_perflvl_wr; +extern int nouveau_msi; extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); extern int nouveau_pci_resume(struct pci_dev *pdev); @@ -872,6 +875,8 @@ extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, struct drm_file *); /* nouveau_irq.c */ +extern int nouveau_irq_init(struct drm_device *); +extern void nouveau_irq_fini(struct drm_device *); extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); extern void nouveau_irq_preinstall(struct drm_device *); extern int nouveau_irq_postinstall(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index ca9b969f4f9c..17e2fa86cde7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -68,8 +68,13 @@ nouveau_irq_preinstall(struct drm_device *dev) int nouveau_irq_postinstall(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + /* Master enable */ nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE); + if (dev_priv->msi_enabled) + nv_wr08(dev, 0x00088068, 0xff); + return 0; } @@ -1263,5 +1268,35 @@ nouveau_irq_handler(DRM_IRQ_ARGS) spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + if (dev_priv->msi_enabled) + nv_wr08(dev, 0x00088068, 0xff); + return IRQ_HANDLED; } + +int +nouveau_irq_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int ret; + + if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) { + ret = pci_enable_msi(dev->pdev); + if (ret == 0) { + NV_INFO(dev, "enabled MSI\n"); + dev_priv->msi_enabled = true; + } + } + + return drm_irq_install(dev); +} + +void +nouveau_irq_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + drm_irq_uninstall(dev); + if (dev_priv->msi_enabled) + pci_disable_msi(dev->pdev); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index f13134aa8c4f..410a79f5e0cd 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -667,10 +667,7 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out_fifo; - /* this call irq_preinstall, register irq handler and - * call irq_postinstall - */ - ret = drm_irq_install(dev); + ret = nouveau_irq_init(dev); if (ret) goto out_display; @@ -701,7 +698,7 @@ nouveau_card_init(struct drm_device *dev) out_fence: nouveau_fence_fini(dev); out_irq: - drm_irq_uninstall(dev); + nouveau_irq_fini(dev); out_display: engine->display.destroy(dev); out_fifo: @@ -772,7 +769,7 @@ static void nouveau_card_takedown(struct drm_device *dev) nouveau_gpuobj_takedown(dev); nouveau_mem_vram_fini(dev); - drm_irq_uninstall(dev); + nouveau_irq_fini(dev); nouveau_pm_fini(dev); nouveau_bios_takedown(dev); -- cgit v1.2.3 From 5f80198e43cf96542923269ccb607052e5f144cc Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 22 Oct 2010 08:44:09 +1000 Subject: drm/nv50: regression fix, point NVAA/NVAC at correct PM functions Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_state.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 410a79f5e0cd..c5f29f0d18da 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -397,6 +397,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) case 0x96: case 0x98: case 0xa0: + case 0xaa: + case 0xac: case 0x50: engine->pm.clock_get = nv50_pm_clock_get; engine->pm.clock_pre = nv50_pm_clock_pre; -- cgit v1.2.3 From 382d62e524db528cdf53563ad9a018adc170dfde Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Wed, 20 Oct 2010 21:50:24 +0200 Subject: drm/nouveau: fix annoying nouveau_fence type issue nouveau_fence_* functions are not type safe, which could lead to bugs. Additionally every use of nouveau_fence_unref had to cast struct nouveau_fence to void **. Fix it by renaming old functions and creating static inline functions with new prototypes. We still need old functions, because we pass function pointers to ttm. As we are wrapping functions, drop unused "void *arg" parameter where possible. 
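The shape of the fix, condensed from the nouveau_drv.h hunk below: the void * variants survive under a double-underscore prefix because TTM's sync_obj hooks require that signature, while the rest of the driver calls trivially inlined, properly typed wrappers:

    /* untyped versions, still handed to ttm_bo_driver as sync_obj_* hooks */
    extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
    extern void __nouveau_fence_unref(void **obj);

    /* type-safe entry points for everyone else; the casting (and the
     * unused void *arg) is now confined to one place */
    static inline int
    nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
    {
            return __nouveau_fence_wait(fence, NULL, lazy, intr);
    }

    static inline void
    nouveau_fence_unref(struct nouveau_fence **fence)
    {
            __nouveau_fence_unref((void **)fence);
    }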
Signed-off-by: Marcin Slusarz Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 12 +++++------ drivers/gpu/drm/nouveau/nouveau_channel.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_drv.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_drv.h | 33 ++++++++++++++++++++++++++----- drivers/gpu/drm/nouveau/nouveau_fence.c | 22 ++++++++++----------- drivers/gpu/drm/nouveau/nouveau_gem.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_mem.c | 4 ++-- 7 files changed, 53 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 8442bfbf5d42..c09928322eb9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -487,7 +487,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, no_wait_reserve, no_wait_gpu, new_mem); - nouveau_fence_unref((void *)&fence); + nouveau_fence_unref(&fence); return ret; } @@ -949,11 +949,11 @@ struct ttm_bo_driver nouveau_bo_driver = { .evict_flags = nouveau_bo_evict_flags, .move = nouveau_bo_move, .verify_access = nouveau_bo_verify_access, - .sync_obj_signaled = nouveau_fence_signalled, - .sync_obj_wait = nouveau_fence_wait, - .sync_obj_flush = nouveau_fence_flush, - .sync_obj_unref = nouveau_fence_unref, - .sync_obj_ref = nouveau_fence_ref, + .sync_obj_signaled = __nouveau_fence_signalled, + .sync_obj_wait = __nouveau_fence_wait, + .sync_obj_flush = __nouveau_fence_flush, + .sync_obj_unref = __nouveau_fence_unref, + .sync_obj_ref = __nouveau_fence_ref, .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, .io_mem_reserve = &nouveau_ttm_io_mem_reserve, .io_mem_free = &nouveau_ttm_io_mem_free, diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index f2d674202369..c9cdbd786dae 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -303,8 +303,8 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan) ret = nouveau_fence_new(chan, &fence, true); if (ret == 0) { - ret = nouveau_fence_wait(fence, NULL, false, false); - nouveau_fence_unref((void *)&fence); + ret = nouveau_fence_wait(fence, false, false); + nouveau_fence_unref(&fence); } if (ret) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index db926ecf5b6f..15f48493d0d8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -205,8 +205,8 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) ret = nouveau_fence_new(chan, &fence, true); if (ret == 0) { - ret = nouveau_fence_wait(fence, NULL, false, false); - nouveau_fence_unref((void *)&fence); + ret = nouveau_fence_wait(fence, false, false); + nouveau_fence_unref(&fence); } if (ret) { diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 9ab7dc8ede4e..a356d894a2ee 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1264,12 +1264,35 @@ extern void nouveau_fence_work(struct nouveau_fence *fence, void (*work)(void *priv, bool signalled), void *priv); struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *); -extern bool nouveau_fence_signalled(void *obj, void *arg); -extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); + +extern bool __nouveau_fence_signalled(void *obj, void *arg); +extern int __nouveau_fence_wait(void *obj, void *arg, 
bool lazy, bool intr); +extern int __nouveau_fence_flush(void *obj, void *arg); +extern void __nouveau_fence_unref(void **obj); +extern void *__nouveau_fence_ref(void *obj); + +static inline bool nouveau_fence_signalled(struct nouveau_fence *obj) +{ + return __nouveau_fence_signalled(obj, NULL); +} +static inline int +nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr) +{ + return __nouveau_fence_wait(obj, NULL, lazy, intr); +} extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *); -extern int nouveau_fence_flush(void *obj, void *arg); -extern void nouveau_fence_unref(void **obj); -extern void *nouveau_fence_ref(void *obj); +static inline int nouveau_fence_flush(struct nouveau_fence *obj) +{ + return __nouveau_fence_flush(obj, NULL); +} +static inline void nouveau_fence_unref(struct nouveau_fence **obj) +{ + __nouveau_fence_unref((void **)obj); +} +static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj) +{ + return __nouveau_fence_ref(obj); +} /* nouveau_gem.c */ extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 75ce1b45d8a4..91aa6c54cc96 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -120,7 +120,7 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence, ret = nouveau_fence_emit(fence); if (ret) - nouveau_fence_unref((void *)&fence); + nouveau_fence_unref(&fence); *pfence = fence; return ret; } @@ -183,7 +183,7 @@ nouveau_fence_work(struct nouveau_fence *fence, } void -nouveau_fence_unref(void **sync_obj) +__nouveau_fence_unref(void **sync_obj) { struct nouveau_fence *fence = nouveau_fence(*sync_obj); @@ -193,7 +193,7 @@ nouveau_fence_unref(void **sync_obj) } void * -nouveau_fence_ref(void *sync_obj) +__nouveau_fence_ref(void *sync_obj) { struct nouveau_fence *fence = nouveau_fence(sync_obj); @@ -202,7 +202,7 @@ nouveau_fence_ref(void *sync_obj) } bool -nouveau_fence_signalled(void *sync_obj, void *sync_arg) +__nouveau_fence_signalled(void *sync_obj, void *sync_arg) { struct nouveau_fence *fence = nouveau_fence(sync_obj); struct nouveau_channel *chan = fence->channel; @@ -215,13 +215,13 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg) } int -nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) +__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) { unsigned long timeout = jiffies + (3 * DRM_HZ); int ret = 0; while (1) { - if (nouveau_fence_signalled(sync_obj, sync_arg)) + if (__nouveau_fence_signalled(sync_obj, sync_arg)) break; if (time_after_eq(jiffies, timeout)) { @@ -369,7 +369,7 @@ emit_semaphore(struct nouveau_channel *chan, int method, kref_get(&sema->ref); nouveau_fence_work(fence, semaphore_work, sema); - nouveau_fence_unref((void *)&fence); + nouveau_fence_unref(&fence); return 0; } @@ -384,14 +384,14 @@ nouveau_fence_sync(struct nouveau_fence *fence, int ret = 0; if (likely(!chan || chan == wchan || - nouveau_fence_signalled(fence, NULL))) + nouveau_fence_signalled(fence))) goto out; sema = alloc_semaphore(dev); if (!sema) { /* Early card or broken userspace, fall back to * software sync. 
*/ - ret = nouveau_fence_wait(fence, NULL, true, false); + ret = nouveau_fence_wait(fence, true, false); goto out; } @@ -400,7 +400,7 @@ nouveau_fence_sync(struct nouveau_fence *fence, * order issues */ if (!mutex_trylock(&chan->mutex)) { - ret = nouveau_fence_wait(fence, NULL, true, false); + ret = nouveau_fence_wait(fence, true, false); goto out_unref; } @@ -423,7 +423,7 @@ out: } int -nouveau_fence_flush(void *sync_obj, void *sync_arg) +__nouveau_fence_flush(void *sync_obj, void *sync_arg) { return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 23b521ea8e04..de8535b58710 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -238,7 +238,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence) prev_fence = nvbo->bo.sync_obj; nvbo->bo.sync_obj = nouveau_fence_ref(fence); spin_unlock(&nvbo->bo.bdev->fence_lock); - nouveau_fence_unref((void *)&prev_fence); + nouveau_fence_unref(&prev_fence); } if (unlikely(nvbo->validate_mapped)) { @@ -728,7 +728,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, out: validate_fini(&op, fence); - nouveau_fence_unref((void**)&fence); + nouveau_fence_unref(&fence); kfree(bo); kfree(push); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index fe4a30dc4b42..a7c3e08aa7b5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -54,7 +54,7 @@ nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, tile->addr = addr; tile->size = size; tile->used = !!pitch; - nouveau_fence_unref((void **)&tile->fence); + nouveau_fence_unref(&tile->fence); pfifo->reassign(dev, false); pfifo->cache_pull(dev, false); @@ -87,7 +87,7 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, continue; if (tile->fence && - !nouveau_fence_signalled(tile->fence, NULL)) + !nouveau_fence_signalled(tile->fence)) /* Pending tile region. */ continue; -- cgit v1.2.3 From 63f7fcfebd2ff1995b649101d6120b60fa0e5b06 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Fri, 22 Oct 2010 04:31:02 +0200 Subject: drm/nv04: Make CRTC base changes effective in the next hsync. 
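Latching the new base address at the next horizontal sync, instead of applying it immediately, should keep a pan or flip from switching scanout mid-line. The catch addressed here is that NV04 and NV10+ encode that mode differently in PCRTC_CONFIG, so the single shared define was wrong for one of them; paraphrasing the nv04_crtc.c hunk below:

    /* nv_crtc_mode_set_regs(): per-generation encoding of "apply the
     * CRTC start address at the next hsync" (4 << 0 on NV04 hardware,
     * 2 << 0 on NV10 and later; see the nvreg.h hunk) */
    if (dev_priv->card_type >= NV_10)
            regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
    else
            regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;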
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_hw.c | 2 +- drivers/gpu/drm/nouveau/nv04_crtc.c | 5 ++++- drivers/gpu/drm/nouveau/nvreg.h | 3 ++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c index b9672a05c411..979275836985 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hw.c +++ b/drivers/gpu/drm/nouveau/nouveau_hw.c @@ -953,7 +953,7 @@ nv_load_state_ext(struct drm_device *dev, int head, NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); - if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC) + if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC) NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000); else NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000); diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index 40e180741629..b61a7ff8b7b7 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -551,7 +551,10 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) if (dev_priv->card_type >= NV_30) regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); - regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC; + if (dev_priv->card_type >= NV_10) + regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC; + else + regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC; /* Some misc regs */ if (dev_priv->card_type == NV_40) { diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h index 881f8a585613..fe0f253089ac 100644 --- a/drivers/gpu/drm/nouveau/nvreg.h +++ b/drivers/gpu/drm/nouveau/nvreg.h @@ -153,7 +153,8 @@ #define NV_PCRTC_START 0x00600800 #define NV_PCRTC_CONFIG 0x00600804 # define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0) -# define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0) +# define NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC (4 << 0) +# define NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0) #define NV_PCRTC_CURSOR_CONFIG 0x00600810 # define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0) # define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4) -- cgit v1.2.3 From 042206c0cd4924879c4292c5ffa2bf1e8023ae5a Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 21 Oct 2010 18:19:29 +0200 Subject: drm/nouveau: Implement the vblank DRM hooks. 
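These hooks let the DRM core turn the CRTC vblank interrupt on and off on demand. A condensed sketch of the pre-NV50 path (abridged from the nouveau_display.c hunk below; on NV50+ the same is done by nv_mask()ing NV50_PDISPLAY_INTR_EN_1):

	/* drm_vblank_get()/drm_vblank_put() reach these through the new
	 * .enable_vblank/.disable_vblank driver hooks. */
	int nouveau_vblank_enable(struct drm_device *dev, int crtc)
	{
		NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
			    NV_PCRTC_INTR_0_VBLANK);
		return 0;
	}

	void nouveau_vblank_disable(struct drm_device *dev, int crtc)
	{
		NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
	}

The IRQ handler closes the loop by calling drm_handle_vblank() for whichever CRTC fired.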
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_display.c | 27 +++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_drv.c | 4 ++++ drivers/gpu/drm/nouveau/nouveau_drv.h | 4 ++++ drivers/gpu/drm/nouveau/nouveau_hw.c | 5 +++-- drivers/gpu/drm/nouveau/nouveau_irq.c | 8 ++++++-- drivers/gpu/drm/nouveau/nouveau_state.c | 12 +++++++----- drivers/gpu/drm/nouveau/nv50_display.c | 16 +++++++--------- drivers/gpu/drm/nouveau/nv50_graph.c | 9 +-------- 8 files changed, 59 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 2e11fd65b4dd..f8987bcb7f51 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -29,6 +29,7 @@ #include "nouveau_drv.h" #include "nouveau_fb.h" #include "nouveau_fbcon.h" +#include "nouveau_hw.h" static void nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) @@ -104,3 +105,29 @@ const struct drm_mode_config_funcs nouveau_mode_config_funcs = { .output_poll_changed = nouveau_fbcon_output_poll_changed, }; +int +nouveau_vblank_enable(struct drm_device *dev, int crtc) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->card_type >= NV_50) + nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0, + NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc)); + else + NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, + NV_PCRTC_INTR_0_VBLANK); + + return 0; +} + +void +nouveau_vblank_disable(struct drm_device *dev, int crtc) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->card_type >= NV_50) + nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, + NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0); + else + NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 15f48493d0d8..52f9307d7396 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -397,6 +397,9 @@ static struct drm_driver driver = { .irq_postinstall = nouveau_irq_postinstall, .irq_uninstall = nouveau_irq_uninstall, .irq_handler = nouveau_irq_handler, + .get_vblank_counter = drm_vblank_count, + .enable_vblank = nouveau_vblank_enable, + .disable_vblank = nouveau_vblank_disable, .reclaim_buffers = drm_core_reclaim_buffers, .ioctls = nouveau_ioctls, .fops = { @@ -407,6 +410,7 @@ static struct drm_driver driver = { .mmap = nouveau_ttm_mmap, .poll = drm_poll, .fasync = drm_fasync, + .read = drm_read, #if defined(CONFIG_COMPAT) .compat_ioctl = nouveau_compat_ioctl, #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index a356d894a2ee..7cf034fd5cd0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1312,6 +1312,10 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *, extern int nouveau_gem_ioctl_info(struct drm_device *, void *, struct drm_file *); +/* nouveau_display.c */ +int nouveau_vblank_enable(struct drm_device *dev, int crtc); +void nouveau_vblank_disable(struct drm_device *dev, int crtc); + /* nv10_gpio.c */ int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c index 979275836985..6ba640e7a7e9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hw.c +++ b/drivers/gpu/drm/nouveau/nouveau_hw.c @@ -1017,8 +1017,9 @@ 
nv_load_state_ext(struct drm_device *dev, int head, NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start); - /* Setting 1 on this value gives you interrupts for every vblank period. */ - NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0); + /* Enable vblank interrupts. */ + NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, + (dev->vblank_enabled[head] ? 1 : 0)); NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK); } diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 17e2fa86cde7..f3ae74ef3318 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -1200,11 +1200,15 @@ nv50_pgraph_irq_handler(struct drm_device *dev) static void nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) { - if (crtc & 1) + if (crtc & 1) { nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); + drm_handle_vblank(dev, 0); + } - if (crtc & 2) + if (crtc & 2) { nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); + drm_handle_vblank(dev, 1); + } } irqreturn_t diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index c5f29f0d18da..d72aa8d19a19 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -669,13 +669,13 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out_fifo; - ret = nouveau_irq_init(dev); + ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1); if (ret) - goto out_display; + goto out_vblank; - ret = drm_vblank_init(dev, 0); + ret = nouveau_irq_init(dev); if (ret) - goto out_irq; + goto out_vblank; /* what about PVIDEO/PCRTC/PRAMDAC etc? */ @@ -701,7 +701,8 @@ out_fence: nouveau_fence_fini(dev); out_irq: nouveau_irq_fini(dev); -out_display: +out_vblank: + drm_vblank_cleanup(dev); engine->display.destroy(dev); out_fifo: if (!nouveau_noaccel) @@ -772,6 +773,7 @@ static void nouveau_card_takedown(struct drm_device *dev) nouveau_mem_vram_fini(dev); nouveau_irq_fini(dev); + drm_vblank_cleanup(dev); nouveau_pm_fini(dev); nouveau_bios_takedown(dev); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 99871e304d10..17b950abf200 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -428,31 +428,29 @@ static void nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan; - struct list_head *entry, *tmp; - - list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) { - chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait); + struct nouveau_channel *chan, *tmp; + list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting, + nvsw.vbl_wait) { nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset, chan->nvsw.vblsem_rval); list_del(&chan->nvsw.vbl_wait); + drm_vblank_put(dev, crtc); } + + drm_handle_vblank(dev, crtc); } static void nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr) { - intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC; - if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0) nv50_display_vblank_crtc_handler(dev, 0); if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1) nv50_display_vblank_crtc_handler(dev, 1); - nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, intr, 0x00000000); - nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr); + nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC); } static void diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index d441308a09cf..ac7f62d524bb 100644 
--- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -384,14 +384,7 @@ nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1) return -EINVAL; - if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN_1) & - NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(data))) { - nv_wr32(dev, NV50_PDISPLAY_INTR_1, - NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data)); - nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0, - NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(data)); - } - + drm_vblank_get(dev, data); list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting); return 0; } -- cgit v1.2.3 From 332b242f47786d1a43bd7a19a0513dd5d493db8e Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Wed, 20 Oct 2010 23:35:40 +0200 Subject: drm/nouveau: Implement the pageflip ioctl. nv0x-nv4x should be mostly fine, nv50 doesn't work yet. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 12 ++ drivers/gpu/drm/nouveau/nouveau_channel.c | 1 + drivers/gpu/drm/nouveau/nouveau_display.c | 180 ++++++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_drv.h | 16 +++ drivers/gpu/drm/nouveau/nouveau_gem.c | 11 +- drivers/gpu/drm/nouveau/nouveau_state.c | 3 + drivers/gpu/drm/nouveau/nv04_crtc.c | 1 + drivers/gpu/drm/nouveau/nv04_graph.c | 16 +++ drivers/gpu/drm/nouveau/nv10_graph.c | 4 + drivers/gpu/drm/nouveau/nv20_graph.c | 8 ++ drivers/gpu/drm/nouveau/nv40_graph.c | 4 + drivers/gpu/drm/nouveau/nv50_crtc.c | 1 + drivers/gpu/drm/nouveau/nv50_graph.c | 16 +++ include/drm/nouveau_drm.h | 1 + 14 files changed, 265 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index c09928322eb9..cdc8f544d47f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -942,6 +942,18 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) return ttm_bo_validate(bo, &nvbo->placement, false, true, false); } +void +nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence) +{ + spin_lock(&nvbo->bo.bdev->fence_lock); + __nouveau_fence_unref(&nvbo->bo.sync_obj); + + if (likely(fence)) + nvbo->bo.sync_obj = nouveau_fence_ref(fence); + + spin_unlock(&nvbo->bo.bdev->fence_lock); +} + struct ttm_bo_driver nouveau_bo_driver = { .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, .invalidate_caches = nouveau_bo_invalidate_caches, diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index c9cdbd786dae..11b2370e16da 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -148,6 +148,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, NV_DEBUG(dev, "initialising channel %d\n", chan->id); INIT_LIST_HEAD(&chan->nvsw.vbl_wait); + INIT_LIST_HEAD(&chan->nvsw.flip); INIT_LIST_HEAD(&chan->fence.pending); /* Allocate DMA push buffer */ diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index f8987bcb7f51..505c6bfb4d75 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -30,6 +30,8 @@ #include "nouveau_fb.h" #include "nouveau_fbcon.h" #include "nouveau_hw.h" +#include "nouveau_crtc.h" +#include "nouveau_dma.h" static void nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) @@ -131,3 +133,181 @@ nouveau_vblank_disable(struct drm_device *dev, int crtc) else NVWriteCRTC(dev, crtc, 
NV_PCRTC_INTR_EN_0, 0); } + +static int +nouveau_page_flip_reserve(struct nouveau_bo *old_bo, + struct nouveau_bo *new_bo) +{ + int ret; + + ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); + if (ret) + return ret; + + ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0); + if (ret) + goto fail; + + ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0); + if (ret) + goto fail_unreserve; + + return 0; + +fail_unreserve: + ttm_bo_unreserve(&new_bo->bo); +fail: + nouveau_bo_unpin(new_bo); + return ret; +} + +static void +nouveau_page_flip_unreserve(struct nouveau_bo *old_bo, + struct nouveau_bo *new_bo, + struct nouveau_fence *fence) +{ + nouveau_bo_fence(new_bo, fence); + ttm_bo_unreserve(&new_bo->bo); + + nouveau_bo_fence(old_bo, fence); + ttm_bo_unreserve(&old_bo->bo); + + nouveau_bo_unpin(old_bo); +} + +static int +nouveau_page_flip_emit(struct nouveau_channel *chan, + struct nouveau_bo *old_bo, + struct nouveau_bo *new_bo, + struct nouveau_page_flip_state *s, + struct nouveau_fence **pfence) +{ + struct drm_device *dev = chan->dev; + unsigned long flags; + int ret; + + /* Queue it to the pending list */ + spin_lock_irqsave(&dev->event_lock, flags); + list_add_tail(&s->head, &chan->nvsw.flip); + spin_unlock_irqrestore(&dev->event_lock, flags); + + /* Synchronize with the old framebuffer */ + ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan); + if (ret) + goto fail; + + /* Emit the pageflip */ + ret = RING_SPACE(chan, 2); + if (ret) + goto fail; + + BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1); + OUT_RING(chan, 0); + FIRE_RING(chan); + + ret = nouveau_fence_new(chan, pfence, true); + if (ret) + goto fail; + + return 0; +fail: + spin_lock_irqsave(&dev->event_lock, flags); + list_del(&s->head); + spin_unlock_irqrestore(&dev->event_lock, flags); + return ret; +} + +int +nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event) +{ + struct drm_device *dev = crtc->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo; + struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo; + struct nouveau_page_flip_state *s; + struct nouveau_channel *chan; + struct nouveau_fence *fence; + int ret; + + if (dev_priv->engine.graph.accel_blocked) + return -ENODEV; + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + /* Don't let the buffers go away while we flip */ + ret = nouveau_page_flip_reserve(old_bo, new_bo); + if (ret) + goto fail_free; + + /* Initialize a page flip struct */ + *s = (struct nouveau_page_flip_state) + { { }, event, nouveau_crtc(crtc)->index, + fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y, + new_bo->bo.offset }; + + /* Choose the channel the flip will be handled in */ + chan = nouveau_fence_channel(new_bo->bo.sync_obj); + if (!chan) + chan = nouveau_channel_get_unlocked(dev_priv->channel); + mutex_lock(&chan->mutex); + + /* Emit a page flip */ + ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); + nouveau_channel_put(&chan); + if (ret) + goto fail_unreserve; + + /* Update the crtc struct and cleanup */ + crtc->fb = fb; + + nouveau_page_flip_unreserve(old_bo, new_bo, fence); + nouveau_fence_unref(&fence); + return 0; + +fail_unreserve: + nouveau_page_flip_unreserve(old_bo, new_bo, NULL); +fail_free: + kfree(s); + return ret; +} + +int +nouveau_finish_page_flip(struct nouveau_channel *chan, + struct nouveau_page_flip_state *ps) +{ + struct drm_device *dev = chan->dev; + struct nouveau_page_flip_state *s; +
unsigned long flags; + + spin_lock_irqsave(&dev->event_lock, flags); + + if (list_empty(&chan->nvsw.flip)) { + NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id); + spin_unlock_irqrestore(&dev->event_lock, flags); + return -EINVAL; + } + + s = list_first_entry(&chan->nvsw.flip, + struct nouveau_page_flip_state, head); + if (s->event) { + struct drm_pending_vblank_event *e = s->event; + struct timeval now; + + do_gettimeofday(&now); + e->event.sequence = 0; + e->event.tv_sec = now.tv_sec; + e->event.tv_usec = now.tv_usec; + list_add_tail(&e->base.link, &e->base.file_priv->event_list); + wake_up_interruptible(&e->base.file_priv->event_wait); + } + + list_del(&s->head); + *ps = *s; + kfree(s); + + spin_unlock_irqrestore(&dev->event_lock, flags); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 7cf034fd5cd0..2bb1f1572a55 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -166,6 +166,13 @@ struct nouveau_gpuobj { void *priv; }; +struct nouveau_page_flip_state { + struct list_head head; + struct drm_pending_vblank_event *event; + int crtc, bpp, pitch, x, y; + uint64_t offset; +}; + struct nouveau_channel { struct drm_device *dev; int id; @@ -253,6 +260,7 @@ struct nouveau_channel { uint32_t vblsem_offset; uint32_t vblsem_rval; struct list_head vbl_wait; + struct list_head flip; } nvsw; struct { @@ -1076,6 +1084,8 @@ extern void nv04_graph_destroy_context(struct nouveau_channel *); extern int nv04_graph_load_context(struct nouveau_channel *); extern int nv04_graph_unload_context(struct drm_device *); extern void nv04_graph_context_switch(struct drm_device *); +extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data); /* nv10_graph.c */ extern int nv10_graph_init(struct drm_device *); @@ -1249,6 +1259,7 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); +extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); /* nouveau_fence.c */ struct nouveau_fence; @@ -1315,6 +1326,10 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *, /* nouveau_display.c */ int nouveau_vblank_enable(struct drm_device *dev, int crtc); void nouveau_vblank_disable(struct drm_device *dev, int crtc); +int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event); +int nouveau_finish_page_flip(struct nouveau_channel *, + struct nouveau_page_flip_state *); /* nv10_gpio.c */ int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); @@ -1514,5 +1529,6 @@ nv_match_device(struct drm_device *dev, unsigned device, #define NV_SW_VBLSEM_OFFSET 0x00000400 #define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404 #define NV_SW_VBLSEM_RELEASE 0x00000408 +#define NV_SW_PAGE_FLIP 0x00000500 #endif /* __NOUVEAU_DRV_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index de8535b58710..9886b644f27d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -231,15 +231,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence) list_for_each_safe(entry, tmp, list) { nvbo = list_entry(entry, struct nouveau_bo, entry); - if (likely(fence)) { - struct 
nouveau_fence *prev_fence; - - spin_lock(&nvbo->bo.bdev->fence_lock); - prev_fence = nvbo->bo.sync_obj; - nvbo->bo.sync_obj = nouveau_fence_ref(fence); - spin_unlock(&nvbo->bo.bdev->fence_lock); - nouveau_fence_unref(&prev_fence); - } + + nouveau_bo_fence(nvbo, fence); if (unlikely(nvbo->validate_mapped)) { ttm_bo_kunmap(&nvbo->kmap); diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index d72aa8d19a19..ec9d193b6f61 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -1090,6 +1090,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, case NOUVEAU_GETPARAM_HAS_BO_USAGE: getparam->value = 1; break; + case NOUVEAU_GETPARAM_HAS_PAGEFLIP: + getparam->value = (dev_priv->card_type < NV_50); + break; case NOUVEAU_GETPARAM_GRAPH_UNITS: /* NV40 and NV50 versions are quite different, but register * address is the same. User is supposed to know the card diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index b61a7ff8b7b7..e4aea721971f 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -989,6 +989,7 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = { .cursor_move = nv04_crtc_cursor_move, .gamma_set = nv_crtc_gamma_set, .set_config = drm_crtc_helper_set_config, + .page_flip = nouveau_crtc_page_flip, .destroy = nv_crtc_destroy, }; diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index 81aba097ef95..239519aefce6 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c @@ -26,6 +26,7 @@ #include "drm.h" #include "nouveau_drm.h" #include "nouveau_drv.h" +#include "nouveau_hw.h" static int nv04_graph_register(struct drm_device *dev); @@ -553,6 +554,20 @@ nv04_graph_mthd_set_ref(struct nouveau_channel *chan, return 0; } +int +nv04_graph_mthd_page_flip(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) +{ + struct drm_device *dev = chan->dev; + struct nouveau_page_flip_state s; + + if (!nouveau_finish_page_flip(chan, &s)) + nv_set_crtc_base(dev, s.crtc, + s.offset + s.y * s.pitch + s.x * s.bpp / 8); + + return 0; +} + /* * Software methods, why they are needed, and how they all work: * @@ -1204,6 +1219,7 @@ nv04_graph_register(struct drm_device *dev) /* nvsw */ NVOBJ_CLASS(dev, 0x506e, SW); NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref); + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); dev_priv->engine.graph.registered = true; return 0; diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index a571bfd4471b..3fbb49dfc09c 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c @@ -1113,6 +1113,10 @@ nv10_graph_register(struct drm_device *dev) NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable); } + /* nvsw */ + NVOBJ_CLASS(dev, 0x506e, SW); + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); + dev_priv->engine.graph.registered = true; return 0; } diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index 7720bccb3c98..51b9dd12949d 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c @@ -801,6 +801,10 @@ nv20_graph_register(struct drm_device *dev) else NVOBJ_CLASS(dev, 0x0597, GR); + /* nvsw */ + NVOBJ_CLASS(dev, 0x506e, SW); + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); + dev_priv->engine.graph.registered = true; return 0; } 
@@ -841,6 +845,10 @@ nv30_graph_register(struct drm_device *dev) if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f))) NVOBJ_CLASS(dev, 0x0497, GR); + /* nvsw */ + NVOBJ_CLASS(dev, 0x506e, SW); + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); + dev_priv->engine.graph.registered = true; return 0; } diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index b9361e28687c..159bdcd757d4 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -446,6 +446,10 @@ nv40_graph_register(struct drm_device *dev) else NVOBJ_CLASS(dev, 0x4097, GR); + /* nvsw */ + NVOBJ_CLASS(dev, 0x506e, SW); + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip); + dev_priv->engine.graph.registered = true; return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 56476d0c6de8..1225ea0a841b 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -437,6 +437,7 @@ static const struct drm_crtc_funcs nv50_crtc_funcs = { .cursor_move = nv50_crtc_cursor_move, .gamma_set = nv50_crtc_gamma_set, .set_config = drm_crtc_helper_set_config, + .page_flip = nouveau_crtc_page_flip, .destroy = nv50_crtc_destroy, }; diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index ac7f62d524bb..6d81f4dab37d 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -29,6 +29,8 @@ #include "nouveau_drv.h" #include "nouveau_ramht.h" #include "nouveau_grctx.h" +#include "nouveau_dma.h" +#include "nv50_evo.h" static int nv50_graph_register(struct drm_device *); @@ -389,6 +391,19 @@ nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, return 0; } +static int +nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan, + u32 class, u32 mthd, u32 data) +{ + struct nouveau_page_flip_state s; + + if (!nouveau_finish_page_flip(chan, &s)) { + /* XXX - Do something here */ + } + + return 0; +} + static int nv50_graph_register(struct drm_device *dev) { @@ -402,6 +417,7 @@ nv50_graph_register(struct drm_device *dev) NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset); NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val); NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release); + NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip); NVOBJ_CLASS(dev, 0x0030, GR); /* null */ NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */ diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h index 60a7b3e9c2e9..39aa0fd6a529 100644 --- a/include/drm/nouveau_drm.h +++ b/include/drm/nouveau_drm.h @@ -81,6 +81,7 @@ struct drm_nouveau_gpuobj_free { #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 #define NOUVEAU_GETPARAM_PTIMER_TIME 14 #define NOUVEAU_GETPARAM_HAS_BO_USAGE 15 +#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16 struct drm_nouveau_getparam { uint64_t param; uint64_t value; -- cgit v1.2.3 From 1c180fa5bd5f264e4863bb88861e8cd7d135b917 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 25 Oct 2010 03:30:34 +0200 Subject: drm/nouveau: Call drm_vblank_pre/post_modeset() around mode setting. 
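Bracketing the modeset lets the DRM core compensate for the vblank counter stopping while the CRTC is blanked. A rough sketch of the nv04 path (condensed from the hunks below; local variable setup elided):

	static void nv_crtc_prepare(struct drm_crtc *crtc)
	{
		drm_vblank_pre_modeset(dev, nv_crtc->index); /* counter saved */
		funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
		/* blank the screen and reprogram the mode */
	}

	static void nv_crtc_commit(struct drm_crtc *crtc)
	{
		funcs->dpms(crtc, DRM_MODE_DPMS_ON);
		drm_vblank_post_modeset(dev, nv_crtc->index); /* counter resumed */
	}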
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv04_crtc.c | 2 ++ drivers/gpu/drm/nouveau/nv50_crtc.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index e4aea721971f..297505eb98d5 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -672,6 +672,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc) if (nv_two_heads(dev)) NVSetOwner(dev, nv_crtc->index); + drm_vblank_pre_modeset(dev, nv_crtc->index); funcs->dpms(crtc, DRM_MODE_DPMS_OFF); NVBlankScreen(dev, nv_crtc->index, true); @@ -704,6 +705,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc) #endif funcs->dpms(crtc, DRM_MODE_DPMS_ON); + drm_vblank_post_modeset(dev, nv_crtc->index); } static void nv_crtc_destroy(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 1225ea0a841b..f3570cc45017 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -454,6 +454,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc) NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); + drm_vblank_pre_modeset(dev, nv_crtc->index); nv50_crtc_blank(nv_crtc, true); } @@ -469,6 +470,7 @@ nv50_crtc_commit(struct drm_crtc *crtc) NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); nv50_crtc_blank(nv_crtc, false); + drm_vblank_post_modeset(dev, nv_crtc->index); ret = RING_SPACE(evo, 2); if (ret) { -- cgit v1.2.3 From 1f6d2de2c539df6fe52ad2187191a9dfe10c7233 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sun, 24 Oct 2010 14:15:58 +0200 Subject: drm/nv50: Keep track of the head a channel is vsync'ing to. In a multihead setup vblank interrupts may end up enabled in both heads. In that case we want to ignore the vblank interrupts coming from the wrong CRTC to avoid tearing and unbalanced calls to drm_vblank_get/put (fdo bug 31074). 
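A minimal sketch of the fix in the nv50 vblank handler (see the nv50_display.c hunk below):

	list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
				 nvsw.vbl_wait) {
		if (chan->nvsw.vblsem_head != crtc)
			continue; /* armed for the other head, leave it queued */
		/* release the semaphore and drm_vblank_put() */
	}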
Reported-by: Felix Leimbach Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nv50_display.c | 3 +++ drivers/gpu/drm/nouveau/nv50_graph.c | 3 +++ 3 files changed, 7 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 2bb1f1572a55..5814db82f778 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -257,6 +257,7 @@ struct nouveau_channel { struct { struct nouveau_gpuobj *vblsem; + uint32_t vblsem_head; uint32_t vblsem_offset; uint32_t vblsem_rval; struct list_head vbl_wait; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 17b950abf200..41b212801870 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -432,6 +432,9 @@ nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc) list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting, nvsw.vbl_wait) { + if (chan->nvsw.vblsem_head != crtc) + continue; + nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset, chan->nvsw.vblsem_rval); list_del(&chan->nvsw.vbl_wait); diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 6d81f4dab37d..e0f52942b2eb 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -387,7 +387,10 @@ nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, return -EINVAL; drm_vblank_get(dev, data); + + chan->nvsw.vblsem_head = data; list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting); + return 0; } -- cgit v1.2.3 From e419cf0954901bb3a987f8b76cbc9654ca06121c Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Mon, 25 Oct 2010 23:38:59 +0200 Subject: drm/nouveau: Add a separate class for the kernel channel mutex. nouveau_bo_move_m2mf() needs to lock the kernel channel, and it may be called from the pushbuf IOCTL with a user channel already locked. Use a separate subclass for the kernel channel mutex because this is legitimate mutex nesting. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 2 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index cdc8f544d47f..099f806f39ed 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -678,7 +678,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, chan = nvbo->channel; if (!chan || nvbo->no_vm) { chan = dev_priv->channel; - mutex_lock(&chan->mutex); + mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX); } if (dev_priv->card_type < NV_50) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 5814db82f778..ce0475ead381 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -173,6 +173,11 @@ struct nouveau_page_flip_state { uint64_t offset; }; +enum nouveau_channel_mutex_class { + NOUVEAU_UCHANNEL_MUTEX, + NOUVEAU_KCHANNEL_MUTEX +}; + struct nouveau_channel { struct drm_device *dev; int id; -- cgit v1.2.3 From a5cf68b04b2b8ea716cf6fd8499c1c54d05fdf5e Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sun, 24 Oct 2010 16:14:41 +0200 Subject: drm/nouveau: Rework tile region handling.
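A condensed view of the reworked per-region interface (hook names and signatures taken from the nouveau_drv.h hunk below):

	/* Each tile region now passes through three driver-specific stages: */
	pfb->init_tile_region(dev, i, addr, size, pitch, flags); /* fill in per-region state */
	pfb->set_tile_region(dev, i);  /* program the hardware from that state */
	pfb->free_tile_region(dev, i); /* release any per-region resources */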
The point is to share more code between the PFB/PGRAPH tile region hooks, and give the hardware specific functions a chance to allocate per-region resources. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 11 ++-- drivers/gpu/drm/nouveau/nouveau_drv.h | 59 ++++++++++------- drivers/gpu/drm/nouveau/nouveau_mem.c | 111 +++++++++++++++++++------------- drivers/gpu/drm/nouveau/nouveau_state.c | 25 ++++--- drivers/gpu/drm/nouveau/nv10_fb.c | 48 +++++++++----- drivers/gpu/drm/nouveau/nv10_graph.c | 17 ++--- drivers/gpu/drm/nouveau/nv20_graph.c | 25 ++++--- drivers/gpu/drm/nouveau/nv30_fb.c | 23 ++++++- drivers/gpu/drm/nouveau/nv40_fb.c | 22 +++---- drivers/gpu/drm/nouveau/nv40_graph.c | 40 ++++++------ 10 files changed, 221 insertions(+), 160 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 099f806f39ed..8d5dd980240d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -46,9 +46,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) if (unlikely(nvbo->gem)) DRM_ERROR("bo %p still attached to GEM object\n", bo); - if (nvbo->tile) - nv10_mem_expire_tiling(dev, nvbo->tile, NULL); - + nv10_mem_put_tile_region(dev, nvbo->tile, NULL); kfree(nvbo); } @@ -792,7 +790,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, } else if (dev_priv->card_type >= NV_10) { *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size, - nvbo->tile_mode); + nvbo->tile_mode, + nvbo->tile_flags); } return 0; @@ -808,9 +807,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, if (dev_priv->card_type >= NV_10 && dev_priv->card_type < NV_50) { - if (*old_tile) - nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj); - + nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj); *old_tile = new_tile; } } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index ce0475ead381..8b524d894f18 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -66,10 +66,11 @@ struct nouveau_grctx; #define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK) struct nouveau_tile_reg { - struct nouveau_fence *fence; - uint32_t addr; - uint32_t size; bool used; + uint32_t addr; + uint32_t limit; + uint32_t pitch; + struct nouveau_fence *fence; }; struct nouveau_bo { @@ -309,8 +310,11 @@ struct nouveau_fb_engine { int (*init)(struct drm_device *dev); void (*takedown)(struct drm_device *dev); - void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch); + void (*init_tile_region)(struct drm_device *dev, int i, + uint32_t addr, uint32_t size, + uint32_t pitch, uint32_t flags); + void (*set_tile_region)(struct drm_device *dev, int i); + void (*free_tile_region)(struct drm_device *dev, int i); }; struct nouveau_fifo_engine { @@ -356,8 +360,7 @@ struct nouveau_pgraph_engine { int (*unload_context)(struct drm_device *); void (*tlb_flush)(struct drm_device *dev); - void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch); + void (*set_tile_region)(struct drm_device *dev, int i); }; struct nouveau_display_engine { @@ -668,7 +671,10 @@ struct drm_nouveau_private { } gart_info; /* nv10-nv40 tiling regions */ - struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR]; + struct { + struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR]; + spinlock_t lock; + } tile; /* VRAM/fb configuration */ uint64_t vram_size; @@ -798,13 
+804,12 @@ extern void nouveau_mem_gart_fini(struct drm_device *); extern int nouveau_mem_init_agp(struct drm_device *); extern int nouveau_mem_reset_agp(struct drm_device *); extern void nouveau_mem_close(struct drm_device *); -extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev, - uint32_t addr, - uint32_t size, - uint32_t pitch); -extern void nv10_mem_expire_tiling(struct drm_device *dev, - struct nouveau_tile_reg *tile, - struct nouveau_fence *fence); +extern struct nouveau_tile_reg *nv10_mem_set_tiling( + struct drm_device *dev, uint32_t addr, uint32_t size, + uint32_t pitch, uint32_t flags); +extern void nv10_mem_put_tile_region(struct drm_device *dev, + struct nouveau_tile_reg *tile, + struct nouveau_fence *fence); extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt, uint32_t size, uint32_t flags, uint64_t phys); @@ -1011,18 +1016,25 @@ extern void nv04_fb_takedown(struct drm_device *); /* nv10_fb.c */ extern int nv10_fb_init(struct drm_device *); extern void nv10_fb_takedown(struct drm_device *); -extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t, - uint32_t, uint32_t); +extern void nv10_fb_init_tile_region(struct drm_device *dev, int i, + uint32_t addr, uint32_t size, + uint32_t pitch, uint32_t flags); +extern void nv10_fb_set_tile_region(struct drm_device *dev, int i); +extern void nv10_fb_free_tile_region(struct drm_device *dev, int i); /* nv30_fb.c */ extern int nv30_fb_init(struct drm_device *); extern void nv30_fb_takedown(struct drm_device *); +extern void nv30_fb_init_tile_region(struct drm_device *dev, int i, + uint32_t addr, uint32_t size, + uint32_t pitch, uint32_t flags); +extern void nv30_fb_free_tile_region(struct drm_device *dev, int i); /* nv40_fb.c */ extern int nv40_fb_init(struct drm_device *); extern void nv40_fb_takedown(struct drm_device *); -extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, - uint32_t, uint32_t); +extern void nv40_fb_set_tile_region(struct drm_device *dev, int i); + /* nv50_fb.c */ extern int nv50_fb_init(struct drm_device *); extern void nv50_fb_takedown(struct drm_device *); @@ -1102,8 +1114,7 @@ extern void nv10_graph_destroy_context(struct nouveau_channel *); extern int nv10_graph_load_context(struct nouveau_channel *); extern int nv10_graph_unload_context(struct drm_device *); extern void nv10_graph_context_switch(struct drm_device *); -extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t, - uint32_t, uint32_t); +extern void nv10_graph_set_tile_region(struct drm_device *dev, int i); /* nv20_graph.c */ extern int nv20_graph_create_context(struct nouveau_channel *); @@ -1113,8 +1124,7 @@ extern int nv20_graph_unload_context(struct drm_device *); extern int nv20_graph_init(struct drm_device *); extern void nv20_graph_takedown(struct drm_device *); extern int nv30_graph_init(struct drm_device *); -extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t, - uint32_t, uint32_t); +extern void nv20_graph_set_tile_region(struct drm_device *dev, int i); /* nv40_graph.c */ extern int nv40_graph_init(struct drm_device *); @@ -1125,8 +1135,7 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *); extern int nv40_graph_load_context(struct nouveau_channel *); extern int nv40_graph_unload_context(struct drm_device *); extern void nv40_grctx_init(struct nouveau_grctx *); -extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t, - uint32_t, uint32_t); +extern void 
nv40_graph_set_tile_region(struct drm_device *dev, int i); /* nv50_graph.c */ extern int nv50_graph_init(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index a7c3e08aa7b5..549f59052881 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -42,83 +42,104 @@ */ static void -nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch) +nv10_mem_update_tile_region(struct drm_device *dev, + struct nouveau_tile_reg *tile, uint32_t addr, + uint32_t size, uint32_t pitch, uint32_t flags) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - struct nouveau_tile_reg *tile = &dev_priv->tile[i]; + int i = tile - dev_priv->tile.reg; + unsigned long save; - tile->addr = addr; - tile->size = size; - tile->used = !!pitch; nouveau_fence_unref(&tile->fence); + if (tile->pitch) + pfb->free_tile_region(dev, i); + + if (pitch) + pfb->init_tile_region(dev, i, addr, size, pitch, flags); + + spin_lock_irqsave(&dev_priv->context_switch_lock, save); pfifo->reassign(dev, false); pfifo->cache_pull(dev, false); nouveau_wait_for_idle(dev); - pgraph->set_region_tiling(dev, i, addr, size, pitch); - pfb->set_region_tiling(dev, i, addr, size, pitch); + pfb->set_tile_region(dev, i); + pgraph->set_tile_region(dev, i); pfifo->cache_pull(dev, true); pfifo->reassign(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, save); } -struct nouveau_tile_reg * -nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, - uint32_t pitch) +static struct nouveau_tile_reg * +nv10_mem_get_tile_region(struct drm_device *dev, int i) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; - struct nouveau_tile_reg *found = NULL; - unsigned long i, flags; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; - spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + spin_lock(&dev_priv->tile.lock); - for (i = 0; i < pfb->num_tiles; i++) { - struct nouveau_tile_reg *tile = &dev_priv->tile[i]; - - if (tile->used) - /* Tile region in use. */ - continue; + if (!tile->used && + (!tile->fence || nouveau_fence_signalled(tile->fence))) + tile->used = true; + else + tile = NULL; - if (tile->fence && - !nouveau_fence_signalled(tile->fence)) - /* Pending tile region. */ - continue; + spin_unlock(&dev_priv->tile.lock); + return tile; +} - if (max(tile->addr, addr) < - min(tile->addr + tile->size, addr + size)) - /* Kill an intersecting tile region. */ - nv10_mem_set_region_tiling(dev, i, 0, 0, 0); +void +nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile, + struct nouveau_fence *fence) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; - if (pitch && !found) { - /* Free tile region. */ - nv10_mem_set_region_tiling(dev, i, addr, size, pitch); - found = tile; + if (tile) { + spin_lock(&dev_priv->tile.lock); + if (fence) { + /* Mark it as pending. 
*/ + tile->fence = fence; + nouveau_fence_ref(fence); } - } - - spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); - return found; + tile->used = false; + spin_unlock(&dev_priv->tile.lock); + } } -void -nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile, - struct nouveau_fence *fence) +struct nouveau_tile_reg * +nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, + uint32_t pitch, uint32_t flags) { - if (fence) { - /* Mark it as pending. */ - tile->fence = fence; - nouveau_fence_ref(fence); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; + struct nouveau_tile_reg *tile, *found = NULL; + int i; + + for (i = 0; i < pfb->num_tiles; i++) { + tile = nv10_mem_get_tile_region(dev, i); + + if (pitch && !found) { + found = tile; + continue; + + } else if (tile && tile->pitch) { + /* Kill an unused tile region. */ + nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0); + } + + nv10_mem_put_tile_region(dev, tile, NULL); } - tile->used = false; + if (found) + nv10_mem_update_tile_region(dev, found, addr, size, + pitch, flags); + return found; } /* diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index ec9d193b6f61..1a7a50ccb7c8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -118,7 +118,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv10_fb_init; engine->fb.takedown = nv10_fb_takedown; - engine->fb.set_region_tiling = nv10_fb_set_region_tiling; + engine->fb.init_tile_region = nv10_fb_init_tile_region; + engine->fb.set_tile_region = nv10_fb_set_tile_region; + engine->fb.free_tile_region = nv10_fb_free_tile_region; engine->graph.init = nv10_graph_init; engine->graph.takedown = nv10_graph_takedown; engine->graph.channel = nv10_graph_channel; @@ -127,7 +129,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.fifo_access = nv04_graph_fifo_access; engine->graph.load_context = nv10_graph_load_context; engine->graph.unload_context = nv10_graph_unload_context; - engine->graph.set_region_tiling = nv10_graph_set_region_tiling; + engine->graph.set_tile_region = nv10_graph_set_tile_region; engine->fifo.channels = 32; engine->fifo.init = nv10_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; @@ -173,7 +175,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv10_fb_init; engine->fb.takedown = nv10_fb_takedown; - engine->fb.set_region_tiling = nv10_fb_set_region_tiling; + engine->fb.init_tile_region = nv10_fb_init_tile_region; + engine->fb.set_tile_region = nv10_fb_set_tile_region; + engine->fb.free_tile_region = nv10_fb_free_tile_region; engine->graph.init = nv20_graph_init; engine->graph.takedown = nv20_graph_takedown; engine->graph.channel = nv10_graph_channel; @@ -182,7 +186,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.fifo_access = nv04_graph_fifo_access; engine->graph.load_context = nv20_graph_load_context; engine->graph.unload_context = nv20_graph_unload_context; - engine->graph.set_region_tiling = nv20_graph_set_region_tiling; + engine->graph.set_tile_region = nv20_graph_set_tile_region; engine->fifo.channels = 32; engine->fifo.init = nv10_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; @@ -228,7 +232,9 @@ static int nouveau_init_engine_ptrs(struct 
drm_device *dev) engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv30_fb_init; engine->fb.takedown = nv30_fb_takedown; - engine->fb.set_region_tiling = nv10_fb_set_region_tiling; + engine->fb.init_tile_region = nv30_fb_init_tile_region; + engine->fb.set_tile_region = nv10_fb_set_tile_region; + engine->fb.free_tile_region = nv30_fb_free_tile_region; engine->graph.init = nv30_graph_init; engine->graph.takedown = nv20_graph_takedown; engine->graph.fifo_access = nv04_graph_fifo_access; @@ -237,7 +243,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.destroy_context = nv20_graph_destroy_context; engine->graph.load_context = nv20_graph_load_context; engine->graph.unload_context = nv20_graph_unload_context; - engine->graph.set_region_tiling = nv20_graph_set_region_tiling; + engine->graph.set_tile_region = nv20_graph_set_tile_region; engine->fifo.channels = 32; engine->fifo.init = nv10_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; @@ -286,7 +292,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->timer.takedown = nv04_timer_takedown; engine->fb.init = nv40_fb_init; engine->fb.takedown = nv40_fb_takedown; - engine->fb.set_region_tiling = nv40_fb_set_region_tiling; + engine->fb.init_tile_region = nv30_fb_init_tile_region; + engine->fb.set_tile_region = nv40_fb_set_tile_region; + engine->fb.free_tile_region = nv30_fb_free_tile_region; engine->graph.init = nv40_graph_init; engine->graph.takedown = nv40_graph_takedown; engine->graph.fifo_access = nv04_graph_fifo_access; @@ -295,7 +303,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.destroy_context = nv40_graph_destroy_context; engine->graph.load_context = nv40_graph_load_context; engine->graph.unload_context = nv40_graph_unload_context; - engine->graph.set_region_tiling = nv40_graph_set_region_tiling; + engine->graph.set_tile_region = nv40_graph_set_tile_region; engine->fifo.channels = 32; engine->fifo.init = nv40_fifo_init; engine->fifo.takedown = nouveau_stub_takedown; @@ -596,6 +604,7 @@ nouveau_card_init(struct drm_device *dev) goto out; engine = &dev_priv->engine; spin_lock_init(&dev_priv->channels.lock); + spin_lock_init(&dev_priv->tile.lock); spin_lock_init(&dev_priv->context_switch_lock); /* Make the CRTCs and I2C buses accessible */ diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c index cc5cda44e501..d50acc6a90d1 100644 --- a/drivers/gpu/drm/nouveau/nv10_fb.c +++ b/drivers/gpu/drm/nouveau/nv10_fb.c @@ -4,22 +4,40 @@ #include "nouveau_drm.h" void -nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch) +nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr, + uint32_t size, uint32_t pitch, uint32_t flags) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t limit = max(1u, addr + size) - 1; - - if (pitch) { - if (dev_priv->card_type >= NV_20) - addr |= 1; - else - addr |= 1 << 31; - } - - nv_wr32(dev, NV10_PFB_TLIMIT(i), limit); - nv_wr32(dev, NV10_PFB_TSIZE(i), pitch); - nv_wr32(dev, NV10_PFB_TILE(i), addr); + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + + tile->addr = addr; + tile->limit = max(1u, addr + size) - 1; + tile->pitch = pitch; + + if (dev_priv->card_type == NV_20) + tile->addr |= 1; + else + tile->addr |= 1 << 31; +} + +void +nv10_fb_free_tile_region(struct drm_device *dev, int i) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; 
+ + tile->addr = tile->limit = tile->pitch = 0; +} + +void +nv10_fb_set_tile_region(struct drm_device *dev, int i) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + + nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit); + nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch); + nv_wr32(dev, NV10_PFB_TILE(i), tile->addr); } int @@ -33,7 +51,7 @@ nv10_fb_init(struct drm_device *dev) /* Turn all the tiling regions off. */ for (i = 0; i < pfb->num_tiles; i++) - pfb->set_region_tiling(dev, i, 0, 0, 0); + pfb->set_tile_region(dev, i); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index 3fbb49dfc09c..1cd141edca04 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c @@ -899,17 +899,14 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan) } void -nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch) +nv10_graph_set_tile_region(struct drm_device *dev, int i) { - uint32_t limit = max(1u, addr + size) - 1; - - if (pitch) - addr |= 1 << 31; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; - nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit); - nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch); - nv_wr32(dev, NV10_PGRAPH_TILE(i), addr); + nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr); } int nv10_graph_init(struct drm_device *dev) @@ -949,7 +946,7 @@ int nv10_graph_init(struct drm_device *dev) /* Turn all the tiling regions off. */ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) - nv10_graph_set_region_tiling(dev, i, 0, 0, 0); + nv10_graph_set_tile_region(dev, i); nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000); nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000); diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index 51b9dd12949d..a71871b91c69 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c @@ -511,24 +511,21 @@ nv20_graph_rdi(struct drm_device *dev) } void -nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch) +nv20_graph_set_tile_region(struct drm_device *dev, int i) { - uint32_t limit = max(1u, addr + size) - 1; - - if (pitch) - addr |= 1; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; - nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); - nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); - nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); + nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i); - nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit); nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i); - nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch); nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); - nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr); } int @@ -612,7 +609,7 @@ nv20_graph_init(struct drm_device *dev) /* Turn all the tiling regions off. 
*/ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) - nv20_graph_set_region_tiling(dev, i, 0, 0, 0); + nv20_graph_set_tile_region(dev, i); for (i = 0; i < 8; i++) { nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4)); @@ -751,7 +748,7 @@ nv30_graph_init(struct drm_device *dev) /* Turn all the tiling regions off. */ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) - nv20_graph_set_region_tiling(dev, i, 0, 0, 0); + nv20_graph_set_tile_region(dev, i); nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100); nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF); diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c index 4a3f2f095128..e0135f0e2144 100644 --- a/drivers/gpu/drm/nouveau/nv30_fb.c +++ b/drivers/gpu/drm/nouveau/nv30_fb.c @@ -29,6 +29,27 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" +void +nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr, + uint32_t size, uint32_t pitch, uint32_t flags) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + + tile->addr = addr | 1; + tile->limit = max(1u, addr + size) - 1; + tile->pitch = pitch; +} + +void +nv30_fb_free_tile_region(struct drm_device *dev, int i) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + + tile->addr = tile->limit = tile->pitch = 0; +} + static int calc_bias(struct drm_device *dev, int k, int i, int j) { @@ -65,7 +86,7 @@ nv30_fb_init(struct drm_device *dev) /* Turn all the tiling regions off. */ for (i = 0; i < pfb->num_tiles; i++) - pfb->set_region_tiling(dev, i, 0, 0, 0); + pfb->set_tile_region(dev, i); /* Init the memory timing regs at 0x10037c/0x1003ac */ if (dev_priv->chipset == 0x30 || diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c index 3cd07d8d5bd7..f3d9c0505f7b 100644 --- a/drivers/gpu/drm/nouveau/nv40_fb.c +++ b/drivers/gpu/drm/nouveau/nv40_fb.c @@ -4,26 +4,22 @@ #include "nouveau_drm.h" void -nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch) +nv40_fb_set_tile_region(struct drm_device *dev, int i) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t limit = max(1u, addr + size) - 1; - - if (pitch) - addr |= 1; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; switch (dev_priv->chipset) { case 0x40: - nv_wr32(dev, NV10_PFB_TLIMIT(i), limit); - nv_wr32(dev, NV10_PFB_TSIZE(i), pitch); - nv_wr32(dev, NV10_PFB_TILE(i), addr); + nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit); + nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch); + nv_wr32(dev, NV10_PFB_TILE(i), tile->addr); break; default: - nv_wr32(dev, NV40_PFB_TLIMIT(i), limit); - nv_wr32(dev, NV40_PFB_TSIZE(i), pitch); - nv_wr32(dev, NV40_PFB_TILE(i), addr); + nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit); + nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch); + nv_wr32(dev, NV40_PFB_TILE(i), tile->addr); break; } } @@ -64,7 +60,7 @@ nv40_fb_init(struct drm_device *dev) /* Turn all the tiling regions off. 
*/ for (i = 0; i < pfb->num_tiles; i++) - pfb->set_region_tiling(dev, i, 0, 0, 0); + pfb->set_tile_region(dev, i); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 159bdcd757d4..7a51608b55ba 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -192,43 +192,39 @@ nv40_graph_unload_context(struct drm_device *dev) } void -nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, - uint32_t size, uint32_t pitch) +nv40_graph_set_tile_region(struct drm_device *dev, int i) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t limit = max(1u, addr + size) - 1; - - if (pitch) - addr |= 1; + struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; switch (dev_priv->chipset) { case 0x44: case 0x4a: case 0x4e: - nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); - nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); - nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); + nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); break; case 0x46: case 0x47: case 0x49: case 0x4b: - nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch); - nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit); - nv_wr32(dev, NV47_PGRAPH_TILE(i), addr); - nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch); - nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit); - nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr); + nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); + nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); + nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); + nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); break; default: - nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch); - nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit); - nv_wr32(dev, NV20_PGRAPH_TILE(i), addr); - nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch); - nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit); - nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr); + nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); + nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); + nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); + nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); + nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); + nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); break; } } @@ -369,7 +365,7 @@ nv40_graph_init(struct drm_device *dev) /* Turn all the tiling regions off. */ for (i = 0; i < pfb->num_tiles; i++) - nv40_graph_set_region_tiling(dev, i, 0, 0, 0); + nv40_graph_set_tile_region(dev, i); /* begin RAM config */ vramsz = pci_resource_len(dev->pdev, 0) - 1; -- cgit v1.2.3 From 87a326a38589e1c919af5f86a59cd571ff0aa831 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sun, 24 Oct 2010 16:36:12 +0200 Subject: drm/nv20: Add Z compression support. 
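In rough terms (a sketch condensed from the nv10_fb.c hunk below): when a tile region is created for a zeta buffer, part of the on-die tag memory is reserved for it and the matching NV20_PFB_ZCOMP entry is pointed at it:

	/* Sketch only; chipset >= 0x25 path, constants from nouveau_reg.h. */
	tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
	if (tile->tag_mem)
		tile->zcomp = tile->tag_mem->start |
			      (bpp == 16 ? NV25_PFB_ZCOMP_MODE_16
					 : NV25_PFB_ZCOMP_MODE_32);

	/* set_tile_region() later makes it take effect: */
	nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);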
Signed-off-by: Francisco Jerez Tested-by: Xavier Chantry Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 3 ++ drivers/gpu/drm/nouveau/nouveau_reg.h | 6 +++ drivers/gpu/drm/nouveau/nv10_fb.c | 90 +++++++++++++++++++++++++++++++++-- drivers/gpu/drm/nouveau/nv20_graph.c | 21 ++++---- 4 files changed, 106 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 8b524d894f18..a94430b94a20 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -70,6 +70,8 @@ struct nouveau_tile_reg { uint32_t addr; uint32_t limit; uint32_t pitch; + uint32_t zcomp; + struct drm_mm_node *tag_mem; struct nouveau_fence *fence; }; @@ -306,6 +308,7 @@ struct nouveau_timer_engine { struct nouveau_fb_engine { int num_tiles; + struct drm_mm tag_heap; int (*init)(struct drm_device *dev); void (*takedown)(struct drm_device *dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index b6384d36d5d0..df3a87e792f2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -45,6 +45,11 @@ # define NV04_PFB_REF_CMD_REFRESH (1 << 0) #define NV04_PFB_PRE 0x001002d4 # define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0) +#define NV20_PFB_ZCOMP(i) (0x00100300 + 4*(i)) +# define NV20_PFB_ZCOMP_MODE_32 (4 << 24) +# define NV20_PFB_ZCOMP_EN (1 << 31) +# define NV25_PFB_ZCOMP_MODE_16 (1 << 20) +# define NV25_PFB_ZCOMP_MODE_32 (2 << 20) #define NV10_PFB_CLOSE_PAGE2 0x0010033c #define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i)) #define NV40_PFB_TILE(i) (0x00100600 + (i*16)) @@ -379,6 +384,7 @@ #define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16)) #define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) #define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) +#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i)) #define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) #define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) #define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c index d50acc6a90d1..f78181a59b4a 100644 --- a/drivers/gpu/drm/nouveau/nv10_fb.c +++ b/drivers/gpu/drm/nouveau/nv10_fb.c @@ -3,21 +3,81 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" +static struct drm_mm_node * +nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; + struct drm_mm_node *mem; + int ret; + + ret = drm_mm_pre_get(&pfb->tag_heap); + if (ret) + return NULL; + + spin_lock(&dev_priv->tile.lock); + mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0); + if (mem) + mem = drm_mm_get_block_atomic(mem, size, 0); + spin_unlock(&dev_priv->tile.lock); + + return mem; +} + +static void +nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + spin_lock(&dev_priv->tile.lock); + drm_mm_put_block(mem); + spin_unlock(&dev_priv->tile.lock); +} + void nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr, uint32_t size, uint32_t pitch, uint32_t flags) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; + int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 
32 : 16); tile->addr = addr; tile->limit = max(1u, addr + size) - 1; tile->pitch = pitch; - if (dev_priv->card_type == NV_20) - tile->addr |= 1; - else + if (dev_priv->card_type == NV_20) { + if (flags & NOUVEAU_GEM_TILE_ZETA) { + /* + * Allocate some of the on-die tag memory, + * used to store Z compression meta-data (most + * likely just a bitmap determining if a given + * tile is compressed or not). + */ + tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256); + + if (tile->tag_mem) { + /* Enable Z compression */ + if (dev_priv->chipset >= 0x25) + tile->zcomp = tile->tag_mem->start | + (bpp == 16 ? + NV25_PFB_ZCOMP_MODE_16 : + NV25_PFB_ZCOMP_MODE_32); + else + tile->zcomp = tile->tag_mem->start | + NV20_PFB_ZCOMP_EN | + (bpp == 16 ? 0 : + NV20_PFB_ZCOMP_MODE_32); + } + + tile->addr |= 3; + } else { + tile->addr |= 1; + } + + } else { tile->addr |= 1 << 31; + } } void @@ -26,7 +86,12 @@ nv10_fb_free_tile_region(struct drm_device *dev, int i) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; - tile->addr = tile->limit = tile->pitch = 0; + if (tile->tag_mem) { + nv20_fb_free_tag(dev, tile->tag_mem); + tile->tag_mem = NULL; + } + + tile->addr = tile->limit = tile->pitch = tile->zcomp = 0; } void @@ -38,6 +103,9 @@ nv10_fb_set_tile_region(struct drm_device *dev, int i) nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit); nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch); nv_wr32(dev, NV10_PFB_TILE(i), tile->addr); + + if (dev_priv->card_type == NV_20) + nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp); } int @@ -49,6 +117,11 @@ nv10_fb_init(struct drm_device *dev) pfb->num_tiles = NV10_PFB_TILE__SIZE; + if (dev_priv->card_type == NV_20) + drm_mm_init(&pfb->tag_heap, 0, + (dev_priv->chipset >= 0x25 ? + 64 * 1024 : 32 * 1024)); + /* Turn all the tiling regions off. 
*/ for (i = 0; i < pfb->num_tiles; i++) pfb->set_tile_region(dev, i); @@ -59,4 +132,13 @@ nv10_fb_init(struct drm_device *dev) void nv10_fb_takedown(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; + int i; + + for (i = 0; i < pfb->num_tiles; i++) + pfb->free_tile_region(dev, i); + + if (dev_priv->card_type == NV_20) + drm_mm_takedown(&pfb->tag_heap); } diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index a71871b91c69..bd065c2fcba4 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c @@ -526,6 +526,12 @@ nv20_graph_set_tile_region(struct drm_device *dev, int i) nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch); nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr); + + if (dev_priv->card_type == NV_20) { + nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp); + nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i); + nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp); + } } int @@ -589,16 +595,17 @@ nv20_graph_init(struct drm_device *dev) nv_wr32(dev, 0x40009C , 0x00000040); if (dev_priv->chipset >= 0x25) { - nv_wr32(dev, 0x400890, 0x00080000); + nv_wr32(dev, 0x400890, 0x00a8cfff); nv_wr32(dev, 0x400610, 0x304B1FB6); - nv_wr32(dev, 0x400B80, 0x18B82880); + nv_wr32(dev, 0x400B80, 0x1cbd3883); nv_wr32(dev, 0x400B84, 0x44000000); nv_wr32(dev, 0x400098, 0x40000080); nv_wr32(dev, 0x400B88, 0x000000ff); + } else { - nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */ + nv_wr32(dev, 0x400880, 0x0008c7df); nv_wr32(dev, 0x400094, 0x00000005); - nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */ + nv_wr32(dev, 0x400B80, 0x45eae20e); nv_wr32(dev, 0x400B84, 0x24000000); nv_wr32(dev, 0x400098, 0x00000040); nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038); @@ -611,12 +618,6 @@ nv20_graph_init(struct drm_device *dev) for (i = 0; i < NV10_PFB_TILE__SIZE; i++) nv20_graph_set_tile_region(dev, i); - for (i = 0; i < 8; i++) { - nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4)); - nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4); - nv_wr32(dev, NV10_PGRAPH_RDI_DATA, - nv_rd32(dev, 0x100300 + i * 4)); - } nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324)); nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C); nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324)); -- cgit v1.2.3 From 23c45e8ed203f933753fb66a6290c4ff853eb7bb Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 28 Oct 2010 23:10:29 +0200 Subject: drm/nouveau: Fix sleep while atomic in nouveau_bo_fence(). 
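nouveau_bo_fence() dropped the old fence reference with __nouveau_fence_unref() while holding the bo.bdev->fence_lock spinlock; the final unref of a fence can take paths that may sleep, so this is a sleep-while-atomic bug. The fix takes the new reference before acquiring the lock, performs only the sync_obj pointer swap inside the critical section, and releases the old fence after the lock is dropped. A minimal sketch of the pattern, with hypothetical type and helper names standing in for the nouveau ones:

	void set_fence(struct obj *o, struct fence *f)
	{
		struct fence *old;

		if (f)
			fence_get(f);	/* take new ref outside the lock */

		spin_lock(&o->lock);
		old = o->fence;		/* only the pointer swap is atomic */
		o->fence = f;
		spin_unlock(&o->lock);

		fence_put(old);		/* final unref may sleep; safe here */
	}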
Reported-by: Pekka Paalanen Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 8d5dd980240d..f4ee43db00aa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -942,13 +942,17 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) void nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence) { - spin_lock(&nvbo->bo.bdev->fence_lock); - __nouveau_fence_unref(&nvbo->bo.sync_obj); + struct nouveau_fence *old_fence; if (likely(fence)) - nvbo->bo.sync_obj = nouveau_fence_ref(fence); + nouveau_fence_ref(fence); + spin_lock(&nvbo->bo.bdev->fence_lock); + old_fence = nvbo->bo.sync_obj; + nvbo->bo.sync_obj = fence; spin_unlock(&nvbo->bo.bdev->fence_lock); + + nouveau_fence_unref(&old_fence); } struct ttm_bo_driver nouveau_bo_driver = { -- cgit v1.2.3
From aa2c2e8039e3692065022a7ff885009b2d88818a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 4 Nov 2010 13:40:15 +1000 Subject: drm/nv50: fix compute object class
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_graph.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index e0f52942b2eb..7a98d41c68a6 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -425,8 +425,6 @@ nv50_graph_register(struct drm_device *dev) NVOBJ_CLASS(dev, 0x0030, GR); /* null */ NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */ NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */ - NVOBJ_CLASS(dev, 0x50c0, GR); /* compute */ - NVOBJ_CLASS(dev, 0x85c0, GR); /* compute (nva3, nva5, nva8) */ /* tesla */ if (dev_priv->chipset == 0x50) @@ -452,6 +450,14 @@ nv50_graph_register(struct drm_device *dev) } } + /* compute */ + if (dev_priv->chipset <= 0xa0 || + dev_priv->chipset == 0xaa || + dev_priv->chipset == 0xac) + NVOBJ_CLASS(dev, 0x50c0, GR); + else + NVOBJ_CLASS(dev, 0x85c0, GR); + dev_priv->engine.graph.registered = true; return 0; } -- cgit v1.2.3
From a169f09b96306cc353ffe0e1bc4bc0e1e9492281 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 5 Nov 2010 09:40:00 +1000 Subject: drm/nv50: 0x50c0 apparently works on NVA3+ too, so let's allow it
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_graph.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 7a98d41c68a6..6785269f778a 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -451,11 +451,10 @@ nv50_graph_register(struct drm_device *dev) } /* compute */ - if (dev_priv->chipset <= 0xa0 || - dev_priv->chipset == 0xaa || - dev_priv->chipset == 0xac) - NVOBJ_CLASS(dev, 0x50c0, GR); - else + NVOBJ_CLASS(dev, 0x50c0, GR); + if (dev_priv->chipset > 0xa0 && + dev_priv->chipset != 0xaa && + dev_priv->chipset != 0xac) NVOBJ_CLASS(dev, 0x85c0, GR); dev_priv->engine.graph.registered = true; -- cgit v1.2.3
From 8f8a54482b008714ccfad15f4592b3802b80d479 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 3 Nov 2010 09:57:28 +1000 Subject: drm/nouveau: allow irq handlers to be installed by engine-specific code Let's start to clean up this mess!
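Engine code can now claim a PMC_INTR_0 status bit with nouveau_irq_register() and give it back with nouveau_irq_unregister(); the core handler dispatches any pending bits that have a handler installed and only complains about the genuinely unhandled ones. A usage sketch for a hypothetical engine follows; the engine name is a placeholder, while the bit number and registers mirror the PCRYPT conversion later in this series:

	static void myengine_isr(struct drm_device *dev)
	{
		u32 stat = nv_rd32(dev, 0x102130);

		/* handle/log the interrupt, then ack it */
		nv_wr32(dev, 0x102130, stat);
	}

	int myengine_init(struct drm_device *dev)
	{
		nouveau_irq_register(dev, 14, myengine_isr);
		return 0;
	}

	void myengine_fini(struct drm_device *dev)
	{
		nouveau_irq_unregister(dev, 14);
	}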
Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 4 ++++ drivers/gpu/drm/nouveau/nouveau_irq.c | 34 +++++++++++++++++++++++++++++++++- 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index a94430b94a20..52dc97d87ebd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -614,6 +614,7 @@ struct drm_nouveau_private { struct nouveau_bo *vga_ram; /* interrupt handling */ + void (*irq_handler[32])(struct drm_device *); bool msi_enabled; struct workqueue_struct *wq; struct work_struct irq_work; @@ -900,6 +901,9 @@ extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, extern int nouveau_irq_init(struct drm_device *); extern void nouveau_irq_fini(struct drm_device *); extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); +extern void nouveau_irq_register(struct drm_device *, int status_bit, + void (*)(struct drm_device *)); +extern void nouveau_irq_unregister(struct drm_device *, int status_bit); extern void nouveau_irq_preinstall(struct drm_device *); extern int nouveau_irq_postinstall(struct drm_device *); extern void nouveau_irq_uninstall(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index f3ae74ef3318..819bc3dd89e0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -1216,8 +1216,9 @@ nouveau_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *)arg; struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t status; unsigned long flags; + u32 status; + int i; status = nv_rd32(dev, NV03_PMC_INTR_0); if (!status) @@ -1267,6 +1268,14 @@ nouveau_irq_handler(DRM_IRQ_ARGS) NV_PMC_INTR_0_NV50_I2C_PENDING); } + for (i = 0; i < 32 && status; i++) { + if (!(status & (1 << i)) || !dev_priv->irq_handler[i]) + continue; + + dev_priv->irq_handler[i](dev); + status &= ~(1 << i); + } + if (status) NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status); @@ -1304,3 +1313,26 @@ nouveau_irq_fini(struct drm_device *dev) if (dev_priv->msi_enabled) pci_disable_msi(dev->pdev); } + +void +nouveau_irq_register(struct drm_device *dev, int status_bit, + void (*handler)(struct drm_device *)) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + dev_priv->irq_handler[status_bit] = handler; + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); +} + +void +nouveau_irq_unregister(struct drm_device *dev, int status_bit) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + unsigned long flags; + + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + dev_priv->irq_handler[status_bit] = NULL; + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); +} -- cgit v1.2.3 From d7facf9dc50acff69de9688088caa78b3cf69ebb Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 3 Nov 2010 10:06:43 +1000 Subject: drm/nv84: move PCRYPT ISR out of nouveau_irq.c Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 2 +- drivers/gpu/drm/nouveau/nouveau_irq.c | 25 +---------------------- drivers/gpu/drm/nouveau/nouveau_util.c | 36 ++++++++++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_util.h | 33 +++++++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nv84_crypt.c | 27 +++++++++++++++++++++++++ 5 files changed, 98 
insertions(+), 25 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nouveau_util.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_util.h
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index c8c8de0bbc77..7ea9a1154ca8 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -5,7 +5,7 @@ ccflags-y := -Iinclude/drm nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o \ - nouveau_sgdma.o nouveau_dma.o \ + nouveau_sgdma.o nouveau_dma.o nouveau_util.o \ nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 819bc3dd89e0..bdaf8ae44476 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -36,19 +36,12 @@ #include "nouveau_drv.h" #include "nouveau_reg.h" #include "nouveau_ramht.h" -#include <linux/ratelimit.h> +#include "nouveau_util.h" /* needed for hotplug irq */ #include "nouveau_connector.h" #include "nv50_display.h" -static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); - -static int nouveau_ratelimit(void) -{ - return __ratelimit(&nouveau_ratelimit_state); -} - void nouveau_irq_preinstall(struct drm_device *dev) { @@ -1240,22 +1233,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS) status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; } - if (status & 0x00004000) { - u32 stat = nv_rd32(dev, 0x102130); - u32 mthd = nv_rd32(dev, 0x102190); - u32 data = nv_rd32(dev, 0x102194); - u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff; - - NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n", - stat, mthd, data, inst); - nv_wr32(dev, 0x102130, stat); - nv_wr32(dev, 0x10200c, 0x10); - - nv50_fb_vm_trap(dev, nouveau_ratelimit(), "PCRYPT"); - status &= ~0x00004000; - - } - if (status & NV_PMC_INTR_0_CRTCn_PENDING) { nouveau_crtc_irq_handler(dev, (status>>24)&3); status &= ~NV_PMC_INTR_0_CRTCn_PENDING; }
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c new file mode 100644 index 000000000000..e8b1eaaa212b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_util.c @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2010 Nouveau Project + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * + */ + +#include <linux/ratelimit.h> + +static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); + +int +nouveau_ratelimit(void) +{ + return __ratelimit(&nouveau_ratelimit_state); +}
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h new file mode 100644 index 000000000000..9a7a7c18c99a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_util.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2010 Nouveau Project + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __NOUVEAU_UTIL_H__ +#define __NOUVEAU_UTIL_H__ + +int nouveau_ratelimit(void); + +#endif
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c index f988b1a9d1d7..1cda0240f55d 100644 --- a/drivers/gpu/drm/nouveau/nv84_crypt.c +++ b/drivers/gpu/drm/nouveau/nv84_crypt.c @@ -24,6 +24,9 @@ #include "drmP.h" #include "nouveau_drv.h" +#include "nouveau_util.h" + +static void nv84_crypt_isr(struct drm_device *); int nv84_crypt_create_context(struct nouveau_channel *chan) @@ -97,8 +100,11 @@ nv84_crypt_init(struct drm_device *dev) nv_mask(dev, 0x000200, 0x00004000, 0x00000000); nv_mask(dev, 0x000200, 0x00004000, 0x00004000); + + nouveau_irq_register(dev, 14, nv84_crypt_isr); nv_wr32(dev, 0x102130, 0xffffffff); nv_wr32(dev, 0x102140, 0xffffffbf); + nv_wr32(dev, 0x10200c, 0x00000010); return 0; } @@ -107,4 +113,25 @@ void nv84_crypt_fini(struct drm_device *dev) { nv_wr32(dev, 0x102140, 0x00000000); + nouveau_irq_unregister(dev, 14); +} + +static void +nv84_crypt_isr(struct drm_device *dev) +{ + u32 stat = nv_rd32(dev, 0x102130); + u32 mthd = nv_rd32(dev, 0x102190); + u32 data = nv_rd32(dev, 0x102194); + u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff; + int show = nouveau_ratelimit(); + + if (show) { + NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n", + stat, mthd, data, inst); + } + + nv_wr32(dev, 0x102130, stat); + nv_wr32(dev, 0x10200c, 0x10); + + nv50_fb_vm_trap(dev, show, "PCRYPT"); } -- cgit v1.2.3
From 2cbd4c818578ef8f2e486dc77267ead1e503c637 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 3 Nov 2010 10:18:04 +1000 Subject: drm/nv50: move GPIO ISR to nv50_gpio.c
Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nouveau_irq.c | 6 ++---- drivers/gpu/drm/nouveau/nouveau_state.c | 2 +- drivers/gpu/drm/nouveau/nv50_display.c | 19 -----------------
drivers/gpu/drm/nouveau/nv50_gpio.c | 36 +++++++++++++++++++++++++++++++++ 5 files changed, 40 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 52dc97d87ebd..c0fad126eaa4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1359,6 +1359,7 @@ int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); /* nv50_gpio.c */ int nv50_gpio_init(struct drm_device *dev); +void nv50_gpio_fini(struct drm_device *dev); int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on); diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index bdaf8ae44476..061bae33b6e0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -1238,11 +1238,9 @@ nouveau_irq_handler(DRM_IRQ_ARGS) status &= ~NV_PMC_INTR_0_CRTCn_PENDING; } - if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING | - NV_PMC_INTR_0_NV50_I2C_PENDING)) { + if (status & NV_PMC_INTR_0_NV50_DISPLAY_PENDING) { nv50_display_irq_handler(dev); - status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING | - NV_PMC_INTR_0_NV50_I2C_PENDING); + status &= ~NV_PMC_INTR_0_NV50_DISPLAY_PENDING; } for (i = 0; i < 32 && status; i++) { diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 1a7a50ccb7c8..84bff459491e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -393,7 +393,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->display.init = nv50_display_init; engine->display.destroy = nv50_display_destroy; engine->gpio.init = nv50_gpio_init; - engine->gpio.takedown = nouveau_stub_takedown; + engine->gpio.takedown = nv50_gpio_fini; engine->gpio.get = nv50_gpio_get; engine->gpio.set = nv50_gpio_set; engine->gpio.irq_enable = nv50_gpio_irq_enable; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 41b212801870..42cb5b5c73c0 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -869,25 +869,6 @@ nv50_display_irq_handler(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t delayed = 0; - if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { - uint32_t hpd0_bits, hpd1_bits = 0; - - hpd0_bits = nv_rd32(dev, 0xe054); - nv_wr32(dev, 0xe054, hpd0_bits); - - if (dev_priv->chipset >= 0x90) { - hpd1_bits = nv_rd32(dev, 0xe074); - nv_wr32(dev, 0xe074, hpd1_bits); - } - - spin_lock(&dev_priv->hpd_state.lock); - dev_priv->hpd_state.hpd0_bits |= hpd0_bits; - dev_priv->hpd_state.hpd1_bits |= hpd1_bits; - spin_unlock(&dev_priv->hpd_state.lock); - - queue_work(dev_priv->wq, &dev_priv->hpd_work); - } - while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c index b2fab2bf3d61..302f7ebe5b6f 100644 --- a/drivers/gpu/drm/nouveau/nv50_gpio.c +++ b/drivers/gpu/drm/nouveau/nv50_gpio.c @@ -26,6 +26,8 @@ #include "nouveau_drv.h" #include "nouveau_hw.h" +static void nv50_gpio_isr(struct drm_device *dev); + static int nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, 
uint32_t *shift) { @@ -107,5 +109,39 @@ nv50_gpio_init(struct drm_device *dev) nv_wr32(dev, 0xe074, 0xffffffff); } + nouveau_irq_register(dev, 21, nv50_gpio_isr); return 0; } + +void +nv50_gpio_fini(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + nv_wr32(dev, 0xe050, 0x00000000); + if (dev_priv->chipset >= 0x90) + nv_wr32(dev, 0xe070, 0x00000000); + nouveau_irq_unregister(dev, 21); +} + +static void +nv50_gpio_isr(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t hpd0_bits, hpd1_bits = 0; + + hpd0_bits = nv_rd32(dev, 0xe054); + nv_wr32(dev, 0xe054, hpd0_bits); + + if (dev_priv->chipset >= 0x90) { + hpd1_bits = nv_rd32(dev, 0xe074); + nv_wr32(dev, 0xe074, hpd1_bits); + } + + spin_lock(&dev_priv->hpd_state.lock); + dev_priv->hpd_state.hpd0_bits |= hpd0_bits; + dev_priv->hpd_state.hpd1_bits |= hpd1_bits; + spin_unlock(&dev_priv->hpd_state.lock); + + queue_work(dev_priv->wq, &dev_priv->hpd_work); +} -- cgit v1.2.3 From 19b7fc7bf59f4bf02ee738a79baaccae31220df3 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 3 Nov 2010 10:27:27 +1000 Subject: drm/nv50: use register/unregister functionality for PDISPLAY ISR Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_irq.c | 16 +--------------- drivers/gpu/drm/nouveau/nv50_display.c | 10 ++++++++-- drivers/gpu/drm/nouveau/nv50_display.h | 1 - drivers/gpu/drm/nouveau/nv50_gpio.c | 4 ++++ 4 files changed, 13 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 061bae33b6e0..2f546884f2ed 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -38,10 +38,6 @@ #include "nouveau_ramht.h" #include "nouveau_util.h" -/* needed for hotplug irq */ -#include "nouveau_connector.h" -#include "nv50_display.h" - void nouveau_irq_preinstall(struct drm_device *dev) { @@ -50,12 +46,7 @@ nouveau_irq_preinstall(struct drm_device *dev) /* Master disable */ nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); - if (dev_priv->card_type >= NV_50) { - INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); - INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); - spin_lock_init(&dev_priv->hpd_state.lock); - INIT_LIST_HEAD(&dev_priv->vbl_waiting); - } + INIT_LIST_HEAD(&dev_priv->vbl_waiting); } int @@ -1238,11 +1229,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS) status &= ~NV_PMC_INTR_0_CRTCn_PENDING; } - if (status & NV_PMC_INTR_0_NV50_DISPLAY_PENDING) { - nv50_display_irq_handler(dev); - status &= ~NV_PMC_INTR_0_NV50_DISPLAY_PENDING; - } - for (i = 0; i < 32 && status; i++) { if (!(status & (1 << i)) || !dev_priv->irq_handler[i]) continue; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 42cb5b5c73c0..e5dbd171672c 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -33,6 +33,8 @@ #include "nouveau_ramht.h" #include "drm_crtc_helper.h" +static void nv50_display_isr(struct drm_device *); + static inline int nv50_sor_nr(struct drm_device *dev) { @@ -328,6 +330,9 @@ int nv50_display_create(struct drm_device *dev) } } + INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); + nouveau_irq_register(dev, 26, nv50_display_isr); + ret = nv50_display_init(dev); if (ret) { nv50_display_destroy(dev); @@ -345,6 +350,7 @@ nv50_display_destroy(struct drm_device *dev) drm_mode_config_cleanup(dev); nv50_display_disable(dev); + nouveau_irq_unregister(dev, 26); } 
static u16 @@ -863,8 +869,8 @@ nv50_display_irq_hotplug_bh(struct work_struct *work) drm_helper_hpd_irq_event(dev); } -void -nv50_display_irq_handler(struct drm_device *dev) +static void +nv50_display_isr(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t delayed = 0; diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h index c551f0b85ee0..a269fccf3555 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.h +++ b/drivers/gpu/drm/nouveau/nv50_display.h @@ -35,7 +35,6 @@ #include "nouveau_crtc.h" #include "nv50_evo.h" -void nv50_display_irq_handler(struct drm_device *dev); void nv50_display_irq_handler_bh(struct work_struct *work); void nv50_display_irq_hotplug_bh(struct work_struct *work); int nv50_display_early_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c index 302f7ebe5b6f..87266d1b5def 100644 --- a/drivers/gpu/drm/nouveau/nv50_gpio.c +++ b/drivers/gpu/drm/nouveau/nv50_gpio.c @@ -26,6 +26,8 @@ #include "nouveau_drv.h" #include "nouveau_hw.h" +#include "nv50_display.h" + static void nv50_gpio_isr(struct drm_device *dev); static int @@ -109,6 +111,8 @@ nv50_gpio_init(struct drm_device *dev) nv_wr32(dev, 0xe074, 0xffffffff); } + INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); + spin_lock_init(&dev_priv->hpd_state.lock); nouveau_irq_register(dev, 21, nv50_gpio_isr); return 0; } -- cgit v1.2.3 From 8cbe71a6e70b5439ae60bd542231c4b8878a8f1c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 3 Nov 2010 10:45:48 +1000 Subject: drm/nouveau: move bitfield/enum helpers to nouveau_util.c Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_irq.c | 86 +++++++--------------------------- drivers/gpu/drm/nouveau/nouveau_util.c | 33 +++++++++++++ drivers/gpu/drm/nouveau/nouveau_util.h | 12 +++++ 3 files changed, 63 insertions(+), 68 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 2f546884f2ed..e8a3c400f741 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -267,28 +267,25 @@ nouveau_fifo_irq_handler(struct drm_device *dev) nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); } -struct nouveau_bitfield_names { - uint32_t mask; - const char *name; -}; - -static struct nouveau_bitfield_names nstatus_names[] = +static struct nouveau_bitfield nstatus_names[] = { { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, - { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } + { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, + {} }; -static struct nouveau_bitfield_names nstatus_names_nv10[] = +static struct nouveau_bitfield nstatus_names_nv10[] = { { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, - { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" } + { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, + {} }; -static struct nouveau_bitfield_names nsource_names[] = +static struct nouveau_bitfield nsource_names[] = { { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, @@ -309,57 +306,9 @@ static struct nouveau_bitfield_names nsource_names[] = { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, 
"DMA_VTX_PROTECTION" }, { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, + {} }; -static void -nouveau_print_bitfield_names_(uint32_t value, - const struct nouveau_bitfield_names *namelist, - const int namelist_len) -{ - /* - * Caller must have already printed the KERN_* log level for us. - * Also the caller is responsible for adding the newline. - */ - int i; - for (i = 0; i < namelist_len; ++i) { - uint32_t mask = namelist[i].mask; - if (value & mask) { - printk(" %s", namelist[i].name); - value &= ~mask; - } - } - if (value) - printk(" (unknown bits 0x%08x)", value); -} -#define nouveau_print_bitfield_names(val, namelist) \ - nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist)) - -struct nouveau_enum_names { - uint32_t value; - const char *name; -}; - -static void -nouveau_print_enum_names_(uint32_t value, - const struct nouveau_enum_names *namelist, - const int namelist_len) -{ - /* - * Caller must have already printed the KERN_* log level for us. - * Also the caller is responsible for adding the newline. - */ - int i; - for (i = 0; i < namelist_len; ++i) { - if (value == namelist[i].value) { - printk("%s", namelist[i].name); - return; - } - } - printk("unknown value 0x%08x", value); -} -#define nouveau_print_enum_names(val, namelist) \ - nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist)) - static int nouveau_graph_chid_from_grctx(struct drm_device *dev) { @@ -482,12 +431,12 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id, if (dev_priv->card_type < NV_50) { NV_INFO(dev, "%s - nSource:", id); - nouveau_print_bitfield_names(nsource, nsource_names); + nouveau_bitfield_print(nsource_names, nsource); printk(", nStatus:"); if (dev_priv->card_type < NV_10) - nouveau_print_bitfield_names(nstatus, nstatus_names); + nouveau_bitfield_print(nstatus_names, nstatus); else - nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); + nouveau_bitfield_print(nstatus_names_nv10, nstatus); printk("\n"); } @@ -631,13 +580,14 @@ nouveau_pgraph_irq_handler(struct drm_device *dev) nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); } -static struct nouveau_enum_names nv50_mp_exec_error_names[] = +static struct nouveau_enum nv50_mp_exec_error_names[] = { { 3, "STACK_UNDERFLOW" }, { 4, "QUADON_ACTIVE" }, { 8, "TIMEOUT" }, { 0x10, "INVALID_OPCODE" }, { 0x40, "BREAKPOINT" }, + {} }; static void @@ -666,8 +616,7 @@ nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display) ophigh= nv_rd32(dev, addr + 0x74); NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " "TP %d MP %d: ", tpid, i); - nouveau_print_enum_names(status, - nv50_mp_exec_error_names); + nouveau_enum_print(nv50_mp_exec_error_names, status); printk(" at %06x warp %d, opcode %08x %08x\n", pc&0xffffff, pc >> 24, oplow, ophigh); @@ -1020,7 +969,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev) } /* There must be a *lot* of these. Will take some time to gather them up. 
*/ -static struct nouveau_enum_names nv50_data_error_names[] = +static struct nouveau_enum nv50_data_error_names[] = { { 4, "INVALID_VALUE" }, { 5, "INVALID_ENUM" }, @@ -1028,6 +977,7 @@ static struct nouveau_enum_names nv50_data_error_names[] = { 0xc, "INVALID_BITFIELD" }, { 0x28, "MP_NO_REG_SPACE" }, { 0x2b, "MP_BLOCK_SIZE_MISMATCH" }, + {} }; static void @@ -1126,8 +1076,8 @@ nv50_pgraph_irq_handler(struct drm_device *dev) nouveau_graph_dump_trap_info(dev, "PGRAPH_DATA_ERROR", &trap); NV_INFO (dev, "PGRAPH_DATA_ERROR - "); - nouveau_print_enum_names(nv_rd32(dev, 0x400110), - nv50_data_error_names); + nouveau_enum_print(nv50_data_error_names, + nv_rd32(dev, 0x400110)); printk("\n"); } status &= ~0x00100000;
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c index e8b1eaaa212b..fbe0fb13bc1e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_util.c +++ b/drivers/gpu/drm/nouveau/nouveau_util.c @@ -27,8 +27,41 @@ #include <linux/ratelimit.h> +#include "nouveau_util.h" + static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); +void +nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value) +{ + while (bf->name) { + if (value & bf->mask) { + printk(" %s", bf->name); + value &= ~bf->mask; + } + + bf++; + } + + if (value) + printk(" (unknown bits 0x%08x)", value); +} + +void +nouveau_enum_print(const struct nouveau_enum *en, u32 value) +{ + while (en->name) { + if (value == en->value) { + printk("%s", en->name); + return; + } + + en++; + } + + printk("(unknown enum 0x%08x)", value); +} + int nouveau_ratelimit(void) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h index 9a7a7c18c99a..d9ceaea26f4b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_util.h +++ b/drivers/gpu/drm/nouveau/nouveau_util.h @@ -28,6 +28,18 @@ #ifndef __NOUVEAU_UTIL_H__ #define __NOUVEAU_UTIL_H__ +struct nouveau_bitfield { + u32 mask; + const char *name; +}; + +struct nouveau_enum { + u32 value; + const char *name; +}; + +void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value); +void nouveau_enum_print(const struct nouveau_enum *, u32 value); int nouveau_ratelimit(void); #endif -- cgit v1.2.3
From 25b85783da8c71e577c676173e9d60a1b7e6113a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 3 Nov 2010 11:36:09 +1000 Subject: drm/nv04-nv40: register vblank isr
Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_irq.c | 19 ------------------- drivers/gpu/drm/nouveau/nv04_display.c | 18 ++++++++++++++++++ 2 files changed, 18 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index e8a3c400f741..6c30669ac0b6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -1131,20 +1131,6 @@ nv50_pgraph_irq_handler(struct drm_device *dev) nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); } -static void -nouveau_crtc_irq_handler(struct drm_device *dev, int crtc) -{ - if (crtc & 1) { - nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); - drm_handle_vblank(dev, 0); - } - - if (crtc & 2) { - nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); - drm_handle_vblank(dev, 1); - } -} - irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS) { @@ -1174,11 +1160,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS) status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; } - if (status & NV_PMC_INTR_0_CRTCn_PENDING) { - nouveau_crtc_irq_handler(dev, (status>>24)&3); - status &= ~NV_PMC_INTR_0_CRTCn_PENDING; - } - for (i = 0; i < 32
&& status; i++) { if (!(status & (1 << i)) || !dev_priv->irq_handler[i]) continue; diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c index 9e28cf772e3c..0978a5b326dc 100644 --- a/drivers/gpu/drm/nouveau/nv04_display.c +++ b/drivers/gpu/drm/nouveau/nv04_display.c @@ -32,6 +32,9 @@ #include "nouveau_encoder.h" #include "nouveau_connector.h" +static void nv04_vblank_crtc0_isr(struct drm_device *); +static void nv04_vblank_crtc1_isr(struct drm_device *); + static void nv04_display_store_initial_head_owner(struct drm_device *dev) { @@ -197,6 +200,8 @@ nv04_display_create(struct drm_device *dev) func->save(encoder); } + nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr); + nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr); return 0; } @@ -258,3 +263,16 @@ nv04_display_init(struct drm_device *dev) return 0; } +static void +nv04_vblank_crtc0_isr(struct drm_device *dev) +{ + nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); + drm_handle_vblank(dev, 0); +} + +static void +nv04_vblank_crtc1_isr(struct drm_device *dev) +{ + nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); + drm_handle_vblank(dev, 1); +} -- cgit v1.2.3 From 5178d40dff23b5eef7f0a3be2411fa6a347e750d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 3 Nov 2010 10:56:05 +1000 Subject: drm/nouveau: move PFIFO ISR into nv04_fifo.c Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 + drivers/gpu/drm/nouveau/nouveau_irq.c | 203 ------------------------------- drivers/gpu/drm/nouveau/nouveau_state.c | 10 +- drivers/gpu/drm/nouveau/nv04_fifo.c | 206 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nv10_fifo.c | 1 + drivers/gpu/drm/nouveau/nv40_fifo.c | 1 + drivers/gpu/drm/nouveau/nv50_fifo.c | 4 + 7 files changed, 219 insertions(+), 208 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index c0fad126eaa4..e174479ab675 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1053,6 +1053,7 @@ extern void nvc0_fb_takedown(struct drm_device *); /* nv04_fifo.c */ extern int nv04_fifo_init(struct drm_device *); +extern void nv04_fifo_fini(struct drm_device *); extern void nv04_fifo_disable(struct drm_device *); extern void nv04_fifo_enable(struct drm_device *); extern bool nv04_fifo_reassign(struct drm_device *, bool); @@ -1062,6 +1063,7 @@ extern int nv04_fifo_create_context(struct nouveau_channel *); extern void nv04_fifo_destroy_context(struct nouveau_channel *); extern int nv04_fifo_load_context(struct nouveau_channel *); extern int nv04_fifo_unload_context(struct drm_device *); +extern void nv04_fifo_isr(struct drm_device *); /* nv10_fifo.c */ extern int nv10_fifo_init(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 6c30669ac0b6..16f42f774a9e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -69,204 +69,6 @@ nouveau_irq_uninstall(struct drm_device *dev) nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); } -static bool -nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = NULL; - struct nouveau_gpuobj *obj; - unsigned long flags; - const int subc = (addr >> 13) & 0x7; - const int mthd = addr & 0x1ffc; - bool handled = false; - u32 engine; - - spin_lock_irqsave(&dev_priv->channels.lock, flags); - if (likely(chid >= 0 && 
chid < dev_priv->engine.fifo.channels)) - chan = dev_priv->channels.ptr[chid]; - if (unlikely(!chan)) - goto out; - - switch (mthd) { - case 0x0000: /* bind object to subchannel */ - obj = nouveau_ramht_find(chan, data); - if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW)) - break; - - chan->sw_subchannel[subc] = obj->class; - engine = 0x0000000f << (subc * 4); - - nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000); - handled = true; - break; - default: - engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE); - if (unlikely(((engine >> (subc * 4)) & 0xf) != 0)) - break; - - if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc], - mthd, data)) - handled = true; - break; - } - -out: - spin_unlock_irqrestore(&dev_priv->channels.lock, flags); - return handled; -} - -static void -nouveau_fifo_irq_handler(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; - uint32_t status, reassign; - int cnt = 0; - - reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1; - while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { - uint32_t chid, get; - - nv_wr32(dev, NV03_PFIFO_CACHES, 0); - - chid = engine->fifo.channel_id(dev); - get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET); - - if (status & NV_PFIFO_INTR_CACHE_ERROR) { - uint32_t mthd, data; - int ptr; - - /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before - * wrapping on my G80 chips, but CACHE1 isn't big - * enough for this much data.. Tests show that it - * wraps around to the start at GET=0x800.. No clue - * as to why.. - */ - ptr = (get & 0x7ff) >> 2; - - if (dev_priv->card_type < NV_40) { - mthd = nv_rd32(dev, - NV04_PFIFO_CACHE1_METHOD(ptr)); - data = nv_rd32(dev, - NV04_PFIFO_CACHE1_DATA(ptr)); - } else { - mthd = nv_rd32(dev, - NV40_PFIFO_CACHE1_METHOD(ptr)); - data = nv_rd32(dev, - NV40_PFIFO_CACHE1_DATA(ptr)); - } - - if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) { - NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d " - "Mthd 0x%04x Data 0x%08x\n", - chid, (mthd >> 13) & 7, mthd & 0x1ffc, - data); - } - - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0); - nv_wr32(dev, NV03_PFIFO_INTR_0, - NV_PFIFO_INTR_CACHE_ERROR); - - nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, - nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1); - nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); - nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, - nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1); - nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); - - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, - nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); - nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); - - status &= ~NV_PFIFO_INTR_CACHE_ERROR; - } - - if (status & NV_PFIFO_INTR_DMA_PUSHER) { - u32 dma_get = nv_rd32(dev, 0x003244); - u32 dma_put = nv_rd32(dev, 0x003240); - u32 push = nv_rd32(dev, 0x003220); - u32 state = nv_rd32(dev, 0x003228); - - if (dev_priv->card_type == NV_50) { - u32 ho_get = nv_rd32(dev, 0x003328); - u32 ho_put = nv_rd32(dev, 0x003320); - u32 ib_get = nv_rd32(dev, 0x003334); - u32 ib_put = nv_rd32(dev, 0x003330); - - if (nouveau_ratelimit()) - NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " - "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " - "State 0x%08x Push 0x%08x\n", - chid, ho_get, dma_get, ho_put, - dma_put, ib_get, ib_put, state, - push); - - /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ - nv_wr32(dev, 0x003364, 0x00000000); - if (dma_get != dma_put || ho_get != ho_put) { - nv_wr32(dev, 0x003244, dma_put); - nv_wr32(dev, 0x003328, ho_put); - } else - if (ib_get != ib_put) { - nv_wr32(dev, 0x003334, 
ib_put); - } - } else { - NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " - "Put 0x%08x State 0x%08x Push 0x%08x\n", - chid, dma_get, dma_put, state, push); - - if (dma_get != dma_put) - nv_wr32(dev, 0x003244, dma_put); - } - - nv_wr32(dev, 0x003228, 0x00000000); - nv_wr32(dev, 0x003220, 0x00000001); - nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); - status &= ~NV_PFIFO_INTR_DMA_PUSHER; - } - - if (status & NV_PFIFO_INTR_SEMAPHORE) { - uint32_t sem; - - status &= ~NV_PFIFO_INTR_SEMAPHORE; - nv_wr32(dev, NV03_PFIFO_INTR_0, - NV_PFIFO_INTR_SEMAPHORE); - - sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE); - nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); - - nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); - nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); - } - - if (dev_priv->card_type == NV_50) { - if (status & 0x00000010) { - nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT"); - status &= ~0x00000010; - nv_wr32(dev, 0x002100, 0x00000010); - } - } - - if (status) { - if (nouveau_ratelimit()) - NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", - status, chid); - nv_wr32(dev, NV03_PFIFO_INTR_0, status); - status = 0; - } - - nv_wr32(dev, NV03_PFIFO_CACHES, reassign); - } - - if (status) { - NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt); - nv_wr32(dev, 0x2140, 0); - nv_wr32(dev, 0x140, 0); - } - - nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); -} - static struct nouveau_bitfield nstatus_names[] = { { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, @@ -1146,11 +948,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS) spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - if (status & NV_PMC_INTR_0_PFIFO_PENDING) { - nouveau_fifo_irq_handler(dev); - status &= ~NV_PMC_INTR_0_PFIFO_PENDING; - } - if (status & NV_PMC_INTR_0_PGRAPH_PENDING) { if (dev_priv->card_type >= NV_50) nv50_pgraph_irq_handler(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 84bff459491e..262545b58d96 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -75,7 +75,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.unload_context = nv04_graph_unload_context; engine->fifo.channels = 16; engine->fifo.init = nv04_fifo_init; - engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.takedown = nv04_fifo_fini; engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; @@ -132,7 +132,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.set_tile_region = nv10_graph_set_tile_region; engine->fifo.channels = 32; engine->fifo.init = nv10_fifo_init; - engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.takedown = nv04_fifo_fini; engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; @@ -189,7 +189,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.set_tile_region = nv20_graph_set_tile_region; engine->fifo.channels = 32; engine->fifo.init = nv10_fifo_init; - engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.takedown = nv04_fifo_fini; engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; @@ -246,7 +246,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.set_tile_region = nv20_graph_set_tile_region; engine->fifo.channels = 32; engine->fifo.init = nv10_fifo_init; - engine->fifo.takedown 
= nouveau_stub_takedown; + engine->fifo.takedown = nv04_fifo_fini; engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; @@ -306,7 +306,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.set_tile_region = nv40_graph_set_tile_region; engine->fifo.channels = 32; engine->fifo.init = nv40_fifo_init; - engine->fifo.takedown = nouveau_stub_takedown; + engine->fifo.takedown = nv04_fifo_fini; engine->fifo.disable = nv04_fifo_disable; engine->fifo.enable = nv04_fifo_enable; engine->fifo.reassign = nv04_fifo_reassign; diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index 4c0d3a8fca68..a32ba8ccaae6 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c @@ -28,6 +28,7 @@ #include "drm.h" #include "nouveau_drv.h" #include "nouveau_ramht.h" +#include "nouveau_util.h" #define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE)) #define NV04_RAMFC__SIZE 32 @@ -284,6 +285,7 @@ nv04_fifo_init_ramxx(struct drm_device *dev) static void nv04_fifo_init_intr(struct drm_device *dev) { + nouveau_irq_register(dev, 8, nv04_fifo_isr); nv_wr32(dev, 0x002100, 0xffffffff); nv_wr32(dev, 0x002140, 0xffffffff); } @@ -315,3 +317,207 @@ nv04_fifo_init(struct drm_device *dev) return 0; } +void +nv04_fifo_fini(struct drm_device *dev) +{ + nv_wr32(dev, 0x2140, 0x00000000); + nouveau_irq_unregister(dev, 8); +} + +static bool +nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = NULL; + struct nouveau_gpuobj *obj; + unsigned long flags; + const int subc = (addr >> 13) & 0x7; + const int mthd = addr & 0x1ffc; + bool handled = false; + u32 engine; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels)) + chan = dev_priv->channels.ptr[chid]; + if (unlikely(!chan)) + goto out; + + switch (mthd) { + case 0x0000: /* bind object to subchannel */ + obj = nouveau_ramht_find(chan, data); + if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW)) + break; + + chan->sw_subchannel[subc] = obj->class; + engine = 0x0000000f << (subc * 4); + + nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000); + handled = true; + break; + default: + engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE); + if (unlikely(((engine >> (subc * 4)) & 0xf) != 0)) + break; + + if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc], + mthd, data)) + handled = true; + break; + } + +out: + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return handled; +} + +void +nv04_fifo_isr(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; + uint32_t status, reassign; + int cnt = 0; + + reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1; + while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { + uint32_t chid, get; + + nv_wr32(dev, NV03_PFIFO_CACHES, 0); + + chid = engine->fifo.channel_id(dev); + get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET); + + if (status & NV_PFIFO_INTR_CACHE_ERROR) { + uint32_t mthd, data; + int ptr; + + /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before + * wrapping on my G80 chips, but CACHE1 isn't big + * enough for this much data.. Tests show that it + * wraps around to the start at GET=0x800.. No clue + * as to why.. 
+ */ + ptr = (get & 0x7ff) >> 2; + + if (dev_priv->card_type < NV_40) { + mthd = nv_rd32(dev, + NV04_PFIFO_CACHE1_METHOD(ptr)); + data = nv_rd32(dev, + NV04_PFIFO_CACHE1_DATA(ptr)); + } else { + mthd = nv_rd32(dev, + NV40_PFIFO_CACHE1_METHOD(ptr)); + data = nv_rd32(dev, + NV40_PFIFO_CACHE1_DATA(ptr)); + } + + if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) { + NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d " + "Mthd 0x%04x Data 0x%08x\n", + chid, (mthd >> 13) & 7, mthd & 0x1ffc, + data); + } + + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0); + nv_wr32(dev, NV03_PFIFO_INTR_0, + NV_PFIFO_INTR_CACHE_ERROR); + + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, + nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1); + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); + nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, + nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1); + nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0); + + nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, + nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); + nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); + + status &= ~NV_PFIFO_INTR_CACHE_ERROR; + } + + if (status & NV_PFIFO_INTR_DMA_PUSHER) { + u32 dma_get = nv_rd32(dev, 0x003244); + u32 dma_put = nv_rd32(dev, 0x003240); + u32 push = nv_rd32(dev, 0x003220); + u32 state = nv_rd32(dev, 0x003228); + + if (dev_priv->card_type == NV_50) { + u32 ho_get = nv_rd32(dev, 0x003328); + u32 ho_put = nv_rd32(dev, 0x003320); + u32 ib_get = nv_rd32(dev, 0x003334); + u32 ib_put = nv_rd32(dev, 0x003330); + + if (nouveau_ratelimit()) + NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " + "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " + "State 0x%08x Push 0x%08x\n", + chid, ho_get, dma_get, ho_put, + dma_put, ib_get, ib_put, state, + push); + + /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ + nv_wr32(dev, 0x003364, 0x00000000); + if (dma_get != dma_put || ho_get != ho_put) { + nv_wr32(dev, 0x003244, dma_put); + nv_wr32(dev, 0x003328, ho_put); + } else + if (ib_get != ib_put) { + nv_wr32(dev, 0x003334, ib_put); + } + } else { + NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " + "Put 0x%08x State 0x%08x Push 0x%08x\n", + chid, dma_get, dma_put, state, push); + + if (dma_get != dma_put) + nv_wr32(dev, 0x003244, dma_put); + } + + nv_wr32(dev, 0x003228, 0x00000000); + nv_wr32(dev, 0x003220, 0x00000001); + nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); + status &= ~NV_PFIFO_INTR_DMA_PUSHER; + } + + if (status & NV_PFIFO_INTR_SEMAPHORE) { + uint32_t sem; + + status &= ~NV_PFIFO_INTR_SEMAPHORE; + nv_wr32(dev, NV03_PFIFO_INTR_0, + NV_PFIFO_INTR_SEMAPHORE); + + sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE); + nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); + + nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4); + nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1); + } + + if (dev_priv->card_type == NV_50) { + if (status & 0x00000010) { + nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT"); + status &= ~0x00000010; + nv_wr32(dev, 0x002100, 0x00000010); + } + } + + if (status) { + if (nouveau_ratelimit()) + NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", + status, chid); + nv_wr32(dev, NV03_PFIFO_INTR_0, status); + status = 0; + } + + nv_wr32(dev, NV03_PFIFO_CACHES, reassign); + } + + if (status) { + NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt); + nv_wr32(dev, 0x2140, 0); + nv_wr32(dev, 0x140, 0); + } + + nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING); +} diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c index 912556f2e33c..acb9216e6d0a 100644 --- a/drivers/gpu/drm/nouveau/nv10_fifo.c +++ 
b/drivers/gpu/drm/nouveau/nv10_fifo.c @@ -208,6 +208,7 @@ nv10_fifo_init_ramxx(struct drm_device *dev) static void nv10_fifo_init_intr(struct drm_device *dev) { + nouveau_irq_register(dev, 8, nv04_fifo_isr); nv_wr32(dev, 0x002100, 0xffffffff); nv_wr32(dev, 0x002140, 0xffffffff); } diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index 311ac9ea5d53..f6b3580c685a 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c @@ -268,6 +268,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev) static void nv40_fifo_init_intr(struct drm_device *dev) { + nouveau_irq_register(dev, 8, nv04_fifo_isr); nv_wr32(dev, 0x002100, 0xffffffff); nv_wr32(dev, 0x002140, 0xffffffff); } diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index d3295aae0c4e..ed18952ae7f4 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -106,6 +106,7 @@ nv50_fifo_init_intr(struct drm_device *dev) { NV_DEBUG(dev, "\n"); + nouveau_irq_register(dev, 8, nv04_fifo_isr); nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF); nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF); } @@ -207,6 +208,9 @@ nv50_fifo_takedown(struct drm_device *dev) if (!pfifo->playlist[0]) return; + nv_wr32(dev, 0x2140, 0x00000000); + nouveau_irq_unregister(dev, 8); + nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]); nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]); } -- cgit v1.2.3 From 274fec93cdd627408a799519fa602f2eecb14d2f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 3 Nov 2010 13:16:18 +1000 Subject: drm/nouveau: tidy+move PGRAPH ISRs to their respective *_graph.c files Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 8 +- drivers/gpu/drm/nouveau/nouveau_irq.c | 894 +------------------------------ drivers/gpu/drm/nouveau/nouveau_object.c | 18 + drivers/gpu/drm/nouveau/nv04_graph.c | 95 +++- drivers/gpu/drm/nouveau/nv10_graph.c | 72 ++- drivers/gpu/drm/nouveau/nv20_graph.c | 47 ++ drivers/gpu/drm/nouveau/nv40_graph.c | 69 +++ drivers/gpu/drm/nouveau/nv50_graph.c | 500 ++++++++++++++++- 8 files changed, 809 insertions(+), 894 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e174479ab675..b19ef7fbb9dd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -54,6 +54,7 @@ struct nouveau_fpriv { #include "nouveau_drm.h" #include "nouveau_reg.h" #include "nouveau_bios.h" +#include "nouveau_util.h" struct nouveau_grctx; #define MAX_NUM_DCB_ENTRIES 16 @@ -872,6 +873,7 @@ extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd, int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data)); extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32); +extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32); extern int nouveau_gpuobj_channel_init(struct nouveau_channel *, uint32_t vram_h, uint32_t tt_h); extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *); @@ -1110,9 +1112,9 @@ extern int nv04_graph_create_context(struct nouveau_channel *); extern void nv04_graph_destroy_context(struct nouveau_channel *); extern int nv04_graph_load_context(struct nouveau_channel *); extern int nv04_graph_unload_context(struct drm_device *); -extern void nv04_graph_context_switch(struct drm_device *); extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data); +extern struct 
nouveau_bitfield nv04_graph_nsource[]; /* nv10_graph.c */ extern int nv10_graph_init(struct drm_device *); @@ -1122,8 +1124,9 @@ extern int nv10_graph_create_context(struct nouveau_channel *); extern void nv10_graph_destroy_context(struct nouveau_channel *); extern int nv10_graph_load_context(struct nouveau_channel *); extern int nv10_graph_unload_context(struct drm_device *); -extern void nv10_graph_context_switch(struct drm_device *); extern void nv10_graph_set_tile_region(struct drm_device *dev, int i); +extern struct nouveau_bitfield nv10_graph_intr[]; +extern struct nouveau_bitfield nv10_graph_nstatus[]; /* nv20_graph.c */ extern int nv20_graph_create_context(struct nouveau_channel *); @@ -1155,7 +1158,6 @@ extern int nv50_graph_create_context(struct nouveau_channel *); extern void nv50_graph_destroy_context(struct nouveau_channel *); extern int nv50_graph_load_context(struct nouveau_channel *); extern int nv50_graph_unload_context(struct drm_device *); -extern void nv50_graph_context_switch(struct drm_device *); extern int nv50_grctx_init(struct nouveau_grctx *); extern void nv50_graph_tlb_flush(struct drm_device *dev); extern void nv86_graph_tlb_flush(struct drm_device *dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 16f42f774a9e..2ba7265bc967 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -69,910 +69,34 @@ nouveau_irq_uninstall(struct drm_device *dev) nv_wr32(dev, NV03_PMC_INTR_EN_0, 0); } -static struct nouveau_bitfield nstatus_names[] = -{ - { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, - { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, - { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, - { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, - {} -}; - -static struct nouveau_bitfield nstatus_names_nv10[] = -{ - { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, - { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, - { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, - { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, - {} -}; - -static struct nouveau_bitfield nsource_names[] = -{ - { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, - { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, - { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, - { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, - { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, - { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, - { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, - { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, - { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, - { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, - { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, - { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, - { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, - { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, - { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, - { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, - { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, - { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, - { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, - {} -}; - -static int -nouveau_graph_chid_from_grctx(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan; - unsigned long flags; - uint32_t inst; - int i; - - if (dev_priv->card_type < NV_40) 
- return dev_priv->engine.fifo.channels; - else - if (dev_priv->card_type < NV_50) { - inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4; - - spin_lock_irqsave(&dev_priv->channels.lock, flags); - for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - chan = dev_priv->channels.ptr[i]; - if (!chan || !chan->ramin_grctx) - continue; - - if (inst == chan->ramin_grctx->pinst) - break; - } - spin_unlock_irqrestore(&dev_priv->channels.lock, flags); - } else { - inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12; - - spin_lock_irqsave(&dev_priv->channels.lock, flags); - for (i = 0; i < dev_priv->engine.fifo.channels; i++) { - chan = dev_priv->channels.ptr[i]; - if (!chan || !chan->ramin) - continue; - - if (inst == chan->ramin->vinst) - break; - } - spin_unlock_irqrestore(&dev_priv->channels.lock, flags); - } - - - return i; -} - -static int -nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; - int channel; - - if (dev_priv->card_type < NV_10) - channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf; - else - if (dev_priv->card_type < NV_40) - channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f; - else - channel = nouveau_graph_chid_from_grctx(dev); - - if (channel >= engine->fifo.channels || - !dev_priv->channels.ptr[channel]) { - NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel); - return -EINVAL; - } - - *channel_ret = channel; - return 0; -} - -struct nouveau_pgraph_trap { - int channel; - int class; - int subc, mthd, size; - uint32_t data, data2; - uint32_t nsource, nstatus; -}; - -static void -nouveau_graph_trap_info(struct drm_device *dev, - struct nouveau_pgraph_trap *trap) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t address; - - trap->nsource = trap->nstatus = 0; - if (dev_priv->card_type < NV_50) { - trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); - trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); - } - - if (nouveau_graph_trapped_channel(dev, &trap->channel)) - trap->channel = -1; - address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); - - trap->mthd = address & 0x1FFC; - trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); - if (dev_priv->card_type < NV_10) { - trap->subc = (address >> 13) & 0x7; - } else { - trap->subc = (address >> 16) & 0x7; - trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH); - } - - if (dev_priv->card_type < NV_10) - trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF; - else if (dev_priv->card_type < NV_40) - trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF; - else if (dev_priv->card_type < NV_50) - trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF; - else - trap->class = nv_rd32(dev, 0x400814); -} - -static void -nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id, - struct nouveau_pgraph_trap *trap) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t nsource = trap->nsource, nstatus = trap->nstatus; - - if (dev_priv->card_type < NV_50) { - NV_INFO(dev, "%s - nSource:", id); - nouveau_bitfield_print(nsource_names, nsource); - printk(", nStatus:"); - if (dev_priv->card_type < NV_10) - nouveau_bitfield_print(nstatus_names, nstatus); - else - nouveau_bitfield_print(nstatus_names_nv10, nstatus); - printk("\n"); - } - - NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x " - "Data 0x%08x:0x%08x\n", - id, trap->channel, trap->subc, - trap->class, trap->mthd, - trap->data2, trap->data); -} - -static 
int -nouveau_pgraph_intr_swmthd(struct drm_device *dev, - struct nouveau_pgraph_trap *trap) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan; - unsigned long flags; - int ret = -EINVAL; - - spin_lock_irqsave(&dev_priv->channels.lock, flags); - if (trap->channel > 0 && - trap->channel < dev_priv->engine.fifo.channels && - dev_priv->channels.ptr[trap->channel]) { - chan = dev_priv->channels.ptr[trap->channel]; - ret = nouveau_gpuobj_mthd_call(chan, trap->class, trap->mthd, trap->data); - } - spin_unlock_irqrestore(&dev_priv->channels.lock, flags); - - return ret; -} - -static inline void -nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource) -{ - struct nouveau_pgraph_trap trap; - int unhandled = 0; - - nouveau_graph_trap_info(dev, &trap); - - if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { - if (nouveau_pgraph_intr_swmthd(dev, &trap)) - unhandled = 1; - } else { - unhandled = 1; - } - - if (unhandled) - nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); -} - - -static inline void -nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) -{ - struct nouveau_pgraph_trap trap; - int unhandled = 0; - - nouveau_graph_trap_info(dev, &trap); - trap.nsource = nsource; - - if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { - if (nouveau_pgraph_intr_swmthd(dev, &trap)) - unhandled = 1; - } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) { - uint32_t v = nv_rd32(dev, 0x402000); - nv_wr32(dev, 0x402000, v); - - /* dump the error anyway for now: it's useful for - Gallium development */ - unhandled = 1; - } else { - unhandled = 1; - } - - if (unhandled && nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap); -} - -static inline void -nouveau_pgraph_intr_context_switch(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; - uint32_t chid; - - chid = engine->fifo.channel_id(dev); - NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid); - - switch (dev_priv->card_type) { - case NV_04: - nv04_graph_context_switch(dev); - break; - case NV_10: - nv10_graph_context_switch(dev); - break; - default: - NV_ERROR(dev, "Context switch not implemented\n"); - break; - } -} - -static void -nouveau_pgraph_irq_handler(struct drm_device *dev) -{ - uint32_t status; - - while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { - uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); - - if (status & NV_PGRAPH_INTR_NOTIFY) { - nouveau_pgraph_intr_notify(dev, nsource); - - status &= ~NV_PGRAPH_INTR_NOTIFY; - nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY); - } - - if (status & NV_PGRAPH_INTR_ERROR) { - nouveau_pgraph_intr_error(dev, nsource); - - status &= ~NV_PGRAPH_INTR_ERROR; - nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR); - } - - if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { - status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; - nv_wr32(dev, NV03_PGRAPH_INTR, - NV_PGRAPH_INTR_CONTEXT_SWITCH); - - nouveau_pgraph_intr_context_switch(dev); - } - - if (status) { - NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status); - nv_wr32(dev, NV03_PGRAPH_INTR, status); - } - - if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0) - nv_wr32(dev, NV04_PGRAPH_FIFO, 1); - } - - nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); -} - -static struct nouveau_enum nv50_mp_exec_error_names[] = -{ - { 3, "STACK_UNDERFLOW" }, - { 4, "QUADON_ACTIVE" }, - { 8, "TIMEOUT" }, - { 0x10, "INVALID_OPCODE" }, - { 0x40, "BREAKPOINT" 
}, - {} -}; - -static void -nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t units = nv_rd32(dev, 0x1540); - uint32_t addr, mp10, status, pc, oplow, ophigh; - int i; - int mps = 0; - for (i = 0; i < 4; i++) { - if (!(units & 1 << (i+24))) - continue; - if (dev_priv->chipset < 0xa0) - addr = 0x408200 + (tpid << 12) + (i << 7); - else - addr = 0x408100 + (tpid << 11) + (i << 7); - mp10 = nv_rd32(dev, addr + 0x10); - status = nv_rd32(dev, addr + 0x14); - if (!status) - continue; - if (display) { - nv_rd32(dev, addr + 0x20); - pc = nv_rd32(dev, addr + 0x24); - oplow = nv_rd32(dev, addr + 0x70); - ophigh= nv_rd32(dev, addr + 0x74); - NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " - "TP %d MP %d: ", tpid, i); - nouveau_enum_print(nv50_mp_exec_error_names, status); - printk(" at %06x warp %d, opcode %08x %08x\n", - pc&0xffffff, pc >> 24, - oplow, ophigh); - } - nv_wr32(dev, addr + 0x10, mp10); - nv_wr32(dev, addr + 0x14, 0); - mps++; - } - if (!mps && display) - NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: " - "No MPs claiming errors?\n", tpid); -} - -static void -nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, - uint32_t ustatus_new, int display, const char *name) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - int tps = 0; - uint32_t units = nv_rd32(dev, 0x1540); - int i, r; - uint32_t ustatus_addr, ustatus; - for (i = 0; i < 16; i++) { - if (!(units & (1 << i))) - continue; - if (dev_priv->chipset < 0xa0) - ustatus_addr = ustatus_old + (i << 12); - else - ustatus_addr = ustatus_new + (i << 11); - ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff; - if (!ustatus) - continue; - tps++; - switch (type) { - case 6: /* texture error... unknown for now */ - nv50_fb_vm_trap(dev, display, name); - if (display) { - NV_ERROR(dev, "magic set %d:\n", i); - for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) - NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, - nv_rd32(dev, r)); - } - break; - case 7: /* MP error */ - if (ustatus & 0x00010000) { - nv50_pgraph_mp_trap(dev, i, display); - ustatus &= ~0x00010000; - } - break; - case 8: /* TPDMA error */ - { - uint32_t e0c = nv_rd32(dev, ustatus_addr + 4); - uint32_t e10 = nv_rd32(dev, ustatus_addr + 8); - uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc); - uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10); - uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); - uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); - uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); - nv50_fb_vm_trap(dev, display, name); - /* 2d engine destination */ - if (ustatus & 0x00000010) { - if (display) { - NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", - i, e14, e10); - NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", - i, e0c, e18, e1c, e20, e24); - } - ustatus &= ~0x00000010; - } - /* Render target */ - if (ustatus & 0x00000040) { - if (display) { - NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", - i, e14, e10); - NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", - i, e0c, e18, e1c, e20, e24); - } - ustatus &= ~0x00000040; - } - /* CUDA memory: l[], g[] or stack. */ - if (ustatus & 0x00000080) { - if (display) { - if (e18 & 0x80000000) { - /* g[] read fault? 
*/ - NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", - i, e14, e10 | ((e18 >> 24) & 0x1f)); - e18 &= ~0x1f000000; - } else if (e18 & 0xc) { - /* g[] write fault? */ - NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", - i, e14, e10 | ((e18 >> 7) & 0x1f)); - e18 &= ~0x00000f80; - } else { - NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", - i, e14, e10); - } - NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", - i, e0c, e18, e1c, e20, e24); - } - ustatus &= ~0x00000080; - } - } - break; - } - if (ustatus) { - if (display) - NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); - } - nv_wr32(dev, ustatus_addr, 0xc0000000); - } - - if (!tps && display) - NV_INFO(dev, "%s - No TPs claiming errors?\n", name); -} - -static void -nv50_pgraph_trap_handler(struct drm_device *dev) -{ - struct nouveau_pgraph_trap trap; - uint32_t status = nv_rd32(dev, 0x400108); - uint32_t ustatus; - int display = nouveau_ratelimit(); - - - if (!status && display) { - nouveau_graph_trap_info(dev, &trap); - nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap); - NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n"); - } - - /* DISPATCH: Relays commands to other units and handles NOTIFY, - * COND, QUERY. If you get a trap from it, the command is still stuck - * in DISPATCH and you need to do something about it. */ - if (status & 0x001) { - ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff; - if (!ustatus && display) { - NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n"); - } - - /* Known to be triggered by screwed up NOTIFY and COND... */ - if (ustatus & 0x00000001) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT"); - nv_wr32(dev, 0x400500, 0); - if (nv_rd32(dev, 0x400808) & 0x80000000) { - if (display) { - if (nouveau_graph_trapped_channel(dev, &trap.channel)) - trap.channel = -1; - trap.class = nv_rd32(dev, 0x400814); - trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc; - trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7; - trap.data = nv_rd32(dev, 0x40080c); - trap.data2 = nv_rd32(dev, 0x400810); - nouveau_graph_dump_trap_info(dev, - "PGRAPH_TRAP_DISPATCH_FAULT", &trap); - NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808)); - NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848)); - } - nv_wr32(dev, 0x400808, 0); - } else if (display) { - NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n"); - } - nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3); - nv_wr32(dev, 0x400848, 0); - ustatus &= ~0x00000001; - } - if (ustatus & 0x00000002) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY"); - nv_wr32(dev, 0x400500, 0); - if (nv_rd32(dev, 0x40084c) & 0x80000000) { - if (display) { - if (nouveau_graph_trapped_channel(dev, &trap.channel)) - trap.channel = -1; - trap.class = nv_rd32(dev, 0x400814); - trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc; - trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7; - trap.data = nv_rd32(dev, 0x40085c); - trap.data2 = 0; - nouveau_graph_dump_trap_info(dev, - "PGRAPH_TRAP_DISPATCH_QUERY", &trap); - NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c)); - } - nv_wr32(dev, 0x40084c, 0); - } else if (display) { - NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n"); - } - ustatus &= ~0x00000002; - } - if (ustatus && display) - NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", 
ustatus); - nv_wr32(dev, 0x400804, 0xc0000000); - nv_wr32(dev, 0x400108, 0x001); - status &= ~0x001; - } - - /* TRAPs other than dispatch use the "normal" trap regs. */ - if (status && display) { - nouveau_graph_trap_info(dev, &trap); - nouveau_graph_dump_trap_info(dev, - "PGRAPH_TRAP", &trap); - } - - /* M2MF: Memory to memory copy engine. */ - if (status & 0x002) { - ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff; - if (!ustatus && display) { - NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n"); - } - if (ustatus & 0x00000001) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY"); - ustatus &= ~0x00000001; - } - if (ustatus & 0x00000002) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN"); - ustatus &= ~0x00000002; - } - if (ustatus & 0x00000004) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT"); - ustatus &= ~0x00000004; - } - NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n", - nv_rd32(dev, 0x406804), - nv_rd32(dev, 0x406808), - nv_rd32(dev, 0x40680c), - nv_rd32(dev, 0x406810)); - if (ustatus && display) - NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus); - /* No sane way found yet -- just reset the bugger. */ - nv_wr32(dev, 0x400040, 2); - nv_wr32(dev, 0x400040, 0); - nv_wr32(dev, 0x406800, 0xc0000000); - nv_wr32(dev, 0x400108, 0x002); - status &= ~0x002; - } - - /* VFETCH: Fetches data from vertex buffers. */ - if (status & 0x004) { - ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff; - if (!ustatus && display) { - NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n"); - } - if (ustatus & 0x00000001) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT"); - NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n", - nv_rd32(dev, 0x400c00), - nv_rd32(dev, 0x400c08), - nv_rd32(dev, 0x400c0c), - nv_rd32(dev, 0x400c10)); - ustatus &= ~0x00000001; - } - if (ustatus && display) - NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus); - nv_wr32(dev, 0x400c04, 0xc0000000); - nv_wr32(dev, 0x400108, 0x004); - status &= ~0x004; - } - - /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ - if (status & 0x008) { - ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff; - if (!ustatus && display) { - NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n"); - } - if (ustatus & 0x00000001) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT"); - NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n", - nv_rd32(dev, 0x401804), - nv_rd32(dev, 0x401808), - nv_rd32(dev, 0x40180c), - nv_rd32(dev, 0x401810)); - ustatus &= ~0x00000001; - } - if (ustatus && display) - NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus); - /* No sane way found yet -- just reset the bugger. */ - nv_wr32(dev, 0x400040, 0x80); - nv_wr32(dev, 0x400040, 0); - nv_wr32(dev, 0x401800, 0xc0000000); - nv_wr32(dev, 0x400108, 0x008); - status &= ~0x008; - } - - /* CCACHE: Handles code and c[] caches and fills them. 
*/ - if (status & 0x010) { - ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff; - if (!ustatus && display) { - NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n"); - } - if (ustatus & 0x00000001) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT"); - NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n", - nv_rd32(dev, 0x405800), - nv_rd32(dev, 0x405804), - nv_rd32(dev, 0x405808), - nv_rd32(dev, 0x40580c), - nv_rd32(dev, 0x405810), - nv_rd32(dev, 0x405814), - nv_rd32(dev, 0x40581c)); - ustatus &= ~0x00000001; - } - if (ustatus && display) - NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus); - nv_wr32(dev, 0x405018, 0xc0000000); - nv_wr32(dev, 0x400108, 0x010); - status &= ~0x010; - } - - /* Unknown, not seen yet... 0x402000 is the only trap status reg - * remaining, so try to handle it anyway. Perhaps related to that - * unknown DMA slot on tesla? */ - if (status & 0x20) { - nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04"); - ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; - if (display) - NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus); - nv_wr32(dev, 0x402000, 0xc0000000); - /* no status modifiction on purpose */ - } - - /* TEXTURE: CUDA texturing units */ - if (status & 0x040) { - nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display, - "PGRAPH_TRAP_TEXTURE"); - nv_wr32(dev, 0x400108, 0x040); - status &= ~0x040; - } - - /* MP: CUDA execution engines. */ - if (status & 0x080) { - nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display, - "PGRAPH_TRAP_MP"); - nv_wr32(dev, 0x400108, 0x080); - status &= ~0x080; - } - - /* TPDMA: Handles TP-initiated uncached memory accesses: - * l[], g[], stack, 2d surfaces, render targets. */ - if (status & 0x100) { - nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display, - "PGRAPH_TRAP_TPDMA"); - nv_wr32(dev, 0x400108, 0x100); - status &= ~0x100; - } - - if (status) { - if (display) - NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n", - status); - nv_wr32(dev, 0x400108, status); - } -} - -/* There must be a *lot* of these. Will take some time to gather them up. */ -static struct nouveau_enum nv50_data_error_names[] = -{ - { 4, "INVALID_VALUE" }, - { 5, "INVALID_ENUM" }, - { 8, "INVALID_OBJECT" }, - { 0xc, "INVALID_BITFIELD" }, - { 0x28, "MP_NO_REG_SPACE" }, - { 0x2b, "MP_BLOCK_SIZE_MISMATCH" }, - {} -}; - -static void -nv50_pgraph_irq_handler(struct drm_device *dev) -{ - struct nouveau_pgraph_trap trap; - int unhandled = 0; - uint32_t status; - - while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { - /* NOTIFY: You've set a NOTIFY an a command and it's done. */ - if (status & 0x00000001) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, - "PGRAPH_NOTIFY", &trap); - status &= ~0x00000001; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); - } - - /* COMPUTE_QUERY: Purpose and exact cause unknown, happens - * when you write 0x200 to 0x50c0 method 0x31c. */ - if (status & 0x00000002) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, - "PGRAPH_COMPUTE_QUERY", &trap); - status &= ~0x00000002; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002); - } - - /* Unknown, never seen: 0x4 */ - - /* ILLEGAL_MTHD: You used a wrong method for this class. 
*/ - if (status & 0x00000010) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_pgraph_intr_swmthd(dev, &trap)) - unhandled = 1; - if (unhandled && nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, - "PGRAPH_ILLEGAL_MTHD", &trap); - status &= ~0x00000010; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); - } - - /* ILLEGAL_CLASS: You used a wrong class. */ - if (status & 0x00000020) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, - "PGRAPH_ILLEGAL_CLASS", &trap); - status &= ~0x00000020; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020); - } - - /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */ - if (status & 0x00000040) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, - "PGRAPH_DOUBLE_NOTIFY", &trap); - status &= ~0x00000040; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040); - } - - /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */ - if (status & 0x00001000) { - nv_wr32(dev, 0x400500, 0x00000000); - nv_wr32(dev, NV03_PGRAPH_INTR, - NV_PGRAPH_INTR_CONTEXT_SWITCH); - nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev, - NV40_PGRAPH_INTR_EN) & - ~NV_PGRAPH_INTR_CONTEXT_SWITCH); - nv_wr32(dev, 0x400500, 0x00010001); - - nv50_graph_context_switch(dev); - - status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; - } - - /* BUFFER_NOTIFY: Your m2mf transfer finished */ - if (status & 0x00010000) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, - "PGRAPH_BUFFER_NOTIFY", &trap); - status &= ~0x00010000; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000); - } - - /* DATA_ERROR: Invalid value for this method, or invalid - * state in current PGRAPH context for this operation */ - if (status & 0x00100000) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_ratelimit()) { - nouveau_graph_dump_trap_info(dev, - "PGRAPH_DATA_ERROR", &trap); - NV_INFO (dev, "PGRAPH_DATA_ERROR - "); - nouveau_enum_print(nv50_data_error_names, - nv_rd32(dev, 0x400110)); - printk("\n"); - } - status &= ~0x00100000; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); - } - - /* TRAP: Something bad happened in the middle of command - * execution. Has a billion types, subtypes, and even - * subsubtypes. */ - if (status & 0x00200000) { - nv50_pgraph_trap_handler(dev); - status &= ~0x00200000; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); - } - - /* Unknown, never seen: 0x00400000 */ - - /* SINGLE_STEP: Happens on every method if you turned on - * single stepping in 40008c */ - if (status & 0x01000000) { - nouveau_graph_trap_info(dev, &trap); - if (nouveau_ratelimit()) - nouveau_graph_dump_trap_info(dev, - "PGRAPH_SINGLE_STEP", &trap); - status &= ~0x01000000; - nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000); - } - - /* 0x02000000 happens when you pause a ctxprog... - * but the only way this can happen that I know is by - * poking the relevant MMIO register, and we don't - * do that. 
*/ - - if (status) { - NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", - status); - nv_wr32(dev, NV03_PGRAPH_INTR, status); - } - - { - const int isb = (1 << 16) | (1 << 0); - - if ((nv_rd32(dev, 0x400500) & isb) != isb) - nv_wr32(dev, 0x400500, - nv_rd32(dev, 0x400500) | isb); - } - } - - nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); - if (nv_rd32(dev, 0x400824) & (1 << 31)) - nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); -} - irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *)arg; struct drm_nouveau_private *dev_priv = dev->dev_private; unsigned long flags; - u32 status; + u32 stat; int i; - status = nv_rd32(dev, NV03_PMC_INTR_0); - if (!status) + stat = nv_rd32(dev, NV03_PMC_INTR_0); + if (!stat) return IRQ_NONE; spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - - if (status & NV_PMC_INTR_0_PGRAPH_PENDING) { - if (dev_priv->card_type >= NV_50) - nv50_pgraph_irq_handler(dev); - else - nouveau_pgraph_irq_handler(dev); - - status &= ~NV_PMC_INTR_0_PGRAPH_PENDING; - } - - for (i = 0; i < 32 && status; i++) { - if (!(status & (1 << i)) || !dev_priv->irq_handler[i]) + for (i = 0; i < 32 && stat; i++) { + if (!(stat & (1 << i)) || !dev_priv->irq_handler[i]) continue; dev_priv->irq_handler[i](dev); - status &= ~(1 << i); + stat &= ~(1 << i); } - if (status) - NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status); - - spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); - if (dev_priv->msi_enabled) nv_wr08(dev, 0x00088068, 0xff); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + if (stat && nouveau_ratelimit()) + NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat); return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 54078186fe65..94429553433c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -113,6 +113,24 @@ nouveau_gpuobj_mthd_call(struct nouveau_channel *chan, return -ENOENT; } +int +nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid, + u32 class, u32 mthd, u32 data) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = NULL; + unsigned long flags; + int ret = -EINVAL; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + if (chid > 0 && chid < dev_priv->engine.fifo.channels) + chan = dev_priv->channels.ptr[chid]; + if (chan) + ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data); + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return ret; +} + /* NVidia uses context objects to drive drawing operations. 
Context objects can be selected into 8 subchannels in the FIFO, diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index 239519aefce6..0bc616d35eb6 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c @@ -27,8 +27,10 @@ #include "nouveau_drm.h" #include "nouveau_drv.h" #include "nouveau_hw.h" +#include "nouveau_util.h" -static int nv04_graph_register(struct drm_device *dev); +static int nv04_graph_register(struct drm_device *dev); +static void nv04_graph_isr(struct drm_device *dev); static uint32_t nv04_graph_ctx_regs[] = { 0x0040053c, @@ -363,7 +365,7 @@ nv04_graph_channel(struct drm_device *dev) return dev_priv->channels.ptr[chid]; } -void +static void nv04_graph_context_switch(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -498,6 +500,7 @@ int nv04_graph_init(struct drm_device *dev) return ret; /* Enable PGRAPH interrupts */ + nouveau_irq_register(dev, 12, nv04_graph_isr); nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -533,6 +536,8 @@ int nv04_graph_init(struct drm_device *dev) void nv04_graph_takedown(struct drm_device *dev) { + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); + nouveau_irq_unregister(dev, 12); } void @@ -1224,3 +1229,89 @@ nv04_graph_register(struct drm_device *dev) dev_priv->engine.graph.registered = true; return 0; }; + +static struct nouveau_bitfield nv04_graph_intr[] = { + { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" }, + {} +}; + +static struct nouveau_bitfield nv04_graph_nstatus[] = +{ + { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, + { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, + { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, + { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, + {} +}; + +struct nouveau_bitfield nv04_graph_nsource[] = +{ + { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" }, + { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" }, + { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" }, + { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" }, + { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" }, + { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" }, + { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" }, + { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" }, + { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" }, + { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" }, + { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" }, + { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" }, + { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" }, + { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" }, + { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" }, + {} +}; + +static void +nv04_graph_isr(struct drm_device *dev) +{ + u32 stat; + + while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { + u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); + u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); + u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); + u32 chid = (addr & 0x0f000000) >> 24; + u32 subc = (addr & 0x0000e000) >> 13; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); + u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff; + u32 show = stat; + + if (stat & 
NV_PGRAPH_INTR_NOTIFY) { + if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { + if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) + show &= ~NV_PGRAPH_INTR_NOTIFY; + } + } + + if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { + nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); + stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + nv04_graph_context_switch(dev); + } + + nv_wr32(dev, NV03_PGRAPH_INTR, stat); + nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); + + if (show && nouveau_ratelimit()) { + NV_INFO(dev, "PGRAPH -"); + nouveau_bitfield_print(nv04_graph_intr, show); + printk(" nsource:"); + nouveau_bitfield_print(nv04_graph_nsource, nsource); + printk(" nstatus:"); + nouveau_bitfield_print(nv04_graph_nstatus, nstatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " + "mthd 0x%04x data 0x%08x\n", + chid, subc, class, mthd, data); + } + } +} diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index 1cd141edca04..536b39e4a9e2 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c @@ -26,8 +26,10 @@ #include "drm.h" #include "nouveau_drm.h" #include "nouveau_drv.h" +#include "nouveau_util.h" -static int nv10_graph_register(struct drm_device *); +static int nv10_graph_register(struct drm_device *); +static void nv10_graph_isr(struct drm_device *); #define NV10_FIFO_NUMBER 32 @@ -788,7 +790,7 @@ nv10_graph_unload_context(struct drm_device *dev) return 0; } -void +static void nv10_graph_context_switch(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -924,6 +926,7 @@ int nv10_graph_init(struct drm_device *dev) if (ret) return ret; + nouveau_irq_register(dev, 12, nv10_graph_isr); nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -966,6 +969,8 @@ int nv10_graph_init(struct drm_device *dev) void nv10_graph_takedown(struct drm_device *dev) { + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); + nouveau_irq_unregister(dev, 12); } static int @@ -1117,3 +1122,66 @@ nv10_graph_register(struct drm_device *dev) dev_priv->engine.graph.registered = true; return 0; } + +struct nouveau_bitfield nv10_graph_intr[] = { + { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" }, + { NV_PGRAPH_INTR_ERROR, "ERROR" }, + {} +}; + +struct nouveau_bitfield nv10_graph_nstatus[] = +{ + { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" }, + { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" }, + { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" }, + { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }, + {} +}; + +static void +nv10_graph_isr(struct drm_device *dev) +{ + u32 stat; + + while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { + u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); + u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); + u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); + u32 chid = (addr & 0x01f00000) >> 20; + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); + u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff; + u32 show = stat; + + if (stat & NV_PGRAPH_INTR_ERROR) { + if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { + if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) + show &= ~NV_PGRAPH_INTR_ERROR; + } + } + + if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) { + nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH); + stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + 
nv10_graph_context_switch(dev); + } + + nv_wr32(dev, NV03_PGRAPH_INTR, stat); + nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); + + if (show && nouveau_ratelimit()) { + NV_INFO(dev, "PGRAPH -"); + nouveau_bitfield_print(nv10_graph_intr, show); + printk(" nsource:"); + nouveau_bitfield_print(nv04_graph_nsource, nsource); + printk(" nstatus:"); + nouveau_bitfield_print(nv10_graph_nstatus, nstatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " + "mthd 0x%04x data 0x%08x\n", + chid, subc, class, mthd, data); + } + } +} diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index bd065c2fcba4..8464b76798d5 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c @@ -34,6 +34,7 @@ static int nv20_graph_register(struct drm_device *); static int nv30_graph_register(struct drm_device *); +static void nv20_graph_isr(struct drm_device *); static void nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx) @@ -584,6 +585,7 @@ nv20_graph_init(struct drm_device *dev) return ret; } + nouveau_irq_register(dev, 12, nv20_graph_isr); nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -661,6 +663,9 @@ nv20_graph_takedown(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000); + nouveau_irq_unregister(dev, 12); + nouveau_gpuobj_ref(NULL, &pgraph->ctx_table); } @@ -712,6 +717,7 @@ nv30_graph_init(struct drm_device *dev) nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctx_table->pinst >> 4); + nouveau_irq_register(dev, 12, nv20_graph_isr); nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -850,3 +856,44 @@ nv30_graph_register(struct drm_device *dev) dev_priv->engine.graph.registered = true; return 0; } + +static void +nv20_graph_isr(struct drm_device *dev) +{ + u32 stat; + + while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { + u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); + u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); + u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); + u32 chid = (addr & 0x01f00000) >> 20; + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); + u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff; + u32 show = stat; + + if (stat & NV_PGRAPH_INTR_ERROR) { + if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { + if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) + show &= ~NV_PGRAPH_INTR_ERROR; + } + } + + nv_wr32(dev, NV03_PGRAPH_INTR, stat); + nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); + + if (show && nouveau_ratelimit()) { + NV_INFO(dev, "PGRAPH -"); + nouveau_bitfield_print(nv10_graph_intr, show); + printk(" nsource:"); + nouveau_bitfield_print(nv04_graph_nsource, nsource); + printk(" nstatus:"); + nouveau_bitfield_print(nv10_graph_nstatus, nstatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x " + "mthd 0x%04x data 0x%08x\n", + chid, subc, class, mthd, data); + } + } +} diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 7a51608b55ba..0618846a97ce 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -30,6 +30,7 @@ #include "nouveau_grctx.h" static int nv40_graph_register(struct drm_device *); +static void nv40_graph_isr(struct drm_device *); struct nouveau_channel * 
nv40_graph_channel(struct drm_device *dev) @@ -277,6 +278,7 @@ nv40_graph_init(struct drm_device *dev) /* No context present currently */ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); + nouveau_irq_register(dev, 12, nv40_graph_isr); nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -408,6 +410,7 @@ nv40_graph_init(struct drm_device *dev) void nv40_graph_takedown(struct drm_device *dev) { + nouveau_irq_unregister(dev, 12); } static int @@ -449,3 +452,69 @@ nv40_graph_register(struct drm_device *dev) dev_priv->engine.graph.registered = true; return 0; } + +static int +nv40_graph_isr_chid(struct drm_device *dev, u32 inst) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan; + unsigned long flags; + int i; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { + chan = dev_priv->channels.ptr[i]; + if (!chan || !chan->ramin_grctx) + continue; + + if (inst == chan->ramin_grctx->pinst) + break; + } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return i; +} + +static void +nv40_graph_isr(struct drm_device *dev) +{ + u32 stat; + + while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) { + u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); + u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS); + u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4; + u32 chid = nv40_graph_isr_chid(dev, inst); + u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); + u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff; + u32 show = stat; + + if (stat & NV_PGRAPH_INTR_ERROR) { + if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { + if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) + show &= ~NV_PGRAPH_INTR_ERROR; + } else + if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) { + nv_mask(dev, 0x402000, 0, 0); + } + } + + nv_wr32(dev, NV03_PGRAPH_INTR, stat); + nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001); + + if (show && nouveau_ratelimit()) { + NV_INFO(dev, "PGRAPH -"); + nouveau_bitfield_print(nv10_graph_intr, show); + printk(" nsource:"); + nouveau_bitfield_print(nv04_graph_nsource, nsource); + printk(" nstatus:"); + nouveau_bitfield_print(nv10_graph_nstatus, nstatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d " + "class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, inst, subc, class, mthd, data); + } + } +} diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 6785269f778a..b3900788c66d 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -32,7 +32,8 @@ #include "nouveau_dma.h" #include "nv50_evo.h" -static int nv50_graph_register(struct drm_device *); +static int nv50_graph_register(struct drm_device *); +static void nv50_graph_isr(struct drm_device *); static void nv50_graph_init_reset(struct drm_device *dev) @@ -50,6 +51,7 @@ nv50_graph_init_intr(struct drm_device *dev) { NV_DEBUG(dev, "\n"); + nouveau_irq_register(dev, 12, nv50_graph_isr); nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff); nv_wr32(dev, 0x400138, 0xffffffff); nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff); @@ -165,6 +167,8 @@ void nv50_graph_takedown(struct drm_device *dev) { NV_DEBUG(dev, "\n"); + nv_wr32(dev, 0x40013c, 0x00000000); + nouveau_irq_unregister(dev, 12); } void @@ -324,7 +328,7 @@ nv50_graph_unload_context(struct drm_device *dev) return 0; } -void +static 
void nv50_graph_context_switch(struct drm_device *dev) { uint32_t inst; @@ -512,3 +516,495 @@ nv86_graph_tlb_flush(struct drm_device *dev) nv_mask(dev, 0x400500, 0x00000001, 0x00000001); spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); } + +static struct nouveau_enum nv50_mp_exec_error_names[] = +{ + { 3, "STACK_UNDERFLOW" }, + { 4, "QUADON_ACTIVE" }, + { 8, "TIMEOUT" }, + { 0x10, "INVALID_OPCODE" }, + { 0x40, "BREAKPOINT" }, + {} +}; + +static struct nouveau_bitfield nv50_graph_trap_m2mf[] = { + { 0x00000001, "NOTIFY" }, + { 0x00000002, "IN" }, + { 0x00000004, "OUT" }, + {} +}; + +static struct nouveau_bitfield nv50_graph_trap_vfetch[] = { + { 0x00000001, "FAULT" }, + {} +}; + +static struct nouveau_bitfield nv50_graph_trap_strmout[] = { + { 0x00000001, "FAULT" }, + {} +}; + +static struct nouveau_bitfield nv50_graph_trap_ccache[] = { + { 0x00000001, "FAULT" }, + {} +}; + +/* There must be a *lot* of these. Will take some time to gather them up. */ +static struct nouveau_enum nv50_data_error_names[] = { + { 4, "INVALID_VALUE" }, + { 5, "INVALID_ENUM" }, + { 8, "INVALID_OBJECT" }, + { 0xc, "INVALID_BITFIELD" }, + { 0x28, "MP_NO_REG_SPACE" }, + { 0x2b, "MP_BLOCK_SIZE_MISMATCH" }, + {} +}; + +static struct nouveau_bitfield nv50_graph_intr[] = { + { 0x00000001, "NOTIFY" }, + { 0x00000002, "COMPUTE_QUERY" }, + { 0x00000010, "ILLEGAL_MTHD" }, + { 0x00000020, "ILLEGAL_CLASS" }, + { 0x00000040, "DOUBLE_NOTIFY" }, + { 0x00001000, "CONTEXT_SWITCH" }, + { 0x00010000, "BUFFER_NOTIFY" }, + { 0x00100000, "DATA_ERROR" }, + { 0x00200000, "TRAP" }, + { 0x01000000, "SINGLE_STEP" }, + {} +}; + +static void +nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + uint32_t units = nv_rd32(dev, 0x1540); + uint32_t addr, mp10, status, pc, oplow, ophigh; + int i; + int mps = 0; + for (i = 0; i < 4; i++) { + if (!(units & 1 << (i+24))) + continue; + if (dev_priv->chipset < 0xa0) + addr = 0x408200 + (tpid << 12) + (i << 7); + else + addr = 0x408100 + (tpid << 11) + (i << 7); + mp10 = nv_rd32(dev, addr + 0x10); + status = nv_rd32(dev, addr + 0x14); + if (!status) + continue; + if (display) { + nv_rd32(dev, addr + 0x20); + pc = nv_rd32(dev, addr + 0x24); + oplow = nv_rd32(dev, addr + 0x70); + ophigh= nv_rd32(dev, addr + 0x74); + NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - " + "TP %d MP %d: ", tpid, i); + nouveau_enum_print(nv50_mp_exec_error_names, status); + printk(" at %06x warp %d, opcode %08x %08x\n", + pc&0xffffff, pc >> 24, + oplow, ophigh); + } + nv_wr32(dev, addr + 0x10, mp10); + nv_wr32(dev, addr + 0x14, 0); + mps++; + } + if (!mps && display) + NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: " + "No MPs claiming errors?\n", tpid); +} + +static void +nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old, + uint32_t ustatus_new, int display, const char *name) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + int tps = 0; + uint32_t units = nv_rd32(dev, 0x1540); + int i, r; + uint32_t ustatus_addr, ustatus; + for (i = 0; i < 16; i++) { + if (!(units & (1 << i))) + continue; + if (dev_priv->chipset < 0xa0) + ustatus_addr = ustatus_old + (i << 12); + else + ustatus_addr = ustatus_new + (i << 11); + ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff; + if (!ustatus) + continue; + tps++; + switch (type) { + case 6: /* texture error... 
unknown for now */ + nv50_fb_vm_trap(dev, display, name); + if (display) { + NV_ERROR(dev, "magic set %d:\n", i); + for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4) + NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, + nv_rd32(dev, r)); + } + break; + case 7: /* MP error */ + if (ustatus & 0x00010000) { + nv50_pgraph_mp_trap(dev, i, display); + ustatus &= ~0x00010000; + } + break; + case 8: /* TPDMA error */ + { + uint32_t e0c = nv_rd32(dev, ustatus_addr + 4); + uint32_t e10 = nv_rd32(dev, ustatus_addr + 8); + uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc); + uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10); + uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14); + uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18); + uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c); + nv50_fb_vm_trap(dev, display, name); + /* 2d engine destination */ + if (ustatus & 0x00000010) { + if (display) { + NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n", + i, e14, e10); + NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", + i, e0c, e18, e1c, e20, e24); + } + ustatus &= ~0x00000010; + } + /* Render target */ + if (ustatus & 0x00000040) { + if (display) { + NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n", + i, e14, e10); + NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", + i, e0c, e18, e1c, e20, e24); + } + ustatus &= ~0x00000040; + } + /* CUDA memory: l[], g[] or stack. */ + if (ustatus & 0x00000080) { + if (display) { + if (e18 & 0x80000000) { + /* g[] read fault? */ + NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n", + i, e14, e10 | ((e18 >> 24) & 0x1f)); + e18 &= ~0x1f000000; + } else if (e18 & 0xc) { + /* g[] write fault? */ + NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n", + i, e14, e10 | ((e18 >> 7) & 0x1f)); + e18 &= ~0x00000f80; + } else { + NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n", + i, e14, e10); + } + NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n", + i, e0c, e18, e1c, e20, e24); + } + ustatus &= ~0x00000080; + } + } + break; + } + if (ustatus) { + if (display) + NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); + } + nv_wr32(dev, ustatus_addr, 0xc0000000); + } + + if (!tps && display) + NV_INFO(dev, "%s - No TPs claiming errors?\n", name); +} + +static int +nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid) +{ + u32 status = nv_rd32(dev, 0x400108); + u32 ustatus; + + if (!status && display) { + NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n"); + return 1; + } + + /* DISPATCH: Relays commands to other units and handles NOTIFY, + * COND, QUERY. If you get a trap from it, the command is still stuck + * in DISPATCH and you need to do something about it. */ + if (status & 0x001) { + ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff; + if (!ustatus && display) { + NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n"); + } + + nv_wr32(dev, 0x400500, 0x00000000); + + /* Known to be triggered by screwed up NOTIFY and COND... 
*/ + if (ustatus & 0x00000001) { + u32 addr = nv_rd32(dev, 0x400808); + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 datal = nv_rd32(dev, 0x40080c); + u32 datah = nv_rd32(dev, 0x400810); + u32 class = nv_rd32(dev, 0x400814); + u32 r848 = nv_rd32(dev, 0x400848); + + NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n"); + if (display && (addr & 0x80000000)) { + NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) " + "subc %d class 0x%04x mthd 0x%04x " + "data 0x%08x%08x " + "400808 0x%08x 400848 0x%08x\n", + chid, inst, subc, class, mthd, datah, + datal, addr, r848); + } else + if (display) { + NV_INFO(dev, "PGRAPH - no stuck command?\n"); + } + + nv_wr32(dev, 0x400808, 0); + nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3); + nv_wr32(dev, 0x400848, 0); + ustatus &= ~0x00000001; + } + + if (ustatus & 0x00000002) { + u32 addr = nv_rd32(dev, 0x40084c); + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(dev, 0x40085c); + u32 class = nv_rd32(dev, 0x400814); + + NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n"); + if (display && (addr & 0x80000000)) { + NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) " + "subc %d class 0x%04x mthd 0x%04x " + "data 0x%08x 40084c 0x%08x\n", + chid, inst, subc, class, mthd, + data, addr); + } else + if (display) { + NV_INFO(dev, "PGRAPH - no stuck command?\n"); + } + + nv_wr32(dev, 0x40084c, 0); + ustatus &= ~0x00000002; + } + + if (ustatus && display) { + NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown " + "0x%08x)\n", ustatus); + } + + nv_wr32(dev, 0x400804, 0xc0000000); + nv_wr32(dev, 0x400108, 0x001); + status &= ~0x001; + if (!status) + return 0; + } + + /* M2MF: Memory to memory copy engine. */ + if (status & 0x002) { + u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff; + if (display) { + NV_INFO(dev, "PGRAPH - TRAP_M2MF"); + nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n", + nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808), + nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810)); + + } + + /* No sane way found yet -- just reset the bugger. */ + nv_wr32(dev, 0x400040, 2); + nv_wr32(dev, 0x400040, 0); + nv_wr32(dev, 0x406800, 0xc0000000); + nv_wr32(dev, 0x400108, 0x002); + status &= ~0x002; + } + + /* VFETCH: Fetches data from vertex buffers. */ + if (status & 0x004) { + u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff; + if (display) { + NV_INFO(dev, "PGRAPH - TRAP_VFETCH"); + nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n", + nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08), + nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10)); + } + + nv_wr32(dev, 0x400c04, 0xc0000000); + nv_wr32(dev, 0x400108, 0x004); + status &= ~0x004; + } + + /* STRMOUT: DirectX streamout / OpenGL transform feedback. */ + if (status & 0x008) { + ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff; + if (display) { + NV_INFO(dev, "PGRAPH - TRAP_STRMOUT"); + nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n", + nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808), + nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810)); + + } + + /* No sane way found yet -- just reset the bugger. */ + nv_wr32(dev, 0x400040, 0x80); + nv_wr32(dev, 0x400040, 0); + nv_wr32(dev, 0x401800, 0xc0000000); + nv_wr32(dev, 0x400108, 0x008); + status &= ~0x008; + } + + /* CCACHE: Handles code and c[] caches and fills them. 
*/ + if (status & 0x010) { + ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff; + if (display) { + NV_INFO(dev, "PGRAPH - TRAP_CCACHE"); + nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus); + printk("\n"); + NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x" + " %08x %08x %08x\n", + nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804), + nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c), + nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814), + nv_rd32(dev, 0x40581c)); + + } + + nv_wr32(dev, 0x405018, 0xc0000000); + nv_wr32(dev, 0x400108, 0x010); + status &= ~0x010; + } + + /* Unknown, not seen yet... 0x402000 is the only trap status reg + * remaining, so try to handle it anyway. Perhaps related to that + * unknown DMA slot on tesla? */ + if (status & 0x20) { + ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff; + if (display) + NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus); + nv_wr32(dev, 0x402000, 0xc0000000); + /* no status modifiction on purpose */ + } + + /* TEXTURE: CUDA texturing units */ + if (status & 0x040) { + nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display, + "PGRAPH - TRAP_TEXTURE"); + nv_wr32(dev, 0x400108, 0x040); + status &= ~0x040; + } + + /* MP: CUDA execution engines. */ + if (status & 0x080) { + nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display, + "PGRAPH - TRAP_MP"); + nv_wr32(dev, 0x400108, 0x080); + status &= ~0x080; + } + + /* TPDMA: Handles TP-initiated uncached memory accesses: + * l[], g[], stack, 2d surfaces, render targets. */ + if (status & 0x100) { + nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display, + "PGRAPH - TRAP_TPDMA"); + nv_wr32(dev, 0x400108, 0x100); + status &= ~0x100; + } + + if (status) { + if (display) + NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status); + nv_wr32(dev, 0x400108, status); + } + + return 1; +} + +static int +nv50_graph_isr_chid(struct drm_device *dev, u64 inst) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan; + unsigned long flags; + int i; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { + chan = dev_priv->channels.ptr[i]; + if (!chan || !chan->ramin) + continue; + + if (inst == chan->ramin->vinst) + break; + } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return i; +} + +static void +nv50_graph_isr(struct drm_device *dev) +{ + u32 stat; + + while ((stat = nv_rd32(dev, 0x400100))) { + u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12; + u32 chid = nv50_graph_isr_chid(dev, inst); + u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR); + u32 subc = (addr & 0x00070000) >> 16; + u32 mthd = (addr & 0x00001ffc); + u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA); + u32 class = nv_rd32(dev, 0x400814); + u32 show = stat; + + if (stat & 0x00000010) { + if (!nouveau_gpuobj_mthd_call2(dev, chid, class, + mthd, data)) + show &= ~0x00000010; + } + + if (stat & 0x00001000) { + nv_wr32(dev, 0x400500, 0x00000000); + nv_wr32(dev, 0x400100, 0x00001000); + nv_mask(dev, 0x40013c, 0x00001000, 0x00000000); + nv50_graph_context_switch(dev); + stat &= ~0x00001000; + show &= ~0x00001000; + } + + show = (show && nouveau_ratelimit()) ? 
show : 0; + + if (show & 0x00100000) { + u32 ecode = nv_rd32(dev, 0x400110); + NV_INFO(dev, "PGRAPH - DATA_ERROR "); + nouveau_enum_print(nv50_data_error_names, ecode); + printk("\n"); + } + + if (stat & 0x00200000) { + if (!nv50_pgraph_trap_handler(dev, show, inst, chid)) + show &= ~0x00200000; + } + + nv_wr32(dev, 0x400100, stat); + nv_wr32(dev, 0x400500, 0x00010001); + + if (show) { + NV_INFO(dev, "PGRAPH -"); + nouveau_bitfield_print(nv50_graph_intr, show); + printk("\n"); + NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d " + "class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, inst, subc, class, mthd, data); + } + } + + if (nv_rd32(dev, 0x400824) & (1 << 31)) + nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); +} -- cgit v1.2.3 From e4cbadcaaa4678020e37ca93502942ffdf9aef80 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 11 Nov 2010 13:59:05 +1000 Subject: drm/nv04-nv40: unregister irq handler on destroy Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv04_display.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c index 0978a5b326dc..1715e1464b7d 100644 --- a/drivers/gpu/drm/nouveau/nv04_display.c +++ b/drivers/gpu/drm/nouveau/nv04_display.c @@ -213,6 +213,9 @@ nv04_display_destroy(struct drm_device *dev) NV_DEBUG_KMS(dev, "\n"); + nouveau_irq_unregister(dev, 24); + nouveau_irq_unregister(dev, 25); + /* Turn every CRTC off. */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct drm_mode_set modeset = { -- cgit v1.2.3 From fce2bad0ee2666d6a10bfeb634b1021469cc3d79 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 11 Nov 2010 16:14:56 +1000 Subject: drm/nv50: rework PGPIO IRQ handling and hotplug detection Allows callers to install their own handlers for when a GPIO line changes state (such as for hotplug detect). This also fixes a bug where we weren't acknowledging the GPIO IRQ until after the bottom half had run, causing a severe IRQ storm in some cases. 
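For illustration, a minimal sketch of how a caller might use the handler hooks
this patch introduces. The irq_register field, the nouveau_gpio_engine struct,
and the void (*)(void *, int) callback signature are taken from the patch
below; my_hotplug() and my_bind() are hypothetical names and the error
handling is simplified:

	/* Hypothetical consumer of the new PGPIO handler interface;
	 * a sketch only, assuming the engine fields added by this patch. */
	static void my_hotplug(void *data, int state)
	{
		struct drm_connector *connector = data;
		/* Called from a workqueue bottom half; state is the GPIO
		 * level sampled after the IRQ (non-zero means plugged). */
	}

	static int my_bind(struct drm_device *dev,
			   struct drm_connector *connector,
			   enum dcb_gpio_tag tag)
	{
		struct drm_nouveau_private *dev_priv = dev->dev_private;
		struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;

		/* irq_register is optional; chipsets without PGPIO IRQ
		 * support leave it NULL, so skip registration there. */
		if (!pgpio->irq_register)
			return 0;
		return pgpio->irq_register(dev, tag, my_hotplug, connector);
	}

Note that the IRQ acknowledgement now happens in the top half, before the
bottom half runs, which is what eliminates the IRQ storm described above.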
Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_connector.c | 54 +++++++- drivers/gpu/drm/nouveau/nouveau_dp.c | 6 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 21 ++-- drivers/gpu/drm/nouveau/nouveau_state.c | 4 + drivers/gpu/drm/nouveau/nv50_display.c | 66 ---------- drivers/gpu/drm/nouveau/nv50_display.h | 1 - drivers/gpu/drm/nouveau/nv50_gpio.c | 186 +++++++++++++++++++++++++--- 7 files changed, 234 insertions(+), 104 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 52c356e9a3d1..a21e00076839 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -37,6 +37,8 @@ #include "nouveau_connector.h" #include "nouveau_hw.h" +static void nouveau_connector_hotplug(void *, int); + static struct nouveau_encoder * find_encoder_by_type(struct drm_connector *connector, int type) { @@ -94,22 +96,30 @@ nouveau_connector_bpp(struct drm_connector *connector) } static void -nouveau_connector_destroy(struct drm_connector *drm_connector) +nouveau_connector_destroy(struct drm_connector *connector) { - struct nouveau_connector *nv_connector = - nouveau_connector(drm_connector); + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct drm_nouveau_private *dev_priv; + struct nouveau_gpio_engine *pgpio; struct drm_device *dev; if (!nv_connector) return; dev = nv_connector->base.dev; + dev_priv = dev->dev_private; NV_DEBUG_KMS(dev, "\n"); + pgpio = &dev_priv->engine.gpio; + if (pgpio->irq_unregister) { + pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag, + nouveau_connector_hotplug, connector); + } + kfree(nv_connector->edid); - drm_sysfs_connector_remove(drm_connector); - drm_connector_cleanup(drm_connector); - kfree(drm_connector); + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + kfree(connector); } static struct nouveau_i2c_chan * @@ -760,6 +770,7 @@ nouveau_connector_create(struct drm_device *dev, int index) { const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; struct nouveau_connector *nv_connector = NULL; struct dcb_connector_table_entry *dcb = NULL; struct drm_connector *connector; @@ -876,6 +887,11 @@ nouveau_connector_create(struct drm_device *dev, int index) break; } + if (pgpio->irq_register) { + pgpio->irq_register(dev, nv_connector->dcb->gpio_tag, + nouveau_connector_hotplug, connector); + } + drm_sysfs_connector_add(connector); dcb->drm = connector; return dcb->drm; @@ -886,3 +902,29 @@ fail: return ERR_PTR(ret); } + +static void +nouveau_connector_hotplug(void *data, int plugged) +{ + struct drm_connector *connector = data; + struct drm_device *dev = connector->dev; + + NV_INFO(dev, "%splugged %s\n", plugged ? 
"" : "un", + drm_get_connector_name(connector)); + + if (connector->encoder && connector->encoder->crtc && + connector->encoder->crtc->enabled) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder); + struct drm_encoder_helper_funcs *helper = + connector->encoder->helper_private; + + if (nv_encoder->dcb->type == OUTPUT_DP) { + if (plugged) + helper->dpms(connector->encoder, DRM_MODE_DPMS_ON); + else + helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); + } + } + + drm_helper_hpd_irq_event(dev); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 4562f309ae3d..38d599554bce 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -279,7 +279,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder) struct bit_displayport_encoder_table *dpe; int dpe_headerlen; uint8_t config[4], status[3]; - bool cr_done, cr_max_vs, eq_done; + bool cr_done, cr_max_vs, eq_done, hpd_state; int ret = 0, i, tries, voltage; NV_DEBUG_KMS(dev, "link training!!\n"); @@ -297,7 +297,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder) /* disable hotplug detect, this flips around on some panels during * link training. */ - pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); + hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); if (dpe->script0) { NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); @@ -439,7 +439,7 @@ stop: } /* re-enable hotplug detect */ - pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true); + pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state); return eq_done; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index b19ef7fbb9dd..912c9f785222 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -376,13 +376,19 @@ struct nouveau_display_engine { }; struct nouveau_gpio_engine { + void *priv; + int (*init)(struct drm_device *); void (*takedown)(struct drm_device *); int (*get)(struct drm_device *, enum dcb_gpio_tag); int (*set)(struct drm_device *, enum dcb_gpio_tag, int state); - void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on); + int (*irq_register)(struct drm_device *, enum dcb_gpio_tag, + void (*)(void *, int), void *); + void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag, + void (*)(void *, int), void *); + bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on); }; struct nouveau_pm_voltage_level { @@ -619,13 +625,6 @@ struct drm_nouveau_private { bool msi_enabled; struct workqueue_struct *wq; struct work_struct irq_work; - struct work_struct hpd_work; - - struct { - spinlock_t lock; - uint32_t hpd0_bits; - uint32_t hpd1_bits; - } hpd_state; struct list_head vbl_waiting; @@ -1366,7 +1365,11 @@ int nv50_gpio_init(struct drm_device *dev); void nv50_gpio_fini(struct drm_device *dev); int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); -void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on); +int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag, + void (*)(void *, int), void *); +void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag, + void (*)(void *, int), void *); +bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on); /* nv50_calc. 
*/ int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 262545b58d96..b26b34c419cb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -396,6 +396,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.takedown = nv50_gpio_fini; engine->gpio.get = nv50_gpio_get; engine->gpio.set = nv50_gpio_set; + engine->gpio.irq_register = nv50_gpio_irq_register; + engine->gpio.irq_unregister = nv50_gpio_irq_unregister; engine->gpio.irq_enable = nv50_gpio_irq_enable; switch (dev_priv->chipset) { case 0x84: @@ -487,6 +489,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.takedown = nouveau_stub_takedown; engine->gpio.get = nv50_gpio_get; engine->gpio.set = nv50_gpio_set; + engine->gpio.irq_register = nv50_gpio_irq_register; + engine->gpio.irq_unregister = nv50_gpio_irq_unregister; engine->gpio.irq_enable = nv50_gpio_irq_enable; engine->crypt.init = nouveau_stub_init; engine->crypt.takedown = nouveau_stub_takedown; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index e5dbd171672c..7cc94ed9ed95 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -804,71 +804,6 @@ nv50_display_error_handler(struct drm_device *dev) } } -void -nv50_display_irq_hotplug_bh(struct work_struct *work) -{ - struct drm_nouveau_private *dev_priv = - container_of(work, struct drm_nouveau_private, hpd_work); - struct drm_device *dev = dev_priv->dev; - struct drm_connector *connector; - const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; - uint32_t unplug_mask, plug_mask, change_mask; - uint32_t hpd0, hpd1; - - spin_lock_irq(&dev_priv->hpd_state.lock); - hpd0 = dev_priv->hpd_state.hpd0_bits; - dev_priv->hpd_state.hpd0_bits = 0; - hpd1 = dev_priv->hpd_state.hpd1_bits; - dev_priv->hpd_state.hpd1_bits = 0; - spin_unlock_irq(&dev_priv->hpd_state.lock); - - hpd0 &= nv_rd32(dev, 0xe050); - if (dev_priv->chipset >= 0x90) - hpd1 &= nv_rd32(dev, 0xe070); - - plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16); - unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000); - change_mask = plug_mask | unplug_mask; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct drm_encoder_helper_funcs *helper; - struct nouveau_connector *nv_connector = - nouveau_connector(connector); - struct nouveau_encoder *nv_encoder; - struct dcb_gpio_entry *gpio; - uint32_t reg; - bool plugged; - - if (!nv_connector->dcb) - continue; - - gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag); - if (!gpio || !(change_mask & (1 << gpio->line))) - continue; - - reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]); - plugged = !!(reg & (4 << ((gpio->line & 7) << 2))); - NV_INFO(dev, "%splugged %s\n", plugged ? 
"" : "un", - drm_get_connector_name(connector)) ; - - if (!connector->encoder || !connector->encoder->crtc || - !connector->encoder->crtc->enabled) - continue; - nv_encoder = nouveau_encoder(connector->encoder); - helper = connector->encoder->helper_private; - - if (nv_encoder->dcb->type != OUTPUT_DP) - continue; - - if (plugged) - helper->dpms(connector->encoder, DRM_MODE_DPMS_ON); - else - helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); - } - - drm_helper_hpd_irq_event(dev); -} - static void nv50_display_isr(struct drm_device *dev) { @@ -918,4 +853,3 @@ nv50_display_isr(struct drm_device *dev) } } } - diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h index a269fccf3555..f0e30b78ef6b 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.h +++ b/drivers/gpu/drm/nouveau/nv50_display.h @@ -36,7 +36,6 @@ #include "nv50_evo.h" void nv50_display_irq_handler_bh(struct work_struct *work); -void nv50_display_irq_hotplug_bh(struct work_struct *work); int nv50_display_early_init(struct drm_device *dev); void nv50_display_late_takedown(struct drm_device *dev); int nv50_display_create(struct drm_device *dev); diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c index 87266d1b5def..6b149c0cc06d 100644 --- a/drivers/gpu/drm/nouveau/nv50_gpio.c +++ b/drivers/gpu/drm/nouveau/nv50_gpio.c @@ -29,6 +29,24 @@ #include "nv50_display.h" static void nv50_gpio_isr(struct drm_device *dev); +static void nv50_gpio_isr_bh(struct work_struct *work); + +struct nv50_gpio_priv { + struct list_head handlers; + spinlock_t lock; +}; + +struct nv50_gpio_handler { + struct drm_device *dev; + struct list_head head; + struct work_struct work; + bool inhibit; + + struct dcb_gpio_entry *gpio; + + void (*handler)(void *data, int state); + void *data; +}; static int nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift) @@ -79,29 +97,123 @@ nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) return 0; } +int +nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag, + void (*handler)(void *, int), void *data) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + struct nv50_gpio_priv *priv = pgpio->priv; + struct nv50_gpio_handler *gpioh; + struct dcb_gpio_entry *gpio; + unsigned long flags; + + gpio = nouveau_bios_gpio_entry(dev, tag); + if (!gpio) + return -ENOENT; + + gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL); + if (!gpioh) + return -ENOMEM; + + INIT_WORK(&gpioh->work, nv50_gpio_isr_bh); + gpioh->dev = dev; + gpioh->gpio = gpio; + gpioh->handler = handler; + gpioh->data = data; + + spin_lock_irqsave(&priv->lock, flags); + list_add(&gpioh->head, &priv->handlers); + spin_unlock_irqrestore(&priv->lock, flags); + return 0; +} + void -nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on) +nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag, + void (*handler)(void *, int), void *data) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + struct nv50_gpio_priv *priv = pgpio->priv; + struct nv50_gpio_handler *gpioh, *tmp; struct dcb_gpio_entry *gpio; - u32 reg, mask; + unsigned long flags; gpio = nouveau_bios_gpio_entry(dev, tag); - if (!gpio) { - NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag); + if (!gpio) return; + + spin_lock_irqsave(&priv->lock, flags); + list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) 
{ + if (gpioh->gpio != gpio || + gpioh->handler != handler || + gpioh->data != data) + continue; + list_del(&gpioh->head); + kfree(gpioh); } + spin_unlock_irqrestore(&priv->lock, flags); +} + +bool +nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on) +{ + struct dcb_gpio_entry *gpio; + u32 reg, mask; + + gpio = nouveau_bios_gpio_entry(dev, tag); + if (!gpio) + return false; reg = gpio->line < 16 ? 0xe050 : 0xe070; mask = 0x00010001 << (gpio->line & 0xf); nv_wr32(dev, reg + 4, mask); - nv_mask(dev, reg + 0, mask, on ? mask : 0); + reg = nv_mask(dev, reg + 0, mask, on ? mask : 0); + return (reg & mask) == mask; +} + +static int +nv50_gpio_create(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + struct nv50_gpio_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + INIT_LIST_HEAD(&priv->handlers); + spin_lock_init(&priv->lock); + pgpio->priv = priv; + return 0; +} + +static void +nv50_gpio_destroy(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + + kfree(pgpio->priv); + pgpio->priv = NULL; } int nv50_gpio_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + struct nv50_gpio_priv *priv; + int ret; + + if (!pgpio->priv) { + ret = nv50_gpio_create(dev); + if (ret) + return ret; + } + priv = pgpio->priv; /* disable, and ack any pending gpio interrupts */ nv_wr32(dev, 0xe050, 0x00000000); @@ -111,8 +223,6 @@ nv50_gpio_init(struct drm_device *dev) nv_wr32(dev, 0xe074, 0xffffffff); } - INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); - spin_lock_init(&dev_priv->hpd_state.lock); nouveau_irq_register(dev, 21, nv50_gpio_isr); return 0; } @@ -126,26 +236,64 @@ nv50_gpio_fini(struct drm_device *dev) if (dev_priv->chipset >= 0x90) nv_wr32(dev, 0xe070, 0x00000000); nouveau_irq_unregister(dev, 21); + + nv50_gpio_destroy(dev); +} + +static void +nv50_gpio_isr_bh(struct work_struct *work) +{ + struct nv50_gpio_handler *gpioh = + container_of(work, struct nv50_gpio_handler, work); + struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + struct nv50_gpio_priv *priv = pgpio->priv; + unsigned long flags; + int state; + + state = pgpio->get(gpioh->dev, gpioh->gpio->tag); + if (state < 0) + return; + + gpioh->handler(gpioh->data, state); + + spin_lock_irqsave(&priv->lock, flags); + gpioh->inhibit = false; + spin_unlock_irqrestore(&priv->lock, flags); } static void nv50_gpio_isr(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t hpd0_bits, hpd1_bits = 0; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; + struct nv50_gpio_priv *priv = pgpio->priv; + struct nv50_gpio_handler *gpioh; + u32 intr0, intr1 = 0; + u32 hi, lo, ch; - hpd0_bits = nv_rd32(dev, 0xe054); - nv_wr32(dev, 0xe054, hpd0_bits); + intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050); + if (dev_priv->chipset >= 0x90) + intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070); - if (dev_priv->chipset >= 0x90) { - hpd1_bits = nv_rd32(dev, 0xe074); - nv_wr32(dev, 0xe074, hpd1_bits); - } + hi = (intr0 & 0x0000ffff) | (intr1 << 16); + lo = (intr0 >> 16) | (intr1 & 0xffff0000); + ch = hi | lo; - spin_lock(&dev_priv->hpd_state.lock); - dev_priv->hpd_state.hpd0_bits |= 
hpd0_bits; - dev_priv->hpd_state.hpd1_bits |= hpd1_bits; - spin_unlock(&dev_priv->hpd_state.lock); + nv_wr32(dev, 0xe054, intr0); + if (dev_priv->chipset >= 0x90) + nv_wr32(dev, 0xe074, intr1); + + spin_lock(&priv->lock); + list_for_each_entry(gpioh, &priv->handlers, head) { + if (!(ch & (1 << gpioh->gpio->line))) + continue; - queue_work(dev_priv->wq, &dev_priv->hpd_work); + if (gpioh->inhibit) + continue; + gpioh->inhibit = true; + + queue_work(dev_priv->wq, &gpioh->work); + } + spin_unlock(&priv->lock); } -- cgit v1.2.3 From dc1e5c0dbff27c2b5147eaea16c578d2337870c3 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 25 Oct 2010 15:23:59 +1000 Subject: drm/nouveau: simplify gpuobj suspend/resume Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.c | 14 ++++---- drivers/gpu/drm/nouveau/nouveau_drv.h | 3 +- drivers/gpu/drm/nouveau/nouveau_object.c | 56 ++++++-------------------------- drivers/gpu/drm/nouveau/nv50_instmem.c | 21 ++---------- 4 files changed, 21 insertions(+), 73 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 52f9307d7396..7ff5b4369f03 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -222,17 +222,17 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) pfifo->unload_context(dev); pgraph->unload_context(dev); - NV_INFO(dev, "Suspending GPU objects...\n"); - ret = nouveau_gpuobj_suspend(dev); + ret = pinstmem->suspend(dev); if (ret) { NV_ERROR(dev, "... failed: %d\n", ret); goto out_abort; } - ret = pinstmem->suspend(dev); + NV_INFO(dev, "Suspending GPU objects...\n"); + ret = nouveau_gpuobj_suspend(dev); if (ret) { NV_ERROR(dev, "... failed: %d\n", ret); - nouveau_gpuobj_suspend_cleanup(dev); + pinstmem->resume(dev); goto out_abort; } @@ -297,6 +297,9 @@ nouveau_pci_resume(struct pci_dev *pdev) } } + NV_INFO(dev, "Restoring GPU objects...\n"); + nouveau_gpuobj_resume(dev); + NV_INFO(dev, "Reinitialising engines...\n"); engine->instmem.resume(dev); engine->mc.init(dev); @@ -306,9 +309,6 @@ nouveau_pci_resume(struct pci_dev *pdev) engine->crypt.init(dev); engine->fifo.init(dev); - NV_INFO(dev, "Restoring GPU objects...\n"); - nouveau_gpuobj_resume(dev); - nouveau_irq_postinstall(dev); /* Re-write SKIPS, they'll have been lost over the suspend */ diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 912c9f785222..18a611e1ab80 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -153,7 +153,7 @@ struct nouveau_gpuobj { struct drm_mm_node *im_pramin; struct nouveau_bo *im_backing; - uint32_t *im_backing_suspend; + u32 *suspend; int im_bound; uint32_t flags; @@ -865,7 +865,6 @@ extern int nouveau_gpuobj_early_init(struct drm_device *); extern int nouveau_gpuobj_init(struct drm_device *); extern void nouveau_gpuobj_takedown(struct drm_device *); extern int nouveau_gpuobj_suspend(struct drm_device *dev); -extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev); extern void nouveau_gpuobj_resume(struct drm_device *dev); extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng); extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd, diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 94429553433c..8c5e35cc04df 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -934,54 +934,23 @@ 
nouveau_gpuobj_suspend(struct drm_device *dev) struct nouveau_gpuobj *gpuobj; int i; - if (dev_priv->card_type < NV_50) { - dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram); - if (!dev_priv->susres.ramin_copy) - return -ENOMEM; - - for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4) - dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i); - return 0; - } - list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { - if (!gpuobj->im_backing) + if (gpuobj->cinst != 0xdeadbeef) continue; - gpuobj->im_backing_suspend = vmalloc(gpuobj->size); - if (!gpuobj->im_backing_suspend) { + gpuobj->suspend = vmalloc(gpuobj->size); + if (!gpuobj->suspend) { nouveau_gpuobj_resume(dev); return -ENOMEM; } for (i = 0; i < gpuobj->size; i += 4) - gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i); + gpuobj->suspend[i/4] = nv_ro32(gpuobj, i); } return 0; } -void -nouveau_gpuobj_suspend_cleanup(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *gpuobj; - - if (dev_priv->card_type < NV_50) { - vfree(dev_priv->susres.ramin_copy); - dev_priv->susres.ramin_copy = NULL; - return; - } - - list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { - if (!gpuobj->im_backing_suspend) - continue; - - vfree(gpuobj->im_backing_suspend); - gpuobj->im_backing_suspend = NULL; - } -} - void nouveau_gpuobj_resume(struct drm_device *dev) { @@ -989,23 +958,18 @@ nouveau_gpuobj_resume(struct drm_device *dev) struct nouveau_gpuobj *gpuobj; int i; - if (dev_priv->card_type < NV_50) { - for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4) - nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]); - nouveau_gpuobj_suspend_cleanup(dev); - return; - } - list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { - if (!gpuobj->im_backing_suspend) + if (!gpuobj->suspend) continue; for (i = 0; i < gpuobj->size; i += 4) - nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]); - dev_priv->engine.instmem.flush(dev); + nv_wo32(gpuobj, i, gpuobj->suspend[i/4]); + + vfree(gpuobj->suspend); + gpuobj->suspend = NULL; } - nouveau_gpuobj_suspend_cleanup(dev); + dev_priv->engine.instmem.flush(dev); } int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 2c98eb176d64..1640c12d8b3a 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -276,16 +276,8 @@ int nv50_instmem_suspend(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = dev_priv->channels.ptr[0]; - struct nouveau_gpuobj *ramin = chan->ramin; - int i; - ramin->im_backing_suspend = vmalloc(ramin->size); - if (!ramin->im_backing_suspend) - return -ENOMEM; - - for (i = 0; i < ramin->size; i += 4) - ramin->im_backing_suspend[i/4] = nv_ri32(dev, i); + dev_priv->ramin_available = false; return 0; } @@ -295,17 +287,8 @@ nv50_instmem_resume(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; struct nouveau_channel *chan = dev_priv->channels.ptr[0]; - struct nouveau_gpuobj *ramin = chan->ramin; int i; - dev_priv->ramin_available = false; - dev_priv->ramin_base = ~0; - for (i = 0; i < ramin->size; i += 4) - nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]); - dev_priv->ramin_available = true; - vfree(ramin->im_backing_suspend); - ramin->im_backing_suspend = NULL; - /* Poke the relevant regs, and pray it works :) */ nv_wr32(dev, 
NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12)); nv_wr32(dev, NV50_PUNK_UNK1710, 0); @@ -318,6 +301,8 @@ nv50_instmem_resume(struct drm_device *dev) for (i = 0; i < 8; i++) nv_wr32(dev, 0x1900 + (i*4), 0); + + dev_priv->ramin_available = true; } int -- cgit v1.2.3 From e41115d0ad5c40a7ea4d85b1c77b4c02185a5581 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 1 Nov 2010 11:45:02 +1000 Subject: drm/nouveau: rework gpu-specific instmem interfaces Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 44 +++++------ drivers/gpu/drm/nouveau/nouveau_object.c | 112 +++++++++----------------- drivers/gpu/drm/nouveau/nouveau_state.c | 56 ++++++------- drivers/gpu/drm/nouveau/nv04_instmem.c | 50 ++++++++---- drivers/gpu/drm/nouveau/nv50_instmem.c | 130 +++++++++++++++++-------------- drivers/gpu/drm/nouveau/nvc0_instmem.c | 121 +++++++++++++++++----------- 6 files changed, 270 insertions(+), 243 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 18a611e1ab80..822cd40b3eb4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -146,15 +146,16 @@ enum nouveau_flags { #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) #define NVOBJ_FLAG_ZERO_FREE (1 << 2) + +#define NVOBJ_CINST_GLOBAL 0xdeadbeef + struct nouveau_gpuobj { struct drm_device *dev; struct kref refcount; struct list_head list; - struct drm_mm_node *im_pramin; - struct nouveau_bo *im_backing; + void *node; u32 *suspend; - int im_bound; uint32_t flags; @@ -288,11 +289,11 @@ struct nouveau_instmem_engine { int (*suspend)(struct drm_device *dev); void (*resume)(struct drm_device *dev); - int (*populate)(struct drm_device *, struct nouveau_gpuobj *, - u32 *size, u32 align); - void (*clear)(struct drm_device *, struct nouveau_gpuobj *); - int (*bind)(struct drm_device *, struct nouveau_gpuobj *); - int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); + int (*get)(struct nouveau_gpuobj *, u32 size, u32 align); + void (*put)(struct nouveau_gpuobj *); + int (*map)(struct nouveau_gpuobj *); + void (*unmap)(struct nouveau_gpuobj *); + void (*flush)(struct drm_device *); }; @@ -1182,11 +1183,10 @@ extern int nv04_instmem_init(struct drm_device *); extern void nv04_instmem_takedown(struct drm_device *); extern int nv04_instmem_suspend(struct drm_device *); extern void nv04_instmem_resume(struct drm_device *); -extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, - u32 *size, u32 align); -extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); -extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); -extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); +extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); +extern void nv04_instmem_put(struct nouveau_gpuobj *); +extern int nv04_instmem_map(struct nouveau_gpuobj *); +extern void nv04_instmem_unmap(struct nouveau_gpuobj *); extern void nv04_instmem_flush(struct drm_device *); /* nv50_instmem.c */ @@ -1194,11 +1194,10 @@ extern int nv50_instmem_init(struct drm_device *); extern void nv50_instmem_takedown(struct drm_device *); extern int nv50_instmem_suspend(struct drm_device *); extern void nv50_instmem_resume(struct drm_device *); -extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, - u32 *size, u32 align); -extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); -extern int nv50_instmem_bind(struct 
drm_device *, struct nouveau_gpuobj *); -extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); +extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); +extern void nv50_instmem_put(struct nouveau_gpuobj *); +extern int nv50_instmem_map(struct nouveau_gpuobj *); +extern void nv50_instmem_unmap(struct nouveau_gpuobj *); extern void nv50_instmem_flush(struct drm_device *); extern void nv84_instmem_flush(struct drm_device *); extern void nv50_vm_flush(struct drm_device *, int engine); @@ -1208,11 +1207,10 @@ extern int nvc0_instmem_init(struct drm_device *); extern void nvc0_instmem_takedown(struct drm_device *); extern int nvc0_instmem_suspend(struct drm_device *); extern void nvc0_instmem_resume(struct drm_device *); -extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, - u32 *size, u32 align); -extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); -extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); -extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); +extern int nvc0_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); +extern void nvc0_instmem_put(struct nouveau_gpuobj *); +extern int nvc0_instmem_map(struct nouveau_gpuobj *); +extern void nvc0_instmem_unmap(struct nouveau_gpuobj *); extern void nvc0_instmem_flush(struct drm_device *); /* nv04_mc.c */ diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 8c5e35cc04df..e8c74de905ec 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -168,17 +168,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, struct nouveau_gpuobj **gpuobj_ret) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; struct nouveau_gpuobj *gpuobj; struct drm_mm_node *ramin = NULL; - int ret; + int ret, i; NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", chan ? 
chan->id : -1, size, align, flags); - if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL) - return -EINVAL; - gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); if (!gpuobj) return -ENOMEM; @@ -193,88 +190,45 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, spin_unlock(&dev_priv->ramin_lock); if (chan) { - NV_DEBUG(dev, "channel heap\n"); - ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0); if (ramin) ramin = drm_mm_get_block(ramin, size, align); - if (!ramin) { nouveau_gpuobj_ref(NULL, &gpuobj); return -ENOMEM; } - } else { - NV_DEBUG(dev, "global heap\n"); - - /* allocate backing pages, sets vinst */ - ret = engine->instmem.populate(dev, gpuobj, &size, align); - if (ret) { - nouveau_gpuobj_ref(NULL, &gpuobj); - return ret; - } - /* try and get aperture space */ - do { - if (drm_mm_pre_get(&dev_priv->ramin_heap)) - return -ENOMEM; - - spin_lock(&dev_priv->ramin_lock); - ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, - align, 0); - if (ramin == NULL) { - spin_unlock(&dev_priv->ramin_lock); - nouveau_gpuobj_ref(NULL, &gpuobj); - return -ENOMEM; - } - - ramin = drm_mm_get_block_atomic(ramin, size, align); - spin_unlock(&dev_priv->ramin_lock); - } while (ramin == NULL); + gpuobj->pinst = chan->ramin->pinst; + if (gpuobj->pinst != ~0) + gpuobj->pinst += ramin->start; - /* on nv50 it's ok to fail, we have a fallback path */ - if (!ramin && dev_priv->card_type < NV_50) { - nouveau_gpuobj_ref(NULL, &gpuobj); - return -ENOMEM; - } - } + if (dev_priv->card_type < NV_50) + gpuobj->cinst = gpuobj->pinst; + else + gpuobj->cinst = ramin->start; - /* if we got a chunk of the aperture, map pages into it */ - gpuobj->im_pramin = ramin; - if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) { - ret = engine->instmem.bind(dev, gpuobj); + gpuobj->vinst = ramin->start + chan->ramin->vinst; + gpuobj->node = ramin; + } else { + ret = instmem->get(gpuobj, size, align); if (ret) { nouveau_gpuobj_ref(NULL, &gpuobj); return ret; } - } - - /* calculate the various different addresses for the object */ - if (chan) { - gpuobj->pinst = chan->ramin->pinst; - if (gpuobj->pinst != ~0) - gpuobj->pinst += gpuobj->im_pramin->start; - if (dev_priv->card_type < NV_50) { - gpuobj->cinst = gpuobj->pinst; - } else { - gpuobj->cinst = gpuobj->im_pramin->start; - gpuobj->vinst = gpuobj->im_pramin->start + - chan->ramin->vinst; - } - } else { - if (gpuobj->im_pramin) - gpuobj->pinst = gpuobj->im_pramin->start; - else + ret = -ENOSYS; + if (dev_priv->ramin_available) + ret = instmem->map(gpuobj); + if (ret) gpuobj->pinst = ~0; - gpuobj->cinst = 0xdeadbeef; + + gpuobj->cinst = NVOBJ_CINST_GLOBAL; } if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { - int i; - for (i = 0; i < gpuobj->size; i += 4) nv_wo32(gpuobj, i, 0); - engine->instmem.flush(dev); + instmem->flush(dev); } @@ -326,26 +280,34 @@ nouveau_gpuobj_del(struct kref *ref) container_of(ref, struct nouveau_gpuobj, refcount); struct drm_device *dev = gpuobj->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_engine *engine = &dev_priv->engine; + struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; int i; NV_DEBUG(dev, "gpuobj %p\n", gpuobj); - if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { + if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { for (i = 0; i < gpuobj->size; i += 4) nv_wo32(gpuobj, i, 0); - engine->instmem.flush(dev); + instmem->flush(dev); } if (gpuobj->dtor) gpuobj->dtor(dev, gpuobj); - if (gpuobj->im_backing) - engine->instmem.clear(dev, 
gpuobj); + if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) { + if (gpuobj->node) { + instmem->unmap(gpuobj); + instmem->put(gpuobj); + } + } else { + if (gpuobj->node) { + spin_lock(&dev_priv->ramin_lock); + drm_mm_put_block(gpuobj->node); + spin_unlock(&dev_priv->ramin_lock); + } + } spin_lock(&dev_priv->ramin_lock); - if (gpuobj->im_pramin) - drm_mm_put_block(gpuobj->im_pramin); list_del(&gpuobj->list); spin_unlock(&dev_priv->ramin_lock); @@ -385,7 +347,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst, kref_init(&gpuobj->refcount); gpuobj->size = size; gpuobj->pinst = pinst; - gpuobj->cinst = 0xdeadbeef; + gpuobj->cinst = NVOBJ_CINST_GLOBAL; gpuobj->vinst = vinst; if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { @@ -935,7 +897,7 @@ nouveau_gpuobj_suspend(struct drm_device *dev) int i; list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) { - if (gpuobj->cinst != 0xdeadbeef) + if (gpuobj->cinst != NVOBJ_CINST_GLOBAL) continue; gpuobj->suspend = vmalloc(gpuobj->size); diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index b26b34c419cb..b42e29d1935e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -53,10 +53,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.takedown = nv04_instmem_takedown; engine->instmem.suspend = nv04_instmem_suspend; engine->instmem.resume = nv04_instmem_resume; - engine->instmem.populate = nv04_instmem_populate; - engine->instmem.clear = nv04_instmem_clear; - engine->instmem.bind = nv04_instmem_bind; - engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.get = nv04_instmem_get; + engine->instmem.put = nv04_instmem_put; + engine->instmem.map = nv04_instmem_map; + engine->instmem.unmap = nv04_instmem_unmap; engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; @@ -106,10 +106,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.takedown = nv04_instmem_takedown; engine->instmem.suspend = nv04_instmem_suspend; engine->instmem.resume = nv04_instmem_resume; - engine->instmem.populate = nv04_instmem_populate; - engine->instmem.clear = nv04_instmem_clear; - engine->instmem.bind = nv04_instmem_bind; - engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.get = nv04_instmem_get; + engine->instmem.put = nv04_instmem_put; + engine->instmem.map = nv04_instmem_map; + engine->instmem.unmap = nv04_instmem_unmap; engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; @@ -163,10 +163,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.takedown = nv04_instmem_takedown; engine->instmem.suspend = nv04_instmem_suspend; engine->instmem.resume = nv04_instmem_resume; - engine->instmem.populate = nv04_instmem_populate; - engine->instmem.clear = nv04_instmem_clear; - engine->instmem.bind = nv04_instmem_bind; - engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.get = nv04_instmem_get; + engine->instmem.put = nv04_instmem_put; + engine->instmem.map = nv04_instmem_map; + engine->instmem.unmap = nv04_instmem_unmap; engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; @@ -220,10 +220,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.takedown = nv04_instmem_takedown; engine->instmem.suspend = nv04_instmem_suspend; engine->instmem.resume = 
nv04_instmem_resume; - engine->instmem.populate = nv04_instmem_populate; - engine->instmem.clear = nv04_instmem_clear; - engine->instmem.bind = nv04_instmem_bind; - engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.get = nv04_instmem_get; + engine->instmem.put = nv04_instmem_put; + engine->instmem.map = nv04_instmem_map; + engine->instmem.unmap = nv04_instmem_unmap; engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; @@ -280,10 +280,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.takedown = nv04_instmem_takedown; engine->instmem.suspend = nv04_instmem_suspend; engine->instmem.resume = nv04_instmem_resume; - engine->instmem.populate = nv04_instmem_populate; - engine->instmem.clear = nv04_instmem_clear; - engine->instmem.bind = nv04_instmem_bind; - engine->instmem.unbind = nv04_instmem_unbind; + engine->instmem.get = nv04_instmem_get; + engine->instmem.put = nv04_instmem_put; + engine->instmem.map = nv04_instmem_map; + engine->instmem.unmap = nv04_instmem_unmap; engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv40_mc_init; engine->mc.takedown = nv40_mc_takedown; @@ -343,10 +343,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.takedown = nv50_instmem_takedown; engine->instmem.suspend = nv50_instmem_suspend; engine->instmem.resume = nv50_instmem_resume; - engine->instmem.populate = nv50_instmem_populate; - engine->instmem.clear = nv50_instmem_clear; - engine->instmem.bind = nv50_instmem_bind; - engine->instmem.unbind = nv50_instmem_unbind; + engine->instmem.get = nv50_instmem_get; + engine->instmem.put = nv50_instmem_put; + engine->instmem.map = nv50_instmem_map; + engine->instmem.unmap = nv50_instmem_unmap; if (dev_priv->chipset == 0x50) engine->instmem.flush = nv50_instmem_flush; else @@ -449,10 +449,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.takedown = nvc0_instmem_takedown; engine->instmem.suspend = nvc0_instmem_suspend; engine->instmem.resume = nvc0_instmem_resume; - engine->instmem.populate = nvc0_instmem_populate; - engine->instmem.clear = nvc0_instmem_clear; - engine->instmem.bind = nvc0_instmem_bind; - engine->instmem.unbind = nvc0_instmem_unbind; + engine->instmem.get = nvc0_instmem_get; + engine->instmem.put = nvc0_instmem_put; + engine->instmem.map = nvc0_instmem_map; + engine->instmem.unmap = nvc0_instmem_unmap; engine->instmem.flush = nvc0_instmem_flush; engine->mc.init = nv50_mc_init; engine->mc.takedown = nv50_mc_takedown; diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index 554e55d0ec48..b8e3edb5c063 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c @@ -98,42 +98,66 @@ nv04_instmem_takedown(struct drm_device *dev) } int -nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, - u32 *size, u32 align) +nv04_instmem_suspend(struct drm_device *dev) { return 0; } void -nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) -{ -} - -int -nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +nv04_instmem_resume(struct drm_device *dev) { - return 0; } int -nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) { + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct drm_mm_node *ramin = NULL; + + do { + if 
(drm_mm_pre_get(&dev_priv->ramin_heap)) + return -ENOMEM; + + spin_lock(&dev_priv->ramin_lock); + ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0); + if (ramin == NULL) { + spin_unlock(&dev_priv->ramin_lock); + return -ENOMEM; + } + + ramin = drm_mm_get_block_atomic(ramin, size, align); + spin_unlock(&dev_priv->ramin_lock); + } while (ramin == NULL); + + gpuobj->node = ramin; + gpuobj->vinst = ramin->start; return 0; } void -nv04_instmem_flush(struct drm_device *dev) +nv04_instmem_put(struct nouveau_gpuobj *gpuobj) { + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + + spin_lock(&dev_priv->ramin_lock); + drm_mm_put_block(gpuobj->node); + gpuobj->node = NULL; + spin_unlock(&dev_priv->ramin_lock); } int -nv04_instmem_suspend(struct drm_device *dev) +nv04_instmem_map(struct nouveau_gpuobj *gpuobj) { + gpuobj->pinst = gpuobj->vinst; return 0; } void -nv04_instmem_resume(struct drm_device *dev) +nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj) { } +void +nv04_instmem_flush(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 1640c12d8b3a..87160952a30b 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -157,10 +157,7 @@ nv50_instmem_init(struct drm_device *dev) nv_wo32(priv->pramin_bar, 0x10, 0x00000000); nv_wo32(priv->pramin_bar, 0x14, 0x00000000); - /* map channel into PRAMIN, gpuobj didn't do it for us */ - ret = nv50_instmem_bind(dev, chan->ramin); - if (ret) - return ret; + nv50_instmem_map(chan->ramin); /* poke regs... */ nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12)); @@ -305,72 +302,91 @@ nv50_instmem_resume(struct drm_device *dev) dev_priv->ramin_available = true; } +struct nv50_gpuobj_node { + struct nouveau_bo *vram; + struct drm_mm_node *ramin; + u32 align; +}; + + int -nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, - u32 *size, u32 align) +nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) { + struct drm_device *dev = gpuobj->dev; + struct nv50_gpuobj_node *node = NULL; int ret; - if (gpuobj->im_backing) - return -EINVAL; - - *size = ALIGN(*size, 4096); - if (*size == 0) - return -EINVAL; + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + node->align = align; - ret = nouveau_bo_new(dev, NULL, *size, align, TTM_PL_FLAG_VRAM, - 0, 0x0000, true, false, &gpuobj->im_backing); + ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM, + 0, 0x0000, true, false, &node->vram); if (ret) { NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); return ret; } - ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM); + ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM); if (ret) { NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); - nouveau_bo_ref(NULL, &gpuobj->im_backing); + nouveau_bo_ref(NULL, &node->vram); return ret; } - gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; + gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT; + gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT; + gpuobj->node = node; return 0; } void -nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +nv50_instmem_put(struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_gpuobj_node *node; - if (gpuobj && gpuobj->im_backing) { - if (gpuobj->im_bound) - dev_priv->engine.instmem.unbind(dev, gpuobj); - nouveau_bo_unpin(gpuobj->im_backing); - 
nouveau_bo_ref(NULL, &gpuobj->im_backing); - gpuobj->im_backing = NULL; - } + node = gpuobj->node; + gpuobj->node = NULL; + + nouveau_bo_unpin(node->vram); + nouveau_bo_ref(NULL, &node->vram); + kfree(node); } int -nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +nv50_instmem_map(struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; - struct nouveau_gpuobj *pramin_pt = priv->pramin_pt; - uint32_t pte, pte_end; - uint64_t vram; - - if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) - return -EINVAL; + struct nv50_gpuobj_node *node = gpuobj->node; + struct drm_device *dev = gpuobj->dev; + struct drm_mm_node *ramin = NULL; + u32 pte, pte_end; + u64 vram; + + do { + if (drm_mm_pre_get(&dev_priv->ramin_heap)) + return -ENOMEM; + + spin_lock(&dev_priv->ramin_lock); + ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size, + node->align, 0); + if (ramin == NULL) { + spin_unlock(&dev_priv->ramin_lock); + return -ENOMEM; + } - NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n", - gpuobj->im_pramin->start, gpuobj->im_pramin->size); + ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align); + spin_unlock(&dev_priv->ramin_lock); + } while (ramin == NULL); - pte = (gpuobj->im_pramin->start >> 12) << 1; - pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; + pte = (ramin->start >> 12) << 1; + pte_end = ((ramin->size >> 12) << 1) + pte; vram = gpuobj->vinst; NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", - gpuobj->im_pramin->start, pte, pte_end); + ramin->start, pte, pte_end); NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); vram |= 1; @@ -380,8 +396,8 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) } while (pte < pte_end) { - nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram)); - nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram)); + nv_wo32(priv->pramin_pt, (pte * 4) + 0, lower_32_bits(vram)); + nv_wo32(priv->pramin_pt, (pte * 4) + 4, upper_32_bits(vram)); vram += 0x1000; pte += 2; } @@ -389,36 +405,36 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) nv50_vm_flush(dev, 6); - gpuobj->im_bound = 1; + node->ramin = ramin; + gpuobj->pinst = ramin->start; return 0; } -int -nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +void +nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; - uint32_t pte, pte_end; - - if (gpuobj->im_bound == 0) - return -EINVAL; + struct nv50_gpuobj_node *node = gpuobj->node; + u32 pte, pte_end; - /* can happen during late takedown */ - if (unlikely(!dev_priv->ramin_available)) - return 0; + if (!node->ramin || !dev_priv->ramin_available) + return; - pte = (gpuobj->im_pramin->start >> 12) << 1; - pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; + pte = (node->ramin->start >> 12) << 1; + pte_end = ((node->ramin->size >> 12) << 1) + pte; while (pte < pte_end) { nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000); nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000); pte += 2; } - dev_priv->engine.instmem.flush(dev); + dev_priv->engine.instmem.flush(gpuobj->dev); - gpuobj->im_bound = 0; - return 0; + spin_lock(&dev_priv->ramin_lock); + drm_mm_put_block(node->ramin); + node->ramin 
= NULL; + spin_unlock(&dev_priv->ramin_lock); } void diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 7b4e71f5c274..39232085193d 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c @@ -26,67 +26,89 @@ #include "nouveau_drv.h" +struct nvc0_gpuobj_node { + struct nouveau_bo *vram; + struct drm_mm_node *ramin; + u32 align; +}; + int -nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, - u32 *size, u32 align) +nvc0_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) { + struct drm_device *dev = gpuobj->dev; + struct nvc0_gpuobj_node *node = NULL; int ret; - *size = ALIGN(*size, 4096); - if (*size == 0) - return -EINVAL; + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + node->align = align; - ret = nouveau_bo_new(dev, NULL, *size, align, TTM_PL_FLAG_VRAM, - 0, 0x0000, true, false, &gpuobj->im_backing); + ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM, + 0, 0x0000, true, false, &node->vram); if (ret) { NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); return ret; } - ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM); + ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM); if (ret) { NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); - nouveau_bo_ref(NULL, &gpuobj->im_backing); + nouveau_bo_ref(NULL, &node->vram); return ret; } - gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; + gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT; + gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT; + gpuobj->node = node; return 0; } void -nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +nvc0_instmem_put(struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvc0_gpuobj_node *node; - if (gpuobj && gpuobj->im_backing) { - if (gpuobj->im_bound) - dev_priv->engine.instmem.unbind(dev, gpuobj); - nouveau_bo_unpin(gpuobj->im_backing); - nouveau_bo_ref(NULL, &gpuobj->im_backing); - gpuobj->im_backing = NULL; - } + node = gpuobj->node; + gpuobj->node = NULL; + + nouveau_bo_unpin(node->vram); + nouveau_bo_ref(NULL, &node->vram); + kfree(node); } int -nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +nvc0_instmem_map(struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t pte, pte_end; - uint64_t vram; - - if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) - return -EINVAL; - - NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n", - gpuobj->im_pramin->start, gpuobj->im_pramin->size); - - pte = gpuobj->im_pramin->start >> 12; - pte_end = (gpuobj->im_pramin->size >> 12) + pte; + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct nvc0_gpuobj_node *node = gpuobj->node; + struct drm_device *dev = gpuobj->dev; + struct drm_mm_node *ramin = NULL; + u32 pte, pte_end; + u64 vram; + + do { + if (drm_mm_pre_get(&dev_priv->ramin_heap)) + return -ENOMEM; + + spin_lock(&dev_priv->ramin_lock); + ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size, + node->align, 0); + if (ramin == NULL) { + spin_unlock(&dev_priv->ramin_lock); + return -ENOMEM; + } + + ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align); + spin_unlock(&dev_priv->ramin_lock); + } while (ramin == NULL); + + pte = (ramin->start >> 12) << 1; + pte_end = ((ramin->size >> 12) << 1) + pte; vram = gpuobj->vinst; NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", 
- gpuobj->im_pramin->start, pte, pte_end); + ramin->start, pte, pte_end); NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); while (pte < pte_end) { @@ -103,30 +125,35 @@ nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) nv_wr32(dev, 0x100cbc, 0x80000005); } - gpuobj->im_bound = 1; + node->ramin = ramin; + gpuobj->pinst = ramin->start; return 0; } -int -nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +void +nvc0_instmem_unmap(struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t pte, pte_end; + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; + struct nvc0_gpuobj_node *node = gpuobj->node; + u32 pte, pte_end; - if (gpuobj->im_bound == 0) - return -EINVAL; + if (!node->ramin || !dev_priv->ramin_available) + return; + + pte = (node->ramin->start >> 12) << 1; + pte_end = ((node->ramin->size >> 12) << 1) + pte; - pte = gpuobj->im_pramin->start >> 12; - pte_end = (gpuobj->im_pramin->size >> 12) + pte; while (pte < pte_end) { - nv_wr32(dev, 0x702000 + (pte * 8), 0); - nv_wr32(dev, 0x702004 + (pte * 8), 0); + nv_wr32(gpuobj->dev, 0x702000 + (pte * 8), 0); + nv_wr32(gpuobj->dev, 0x702004 + (pte * 8), 0); pte++; } - dev_priv->engine.instmem.flush(dev); + dev_priv->engine.instmem.flush(gpuobj->dev); - gpuobj->im_bound = 0; - return 0; + spin_lock(&dev_priv->ramin_lock); + drm_mm_put_block(node->ramin); + node->ramin = NULL; + spin_unlock(&dev_priv->ramin_lock); } void -- cgit v1.2.3 From 20f63afe988a25b0a4d991e87b41f76ee14e2a84 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 15 Nov 2010 12:50:50 +1000 Subject: drm/nv50: allocate page for unknown PFB object in nv50_fb.c Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nv50_fb.c | 56 ++++++++++++++++++++++++++++++++++- 2 files changed, 56 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 822cd40b3eb4..e1619c674380 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -311,6 +311,7 @@ struct nouveau_timer_engine { struct nouveau_fb_engine { int num_tiles; struct drm_mm tag_heap; + void *priv; int (*init)(struct drm_device *dev); void (*takedown)(struct drm_device *dev); diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c index d745c952986a..aa6d78366078 100644 --- a/drivers/gpu/drm/nouveau/nv50_fb.c +++ b/drivers/gpu/drm/nouveau/nv50_fb.c @@ -3,16 +3,58 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" +struct nv50_fb_priv { + struct page *r100c08_page; + dma_addr_t r100c08; +}; + +static int +nv50_fb_create(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_fb_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!priv->r100c08_page) { + kfree(priv); + return -ENOMEM; + } + + priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) { + __free_page(priv->r100c08_page); + kfree(priv); + return -EFAULT; + } + + dev_priv->engine.fb.priv = priv; + return 0; +} + int nv50_fb_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_fb_priv *priv; + int ret; + + if (!dev_priv->engine.fb.priv) { + ret = nv50_fb_create(dev); + if (ret) + 
return ret; + } + priv = dev_priv->engine.fb.priv; /* Not a clue what this is exactly. Without pointing it at a * scratch page, VRAM->GART blits with M2MF (as in DDX DFS) * cause IOMMU "read from address 0" errors (rh#561267) */ - nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8); + nv_wr32(dev, 0x100c08, priv->r100c08 >> 8); /* This is needed to get meaningful information from 100c90 * on traps. No idea what these values mean exactly. */ @@ -36,6 +78,18 @@ nv50_fb_init(struct drm_device *dev) void nv50_fb_takedown(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nv50_fb_priv *priv; + + priv = dev_priv->engine.fb.priv; + if (!priv) + return; + dev_priv->engine.fb.priv = NULL; + + pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + __free_page(priv->r100c08_page); + kfree(priv); } void -- cgit v1.2.3 From 9fea1bcbff037cb61f71e0d699180030b1f509a5 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 15 Nov 2010 12:52:23 +1000 Subject: drm/nv50: fix 0x100c90 init for NVAF Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_fb.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c index aa6d78366078..50290dea0ac4 100644 --- a/drivers/gpu/drm/nouveau/nv50_fb.c +++ b/drivers/gpu/drm/nouveau/nv50_fb.c @@ -60,15 +60,18 @@ nv50_fb_init(struct drm_device *dev) * on traps. No idea what these values mean exactly. */ switch (dev_priv->chipset) { case 0x50: - nv_wr32(dev, 0x100c90, 0x0707ff); + nv_wr32(dev, 0x100c90, 0x000707ff); break; case 0xa3: case 0xa5: case 0xa8: - nv_wr32(dev, 0x100c90, 0x0d0fff); + nv_wr32(dev, 0x100c90, 0x000d0fff); + break; + case 0xaf: + nv_wr32(dev, 0x100c90, 0x089d1fff); break; default: - nv_wr32(dev, 0x100c90, 0x1d07ff); + nv_wr32(dev, 0x100c90, 0x001d07ff); break; } -- cgit v1.2.3 From 7b4808bb6ee63c9cc9c9be5a52f0c7babfc50659 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 15 Nov 2010 12:54:57 +1000 Subject: drm/nouveau: remove dummy page use from PCI(E)GART, use PTE present instead Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 -- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 38 +++------------------------------ 2 files changed, 3 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e1619c674380..d76d2c09049d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -672,8 +672,6 @@ struct drm_nouveau_private { uint64_t aper_free; struct nouveau_gpuobj *sg_ctxdma; - struct page *sg_dummy_page; - dma_addr_t sg_dummy_bus; } gart_info; /* nv10-nv40 tiling regions */ diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index d4ac97007038..54af7608d45c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -144,19 +144,15 @@ nouveau_sgdma_unbind(struct ttm_backend *be) pte = nvbe->pte_start; for (i = 0; i < nvbe->nr_pages; i++) { - dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus; - for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { if (dev_priv->card_type < NV_50) { - nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3); + nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000); pte += 1; } else { nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000); nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000); pte += 2; } - - dma_offset += NV_CTXDMA_PAGE_SIZE; } } 
dev_priv->engine.instmem.flush(nvbe->dev); @@ -218,7 +214,6 @@ int nouveau_sgdma_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct pci_dev *pdev = dev->pdev; struct nouveau_gpuobj *gpuobj = NULL; uint32_t aper_size, obj_size; int i, ret; @@ -245,22 +240,6 @@ nouveau_sgdma_init(struct drm_device *dev) return ret; } - dev_priv->gart_info.sg_dummy_page = - alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO); - if (!dev_priv->gart_info.sg_dummy_page) { - nouveau_gpuobj_ref(NULL, &gpuobj); - return -ENOMEM; - } - - set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags); - dev_priv->gart_info.sg_dummy_bus = - pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0, - PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) { - nouveau_gpuobj_ref(NULL, &gpuobj); - return -EFAULT; - } - if (dev_priv->card_type < NV_50) { /* special case, allocated from global instmem heap so * cinst is invalid, we use it on all channels though so @@ -277,10 +256,8 @@ nouveau_sgdma_init(struct drm_device *dev) (NV_DMA_ACCESS_RW << 14) | (NV_DMA_TARGET_PCI << 16)); nv_wo32(gpuobj, 4, aper_size - 1); - for (i = 2; i < 2 + (aper_size >> 12); i++) { - nv_wo32(gpuobj, i * 4, - dev_priv->gart_info.sg_dummy_bus | 3); - } + for (i = 2; i < 2 + (aper_size >> 12); i++) + nv_wo32(gpuobj, i * 4, 0x00000000); } else { for (i = 0; i < obj_size; i += 8) { nv_wo32(gpuobj, i + 0, 0x00000000); @@ -301,15 +278,6 @@ nouveau_sgdma_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - if (dev_priv->gart_info.sg_dummy_page) { - pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus, - NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - unlock_page(dev_priv->gart_info.sg_dummy_page); - __free_page(dev_priv->gart_info.sg_dummy_page); - dev_priv->gart_info.sg_dummy_page = NULL; - dev_priv->gart_info.sg_dummy_bus = 0; - } - nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma); } -- cgit v1.2.3 From 2cb3d3b6c64d37514fd8865748de66ff35a489af Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 15 Nov 2010 16:28:19 +1000 Subject: drm/nv84: fix minor issues in PCRYPT implementation Fix running of destroy_context() when create_context() has never been called for the channel, and fill in engine's tlb_flush() function pointer. 
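For context on the first fix: destroy_context() can run for a channel that never went through create_context(), and chan->ramin is allocated for every channel, so guarding on it let the teardown proceed against crypt state that was never set up. Guarding on the state create_context() itself allocates avoids that. A minimal sketch of the pattern (example_destroy_context is hypothetical; chan->crypt_ctx and chan->ramin come from the hunk below):

  static void example_destroy_context(struct nouveau_channel *chan)
  {
          /* Bail unless create_context() actually ran for this channel;
           * chan->ramin exists for every channel and proves nothing here.
           */
          if (!chan->crypt_ctx)
                  return;
          /* ... tear down and clear chan->crypt_ctx ... */
  }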
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_state.c | 1 + drivers/gpu/drm/nouveau/nv84_crypt.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index b42e29d1935e..75e70022fbd3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -437,6 +437,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->crypt.takedown = nv84_crypt_fini; engine->crypt.create_context = nv84_crypt_create_context; engine->crypt.destroy_context = nv84_crypt_destroy_context; + engine->crypt.tlb_flush = nv84_crypt_tlb_flush; break; default: engine->crypt.init = nouveau_stub_init; diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c index 1cda0240f55d..780bb1d66f95 100644 --- a/drivers/gpu/drm/nouveau/nv84_crypt.c +++ b/drivers/gpu/drm/nouveau/nv84_crypt.c @@ -61,7 +61,7 @@ nv84_crypt_destroy_context(struct nouveau_channel *chan) struct drm_device *dev = chan->dev; u32 inst; - if (!chan->ramin) + if (!chan->crypt_ctx) return; inst = (chan->ramin->vinst >> 12); -- cgit v1.2.3 From 6d6c5a157af45a5bd50ab913b07d826811a9ea0a Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 16 Nov 2010 10:17:53 +1000 Subject: drm/nouveau: remove some useless GETPARAMs These have been unused since UMS support was ripped out, so let's remove them completely. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_state.c | 17 +---------------- include/drm/nouveau_drm.h | 3 --- 2 files changed, 1 insertion(+), 19 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 75e70022fbd3..35b28406caf6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -1074,21 +1074,6 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, else getparam->value = NV_PCI; break; - case NOUVEAU_GETPARAM_FB_PHYSICAL: - getparam->value = dev_priv->fb_phys; - break; - case NOUVEAU_GETPARAM_AGP_PHYSICAL: - getparam->value = dev_priv->gart_info.aper_base; - break; - case NOUVEAU_GETPARAM_PCI_PHYSICAL: - if (dev->sg) { - getparam->value = (unsigned long)dev->sg->virtual; - } else { - NV_ERROR(dev, "Requested PCIGART address, " - "while no PCIGART was created\n"); - return -EINVAL; - } - break; case NOUVEAU_GETPARAM_FB_SIZE: getparam->value = dev_priv->fb_available_size; break; @@ -1096,7 +1081,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, getparam->value = dev_priv->gart_info.aper_size; break; case NOUVEAU_GETPARAM_VM_VRAM_BASE: - getparam->value = dev_priv->vm_vram_base; + getparam->value = 0; /* deprecated */ break; case NOUVEAU_GETPARAM_PTIMER_TIME: getparam->value = dev_priv->engine.timer.read(dev); diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h index 39aa0fd6a529..e2cfe80f6fca 100644 --- a/include/drm/nouveau_drm.h +++ b/include/drm/nouveau_drm.h @@ -71,11 +71,8 @@ struct drm_nouveau_gpuobj_free { #define NOUVEAU_GETPARAM_PCI_VENDOR 3 #define NOUVEAU_GETPARAM_PCI_DEVICE 4 #define NOUVEAU_GETPARAM_BUS_TYPE 5 -#define NOUVEAU_GETPARAM_FB_PHYSICAL 6 -#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7 #define NOUVEAU_GETPARAM_FB_SIZE 8 #define NOUVEAU_GETPARAM_AGP_SIZE 9 -#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10 #define NOUVEAU_GETPARAM_CHIPSET_ID 11 #define NOUVEAU_GETPARAM_VM_VRAM_BASE 12 #define NOUVEAU_GETPARAM_GRAPH_UNITS 13 -- cgit v1.2.3 From 7f4a195fcbd8b16f25f1de7f1419414d7505daa5 Mon Sep 17
00:00:00 2001 From: Ben Skeggs Date: Tue, 16 Nov 2010 11:50:09 +1000 Subject: drm/nouveau: tidy up and extend dma object creation interfaces Reviewed-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 23 ++- drivers/gpu/drm/nouveau/nouveau_drv.h | 26 +++- drivers/gpu/drm/nouveau/nouveau_fence.c | 4 +- drivers/gpu/drm/nouveau/nouveau_notifier.c | 29 +--- drivers/gpu/drm/nouveau/nouveau_object.c | 235 +++++++++++++++++------------ drivers/gpu/drm/nouveau/nouveau_reg.h | 11 -- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 7 +- drivers/gpu/drm/nouveau/nouveau_state.c | 9 +- 8 files changed, 184 insertions(+), 160 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 11b2370e16da..0f33132fba3b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -39,22 +39,22 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) if (dev_priv->card_type >= NV_50) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, - dev_priv->vm_end, NV_DMA_ACCESS_RO, - NV_DMA_TARGET_AGP, &pushbuf); + dev_priv->vm_end, NV_MEM_ACCESS_RO, + NV_MEM_TARGET_VM, &pushbuf); chan->pushbuf_base = pb->bo.offset; } else if (pb->bo.mem.mem_type == TTM_PL_TT) { - ret = nouveau_gpuobj_gart_dma_new(chan, 0, - dev_priv->gart_info.aper_size, - NV_DMA_ACCESS_RO, &pushbuf, - NULL); + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, + dev_priv->gart_info.aper_size, + NV_MEM_ACCESS_RO, + NV_MEM_TARGET_GART, &pushbuf); chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } else if (dev_priv->card_type != NV_04) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->fb_available_size, - NV_DMA_ACCESS_RO, - NV_DMA_TARGET_VIDMEM, &pushbuf); + NV_MEM_ACCESS_RO, + NV_MEM_TARGET_VRAM, &pushbuf); chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } else { /* NV04 cmdbuf hack, from original ddx.. not sure of it's @@ -62,11 +62,10 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) * VRAM. 
*/ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - pci_resource_start(dev->pdev, - 1), + pci_resource_start(dev->pdev, 1), dev_priv->fb_available_size, - NV_DMA_ACCESS_RO, - NV_DMA_TARGET_PCI, &pushbuf); + NV_MEM_ACCESS_RO, + NV_MEM_TARGET_PCI, &pushbuf); chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index d76d2c09049d..a52b1da32031 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -886,12 +886,14 @@ extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst, extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, uint64_t offset, uint64_t size, int access, int target, struct nouveau_gpuobj **); -extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *, - uint64_t offset, uint64_t size, - int access, struct nouveau_gpuobj **, - uint32_t *o_ret); extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, struct nouveau_gpuobj **); +extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base, + u64 size, int target, int access, u32 type, + u32 comp, struct nouveau_gpuobj **pobj); +extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset, + int class, u64 base, u64 size, int target, + int access, u32 type, u32 comp); extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data, struct drm_file *); extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data, @@ -1545,6 +1547,22 @@ nv_match_device(struct drm_device *dev, unsigned device, dev->pdev->subsystem_device == sub_device; } +/* memory type/access flags, do not match hardware values */ +#define NV_MEM_ACCESS_RO 1 +#define NV_MEM_ACCESS_WO 2 +#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO) +#define NV_MEM_ACCESS_VM 4 + +#define NV_MEM_TARGET_VRAM 0 +#define NV_MEM_TARGET_PCI 1 +#define NV_MEM_TARGET_PCI_NOSNOOP 2 +#define NV_MEM_TARGET_VM 3 +#define NV_MEM_TARGET_GART 4 + +#define NV_MEM_TYPE_VM 0x7f +#define NV_MEM_COMP_VM 0x03 + +/* NV_SW object class */ #define NV_SW 0x0000506e #define NV_SW_DMA_SEMAPHORE 0x00000060 #define NV_SW_SEMAPHORE_OFFSET 0x00000064 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 91aa6c54cc96..2579fc69d182 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -459,8 +459,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, mem->start << PAGE_SHIFT, mem->size << PAGE_SHIFT, - NV_DMA_ACCESS_RW, - NV_DMA_TARGET_VIDMEM, &obj); + NV_MEM_ACCESS_RW, + NV_MEM_TARGET_VRAM, &obj); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 2c5a1f66f7f0..a050b7b69782 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -99,7 +99,6 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, int size, uint32_t *b_offset) { struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *nobj = NULL; struct drm_mm_node *mem; uint32_t offset; @@ -113,31 +112,15 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, return -ENOMEM; } - offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; - if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) { - target = NV_DMA_TARGET_VIDMEM; - } else 
- if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) { - if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA && - dev_priv->card_type < NV_50) { - ret = nouveau_sgdma_get_page(dev, offset, &offset); - if (ret) - return ret; - target = NV_DMA_TARGET_PCI; - } else { - target = NV_DMA_TARGET_AGP; - if (dev_priv->card_type >= NV_50) - offset += dev_priv->vm_gart_base; - } - } else { - NV_ERROR(dev, "Bad DMA target, mem_type %d!\n", - chan->notifier_bo->bo.mem.mem_type); - return -EINVAL; - } + if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) + target = NV_MEM_TARGET_VRAM; + else + target = NV_MEM_TARGET_GART; + offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT; offset += mem->start; ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset, - mem->size, NV_DMA_ACCESS_RW, target, + mem->size, NV_MEM_ACCESS_RW, target, &nobj); if (ret) { drm_mm_put_block(mem); diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index e8c74de905ec..924653c30783 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -404,113 +404,157 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class) The method below creates a DMA object in instance RAM and returns a handle to it that can be used to set up context objects. */ -int -nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, - uint64_t offset, uint64_t size, int access, - int target, struct nouveau_gpuobj **gpuobj) + +void +nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class, + u64 base, u64 size, int target, int access, + u32 type, u32 comp) { - struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; - int ret; + struct drm_nouveau_private *dev_priv = obj->dev->dev_private; + struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; + u32 flags0; - NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n", - chan->id, class, offset, size); - NV_DEBUG(dev, "access=%d target=%d\n", access, target); + flags0 = (comp << 29) | (type << 22) | class; + flags0 |= 0x00100000; + + switch (access) { + case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break; + case NV_MEM_ACCESS_RW: + case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break; + default: + break; + } switch (target) { - case NV_DMA_TARGET_AGP: - offset += dev_priv->gart_info.aper_base; + case NV_MEM_TARGET_VRAM: + flags0 |= 0x00010000; + break; + case NV_MEM_TARGET_PCI: + flags0 |= 0x00020000; + break; + case NV_MEM_TARGET_PCI_NOSNOOP: + flags0 |= 0x00030000; break; + case NV_MEM_TARGET_GART: + base += dev_priv->vm_gart_base; default: + flags0 &= ~0x00100000; break; } - ret = nouveau_gpuobj_new(dev, chan, - nouveau_gpuobj_class_instmem_size(dev, class), - 16, NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, gpuobj); - if (ret) { - NV_ERROR(dev, "Error creating gpuobj: %d\n", ret); - return ret; - } + /* convert to base + limit */ + size = (base + size) - 1; - if (dev_priv->card_type < NV_50) { - uint32_t frame, adjust, pte_flags = 0; - - if (access != NV_DMA_ACCESS_RO) - pte_flags |= (1<<1); - adjust = offset & 0x00000fff; - frame = offset & ~0x00000fff; - - nv_wo32(*gpuobj, 0, ((1<<12) | (1<<13) | (adjust << 20) | - (access << 14) | (target << 16) | - class)); - nv_wo32(*gpuobj, 4, size - 1); - nv_wo32(*gpuobj, 8, frame | pte_flags); - nv_wo32(*gpuobj, 12, frame | pte_flags); - } else { - uint64_t limit = offset + size - 1; - uint32_t flags0, 
flags5; + nv_wo32(obj, offset + 0x00, flags0); + nv_wo32(obj, offset + 0x04, lower_32_bits(size)); + nv_wo32(obj, offset + 0x08, lower_32_bits(base)); + nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 | + upper_32_bits(base)); + nv_wo32(obj, offset + 0x10, 0x00000000); + nv_wo32(obj, offset + 0x14, 0x00000000); - if (target == NV_DMA_TARGET_VIDMEM) { - flags0 = 0x00190000; - flags5 = 0x00010000; - } else { - flags0 = 0x7fc00000; - flags5 = 0x00080000; - } + pinstmem->flush(obj->dev); +} - nv_wo32(*gpuobj, 0, flags0 | class); - nv_wo32(*gpuobj, 4, lower_32_bits(limit)); - nv_wo32(*gpuobj, 8, lower_32_bits(offset)); - nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) | - (upper_32_bits(offset) & 0xff)); - nv_wo32(*gpuobj, 20, flags5); - } +int +nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size, + int target, int access, u32 type, u32 comp, + struct nouveau_gpuobj **pobj) +{ + struct drm_device *dev = chan->dev; + int ret; - instmem->flush(dev); + ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, pobj); + if (ret) + return ret; - (*gpuobj)->engine = NVOBJ_ENGINE_SW; - (*gpuobj)->class = class; + nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target, + access, type, comp); return 0; } int -nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan, - uint64_t offset, uint64_t size, int access, - struct nouveau_gpuobj **gpuobj, - uint32_t *o_ret) +nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, + u64 size, int access, int target, + struct nouveau_gpuobj **pobj) { + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *obj; + u32 page_addr, flags0, flags2; int ret; - if (dev_priv->gart_info.type == NOUVEAU_GART_AGP || - (dev_priv->card_type >= NV_50 && - dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) { - ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - offset + dev_priv->vm_gart_base, - size, access, NV_DMA_TARGET_AGP, - gpuobj); - if (o_ret) - *o_ret = 0; - } else - if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) { - nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj); - if (offset & ~0xffffffffULL) { - NV_ERROR(dev, "obj offset exceeds 32-bits\n"); - return -EINVAL; + if (dev_priv->card_type >= NV_50) { + u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0; + u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0; + + return nv50_gpuobj_dma_new(chan, class, base, size, + target, access, type, comp, pobj); + } + + if (target == NV_MEM_TARGET_GART) { + if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { + target = NV_MEM_TARGET_PCI_NOSNOOP; + base += dev_priv->gart_info.aper_base; + } else + if (base != 0) { + ret = nouveau_sgdma_get_page(dev, base, &page_addr); + if (ret) + return ret; + + target = NV_MEM_TARGET_PCI; + base = page_addr; + } else { + nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj); + return 0; } - if (o_ret) - *o_ret = (uint32_t)offset; - ret = (*gpuobj != NULL) ? 
0 : -EINVAL; - } else { - NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type); - return -EINVAL; } - return ret; + flags0 = class; + flags0 |= 0x00003000; /* PT present, PT linear */ + flags2 = 0; + + switch (target) { + case NV_MEM_TARGET_PCI: + flags0 |= 0x00020000; + break; + case NV_MEM_TARGET_PCI_NOSNOOP: + flags0 |= 0x00030000; + break; + default: + break; + } + + switch (access) { + case NV_MEM_ACCESS_RO: + flags0 |= 0x00004000; + break; + case NV_MEM_ACCESS_WO: + flags0 |= 0x00008000; + default: + flags2 |= 0x00000002; + break; + } + + flags0 |= (base & 0x00000fff) << 20; + flags2 |= (base & 0xfffff000); + + ret = nouveau_gpuobj_new(dev, chan, (dev_priv->card_type >= NV_40) ? + 32 : 16, 16, NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &obj); + if (ret) + return ret; + + nv_wo32(obj, 0x00, flags0); + nv_wo32(obj, 0x04, size - 1); + nv_wo32(obj, 0x08, flags2); + nv_wo32(obj, 0x0c, flags2); + + obj->engine = NVOBJ_ENGINE_SW; + obj->class = class; + *pobj = obj; + return 0; } /* Context objects in the instance RAM have the following structure. @@ -806,8 +850,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, if (dev_priv->card_type >= NV_50) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->vm_end, - NV_DMA_ACCESS_RW, - NV_DMA_TARGET_AGP, &vram); + NV_MEM_ACCESS_RW, + NV_MEM_TARGET_VM, &vram); if (ret) { NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); return ret; @@ -815,8 +859,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, } else { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->fb_available_size, - NV_DMA_ACCESS_RW, - NV_DMA_TARGET_VIDMEM, &vram); + NV_MEM_ACCESS_RW, + NV_MEM_TARGET_VRAM, &vram); if (ret) { NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); return ret; @@ -834,20 +878,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, if (dev_priv->card_type >= NV_50) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->vm_end, - NV_DMA_ACCESS_RW, - NV_DMA_TARGET_AGP, &tt); - if (ret) { - NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); - return ret; - } - } else - if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) { - ret = nouveau_gpuobj_gart_dma_new(chan, 0, - dev_priv->gart_info.aper_size, - NV_DMA_ACCESS_RW, &tt, NULL); + NV_MEM_ACCESS_RW, + NV_MEM_TARGET_VM, &tt); } else { - NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type); - ret = -EINVAL; + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->gart_info.aper_size, + NV_MEM_ACCESS_RW, + NV_MEM_TARGET_GART, &tt); } if (ret) { diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index df3a87e792f2..04e8fb795269 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -79,17 +79,6 @@ # define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20 # define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0 -/* DMA object defines */ -#define NV_DMA_ACCESS_RW 0 -#define NV_DMA_ACCESS_RO 1 -#define NV_DMA_ACCESS_WO 2 -#define NV_DMA_TARGET_VIDMEM 0 -#define NV_DMA_TARGET_PCI 2 -#define NV_DMA_TARGET_AGP 3 -/* The following is not a real value used by the card, it's changed by - * nouveau_object_dma_create */ -#define NV_DMA_TARGET_PCI_NONLINEAR 8 - /* Some object classes we care about in the drm */ #define NV_CLASS_DMA_FROM_MEMORY 0x00000002 #define NV_CLASS_DMA_TO_MEMORY 0x00000003 diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 54af7608d45c..db32644f6114 
100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -247,14 +247,11 @@ nouveau_sgdma_init(struct drm_device *dev) */ gpuobj->cinst = gpuobj->pinst; - /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and - * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE - * on those cards? */ nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | (1 << 12) /* PT present */ | (0 << 13) /* PT *not* linear */ | - (NV_DMA_ACCESS_RW << 14) | - (NV_DMA_TARGET_PCI << 16)); + (0 << 14) /* RW */ | + (2 << 16) /* PCI */); nv_wo32(gpuobj, 4, aper_size - 1); for (i = 2; i < 2 + (aper_size >> 12); i++) nv_wo32(gpuobj, i * 4, 0x00000000); diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 35b28406caf6..e779e9320453 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -536,7 +536,7 @@ nouveau_card_init_channel(struct drm_device *dev) ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->vram_size, - NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, + NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM, &gpuobj); if (ret) goto out_err; @@ -546,9 +546,10 @@ nouveau_card_init_channel(struct drm_device *dev) if (ret) goto out_err; - ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0, - dev_priv->gart_info.aper_size, - NV_DMA_ACCESS_RW, &gpuobj, NULL); + ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, + 0, dev_priv->gart_info.aper_size, + NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART, + &gpuobj); if (ret) goto out_err; -- cgit v1.2.3 From 2e5702aff39532662198459726c624d5eadbdd78 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Fri, 19 Nov 2010 18:08:47 +1000 Subject: drm/nouveau: fabricate DCB encoder table for iMac G4 In typical Apple fashion there's no standard information about what encoders are present on this machine, this patch adds a quirk to provide it. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bios.c | 102 ++++++++++++--------------------- 1 file changed, 38 insertions(+), 64 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index b2293576f278..d3046559bf05 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -6053,52 +6053,17 @@ static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb) return entry; } -static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads) +static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c, + int heads, int or) { struct dcb_entry *entry = new_dcb_entry(dcb); - entry->type = 0; + entry->type = type; entry->i2c_index = i2c; entry->heads = heads; - entry->location = DCB_LOC_ON_CHIP; - entry->or = 1; -} - -static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads) -{ - struct dcb_entry *entry = new_dcb_entry(dcb); - - entry->type = 2; - entry->i2c_index = LEGACY_I2C_PANEL; - entry->heads = twoHeads ? 3 : 1; - entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */ - entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */ - entry->duallink_possible = false; /* SiI164 and co. are single link */ - -#if 0 - /* - * For dvi-a either crtc probably works, but my card appears to only - * support dvi-d. "nvidia" still attempts to program it for dvi-a, - * doing the full fp output setup (program 0x6808.. 
fp dimension regs, - * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880); - * the monitor picks up the mode res ok and lights up, but no pixel - * data appears, so the board manufacturer probably connected up the - * sync lines, but missed the video traces / components - * - * with this introduction, dvi-a left as an exercise for the reader. - */ - fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads); -#endif -} - -static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads) -{ - struct dcb_entry *entry = new_dcb_entry(dcb); - - entry->type = 1; - entry->i2c_index = LEGACY_I2C_TV; - entry->heads = twoHeads ? 3 : 1; - entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */ + if (type != OUTPUT_ANALOG) + entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */ + entry->or = or; } static bool @@ -6365,8 +6330,36 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) return true; } +static void +fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios) +{ + struct dcb_table *dcb = &bios->dcb; + int all_heads = (nv_two_heads(dev) ? 3 : 1); + +#ifdef __powerpc__ + /* Apple iMac G4 NV17 */ + if (of_machine_is_compatible("PowerMac4,5")) { + fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1); + fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2); + return; + } +#endif + + /* Make up some sane defaults */ + fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1); + + if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0) + fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV, + all_heads, 0); + + else if (bios->tmds.output0_script_ptr || + bios->tmds.output1_script_ptr) + fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL, + all_heads, 1); +} + static int -parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) +parse_dcb_table(struct drm_device *dev, struct nvbios *bios) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct dcb_table *dcb = &bios->dcb; @@ -6386,12 +6379,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) /* this situation likely means a really old card, pre DCB */ if (dcbptr == 0x0) { - NV_INFO(dev, "Assuming a CRT output exists\n"); - fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); - - if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0) - fabricate_tv_output(dcb, twoHeads); - + fabricate_dcb_encoder_table(dev, bios); return 0; } @@ -6451,21 +6439,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) */ NV_TRACEWARN(dev, "No useful information in BIOS output table; " "adding all possible outputs\n"); - fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1); - - /* - * Attempt to detect TV before DVI because the test - * for the former is more accurate and it rules the - * latter out. - */ - if (nv04_tv_identify(dev, - bios->legacy.i2c_indices.tv) >= 0) - fabricate_tv_output(dcb, twoHeads); - - else if (bios->tmds.output0_script_ptr || - bios->tmds.output1_script_ptr) - fabricate_dvi_i_output(dcb, twoHeads); - + fabricate_dcb_encoder_table(dev, bios); return 0; } @@ -6859,7 +6833,7 @@ nouveau_bios_init(struct drm_device *dev) if (ret) return ret; - ret = parse_dcb_table(dev, bios, nv_two_heads(dev)); + ret = parse_dcb_table(dev, bios); if (ret) return ret; -- cgit v1.2.3 From 17fe6981109e995f36723e4880a97d48fa38920a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 3 Dec 2010 20:17:19 +0000 Subject: drm/i915/lvds: Connect the PWM to the LVDS pipe ... and do not just assume to always use pipe B. 
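The core of the change, annotated (the code is from the hunk below; disabling the PWM before flipping its pipe-select bit is presumably to avoid driving the backlight from a half-switched mux):

  /* Which pipe actually drives LVDS? */
  pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;

  pwm = I915_READ(BLC_PWM_CPU_CTL2);
  /* If the PWM currently points at pipe B but LVDS is on pipe A,
   * disable it before changing the select bit. */
  if (pipe == 0 && (pwm & PWM_PIPE_B))
          I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
  if (pipe)
          pwm |= PWM_PIPE_B;
  else
          pwm &= ~PWM_PIPE_B;
  I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);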
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_lvds.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 23247b235e68..d07055c4d84f 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -1012,10 +1012,18 @@ bool intel_lvds_init(struct drm_device *dev) out: if (HAS_PCH_SPLIT(dev)) { u32 pwm; - /* make sure PWM is enabled */ + + pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0; + + /* make sure PWM is enabled and locked to the LVDS pipe */ pwm = I915_READ(BLC_PWM_CPU_CTL2); - pwm |= (PWM_ENABLE | PWM_PIPE_B); - I915_WRITE(BLC_PWM_CPU_CTL2, pwm); + if (pipe == 0 && (pwm & PWM_PIPE_B)) + I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE); + if (pipe) + pwm |= PWM_PIPE_B; + else + pwm &= ~PWM_PIPE_B; + I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE); pwm = I915_READ(BLC_PWM_PCH_CTL1); pwm |= PWM_PCH_ENABLE; -- cgit v1.2.3 From a589b9f429ac0e5bcdebda0f74ee313d39d69b7f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 3 Dec 2010 21:13:16 +0000 Subject: drm/i915: Explain why we need to write DPLL twice ... it's because setting the Pixel Multiply bits only takes effect once the PLL is enabled and stable. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f7962b741c94..e3b8d0dc7a7d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4089,13 +4089,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } I915_WRITE(DPLL_MD(pipe), temp); } else { - /* write it again -- the BIOS does, after all */ + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. + */ I915_WRITE(dpll_reg, dpll); } - - /* Wait for the clocks to stabilize. */ - POSTING_READ(dpll_reg); - udelay(150); } intel_crtc->lowfreq_avail = false; -- cgit v1.2.3 From c1858123dba4a9e40355363ac7a153ea23a5315d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 3 Dec 2010 21:35:48 +0000 Subject: drm/i915: Enable CB tuning of the Display PLL Magic numbers from the specs. This is supposed to allow the PLL some variance to improve jitter performance and VCO headroom across manufacturing and environmental variations. 
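A worked example of the heuristic this adds: the factor defaults to 21, becomes 25 for LVDS on a 100MHz SSC reference (or with CLKB powered up) and 20 for SDVO/TV, and CB tuning is requested whenever m1 < factor * n. So with n = 2 the cutoff is m1 < 42; illustrative values only:

  int factor = 21;                  /* 25 for 100MHz-SSC LVDS, 20 for SDVO/TV */
  if (clock.m1 < factor * clock.n)  /* e.g. m1 = 20, n = 2: 20 < 42, tune */
          fp |= FP_CB_TUNE;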
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_display.c | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 998457063d5e..06175e98c5bb 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2712,6 +2712,7 @@ #define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B) #define PCH_FPA0 0xc6040 +#define FP_CB_TUNE (0x3<<22) #define PCH_FPA1 0xc6044 #define PCH_FPB0 0xc6048 #define PCH_FPB1 0xc604c diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e3b8d0dc7a7d..f2aa76bd72c0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3857,6 +3857,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, reduced_clock.m2; } + /* Enable autotuning of the PLL clock (if permissible) */ + if (HAS_PCH_SPLIT(dev)) { + int factor = 21; + + if (is_lvds) { + if ((dev_priv->lvds_use_ssc && + dev_priv->lvds_ssc_freq == 100) || + (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) + factor = 25; + } else if (is_sdvo && is_tv) + factor = 20; + + if (clock.m1 < factor * clock.n) + fp |= FP_CB_TUNE; + } + dpll = 0; if (!HAS_PCH_SPLIT(dev)) dpll = DPLL_VGA_MODE_DIS; @@ -4071,7 +4087,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { - I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll); /* Wait for the clocks to stabilize. */ -- cgit v1.2.3 From f684f5b48cd27a031928c137531aace728d765a6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 4 Dec 2010 17:22:41 +0000 Subject: drm/i915: Re-enable RC6 for power-savings. Let's see if we've successfully cleared up all the bugs from last time... Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f2aa76bd72c0..3063edd2456f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5864,7 +5864,6 @@ void intel_init_clock_gating(struct drm_device *dev) _3D_CHICKEN2_WM_READ_PIPELINED << 16 | _3D_CHICKEN2_WM_READ_PIPELINED); } - return; } else if (IS_G4X(dev)) { uint32_t dspclk_gate; I915_WRITE(RENCLK_GATE_D1, 0); -- cgit v1.2.3 From 4add75c43f39573edc884d46b7c2b7414f01171a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 4 Dec 2010 17:49:46 +0000 Subject: drm/i915: Allow LVDS to be on pipe A for Ironlake+ Previously we enabled this for gen4, only to have to revert it due to it causing a large number of spurious wakeups. Try again hoping that the hardware has become more sane in the meantime...
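The mechanics: crtc_mask is a bitmask of the pipes an encoder may be assigned to (bit 0 = pipe A, bit 1 = pipe B), so the change below simply widens the mask on gen5+:

  intel_encoder->crtc_mask = 1 << 1;              /* pipe B, as before */
  if (INTEL_INFO(dev)->gen >= 5)
          intel_encoder->crtc_mask |= 1 << 0;     /* now pipe A as well */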
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_lvds.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index d07055c4d84f..6d1106eafcfd 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -907,6 +907,8 @@ bool intel_lvds_init(struct drm_device *dev) intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); intel_encoder->crtc_mask = (1 << 1); + if (INTEL_INFO(dev)->gen >= 5) + intel_encoder->crtc_mask |= (1 << 0); drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; -- cgit v1.2.3 From 340479aac697bc73e225c122a9753d4964eeda3f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 4 Dec 2010 18:17:15 +0000 Subject: drm/i915: Be paranoid and bail on resetting if we can't take the lock. This will declare the machine wedged, but is better than truly wedging the machine. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 1a15b7886b8c..64844e2e9f86 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -440,7 +440,8 @@ int i915_reset(struct drm_device *dev, u8 flags) bool need_display = true; int ret; - mutex_lock(&dev->struct_mutex); + if (!mutex_trylock(&dev->struct_mutex)) + return -EBUSY; i915_gem_reset(dev); -- cgit v1.2.3 From 1ec14ad3132702694f2e1a90b30641cf111183b9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 4 Dec 2010 11:30:53 +0000 Subject: drm/i915: Implement GPU semaphores for inter-ring synchronisation on SNB The bulk of the change is to convert the growing list of rings into an array so that the relationship between the rings and the semaphore sync registers can be easily computed. 
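With the rings in a single array indexed by RCS/VCS/BCS, each ring can keep a small sync_seqno[] table and the slot to use for a given ring pair falls out of pointer arithmetic. A sketch of the idea, modelled on the intel_ring_sync_index() used by the execbuffer code below (the body is illustrative, not authoritative):

  /* Rings live contiguously in dev_priv->ring[], so the position of
   * ring "to" relative to ring "from" (skipping "from" itself)
   * identifies which of from's sync_seqno slots tracks waits by "to".
   */
  static int example_sync_index(struct intel_ring_buffer *from,
                                struct intel_ring_buffer *to)
  {
          int idx = (to - from) - 1;
          if (idx < 0)
                  idx += I915_NUM_RINGS;  /* wrap around the array */
          return idx;
  }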
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 75 +++--- drivers/gpu/drm/i915/i915_dma.c | 60 +++-- drivers/gpu/drm/i915/i915_drv.c | 6 +- drivers/gpu/drm/i915/i915_drv.h | 48 ++-- drivers/gpu/drm/i915/i915_gem.c | 86 +++--- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 94 +++++-- drivers/gpu/drm/i915/i915_irq.c | 209 ++++++++------- drivers/gpu/drm/i915/i915_reg.h | 19 +- drivers/gpu/drm/i915/intel_display.c | 4 +- drivers/gpu/drm/i915/intel_opregion.c | 8 +- drivers/gpu/drm/i915/intel_overlay.c | 8 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 415 ++++++++++++++++++----------- drivers/gpu/drm/i915/intel_ringbuffer.h | 41 ++- drivers/gpu/drm/i915/intel_tv.c | 14 +- 14 files changed, 648 insertions(+), 439 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3c9d4b876865..aedb02157474 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -339,10 +339,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data) return ret; count = 0; - if (!list_empty(&dev_priv->render_ring.request_list)) { + if (!list_empty(&dev_priv->ring[RCS].request_list)) { seq_printf(m, "Render requests:\n"); list_for_each_entry(gem_request, - &dev_priv->render_ring.request_list, + &dev_priv->ring[RCS].request_list, list) { seq_printf(m, " %d @ %d\n", gem_request->seqno, @@ -350,10 +350,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data) } count++; } - if (!list_empty(&dev_priv->bsd_ring.request_list)) { + if (!list_empty(&dev_priv->ring[VCS].request_list)) { seq_printf(m, "BSD requests:\n"); list_for_each_entry(gem_request, - &dev_priv->bsd_ring.request_list, + &dev_priv->ring[VCS].request_list, list) { seq_printf(m, " %d @ %d\n", gem_request->seqno, @@ -361,10 +361,10 @@ static int i915_gem_request_info(struct seq_file *m, void *data) } count++; } - if (!list_empty(&dev_priv->blt_ring.request_list)) { + if (!list_empty(&dev_priv->ring[BCS].request_list)) { seq_printf(m, "BLT requests:\n"); list_for_each_entry(gem_request, - &dev_priv->blt_ring.request_list, + &dev_priv->ring[BCS].request_list, list) { seq_printf(m, " %d @ %d\n", gem_request->seqno, @@ -398,15 +398,14 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - int ret; + int ret, i; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - i915_ring_seqno_info(m, &dev_priv->render_ring); - i915_ring_seqno_info(m, &dev_priv->bsd_ring); - i915_ring_seqno_info(m, &dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + i915_ring_seqno_info(m, &dev_priv->ring[i]); mutex_unlock(&dev->struct_mutex); @@ -419,7 +418,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - int ret; + int ret, i; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) @@ -458,9 +457,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } seq_printf(m, "Interrupts received: %d\n", atomic_read(&dev_priv->irq_received)); - i915_ring_seqno_info(m, &dev_priv->render_ring); - i915_ring_seqno_info(m, &dev_priv->bsd_ring); - i915_ring_seqno_info(m, &dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + i915_ring_seqno_info(m, &dev_priv->ring[i]); 
mutex_unlock(&dev->struct_mutex); return 0; @@ -503,13 +501,7 @@ static int i915_hws_info(struct seq_file *m, void *data) volatile u32 *hws; int i; - switch ((uintptr_t)node->info_ent->data) { - case RING_RENDER: ring = &dev_priv->render_ring; break; - case RING_BSD: ring = &dev_priv->bsd_ring; break; - case RING_BLT: ring = &dev_priv->blt_ring; break; - default: return -EINVAL; - } - + ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; hws = (volatile u32 *)ring->status_page.page_addr; if (hws == NULL) return 0; @@ -569,17 +561,11 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data) struct intel_ring_buffer *ring; int ret; - switch ((uintptr_t)node->info_ent->data) { - case RING_RENDER: ring = &dev_priv->render_ring; break; - case RING_BSD: ring = &dev_priv->bsd_ring; break; - case RING_BLT: ring = &dev_priv->blt_ring; break; - default: return -EINVAL; - } - ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; + ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; if (!ring->obj) { seq_printf(m, "No ringbuffer setup\n"); } else { @@ -603,21 +589,20 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; - switch ((uintptr_t)node->info_ent->data) { - case RING_RENDER: ring = &dev_priv->render_ring; break; - case RING_BSD: ring = &dev_priv->bsd_ring; break; - case RING_BLT: ring = &dev_priv->blt_ring; break; - default: return -EINVAL; - } - + ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; if (ring->size == 0) - return 0; + return 0; seq_printf(m, "Ring %s:\n", ring->name); seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); seq_printf(m, " Size : %08x\n", ring->size); seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring)); + seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring)); + if (IS_GEN6(dev)) { + seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring)); + seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring)); + } seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); @@ -1177,15 +1162,15 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_gem_seqno", i915_gem_seqno_info, 0}, {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, - {"i915_gem_hws", i915_hws_info, 0, (void *)RING_RENDER}, - {"i915_gem_hws_blt", i915_hws_info, 0, (void *)RING_BLT}, - {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)RING_BSD}, - {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_RENDER}, - {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_RENDER}, - {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_BSD}, - {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_BSD}, - {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RING_BLT}, - {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RING_BLT}, + {"i915_gem_hws", i915_hws_info, 0, (void *)RCS}, + {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS}, + {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS}, + {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS}, + {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS}, + {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS}, + {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS}, + {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, 
(void *)BCS}, + {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS}, {"i915_batchbuffers", i915_batchbuffer_info, 0}, {"i915_error_state", i915_error_state, 0}, {"i915_rstdby_delays", i915_rstdby_delays, 0}, diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 9a22da9b2083..664300986fb4 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -49,6 +49,8 @@ static int i915_init_phys_hws(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = LP_RING(dev_priv); + /* Program Hardware Status Page */ dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); @@ -57,11 +59,10 @@ static int i915_init_phys_hws(struct drm_device *dev) DRM_ERROR("Can not allocate hardware status page\n"); return -ENOMEM; } - dev_priv->render_ring.status_page.page_addr - = dev_priv->status_page_dmah->vaddr; + ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; - memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); + memset(ring->status_page.page_addr, 0, PAGE_SIZE); if (INTEL_INFO(dev)->gen >= 4) dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & @@ -79,13 +80,15 @@ static int i915_init_phys_hws(struct drm_device *dev) static void i915_free_hws(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = LP_RING(dev_priv); + if (dev_priv->status_page_dmah) { drm_pci_free(dev, dev_priv->status_page_dmah); dev_priv->status_page_dmah = NULL; } - if (dev_priv->render_ring.status_page.gfx_addr) { - dev_priv->render_ring.status_page.gfx_addr = 0; + if (ring->status_page.gfx_addr) { + ring->status_page.gfx_addr = 0; drm_core_ioremapfree(&dev_priv->hws_map, dev); } @@ -97,7 +100,7 @@ void i915_kernel_lost_context(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv; - struct intel_ring_buffer *ring = &dev_priv->render_ring; + struct intel_ring_buffer *ring = LP_RING(dev_priv); /* * We should never lose context on the ring with modesetting @@ -123,6 +126,8 @@ void i915_kernel_lost_context(struct drm_device * dev) static int i915_dma_cleanup(struct drm_device * dev) { drm_i915_private_t *dev_priv = dev->dev_private; + int i; + /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private * is freed, it's too late. 
@@ -131,9 +136,8 @@ static int i915_dma_cleanup(struct drm_device * dev) drm_irq_uninstall(dev); mutex_lock(&dev->struct_mutex); - intel_cleanup_ring_buffer(&dev_priv->render_ring); - intel_cleanup_ring_buffer(&dev_priv->bsd_ring); - intel_cleanup_ring_buffer(&dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + intel_cleanup_ring_buffer(&dev_priv->ring[i]); mutex_unlock(&dev->struct_mutex); /* Clear the HWS virtual address at teardown */ @@ -147,6 +151,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) { drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; + struct intel_ring_buffer *ring = LP_RING(dev_priv); master_priv->sarea = drm_getsarea(dev); if (master_priv->sarea) { @@ -157,24 +162,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) } if (init->ring_size != 0) { - if (dev_priv->render_ring.obj != NULL) { + if (ring->obj != NULL) { i915_dma_cleanup(dev); DRM_ERROR("Client tried to initialize ringbuffer in " "GEM mode\n"); return -EINVAL; } - dev_priv->render_ring.size = init->ring_size; + ring->size = init->ring_size; - dev_priv->render_ring.map.offset = init->ring_start; - dev_priv->render_ring.map.size = init->ring_size; - dev_priv->render_ring.map.type = 0; - dev_priv->render_ring.map.flags = 0; - dev_priv->render_ring.map.mtrr = 0; + ring->map.offset = init->ring_start; + ring->map.size = init->ring_size; + ring->map.type = 0; + ring->map.flags = 0; + ring->map.mtrr = 0; - drm_core_ioremap_wc(&dev_priv->render_ring.map, dev); + drm_core_ioremap_wc(&ring->map, dev); - if (dev_priv->render_ring.map.handle == NULL) { + if (ring->map.handle == NULL) { i915_dma_cleanup(dev); DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); @@ -182,7 +187,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) } } - dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle; + ring->virtual_start = ring->map.handle; dev_priv->cpp = init->cpp; dev_priv->back_offset = init->back_offset; @@ -201,12 +206,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) static int i915_dma_resume(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct intel_ring_buffer *ring = LP_RING(dev_priv); - struct intel_ring_buffer *ring; DRM_DEBUG_DRIVER("%s\n", __func__); - ring = &dev_priv->render_ring; - if (ring->map.handle == NULL) { DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); @@ -326,7 +329,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) drm_i915_private_t *dev_priv = dev->dev_private; int i, ret; - if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) + if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8) return -EINVAL; for (i = 0; i < dwords;) { @@ -565,13 +568,12 @@ static int i915_dispatch_flip(struct drm_device * dev) return 0; } -static int i915_quiescent(struct drm_device * dev) +static int i915_quiescent(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = LP_RING(dev->dev_private); i915_kernel_lost_context(dev); - return intel_wait_ring_buffer(&dev_priv->render_ring, - dev_priv->render_ring.size - 8); + return intel_wait_ring_buffer(ring, ring->size - 8); } static int i915_flush_ioctl(struct drm_device *dev, void *data, @@ -828,7 +830,7 @@ static int i915_set_status_page(struct drm_device *dev, void 
*data, { drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_hws_addr_t *hws = data; - struct intel_ring_buffer *ring = &dev_priv->render_ring; + struct intel_ring_buffer *ring = LP_RING(dev_priv); if (!I915_NEED_GFX_HWS(dev)) return -EINVAL; @@ -1978,7 +1980,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (!IS_I945G(dev) && !IS_I945GM(dev)) pci_enable_msi(dev->pdev); - spin_lock_init(&dev_priv->user_irq_lock); + spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->error_lock); dev_priv->trace_irq_seqno = 0; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 64844e2e9f86..413a040386a9 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -487,11 +487,11 @@ int i915_reset(struct drm_device *dev, u8 flags) !dev_priv->mm.suspended) { dev_priv->mm.suspended = 0; - dev_priv->render_ring.init(&dev_priv->render_ring); + dev_priv->ring[RCS].init(&dev_priv->ring[RCS]); if (HAS_BSD(dev)) - dev_priv->bsd_ring.init(&dev_priv->bsd_ring); + dev_priv->ring[VCS].init(&dev_priv->ring[VCS]); if (HAS_BLT(dev)) - dev_priv->blt_ring.init(&dev_priv->blt_ring); + dev_priv->ring[BCS].init(&dev_priv->ring[BCS]); mutex_unlock(&dev->struct_mutex); drm_irq_uninstall(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index af9ff40b135b..8b19b5806230 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -269,9 +269,7 @@ typedef struct drm_i915_private { } *gmbus; struct pci_dev *bridge_dev; - struct intel_ring_buffer render_ring; - struct intel_ring_buffer bsd_ring; - struct intel_ring_buffer blt_ring; + struct intel_ring_buffer ring[I915_NUM_RINGS]; uint32_t next_seqno; drm_dma_handle_t *status_page_dmah; @@ -290,19 +288,15 @@ typedef struct drm_i915_private { int page_flipping; atomic_t irq_received; - /** Protects user_irq_refcount and irq_mask_reg */ - spinlock_t user_irq_lock; u32 trace_irq_seqno; + + /* protects the irq masks */ + spinlock_t irq_lock; /** Cached value of IMR to avoid reads in updating the bitfield */ - u32 irq_mask_reg; u32 pipestat[2]; - /** splitted irq regs for graphics and display engine on Ironlake, - irq_mask_reg is still used for display irq. */ - u32 gt_irq_mask_reg; - u32 gt_irq_enable_reg; - u32 de_irq_enable_reg; - u32 pch_irq_mask_reg; - u32 pch_irq_enable_reg; + u32 irq_mask; + u32 gt_irq_mask; + u32 pch_irq_mask; u32 hotplug_supported_mask; struct work_struct hotplug_work; @@ -1104,7 +1098,8 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, bool interruptible); void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *ring); + struct intel_ring_buffer *ring, + u32 seqno); /** * Returns true if seq1 is later than seq2. @@ -1272,6 +1267,17 @@ extern void intel_display_print_error_state(struct seq_file *m, struct intel_display_error_state *error); #endif +#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS]) + +#define BEGIN_LP_RING(n) \ + intel_ring_begin(LP_RING(dev_priv), (n)) + +#define OUT_RING(x) \ + intel_ring_emit(LP_RING(dev_priv), x) + +#define ADVANCE_LP_RING() \ + intel_ring_advance(LP_RING(dev_priv)) + /** * Lock test for when it's just for synchronization of ring access. * @@ -1279,8 +1285,7 @@ extern void intel_display_print_error_state(struct seq_file *m, * has access to the ring. 
*/ #define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \ - if (((drm_i915_private_t *)dev->dev_private)->render_ring.obj \ - == NULL) \ + if (LP_RING(dev->dev_private)->obj == NULL) \ LOCK_TEST_WITH_RETURN(dev, file); \ } while (0) @@ -1366,15 +1371,6 @@ i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) } } -#define BEGIN_LP_RING(n) \ - intel_ring_begin(&dev_priv->render_ring, (n)) - -#define OUT_RING(x) \ - intel_ring_emit(&dev_priv->render_ring, x) - -#define ADVANCE_LP_RING() \ - intel_ring_advance(&dev_priv->render_ring) - /** * Reads a dword out of the status page, which is written to from the command * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or @@ -1391,7 +1387,7 @@ i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) * The area from dword 0x20 to 0x3ff is available for driver usage. */ #define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ - (dev_priv->render_ring.status_page.page_addr))[reg]) + (LP_RING(dev_priv)->status_page.page_addr))[reg]) #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) #define I915_GEM_HWS_INDEX 0x20 #define I915_BREADCRUMB_INDEX 0x21 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d99212fe54ed..eeed2e99d247 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1561,11 +1561,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring, + u32 seqno) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t seqno = i915_gem_next_request_seqno(dev, ring); BUG_ON(ring == NULL); obj->ring = ring; @@ -1679,7 +1679,8 @@ i915_gem_process_flushing_list(struct drm_device *dev, obj->base.write_domain = 0; list_del_init(&obj->gpu_write_list); - i915_gem_object_move_to_active(obj, ring); + i915_gem_object_move_to_active(obj, ring, + i915_gem_next_request_seqno(dev, ring)); trace_i915_gem_object_change_domain(obj, obj->base.read_domains, @@ -1804,10 +1805,10 @@ void i915_gem_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; + int i; - i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); - i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); - i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]); /* Remove anything from the flushing lists. 
The GPU cache is likely * to be lost on reset along with the data, so simply move the @@ -1846,6 +1847,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, { drm_i915_private_t *dev_priv = dev->dev_private; uint32_t seqno; + int i; if (!ring->status_page.page_addr || list_empty(&ring->request_list)) @@ -1854,6 +1856,11 @@ i915_gem_retire_requests_ring(struct drm_device *dev, WARN_ON(i915_verify_lists(dev)); seqno = ring->get_seqno(ring); + + for (i = 0; i < I915_NUM_RINGS; i++) + if (seqno >= ring->sync_seqno[i]) + ring->sync_seqno[i] = 0; + while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; @@ -1892,7 +1899,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, if (unlikely (dev_priv->trace_irq_seqno && i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { - ring->user_irq_put(ring); + ring->irq_put(ring); dev_priv->trace_irq_seqno = 0; } @@ -1903,6 +1910,7 @@ void i915_gem_retire_requests(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + int i; if (!list_empty(&dev_priv->mm.deferred_free_list)) { struct drm_i915_gem_object *obj, *next; @@ -1918,9 +1926,8 @@ i915_gem_retire_requests(struct drm_device *dev) i915_gem_free_object_tail(obj); } - i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); - i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); - i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]); } static void @@ -1942,9 +1949,9 @@ i915_gem_retire_work_handler(struct work_struct *work) i915_gem_retire_requests(dev); if (!dev_priv->mm.suspended && - (!list_empty(&dev_priv->render_ring.request_list) || - !list_empty(&dev_priv->bsd_ring.request_list) || - !list_empty(&dev_priv->blt_ring.request_list))) + (!list_empty(&dev_priv->ring[RCS].request_list) || + !list_empty(&dev_priv->ring[VCS].request_list) || + !list_empty(&dev_priv->ring[BCS].request_list))) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); mutex_unlock(&dev->struct_mutex); } @@ -1993,7 +2000,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, trace_i915_gem_request_wait_begin(dev, seqno); ring->waiting_seqno = seqno; - ring->user_irq_get(ring); + ring->irq_get(ring); if (interruptible) ret = wait_event_interruptible(ring->irq_queue, i915_seqno_passed(ring->get_seqno(ring), seqno) @@ -2003,7 +2010,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, i915_seqno_passed(ring->get_seqno(ring), seqno) || atomic_read(&dev_priv->mm.wedged)); - ring->user_irq_put(ring); + ring->irq_put(ring); ring->waiting_seqno = 0; trace_i915_gem_request_wait_end(dev, seqno); @@ -2159,7 +2166,7 @@ i915_gpu_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; bool lists_empty; - int ret; + int ret, i; lists_empty = (list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->mm.active_list)); @@ -2167,17 +2174,11 @@ i915_gpu_idle(struct drm_device *dev) return 0; /* Flush everything onto the inactive list. 
*/ - ret = i915_ring_idle(dev, &dev_priv->render_ring); - if (ret) - return ret; - - ret = i915_ring_idle(dev, &dev_priv->bsd_ring); - if (ret) - return ret; - - ret = i915_ring_idle(dev, &dev_priv->blt_ring); - if (ret) - return ret; + for (i = 0; i < I915_NUM_RINGS; i++) { + ret = i915_ring_idle(dev, &dev_priv->ring[i]); + if (ret) + return ret; + } return 0; } @@ -3153,11 +3154,11 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) * generation is designed to be run atomically and so is * lockless. */ - ring->user_irq_get(ring); + ring->irq_get(ring); ret = wait_event_interruptible(ring->irq_queue, i915_seqno_passed(ring->get_seqno(ring), seqno) || atomic_read(&dev_priv->mm.wedged)); - ring->user_irq_put(ring); + ring->irq_put(ring); if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) ret = -EIO; @@ -3584,9 +3585,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev) return 0; cleanup_bsd_ring: - intel_cleanup_ring_buffer(&dev_priv->bsd_ring); + intel_cleanup_ring_buffer(&dev_priv->ring[VCS]); cleanup_render_ring: - intel_cleanup_ring_buffer(&dev_priv->render_ring); + intel_cleanup_ring_buffer(&dev_priv->ring[RCS]); return ret; } @@ -3594,10 +3595,10 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + int i; - intel_cleanup_ring_buffer(&dev_priv->render_ring); - intel_cleanup_ring_buffer(&dev_priv->bsd_ring); - intel_cleanup_ring_buffer(&dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + intel_cleanup_ring_buffer(&dev_priv->ring[i]); } int @@ -3605,7 +3606,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; - int ret; + int ret, i; if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; @@ -3625,14 +3626,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, } BUG_ON(!list_empty(&dev_priv->mm.active_list)); - BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); - BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list)); - BUG_ON(!list_empty(&dev_priv->blt_ring.active_list)); BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); - BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); - BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list)); - BUG_ON(!list_empty(&dev_priv->blt_ring.request_list)); + for (i = 0; i < I915_NUM_RINGS; i++) { + BUG_ON(!list_empty(&dev_priv->ring[i].active_list)); + BUG_ON(!list_empty(&dev_priv->ring[i].request_list)); + } mutex_unlock(&dev->struct_mutex); ret = drm_irq_install(dev); @@ -3695,9 +3694,8 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.fence_list); INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); INIT_LIST_HEAD(&dev_priv->mm.gtt_list); - init_ring_lists(&dev_priv->render_ring); - init_ring_lists(&dev_priv->bsd_ring); - init_ring_lists(&dev_priv->blt_ring); + for (i = 0; i < I915_NUM_RINGS; i++) + init_ring_lists(&dev_priv->ring[i]); for (i = 0; i < 16; i++) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 9bdc495e17bb..6fc9cc485781 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -632,23 +632,59 @@ i915_gem_execbuffer_flush(struct drm_device *dev, uint32_t flush_rings) { drm_i915_private_t *dev_priv = dev->dev_private; + int i; if (flush_domains & I915_GEM_DOMAIN_CPU) 
intel_gtt_chipset_flush(); if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { - if (flush_rings & RING_RENDER) - i915_gem_flush_ring(dev, &dev_priv->render_ring, - invalidate_domains, flush_domains); - if (flush_rings & RING_BSD) - i915_gem_flush_ring(dev, &dev_priv->bsd_ring, - invalidate_domains, flush_domains); - if (flush_rings & RING_BLT) - i915_gem_flush_ring(dev, &dev_priv->blt_ring, - invalidate_domains, flush_domains); + for (i = 0; i < I915_NUM_RINGS; i++) + if (flush_rings & (1 << i)) + i915_gem_flush_ring(dev, &dev_priv->ring[i], + invalidate_domains, + flush_domains); } } +static int +i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *to) +{ + struct intel_ring_buffer *from = obj->ring; + u32 seqno; + int ret, idx; + + if (from == NULL || to == from) + return 0; + + if (INTEL_INFO(obj->base.dev)->gen < 6) + return i915_gem_object_wait_rendering(obj, true); + + idx = intel_ring_sync_index(from, to); + + seqno = obj->last_rendering_seqno; + if (seqno <= from->sync_seqno[idx]) + return 0; + + if (seqno == from->outstanding_lazy_request) { + struct drm_i915_gem_request *request; + + request = kzalloc(sizeof(*request), GFP_KERNEL); + if (request == NULL) + return -ENOMEM; + + ret = i915_add_request(obj->base.dev, NULL, request, from); + if (ret) { + kfree(request); + return ret; + } + + seqno = request->seqno; + } + + from->sync_seqno[idx] = seqno; + return intel_ring_sync(to, from, seqno - 1); +} static int i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, @@ -678,12 +714,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, } list_for_each_entry(obj, objects, exec_list) { - /* XXX replace with semaphores */ - if (obj->ring && ring != obj->ring) { - ret = i915_gem_object_wait_rendering(obj, true); - if (ret) - return ret; - } + ret = i915_gem_execbuffer_sync_rings(obj, ring); + if (ret) + return ret; } return 0; @@ -769,7 +802,8 @@ i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, static void i915_gem_execbuffer_move_to_active(struct list_head *objects, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring, + u32 seqno) { struct drm_i915_gem_object *obj; @@ -778,7 +812,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, obj->base.write_domain = obj->base.pending_write_domain; obj->fenced_gpu_access = obj->pending_fenced_gpu_access; - i915_gem_object_move_to_active(obj, ring); + i915_gem_object_move_to_active(obj, ring, seqno); if (obj->base.write_domain) { obj->dirty = 1; obj->pending_gpu_write = true; @@ -833,6 +867,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_clip_rect *cliprects = NULL; struct intel_ring_buffer *ring; u32 exec_start, exec_len; + u32 seqno; int ret, i; if (!i915_gem_check_execbuffer(args)) { @@ -851,21 +886,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, switch (args->flags & I915_EXEC_RING_MASK) { case I915_EXEC_DEFAULT: case I915_EXEC_RENDER: - ring = &dev_priv->render_ring; + ring = &dev_priv->ring[RCS]; break; case I915_EXEC_BSD: if (!HAS_BSD(dev)) { DRM_ERROR("execbuf with invalid ring (BSD)\n"); return -EINVAL; } - ring = &dev_priv->bsd_ring; + ring = &dev_priv->ring[VCS]; break; case I915_EXEC_BLT: if (!HAS_BLT(dev)) { DRM_ERROR("execbuf with invalid ring (BLT)\n"); return -EINVAL; } - ring = &dev_priv->blt_ring; + ring = &dev_priv->ring[BCS]; break; default: DRM_ERROR("execbuf with unknown ring: %d\n", @@ -879,7 +914,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, 
} if (args->num_cliprects != 0) { - if (ring != &dev_priv->render_ring) { + if (ring != &dev_priv->ring[RCS]) { DRM_ERROR("clip rectangles are only valid with the render ring\n"); return -EINVAL; } @@ -972,6 +1007,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) goto err; + seqno = i915_gem_next_request_seqno(dev, ring); + for (i = 0; i < I915_NUM_RINGS-1; i++) { + if (seqno < ring->sync_seqno[i]) { + /* The GPU can not handle its semaphore value wrapping, + * so every billion or so execbuffers, we need to stall + * the GPU in order to reset the counters. + */ + ret = i915_gpu_idle(dev); + if (ret) + goto err; + + BUG_ON(ring->sync_seqno[i]); + } + } + exec_start = batch_obj->gtt_offset + args->batch_start_offset; exec_len = args->batch_len; if (cliprects) { @@ -992,7 +1042,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - i915_gem_execbuffer_move_to_active(&objects, ring); + i915_gem_execbuffer_move_to_active(&objects, ring, seqno); i915_gem_execbuffer_retire_commands(dev, file, ring); err: diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9aa1e1dc5fd5..5e831b7eb3f1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -67,9 +67,9 @@ void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->gt_irq_mask_reg & mask) != 0) { - dev_priv->gt_irq_mask_reg &= ~mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); + if ((dev_priv->gt_irq_mask & mask) != 0) { + dev_priv->gt_irq_mask &= ~mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); POSTING_READ(GTIMR); } } @@ -77,9 +77,9 @@ ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->gt_irq_mask_reg & mask) != mask) { - dev_priv->gt_irq_mask_reg |= mask; - I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); + if ((dev_priv->gt_irq_mask & mask) != mask) { + dev_priv->gt_irq_mask |= mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); POSTING_READ(GTIMR); } } @@ -88,9 +88,9 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) static void ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->irq_mask_reg & mask) != 0) { - dev_priv->irq_mask_reg &= ~mask; - I915_WRITE(DEIMR, dev_priv->irq_mask_reg); + if ((dev_priv->irq_mask & mask) != 0) { + dev_priv->irq_mask &= ~mask; + I915_WRITE(DEIMR, dev_priv->irq_mask); POSTING_READ(DEIMR); } } @@ -98,9 +98,9 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) static inline void ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->irq_mask_reg & mask) != mask) { - dev_priv->irq_mask_reg |= mask; - I915_WRITE(DEIMR, dev_priv->irq_mask_reg); + if ((dev_priv->irq_mask & mask) != mask) { + dev_priv->irq_mask |= mask; + I915_WRITE(DEIMR, dev_priv->irq_mask); POSTING_READ(DEIMR); } } @@ -108,9 +108,9 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->irq_mask_reg & mask) != 0) { - dev_priv->irq_mask_reg &= ~mask; - I915_WRITE(IMR, dev_priv->irq_mask_reg); + if ((dev_priv->irq_mask & mask) != 0) { + dev_priv->irq_mask &= ~mask; + I915_WRITE(IMR, dev_priv->irq_mask); POSTING_READ(IMR); } } @@ -118,9 +118,9 @@ i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) { - if ((dev_priv->irq_mask_reg & mask) != 
mask) { - dev_priv->irq_mask_reg |= mask; - I915_WRITE(IMR, dev_priv->irq_mask_reg); + if ((dev_priv->irq_mask & mask) != mask) { + dev_priv->irq_mask |= mask; + I915_WRITE(IMR, dev_priv->irq_mask); POSTING_READ(IMR); } } @@ -163,9 +163,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) /** * intel_enable_asle - enable ASLE interrupt for OpRegion */ -void intel_enable_asle (struct drm_device *dev) +void intel_enable_asle(struct drm_device *dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) ironlake_enable_display_irq(dev_priv, DE_GSE); @@ -176,6 +179,8 @@ void intel_enable_asle (struct drm_device *dev) i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE); } + + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } /** @@ -344,12 +349,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) READ_BREADCRUMB(dev_priv); } - if (gt_iir & GT_PIPE_NOTIFY) - notify_ring(dev, &dev_priv->render_ring); + if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) + notify_ring(dev, &dev_priv->ring[RCS]); if (gt_iir & bsd_usr_interrupt) - notify_ring(dev, &dev_priv->bsd_ring); - if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT) - notify_ring(dev, &dev_priv->blt_ring); + notify_ring(dev, &dev_priv->ring[VCS]); + if (gt_iir & GT_BLT_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[BCS]); if (de_iir & DE_GSE) intel_opregion_gse_intr(dev); @@ -640,8 +645,7 @@ static void i915_capture_error_state(struct drm_device *dev) DRM_DEBUG_DRIVER("generating error event\n"); - error->seqno = - dev_priv->render_ring.get_seqno(&dev_priv->render_ring); + error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]); error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); error->pipeastat = I915_READ(PIPEASTAT); @@ -656,16 +660,16 @@ static void i915_capture_error_state(struct drm_device *dev) error->bcs_ipeir = I915_READ(BCS_IPEIR); error->bcs_instdone = I915_READ(BCS_INSTDONE); error->bcs_seqno = 0; - if (dev_priv->blt_ring.get_seqno) - error->bcs_seqno = dev_priv->blt_ring.get_seqno(&dev_priv->blt_ring); + if (dev_priv->ring[BCS].get_seqno) + error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]); error->vcs_acthd = I915_READ(VCS_ACTHD); error->vcs_ipehr = I915_READ(VCS_IPEHR); error->vcs_ipeir = I915_READ(VCS_IPEIR); error->vcs_instdone = I915_READ(VCS_INSTDONE); error->vcs_seqno = 0; - if (dev_priv->bsd_ring.get_seqno) - error->vcs_seqno = dev_priv->bsd_ring.get_seqno(&dev_priv->bsd_ring); + if (dev_priv->ring[VCS].get_seqno) + error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]); } if (INTEL_INFO(dev)->gen >= 4) { error->ipeir = I915_READ(IPEIR_I965); @@ -684,7 +688,7 @@ static void i915_capture_error_state(struct drm_device *dev) } i915_gem_record_fences(dev, error); - bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->render_ring); + bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]); /* Grab the current batchbuffer, most likely to have crashed. */ batchbuffer[0] = NULL; @@ -748,7 +752,7 @@ static void i915_capture_error_state(struct drm_device *dev) /* Record the ringbuffer */ error->ringbuffer = i915_error_object_create(dev, - dev_priv->render_ring.obj); + dev_priv->ring[RCS].obj); /* Record buffers on the active and pinned lists. 
*/ error->active_bo = NULL; @@ -949,11 +953,11 @@ void i915_handle_error(struct drm_device *dev, bool wedged) /* * Wakeup waiting processes so they don't hang */ - wake_up_all(&dev_priv->render_ring.irq_queue); + wake_up_all(&dev_priv->ring[RCS].irq_queue); if (HAS_BSD(dev)) - wake_up_all(&dev_priv->bsd_ring.irq_queue); + wake_up_all(&dev_priv->ring[VCS].irq_queue); if (HAS_BLT(dev)) - wake_up_all(&dev_priv->blt_ring.irq_queue); + wake_up_all(&dev_priv->ring[BCS].irq_queue); } queue_work(dev_priv->wq, &dev_priv->error_work); @@ -1035,7 +1039,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) * It doesn't set the bit in iir again, but it still produces * interrupts (for non-MSI). */ - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); pipea_stats = I915_READ(PIPEASTAT); pipeb_stats = I915_READ(PIPEBSTAT); @@ -1058,7 +1062,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) I915_WRITE(PIPEBSTAT, pipeb_stats); irq_received = 1; } - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); if (!irq_received) break; @@ -1091,9 +1095,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) } if (iir & I915_USER_INTERRUPT) - notify_ring(dev, &dev_priv->render_ring); - if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) - notify_ring(dev, &dev_priv->bsd_ring); + notify_ring(dev, &dev_priv->ring[RCS]); + if (iir & I915_BSD_USER_INTERRUPT) + notify_ring(dev, &dev_priv->ring[VCS]); if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { intel_prepare_page_flip(dev, 0); @@ -1180,10 +1184,10 @@ static int i915_emit_irq(struct drm_device * dev) void i915_trace_irq_get(struct drm_device *dev, u32 seqno) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - struct intel_ring_buffer *render_ring = &dev_priv->render_ring; + struct intel_ring_buffer *ring = LP_RING(dev_priv); if (dev_priv->trace_irq_seqno == 0) - render_ring->user_irq_get(render_ring); + ring->irq_get(ring); dev_priv->trace_irq_seqno = seqno; } @@ -1193,7 +1197,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; int ret = 0; - struct intel_ring_buffer *render_ring = &dev_priv->render_ring; + struct intel_ring_buffer *ring = LP_RING(dev_priv); DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, READ_BREADCRUMB(dev_priv)); @@ -1207,10 +1211,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) if (master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - render_ring->user_irq_get(render_ring); - DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, + ring->irq_get(ring); + DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, READ_BREADCRUMB(dev_priv) >= irq_nr); - render_ring->user_irq_put(render_ring); + ring->irq_put(ring); if (ret == -EBUSY) { DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", @@ -1229,7 +1233,7 @@ int i915_irq_emit(struct drm_device *dev, void *data, drm_i915_irq_emit_t *emit = data; int result; - if (!dev_priv || !dev_priv->render_ring.virtual_start) { + if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { DRM_ERROR("called with no initialization\n"); return -EINVAL; } @@ -1275,9 +1279,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) if (!i915_pipe_enabled(dev, pipe)) return -EINVAL; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) - ironlake_enable_display_irq(dev_priv, (pipe == 0) ? + ironlake_enable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); else if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, pipe, @@ -1285,7 +1289,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) else i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; } @@ -1297,15 +1301,15 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) - ironlake_disable_display_irq(dev_priv, (pipe == 0) ? + ironlake_disable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); else i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE | PIPE_START_VBLANK_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } void i915_enable_interrupt (struct drm_device *dev) @@ -1397,6 +1401,27 @@ static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) return false; } +static bool kick_ring(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp = I915_READ_CTL(ring); + if (tmp & RING_WAIT) { + DRM_ERROR("Kicking stuck wait on %s\n", + ring->name); + I915_WRITE_CTL(ring, tmp); + return true; + } + if (IS_GEN6(dev) && + (tmp & RING_WAIT_SEMAPHORE)) { + DRM_ERROR("Kicking stuck semaphore on %s\n", + ring->name); + I915_WRITE_CTL(ring, tmp); + return true; + } + return false; +} + /** * This is called when the chip hasn't reported back with completed * batchbuffers in a long time. The first time this is called we simply record @@ -1411,9 +1436,9 @@ void i915_hangcheck_elapsed(unsigned long data) bool err = false; /* If all work is done then ACTHD clearly hasn't advanced. */ - if (i915_hangcheck_ring_idle(&dev_priv->render_ring, &err) && - i915_hangcheck_ring_idle(&dev_priv->bsd_ring, &err) && - i915_hangcheck_ring_idle(&dev_priv->blt_ring, &err)) { + if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) && + i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) && + i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) { dev_priv->hangcheck_count = 0; if (err) goto repeat; @@ -1442,12 +1467,17 @@ void i915_hangcheck_elapsed(unsigned long data) * and break the hang. This should work on * all but the second generation chipsets. 
*/ - struct intel_ring_buffer *ring = &dev_priv->render_ring; - u32 tmp = I915_READ_CTL(ring); - if (tmp & RING_WAIT) { - I915_WRITE_CTL(ring, tmp); + + if (kick_ring(&dev_priv->ring[RCS])) + goto repeat; + + if (HAS_BSD(dev) && + kick_ring(&dev_priv->ring[VCS])) + goto repeat; + + if (HAS_BLT(dev) && + kick_ring(&dev_priv->ring[BCS])) goto repeat; - } } i915_handle_error(dev, true); @@ -1498,37 +1528,37 @@ static int ironlake_irq_postinstall(struct drm_device *dev) /* enable kind of interrupts always enabled */ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; - u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; + u32 render_irqs; u32 hotplug_mask; - dev_priv->irq_mask_reg = ~display_mask; - dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; + dev_priv->irq_mask = ~display_mask; /* should always can generate irq */ I915_WRITE(DEIIR, I915_READ(DEIIR)); - I915_WRITE(DEIMR, dev_priv->irq_mask_reg); - I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); + I915_WRITE(DEIMR, dev_priv->irq_mask); + I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); POSTING_READ(DEIER); - if (IS_GEN6(dev)) { - render_mask = - GT_PIPE_NOTIFY | - GT_GEN6_BSD_USER_INTERRUPT | - GT_BLT_USER_INTERRUPT; - } - - dev_priv->gt_irq_mask_reg = ~render_mask; - dev_priv->gt_irq_enable_reg = render_mask; + dev_priv->gt_irq_mask = ~0; I915_WRITE(GTIIR, I915_READ(GTIIR)); - I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); + I915_WRITE(GTIMR, dev_priv->gt_irq_mask); if (IS_GEN6(dev)) { - I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); - I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT); + I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT); + I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT); I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT); } - I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); + if (IS_GEN6(dev)) + render_irqs = + GT_USER_INTERRUPT | + GT_GEN6_BSD_USER_INTERRUPT | + GT_BLT_USER_INTERRUPT; + else + render_irqs = + GT_PIPE_NOTIFY | + GT_BSD_USER_INTERRUPT; + I915_WRITE(GTIER, render_irqs); POSTING_READ(GTIER); if (HAS_PCH_CPT(dev)) { @@ -1539,12 +1569,11 @@ static int ironlake_irq_postinstall(struct drm_device *dev) SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; } - dev_priv->pch_irq_mask_reg = ~hotplug_mask; - dev_priv->pch_irq_enable_reg = hotplug_mask; + dev_priv->pch_irq_mask = ~hotplug_mask; I915_WRITE(SDEIIR, I915_READ(SDEIIR)); - I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg); - I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); + I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); + I915_WRITE(SDEIER, hotplug_mask); POSTING_READ(SDEIER); if (IS_IRONLAKE_M(dev)) { @@ -1594,11 +1623,11 @@ int i915_driver_irq_postinstall(struct drm_device *dev) u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; u32 error_mask; - DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); + DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue); if (HAS_BSD(dev)) - DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); + DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue); if (HAS_BLT(dev)) - DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue); + DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue); dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; @@ -1606,7 +1635,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) return ironlake_irq_postinstall(dev); /* Unmask the interrupts that we always want on. 
*/ - dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; + dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX; dev_priv->pipestat[0] = 0; dev_priv->pipestat[1] = 0; @@ -1615,7 +1644,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) /* Enable in IER... */ enable_mask |= I915_DISPLAY_PORT_INTERRUPT; /* and unmask in IMR */ - dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; + dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; } /* @@ -1633,7 +1662,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) } I915_WRITE(EMR, error_mask); - I915_WRITE(IMR, dev_priv->irq_mask_reg); + I915_WRITE(IMR, dev_priv->irq_mask); I915_WRITE(IER, enable_mask); POSTING_READ(IER); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 06175e98c5bb..3e03094cf148 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -176,6 +176,11 @@ #define MI_BATCH_NON_SECURE (1) #define MI_BATCH_NON_SECURE_I965 (1<<8) #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) +#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ +#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) +#define MI_SEMAPHORE_UPDATE (1<<21) +#define MI_SEMAPHORE_COMPARE (1<<20) +#define MI_SEMAPHORE_REGISTER (1<<18) /* * 3D instructions used by the kernel */ @@ -276,9 +281,12 @@ #define RING_HEAD(base) ((base)+0x34) #define RING_START(base) ((base)+0x38) #define RING_CTL(base) ((base)+0x3c) +#define RING_SYNC_0(base) ((base)+0x40) +#define RING_SYNC_1(base) ((base)+0x44) #define RING_HWS_PGA(base) ((base)+0x80) #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) #define RING_ACTHD(base) ((base)+0x74) +#define RING_NOPID(base) ((base)+0x94) #define TAIL_ADDR 0x001FFFF8 #define HEAD_WRAP_COUNT 0xFFE00000 #define HEAD_WRAP_ONE 0x00200000 @@ -293,6 +301,7 @@ #define RING_INVALID 0x00000000 #define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ #define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ +#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */ #if 0 #define PRB0_TAIL 0x02030 #define PRB0_HEAD 0x02034 @@ -347,6 +356,14 @@ # define VS_TIMER_DISPATCH (1 << 6) # define MI_FLUSH_ENABLE (1 << 11) +#define GFX_MODE 0x02520 +#define GFX_RUN_LIST_ENABLE (1<<15) +#define GFX_TLB_INVALIDATE_ALWAYS (1<<13) +#define GFX_SURFACE_FAULT_ENABLE (1<<12) +#define GFX_REPLAY_MODE (1<<11) +#define GFX_PSMI_GRANULARITY (1<<10) +#define GFX_PPGTT_ENABLE (1<<9) + #define SCPD0 0x0209c /* 915+ only */ #define IER 0x020a0 #define IIR 0x020a4 @@ -498,7 +515,7 @@ #define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) #define GEN6_BSD_IMR 0x120a8 -#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12) +#define GEN6_BSD_USER_INTERRUPT (1 << 12) #define GEN6_BSD_RNCID 0x12198 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3063edd2456f..0b6272a2edfc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1998,7 +1998,7 @@ static void intel_clear_scanline_wait(struct drm_device *dev) /* Can't break the hang on i8xx */ return; - ring = &dev_priv->render_ring; + ring = LP_RING(dev_priv); tmp = I915_READ_CTL(ring); if (tmp & RING_WAIT) I915_WRITE_CTL(ring, tmp); @@ -5124,7 +5124,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, obj = intel_fb->obj; mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(dev, obj, &dev_priv->render_ring); + ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); if (ret) goto cleanup_work; diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 
9b0d9a867aea..f295a7aaadf9 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -273,14 +273,8 @@ void intel_opregion_enable_asle(struct drm_device *dev) struct opregion_asle *asle = dev_priv->opregion.asle; if (asle) { - if (IS_MOBILE(dev)) { - unsigned long irqflags; - - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); + if (IS_MOBILE(dev)) intel_enable_asle(dev); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, - irqflags); - } asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | ASLE_PFMB_EN; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d0c1add393a3..3fbb98b948d6 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -221,7 +221,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, int ret; BUG_ON(overlay->last_flip_req); - ret = i915_add_request(dev, NULL, request, &dev_priv->render_ring); + ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); if (ret) { kfree(request); return ret; @@ -230,7 +230,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, overlay->flip_tail = tail; ret = i915_do_wait_request(dev, overlay->last_flip_req, true, - &dev_priv->render_ring); + LP_RING(dev_priv)); if (ret) return ret; @@ -364,7 +364,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, OUT_RING(flip_addr); ADVANCE_LP_RING(); - ret = i915_add_request(dev, NULL, request, &dev_priv->render_ring); + ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv)); if (ret) { kfree(request); return ret; @@ -454,7 +454,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, return 0; ret = i915_do_wait_request(dev, overlay->last_flip_req, - interruptible, &dev_priv->render_ring); + interruptible, LP_RING(dev_priv)); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 21871b0766e2..f71db0cf4909 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -203,6 +203,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) if (ring->space < 0) ring->space += ring->size; } + return 0; } @@ -281,17 +282,18 @@ cleanup_pipe_control(struct intel_ring_buffer *ring) static int init_render_ring(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; int ret = init_ring_common(ring); if (INTEL_INFO(dev)->gen > 3) { - drm_i915_private_t *dev_priv = dev->dev_private; int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; if (IS_GEN6(dev)) mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; I915_WRITE(MI_MODE, mode); } - if (HAS_PIPE_CONTROL(dev)) { + if (INTEL_INFO(dev)->gen >= 6) { + } else if (HAS_PIPE_CONTROL(dev)) { ret = init_pipe_control(ring); if (ret) return ret; @@ -308,6 +310,80 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring) cleanup_pipe_control(ring); } +static void +update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int id; + + /* + * cs -> 1 = vcs, 0 = bcs + * vcs -> 1 = bcs, 0 = cs, + * bcs -> 1 = cs, 0 = vcs. 
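+ * (Illustrative worked example, not part of the original patch: with the + * computation below, the video ring, array index 1, updating mailbox i == 0 + * gives id = (1 + 2 - 0) % 3 = 0, the render ring, and i == 1 gives + * id = (1 + 2 - 1) % 3 = 2, the blitter; the 4*i term in the register + * offset then selects RING_SYNC_0 or RING_SYNC_1 of that ring.)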
+ */ + id = ring - dev_priv->ring; + id += 2 - i; + id %= 3; + + intel_ring_emit(ring, + MI_SEMAPHORE_MBOX | + MI_SEMAPHORE_REGISTER | + MI_SEMAPHORE_UPDATE); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, + RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i); +} + +static int +gen6_add_request(struct intel_ring_buffer *ring, + u32 *result) +{ + u32 seqno; + int ret; + + ret = intel_ring_begin(ring, 10); + if (ret) + return ret; + + seqno = i915_gem_get_seqno(ring->dev); + update_semaphore(ring, 0, seqno); + update_semaphore(ring, 1, seqno); + + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); + intel_ring_advance(ring); + + *result = seqno; + return 0; +} + +int +intel_ring_sync(struct intel_ring_buffer *ring, + struct intel_ring_buffer *to, + u32 seqno) +{ + int ret; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, + MI_SEMAPHORE_MBOX | + MI_SEMAPHORE_REGISTER | + intel_ring_sync_index(ring, to) << 17 | + MI_SEMAPHORE_COMPARE); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + + return 0; +} + #define PIPE_CONTROL_FLUSH(ring__, addr__) \ do { \ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ @@ -317,131 +393,128 @@ do { \ intel_ring_emit(ring__, 0); \ } while (0) -/** - * Creates a new sequence number, emitting a write of it to the status page - * plus an interrupt, which will trigger i915_user_interrupt_handler. - * - * Must be called with struct_lock held. - * - * Returned sequence numbers are nonzero on success. - */ static int -render_ring_add_request(struct intel_ring_buffer *ring, - u32 *result) +pc_render_add_request(struct intel_ring_buffer *ring, + u32 *result) { struct drm_device *dev = ring->dev; u32 seqno = i915_gem_get_seqno(dev); struct pipe_control *pc = ring->private; + u32 scratch_addr = pc->gtt_offset + 128; int ret; - if (IS_GEN6(dev)) { - ret = intel_ring_begin(ring, 6); - if (ret) - return ret; - - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | 3); - intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - } else if (HAS_PIPE_CONTROL(dev)) { - u32 scratch_addr = pc->gtt_offset + 128; + /* + * Workaround qword write incoherence by flushing the + * PIPE_NOTIFY buffers out to memory before requesting + * an interrupt. + */ + ret = intel_ring_begin(ring, 32); + if (ret) + return ret; - /* - * Workaround qword write incoherence by flushing the - * PIPE_NOTIFY buffers out to memory before requesting - * an interrupt. 
- */ - ret = intel_ring_begin(ring, 32); - if (ret) - return ret; + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; /* write to separate cachelines */ + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | + PIPE_CONTROL_NOTIFY); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; /* write to separate cachelines */ - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - } else { - ret = intel_ring_begin(ring, 4); - if (ret) - return ret; + *result = seqno; + return 0; +} - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, seqno); +static int +render_ring_add_request(struct intel_ring_buffer *ring, + u32 *result) +{ + struct drm_device *dev = ring->dev; + u32 seqno = i915_gem_get_seqno(dev); + int ret; - intel_ring_emit(ring, MI_USER_INTERRUPT); - } + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + intel_ring_emit(ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, MI_USER_INTERRUPT); intel_ring_advance(ring); + *result = seqno; return 0; } static u32 -render_ring_get_seqno(struct intel_ring_buffer *ring) +ring_get_seqno(struct intel_ring_buffer *ring) { - struct drm_device *dev = ring->dev; - if (HAS_PIPE_CONTROL(dev)) { - struct pipe_control *pc = ring->private; - return pc->cpu_page[0]; - } else - return intel_read_status_page(ring, I915_GEM_HWS_INDEX); + return intel_read_status_page(ring, I915_GEM_HWS_INDEX); +} + +static u32 +pc_render_get_seqno(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc = ring->private; + return pc->cpu_page[0]; } static void -render_ring_get_user_irq(struct intel_ring_buffer *ring) +render_ring_get_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; - 
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) { + if (dev->irq_enabled && ++ring->irq_refcount == 1) { + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + if (HAS_PCH_SPLIT(dev)) - ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); + ironlake_enable_graphics_irq(dev_priv, + GT_PIPE_NOTIFY | GT_USER_INTERRUPT); else i915_enable_irq(dev_priv, I915_USER_INTERRUPT); + + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } static void -render_ring_put_user_irq(struct intel_ring_buffer *ring) +render_ring_put_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - unsigned long irqflags; - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0); - if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) { + BUG_ON(dev->irq_enabled && ring->irq_refcount == 0); + if (dev->irq_enabled && --ring->irq_refcount == 0) { + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) - ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); + ironlake_disable_graphics_irq(dev_priv, + GT_USER_INTERRUPT | + GT_PIPE_NOTIFY); else i915_disable_irq(dev_priv, I915_USER_INTERRUPT); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } void intel_ring_setup_status_page(struct intel_ring_buffer *ring) @@ -459,6 +532,9 @@ bsd_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { + if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) + return; + if (intel_ring_begin(ring, 2) == 0) { intel_ring_emit(ring, MI_FLUSH); intel_ring_emit(ring, MI_NOOP); @@ -491,20 +567,45 @@ ring_add_request(struct intel_ring_buffer *ring, } static void -bsd_ring_get_user_irq(struct intel_ring_buffer *ring) +ring_get_irq(struct intel_ring_buffer *ring, u32 flag) { - /* do nothing */ + struct drm_device *dev = ring->dev; + + if (dev->irq_enabled && ++ring->irq_refcount == 1) { + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ironlake_enable_graphics_irq(dev_priv, flag); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } } + static void -bsd_ring_put_user_irq(struct intel_ring_buffer *ring) +ring_put_irq(struct intel_ring_buffer *ring, u32 flag) { - /* do nothing */ + struct drm_device *dev = ring->dev; + + if (dev->irq_enabled && --ring->irq_refcount == 0) { + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned long irqflags; + + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + ironlake_disable_graphics_irq(dev_priv, flag); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + } } -static u32 -ring_status_page_get_seqno(struct intel_ring_buffer *ring) + +static void +bsd_ring_get_irq(struct intel_ring_buffer *ring) { - return intel_read_status_page(ring, I915_GEM_HWS_INDEX); + ring_get_irq(ring, GT_BSD_USER_INTERRUPT); +} +static void +bsd_ring_put_irq(struct intel_ring_buffer *ring) +{ + ring_put_irq(ring, GT_BSD_USER_INTERRUPT); } static int @@ -817,9 +918,9 @@ static const struct intel_ring_buffer render_ring = { .write_tail = ring_write_tail, .flush = 
render_ring_flush, .add_request = render_ring_add_request, - .get_seqno = render_ring_get_seqno, - .user_irq_get = render_ring_get_user_irq, - .user_irq_put = render_ring_put_user_irq, + .get_seqno = ring_get_seqno, + .irq_get = render_ring_get_irq, + .irq_put = render_ring_put_irq, .dispatch_execbuffer = render_ring_dispatch_execbuffer, .cleanup = render_ring_cleanup, }; @@ -835,9 +936,9 @@ static const struct intel_ring_buffer bsd_ring = { .write_tail = ring_write_tail, .flush = bsd_ring_flush, .add_request = ring_add_request, - .get_seqno = ring_status_page_get_seqno, - .user_irq_get = bsd_ring_get_user_irq, - .user_irq_put = bsd_ring_put_user_irq, + .get_seqno = ring_get_seqno, + .irq_get = bsd_ring_get_irq, + .irq_put = bsd_ring_put_irq, .dispatch_execbuffer = ring_dispatch_execbuffer, }; @@ -868,6 +969,9 @@ static void gen6_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { + if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) + return; + if (intel_ring_begin(ring, 4) == 0) { intel_ring_emit(ring, MI_FLUSH_DW); intel_ring_emit(ring, 0); @@ -895,33 +999,46 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, return 0; } +static void +gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) +{ + ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); +} + +static void +gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) +{ + ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); +} + /* ring buffer for Video Codec for Gen6+ */ static const struct intel_ring_buffer gen6_bsd_ring = { - .name = "gen6 bsd ring", - .id = RING_BSD, - .mmio_base = GEN6_BSD_RING_BASE, - .size = 32 * PAGE_SIZE, - .init = init_ring_common, - .write_tail = gen6_bsd_ring_write_tail, - .flush = gen6_ring_flush, - .add_request = ring_add_request, - .get_seqno = ring_status_page_get_seqno, - .user_irq_get = bsd_ring_get_user_irq, - .user_irq_put = bsd_ring_put_user_irq, - .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, + .name = "gen6 bsd ring", + .id = RING_BSD, + .mmio_base = GEN6_BSD_RING_BASE, + .size = 32 * PAGE_SIZE, + .init = init_ring_common, + .write_tail = gen6_bsd_ring_write_tail, + .flush = gen6_ring_flush, + .add_request = gen6_add_request, + .get_seqno = ring_get_seqno, + .irq_get = gen6_bsd_ring_get_irq, + .irq_put = gen6_bsd_ring_put_irq, + .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, }; /* Blitter support (SandyBridge+) */ static void -blt_ring_get_user_irq(struct intel_ring_buffer *ring) +blt_ring_get_irq(struct intel_ring_buffer *ring) { - /* do nothing */ + ring_get_irq(ring, GT_BLT_USER_INTERRUPT); } + static void -blt_ring_put_user_irq(struct intel_ring_buffer *ring) +blt_ring_put_irq(struct intel_ring_buffer *ring) { - /* do nothing */ + ring_put_irq(ring, GT_BLT_USER_INTERRUPT); } @@ -994,6 +1111,9 @@ static void blt_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) { + if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) + return; + if (blt_ring_begin(ring, 4) == 0) { intel_ring_emit(ring, MI_FLUSH_DW); intel_ring_emit(ring, 0); @@ -1003,30 +1123,6 @@ static void blt_ring_flush(struct intel_ring_buffer *ring, } } -static int -blt_ring_add_request(struct intel_ring_buffer *ring, - u32 *result) -{ - u32 seqno; - int ret; - - ret = blt_ring_begin(ring, 4); - if (ret) - return ret; - - seqno = i915_gem_get_seqno(ring->dev); - - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 
MI_USER_INTERRUPT); - intel_ring_advance(ring); - - DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); - *result = seqno; - return 0; -} - static void blt_ring_cleanup(struct intel_ring_buffer *ring) { if (!ring->private) @@ -1045,10 +1141,10 @@ static const struct intel_ring_buffer gen6_blt_ring = { .init = blt_ring_init, .write_tail = ring_write_tail, .flush = blt_ring_flush, - .add_request = blt_ring_add_request, - .get_seqno = ring_status_page_get_seqno, - .user_irq_get = blt_ring_get_user_irq, - .user_irq_put = blt_ring_put_user_irq, + .add_request = gen6_add_request, + .get_seqno = ring_get_seqno, + .irq_get = blt_ring_get_irq, + .irq_put = blt_ring_put_irq, .dispatch_execbuffer = gen6_ring_dispatch_execbuffer, .cleanup = blt_ring_cleanup, }; @@ -1056,36 +1152,43 @@ static const struct intel_ring_buffer gen6_blt_ring = { int intel_init_render_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; - dev_priv->render_ring = render_ring; + *ring = render_ring; + if (INTEL_INFO(dev)->gen >= 6) { + ring->add_request = gen6_add_request; + } else if (HAS_PIPE_CONTROL(dev)) { + ring->add_request = pc_render_add_request; + ring->get_seqno = pc_render_get_seqno; + } if (!I915_NEED_GFX_HWS(dev)) { - dev_priv->render_ring.status_page.page_addr - = dev_priv->status_page_dmah->vaddr; - memset(dev_priv->render_ring.status_page.page_addr, - 0, PAGE_SIZE); + ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; + memset(ring->status_page.page_addr, 0, PAGE_SIZE); } - return intel_init_ring_buffer(dev, &dev_priv->render_ring); + return intel_init_ring_buffer(dev, ring); } int intel_init_bsd_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; if (IS_GEN6(dev)) - dev_priv->bsd_ring = gen6_bsd_ring; + *ring = gen6_bsd_ring; else - dev_priv->bsd_ring = bsd_ring; + *ring = bsd_ring; - return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); + return intel_init_ring_buffer(dev, ring); } int intel_init_blt_ring_buffer(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; - dev_priv->blt_ring = gen6_blt_ring; + *ring = gen6_blt_ring; - return intel_init_ring_buffer(dev, &dev_priv->blt_ring); + return intel_init_ring_buffer(dev, ring); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 8e3526777926..6a3822bc6af2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -1,6 +1,13 @@ #ifndef _INTEL_RINGBUFFER_H_ #define _INTEL_RINGBUFFER_H_ +enum { + RCS = 0x0, + VCS, + BCS, + I915_NUM_RINGS, +}; + struct intel_hw_status_page { u32 __iomem *page_addr; unsigned int gfx_addr; @@ -21,7 +28,10 @@ struct intel_hw_status_page { #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base)) #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val) -struct drm_i915_gem_execbuffer2; +#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base)) +#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base)) +#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base)) + struct intel_ring_buffer { const char *name; enum intel_ring_id { @@ -42,9 +52,10 @@ struct intel_ring_buffer { u32 irq_seqno; /* last seq seem at irq time */ u32 waiting_seqno; - int user_irq_refcount; - void (*user_irq_get)(struct 
intel_ring_buffer *ring); - void (*user_irq_put)(struct intel_ring_buffer *ring); + u32 sync_seqno[I915_NUM_RINGS-1]; + u32 irq_refcount; + void (*irq_get)(struct intel_ring_buffer *ring); + void (*irq_put)(struct intel_ring_buffer *ring); int (*init)(struct intel_ring_buffer *ring); @@ -98,6 +109,25 @@ struct intel_ring_buffer { void *private; }; +static inline u32 +intel_ring_sync_index(struct intel_ring_buffer *ring, + struct intel_ring_buffer *other) +{ + int idx; + + /* + * cs -> 0 = vcs, 1 = bcs + * vcs -> 0 = bcs, 1 = cs, + * bcs -> 0 = cs, 1 = vcs. + */ + + idx = (other - ring) - 1; + if (idx < 0) + idx += I915_NUM_RINGS; + + return idx; +} + static inline u32 intel_read_status_page(struct intel_ring_buffer *ring, int reg) @@ -119,6 +149,9 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring, void intel_ring_advance(struct intel_ring_buffer *ring); u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); +int intel_ring_sync(struct intel_ring_buffer *ring, + struct intel_ring_buffer *to, + u32 seqno); int intel_init_render_ring_buffer(struct drm_device *dev); int intel_init_bsd_ring_buffer(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 2f7681989316..93206e4eaa6f 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1245,10 +1245,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv) int type; /* Disable TV interrupts around load detect or we'll recurse */ - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_disable_pipestat(dev_priv, 0, + PIPE_HOTPLUG_INTERRUPT_ENABLE | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); save_tv_dac = tv_dac = I915_READ(TV_DAC); save_tv_ctl = tv_ctl = I915_READ(TV_CTL); @@ -1301,10 +1302,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv) I915_WRITE(TV_CTL, save_tv_ctl); /* Restore interrupt config */ - spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); + i915_enable_pipestat(dev_priv, 0, + PIPE_HOTPLUG_INTERRUPT_ENABLE | PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); - spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); + spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return type; } -- cgit v1.2.3 From f7746f0e1f271fbc64b57c29873c47f73e28e5a0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 4 Dec 2010 23:48:40 +0000 Subject: drm/i915: Enable self-refresh for Ironlake We disabled this a while ago as it was inexplicably broken. However, it now appears to work... Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0b6272a2edfc..83446b55f731 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3456,7 +3456,7 @@ static void ironlake_update_wm(struct drm_device *dev, * display plane is used. */ tmp = 0; - if (enabled == 1 && /* XXX disabled due to buggy implmentation? 
*/ 0) { + if (enabled == 1) { unsigned long line_time_us; int small, large, plane_fbc; int sr_clock, entries; -- cgit v1.2.3 From 160b1543cdae83e9f8914ac7afc3d2bd686140af Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 5 Dec 2010 14:54:05 +0000 Subject: drm/i915/dp: Trivial code tidy Locally scope the crtc to where it is used. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_dp.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1d8d068bc473..2c9601f2f208 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1374,7 +1374,6 @@ intel_dp_link_down(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); uint32_t DP = intel_dp->DP; DRM_DEBUG_KMS("\n"); @@ -1401,6 +1400,8 @@ intel_dp_link_down(struct intel_dp *intel_dp) DP |= DP_LINK_TRAIN_OFF; if (!HAS_PCH_CPT(dev) && (DP & DP_PIPEB_SELECT)) { + struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); + /* Hardware workaround: leaving our transcoder select * set to transcoder B while it's off will prevent the * corresponding HDMI output on transcoder A. @@ -1415,8 +1416,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) /* Changes to enable or select take place the vblank * after being written. */ - intel_wait_for_vblank(intel_dp->base.base.dev, - intel_crtc->pipe); + intel_wait_for_vblank(dev, intel_crtc->pipe); } I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); -- cgit v1.2.3 From 88f23b8fa3e6357c423af24ec31c661fc12f884b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 5 Dec 2010 15:08:31 +0000 Subject: drm/i915: Avoid using PIPE_CONTROL on Ironlake The workaround is hideous and we are using the STORE_DWORD on all other generations on all other rings, so use it for the gen5 render ring as well. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 4 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 159 +------------------------------- 2 files changed, 4 insertions(+), 159 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5e831b7eb3f1..02e4dd82f754 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -349,7 +349,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) READ_BREADCRUMB(dev_priv); } - if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) + if (gt_iir & GT_USER_INTERRUPT) notify_ring(dev, &dev_priv->ring[RCS]); if (gt_iir & bsd_usr_interrupt) notify_ring(dev, &dev_priv->ring[VCS]); @@ -1556,7 +1556,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) GT_BLT_USER_INTERRUPT; else render_irqs = - GT_PIPE_NOTIFY | + GT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; I915_WRITE(GTIER, render_irqs); POSTING_READ(GTIER); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index f71db0cf4909..0ee78525959a 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -207,78 +207,6 @@ static int init_ring_common(struct intel_ring_buffer *ring) return 0; } -/* - * 965+ support PIPE_CONTROL commands, which provide finer grained control - * over cache flushing.
- */ -struct pipe_control { - struct drm_i915_gem_object *obj; - volatile u32 *cpu_page; - u32 gtt_offset; -}; - -static int -init_pipe_control(struct intel_ring_buffer *ring) -{ - struct pipe_control *pc; - struct drm_i915_gem_object *obj; - int ret; - - if (ring->private) - return 0; - - pc = kmalloc(sizeof(*pc), GFP_KERNEL); - if (!pc) - return -ENOMEM; - - obj = i915_gem_alloc_object(ring->dev, 4096); - if (obj == NULL) { - DRM_ERROR("Failed to allocate seqno page\n"); - ret = -ENOMEM; - goto err; - } - obj->agp_type = AGP_USER_CACHED_MEMORY; - - ret = i915_gem_object_pin(obj, 4096, true); - if (ret) - goto err_unref; - - pc->gtt_offset = obj->gtt_offset; - pc->cpu_page = kmap(obj->pages[0]); - if (pc->cpu_page == NULL) - goto err_unpin; - - pc->obj = obj; - ring->private = pc; - return 0; - -err_unpin: - i915_gem_object_unpin(obj); -err_unref: - drm_gem_object_unreference(&obj->base); -err: - kfree(pc); - return ret; -} - -static void -cleanup_pipe_control(struct intel_ring_buffer *ring) -{ - struct pipe_control *pc = ring->private; - struct drm_i915_gem_object *obj; - - if (!ring->private) - return; - - obj = pc->obj; - kunmap(obj->pages[0]); - i915_gem_object_unpin(obj); - drm_gem_object_unreference(&obj->base); - - kfree(pc); - ring->private = NULL; -} - static int init_render_ring(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; @@ -292,24 +220,9 @@ static int init_render_ring(struct intel_ring_buffer *ring) I915_WRITE(MI_MODE, mode); } - if (INTEL_INFO(dev)->gen >= 6) { - } else if (HAS_PIPE_CONTROL(dev)) { - ret = init_pipe_control(ring); - if (ret) - return ret; - } - return ret; } -static void render_ring_cleanup(struct intel_ring_buffer *ring) -{ - if (!ring->private) - return; - - cleanup_pipe_control(ring); -} - static void update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno) { @@ -384,62 +297,6 @@ intel_ring_sync(struct intel_ring_buffer *ring, return 0; } -#define PIPE_CONTROL_FLUSH(ring__, addr__) \ -do { \ - intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ - PIPE_CONTROL_DEPTH_STALL | 2); \ - intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ - intel_ring_emit(ring__, 0); \ - intel_ring_emit(ring__, 0); \ -} while (0) - -static int -pc_render_add_request(struct intel_ring_buffer *ring, - u32 *result) -{ - struct drm_device *dev = ring->dev; - u32 seqno = i915_gem_get_seqno(dev); - struct pipe_control *pc = ring->private; - u32 scratch_addr = pc->gtt_offset + 128; - int ret; - - /* - * Workaround qword write incoherence by flushing the - * PIPE_NOTIFY buffers out to memory before requesting - * an interrupt. 
- */ - ret = intel_ring_begin(ring, 32); - if (ret) - return ret; - - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; /* write to separate cachelines */ - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - scratch_addr += 128; - PIPE_CONTROL_FLUSH(ring, scratch_addr); - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | - PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | - PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, seqno); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); - - *result = seqno; - return 0; -} - static int render_ring_add_request(struct intel_ring_buffer *ring, u32 *result) @@ -468,13 +325,6 @@ ring_get_seqno(struct intel_ring_buffer *ring) return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } -static u32 -pc_render_get_seqno(struct intel_ring_buffer *ring) -{ - struct pipe_control *pc = ring->private; - return pc->cpu_page[0]; -} - static void render_ring_get_irq(struct intel_ring_buffer *ring) { @@ -488,7 +338,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring) if (HAS_PCH_SPLIT(dev)) ironlake_enable_graphics_irq(dev_priv, - GT_PIPE_NOTIFY | GT_USER_INTERRUPT); + GT_USER_INTERRUPT); else i915_enable_irq(dev_priv, I915_USER_INTERRUPT); @@ -509,8 +359,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) ironlake_disable_graphics_irq(dev_priv, - GT_USER_INTERRUPT | - GT_PIPE_NOTIFY); + GT_USER_INTERRUPT); else i915_disable_irq(dev_priv, I915_USER_INTERRUPT); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -922,7 +771,6 @@ static const struct intel_ring_buffer render_ring = { .irq_get = render_ring_get_irq, .irq_put = render_ring_put_irq, .dispatch_execbuffer = render_ring_dispatch_execbuffer, - .cleanup = render_ring_cleanup, }; /* ring buffer for bit-stream decoder */ @@ -1157,9 +1005,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev) *ring = render_ring; if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; - } else if (HAS_PIPE_CONTROL(dev)) { - ring->add_request = pc_render_add_request; - ring->get_seqno = pc_render_get_seqno; } if (!I915_NEED_GFX_HWS(dev)) { -- cgit v1.2.3 From 3c8cdf9b60b98c5b408e2cfbcab3160e25e5af5a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 5 Dec 2010 16:45:02 +0000 Subject: drm/i915: Power Context register is only available for gen4 mobiles The ability to save the hardware context upon powering down the render clock through PWRCTXA is only available on a couple of gen4 chipsets. 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a95c69392afc..aba1c33f6407 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5973,7 +5973,7 @@ void intel_init_clock_gating(struct drm_device *dev) "Disable RC6\n"); } - if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { + if (IS_GEN4(dev) && IS_MOBILE(dev)) { if (dev_priv->pwrctx == NULL) dev_priv->pwrctx = intel_alloc_context_page(dev); if (dev_priv->pwrctx) { -- cgit v1.2.3 From e3c4e5dd5ad1993a3687862c982272f8f00cae30 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 5 Dec 2010 16:49:51 +0000 Subject: drm/i915: caps.has_rc6 is no longer used, remove it. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 1 - drivers/gpu/drm/i915/i915_drv.c | 6 +++--- drivers/gpu/drm/i915/i915_drv.h | 2 -- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index aedb02157474..22821994b35a 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -73,7 +73,6 @@ static int i915_capabilities(struct seq_file *m, void *data) B(is_broadwater); B(is_crestline); B(has_fbc); - B(has_rc6); B(has_pipe_cxsr); B(has_hotplug); B(cursor_needs_physical); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 413a040386a9..91a3ad2cf942 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -111,7 +111,7 @@ static const struct intel_device_info intel_i965g_info = { static const struct intel_device_info intel_i965gm_info = { .gen = 4, .is_crestline = 1, - .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, + .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, .has_overlay = 1, .supports_tv = 1, }; @@ -130,7 +130,7 @@ static const struct intel_device_info intel_g45_info = { static const struct intel_device_info intel_gm45_info = { .gen = 4, .is_g4x = 1, - .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, + .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, .supports_tv = 1, .has_bsd_ring = 1, @@ -150,7 +150,7 @@ static const struct intel_device_info intel_ironlake_d_info = { static const struct intel_device_info intel_ironlake_m_info = { .gen = 5, .is_mobile = 1, - .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1, + .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 0, /* disabled due to buggy hardware */ .has_bsd_ring = 1, }; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8b19b5806230..d9b54a27ccf8 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -223,7 +223,6 @@ struct intel_device_info { u8 is_broadwater : 1; u8 is_crestline : 1; u8 has_fbc : 1; - u8 has_rc6 : 1; u8 has_pipe_cxsr : 1; u8 has_hotplug : 1; u8 cursor_needs_physical : 1; @@ -930,7 +929,6 @@ enum intel_chip_family { #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) -#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) #define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev)) #define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev)) -- cgit v1.2.3 From 9b3826bf8420e7280d9fffa233d93a7b85dcb1db Mon Sep 17 00:00:00 2001 From: Chris Wilson 
Date: Sun, 5 Dec 2010 17:11:54 +0000 Subject: drm/i915: Ignore fenced commands for gpu access on gen4 Userspace should not have been declaring that it needed fenced GPU access with gen4+ as those GPUs have no fenced commands, but to be on the safe side it is easier to ignore userspace in case they did. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 6fc9cc485781..1b2ceacd64f0 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -432,6 +432,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, struct drm_i915_gem_object *obj; struct drm_i915_gem_exec_object2 *entry; int ret, retry; + bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; /* Attempt to pin all of the buffers into the GTT. * This is done in 3 phases: @@ -460,6 +461,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, } need_fence = + has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; need_mappable = @@ -484,6 +486,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, bool need_fence; need_fence = + has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; @@ -498,18 +501,20 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, break; } - if (need_fence) { - ret = i915_gem_object_get_fence(obj, ring, 1); - if (ret) - break; - } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj->tiling_mode == I915_TILING_NONE) { - /* XXX pipelined! */ - ret = i915_gem_object_put_fence(obj); - if (ret) - break; + if (has_fenced_gpu_access) { + if (need_fence) { + ret = i915_gem_object_get_fence(obj, ring, 1); + if (ret) + break; + } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode == I915_TILING_NONE) { + /* XXX pipelined! 
*/ + ret = i915_gem_object_put_fence(obj); + if (ret) + break; + } + obj->pending_fenced_gpu_access = need_fence; } - obj->pending_fenced_gpu_access = need_fence; entry->offset = obj->gtt_offset; entry++; -- cgit v1.2.3 From 0cdab21f9a1fca50dd27e488839f5a6578e333b2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 5 Dec 2010 17:27:06 +0000 Subject: drm/i915: Uncouple render/power ctx before suspending Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_suspend.c | 4 ++- drivers/gpu/drm/i915/intel_display.c | 55 ++++++++++++++++++++---------------- drivers/gpu/drm/i915/intel_drv.h | 3 +- 3 files changed, 36 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 011325e51e3a..a311809f3c80 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -820,6 +820,8 @@ int i915_save_state(struct drm_device *dev) if (HAS_PCH_SPLIT(dev)) ironlake_disable_drps(dev); + intel_disable_clock_gating(dev); + /* Cache mode state */ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); @@ -863,7 +865,7 @@ int i915_restore_state(struct drm_device *dev) } /* Clock gating state */ - intel_init_clock_gating(dev); + intel_enable_clock_gating(dev); if (HAS_PCH_SPLIT(dev)) { ironlake_enable_drps(dev); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index aba1c33f6407..31fc1d797f8f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5828,7 +5828,7 @@ void intel_init_emon(struct drm_device *dev) dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); } -void intel_init_clock_gating(struct drm_device *dev) +void intel_enable_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -5985,6 +5985,33 @@ void intel_init_clock_gating(struct drm_device *dev) } } +void intel_disable_clock_gating(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->renderctx) { + struct drm_i915_gem_object *obj = dev_priv->renderctx; + + I915_WRITE(CCID, 0); + POSTING_READ(CCID); + + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + dev_priv->renderctx = NULL; + } + + if (dev_priv->pwrctx) { + struct drm_i915_gem_object *obj = dev_priv->pwrctx; + + I915_WRITE(PWRCTXA, 0); + POSTING_READ(PWRCTXA); + + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + dev_priv->pwrctx = NULL; + } +} + /* Set up chip specific display functions */ static void intel_init_display(struct drm_device *dev) { @@ -6211,7 +6238,7 @@ void intel_modeset_init(struct drm_device *dev) intel_setup_outputs(dev); - intel_init_clock_gating(dev); + intel_enable_clock_gating(dev); /* Just disable it once at startup */ i915_disable_vga(dev); @@ -6252,31 +6279,11 @@ void intel_modeset_cleanup(struct drm_device *dev) if (dev_priv->display.disable_fbc) dev_priv->display.disable_fbc(dev); - if (dev_priv->renderctx) { - struct drm_i915_gem_object *obj = dev_priv->renderctx; - - I915_WRITE(CCID, obj->gtt_offset &~ CCID_EN); - POSTING_READ(CCID); - - i915_gem_object_unpin(obj); - drm_gem_object_unreference(&obj->base); - dev_priv->renderctx = NULL; - } - - if (dev_priv->pwrctx) { - struct drm_i915_gem_object *obj = dev_priv->pwrctx; - - I915_WRITE(PWRCTXA, obj->gtt_offset &~ PWRCTX_EN); - POSTING_READ(PWRCTXA); - - i915_gem_object_unpin(obj); - drm_gem_object_unreference(&obj->base); - dev_priv->pwrctx = NULL; - } - if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); + 
intel_disable_clock_gating(dev); + mutex_unlock(&dev->struct_mutex); /* Disable the irq before mode object teardown, for the irq might diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 7273c9e7e0dc..acdea6549ec4 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -294,7 +294,8 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno); -extern void intel_init_clock_gating(struct drm_device *dev); +extern void intel_enable_clock_gating(struct drm_device *dev); +extern void intel_disable_clock_gating(struct drm_device *dev); extern void ironlake_enable_drps(struct drm_device *dev); extern void ironlake_disable_drps(struct drm_device *dev); extern void intel_init_emon(struct drm_device *dev); -- cgit v1.2.3 From 6bda10d152735c22baf1dcd92937420b4b0a359a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 5 Dec 2010 21:04:18 +0000 Subject: drm/i915: Completely disable fence pipelining. I'm still seeing tiling corruption of PutImage and CopyArea (I think) under mutter on pnv, so obviously the pipelining logic is deeply flawed. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index eeed2e99d247..8685b9279029 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2484,6 +2484,9 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, struct drm_i915_fence_reg *reg; int ret; + /* XXX disable pipelining. There are bugs. Shocking. */ + pipelined = NULL; + /* Just update our place in the LRU if our fence is getting reused. */ if (obj->fence_reg != I915_FENCE_REG_NONE) { reg = &dev_priv->fence_regs[obj->fence_reg]; @@ -2556,9 +2559,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj, if (old->tiling_mode) i915_gem_release_mmap(old); - /* XXX The pipelined change over appears to be incoherent. 
*/ ret = i915_gem_object_flush_fence(old, - NULL, //pipelined, + pipelined, interruptible); if (ret) { drm_gem_object_unreference(&old->base); -- cgit v1.2.3 From 0ac74c6b3386f0413baf48f6d61850650e0dc4ab Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 6 Dec 2010 14:36:02 +0000 Subject: drm/i915: Only emit a flush if there is an outstanding gpu write Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8685b9279029..e7733333767d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2154,8 +2154,9 @@ static int i915_ring_idle(struct drm_device *dev, if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) return 0; - i915_gem_flush_ring(dev, ring, - I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + if (!list_empty(&ring->gpu_write_list)) + i915_gem_flush_ring(dev, ring, + I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); return i915_wait_request(dev, i915_gem_next_request_seqno(dev, ring), ring); -- cgit v1.2.3 From 0be732841fb925b6f1242211ea211c022b6ac26c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 6 Dec 2010 14:36:27 +0000 Subject: drm/i915: Wait for the bo if a display flip is pipelined on the other ring Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e7733333767d..b57ce033e42a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2926,7 +2926,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj, i915_gem_object_flush_gpu_write_domain(obj); /* Currently, we are always called from an non-interruptible context. */ - if (!pipelined) { + if (pipelined != obj->ring) { ret = i915_gem_object_wait_rendering(obj, false); if (ret) return ret; -- cgit v1.2.3 From c57802706aad9f179f2219415717896b3c177845 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 7 Dec 2010 23:04:14 +0000 Subject: drm/i915: Disable renderctx powersaving support for Ironlake ... still causes a failure during suspend. Reported-by: Daniel Vetter Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 31fc1d797f8f..1ccf2ad7cd88 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5952,7 +5952,7 @@ void intel_enable_clock_gating(struct drm_device *dev) * GPU can automatically power down the render unit if given a page * to save state. */ - if (IS_IRONLAKE_M(dev)) { + if (IS_IRONLAKE_M(dev) && 0) { /* XXX causes a failure during suspend */ if (dev_priv->renderctx == NULL) dev_priv->renderctx = intel_alloc_context_page(dev); if (dev_priv->renderctx) { -- cgit v1.2.3 From ca130c2267d0719c92ed188e15082d6baad6c046 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sat, 20 Nov 2010 14:42:57 +0100 Subject: drm/nv04-nv40: Give "gpuobj->cinst" the same meaning as on nv50. No functional changes, just simplify some code paths a bit. 
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_object.c | 6 +----- drivers/gpu/drm/nouveau/nouveau_ramht.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 6 ------ 3 files changed, 3 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 924653c30783..2fb7e9d47500 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -202,11 +202,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, if (gpuobj->pinst != ~0) gpuobj->pinst += ramin->start; - if (dev_priv->card_type < NV_50) - gpuobj->cinst = gpuobj->pinst; - else - gpuobj->cinst = ramin->start; - + gpuobj->cinst = ramin->start; gpuobj->vinst = ramin->start + chan->ramin->vinst; gpuobj->node = ramin; } else { diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c index d4a2adc927d8..bef3e6910418 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.c +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c @@ -104,12 +104,12 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, nouveau_gpuobj_ref(gpuobj, &entry->gpuobj); if (dev_priv->card_type < NV_40) { - ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) | + ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) | (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); } else if (dev_priv->card_type < NV_50) { - ctx = (gpuobj->cinst >> 4) | + ctx = (gpuobj->pinst >> 4) | (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT); } else { diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index db32644f6114..a0bf130b02d3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -241,12 +241,6 @@ nouveau_sgdma_init(struct drm_device *dev) } if (dev_priv->card_type < NV_50) { - /* special case, allocated from global instmem heap so - * cinst is invalid, we use it on all channels though so - * cinst needs to be valid, set it the same as pinst - */ - gpuobj->cinst = gpuobj->pinst; - nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | (1 << 12) /* PT present */ | (0 << 13) /* PT *not* linear */ | -- cgit v1.2.3 From 38cf189fa13e988f85efb6de26315e762cecc260 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sat, 20 Nov 2010 14:43:51 +0100 Subject: drm/nv04-nv10: Don't re-enable FIFO access multiple times after IRQ dispatch. nvxx_graph_isr is already taking care of it. In some cases this could've made you miss PGRAPH interrupts (e.g. when you were supposed to get several IRQs of the same kind in a row). 
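As an illustrative aside (not part of the patch; every identifier below is a placeholder rather than the driver's real API): the ownership rule being enforced is that the top-level graphics ISR alone brackets PGRAPH FIFO access around dispatch, so per-event helpers such as the context-switch path must leave it untouched:

	/* Sketch of the intended ownership: FIFO access is disabled once
	 * on entry and re-enabled once on exit.  If a helper re-enabled
	 * it early, a second pending IRQ of the same kind could be
	 * dispatched before the first had been fully handled. */
	static void graph_isr(struct drm_device *dev)
	{
		pgraph_fifo_access(dev, false);
		while (pgraph_irq_pending(dev))
			pgraph_dispatch_one(dev);
		pgraph_fifo_access(dev, true);
	}
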
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv04_graph.c | 3 --- drivers/gpu/drm/nouveau/nv10_graph.c | 12 ------------ 2 files changed, 15 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index 0bc616d35eb6..af75015068d6 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c @@ -373,7 +373,6 @@ nv04_graph_context_switch(struct drm_device *dev) struct nouveau_channel *chan = NULL; int chid; - pgraph->fifo_access(dev, false); nouveau_wait_for_idle(dev); /* If previous context is valid, we need to save it */ @@ -384,8 +383,6 @@ nv04_graph_context_switch(struct drm_device *dev) chan = dev_priv->channels.ptr[chid]; if (chan) nv04_graph_load_context(chan); - - pgraph->fifo_access(dev, true); } static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg) diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index 536b39e4a9e2..8c92edb7bbcd 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c @@ -794,11 +794,9 @@ static void nv10_graph_context_switch(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_channel *chan = NULL; int chid; - pgraph->fifo_access(dev, false); nouveau_wait_for_idle(dev); /* If previous context is valid, we need to save it */ @@ -809,8 +807,6 @@ nv10_graph_context_switch(struct drm_device *dev) chan = dev_priv->channels.ptr[chid]; if (chan && chan->pgraph_ctx) nv10_graph_load_context(chan); - - pgraph->fifo_access(dev, true); } #define NV_WRITE_CTX(reg, val) do { \ @@ -980,8 +976,6 @@ nv17_graph_mthd_lma_window(struct nouveau_channel *chan, struct drm_device *dev = chan->dev; struct graph_state *ctx = chan->pgraph_ctx; struct pipe_state *pipe = &ctx->pipe_state; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3]; uint32_t xfmode0, xfmode1; int i; @@ -1048,8 +1042,6 @@ nv17_graph_mthd_lma_window(struct nouveau_channel *chan, nouveau_wait_for_idle(dev); - pgraph->fifo_access(dev, true); - return 0; } @@ -1058,8 +1050,6 @@ nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data) { struct drm_device *dev = chan->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; nouveau_wait_for_idle(dev); @@ -1068,8 +1058,6 @@ nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, nv_wr32(dev, 0x004006b0, nv_rd32(dev, 0x004006b0) | 0x8 << 24); - pgraph->fifo_access(dev, true); - return 0; } -- cgit v1.2.3 From 6dccd311dd4b104b3bc53cb67aef414141d11c9f Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 18 Nov 2010 23:57:46 +0100 Subject: drm/nouveau: Synchronize with the user channel before GPU object destruction. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There have been reports of PFIFO cache errors during context take down (fdo bug 31637). They are caused by some GPU objects being taken out while the channel is still potentially processing commands. Make sure that all the previous rendering has landed before releasing a GPU object. 
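As an aside, the fix boils down to the idle-by-fence pattern that this patch factors out into nouveau_channel_idle(): emit one final fence and block until it signals, at which point every command submitted before it is known to have retired and the object can be torn down safely. Stripped of error reporting, the helper added below has the shape:

	struct nouveau_fence *fence = NULL;
	int ret;

	nouveau_fence_update(chan);
	if (chan->fence.sequence != chan->fence.sequence_ack) {
		ret = nouveau_fence_new(chan, &fence, true);
		if (!ret) {
			ret = nouveau_fence_wait(fence, false, false);
			nouveau_fence_unref(&fence);
		}
	}
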
Reported-by: Grzesiek Sójka Reported-by: Patrice Mandin Signed-off-by: Francisco Jerez Acked-by: Ben Skeggs Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 36 +++++++++++++++++++------------ drivers/gpu/drm/nouveau/nouveau_drv.c | 16 ++------------ drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nouveau_object.c | 3 +++ 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 0f33132fba3b..3e49babd62ac 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -284,7 +284,6 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan) struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; unsigned long flags; - int ret; /* decrement the refcount, and we're done if there's still refs */ if (likely(!atomic_dec_and_test(&chan->users))) { @@ -297,19 +296,7 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan) nouveau_debugfs_channel_fini(chan); /* give it chance to idle */ - nouveau_fence_update(chan); - if (chan->fence.sequence != chan->fence.sequence_ack) { - struct nouveau_fence *fence = NULL; - - ret = nouveau_fence_new(chan, &fence, true); - if (ret == 0) { - ret = nouveau_fence_wait(fence, false, false); - nouveau_fence_unref(&fence); - } - - if (ret) - NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); - } + nouveau_channel_idle(chan); /* ensure all outstanding fences are signaled. they should be if the * above attempts at idling were OK, but if we failed this'll tell TTM @@ -388,6 +375,27 @@ nouveau_channel_ref(struct nouveau_channel *chan, *pchan = chan; } +void +nouveau_channel_idle(struct nouveau_channel *chan) +{ + struct drm_device *dev = chan->dev; + struct nouveau_fence *fence = NULL; + int ret; + + nouveau_fence_update(chan); + + if (chan->fence.sequence != chan->fence.sequence_ack) { + ret = nouveau_fence_new(chan, &fence, true); + if (!ret) { + ret = nouveau_fence_wait(fence, false, false); + nouveau_fence_unref(&fence); + } + + if (ret) + NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); + } +} + /* cleans up all the fifos from file_priv */ void nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 7ff5b4369f03..a48c7da133d2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -197,22 +197,10 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) NV_INFO(dev, "Idling channels...\n"); for (i = 0; i < pfifo->channels; i++) { - struct nouveau_fence *fence = NULL; - chan = dev_priv->channels.ptr[i]; - if (!chan || !chan->pushbuf_bo) - continue; - - ret = nouveau_fence_new(chan, &fence, true); - if (ret == 0) { - ret = nouveau_fence_wait(fence, false, false); - nouveau_fence_unref(&fence); - } - if (ret) { - NV_ERROR(dev, "Failed to idle channel %d for suspend\n", - chan->id); - } + if (chan && chan->pushbuf_bo) + nouveau_channel_idle(chan); } pgraph->fifo_access(dev, false); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index a52b1da32031..d001453e857b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -847,6 +847,7 @@ extern void nouveau_channel_put_unlocked(struct nouveau_channel **); extern void nouveau_channel_put(struct 
nouveau_channel **); extern void nouveau_channel_ref(struct nouveau_channel *chan, struct nouveau_channel **pchan); +extern void nouveau_channel_idle(struct nouveau_channel *chan); /* nouveau_object.c */ #define NVOBJ_CLASS(d,c,e) do { \ diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 2fb7e9d47500..24540862a23f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -1017,6 +1017,9 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, if (IS_ERR(chan)) return PTR_ERR(chan); + /* Synchronize with the user channel */ + nouveau_channel_idle(chan); + ret = nouveau_ramht_remove(chan, objfree->handle); nouveau_channel_put(&chan); return ret; -- cgit v1.2.3 From a3d487ea5463a9c091b7f9b836b860cf3d82e641 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Sat, 20 Nov 2010 22:11:22 +0100 Subject: drm/nouveau: Use WC memory on the AGP GART. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index f4ee43db00aa..f6f51b4259e7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -424,8 +424,9 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, switch (dev_priv->gart_info.type) { case NOUVEAU_GART_AGP: man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_FLAG_UNCACHED; - man->default_caching = TTM_PL_FLAG_UNCACHED; + man->available_caching = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; break; case NOUVEAU_GART_SGDMA: man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | -- cgit v1.2.3 From 395a31ec7ed9b02c5412f4405acbd6fceacca0fc Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 25 Nov 2010 16:37:17 +0100 Subject: drm/nouveau: Spin for a bit in nouveau_fence_wait() before yielding the CPU. Sleeping doesn't pay off for very short delays in comparison with the minimum granularity of schedule_timeout(). Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_fence.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 2579fc69d182..abfeff19488e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -218,6 +218,7 @@ int __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) { unsigned long timeout = jiffies + (3 * DRM_HZ); + unsigned long sleep_time = jiffies + 1; int ret = 0; while (1) { @@ -231,7 +232,7 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); - if (lazy) + if (lazy && time_after_eq(jiffies, sleep_time)) schedule_timeout(1); if (intr && signal_pending(current)) { -- cgit v1.2.3 From d908175cca901b95ba1628428b216e6e7188e8fb Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 22 Nov 2010 16:05:54 +1000 Subject: drm/nouveau: make fifo.create_context() responsible for mapping control regs The regs belong to PFIFO, they're different for pretty much the same generations we need different PFIFO control for, and NVC0 is going to be even more different than the rest. 
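As an aside, the practical effect is that the card_type ladder in the common channel code (selecting NV03_USER/NV40_USER/NV50_USER before a single ioremap, visible in the removed hunk below) disappears, and each generation's create_context() maps its own per-channel control window. The NV40 variant added by this patch has the shape:

	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
			     NV40_USER(chan->id), PAGE_SIZE);
	if (!chan->user)
		return -ENOMEM;

with the matching iounmap() moved into destroy_context(); nv04/nv10 use NV03_USER and nv50 uses NV50_USER, per the remaining hunks.
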
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 22 +--------------------- drivers/gpu/drm/nouveau/nv04_fifo.c | 9 +++++++++ drivers/gpu/drm/nouveau/nv10_fifo.c | 5 +++++ drivers/gpu/drm/nouveau/nv40_fifo.c | 5 +++++ drivers/gpu/drm/nouveau/nv50_fifo.c | 9 +++++++++ 5 files changed, 29 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 3e49babd62ac..a3d33a582a98 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -113,7 +113,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_channel *chan; unsigned long flags; - int user, ret; + int ret; /* allocate and lock channel structure */ chan = kzalloc(sizeof(*chan), GFP_KERNEL); @@ -160,23 +160,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, } nouveau_dma_pre_init(chan); - - /* Locate channel's user control regs */ - if (dev_priv->card_type < NV_40) - user = NV03_USER(chan->id); - else - if (dev_priv->card_type < NV_50) - user = NV40_USER(chan->id); - else - user = NV50_USER(chan->id); - - chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user, - PAGE_SIZE); - if (!chan->user) { - NV_ERROR(dev, "ioremap of regs failed.\n"); - nouveau_channel_put(&chan); - return -ENOMEM; - } chan->user_put = 0x40; chan->user_get = 0x44; @@ -356,9 +339,6 @@ nouveau_channel_del(struct kref *ref) struct nouveau_channel *chan = container_of(ref, struct nouveau_channel, ref); - if (chan->user) - iounmap(chan->user); - kfree(chan); } diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index a32ba8ccaae6..f89d104698df 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c @@ -129,6 +129,11 @@ nv04_fifo_create_context(struct nouveau_channel *chan) if (ret) return ret; + chan->user = ioremap(pci_resource_start(dev->pdev, 0) + + NV03_USER(chan->id), PAGE_SIZE); + if (!chan->user) + return -ENOMEM; + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); /* Setup initial state */ @@ -173,6 +178,10 @@ nv04_fifo_destroy_context(struct nouveau_channel *chan) spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); /* Free the channel resources */ + if (chan->user) { + iounmap(chan->user); + chan->user = NULL; + } nouveau_gpuobj_ref(NULL, &chan->ramfc); } diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c index acb9216e6d0a..d2ecbff4bee1 100644 --- a/drivers/gpu/drm/nouveau/nv10_fifo.c +++ b/drivers/gpu/drm/nouveau/nv10_fifo.c @@ -53,6 +53,11 @@ nv10_fifo_create_context(struct nouveau_channel *chan) if (ret) return ret; + chan->user = ioremap(pci_resource_start(dev->pdev, 0) + + NV03_USER(chan->id), PAGE_SIZE); + if (!chan->user) + return -ENOMEM; + /* Fill entries that are seen filled in dumps of nvidia driver just * after channel's is put into DMA mode */ diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index f6b3580c685a..c86e4d4e9b96 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c @@ -47,6 +47,11 @@ nv40_fifo_create_context(struct nouveau_channel *chan) if (ret) return ret; + chan->user = ioremap(pci_resource_start(dev->pdev, 0) + + NV40_USER(chan->id), PAGE_SIZE); + if (!chan->user) + return -ENOMEM; + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); 
nv_wi32(dev, fc + 0, chan->pushbuf_base); diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index ed18952ae7f4..7add3dfde3df 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -261,6 +261,11 @@ nv50_fifo_create_context(struct nouveau_channel *chan) } ramfc = chan->ramfc; + chan->user = ioremap(pci_resource_start(dev->pdev, 0) + + NV50_USER(chan->id), PAGE_SIZE); + if (!chan->user) + return -ENOMEM; + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4); @@ -327,6 +332,10 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan) spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); /* Free the channel resources */ + if (chan->user) { + iounmap(chan->user); + chan->user = NULL; + } nouveau_gpuobj_ref(NULL, &ramfc); nouveau_gpuobj_ref(NULL, &chan->cache); } -- cgit v1.2.3 From ceac30999dfb00ee7b56cfea8b28ef50999a3c95 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 23 Nov 2010 10:10:24 +1000 Subject: drm/nouveau: implicitly insert non-DMA objects into RAMHT Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_dma.c | 10 ++---- drivers/gpu/drm/nouveau/nouveau_drv.h | 3 +- drivers/gpu/drm/nouveau/nouveau_fence.c | 7 +--- drivers/gpu/drm/nouveau/nouveau_object.c | 59 +++++++++++++++----------------- drivers/gpu/drm/nouveau/nv04_fbcon.c | 34 ++++++------------ drivers/gpu/drm/nouveau/nv50_fbcon.c | 10 ++---- 6 files changed, 43 insertions(+), 80 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 82581e600dcd..6ff77cedc008 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -59,17 +59,11 @@ nouveau_dma_init(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *obj = NULL; int ret, i; /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ - ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ? - 0x0039 : 0x5039, &obj); - if (ret) - return ret; - - ret = nouveau_ramht_insert(chan, NvM2MF, obj); - nouveau_gpuobj_ref(NULL, &obj); + ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ? 
+ 0x0039 : 0x5039); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index d001453e857b..bbf19861b6c8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -887,8 +887,7 @@ extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst, extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class, uint64_t offset, uint64_t size, int access, int target, struct nouveau_gpuobj **); -extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class, - struct nouveau_gpuobj **); +extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class); extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base, u64 size, int target, int access, u32 type, u32 comp, struct nouveau_gpuobj **pobj); diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index abfeff19488e..3d50d5c3b0fe 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -438,12 +438,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) int ret; /* Create an NV_SW object for various sync purposes */ - ret = nouveau_gpuobj_gr_new(chan, NV_SW, &obj); - if (ret) - return ret; - - ret = nouveau_ramht_insert(chan, NvSw, obj); - nouveau_gpuobj_ref(NULL, &obj); + ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 24540862a23f..3518ebba6fbd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -608,13 +608,9 @@ static int nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, struct nouveau_gpuobj **gpuobj_ret) { - struct drm_nouveau_private *dev_priv; + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct nouveau_gpuobj *gpuobj; - if (!chan || !gpuobj_ret || *gpuobj_ret != NULL) - return -EINVAL; - dev_priv = chan->dev->dev_private; - gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL); if (!gpuobj) return -ENOMEM; @@ -632,12 +628,12 @@ nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class, } int -nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, - struct nouveau_gpuobj **gpuobj) +nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class) { struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct drm_device *dev = chan->dev; struct nouveau_gpuobj_class *oc; + struct nouveau_gpuobj *gpuobj; int ret; NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class); @@ -651,10 +647,12 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, return -EINVAL; found: - if (oc->engine == NVOBJ_ENGINE_SW) - return nouveau_gpuobj_sw_new(chan, class, gpuobj); - switch (oc->engine) { + case NVOBJ_ENGINE_SW: + ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj); + if (ret) + return ret; + goto insert; case NVOBJ_ENGINE_GR: if (dev_priv->card_type >= NV_50 && !chan->ramin_grctx) { struct nouveau_pgraph_engine *pgraph = @@ -681,41 +679,47 @@ found: nouveau_gpuobj_class_instmem_size(dev, class), 16, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, - gpuobj); + &gpuobj); if (ret) { NV_ERROR(dev, "error creating gpuobj: %d\n", ret); return ret; } if (dev_priv->card_type >= NV_50) { - nv_wo32(*gpuobj, 0, class); - nv_wo32(*gpuobj, 20, 0x00010000); + nv_wo32(gpuobj, 0, class); + nv_wo32(gpuobj, 20, 0x00010000); } else { switch (class) { case NV_CLASS_NULL: - nv_wo32(*gpuobj, 0, 
0x00001030); - nv_wo32(*gpuobj, 4, 0xFFFFFFFF); + nv_wo32(gpuobj, 0, 0x00001030); + nv_wo32(gpuobj, 4, 0xFFFFFFFF); break; default: if (dev_priv->card_type >= NV_40) { - nv_wo32(*gpuobj, 0, class); + nv_wo32(gpuobj, 0, class); #ifdef __BIG_ENDIAN - nv_wo32(*gpuobj, 8, 0x01000000); + nv_wo32(gpuobj, 8, 0x01000000); #endif } else { #ifdef __BIG_ENDIAN - nv_wo32(*gpuobj, 0, class | 0x00080000); + nv_wo32(gpuobj, 0, class | 0x00080000); #else - nv_wo32(*gpuobj, 0, class); + nv_wo32(gpuobj, 0, class); #endif } } } dev_priv->engine.instmem.flush(dev); - (*gpuobj)->engine = oc->engine; - (*gpuobj)->class = oc->id; - return 0; + gpuobj->engine = oc->engine; + gpuobj->class = oc->id; + +insert: + ret = nouveau_ramht_insert(chan, handle, gpuobj); + if (ret) + NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret); + nouveau_gpuobj_ref(NULL, &gpuobj); + return ret; } static int @@ -971,7 +975,6 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_nouveau_grobj_alloc *init = data; - struct nouveau_gpuobj *gr = NULL; struct nouveau_channel *chan; int ret; @@ -987,18 +990,10 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, goto out; } - ret = nouveau_gpuobj_gr_new(chan, init->class, &gr); + ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class); if (ret) { NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n", ret, init->channel, init->handle); - goto out; - } - - ret = nouveau_ramht_insert(chan, init->handle, gr); - nouveau_gpuobj_ref(NULL, &gr); - if (ret) { - NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n", - ret, init->channel, init->handle); } out: diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index a32804e7d202..7a1189371096 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -137,22 +137,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) return 0; } -static int -nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *obj = NULL; - int ret; - - ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj); - if (ret) - return ret; - - ret = nouveau_ramht_insert(dev_priv->channel, handle, obj); - nouveau_gpuobj_ref(NULL, &obj); - return ret; -} - int nv04_fbcon_accel_init(struct fb_info *info) { @@ -192,29 +176,31 @@ nv04_fbcon_accel_init(struct fb_info *info) return -EINVAL; } - ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ? - 0x0062 : 0x0042, NvCtxSurf2D); + ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D, + dev_priv->card_type >= NV_10 ? + 0x0062 : 0x0042); if (ret) return ret; - ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect); + ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019); if (ret) return ret; - ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop); + ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043); if (ret) return ret; - ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt); + ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044); if (ret) return ret; - ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect); + ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a); if (ret) return ret; - ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ? - 0x009f : 0x005f, NvImageBlit); + ret = nouveau_gpuobj_gr_new(chan, NvImageBlit, + dev_priv->chipset >= 0x11 ? 
+ 0x009f : 0x005f); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 6edf9dca35ca..156731993907 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -134,9 +134,8 @@ nv50_fbcon_accel_init(struct fb_info *info) struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; - struct nouveau_gpuobj *eng2d = NULL; - uint64_t fb; int ret, format; + uint64_t fb; fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base; @@ -167,12 +166,7 @@ nv50_fbcon_accel_init(struct fb_info *info) return -EINVAL; } - ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d); - if (ret) - return ret; - - ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d); - nouveau_gpuobj_ref(NULL, &eng2d); + ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d); if (ret) return ret; -- cgit v1.2.3 From 12fb9525075982bc65cfd71427dd7afdf47bafed Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 19 Nov 2010 14:32:56 +1000 Subject: drm/nouveau: introduce a util function to wait on reg != val Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 10 +++++++--- drivers/gpu/drm/nouveau/nouveau_hw.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_state.c | 22 ++++++++++++++++++++-- drivers/gpu/drm/nouveau/nv04_dac.c | 12 ++++++------ 4 files changed, 35 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index bbf19861b6c8..e82dff4008c9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -795,8 +795,10 @@ extern int nouveau_ioctl_getparam(struct drm_device *, void *data, struct drm_file *); extern int nouveau_ioctl_setparam(struct drm_device *, void *data, struct drm_file *); -extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout, - uint32_t reg, uint32_t mask, uint32_t val); +extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout, + uint32_t reg, uint32_t mask, uint32_t val); +extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout, + uint32_t reg, uint32_t mask, uint32_t val); extern bool nouveau_wait_for_idle(struct drm_device *); extern int nouveau_card_init(struct drm_device *); @@ -1434,7 +1436,9 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val) } #define nv_wait(dev, reg, mask, val) \ - nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val)) + nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val)) +#define nv_wait_ne(dev, reg, mask, val) \ + nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val)) /* PRAMIN access */ static inline u32 nv_ri32(struct drm_device *dev, unsigned offset) diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c index 6ba640e7a7e9..053edf9d2f67 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hw.c +++ b/drivers/gpu/drm/nouveau/nouveau_hw.c @@ -999,8 +999,8 @@ nv_load_state_ext(struct drm_device *dev, int head, if (dev_priv->card_type == NV_10) { /* Not waiting for vertical retrace before modifying CRE_53/CRE_54 causes lockups. 
*/ - nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); - nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); + nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8); + nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); } wr_cio_state(dev, head, regp, NV_CIO_CRE_53); diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index e779e9320453..e0811f93243d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -1126,8 +1126,9 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data, } /* Wait until (value(reg) & mask) == val, up until timeout has hit */ -bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout, - uint32_t reg, uint32_t mask, uint32_t val) +bool +nouveau_wait_eq(struct drm_device *dev, uint64_t timeout, + uint32_t reg, uint32_t mask, uint32_t val) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; @@ -1141,6 +1142,23 @@ bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout, return false; } +/* Wait until (value(reg) & mask) != val, up until timeout has hit */ +bool +nouveau_wait_ne(struct drm_device *dev, uint64_t timeout, + uint32_t reg, uint32_t mask, uint32_t val) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; + uint64_t start = ptimer->read(dev); + + do { + if ((nv_rd32(dev, reg) & mask) != val) + return true; + } while (ptimer->read(dev) - start < timeout); + + return false; +} + /* Waits for PGRAPH to go completely idle */ bool nouveau_wait_for_idle(struct drm_device *dev) { diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index ba6423f2ffcc..e000455e06d0 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c @@ -74,14 +74,14 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2]) * use a 10ms timeout (guards against crtc being inactive, in * which case blank state would never change) */ - if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, - 0x00000001, 0x00000000)) + if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, + 0x00000001, 0x00000000)) return -EBUSY; - if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, - 0x00000001, 0x00000001)) + if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, + 0x00000001, 0x00000001)) return -EBUSY; - if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR, - 0x00000001, 0x00000000)) + if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR, + 0x00000001, 0x00000000)) return -EBUSY; udelay(100); -- cgit v1.2.3 From a0fd9b9f68cd7a5952eae3c5b3c5a3bc0eadfd44 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 26 Nov 2010 10:32:22 +1000 Subject: drm/nouveau: no need to zero dma objects, we fill them completely anyway Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_object.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 3518ebba6fbd..5cc3f7e59fa1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -460,8 +460,7 @@ nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size, struct drm_device *dev = chan->dev; int ret; - ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_ALLOC | - 
NVOBJ_FLAG_ZERO_FREE, pobj); + ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj); if (ret) return ret; @@ -536,9 +535,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, flags0 |= (base & 0x00000fff) << 20; flags2 |= (base & 0xfffff000); - ret = nouveau_gpuobj_new(dev, chan, (dev_priv->card_type >= NV_40) ? - 32 : 16, 16, NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, &obj); + ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj); if (ret) return ret; -- cgit v1.2.3 From 7a45d764a8e3177f0c9cd4a0be9f2ab7965e55cb Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 22 Nov 2010 08:50:27 +1000 Subject: drm/nouveau: wrap calls to ttm_bo_validate() This will be used later to fixup bo.offset with a buffer's fixed GPU virtual address. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 20 +++++++++++++++++--- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 ++ drivers/gpu/drm/nouveau/nouveau_gem.c | 3 +-- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index f6f51b4259e7..a7883e7db344 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -242,7 +242,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) nouveau_bo_placement_set(nvbo, memtype, 0); - ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); + ret = nouveau_bo_validate(nvbo, false, false, false); if (ret == 0) { switch (bo->mem.mem_type) { case TTM_PL_VRAM: @@ -278,7 +278,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); - ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); + ret = nouveau_bo_validate(nvbo, false, false, false); if (ret == 0) { switch (bo->mem.mem_type) { case TTM_PL_VRAM: @@ -317,6 +317,20 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo) ttm_bo_kunmap(&nvbo->kmap); } +int +nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, + bool no_wait_reserve, bool no_wait_gpu) +{ + int ret; + + ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible, + no_wait_reserve, no_wait_gpu); + if (ret) + return ret; + + return 0; +} + u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index) { @@ -937,7 +951,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) nvbo->placement.fpfn = 0; nvbo->placement.lpfn = dev_priv->fb_mappable_pages; nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0); - return ttm_bo_validate(bo, &nvbo->placement, false, true, false); + return nouveau_bo_validate(nvbo, false, true, false); } void diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e82dff4008c9..22abe8579912 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1285,6 +1285,8 @@ extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); +extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, + bool no_wait_reserve, bool no_wait_gpu); /* nouveau_fence.c */ struct nouveau_fence; diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 9886b644f27d..0adb2a85c143 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ 
b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -358,8 +358,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, } nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan; - ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, - true, false, false); + ret = nouveau_bo_validate(nvbo, true, false, false); nvbo->channel = NULL; if (unlikely(ret)) { if (ret != -ERESTARTSYS) -- cgit v1.2.3 From a8b214f007e299225d3fcf10c46f7fc603c275fa Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 3 Dec 2010 09:05:20 +1000 Subject: drm/nouveau: fix use of drm_mm_node in semaphore object At some point in the future, this bo won't necessarily be backed by a drm_mm_node, so use the start/size fields of the ttm_mem_reg instead. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_fence.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 3d50d5c3b0fe..01290d2952ae 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -450,12 +450,11 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) /* Create a DMA object for the shared cross-channel sync area. */ if (USE_SEMA(dev)) { - struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node; + struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem; ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, mem->start << PAGE_SHIFT, - mem->size << PAGE_SHIFT, - NV_MEM_ACCESS_RW, + mem->size, NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM, &obj); if (ret) return ret; -- cgit v1.2.3 From 937c3471cc8b7ef8f9e382d9e4ec232db151ea7b Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Wed, 8 Dec 2010 02:35:45 +0100 Subject: drm/nouveau: Avoid potential race between nouveau_fence_update() and context takedown. Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_fence.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 01290d2952ae..374a9793b85f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -77,14 +77,17 @@ nouveau_fence_update(struct nouveau_channel *chan) spin_lock(&chan->fence.lock); - if (USE_REFCNT(dev)) - sequence = nvchan_rd32(chan, 0x48); - else - sequence = atomic_read(&chan->fence.last_sequence_irq); - - if (chan->fence.sequence_ack == sequence) - goto out; - chan->fence.sequence_ack = sequence; + /* Fetch the last sequence if the channel is still up and running */ + if (likely(!list_empty(&chan->fence.pending))) { + if (USE_REFCNT(dev)) + sequence = nvchan_rd32(chan, 0x48); + else + sequence = atomic_read(&chan->fence.last_sequence_irq); + + if (chan->fence.sequence_ack == sequence) + goto out; + chan->fence.sequence_ack = sequence; + } list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) { sequence = fence->sequence; -- cgit v1.2.3 From 573a2a37e8648a3249426c816f51e7ef50f6f73e Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 25 Aug 2010 15:26:04 +1000 Subject: drm/nv50: implement custom vram mm This is required on nv50 as we need to be able to have more precise control over physical VRAM allocations to avoid buffer corruption when using buffers of mixed memory types. This removes some nasty overallocation/alignment that we were previously using to "control" this problem. 
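As an aside, here is a toy sketch of the underlying idea only — not the allocator this series actually adds (nouveau_mm.c is far more involved, and every name below is illustrative): each allocation carries a memory type, so the manager can keep incompatible types out of the same physical region and apply per-type alignment, rather than padding every buffer to a worst-case boundary as before:

	struct vram_node { u64 offset, size; u32 memtype; };

	static u64 cursor;	/* toy bump pointer over a VRAM region */

	static int vram_get(u64 size, u64 align, u32 memtype,
			    struct vram_node *node)
	{
		/* ALIGN() as in the kernel: round up to a power-of-two
		 * boundary chosen per request, not one global worst case */
		node->offset = ALIGN(cursor, align);
		node->size = size;
		/* record the type so mixed types never share a region */
		node->memtype = memtype;
		cursor = node->offset + size;
		return 0;
	}
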
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 5 +- drivers/gpu/drm/nouveau/nouveau_bo.c | 43 +----- drivers/gpu/drm/nouveau/nouveau_drv.h | 10 ++ drivers/gpu/drm/nouveau/nouveau_mem.c | 201 ++++++++++++++---------- drivers/gpu/drm/nouveau/nouveau_mm.c | 271 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_mm.h | 61 ++++++++ drivers/gpu/drm/nouveau/nv50_instmem.c | 1 + drivers/gpu/drm/nouveau/nv50_vram.c | 180 ++++++++++++++++++++++ 8 files changed, 650 insertions(+), 122 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nouveau_mm.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_mm.h create mode 100644 drivers/gpu/drm/nouveau/nv50_vram.c diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 7ea9a1154ca8..26fdd12561b6 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -9,7 +9,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ - nouveau_dp.o nouveau_ramht.o \ + nouveau_dp.o nouveau_ramht.o nouveau_mm.o \ nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ @@ -26,7 +26,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ nv10_gpio.o nv50_gpio.o \ nv50_calc.o \ - nv04_pm.o nv50_pm.o nva3_pm.o + nv04_pm.o nv50_pm.o nva3_pm.o \ + nv50_vram.o nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index a7883e7db344..5a71ca4346c8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -57,42 +57,7 @@ nouveau_bo_fixup_align(struct drm_device *dev, { struct drm_nouveau_private *dev_priv = dev->dev_private; - /* - * Some of the tile_flags have a periodic structure of N*4096 bytes, - * align to to that as well as the page size. Align the size to the - * appropriate boundaries. This does imply that sizes are rounded up - * 3-7 pages, so be aware of this and do not waste memory by allocating - * many small buffers. - */ - if (dev_priv->card_type == NV_50) { - uint32_t block_size = dev_priv->vram_size >> 15; - int i; - - switch (tile_flags) { - case 0x1800: - case 0x2800: - case 0x4800: - case 0x7a00: - if (is_power_of_2(block_size)) { - for (i = 1; i < 10; i++) { - *align = 12 * i * block_size; - if (!(*align % 65536)) - break; - } - } else { - for (i = 1; i < 10; i++) { - *align = 8 * i * block_size; - if (!(*align % 65536)) - break; - } - } - *size = roundup(*size, *align); - break; - default: - break; - } - - } else { + if (dev_priv->card_type < NV_50) { if (tile_mode) { if (dev_priv->chipset >= 0x40) { *align = 65536; @@ -115,7 +80,6 @@ nouveau_bo_fixup_align(struct drm_device *dev, /* ALIGN works only on powers of two. 
*/ *size = roundup(*size, PAGE_SIZE); - if (dev_priv->card_type == NV_50) { *size = roundup(*size, 65536); *align = max(65536, *align); @@ -422,7 +386,10 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_VRAM: - man->func = &ttm_bo_manager_func; + if (dev_priv->card_type == NV_50) + man->func = &nouveau_vram_manager; + else + man->func = &ttm_bo_manager_func; man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED | diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 22abe8579912..1305e2c94201 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -66,6 +66,15 @@ struct nouveau_grctx; #define NV50_VM_BLOCK (512*1024*1024ULL) #define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK) +struct nouveau_vram { + struct drm_device *dev; + + struct list_head regions; + u32 memtype; + u64 offset; + u64 size; +}; + struct nouveau_tile_reg { bool used; uint32_t addr; @@ -821,6 +830,7 @@ extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt, uint64_t phys); extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt, uint32_t size); +extern const struct ttm_mem_type_manager_func nouveau_vram_manager; /* nouveau_notifier.c */ extern int nouveau_notifier_init_channel(struct nouveau_channel *); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 549f59052881..dbeb9e5f6b22 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -36,6 +36,7 @@ #include "nouveau_drv.h" #include "nouveau_pm.h" +#include "nouveau_mm.h" /* * NV10-NV40 tiling helpers @@ -333,61 +334,6 @@ nouveau_mem_detect_nforce(struct drm_device *dev) return 0; } -static void -nv50_vram_preinit(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - int i, parts, colbits, rowbitsa, rowbitsb, banks; - u64 rowsize, predicted; - u32 r0, r4, rt, ru; - - r0 = nv_rd32(dev, 0x100200); - r4 = nv_rd32(dev, 0x100204); - rt = nv_rd32(dev, 0x100250); - ru = nv_rd32(dev, 0x001540); - NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); - - for (i = 0, parts = 0; i < 8; i++) { - if (ru & (0x00010000 << i)) - parts++; - } - - colbits = (r4 & 0x0000f000) >> 12; - rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; - rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; - banks = ((r4 & 0x01000000) ? 8 : 4); - - rowsize = parts * banks * (1 << colbits) * 8; - predicted = rowsize << rowbitsa; - if (r0 & 0x00000004) - predicted += rowsize << rowbitsb; - - if (predicted != dev_priv->vram_size) { - NV_WARN(dev, "memory controller reports %dMiB VRAM\n", - (u32)(dev_priv->vram_size >> 20)); - NV_WARN(dev, "we calculated %dMiB VRAM\n", - (u32)(predicted >> 20)); - } - - dev_priv->vram_rblock_size = rowsize >> 12; - if (rt & 1) - dev_priv->vram_rblock_size *= 3; - - NV_DEBUG(dev, "rblock %lld bytes\n", - (u64)dev_priv->vram_rblock_size << 12); -} - -static void -nvaa_vram_preinit(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - /* To our knowledge, there's no large scale reordering of pages - * that occurs on IGP chipsets. 
- */ - dev_priv->vram_rblock_size = 1; -} - static int nouveau_mem_detect(struct drm_device *dev) { @@ -404,22 +350,8 @@ nouveau_mem_detect(struct drm_device *dev) dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; } else if (dev_priv->card_type < NV_C0) { - dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); - dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; - dev_priv->vram_size &= 0xffffffff00ll; - - switch (dev_priv->chipset) { - case 0xaa: - case 0xac: - case 0xaf: - dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); - dev_priv->vram_sys_base <<= 12; - nvaa_vram_preinit(dev); - break; - default: - nv50_vram_preinit(dev); - break; - } + if (nv50_vram_init(dev)) + return -ENOMEM; } else { dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; dev_priv->vram_size *= nv_rd32(dev, 0x121c74); @@ -568,10 +500,6 @@ nouveau_mem_vram_init(struct drm_device *dev) if (ret) return ret; - ret = nouveau_mem_detect(dev); - if (ret) - return ret; - dev_priv->fb_phys = pci_resource_start(dev->pdev, 1); ret = nouveau_ttm_global_init(dev_priv); @@ -587,13 +515,6 @@ nouveau_mem_vram_init(struct drm_device *dev) return ret; } - dev_priv->fb_available_size = dev_priv->vram_size; - dev_priv->fb_mappable_pages = dev_priv->fb_available_size; - if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) - dev_priv->fb_mappable_pages = - pci_resource_len(dev->pdev, 1); - dev_priv->fb_mappable_pages >>= PAGE_SHIFT; - /* reserve space at end of VRAM for PRAMIN */ if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 || dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) @@ -604,6 +525,17 @@ nouveau_mem_vram_init(struct drm_device *dev) else dev_priv->ramin_rsvd_vram = (512 * 1024); + /* initialise gpu-specific vram backend */ + ret = nouveau_mem_detect(dev); + if (ret) + return ret; + + dev_priv->fb_available_size = dev_priv->vram_size; + dev_priv->fb_mappable_pages = dev_priv->fb_available_size; + if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) + dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1); + dev_priv->fb_mappable_pages >>= PAGE_SHIFT; + dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; dev_priv->fb_aper_free = dev_priv->fb_available_size; @@ -820,3 +752,108 @@ nouveau_mem_timing_fini(struct drm_device *dev) kfree(mem->timing); } + +static int +nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); + struct nouveau_mm *mm; + u32 b_size; + int ret; + + p_size = (p_size << PAGE_SHIFT) >> 12; + b_size = dev_priv->vram_rblock_size >> 12; + + ret = nouveau_mm_init(&mm, 0, p_size, b_size); + if (ret) + return ret; + + man->priv = mm; + return 0; +} + +static int +nouveau_vram_manager_fini(struct ttm_mem_type_manager *man) +{ + struct nouveau_mm *mm = man->priv; + int ret; + + ret = nouveau_mm_fini(&mm); + if (ret) + return ret; + + man->priv = NULL; + return 0; +} + +static void +nouveau_vram_manager_del(struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); + struct drm_device *dev = dev_priv->dev; + + nv50_vram_del(dev, (struct nouveau_vram **)&mem->mm_node); +} + +static int +nouveau_vram_manager_new(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *bo, + struct ttm_placement *placement, + struct ttm_mem_reg *mem) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); + struct drm_device *dev = dev_priv->dev; + struct nouveau_bo *nvbo = 
nouveau_bo(bo); + struct nouveau_vram *vram; + int ret; + + ret = nv50_vram_new(dev, mem->num_pages << PAGE_SHIFT, 65536, 0, + (nvbo->tile_flags >> 8) & 0x7f, &vram); + if (ret) + return ret; + + mem->mm_node = vram; + mem->start = vram->offset >> PAGE_SHIFT; + return 0; +} + +void +nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) +{ + struct ttm_bo_global *glob = man->bdev->glob; + struct nouveau_mm *mm = man->priv; + struct nouveau_mm_node *r; + u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {}; + int i; + + mutex_lock(&mm->mutex); + list_for_each_entry(r, &mm->nodes, nl_entry) { + printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n", + prefix, r->free ? "free" : "used", r->type, + ((u64)r->offset << 12), + (((u64)r->offset + r->length) << 12)); + total += r->length; + ttotal[r->type] += r->length; + if (r->free) + tfree[r->type] += r->length; + else + tused[r->type] += r->length; + } + mutex_unlock(&mm->mutex); + + printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12); + for (i = 0; i < 3; i++) { + printk(KERN_DEBUG "%s type %d: 0x%010llx, " + "used 0x%010llx, free 0x%010llx\n", prefix, + i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12); + } +} + +const struct ttm_mem_type_manager_func nouveau_vram_manager = { + nouveau_vram_manager_init, + nouveau_vram_manager_fini, + nouveau_vram_manager_new, + nouveau_vram_manager_del, + nouveau_vram_manager_debug +}; diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c new file mode 100644 index 000000000000..cdbb11eb701b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_mm.c @@ -0,0 +1,271 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_mm.h" + +static inline void +region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a) +{ + list_del(&a->nl_entry); + list_del(&a->fl_entry); + kfree(a); +} + +static struct nouveau_mm_node * +region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size) +{ + struct nouveau_mm_node *b; + + if (a->length == size) + return a; + + b = kmalloc(sizeof(*b), GFP_KERNEL); + if (unlikely(b == NULL)) + return NULL; + + b->offset = a->offset; + b->length = size; + b->free = a->free; + b->type = a->type; + a->offset += size; + a->length -= size; + list_add_tail(&b->nl_entry, &a->nl_entry); + if (b->free) + list_add_tail(&b->fl_entry, &a->fl_entry); + return b; +} + +static struct nouveau_mm_node * +nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this) +{ + struct nouveau_mm_node *prev, *next; + + /* try to merge with free adjacent entries of same type */ + prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry); + if (this->nl_entry.prev != &rmm->nodes) { + if (prev->free && prev->type == this->type) { + prev->length += this->length; + region_put(rmm, this); + this = prev; + } + } + + next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry); + if (this->nl_entry.next != &rmm->nodes) { + if (next->free && next->type == this->type) { + next->offset = this->offset; + next->length += this->length; + region_put(rmm, this); + this = next; + } + } + + return this; +} + +void +nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this) +{ + u32 block_s, block_l; + + this->free = true; + list_add(&this->fl_entry, &rmm->free); + this = nouveau_mm_merge(rmm, this); + + /* any entirely free blocks now? 
we'll want to remove typing + * on them now so they can be used for any memory allocation + */ + block_s = roundup(this->offset, rmm->block_size); + if (block_s + rmm->block_size > this->offset + this->length) + return; + + /* split off any still-typed region at the start */ + if (block_s != this->offset) { + if (!region_split(rmm, this, block_s - this->offset)) + return; + } + + /* split off the soon-to-be-untyped block(s) */ + block_l = rounddown(this->length, rmm->block_size); + if (block_l != this->length) { + this = region_split(rmm, this, block_l); + if (!this) + return; + } + + /* mark as having no type, and retry merge with any adjacent + * untyped blocks + */ + this->type = 0; + nouveau_mm_merge(rmm, this); +} + +int +nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, + u32 align, struct nouveau_mm_node **pnode) +{ + struct nouveau_mm_node *this, *tmp, *next; + u32 splitoff, avail, alloc; + + list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) { + next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry); + if (this->nl_entry.next == &rmm->nodes) + next = NULL; + + /* skip wrongly typed blocks */ + if (this->type && this->type != type) + continue; + + /* account for alignment */ + splitoff = this->offset & (align - 1); + if (splitoff) + splitoff = align - splitoff; + + if (this->length <= splitoff) + continue; + + /* determine total memory available from this, and + * the next block (if appropriate) + */ + avail = this->length; + if (next && next->free && (!next->type || next->type == type)) + avail += next->length; + + avail -= splitoff; + + /* determine allocation size */ + if (size_nc) { + alloc = min(avail, size); + alloc = rounddown(alloc, size_nc); + if (alloc == 0) + continue; + } else { + alloc = size; + if (avail < alloc) + continue; + } + + /* untyped block, split off a chunk that's a multiple + * of block_size and type it + */ + if (!this->type) { + u32 block = roundup(alloc + splitoff, rmm->block_size); + if (this->length < block) + continue; + + this = region_split(rmm, this, block); + if (!this) + return -ENOMEM; + + this->type = type; + } + + /* stealing memory from adjacent block */ + if (alloc > this->length) { + u32 amount = alloc - (this->length - splitoff); + + if (!next->type) { + amount = roundup(amount, rmm->block_size); + + next = region_split(rmm, next, amount); + if (!next) + return -ENOMEM; + + next->type = type; + } + + this->length += amount; + next->offset += amount; + next->length -= amount; + if (!next->length) { + list_del(&next->nl_entry); + list_del(&next->fl_entry); + kfree(next); + } + } + + if (splitoff) { + if (!region_split(rmm, this, splitoff)) + return -ENOMEM; + } + + this = region_split(rmm, this, alloc); + if (this == NULL) + return -ENOMEM; + + this->free = false; + list_del(&this->fl_entry); + *pnode = this; + return 0; + } + + return -ENOMEM; +} + +int +nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block) +{ + struct nouveau_mm *rmm; + struct nouveau_mm_node *heap; + + heap = kzalloc(sizeof(*heap), GFP_KERNEL); + if (!heap) + return -ENOMEM; + heap->free = true; + heap->offset = roundup(offset, block); + heap->length = rounddown(offset + length, block) - heap->offset; + + rmm = kzalloc(sizeof(*rmm), GFP_KERNEL); + if (!rmm) { + kfree(heap); + return -ENOMEM; + } + rmm->block_size = block; + mutex_init(&rmm->mutex); + INIT_LIST_HEAD(&rmm->nodes); + INIT_LIST_HEAD(&rmm->free); + list_add(&heap->nl_entry, &rmm->nodes); + list_add(&heap->fl_entry, &rmm->free); + + *prmm = 
rmm; + return 0; +} + +int +nouveau_mm_fini(struct nouveau_mm **prmm) +{ + struct nouveau_mm *rmm = *prmm; + struct nouveau_mm_node *heap = + list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry); + + if (!list_is_singular(&rmm->nodes)) + return -EBUSY; + + kfree(heap); + kfree(rmm); + *prmm = NULL; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h new file mode 100644 index 000000000000..7e8f8bd86d47 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_mm.h @@ -0,0 +1,61 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#ifndef __NOUVEAU_REGION_H__ +#define __NOUVEAU_REGION_H__ + +struct nouveau_mm_node { + struct list_head nl_entry; + struct list_head fl_entry; + struct list_head rl_entry; + + bool free; + int type; + + u32 offset; + u32 length; +}; + +struct nouveau_mm { + struct list_head nodes; + struct list_head free; + + struct mutex mutex; + + u32 block_size; +}; + +int nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block); +int nouveau_mm_fini(struct nouveau_mm **); +int nouveau_mm_pre(struct nouveau_mm *); +int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc, + u32 align, struct nouveau_mm_node **); +void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *); + +int nv50_vram_init(struct drm_device *); +int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc, + u32 memtype, struct nouveau_vram **); +void nv50_vram_del(struct drm_device *, struct nouveau_vram **); + +#endif diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 87160952a30b..1e7d50397e4a 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -325,6 +325,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) 0, 0x0000, true, false, &node->vram); if (ret) { NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); + WARN_ON(1); return ret; } diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c new file mode 100644 index 000000000000..6e753356cd94 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_vram.c @@ -0,0 +1,180 @@ +/* + * Copyright 2010 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_mm.h" + +static int types[0x80] = { + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2, + 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0 +}; + +void +nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; + struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM]; + struct nouveau_mm *mm = man->priv; + struct nouveau_mm_node *this; + struct nouveau_vram *vram; + + vram = *pvram; + *pvram = NULL; + if (unlikely(vram == NULL)) + return; + + mutex_lock(&mm->mutex); + while (!list_empty(&vram->regions)) { + this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry); + + list_del(&this->rl_entry); + nouveau_mm_put(mm, this); + } + mutex_unlock(&mm->mutex); + + kfree(vram); +} + +int +nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc, + u32 type, struct nouveau_vram **pvram) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; + struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM]; + struct nouveau_mm *mm = man->priv; + struct nouveau_mm_node *r; + struct nouveau_vram *vram; + int ret; + + if (!types[type]) + return -EINVAL; + size >>= 12; + align >>= 12; + size_nc >>= 12; + + vram = kzalloc(sizeof(*vram), GFP_KERNEL); + if (!vram) + return -ENOMEM; + + INIT_LIST_HEAD(&vram->regions); + vram->dev = dev_priv->dev; + vram->memtype = type; + vram->size = size; + + mutex_lock(&mm->mutex); + do { + ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r); + if (ret) { + mutex_unlock(&mm->mutex); + nv50_vram_del(dev, &vram); + return ret; + } + + list_add_tail(&r->rl_entry, &vram->regions); + size -= r->length; + } while (size); + mutex_unlock(&mm->mutex); + + r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry); + vram->offset = (u64)r->offset << 12; + *pvram = vram; + return 0; +} + +static u32 +nv50_vram_rblock(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = 
dev->dev_private; + int i, parts, colbits, rowbitsa, rowbitsb, banks; + u64 rowsize, predicted; + u32 r0, r4, rt, ru, rblock_size; + + r0 = nv_rd32(dev, 0x100200); + r4 = nv_rd32(dev, 0x100204); + rt = nv_rd32(dev, 0x100250); + ru = nv_rd32(dev, 0x001540); + NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru); + + for (i = 0, parts = 0; i < 8; i++) { + if (ru & (0x00010000 << i)) + parts++; + } + + colbits = (r4 & 0x0000f000) >> 12; + rowbitsa = ((r4 & 0x000f0000) >> 16) + 8; + rowbitsb = ((r4 & 0x00f00000) >> 20) + 8; + banks = ((r4 & 0x01000000) ? 8 : 4); + + rowsize = parts * banks * (1 << colbits) * 8; + predicted = rowsize << rowbitsa; + if (r0 & 0x00000004) + predicted += rowsize << rowbitsb; + + if (predicted != dev_priv->vram_size) { + NV_WARN(dev, "memory controller reports %dMiB VRAM\n", + (u32)(dev_priv->vram_size >> 20)); + NV_WARN(dev, "we calculated %dMiB VRAM\n", + (u32)(predicted >> 20)); + } + + rblock_size = rowsize; + if (rt & 1) + rblock_size *= 3; + + NV_DEBUG(dev, "rblock %d bytes\n", rblock_size); + return rblock_size; +} + +int +nv50_vram_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + dev_priv->vram_size = nv_rd32(dev, 0x10020c); + dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; + dev_priv->vram_size &= 0xffffffff00ULL; + + switch (dev_priv->chipset) { + case 0xaa: + case 0xac: + case 0xaf: + dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12; + dev_priv->vram_rblock_size = 4096; + break; + default: + dev_priv->vram_rblock_size = nv50_vram_rblock(dev); + break; + } + + return 0; +} -- cgit v1.2.3 From a11c3198c9ba38d81e25b65e3908d531feba1372 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 27 Aug 2010 10:00:25 +1000 Subject: drm/nv50: import new vm code Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 5 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 9 +- drivers/gpu/drm/nouveau/nouveau_mem.c | 5 +- drivers/gpu/drm/nouveau/nouveau_object.c | 2 +- drivers/gpu/drm/nouveau/nouveau_vm.c | 421 +++++++++++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_vm.h | 107 ++++++++ drivers/gpu/drm/nouveau/nv50_fifo.c | 3 +- drivers/gpu/drm/nouveau/nv50_graph.c | 5 +- drivers/gpu/drm/nouveau/nv50_instmem.c | 12 +- drivers/gpu/drm/nouveau/nv50_vm.c | 164 ++++++++++++ drivers/gpu/drm/nouveau/nv84_crypt.c | 3 +- 11 files changed, 714 insertions(+), 22 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nouveau_vm.c create mode 100644 drivers/gpu/drm/nouveau/nouveau_vm.h create mode 100644 drivers/gpu/drm/nouveau/nv50_vm.c diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 26fdd12561b6..b1d8941e04d8 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -9,8 +9,9 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ - nouveau_dp.o nouveau_ramht.o nouveau_mm.o \ + nouveau_dp.o nouveau_ramht.o \ nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \ + nouveau_mm.o nouveau_vm.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \ @@ -27,7 +28,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv10_gpio.o nv50_gpio.o \ nv50_calc.o \ nv04_pm.o nv50_pm.o nva3_pm.o \ - nv50_vram.o + nv50_vram.o nv50_vm.o 
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 1305e2c94201..ce1dde4a65d6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -153,6 +153,7 @@ enum nouveau_flags { #define NVOBJ_ENGINE_DISPLAY 0xcafe0001 #define NVOBJ_ENGINE_INT 0xdeadbeef +#define NVOBJ_FLAG_DONT_MAP (1 << 0) #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) #define NVOBJ_FLAG_ZERO_FREE (1 << 2) @@ -1213,7 +1214,6 @@ extern int nv50_instmem_map(struct nouveau_gpuobj *); extern void nv50_instmem_unmap(struct nouveau_gpuobj *); extern void nv50_instmem_flush(struct drm_device *); extern void nv84_instmem_flush(struct drm_device *); -extern void nv50_vm_flush(struct drm_device *, int engine); /* nvc0_instmem.c */ extern int nvc0_instmem_init(struct drm_device *); @@ -1564,10 +1564,11 @@ nv_match_device(struct drm_device *dev, unsigned device, } /* memory type/access flags, do not match hardware values */ -#define NV_MEM_ACCESS_RO 1 -#define NV_MEM_ACCESS_WO 2 +#define NV_MEM_ACCESS_RO 1 +#define NV_MEM_ACCESS_WO 2 #define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO) -#define NV_MEM_ACCESS_VM 4 +#define NV_MEM_ACCESS_SYS 4 +#define NV_MEM_ACCESS_VM 8 #define NV_MEM_TARGET_VRAM 0 #define NV_MEM_TARGET_PCI 1 diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index dbeb9e5f6b22..2d02401e8227 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -37,6 +37,7 @@ #include "nouveau_drv.h" #include "nouveau_pm.h" #include "nouveau_mm.h" +#include "nouveau_vm.h" /* * NV10-NV40 tiling helpers @@ -201,7 +202,7 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, dev_priv->engine.instmem.flush(dev); dev_priv->engine.fifo.tlb_flush(dev); dev_priv->engine.graph.tlb_flush(dev); - nv50_vm_flush(dev, 6); + nv50_vm_flush_engine(dev, 6); return 0; } @@ -234,7 +235,7 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) dev_priv->engine.instmem.flush(dev); dev_priv->engine.fifo.tlb_flush(dev); dev_priv->engine.graph.tlb_flush(dev); - nv50_vm_flush(dev, 6); + nv50_vm_flush_engine(dev, 6); } /* diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 5cc3f7e59fa1..dd1859f7d8b0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -213,7 +213,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, } ret = -ENOSYS; - if (dev_priv->ramin_available) + if (!(flags & NVOBJ_FLAG_DONT_MAP)) ret = instmem->map(gpuobj); if (ret) gpuobj->pinst = ~0; diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c new file mode 100644 index 000000000000..07ab1749cf7d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_vm.c @@ -0,0 +1,421 @@ +/* + * Copyright 2010 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_mm.h" +#include "nouveau_vm.h" + +void +nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram) +{ + struct nouveau_vm *vm = vma->vm; + struct nouveau_mm_node *r; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 pde = (offset >> vm->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; + u32 max = 1 << (vm->pgt_bits - bits); + u32 end, len; + + list_for_each_entry(r, &vram->regions, rl_entry) { + u64 phys = (u64)r->offset << 12; + u32 num = r->length >> bits; + + while (num) { + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + vm->map(vma, pgt, vram, pte, len, phys); + + num -= len; + pte += len; + if (unlikely(end >= max)) { + pde++; + pte = 0; + } + } + } + + vm->flush(vm); +} + +void +nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram) +{ + nouveau_vm_map_at(vma, 0, vram); +} + +void +nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length, + dma_addr_t *list) +{ + struct nouveau_vm *vm = vma->vm; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 num = length >> vma->node->type; + u32 pde = (offset >> vm->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; + u32 max = 1 << (vm->pgt_bits - bits); + u32 end, len; + + while (num) { + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + vm->map_sg(vma, pgt, pte, list, len); + + num -= len; + pte += len; + list += len; + if (unlikely(end >= max)) { + pde++; + pte = 0; + } + } + + vm->flush(vm); +} + +void +nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length) +{ + struct nouveau_vm *vm = vma->vm; + u32 offset = vma->node->offset + (delta >> 12); + u32 bits = vma->node->type - 12; + u32 num = length >> vma->node->type; + u32 pde = (offset >> vm->pgt_bits) - vm->fpde; + u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits; + u32 max = 1 << (vm->pgt_bits - bits); + u32 end, len; + + while (num) { + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj; + + end = (pte + num); + if (unlikely(end >= max)) + end = max; + len = end - pte; + + vm->unmap(pgt, pte, len); + + num -= len; + pte += len; + if (unlikely(end >= 
max)) { + pde++; + pte = 0; + } + } + + vm->flush(vm); +} + +void +nouveau_vm_unmap(struct nouveau_vma *vma) +{ + nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12); +} + +static void +nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde) +{ + struct nouveau_vm_pgd *vpgd; + struct nouveau_vm_pgt *vpgt; + struct nouveau_gpuobj *pgt; + u32 pde; + + for (pde = fpde; pde <= lpde; pde++) { + vpgt = &vm->pgt[pde - vm->fpde]; + if (--vpgt->refcount) + continue; + + list_for_each_entry(vpgd, &vm->pgd_list, head) { + vm->unmap_pgt(vpgd->obj, pde); + } + + pgt = vpgt->obj; + vpgt->obj = NULL; + + mutex_unlock(&vm->mm->mutex); + nouveau_gpuobj_ref(NULL, &pgt); + mutex_lock(&vm->mm->mutex); + } +} + +static int +nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type) +{ + struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; + struct nouveau_vm_pgd *vpgd; + struct nouveau_gpuobj *pgt; + u32 pgt_size; + int ret; + + pgt_size = (1 << (vm->pgt_bits + 12)) >> type; + pgt_size *= 8; + + mutex_unlock(&vm->mm->mutex); + ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000, + NVOBJ_FLAG_ZERO_ALLOC, &pgt); + mutex_lock(&vm->mm->mutex); + if (unlikely(ret)) + return ret; + + /* someone beat us to filling the PDE while we didn't have the lock */ + if (unlikely(vpgt->refcount++)) { + mutex_unlock(&vm->mm->mutex); + nouveau_gpuobj_ref(NULL, &pgt); + mutex_lock(&vm->mm->mutex); + return 0; + } + + list_for_each_entry(vpgd, &vm->pgd_list, head) { + vm->map_pgt(vpgd->obj, type, pde, pgt); + } + + vpgt->page_shift = type; + vpgt->obj = pgt; + return 0; +} + +int +nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift, + u32 access, struct nouveau_vma *vma) +{ + u32 align = (1 << page_shift) >> 12; + u32 msize = size >> 12; + u32 fpde, lpde, pde; + int ret; + + mutex_lock(&vm->mm->mutex); + ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node); + if (unlikely(ret != 0)) { + mutex_unlock(&vm->mm->mutex); + return ret; + } + + fpde = (vma->node->offset >> vm->pgt_bits); + lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; + for (pde = fpde; pde <= lpde; pde++) { + struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; + + if (likely(vpgt->refcount)) { + vpgt->refcount++; + continue; + } + + ret = nouveau_vm_map_pgt(vm, pde, vma->node->type); + if (ret) { + if (pde != fpde) + nouveau_vm_unmap_pgt(vm, fpde, pde - 1); + nouveau_mm_put(vm->mm, vma->node); + mutex_unlock(&vm->mm->mutex); + vma->node = NULL; + return ret; + } + } + mutex_unlock(&vm->mm->mutex); + + vma->vm = vm; + vma->offset = (u64)vma->node->offset << 12; + vma->access = access; + return 0; +} + +void +nouveau_vm_put(struct nouveau_vma *vma) +{ + struct nouveau_vm *vm = vma->vm; + u32 fpde, lpde; + + if (unlikely(vma->node == NULL)) + return; + fpde = (vma->node->offset >> vm->pgt_bits); + lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; + + mutex_lock(&vm->mm->mutex); + nouveau_mm_put(vm->mm, vma->node); + vma->node = NULL; + nouveau_vm_unmap_pgt(vm, fpde, lpde); + mutex_unlock(&vm->mm->mutex); +} + +int +nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset, + u8 pgt_bits, u8 spg_shift, u8 lpg_shift, + struct nouveau_vm **pvm) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_vm *vm; + u64 mm_length = (offset + length) - mm_offset; + u32 block; + int ret; + + vm = kzalloc(sizeof(*vm), GFP_KERNEL); + if (!vm) + return -ENOMEM; + + if (dev_priv->card_type == NV_50) { + vm->map_pgt = nv50_vm_map_pgt; + vm->unmap_pgt = 
nv50_vm_unmap_pgt; + vm->map = nv50_vm_map; + vm->map_sg = nv50_vm_map_sg; + vm->unmap = nv50_vm_unmap; + vm->flush = nv50_vm_flush; + } else { + kfree(vm); + return -ENOSYS; + } + + vm->fpde = offset >> pgt_bits; + vm->lpde = (offset + length - 1) >> pgt_bits; + vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL); + if (!vm->pgt) { + kfree(vm); + return -ENOMEM; + } + + INIT_LIST_HEAD(&vm->pgd_list); + vm->dev = dev; + vm->refcount = 1; + vm->pgt_bits = pgt_bits - 12; + vm->spg_shift = spg_shift; + vm->lpg_shift = lpg_shift; + + block = (1 << pgt_bits); + if (length < block) + block = length; + + ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12, + block >> 12); + if (ret) { + kfree(vm); + return ret; + } + + *pvm = vm; + return 0; +} + +static int +nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) +{ + struct nouveau_vm_pgd *vpgd; + int i; + + if (!pgd) + return 0; + + vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL); + if (!vpgd) + return -ENOMEM; + + nouveau_gpuobj_ref(pgd, &vpgd->obj); + + mutex_lock(&vm->mm->mutex); + for (i = vm->fpde; i <= vm->lpde; i++) { + struct nouveau_vm_pgt *vpgt = &vm->pgt[i - vm->fpde]; + + if (!vpgt->obj) { + vm->unmap_pgt(pgd, i); + continue; + } + + vm->map_pgt(pgd, vpgt->page_shift, i, vpgt->obj); + } + list_add(&vpgd->head, &vm->pgd_list); + mutex_unlock(&vm->mm->mutex); + return 0; +} + +static void +nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) +{ + struct nouveau_vm_pgd *vpgd, *tmp; + + if (!pgd) + return; + + mutex_lock(&vm->mm->mutex); + list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { + if (vpgd->obj != pgd) + continue; + + list_del(&vpgd->head); + nouveau_gpuobj_ref(NULL, &vpgd->obj); + kfree(vpgd); + } + mutex_unlock(&vm->mm->mutex); +} + +static void +nouveau_vm_del(struct nouveau_vm *vm) +{ + struct nouveau_vm_pgd *vpgd, *tmp; + + list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { + nouveau_vm_unlink(vm, vpgd->obj); + } + WARN_ON(nouveau_mm_fini(&vm->mm) != 0); + + kfree(vm->pgt); + kfree(vm); +} + +int +nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr, + struct nouveau_gpuobj *pgd) +{ + struct nouveau_vm *vm; + int ret; + + vm = ref; + if (vm) { + ret = nouveau_vm_link(vm, pgd); + if (ret) + return ret; + + vm->refcount++; + } + + vm = *ptr; + *ptr = ref; + + if (vm) { + nouveau_vm_unlink(vm, pgd); + + if (--vm->refcount == 0) + nouveau_vm_del(vm); + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h new file mode 100644 index 000000000000..b6755cfa7b71 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_vm.h @@ -0,0 +1,107 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#ifndef __NOUVEAU_VM_H__ +#define __NOUVEAU_VM_H__ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_mm.h" + +struct nouveau_vm_pgt { + struct nouveau_gpuobj *obj; + u32 page_shift; + u32 refcount; +}; + +struct nouveau_vm_pgd { + struct list_head head; + struct nouveau_gpuobj *obj; +}; + +struct nouveau_vma { + struct nouveau_vm *vm; + struct nouveau_mm_node *node; + u64 offset; + u32 access; +}; + +struct nouveau_vm { + struct drm_device *dev; + struct nouveau_mm *mm; + int refcount; + + struct list_head pgd_list; + atomic_t pgraph_refs; + atomic_t pcrypt_refs; + + struct nouveau_vm_pgt *pgt; + u32 fpde; + u32 lpde; + + u32 pgt_bits; + u8 spg_shift; + u8 lpg_shift; + + void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 type, u32 pde, + struct nouveau_gpuobj *pgt); + void (*unmap_pgt)(struct nouveau_gpuobj *pgd, u32 pde); + void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *, + struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); + void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *, + u32 pte, dma_addr_t *, u32 cnt); + void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt); + void (*flush)(struct nouveau_vm *); +}; + +/* nouveau_vm.c */ +int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset, + u8 pgt_bits, u8 spg_shift, u8 lpg_shift, + struct nouveau_vm **); +int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **, + struct nouveau_gpuobj *pgd); +int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift, + u32 access, struct nouveau_vma *); +void nouveau_vm_put(struct nouveau_vma *); +void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *); +void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *); +void nouveau_vm_unmap(struct nouveau_vma *); +void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length); +void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length, + dma_addr_t *); + +/* nv50_vm.c */ +void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde, + struct nouveau_gpuobj *pgt); +void nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde); +void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *, + struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); +void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *, + u32 pte, dma_addr_t *, u32 cnt); +void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt); +void nv50_vm_flush(struct nouveau_vm *); +void nv50_vm_flush_engine(struct drm_device *, int engine); + +#endif diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 7add3dfde3df..8dd04c5dac67 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -28,6 +28,7 @@ #include "drm.h" #include "nouveau_drv.h" #include "nouveau_ramht.h" +#include "nouveau_vm.h" static void nv50_fifo_playlist_update(struct drm_device *dev) @@ -498,5 +499,5 @@ nv50_fifo_unload_context(struct drm_device *dev) void nv50_fifo_tlb_flush(struct drm_device *dev) { - nv50_vm_flush(dev, 5); + nv50_vm_flush_engine(dev, 5); } diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index b3900788c66d..f5fd1b296d27 100644 --- 
a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -30,6 +30,7 @@ #include "nouveau_ramht.h" #include "nouveau_grctx.h" #include "nouveau_dma.h" +#include "nouveau_vm.h" #include "nv50_evo.h" static int nv50_graph_register(struct drm_device *); @@ -468,7 +469,7 @@ nv50_graph_register(struct drm_device *dev) void nv50_graph_tlb_flush(struct drm_device *dev) { - nv50_vm_flush(dev, 0); + nv50_vm_flush_engine(dev, 0); } void @@ -511,7 +512,7 @@ nv86_graph_tlb_flush(struct drm_device *dev) nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388)); } - nv50_vm_flush(dev, 0); + nv50_vm_flush_engine(dev, 0); nv_mask(dev, 0x400500, 0x00000001, 0x00000001); spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 1e7d50397e4a..4eb2f0835e27 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -27,7 +27,9 @@ #include "drmP.h" #include "drm.h" + #include "nouveau_drv.h" +#include "nouveau_vm.h" struct nv50_instmem_priv { uint32_t save1700[5]; /* 0x1700->0x1710 */ @@ -404,7 +406,7 @@ nv50_instmem_map(struct nouveau_gpuobj *gpuobj) } dev_priv->engine.instmem.flush(dev); - nv50_vm_flush(dev, 6); + nv50_vm_flush_engine(dev, 6); node->ramin = ramin; gpuobj->pinst = ramin->start; @@ -454,11 +456,3 @@ nv84_instmem_flush(struct drm_device *dev) NV_ERROR(dev, "PRAMIN flush timeout\n"); } -void -nv50_vm_flush(struct drm_device *dev, int engine) -{ - nv_wr32(dev, 0x100c80, (engine << 16) | 1); - if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) - NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); -} - diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c new file mode 100644 index 000000000000..ab6c3d0ce32e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_vm.c @@ -0,0 +1,164 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_vm.h" + +void +nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde, + struct nouveau_gpuobj *pgt) +{ + struct drm_nouveau_private *dev_priv = pgd->dev->dev_private; + u32 coverage = (pgt->size >> 3) << type; + u64 phys; + + phys = pgt->vinst; + phys |= 0x01; /* present */ + phys |= (type == 12) ? 
0x02 : 0x00; /* 4KiB pages */ + if (dev_priv->vram_sys_base) { + phys += dev_priv->vram_sys_base; + phys |= 0x30; + } + + if (coverage <= 32 * 1024 * 1024) + phys |= 0x60; + else if (coverage <= 64 * 1024 * 1024) + phys |= 0x40; + else if (coverage < 128 * 1024 * 1024) + phys |= 0x20; + + nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys)); + nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys)); +} + +void +nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde) +{ + nv_wo32(pgd, (pde * 8) + 0, 0x00000000); + nv_wo32(pgd, (pde * 8) + 4, 0xdeadcafe); +} + +static inline u64 +nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + u64 phys, u32 memtype, u32 target) +{ + struct drm_nouveau_private *dev_priv = pgt->dev->dev_private; + + phys |= 1; /* present */ + phys |= (u64)memtype << 40; + + /* IGPs don't have real VRAM, re-target to stolen system memory */ + if (target == 0 && dev_priv->vram_sys_base) { + phys += dev_priv->vram_sys_base; + target = 3; + } + + phys |= target << 4; + + if (vma->access & NV_MEM_ACCESS_SYS) + phys |= (1 << 6); + + if (!(vma->access & NV_MEM_ACCESS_WO)) + phys |= (1 << 3); + + return phys; +} + +void +nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys) +{ + u32 block, i; + + phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0); + pte <<= 3; + cnt <<= 3; + + while (cnt) { + u32 offset_h = upper_32_bits(phys); + u32 offset_l = lower_32_bits(phys); + + for (i = 7; i >= 0; i--) { + block = 1 << (i + 3); + if (cnt >= block && !(pte & (block - 1))) + break; + } + offset_l |= (i << 7); + + phys += block << (vma->node->type - 3); + cnt -= block; + + while (block) { + nv_wo32(pgt, pte + 0, offset_l); + nv_wo32(pgt, pte + 4, offset_h); + pte += 8; + block -= 8; + } + } +} + +void +nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + u32 pte, dma_addr_t *list, u32 cnt) +{ + pte <<= 3; + while (cnt--) { + u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2); + nv_wo32(pgt, pte + 0, lower_32_bits(phys)); + nv_wo32(pgt, pte + 4, upper_32_bits(phys)); + pte += 8; + } +} + +void +nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) +{ + pte <<= 3; + while (cnt--) { + nv_wo32(pgt, pte + 0, 0x00000000); + nv_wo32(pgt, pte + 4, 0x00000000); + pte += 8; + } +} + +void +nv50_vm_flush(struct nouveau_vm *vm) +{ + struct drm_nouveau_private *dev_priv = vm->dev->dev_private; + struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; + + pinstmem->flush(vm->dev); + + nv50_vm_flush_engine(vm->dev, 6); +} + +void +nv50_vm_flush_engine(struct drm_device *dev, int engine) +{ + nv_wr32(dev, 0x100c80, (engine << 16) | 1); + if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) + NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); +} diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c index 780bb1d66f95..a333e5905346 100644 --- a/drivers/gpu/drm/nouveau/nv84_crypt.c +++ b/drivers/gpu/drm/nouveau/nv84_crypt.c @@ -25,6 +25,7 @@ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_util.h" +#include "nouveau_vm.h" static void nv84_crypt_isr(struct drm_device *); @@ -84,7 +85,7 @@ nv84_crypt_destroy_context(struct nouveau_channel *chan) void nv84_crypt_tlb_flush(struct drm_device *dev) { - nv50_vm_flush(dev, 0x0a); + nv50_vm_flush_engine(dev, 0x0a); } int -- cgit v1.2.3 From f869ef882382a4b6cb42d259e399aeec3781d4bb Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 15 Nov 2010 11:53:16 +1000 Subject: drm/nv50: implement BAR1/BAR3 management 
on top of new VM code Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 46 +++++- drivers/gpu/drm/nouveau/nouveau_drv.h | 10 ++ drivers/gpu/drm/nouveau/nv50_instmem.c | 258 ++++++++++++++------------------- drivers/gpu/drm/nouveau/nv50_vm.c | 1 - 4 files changed, 162 insertions(+), 153 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 5a71ca4346c8..4d142031d542 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -32,6 +32,8 @@ #include "nouveau_drm.h" #include "nouveau_drv.h" #include "nouveau_dma.h" +#include "nouveau_mm.h" +#include "nouveau_vm.h" #include #include @@ -386,10 +388,13 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_VRAM: - if (dev_priv->card_type == NV_50) + if (dev_priv->card_type == NV_50) { man->func = &nouveau_vram_manager; - else + man->io_reserve_fastpath = false; + man->use_io_reserve_lru = true; + } else { man->func = &ttm_bo_manager_func; + } man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED | @@ -858,6 +863,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); struct drm_device *dev = dev_priv->dev; + int ret; mem->bus.addr = NULL; mem->bus.offset = 0; @@ -880,9 +886,32 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) #endif break; case TTM_PL_VRAM: - mem->bus.offset = mem->start << PAGE_SHIFT; + { + struct nouveau_vram *vram = mem->mm_node; + + if (!dev_priv->bar1_vm) { + mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.base = pci_resource_start(dev->pdev, 1); + mem->bus.is_iomem = true; + break; + } + + ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, 12, + NV_MEM_ACCESS_RW, &vram->bar_vma); + if (ret) + return ret; + + nouveau_vm_map(&vram->bar_vma, vram); + if (ret) { + nouveau_vm_put(&vram->bar_vma); + return ret; + } + + mem->bus.offset = vram->bar_vma.offset; + mem->bus.offset -= 0x0020000000ULL; mem->bus.base = pci_resource_start(dev->pdev, 1); mem->bus.is_iomem = true; + } break; default: return -EINVAL; @@ -893,6 +922,17 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) static void nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { + struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); + struct nouveau_vram *vram = mem->mm_node; + + if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM) + return; + + if (!vram->bar_vma.node) + return; + + nouveau_vm_unmap(&vram->bar_vma); + nouveau_vm_put(&vram->bar_vma); } static int diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index ce1dde4a65d6..452a8652a498 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -55,7 +55,10 @@ struct nouveau_fpriv { #include "nouveau_reg.h" #include "nouveau_bios.h" #include "nouveau_util.h" + struct nouveau_grctx; +struct nouveau_vram; +#include "nouveau_vm.h" #define MAX_NUM_DCB_ENTRIES 16 @@ -69,6 +72,8 @@ struct nouveau_grctx; struct nouveau_vram { struct drm_device *dev; + struct nouveau_vma bar_vma; + struct list_head regions; u32 memtype; u64 offset; @@ -244,6 +249,7 @@ struct nouveau_channel { void *pgraph_ctx; /* NV50 VM */ + struct nouveau_vm *vm; struct 
nouveau_gpuobj *vm_pd; struct nouveau_gpuobj *vm_gart_pt; struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; @@ -701,6 +707,10 @@ struct drm_nouveau_private { uint64_t fb_aper_free; int fb_mtrr; + /* BAR control (NV50-) */ + struct nouveau_vm *bar1_vm; + struct nouveau_vm *bar3_vm; + /* G8x/G9x virtual address space */ uint64_t vm_gart_base; uint64_t vm_gart_size; diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 4eb2f0835e27..4ba8f74e77b1 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -31,12 +31,16 @@ #include "nouveau_drv.h" #include "nouveau_vm.h" +#define BAR1_VM_BASE 0x0020000000ULL +#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1) +#define BAR3_VM_BASE 0x0000000000ULL +#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3) + struct nv50_instmem_priv { uint32_t save1700[5]; /* 0x1700->0x1710 */ - struct nouveau_gpuobj *pramin_pt; - struct nouveau_gpuobj *pramin_bar; - struct nouveau_gpuobj *fb_bar; + struct nouveau_gpuobj *bar1_dmaobj; + struct nouveau_gpuobj *bar3_dmaobj; }; static void @@ -50,6 +54,7 @@ nv50_channel_del(struct nouveau_channel **pchan) return; nouveau_gpuobj_ref(NULL, &chan->ramfc); + nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); nouveau_gpuobj_ref(NULL, &chan->vm_pd); if (chan->ramin_heap.free_stack.next) drm_mm_takedown(&chan->ramin_heap); @@ -58,14 +63,14 @@ nv50_channel_del(struct nouveau_channel **pchan) } static int -nv50_channel_new(struct drm_device *dev, u32 size, +nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm, struct nouveau_channel **pchan) { struct drm_nouveau_private *dev_priv = dev->dev_private; u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200; struct nouveau_channel *chan; - int ret; + int ret, i; chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) @@ -94,6 +99,17 @@ nv50_channel_new(struct drm_device *dev, u32 size, return ret; } + for (i = 0; i < 0x4000; i += 8) { + nv_wo32(chan->vm_pd, i + 0, 0x00000000); + nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); + } + + ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd); + if (ret) { + nv50_channel_del(&chan); + return ret; + } + ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? 
~0 : chan->ramin->pinst + fc, chan->ramin->vinst + fc, 0x100, @@ -113,6 +129,7 @@ nv50_instmem_init(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_instmem_priv *priv; struct nouveau_channel *chan; + struct nouveau_vm *vm; int ret, i; u32 tmp; @@ -129,53 +146,75 @@ nv50_instmem_init(struct drm_device *dev) ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size); if (ret) { NV_ERROR(dev, "Failed to init RAMIN heap\n"); - return -ENOMEM; + goto error; } - /* we need a channel to plug into the hw to control the BARs */ - ret = nv50_channel_new(dev, 128*1024, &dev_priv->channels.ptr[0]); + /* BAR3 */ + ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE, + 29, 12, 16, &dev_priv->bar3_vm); if (ret) - return ret; - chan = dev_priv->channels.ptr[127] = dev_priv->channels.ptr[0]; + goto error; - /* allocate page table for PRAMIN BAR */ - ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8, - 0x1000, NVOBJ_FLAG_ZERO_ALLOC, - &priv->pramin_pt); + ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8, + 0x1000, NVOBJ_FLAG_DONT_MAP | + NVOBJ_FLAG_ZERO_ALLOC, + &dev_priv->bar3_vm->pgt[0].obj); if (ret) - return ret; + goto error; + dev_priv->bar3_vm->pgt[0].page_shift = 12; + dev_priv->bar3_vm->pgt[0].refcount = 1; - nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63); - nv_wo32(chan->vm_pd, 0x0004, 0); + nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj); - /* DMA object for PRAMIN BAR */ - ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar); + ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan); if (ret) - return ret; - nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000); - nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1); - nv_wo32(priv->pramin_bar, 0x08, 0x00000000); - nv_wo32(priv->pramin_bar, 0x0c, 0x00000000); - nv_wo32(priv->pramin_bar, 0x10, 0x00000000); - nv_wo32(priv->pramin_bar, 0x14, 0x00000000); + goto error; + dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan; - nv50_instmem_map(chan->ramin); + ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE, + NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM, + NV_MEM_TYPE_VM, NV_MEM_COMP_VM, + &priv->bar3_dmaobj); + if (ret) + goto error; - /* poke regs... 
*/ nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12)); nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12)); - nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4)); + nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4)); tmp = nv_ri32(dev, 0); nv_wi32(dev, 0, ~tmp); if (nv_ri32(dev, 0) != ~tmp) { NV_ERROR(dev, "PRAMIN readback failed\n"); - return -EIO; + ret = -EIO; + goto error; } nv_wi32(dev, 0, tmp); dev_priv->ramin_available = true; + /* BAR1 */ + ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, + 29, 12, 16, &vm); + if (ret) + goto error; + + ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd); + if (ret) + goto error; + nouveau_vm_ref(NULL, &vm, NULL); + + ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE, + NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM, + NV_MEM_TYPE_VM, NV_MEM_COMP_VM, + &priv->bar1_dmaobj); + if (ret) + goto error; + + nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4)); + for (i = 0; i < 8; i++) + nv_wr32(dev, 0x1900 + (i*4), 0); + /* Determine VM layout */ dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK); dev_priv->vm_gart_size = NV50_VM_BLOCK; @@ -200,38 +239,19 @@ nv50_instmem_init(struct drm_device *dev) for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8, 0, NVOBJ_FLAG_ZERO_ALLOC, - &chan->vm_vram_pt[i]); + &dev_priv->vm_vram_pt[i]); if (ret) { NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret); dev_priv->vm_vram_pt_nr = i; return ret; } - dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]; - - nv_wo32(chan->vm_pd, 0x10 + (i*8), - chan->vm_vram_pt[i]->vinst | 0x61); - nv_wo32(chan->vm_pd, 0x14 + (i*8), 0); } - /* DMA object for FB BAR */ - ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar); - if (ret) - return ret; - nv_wo32(priv->fb_bar, 0x00, 0x7fc00000); - nv_wo32(priv->fb_bar, 0x04, 0x40000000 + - pci_resource_len(dev->pdev, 1) - 1); - nv_wo32(priv->fb_bar, 0x08, 0x40000000); - nv_wo32(priv->fb_bar, 0x0c, 0x00000000); - nv_wo32(priv->fb_bar, 0x10, 0x00000000); - nv_wo32(priv->fb_bar, 0x14, 0x00000000); - - dev_priv->engine.instmem.flush(dev); - - nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4)); - for (i = 0; i < 8; i++) - nv_wr32(dev, 0x1900 + (i*4), 0); - return 0; + +error: + nv50_instmem_takedown(dev); + return ret; } void @@ -249,23 +269,25 @@ nv50_instmem_takedown(struct drm_device *dev) dev_priv->ramin_available = false; - /* Restore state from before init */ + for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) + nouveau_gpuobj_ref(NULL, &dev_priv->vm_vram_pt[i]); + dev_priv->vm_vram_pt_nr = 0; + for (i = 0x1700; i <= 0x1710; i += 4) nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); - nouveau_gpuobj_ref(NULL, &priv->fb_bar); - nouveau_gpuobj_ref(NULL, &priv->pramin_bar); - nouveau_gpuobj_ref(NULL, &priv->pramin_pt); + nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj); + nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj); - /* Destroy dummy channel */ - if (chan) { - for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) - nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); - dev_priv->vm_vram_pt_nr = 0; + nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd); + dev_priv->channels.ptr[127] = 0; + nv50_channel_del(&dev_priv->channels.ptr[0]); - nv50_channel_del(&dev_priv->channels.ptr[0]); - dev_priv->channels.ptr[127] = NULL; - } + nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj); + nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL); + + if 
(dev_priv->ramin_heap.free_stack.next) + drm_mm_takedown(&dev_priv->ramin_heap); dev_priv->engine.instmem.priv = NULL; kfree(priv); @@ -293,9 +315,9 @@ nv50_instmem_resume(struct drm_device *dev) nv_wr32(dev, NV50_PUNK_UNK1710, 0); nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) | NV50_PUNK_BAR_CFG_BASE_VALID); - nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) | + nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) | NV50_PUNK_BAR1_CTXDMA_VALID); - nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) | + nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) | NV50_PUNK_BAR3_CTXDMA_VALID); for (i = 0; i < 8; i++) @@ -305,8 +327,7 @@ nv50_instmem_resume(struct drm_device *dev) } struct nv50_gpuobj_node { - struct nouveau_bo *vram; - struct drm_mm_node *ramin; + struct nouveau_vram *vram; u32 align; }; @@ -323,23 +344,17 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) return -ENOMEM; node->align = align; - ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM, - 0, 0x0000, true, false, &node->vram); - if (ret) { - NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); - WARN_ON(1); - return ret; - } + size = (size + 4095) & ~4095; + align = max(align, (u32)4096); - ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM); + ret = nv50_vram_new(dev, size, align, 0, 0, &node->vram); if (ret) { - NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); - nouveau_bo_ref(NULL, &node->vram); + kfree(node); return ret; } - gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT; - gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT; + gpuobj->vinst = node->vram->offset; + gpuobj->size = size; gpuobj->node = node; return 0; } @@ -347,13 +362,13 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) void nv50_instmem_put(struct nouveau_gpuobj *gpuobj) { + struct drm_device *dev = gpuobj->dev; struct nv50_gpuobj_node *node; node = gpuobj->node; gpuobj->node = NULL; - nouveau_bo_unpin(node->vram); - nouveau_bo_ref(NULL, &node->vram); + nv50_vram_del(dev, &node->vram); kfree(node); } @@ -361,83 +376,28 @@ int nv50_instmem_map(struct nouveau_gpuobj *gpuobj) { struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; - struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; struct nv50_gpuobj_node *node = gpuobj->node; - struct drm_device *dev = gpuobj->dev; - struct drm_mm_node *ramin = NULL; - u32 pte, pte_end; - u64 vram; - - do { - if (drm_mm_pre_get(&dev_priv->ramin_heap)) - return -ENOMEM; - - spin_lock(&dev_priv->ramin_lock); - ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size, - node->align, 0); - if (ramin == NULL) { - spin_unlock(&dev_priv->ramin_lock); - return -ENOMEM; - } - - ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align); - spin_unlock(&dev_priv->ramin_lock); - } while (ramin == NULL); - - pte = (ramin->start >> 12) << 1; - pte_end = ((ramin->size >> 12) << 1) + pte; - vram = gpuobj->vinst; - - NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", - ramin->start, pte, pte_end); - NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); - - vram |= 1; - if (dev_priv->vram_sys_base) { - vram += dev_priv->vram_sys_base; - vram |= 0x30; - } - - while (pte < pte_end) { - nv_wo32(priv->pramin_pt, (pte * 4) + 0, lower_32_bits(vram)); - nv_wo32(priv->pramin_pt, (pte * 4) + 4, upper_32_bits(vram)); - vram += 0x1000; - pte += 2; - } - dev_priv->engine.instmem.flush(dev); + int ret; - nv50_vm_flush_engine(dev, 6); + 
ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12, + NV_MEM_ACCESS_RW, &node->vram->bar_vma); + if (ret) + return ret; - node->ramin = ramin; - gpuobj->pinst = ramin->start; + nouveau_vm_map(&node->vram->bar_vma, node->vram); + gpuobj->pinst = node->vram->bar_vma.offset; return 0; } void nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; - struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; struct nv50_gpuobj_node *node = gpuobj->node; - u32 pte, pte_end; - if (!node->ramin || !dev_priv->ramin_available) - return; - - pte = (node->ramin->start >> 12) << 1; - pte_end = ((node->ramin->size >> 12) << 1) + pte; - - while (pte < pte_end) { - nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000); - nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000); - pte += 2; + if (node->vram->bar_vma.node) { + nouveau_vm_unmap(&node->vram->bar_vma); + nouveau_vm_put(&node->vram->bar_vma); } - dev_priv->engine.instmem.flush(gpuobj->dev); - - spin_lock(&dev_priv->ramin_lock); - drm_mm_put_block(node->ramin); - node->ramin = NULL; - spin_unlock(&dev_priv->ramin_lock); } void diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c index ab6c3d0ce32e..efc63c0b0d92 100644 --- a/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/drivers/gpu/drm/nouveau/nv50_vm.c @@ -151,7 +151,6 @@ nv50_vm_flush(struct nouveau_vm *vm) struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; pinstmem->flush(vm->dev); - nv50_vm_flush_engine(vm->dev, 6); } -- cgit v1.2.3 From 4c1361429841344ce4d164492ee7620cf3286eb7 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 15 Nov 2010 11:54:21 +1000 Subject: drm/nv50: implement global channel address space on new VM code As of this commit, it's guaranteed that if an object is in VRAM, its GPU virtual address will be constant.
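(Aside, for illustration only; the snippet below is not part of the patch. It condenses the nouveau_bo.c hunks that follow: a buffer now reserves its virtual range once, at creation time, in the shared channel VM, and later moves merely rewrite the PTEs for that same range. "size", "page_shift" and "new_mem" stand in for values the driver computes; the vm calls themselves are the ones introduced below.)

	/* hypothetical sketch: reserve a constant virtual range at bo
	 * creation; nvbo->vma.offset never changes after this point */
	ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
			     NV_MEM_ACCESS_RW, &nvbo->vma);
	if (ret)
		return ret;

	/* on each move into VRAM, point the PTEs at the new pages */
	nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
	nvbo->bo.offset = nvbo->vma.offset; /* GPU address stays constant */
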
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 41 ++++++++------ drivers/gpu/drm/nouveau/nouveau_channel.c | 2 +- drivers/gpu/drm/nouveau/nouveau_drv.c | 8 +-- drivers/gpu/drm/nouveau/nouveau_drv.h | 17 +----- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 4 +- drivers/gpu/drm/nouveau/nouveau_mem.c | 94 ------------------------------- drivers/gpu/drm/nouveau/nouveau_object.c | 47 ++++------------ drivers/gpu/drm/nouveau/nv50_crtc.c | 6 +- drivers/gpu/drm/nouveau/nv50_fbcon.c | 13 ++--- drivers/gpu/drm/nouveau/nv50_graph.c | 3 + drivers/gpu/drm/nouveau/nv50_instmem.c | 48 +++++----------- drivers/gpu/drm/nouveau/nv50_vm.c | 17 +++++- drivers/gpu/drm/nouveau/nv84_crypt.c | 2 + 13 files changed, 86 insertions(+), 216 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 4d142031d542..203e75de4128 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -49,6 +49,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) DRM_ERROR("bo %p still attached to GEM object\n", bo); nv10_mem_put_tile_region(dev, nvbo->tile, NULL); + nouveau_vm_put(&nvbo->vma); kfree(nvbo); } @@ -113,6 +114,15 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, &align, &size); align >>= PAGE_SHIFT; + if (!nvbo->no_vm && dev_priv->chan_vm) { + ret = nouveau_vm_get(dev_priv->chan_vm, size, 16, + NV_MEM_ACCESS_RW, &nvbo->vma); + if (ret) { + kfree(nvbo); + return ret; + } + } + nouveau_bo_placement_set(nvbo, flags, 0); nvbo->channel = chan; @@ -125,6 +135,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, } nvbo->channel = NULL; + if (nvbo->vma.node) { + if (nvbo->bo.mem.mem_type == TTM_PL_VRAM) + nvbo->bo.offset = nvbo->vma.offset; + } + *pnvbo = nvbo; return 0; } @@ -294,6 +309,11 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, if (ret) return ret; + if (nvbo->vma.node) { + if (nvbo->bo.mem.mem_type == TTM_PL_VRAM) + nvbo->bo.offset = nvbo->vma.offset; + } + return 0; } @@ -400,10 +420,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; - if (dev_priv->card_type == NV_50) - man->gpu_offset = 0x40000000; - else - man->gpu_offset = 0; + man->gpu_offset = 0; break; case TTM_PL_TT: man->func = &ttm_bo_manager_func; @@ -507,12 +524,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, dst_offset = new_mem->start << PAGE_SHIFT; if (!nvbo->no_vm) { if (old_mem->mem_type == TTM_PL_VRAM) - src_offset += dev_priv->vm_vram_base; + src_offset = nvbo->vma.offset; else src_offset += dev_priv->vm_gart_base; if (new_mem->mem_type == TTM_PL_VRAM) - dst_offset += dev_priv->vm_vram_base; + dst_offset = nvbo->vma.offset; else dst_offset += dev_priv->vm_gart_base; } @@ -756,7 +773,6 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, struct drm_device *dev = dev_priv->dev; struct nouveau_bo *nvbo = nouveau_bo(bo); uint64_t offset; - int ret; if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) { /* Nothing to do. 
*/ @@ -766,15 +782,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, offset = new_mem->start << PAGE_SHIFT; - if (dev_priv->card_type == NV_50) { - ret = nv50_mem_vm_bind_linear(dev, - offset + dev_priv->vm_vram_base, - new_mem->size, - nouveau_bo_tile_layout(nvbo), - offset); - if (ret) - return ret; - + if (dev_priv->chan_vm) { + nouveau_vm_map(&nvbo->vma, new_mem->mm_node); } else if (dev_priv->card_type >= NV_10) { *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size, nvbo->tile_mode, diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index a3d33a582a98..6f37995aee2d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -39,7 +39,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) if (dev_priv->card_type >= NV_50) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, - dev_priv->vm_end, NV_MEM_ACCESS_RO, + (1ULL << 40), NV_MEM_ACCESS_RO, NV_MEM_TARGET_VM, &pushbuf); chan->pushbuf_base = pb->bo.offset; } else diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index a48c7da133d2..bb170570938b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -339,13 +339,11 @@ nouveau_pci_resume(struct pci_dev *pdev) list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT; - nv_crtc->cursor.set_offset(nv_crtc, - nv_crtc->cursor.nvbo->bo.offset - - dev_priv->vm_vram_base); - + nv_crtc->cursor.set_offset(nv_crtc, offset); nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, - nv_crtc->cursor_saved_y); + nv_crtc->cursor_saved_y); } /* Force CLUT to get re-loaded during modeset */ diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 452a8652a498..dce9a5f6f6c4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -65,10 +65,6 @@ struct nouveau_vram; #define NOUVEAU_MAX_CHANNEL_NR 128 #define NOUVEAU_MAX_TILE_NR 15 -#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL) -#define NV50_VM_BLOCK (512*1024*1024ULL) -#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK) - struct nouveau_vram { struct drm_device *dev; @@ -106,6 +102,7 @@ struct nouveau_bo { struct nouveau_channel *channel; + struct nouveau_vma vma; bool mappable; bool no_vm; @@ -252,7 +249,6 @@ struct nouveau_channel { struct nouveau_vm *vm; struct nouveau_gpuobj *vm_pd; struct nouveau_gpuobj *vm_gart_pt; - struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; /* Objects */ struct nouveau_gpuobj *ramin; /* Private instmem */ @@ -712,13 +708,9 @@ struct drm_nouveau_private { struct nouveau_vm *bar3_vm; /* G8x/G9x virtual address space */ + struct nouveau_vm *chan_vm; uint64_t vm_gart_base; uint64_t vm_gart_size; - uint64_t vm_vram_base; - uint64_t vm_vram_size; - uint64_t vm_end; - struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; - int vm_vram_pt_nr; struct nvbios vbios; @@ -836,11 +828,6 @@ extern struct nouveau_tile_reg *nv10_mem_set_tiling( extern void nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile, struct nouveau_fence *fence); -extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt, - uint32_t size, uint32_t flags, - uint64_t phys); -extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt, - uint32_t size); extern const struct 
ttm_mem_type_manager_func nouveau_vram_manager; /* nouveau_notifier.c */ diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 0fce4eb914d5..ea861c915149 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -338,8 +338,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, FBINFO_HWACCEL_IMAGEBLIT; info->flags |= FBINFO_CAN_FORCE_OUTPUT; info->fbops = &nouveau_fbcon_sw_ops; - info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset - - dev_priv->vm_vram_base; + info->fix.smem_start = dev->mode_config.fb_base + + (nvbo->bo.mem.start << PAGE_SHIFT); info->fix.smem_len = size; info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 2d02401e8227..4d2d3de97ee9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -144,100 +144,6 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size, return found; } -/* - * NV50 VM helpers - */ -int -nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, - uint32_t flags, uint64_t phys) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *pgt; - unsigned block; - int i; - - virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1; - size = (size >> 16) << 1; - - phys |= ((uint64_t)flags << 32); - phys |= 1; - if (dev_priv->vram_sys_base) { - phys += dev_priv->vram_sys_base; - phys |= 0x30; - } - - while (size) { - unsigned offset_h = upper_32_bits(phys); - unsigned offset_l = lower_32_bits(phys); - unsigned pte, end; - - for (i = 7; i >= 0; i--) { - block = 1 << (i + 1); - if (size >= block && !(virt & (block - 1))) - break; - } - offset_l |= (i << 7); - - phys += block << 15; - size -= block; - - while (block) { - pgt = dev_priv->vm_vram_pt[virt >> 14]; - pte = virt & 0x3ffe; - - end = pte + block; - if (end > 16384) - end = 16384; - block -= (end - pte); - virt += (end - pte); - - while (pte < end) { - nv_wo32(pgt, (pte * 4) + 0, offset_l); - nv_wo32(pgt, (pte * 4) + 4, offset_h); - pte += 2; - } - } - } - - dev_priv->engine.instmem.flush(dev); - dev_priv->engine.fifo.tlb_flush(dev); - dev_priv->engine.graph.tlb_flush(dev); - nv50_vm_flush_engine(dev, 6); - return 0; -} - -void -nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_gpuobj *pgt; - unsigned pages, pte, end; - - virt -= dev_priv->vm_vram_base; - pages = (size >> 16) << 1; - - while (pages) { - pgt = dev_priv->vm_vram_pt[virt >> 29]; - pte = (virt & 0x1ffe0000ULL) >> 15; - - end = pte + pages; - if (end > 16384) - end = 16384; - pages -= (end - pte); - virt += (end - pte) << 15; - - while (pte < end) { - nv_wo32(pgt, (pte * 4), 0); - pte++; - } - } - - dev_priv->engine.instmem.flush(dev); - dev_priv->engine.fifo.tlb_flush(dev); - dev_priv->engine.graph.tlb_flush(dev); - nv50_vm_flush_engine(dev, 6); -} - /* * Cleanup everything */ diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index dd1859f7d8b0..573fd7316d63 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -35,6 +35,7 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" #include "nouveau_ramht.h" +#include "nouveau_vm.h" struct nouveau_gpuobj_method { struct list_head head; @@ -770,9 +771,8 @@ 
nouveau_gpuobj_channel_init(struct nouveau_channel *chan, { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; struct nouveau_gpuobj *vram = NULL, *tt = NULL; - int ret, i; + int ret; NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); @@ -783,16 +783,14 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, return ret; } - /* NV50 VM + /* NV50/NVC0 VM * - Allocate per-channel page-directory - * - Map GART and VRAM into the channel's address space at the - * locations determined during init. + * - Link with shared channel VM */ - if (dev_priv->card_type >= NV_50) { + if (dev_priv->chan_vm) { u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200; u64 vm_vinst = chan->ramin->vinst + pgd_offs; u32 vm_pinst = chan->ramin->pinst; - u32 pde; if (vm_pinst != ~0) vm_pinst += pgd_offs; @@ -801,29 +799,9 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, 0, &chan->vm_pd); if (ret) return ret; - for (i = 0; i < 0x4000; i += 8) { - nv_wo32(chan->vm_pd, i + 0, 0x00000000); - nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe); - } - - nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, - &chan->vm_gart_pt); - pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8; - nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3); - nv_wo32(chan->vm_pd, pde + 4, 0x00000000); - - pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8; - for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { - nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i], - &chan->vm_vram_pt[i]); - - nv_wo32(chan->vm_pd, pde + 0, - chan->vm_vram_pt[i]->vinst | 0x61); - nv_wo32(chan->vm_pd, pde + 4, 0x00000000); - pde += 8; - } - instmem->flush(dev); + nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd); + chan->vm->map_pgt(chan->vm_pd, 12, 1, dev_priv->gart_info.sg_ctxdma); } /* RAMHT */ @@ -846,8 +824,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, /* VRAM ctxdma */ if (dev_priv->card_type >= NV_50) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - 0, dev_priv->vm_end, - NV_MEM_ACCESS_RW, + 0, (1ULL << 40), NV_MEM_ACCESS_RW, NV_MEM_TARGET_VM, &vram); if (ret) { NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret); @@ -874,8 +851,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, /* TT memory ctxdma */ if (dev_priv->card_type >= NV_50) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - 0, dev_priv->vm_end, - NV_MEM_ACCESS_RW, + 0, (1ULL << 40), NV_MEM_ACCESS_RW, NV_MEM_TARGET_VM, &tt); } else { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, @@ -902,9 +878,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, void nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) { - struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct drm_device *dev = chan->dev; - int i; NV_DEBUG(dev, "ch%d\n", chan->id); @@ -913,10 +887,9 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) nouveau_ramht_ref(NULL, &chan->ramht, chan); + nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); nouveau_gpuobj_ref(NULL, &chan->vm_pd); nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt); - for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) - nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]); if (chan->ramin_heap.free_stack.next) drm_mm_takedown(&chan->ramin_heap); diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index f3570cc45017..2c346f797285 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ 
b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -345,7 +345,6 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t buffer_handle, uint32_t width, uint32_t height) { struct drm_device *dev = crtc->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nouveau_bo *cursor = NULL; struct drm_gem_object *gem; @@ -374,8 +373,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, nouveau_bo_unmap(cursor); - nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset - - dev_priv->vm_vram_base); + nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT); nv_crtc->cursor.show(nv_crtc, true); out: @@ -548,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, return -EINVAL; } - nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; + nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT; nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 156731993907..6d38cb1488ae 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -3,6 +3,7 @@ #include "nouveau_dma.h" #include "nouveau_ramht.h" #include "nouveau_fbcon.h" +#include "nouveau_mm.h" int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) @@ -134,10 +135,8 @@ nv50_fbcon_accel_init(struct fb_info *info) struct drm_device *dev = nfbdev->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->channel; + struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo; int ret, format; - uint64_t fb; - - fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base; switch (info->var.bits_per_pixel) { case 8: @@ -224,8 +223,8 @@ nv50_fbcon_accel_init(struct fb_info *info) OUT_RING(chan, info->fix.line_length); OUT_RING(chan, info->var.xres_virtual); OUT_RING(chan, info->var.yres_virtual); - OUT_RING(chan, upper_32_bits(fb)); - OUT_RING(chan, lower_32_bits(fb)); + OUT_RING(chan, upper_32_bits(nvbo->vma.offset)); + OUT_RING(chan, lower_32_bits(nvbo->vma.offset)); BEGIN_RING(chan, NvSub2D, 0x0230, 2); OUT_RING(chan, format); OUT_RING(chan, 1); @@ -233,8 +232,8 @@ nv50_fbcon_accel_init(struct fb_info *info) OUT_RING(chan, info->fix.line_length); OUT_RING(chan, info->var.xres_virtual); OUT_RING(chan, info->var.yres_virtual); - OUT_RING(chan, upper_32_bits(fb)); - OUT_RING(chan, lower_32_bits(fb)); + OUT_RING(chan, upper_32_bits(nvbo->vma.offset)); + OUT_RING(chan, lower_32_bits(nvbo->vma.offset)); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index f5fd1b296d27..c510e74acf4d 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -246,6 +246,7 @@ nv50_graph_create_context(struct nouveau_channel *chan) nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12); dev_priv->engine.instmem.flush(dev); + atomic_inc(&chan->vm->pgraph_refs); return 0; } @@ -277,6 +278,8 @@ nv50_graph_destroy_context(struct nouveau_channel *chan) spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); nouveau_gpuobj_ref(NULL, &chan->ramin_grctx); + + atomic_dec(&chan->vm->pgraph_refs); } static int diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 
4ba8f74e77b1..08202fd682e4 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -131,6 +131,7 @@ nv50_instmem_init(struct drm_device *dev) struct nouveau_channel *chan; struct nouveau_vm *vm; int ret, i; + u64 nongart_o; u32 tmp; priv = kzalloc(sizeof(*priv), GFP_KERNEL); @@ -215,37 +216,18 @@ nv50_instmem_init(struct drm_device *dev) for (i = 0; i < 8; i++) nv_wr32(dev, 0x1900 + (i*4), 0); - /* Determine VM layout */ - dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK); - dev_priv->vm_gart_size = NV50_VM_BLOCK; - - dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; - dev_priv->vm_vram_size = dev_priv->vram_size; - if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM) - dev_priv->vm_vram_size = NV50_VM_MAX_VRAM; - dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK); - dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK; - - dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size; - - NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n", - dev_priv->vm_gart_base, - dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1); - NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n", - dev_priv->vm_vram_base, - dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1); - - /* VRAM page table(s), mapped into VM at +1GiB */ - for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) { - ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8, - 0, NVOBJ_FLAG_ZERO_ALLOC, - &dev_priv->vm_vram_pt[i]); - if (ret) { - NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret); - dev_priv->vm_vram_pt_nr = i; - return ret; - } - } + /* Create shared channel VM, space is reserved for GART mappings at + * the beginning of this address space, it's managed separately + * because TTM makes life painful + */ + dev_priv->vm_gart_base = 0x0020000000ULL; + dev_priv->vm_gart_size = 512 * 1024 * 1024; + nongart_o = dev_priv->vm_gart_base + dev_priv->vm_gart_size; + + ret = nouveau_vm_new(dev, 0, (1ULL << 40), nongart_o, + 29, 12, 16, &dev_priv->chan_vm); + if (ret) + return ret; return 0; @@ -269,9 +251,7 @@ nv50_instmem_takedown(struct drm_device *dev) dev_priv->ramin_available = false; - for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) - nouveau_gpuobj_ref(NULL, &dev_priv->vm_vram_pt[i]); - dev_priv->vm_vram_pt_nr = 0; + nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL); for (i = 0x1700; i <= 0x1710; i += 4) nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]); diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c index efc63c0b0d92..eebab95f59b2 100644 --- a/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/drivers/gpu/drm/nouveau/nv50_vm.c @@ -149,9 +149,24 @@ nv50_vm_flush(struct nouveau_vm *vm) { struct drm_nouveau_private *dev_priv = vm->dev->dev_private; struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt; pinstmem->flush(vm->dev); - nv50_vm_flush_engine(vm->dev, 6); + + /* BAR */ + if (vm != dev_priv->chan_vm) { + nv50_vm_flush_engine(vm->dev, 6); + return; + } + + pfifo->tlb_flush(vm->dev); + + if (atomic_read(&vm->pgraph_refs)) + pgraph->tlb_flush(vm->dev); + if (atomic_read(&vm->pcrypt_refs)) + pcrypt->tlb_flush(vm->dev); } void diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c index a333e5905346..ec18ae1c3886 100644 --- a/drivers/gpu/drm/nouveau/nv84_crypt.c 
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c @@ -53,6 +53,7 @@ nv84_crypt_create_context(struct nouveau_channel *chan) nv_wo32(ramin, 0xb4, 0); dev_priv->engine.instmem.flush(dev); + atomic_inc(&chan->vm->pcrypt_refs); return 0; } @@ -80,6 +81,7 @@ nv84_crypt_destroy_context(struct nouveau_channel *chan) nv_wr32(dev, 0x10200c, 0x00000010); nouveau_gpuobj_ref(NULL, &chan->crypt_ctx); + atomic_dec(&chan->vm->pcrypt_refs); } void -- cgit v1.2.3 From bfd83aca5a1c4cc07fb18d96ed8660f2c338a8f3 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 12 Nov 2010 15:12:51 +1000 Subject: drm/nv50: enable 4KiB pages for small vram allocations Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 41 ++++++++++++++++++++--------------- drivers/gpu/drm/nouveau/nouveau_mem.c | 3 ++- 2 files changed, 25 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 203e75de4128..bc28aeada8c3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -54,39 +54,45 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) } static void -nouveau_bo_fixup_align(struct drm_device *dev, - uint32_t tile_mode, uint32_t tile_flags, - int *align, int *size) +nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size, + int *page_shift) { - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); if (dev_priv->card_type < NV_50) { - if (tile_mode) { + if (nvbo->tile_mode) { if (dev_priv->chipset >= 0x40) { *align = 65536; - *size = roundup(*size, 64 * tile_mode); + *size = roundup(*size, 64 * nvbo->tile_mode); } else if (dev_priv->chipset >= 0x30) { *align = 32768; - *size = roundup(*size, 64 * tile_mode); + *size = roundup(*size, 64 * nvbo->tile_mode); } else if (dev_priv->chipset >= 0x20) { *align = 16384; - *size = roundup(*size, 64 * tile_mode); + *size = roundup(*size, 64 * nvbo->tile_mode); } else if (dev_priv->chipset >= 0x10) { *align = 16384; - *size = roundup(*size, 32 * tile_mode); + *size = roundup(*size, 32 * nvbo->tile_mode); } } + } else { + if (likely(dev_priv->chan_vm)) { + if (*size > 256 * 1024) + *page_shift = dev_priv->chan_vm->lpg_shift; + else + *page_shift = dev_priv->chan_vm->spg_shift; + } else { + *page_shift = 12; + } + + *size = roundup(*size, (1 << *page_shift)); + *align = max((1 << *page_shift), *align); } - /* ALIGN works only on powers of two. 
*/ *size = roundup(*size, PAGE_SIZE); - if (dev_priv->card_type == NV_50) { - *size = roundup(*size, 65536); - *align = max(65536, *align); - } } int @@ -97,7 +103,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_bo *nvbo; - int ret = 0; + int ret = 0, page_shift = 0; nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); if (!nvbo) @@ -110,12 +116,11 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, nvbo->tile_flags = tile_flags; nvbo->bo.bdev = &dev_priv->ttm.bdev; - nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo), - &align, &size); + nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift); align >>= PAGE_SHIFT; if (!nvbo->no_vm && dev_priv->chan_vm) { - ret = nouveau_vm_get(dev_priv->chan_vm, size, 16, + ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift, NV_MEM_ACCESS_RW, &nvbo->vma); if (ret) { kfree(nvbo); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 4d2d3de97ee9..3a531d53de4d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -715,7 +715,8 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, struct nouveau_vram *vram; int ret; - ret = nv50_vram_new(dev, mem->num_pages << PAGE_SHIFT, 65536, 0, + ret = nv50_vram_new(dev, mem->num_pages << PAGE_SHIFT, + mem->page_alignment << PAGE_SHIFT, 0, (nvbo->tile_flags >> 8) & 0x7f, &vram); if (ret) return ret; -- cgit v1.2.3 From 5f6fdca570b13a8a2c9cab9ab6edfc17487049cf Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 12 Nov 2010 15:13:59 +1000 Subject: drm/nv50: enable non-contig vram allocations where requested Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_mem.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 3a531d53de4d..5a1809480388 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -713,10 +713,14 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, struct drm_device *dev = dev_priv->dev; struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_vram *vram; + u32 size_nc = 0; int ret; + if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) + size_nc = 1 << nvbo->vma.node->type; + ret = nv50_vram_new(dev, mem->num_pages << PAGE_SHIFT, - mem->page_alignment << PAGE_SHIFT, 0, + mem->page_alignment << PAGE_SHIFT, size_nc, (nvbo->tile_flags >> 8) & 0x7f, &vram); if (ret) return ret; -- cgit v1.2.3 From b571fe21f5c24760368b3fb927af5a7384d7721b Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 16 Nov 2010 10:13:05 +1000 Subject: drm/nv50: tidy up PCIEGART implementation Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 7 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 4 +- drivers/gpu/drm/nouveau/nouveau_object.c | 4 +- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 147 +++++++++++++++---------------- drivers/gpu/drm/nouveau/nv50_instmem.c | 12 +-- 5 files changed, 79 insertions(+), 95 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index bc28aeada8c3..42d1ad62b381 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -425,7 +425,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; - 
man->gpu_offset = 0; break; case TTM_PL_TT: man->func = &ttm_bo_manager_func; @@ -441,13 +440,13 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, TTM_MEMTYPE_FLAG_CMA; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; + man->gpu_offset = dev_priv->gart_info.aper_base; break; default: NV_ERROR(dev, "Unknown GART type: %d\n", dev_priv->gart_info.type); return -EINVAL; } - man->gpu_offset = dev_priv->vm_gart_base; break; default: NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type); @@ -531,12 +530,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, if (old_mem->mem_type == TTM_PL_VRAM) src_offset = nvbo->vma.offset; else - src_offset += dev_priv->vm_gart_base; + src_offset += dev_priv->gart_info.aper_base; if (new_mem->mem_type == TTM_PL_VRAM) dst_offset = nvbo->vma.offset; else - dst_offset += dev_priv->vm_gart_base; + dst_offset += dev_priv->gart_info.aper_base; } ret = RING_SPACE(chan, 3); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index dce9a5f6f6c4..4c5fc9c9912b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -248,7 +248,6 @@ struct nouveau_channel { /* NV50 VM */ struct nouveau_vm *vm; struct nouveau_gpuobj *vm_pd; - struct nouveau_gpuobj *vm_gart_pt; /* Objects */ struct nouveau_gpuobj *ramin; /* Private instmem */ @@ -684,6 +683,7 @@ struct drm_nouveau_private { uint64_t aper_free; struct nouveau_gpuobj *sg_ctxdma; + struct nouveau_vma vma; } gart_info; /* nv10-nv40 tiling regions */ @@ -709,8 +709,6 @@ struct drm_nouveau_private { /* G8x/G9x virtual address space */ struct nouveau_vm *chan_vm; - uint64_t vm_gart_base; - uint64_t vm_gart_size; struct nvbios vbios; diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 573fd7316d63..d1bed40dc449 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -433,7 +433,7 @@ nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class, flags0 |= 0x00030000; break; case NV_MEM_TARGET_GART: - base += dev_priv->vm_gart_base; + base += dev_priv->gart_info.aper_base; default: flags0 &= ~0x00100000; break; @@ -801,7 +801,6 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, return ret; nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd); - chan->vm->map_pgt(chan->vm_pd, 12, 1, dev_priv->gart_info.sg_ctxdma); } /* RAMHT */ @@ -889,7 +888,6 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); nouveau_gpuobj_ref(NULL, &chan->vm_pd); - nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt); if (chan->ramin_heap.free_stack.next) drm_mm_takedown(&chan->ramin_heap); diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index a0bf130b02d3..b57201ab538e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -14,7 +14,7 @@ struct nouveau_sgdma_be { dma_addr_t *pages; unsigned nr_pages; - unsigned pte_start; + u64 offset; bool bound; }; @@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be) } } -static inline unsigned -nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT); - - if (dev_priv->card_type < NV_50) - return pte + 2; - - return pte << 1; -} - static int nouveau_sgdma_bind(struct 
ttm_backend *be, struct ttm_mem_reg *mem) { @@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) NV_DEBUG(dev, "pg=0x%lx\n", mem->start); - pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT); - nvbe->pte_start = pte; + nvbe->offset = mem->start << PAGE_SHIFT; + pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2; for (i = 0; i < nvbe->nr_pages; i++) { dma_addr_t dma_offset = nvbe->pages[i]; uint32_t offset_l = lower_32_bits(dma_offset); - uint32_t offset_h = upper_32_bits(dma_offset); - - for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { - if (dev_priv->card_type < NV_50) { - nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); - pte += 1; - } else { - nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21); - nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff); - pte += 2; - } + for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) { + nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3); dma_offset += NV_CTXDMA_PAGE_SIZE; } } - dev_priv->engine.instmem.flush(nvbe->dev); - - if (dev_priv->card_type == NV_50) { - dev_priv->engine.fifo.tlb_flush(dev); - dev_priv->engine.graph.tlb_flush(dev); - } nvbe->bound = true; return 0; @@ -142,24 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be) if (!nvbe->bound) return 0; - pte = nvbe->pte_start; + pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2; for (i = 0; i < nvbe->nr_pages; i++) { - for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) { - if (dev_priv->card_type < NV_50) { - nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000); - pte += 1; - } else { - nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000); - nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000); - pte += 2; - } - } - } - dev_priv->engine.instmem.flush(nvbe->dev); - - if (dev_priv->card_type == NV_50) { - dev_priv->engine.fifo.tlb_flush(dev); - dev_priv->engine.graph.tlb_flush(dev); + for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) + nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000); } nvbe->bound = false; @@ -182,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be) } } +static int +nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; + + nvbe->offset = mem->start << PAGE_SHIFT; + + nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset, + nvbe->nr_pages << PAGE_SHIFT, nvbe->pages); + nvbe->bound = true; + return 0; +} + +static int +nv50_sgdma_unbind(struct ttm_backend *be) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; + + if (!nvbe->bound) + return 0; + + nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset, + nvbe->nr_pages << PAGE_SHIFT); + nvbe->bound = false; + return 0; +} + static struct ttm_backend_func nouveau_sgdma_backend = { .populate = nouveau_sgdma_populate, .clear = nouveau_sgdma_clear, @@ -190,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = { .destroy = nouveau_sgdma_destroy }; +static struct ttm_backend_func nv50_sgdma_backend = { + .populate = nouveau_sgdma_populate, + .clear = nouveau_sgdma_clear, + .bind = nv50_sgdma_bind, + .unbind = nv50_sgdma_unbind, + .destroy = nouveau_sgdma_destroy +}; + struct ttm_backend * nouveau_sgdma_init_ttm(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_sgdma_be *nvbe; - if (!dev_priv->gart_info.sg_ctxdma) - return NULL; - nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); if (!nvbe) return NULL; nvbe->dev = dev; - 
nvbe->backend.func = &nouveau_sgdma_backend; - + if (dev_priv->card_type < NV_50) + nvbe->backend.func = &nouveau_sgdma_backend; + else + nvbe->backend.func = &nv50_sgdma_backend; return &nvbe->backend; } @@ -226,21 +221,15 @@ nouveau_sgdma_init(struct drm_device *dev) obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; obj_size += 8; /* ctxdma header */ - } else { - /* 1 entire VM page table */ - aper_size = (512 * 1024 * 1024); - obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8; - } - ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, - NVOBJ_FLAG_ZERO_ALLOC | - NVOBJ_FLAG_ZERO_FREE, &gpuobj); - if (ret) { - NV_ERROR(dev, "Error creating sgdma object: %d\n", ret); - return ret; - } + ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &gpuobj); + if (ret) { + NV_ERROR(dev, "Error creating sgdma object: %d\n", ret); + return ret; + } - if (dev_priv->card_type < NV_50) { nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | (1 << 12) /* PT present */ | (0 << 13) /* PT *not* linear */ | @@ -249,18 +238,23 @@ nouveau_sgdma_init(struct drm_device *dev) nv_wo32(gpuobj, 4, aper_size - 1); for (i = 2; i < 2 + (aper_size >> 12); i++) nv_wo32(gpuobj, i * 4, 0x00000000); - } else { - for (i = 0; i < obj_size; i += 8) { - nv_wo32(gpuobj, i + 0, 0x00000000); - nv_wo32(gpuobj, i + 4, 0x00000000); - } + + dev_priv->gart_info.sg_ctxdma = gpuobj; + dev_priv->gart_info.aper_base = 0; + dev_priv->gart_info.aper_size = aper_size; + } else + if (dev_priv->chan_vm) { + ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024, + 12, NV_MEM_ACCESS_RW, + &dev_priv->gart_info.vma); + if (ret) + return ret; + + dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset; + dev_priv->gart_info.aper_size = 512 * 1024 * 1024; } - dev_priv->engine.instmem.flush(dev); dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; - dev_priv->gart_info.aper_base = 0; - dev_priv->gart_info.aper_size = aper_size; - dev_priv->gart_info.sg_ctxdma = gpuobj; return 0; } @@ -270,6 +264,7 @@ nouveau_sgdma_takedown(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma); + nouveau_vm_put(&dev_priv->gart_info.vma); } int diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 08202fd682e4..ec102bda8446 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -131,7 +131,6 @@ nv50_instmem_init(struct drm_device *dev) struct nouveau_channel *chan; struct nouveau_vm *vm; int ret, i; - u64 nongart_o; u32 tmp; priv = kzalloc(sizeof(*priv), GFP_KERNEL); @@ -216,15 +215,10 @@ nv50_instmem_init(struct drm_device *dev) for (i = 0; i < 8; i++) nv_wr32(dev, 0x1900 + (i*4), 0); - /* Create shared channel VM, space is reserved for GART mappings at - * the beginning of this address space, it's managed separately - * because TTM makes life painful + /* Create shared channel VM, space is reserved at the beginning + * to catch "NULL pointer" references */ - dev_priv->vm_gart_base = 0x0020000000ULL; - dev_priv->vm_gart_size = 512 * 1024 * 1024; - nongart_o = dev_priv->vm_gart_base + dev_priv->vm_gart_size; - - ret = nouveau_vm_new(dev, 0, (1ULL << 40), nongart_o, + ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL, 29, 12, 16, &dev_priv->chan_vm); if (ret) return ret; -- cgit v1.2.3 From 34cf01bc4b8021cef62cbd79224577c13d01b106 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 22 Nov 2010 10:48:51 +1000 Subject: drm/nouveau: allow gpuobj vinst 
to be a virtual address when necessary Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nv50_instmem.c | 25 +++++++++++++++++++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 4c5fc9c9912b..2cd87e6b54b7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -158,6 +158,7 @@ enum nouveau_flags { #define NVOBJ_FLAG_DONT_MAP (1 << 0) #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1) #define NVOBJ_FLAG_ZERO_FREE (1 << 2) +#define NVOBJ_FLAG_VM (1 << 3) #define NVOBJ_CINST_GLOBAL 0xdeadbeef diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index ec102bda8446..e2efd6f22ffb 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -302,6 +302,7 @@ nv50_instmem_resume(struct drm_device *dev) struct nv50_gpuobj_node { struct nouveau_vram *vram; + struct nouveau_vma chan_vma; u32 align; }; @@ -310,6 +311,7 @@ int nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) { struct drm_device *dev = gpuobj->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv50_gpuobj_node *node = NULL; int ret; @@ -328,8 +330,23 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) } gpuobj->vinst = node->vram->offset; - gpuobj->size = size; - gpuobj->node = node; + + if (gpuobj->flags & NVOBJ_FLAG_VM) { + ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, + NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS, + &node->chan_vma); + if (ret) { + nv50_vram_del(dev, &node->vram); + kfree(node); + return ret; + } + + nouveau_vm_map(&node->chan_vma, node->vram); + gpuobj->vinst = node->chan_vma.offset; + } + + gpuobj->size = size; + gpuobj->node = node; return 0; } @@ -342,6 +359,10 @@ nv50_instmem_put(struct nouveau_gpuobj *gpuobj) node = gpuobj->node; gpuobj->node = NULL; + if (node->chan_vma.node) { + nouveau_vm_unmap(&node->chan_vma); + nouveau_vm_put(&node->chan_vma); + } nv50_vram_del(dev, &node->vram); kfree(node); } -- cgit v1.2.3 From 60d2a88ae896ae51c76f8b15c2f4b762d5b00864 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 6 Dec 2010 15:28:54 +1000 Subject: drm/nouveau: kick vram functions out into an "engine" NVC0 will be able to share some of nv50's paths this way. 
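(Aside, for illustration only; the snippet below is not part of the commit. It sketches the indirection being introduced: callers go through a small per-chipset vtable instead of calling nv50_vram_*() directly, which is what lets an NVC0 backend slot in later. The hook signatures mirror the struct nouveau_vram_engine added in nouveau_drv.h below; "size", "align", "size_nc", "memtype" and "tile_flags" stand in for caller-supplied values.)

	/* hypothetical caller of the new vram engine hooks */
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct nouveau_vram *node;

	if (!vram->flags_valid(dev, tile_flags))
		return -EINVAL;

	ret = vram->get(dev, size, align, size_nc, memtype, &node);
	if (ret)
		return ret;
	/* ... use node->offset ... */
	vram->put(dev, &node);
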
This also makes the card-specific vram code responsible for deciding if a given set of tile_flags is valid, rather than duplicating the allowed types in nv50_vram.c and nouveau_gem.c Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 12 +++++++++ drivers/gpu/drm/nouveau/nouveau_gem.c | 30 +++------------------ drivers/gpu/drm/nouveau/nouveau_mem.c | 47 ++++++++++++++++++--------------- drivers/gpu/drm/nouveau/nouveau_mm.h | 1 + drivers/gpu/drm/nouveau/nouveau_state.c | 16 +++++++++++ drivers/gpu/drm/nouveau/nv50_instmem.c | 9 ++++--- drivers/gpu/drm/nouveau/nv50_vram.c | 10 +++++++ 7 files changed, 74 insertions(+), 51 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 2cd87e6b54b7..f8931b2d129b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -501,6 +501,15 @@ struct nouveau_crypt_engine { void (*tlb_flush)(struct drm_device *dev); }; +struct nouveau_vram_engine { + int (*init)(struct drm_device *); + int (*get)(struct drm_device *, u64, u32 align, u32 size_nc, + u32 type, struct nouveau_vram **); + void (*put)(struct drm_device *, struct nouveau_vram **); + + bool (*flags_valid)(struct drm_device *, u32 tile_flags); +}; + struct nouveau_engine { struct nouveau_instmem_engine instmem; struct nouveau_mc_engine mc; @@ -512,6 +521,7 @@ struct nouveau_engine { struct nouveau_gpio_engine gpio; struct nouveau_pm_engine pm; struct nouveau_crypt_engine crypt; + struct nouveau_vram_engine vram; }; struct nouveau_pll_vals { @@ -821,6 +831,8 @@ extern void nouveau_mem_gart_fini(struct drm_device *); extern int nouveau_mem_init_agp(struct drm_device *); extern int nouveau_mem_reset_agp(struct drm_device *); extern void nouveau_mem_close(struct drm_device *); +extern int nouveau_mem_detect(struct drm_device *); +extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags); extern struct nouveau_tile_reg *nv10_mem_set_tiling( struct drm_device *dev, uint32_t addr, uint32_t size, uint32_t pitch, uint32_t flags); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 0adb2a85c143..506c508b7eda 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -103,32 +103,6 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep) return 0; } -static bool -nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - if (dev_priv->card_type >= NV_50) { - switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) { - case 0x0000: - case 0x1800: - case 0x2800: - case 0x4800: - case 0x7000: - case 0x7400: - case 0x7a00: - case 0xe000: - return true; - } - } else { - if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)) - return true; - } - - NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags); - return false; -} - int nouveau_gem_ioctl_new(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -150,8 +124,10 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU) flags |= TTM_PL_FLAG_SYSTEM; - if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags)) + if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) { + NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags); return -EINVAL; + } if (req->channel_hint) { chan = nouveau_channel_get(dev, file_priv, req->channel_hint); diff --git
a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 5a1809480388..224181193a1f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -241,7 +241,7 @@ nouveau_mem_detect_nforce(struct drm_device *dev) return 0; } -static int +int nouveau_mem_detect(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -255,26 +255,25 @@ nouveau_mem_detect(struct drm_device *dev) if (dev_priv->card_type < NV_50) { dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; - } else - if (dev_priv->card_type < NV_C0) { - if (nv50_vram_init(dev)) - return -ENOMEM; } else { dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; dev_priv->vram_size *= nv_rd32(dev, 0x121c74); } - NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); - if (dev_priv->vram_sys_base) { - NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", - dev_priv->vram_sys_base); - } - if (dev_priv->vram_size) return 0; return -ENOMEM; } +bool +nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags) +{ + if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)) + return true; + + return false; +} + #if __OS_HAS_AGP static unsigned long get_agp_mode(struct drm_device *dev, unsigned long mode) @@ -432,11 +431,16 @@ nouveau_mem_vram_init(struct drm_device *dev) else dev_priv->ramin_rsvd_vram = (512 * 1024); - /* initialise gpu-specific vram backend */ - ret = nouveau_mem_detect(dev); + ret = dev_priv->engine.vram.init(dev); if (ret) return ret; + NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); + if (dev_priv->vram_sys_base) { + NV_INFO(dev, "Stolen system memory at: 0x%010llx\n", + dev_priv->vram_sys_base); + } + dev_priv->fb_available_size = dev_priv->vram_size; dev_priv->fb_mappable_pages = dev_priv->fb_available_size; if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) @@ -698,9 +702,10 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); + struct nouveau_vram_engine *vram = &dev_priv->engine.vram; struct drm_device *dev = dev_priv->dev; - nv50_vram_del(dev, (struct nouveau_vram **)&mem->mm_node); + vram->put(dev, (struct nouveau_vram **)&mem->mm_node); } static int @@ -710,30 +715,30 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev); + struct nouveau_vram_engine *vram = &dev_priv->engine.vram; struct drm_device *dev = dev_priv->dev; struct nouveau_bo *nvbo = nouveau_bo(bo); - struct nouveau_vram *vram; + struct nouveau_vram *node; u32 size_nc = 0; int ret; if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) size_nc = 1 << nvbo->vma.node->type; - ret = nv50_vram_new(dev, mem->num_pages << PAGE_SHIFT, - mem->page_alignment << PAGE_SHIFT, size_nc, - (nvbo->tile_flags >> 8) & 0x7f, &vram); + ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, + mem->page_alignment << PAGE_SHIFT, size_nc, + (nvbo->tile_flags >> 8) & 0xff, &node); if (ret) return ret; - mem->mm_node = vram; - mem->start = vram->offset >> PAGE_SHIFT; + mem->mm_node = node; + mem->start = node->offset >> PAGE_SHIFT; return 0; } void nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) { - struct ttm_bo_global *glob = man->bdev->glob; struct nouveau_mm *mm = man->priv; struct nouveau_mm_node *r; u64 total = 0, ttotal[3] = {}, tused[3] = {}, 
tfree[3] = {}; diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h index 7e8f8bd86d47..250e642de0a7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mm.h +++ b/drivers/gpu/drm/nouveau/nouveau_mm.h @@ -57,5 +57,6 @@ int nv50_vram_init(struct drm_device *); int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc, u32 memtype, struct nouveau_vram **); void nv50_vram_del(struct drm_device *, struct nouveau_vram **); +bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index e0811f93243d..8eac943e8fd2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -100,6 +100,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.clock_set = nv04_pm_clock_set; engine->crypt.init = nouveau_stub_init; engine->crypt.takedown = nouveau_stub_takedown; + engine->vram.init = nouveau_mem_detect; + engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x10: engine->instmem.init = nv04_instmem_init; @@ -157,6 +159,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.clock_set = nv04_pm_clock_set; engine->crypt.init = nouveau_stub_init; engine->crypt.takedown = nouveau_stub_takedown; + engine->vram.init = nouveau_mem_detect; + engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x20: engine->instmem.init = nv04_instmem_init; @@ -214,6 +218,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.clock_set = nv04_pm_clock_set; engine->crypt.init = nouveau_stub_init; engine->crypt.takedown = nouveau_stub_takedown; + engine->vram.init = nouveau_mem_detect; + engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x30: engine->instmem.init = nv04_instmem_init; @@ -273,6 +279,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.voltage_set = nouveau_voltage_gpio_set; engine->crypt.init = nouveau_stub_init; engine->crypt.takedown = nouveau_stub_takedown; + engine->vram.init = nouveau_mem_detect; + engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x40: case 0x60: @@ -334,6 +342,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->pm.temp_get = nv40_temp_get; engine->crypt.init = nouveau_stub_init; engine->crypt.takedown = nouveau_stub_takedown; + engine->vram.init = nouveau_mem_detect; + engine->vram.flags_valid = nouveau_mem_flags_valid; break; case 0x50: case 0x80: /* gotta love NVIDIA's consistency.. 
*/ @@ -444,6 +454,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->crypt.takedown = nouveau_stub_takedown; break; } + engine->vram.init = nv50_vram_init; + engine->vram.get = nv50_vram_new; + engine->vram.put = nv50_vram_del; + engine->vram.flags_valid = nv50_vram_flags_valid; break; case 0xC0: engine->instmem.init = nvc0_instmem_init; @@ -495,6 +509,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.irq_enable = nv50_gpio_irq_enable; engine->crypt.init = nouveau_stub_init; engine->crypt.takedown = nouveau_stub_takedown; + engine->vram.init = nouveau_mem_detect; + engine->vram.flags_valid = nouveau_mem_flags_valid; break; default: NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index e2efd6f22ffb..38f30270cb9f 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -312,6 +312,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) { struct drm_device *dev = gpuobj->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_vram_engine *vram = &dev_priv->engine.vram; struct nv50_gpuobj_node *node = NULL; int ret; @@ -323,7 +324,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) size = (size + 4095) & ~4095; align = max(align, (u32)4096); - ret = nv50_vram_new(dev, size, align, 0, 0, &node->vram); + ret = vram->get(dev, size, align, 0, 0, &node->vram); if (ret) { kfree(node); return ret; @@ -336,7 +337,7 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS, &node->chan_vma); if (ret) { - nv50_vram_del(dev, &node->vram); + vram->put(dev, &node->vram); kfree(node); return ret; } @@ -354,6 +355,8 @@ void nv50_instmem_put(struct nouveau_gpuobj *gpuobj) { struct drm_device *dev = gpuobj->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_vram_engine *vram = &dev_priv->engine.vram; struct nv50_gpuobj_node *node; node = gpuobj->node; @@ -363,7 +366,7 @@ nv50_instmem_put(struct nouveau_gpuobj *gpuobj) nouveau_vm_unmap(&node->chan_vma); nouveau_vm_put(&node->chan_vma); } - nv50_vram_del(dev, &node->vram); + vram->put(dev, &node->vram); kfree(node); } diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c index 6e753356cd94..47489ed0a5a8 100644 --- a/drivers/gpu/drm/nouveau/nv50_vram.c +++ b/drivers/gpu/drm/nouveau/nv50_vram.c @@ -37,6 +37,16 @@ static int types[0x80] = { 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0 }; +bool +nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags) +{ + int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8; + + if (likely(type < sizeof(types) && types[type])) + return true; + return false; +} + void nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram) { -- cgit v1.2.3 From fd70b6cd780742b97f525415bf5e4fb24a4bb6d8 Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Wed, 8 Dec 2010 02:37:12 +0100 Subject: drm/nv04-nv40: Fix up PCI(E) GART DMA object bus address calculation. 
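The corrected calculation can be sketched as follows (illustrative only — the constants and the function name here are stand-ins, not the driver's API; it assumes the usual 4 KiB GART pages and the two-word DMA-object header visible in the patch below):

	/* Sketch: bus address for a linear GART offset.  One 32-bit PTE is
	 * stored per 4 KiB page after two header words; take the page frame
	 * from the PTE and keep the in-page offset, which the old code
	 * discarded. */
	static u32 gart_bus_address(const u32 *dma_object, u32 offset)
	{
		u32 pte = (offset >> 12) + 2;	/* word index past the header */
		return (dma_object[pte] & ~0xfffu) | (offset & 0xfffu);
	}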
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 4 ++-- drivers/gpu/drm/nouveau/nouveau_object.c | 8 ++------ drivers/gpu/drm/nouveau/nouveau_sgdma.c | 16 ++++++---------- 3 files changed, 10 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index f8931b2d129b..8f13906185b2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -934,8 +934,8 @@ extern void nouveau_irq_uninstall(struct drm_device *); /* nouveau_sgdma.c */ extern int nouveau_sgdma_init(struct drm_device *); extern void nouveau_sgdma_takedown(struct drm_device *); -extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset, - uint32_t *page); +extern uint32_t nouveau_sgdma_get_physical(struct drm_device *, + uint32_t offset); extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *); /* nouveau_debugfs.c */ diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index d1bed40dc449..55c9fdcfa67f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -478,7 +478,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct drm_device *dev = chan->dev; struct nouveau_gpuobj *obj; - u32 page_addr, flags0, flags2; + u32 flags0, flags2; int ret; if (dev_priv->card_type >= NV_50) { @@ -495,12 +495,8 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, base += dev_priv->gart_info.aper_base; } else if (base != 0) { - ret = nouveau_sgdma_get_page(dev, base, &page_addr); - if (ret) - return ret; - + base = nouveau_sgdma_get_physical(dev, base); target = NV_MEM_TARGET_PCI; - base = page_addr; } else { nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj); return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index b57201ab538e..9a250eb53098 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -267,19 +267,15 @@ nouveau_sgdma_takedown(struct drm_device *dev) nouveau_vm_put(&dev_priv->gart_info.vma); } -int -nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) +uint32_t +nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; - int pte; + int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2; - pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2; - if (dev_priv->card_type < NV_50) { - *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK; - return 0; - } + BUG_ON(dev_priv->card_type >= NV_50); - NV_ERROR(dev, "Unimplemented on NV50\n"); - return -EINVAL; + return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) | + (offset & NV_CTXDMA_PAGE_MASK); } -- cgit v1.2.3 From ff7ea4c04012e01a9a50c5e42dabdaf0794734ce Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Dec 2010 09:43:41 +0000 Subject: drm/i915: Re-arm the idle timers if the device is still busy Don't post a downclocking task if the device is still active when the idle timer fires. A pathological process could queue up several seconds worth of processing and then go to sleep, during which time the idle timer would kick in and downclock the GPU. 
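Both timer callbacks below gain the same guard; roughly (a minimal sketch, not the driver code — still_busy(), idle_timer, wq and idle_work stand in for the per-timer specifics: the GPU timer checks the active-request list, the CRTC timer checks whether the framebuffer object is still active):

	/* Sketch: re-arm rather than downclock while work is outstanding. */
	static void idle_timer_fn(unsigned long arg)
	{
		if (still_busy(arg)) {
			/* Still processing requests: push the deadline out. */
			mod_timer(&idle_timer, jiffies +
				  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
			return;
		}

		/* Genuinely idle: let the downclocking work run. */
		queue_work(wq, &idle_work);
	}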
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1ccf2ad7cd88..6d4faff8121b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4758,8 +4758,14 @@ static void intel_gpu_idle_timer(unsigned long arg) struct drm_device *dev = (struct drm_device *)arg; drm_i915_private_t *dev_priv = dev->dev_private; - dev_priv->busy = false; + if (!list_empty(&dev_priv->mm.active_list)) { + /* Still processing requests, so just re-arm the timer. */ + mod_timer(&dev_priv->idle_timer, jiffies + + msecs_to_jiffies(GPU_IDLE_TIMEOUT)); + return; + } + dev_priv->busy = false; queue_work(dev_priv->wq, &dev_priv->idle_work); } @@ -4770,9 +4776,17 @@ static void intel_crtc_idle_timer(unsigned long arg) struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; struct drm_crtc *crtc = &intel_crtc->base; drm_i915_private_t *dev_priv = crtc->dev->dev_private; + struct intel_framebuffer *intel_fb; - intel_crtc->busy = false; + intel_fb = to_intel_framebuffer(crtc->fb); + if (intel_fb && intel_fb->obj->active) { + /* The framebuffer is still being accessed by the GPU. */ + mod_timer(&intel_crtc->idle_timer, jiffies + + msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); + return; + } + intel_crtc->busy = false; queue_work(dev_priv->wq, &dev_priv->idle_work); } -- cgit v1.2.3 From 67731b87e9572801c41f8fe779750babdd362416 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Dec 2010 10:38:14 +0000 Subject: drm/i915: Eliminate drm_gem_object_lookup during relocation As we provide a list of all objects that will be accessed from the batchbuffer, we can build a lut of the handles associated with those objects for this invocation and use that to avoid the overhead of looking up those objects again for every relocation. The cost of building and searching a small hash table is much less than that of acquiring a spinlock, searching a radix tree and manipulating an atomic refcnt per relocation. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.h | 6 ++ drivers/gpu/drm/i915/i915_gem_execbuffer.c | 150 +++++++++++++++++++++++------ 2 files changed, 129 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d9b54a27ccf8..b8a55008a1b5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -788,6 +788,12 @@ struct drm_i915_gem_object { struct scatterlist *sg_list; int num_sg; + /** + * Used for performing relocations during execbuffer insertion. + */ + struct hlist_node exec_node; + unsigned long exec_handle; + /** * Current offset of the object in GTT space. 
* diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 1b2ceacd64f0..3305ae528de4 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -207,9 +207,67 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj, cd->flush_rings |= ring->id; } +struct eb_objects { + int and; + struct hlist_head buckets[0]; +}; + +static struct eb_objects * +eb_create(int size) +{ + struct eb_objects *eb; + int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; + while (count > size) + count >>= 1; + eb = kzalloc(count*sizeof(struct hlist_head) + + sizeof(struct eb_objects), + GFP_KERNEL); + if (eb == NULL) + return eb; + + eb->and = count - 1; + return eb; +} + +static void +eb_reset(struct eb_objects *eb) +{ + memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); +} + +static void +eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj) +{ + hlist_add_head(&obj->exec_node, + &eb->buckets[obj->exec_handle & eb->and]); +} + +static struct drm_i915_gem_object * +eb_get_object(struct eb_objects *eb, unsigned long handle) +{ + struct hlist_head *head; + struct hlist_node *node; + struct drm_i915_gem_object *obj; + + head = &eb->buckets[handle & eb->and]; + hlist_for_each(node, head) { + obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); + if (obj->exec_handle == handle) + return obj; + } + + return NULL; +} + +static void +eb_destroy(struct eb_objects *eb) +{ + kfree(eb); +} + static int i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, - struct drm_file *file_priv, + struct eb_objects *eb, struct drm_i915_gem_exec_object2 *entry, struct drm_i915_gem_relocation_entry *reloc) { @@ -218,9 +276,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, uint32_t target_offset; int ret = -EINVAL; - target_obj = drm_gem_object_lookup(dev, file_priv, - reloc->target_handle); - if (target_obj == NULL) + /* we've already hold a reference to all valid objects */ + target_obj = &eb_get_object(eb, reloc->target_handle)->base; + if (unlikely(target_obj == NULL)) return -ENOENT; target_offset = to_intel_bo(target_obj)->gtt_offset; @@ -246,7 +304,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, if (target_offset == 0) { DRM_ERROR("No GTT space found for object %d\n", reloc->target_handle); - goto err; + return ret; } /* Validate that the target is in a valid r/w GPU domain */ @@ -258,7 +316,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, (int) reloc->offset, reloc->read_domains, reloc->write_domain); - goto err; + return ret; } if (reloc->write_domain & I915_GEM_DOMAIN_CPU || reloc->read_domains & I915_GEM_DOMAIN_CPU) { @@ -269,7 +327,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, (int) reloc->offset, reloc->read_domains, reloc->write_domain); - goto err; + return ret; } if (reloc->write_domain && target_obj->pending_write_domain && reloc->write_domain != target_obj->pending_write_domain) { @@ -280,7 +338,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, (int) reloc->offset, reloc->write_domain, target_obj->pending_write_domain); - goto err; + return ret; } target_obj->pending_read_domains |= reloc->read_domains; @@ -290,7 +348,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, * more work needs to be done. */ if (target_offset == reloc->presumed_offset) - goto out; + return 0; /* Check that the relocation address is valid... 
*/ if (reloc->offset > obj->base.size - 4) { @@ -299,14 +357,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, obj, reloc->target_handle, (int) reloc->offset, (int) obj->base.size); - goto err; + return ret; } if (reloc->offset & 3) { DRM_ERROR("Relocation not 4-byte aligned: " "obj %p target %d offset %d.\n", obj, reloc->target_handle, (int) reloc->offset); - goto err; + return ret; } /* and points to somewhere within the target object. */ @@ -316,7 +374,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, obj, reloc->target_handle, (int) reloc->delta, (int) target_obj->size); - goto err; + return ret; } reloc->delta += target_offset; @@ -334,7 +392,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret) - goto err; + return ret; /* Map the page containing the relocation we're going to perform. */ reloc->offset += obj->gtt_offset; @@ -349,16 +407,12 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, /* and update the user's relocation entry */ reloc->presumed_offset = target_offset; -out: - ret = 0; -err: - drm_gem_object_unreference(target_obj); - return ret; + return 0; } static int i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, - struct drm_file *file_priv, + struct eb_objects *eb, struct drm_i915_gem_exec_object2 *entry) { struct drm_i915_gem_relocation_entry __user *user_relocs; @@ -373,7 +427,7 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, sizeof(reloc))) return -EFAULT; - ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc); + ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc); if (ret) return ret; @@ -388,14 +442,14 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, static int i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, - struct drm_file *file_priv, + struct eb_objects *eb, struct drm_i915_gem_exec_object2 *entry, struct drm_i915_gem_relocation_entry *relocs) { int i, ret; for (i = 0; i < entry->relocation_count; i++) { - ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]); + ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]); if (ret) return ret; } @@ -405,7 +459,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, static int i915_gem_execbuffer_relocate(struct drm_device *dev, - struct drm_file *file, + struct eb_objects *eb, struct list_head *objects, struct drm_i915_gem_exec_object2 *exec) { @@ -415,7 +469,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, list_for_each_entry(obj, objects, exec_list) { obj->base.pending_read_domains = 0; obj->base.pending_write_domain = 0; - ret = i915_gem_execbuffer_relocate_object(obj, file, exec++); + ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++); if (ret) return ret; } @@ -560,6 +614,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_file *file, struct intel_ring_buffer *ring, struct list_head *objects, + struct eb_objects *eb, struct drm_i915_gem_exec_object2 *exec, int count) { @@ -567,6 +622,15 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_i915_gem_object *obj; int i, total, ret; + /* We may process another execbuffer during the unlock... 
*/ + while (!list_empty(objects)) { + obj = list_first_entry(objects, + struct drm_i915_gem_object, + exec_list); + list_del_init(&obj->exec_list); + drm_gem_object_unreference(&obj->base); + } + mutex_unlock(&dev->struct_mutex); total = 0; @@ -601,6 +665,26 @@ goto err; } + /* reacquire the objects */ + INIT_LIST_HEAD(objects); + eb_reset(eb); + for (i = 0; i < count; i++) { + struct drm_i915_gem_object *obj; + + obj = to_intel_bo(drm_gem_object_lookup(dev, file, + exec[i].handle)); + if (obj == NULL) { + DRM_ERROR("Invalid object handle %d at index %d\n", + exec[i].handle, i); + ret = -ENOENT; + goto err; + } + + list_add_tail(&obj->exec_list, objects); + obj->exec_handle = exec[i].handle; + eb_add_object(eb, obj); + } + ret = i915_gem_execbuffer_reserve(ring, file, objects, exec); if (ret) goto err; @@ -609,7 +693,7 @@ list_for_each_entry(obj, objects, exec_list) { obj->base.pending_read_domains = 0; obj->base.pending_write_domain = 0; - ret = i915_gem_execbuffer_relocate_object_slow(obj, file, + ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, exec, reloc + total); if (ret) @@ -868,6 +952,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, { drm_i915_private_t *dev_priv = dev->dev_private; struct list_head objects; + struct eb_objects *eb; struct drm_i915_gem_object *batch_obj; struct drm_clip_rect *cliprects = NULL; struct intel_ring_buffer *ring; @@ -950,6 +1035,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto pre_mutex_err; } + eb = eb_create(args->buffer_count); + if (eb == NULL) { + mutex_unlock(&dev->struct_mutex); + ret = -ENOMEM; + goto pre_mutex_err; + } + /* Look up object handles */ INIT_LIST_HEAD(&objects); for (i = 0; i < args->buffer_count; i++) { @@ -973,6 +1065,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } list_add_tail(&obj->exec_list, &objects); + obj->exec_handle = exec[i].handle; + eb_add_object(eb, obj); } /* Move the objects en-masse into the GTT, evicting if necessary. */ @@ -981,11 +1075,12 @@ goto err; /* The objects are in their final locations, apply the relocations. 
*/ - ret = i915_gem_execbuffer_relocate(dev, file, &objects, exec); + ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec); if (ret) { if (ret == -EFAULT) { ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, - &objects, exec, + &objects, eb, + exec, args->buffer_count); BUG_ON(!mutex_is_locked(&dev->struct_mutex)); } @@ -1051,6 +1146,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, i915_gem_execbuffer_retire_commands(dev, file, ring); err: + eb_destroy(eb); while (!list_empty(&objects)) { struct drm_i915_gem_object *obj; -- cgit v1.2.3 From b8f7ab1788f23d79084f051bb9ae3cb02b55bff3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Dec 2010 10:43:06 +0000 Subject: drm/i915: Mark the user reloc error paths as unlikely Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 3305ae528de4..fda0dc858a1f 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -301,14 +301,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, /* The target buffer should have appeared before us in the * exec_object list, so it should have a GTT space bound by now. */ - if (target_offset == 0) { + if (unlikely(target_offset == 0)) { DRM_ERROR("No GTT space found for object %d\n", reloc->target_handle); return ret; } /* Validate that the target is in a valid r/w GPU domain */ - if (reloc->write_domain & (reloc->write_domain - 1)) { + if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { DRM_ERROR("reloc with multiple write domains: " "obj %p target %d offset %d " "read %08x write %08x", @@ -318,8 +318,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, reloc->write_domain); return ret; } - if (reloc->write_domain & I915_GEM_DOMAIN_CPU || - reloc->read_domains & I915_GEM_DOMAIN_CPU) { + if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) { DRM_ERROR("reloc with read/write CPU domains: " "obj %p target %d offset %d " "read %08x write %08x", @@ -329,8 +328,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, reloc->write_domain); return ret; } - if (reloc->write_domain && target_obj->pending_write_domain && - reloc->write_domain != target_obj->pending_write_domain) { + if (unlikely(reloc->write_domain && target_obj->pending_write_domain && + reloc->write_domain != target_obj->pending_write_domain)) { DRM_ERROR("Write domain conflict: " "obj %p target %d offset %d " "new %08x old %08x\n", @@ -351,7 +350,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, return 0; /* Check that the relocation address is valid... */ - if (reloc->offset > obj->base.size - 4) { + if (unlikely(reloc->offset > obj->base.size - 4)) { DRM_ERROR("Relocation beyond object bounds: " "obj %p target %d offset %d size %d.\n", obj, reloc->target_handle, @@ -359,7 +358,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, (int) obj->base.size); return ret; } - if (reloc->offset & 3) { + if (unlikely(reloc->offset & 3)) { DRM_ERROR("Relocation not 4-byte aligned: " "obj %p target %d offset %d.\n", obj, reloc->target_handle, @@ -368,7 +367,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, } /* and points to somewhere within the target object. 
*/ - if (reloc->delta >= target_obj->size) { + if (unlikely(reloc->delta >= target_obj->size)) { DRM_ERROR("Relocation beyond target object bounds: " "obj %p target %d delta %d size %d.\n", obj, reloc->target_handle, -- cgit v1.2.3 From 4a19d02e0a8cd8799e5d150d8eb74861e1a4cdec Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Dec 2010 13:31:49 +0000 Subject: drm/i915: driver.suspend and .resume are always set So we can remove the repeated initialisation. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 91a3ad2cf942..ad28b21f4d03 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -700,11 +700,6 @@ static int __init i915_init(void) driver.driver_features &= ~DRIVER_MODESET; #endif - if (!(driver.driver_features & DRIVER_MODESET)) { - driver.suspend = i915_suspend; - driver.resume = i915_resume; - } - return drm_init(&driver); } -- cgit v1.2.3 From d1c3b177b9940541e89015a726ac279caf1a21f3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Dec 2010 14:26:19 +0000 Subject: drm/i915: Restore GTT mapping first upon resume As suggested by Daniel Vetter, this is a safeguard should any of the registers cause reference to PTE entries. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ad28b21f4d03..2be344a8e93b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -304,13 +304,18 @@ static int i915_drm_thaw(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int error = 0; + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + mutex_lock(&dev->struct_mutex); + i915_gem_restore_gtt_mappings(dev); + mutex_unlock(&dev->struct_mutex); + } + i915_restore_state(dev); intel_opregion_setup(dev); /* KMS EnterVT equivalent */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { mutex_lock(&dev->struct_mutex); - i915_gem_restore_gtt_mappings(dev); dev_priv->mm.suspended = 0; error = i915_gem_init_ringbuffer(dev); -- cgit v1.2.3 From a8e93126a6f10d0a14ba8407ec112b1b3a5e2e97 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Dec 2010 14:28:54 +0000 Subject: drm/i915/gtt: Clear the cachelines upon resume Required for my pineview system to not barf after resuming. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem_gtt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 71c2b0f3747b..86673e77d7cb 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -35,6 +35,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) struct drm_i915_gem_object *obj; list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { + i915_gem_clflush_object(obj); + if (dev_priv->mm.gtt->needs_dmar) { BUG_ON(!obj->sg_list); @@ -51,7 +53,6 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) obj->agp_type); } - /* Be paranoid and flush the chipset cache. 
*/ intel_gtt_chipset_flush(); } -- cgit v1.2.3 From eb43f4af7ecb7d51ba44f5e96bf74eedf1c27d62 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Dec 2010 17:32:24 +0000 Subject: drm/i915: Terminate the FORCE WAKE after we have finished reading Once we have read the value out of the GT power well, we need to remove the FORCE WAKE bit to allow the system to auto-power down. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 22 ++++++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 21 ++++++++++++--------- drivers/gpu/drm/i915/i915_reg.h | 1 + 3 files changed, 35 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 2be344a8e93b..5f20cd988612 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -244,6 +244,28 @@ void intel_detect_pch (struct drm_device *dev) } } +void __gen6_force_wake_get(struct drm_i915_private *dev_priv) +{ + int count; + + count = 0; + while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) + udelay(10); + + I915_WRITE_NOTRACE(FORCEWAKE, 1); + POSTING_READ(FORCEWAKE); + + count = 0; + while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0) + udelay(10); +} + +void __gen6_force_wake_put(struct drm_i915_private *dev_priv) +{ + I915_WRITE_NOTRACE(FORCEWAKE, 0); + POSTING_READ(FORCEWAKE); +} + static int i915_drm_freeze(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b8a55008a1b5..30780f2cab6f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1341,17 +1341,20 @@ __i915_write(64, q) * must be set to prevent GT core from power down and stale values being * returned. */ +void __gen6_force_wake_get(struct drm_i915_private *dev_priv); +void __gen6_force_wake_put (struct drm_i915_private *dev_priv); static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg) { - if (IS_GEN6(dev_priv->dev)) { - I915_WRITE_NOTRACE(FORCEWAKE, 1); - POSTING_READ(FORCEWAKE); - /* XXX How long do we really need to wait here? - * Will different registers/engines require different periods? 
- */ - udelay(100); - } - return I915_READ(reg); + u32 val; + + if (dev_priv->info->gen >= 6) { + __gen6_force_wake_get(dev_priv); + val = I915_READ(reg); + __gen6_force_wake_put(dev_priv); + } else + val = I915_READ(reg); + + return val; } static inline void diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3e03094cf148..2a36e05f1b85 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3115,4 +3115,5 @@ #define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) #define FORCEWAKE 0xA18C +#define FORCEWAKE_ACK 0x130090 #endif /* _I915_REG_H_ */ -- cgit v1.2.3 From 8fd2685911cb6c140e6d0588ac04990ce65d4537 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Dec 2010 18:40:43 +0000 Subject: drm/i915: Enable RC6 autodownclocking on Sandybridge Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_reg.h | 59 ++++++++++++++++++++++++ drivers/gpu/drm/i915/intel_display.c | 89 ++++++++++++++++++++++++++++++++++++ 2 files changed, 148 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2a36e05f1b85..b284c13fd632 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -283,6 +283,7 @@ #define RING_CTL(base) ((base)+0x3c) #define RING_SYNC_0(base) ((base)+0x40) #define RING_SYNC_1(base) ((base)+0x44) +#define RING_MAX_IDLE(base) ((base)+0x54) #define RING_HWS_PGA(base) ((base)+0x80) #define RING_HWS_PGA_GEN6(base) ((base)+0x2080) #define RING_ACTHD(base) ((base)+0x74) @@ -3116,4 +3117,62 @@ #define FORCEWAKE 0xA18C #define FORCEWAKE_ACK 0x130090 + +#define GEN6_RC_NORMAL_FREQ 0xA008 +#define GEN6_TURBO_DISABLE (1<<31) +#define GEN6_FREQUENCY(x) ((x)<<25) +#define GEN6_OFFSET(x) ((x)<<19) +#define GEN6_AGGRESSIVE_TURBO (0<<15) +#define GEN6_RC_VIDEO_FREQ 0xA00C +#define GEN6_RC_CONTROL 0xA090 +#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16) +#define GEN6_RC_CTL_RC6p_ENABLE (1<<17) +#define GEN6_RC_CTL_RC6_ENABLE (1<<18) +#define GEN6_RC_CTL_RC1e_ENABLE (1<<20) +#define GEN6_RC_CTL_RC7_ENABLE (1<<22) +#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27) +#define GEN6_RC_CTL_HW_ENABLE (1<<31) +#define GEN6_RP_DOWN_TIMEOUT 0xA010 +#define GEN6_RP_INTERRUPT_LIMITS 0xA014 +#define GEN6_RP_CONTROL 0xA024 +#define GEN6_RP_MEDIA_TURBO (1<<11) +#define GEN6_RP_USE_NORMAL_FREQ (1<<9) +#define GEN6_RP_MEDIA_IS_GFX (1<<8) +#define GEN6_RP_ENABLE (1<<7) +#define GEN6_RP_UP_BUSY_MAX (0x2<<3) +#define GEN6_RP_DOWN_BUSY_MIN (0x2<<0) +#define GEN6_RP_UP_THRESHOLD 0xA02C +#define GEN6_RP_DOWN_THRESHOLD 0xA030 +#define GEN6_RP_UP_EI 0xA068 +#define GEN6_RP_DOWN_EI 0xA06C +#define GEN6_RP_IDLE_HYSTERSIS 0xA070 +#define GEN6_RC_STATE 0xA094 +#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098 +#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C +#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0 +#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8 +#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC +#define GEN6_RC_SLEEP 0xA0B0 +#define GEN6_RC1e_THRESHOLD 0xA0B4 +#define GEN6_RC6_THRESHOLD 0xA0B8 +#define GEN6_RC6p_THRESHOLD 0xA0BC +#define GEN6_RC6pp_THRESHOLD 0xA0C0 + +#define GEN6_PMISR 0x44020 +#define GEN6_PMIMR 0x44024 +#define GEN6_PMIIR 0x44028 +#define GEN6_PMIER 0x4402C +#define GEN6_PM_MBOX_EVENT (1<<25) +#define GEN6_PM_THERMAL_EVENT (1<<24) +#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6) +#define GEN6_PM_RP_UP_THRESHOLD (1<<5) +#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4) +#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2) +#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1) + +#define GEN6_PCODE_MAILBOX 0x138124 +#define GEN6_PCODE_READY (1<<31) +#define 
GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9 +#define GEN6_PCODE_DATA 0x138128 + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 6d4faff8121b..17c213fef0ec 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5842,6 +5842,91 @@ void intel_init_emon(struct drm_device *dev) dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); } +static void gen6_enable_rc6(struct drm_i915_private *dev_priv) +{ + int i; + + /* Here begins a magic sequence of register writes to enable + * auto-downclocking. + * + * Perhaps there might be some value in exposing these to + * userspace... + */ + I915_WRITE(GEN6_RC_STATE, 0); + __gen6_force_wake_get(dev_priv); + + /* disable the counters and set determistic thresholds */ + I915_WRITE(GEN6_RC_CONTROL, 0); + + I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); + I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); + I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); + I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); + I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); + + for (i = 0; i < I915_NUM_RINGS; i++) + I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); + + I915_WRITE(GEN6_RC_SLEEP, 0); + I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); + I915_WRITE(GEN6_RC6_THRESHOLD, 50000); + I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); + I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ + + I915_WRITE(GEN6_RC_CONTROL, + GEN6_RC_CTL_RC6p_ENABLE | + GEN6_RC_CTL_RC6_ENABLE | + GEN6_RC_CTL_HW_ENABLE); + + I915_WRITE(GEN6_RC_NORMAL_FREQ, + GEN6_FREQUENCY(10) | + GEN6_OFFSET(0) | + GEN6_AGGRESSIVE_TURBO); + I915_WRITE(GEN6_RC_VIDEO_FREQ, + GEN6_FREQUENCY(12)); + + I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, + 18 << 24 | + 6 << 16); + I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000); + I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000); + I915_WRITE(GEN6_RP_UP_EI, 100000); + I915_WRITE(GEN6_RP_DOWN_EI, 300000); + I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); + I915_WRITE(GEN6_RP_CONTROL, + GEN6_RP_MEDIA_TURBO | + GEN6_RP_USE_NORMAL_FREQ | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_MAX | + GEN6_RP_DOWN_BUSY_MIN); + + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); + + I915_WRITE(GEN6_PCODE_DATA, 0); + I915_WRITE(GEN6_PCODE_MAILBOX, + GEN6_PCODE_READY | + GEN6_PCODE_WRITE_MIN_FREQ_TABLE); + if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, + 500)) + DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); + + /* requires MSI enabled */ + I915_WRITE(GEN6_PMIER, + GEN6_PM_MBOX_EVENT | + GEN6_PM_THERMAL_EVENT | + GEN6_PM_RP_DOWN_TIMEOUT | + GEN6_PM_RP_UP_THRESHOLD | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_UP_EI_EXPIRED | + GEN6_PM_RP_DOWN_EI_EXPIRED); + + __gen6_force_wake_put(dev_priv); +} + void intel_enable_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -5924,6 +6009,7 @@ void intel_enable_clock_gating(struct drm_device *dev) _3D_CHICKEN2_WM_READ_PIPELINED << 16 | _3D_CHICKEN2_WM_READ_PIPELINED); } + } else if (IS_G4X(dev)) { uint32_t dspclk_gate; I915_WRITE(RENCLK_GATE_D1, 0); @@ -5997,6 +6083,9 @@ void intel_enable_clock_gating(struct drm_device *dev) I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); } } + + if (IS_GEN6(dev)) + gen6_enable_rc6(dev_priv); } void intel_disable_clock_gating(struct drm_device *dev) -- cgit v1.2.3 From b13c2b96bf15b9dd0f1a45fd788f3a3025c5aec6 Mon Sep 17 00:00:00 2001 
From: Chris Wilson Date: Mon, 13 Dec 2010 16:54:50 +0000 Subject: drm/i915/ringbuffer: Make IRQ refcnting atomic In order to enforce the correct memory barriers for irq get/put, we need to perform the actual counting using atomic operations. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 37 ++++++++++++++------------- drivers/gpu/drm/i915/i915_irq.c | 17 +++++++------ drivers/gpu/drm/i915/intel_ringbuffer.c | 44 +++++++++++++++++++-------------- drivers/gpu/drm/i915/intel_ringbuffer.h | 4 +-- 4 files changed, 56 insertions(+), 46 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 27fa2a1b26a5..726c2ccd674c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2000,17 +2000,19 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, trace_i915_gem_request_wait_begin(dev, seqno); ring->waiting_seqno = seqno; - ring->irq_get(ring); - if (interruptible) - ret = wait_event_interruptible(ring->irq_queue, - i915_seqno_passed(ring->get_seqno(ring), seqno) - || atomic_read(&dev_priv->mm.wedged)); - else - wait_event(ring->irq_queue, - i915_seqno_passed(ring->get_seqno(ring), seqno) - || atomic_read(&dev_priv->mm.wedged)); + ret = -ENODEV; + if (ring->irq_get(ring)) { + if (interruptible) + ret = wait_event_interruptible(ring->irq_queue, + i915_seqno_passed(ring->get_seqno(ring), seqno) + || atomic_read(&dev_priv->mm.wedged)); + else + wait_event(ring->irq_queue, + i915_seqno_passed(ring->get_seqno(ring), seqno) + || atomic_read(&dev_priv->mm.wedged)); - ring->irq_put(ring); + ring->irq_put(ring); + } ring->waiting_seqno = 0; trace_i915_gem_request_wait_end(dev, seqno); @@ -3157,14 +3159,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) * generation is designed to be run atomically and so is * lockless. 
*/ - ring->irq_get(ring); - ret = wait_event_interruptible(ring->irq_queue, - i915_seqno_passed(ring->get_seqno(ring), seqno) - || atomic_read(&dev_priv->mm.wedged)); - ring->irq_put(ring); + if (ring->irq_get(ring)) { + ret = wait_event_interruptible(ring->irq_queue, + i915_seqno_passed(ring->get_seqno(ring), seqno) + || atomic_read(&dev_priv->mm.wedged)); + ring->irq_put(ring); - if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) - ret = -EIO; + if (ret == 0 && atomic_read(&dev_priv->mm.wedged)) + ret = -EIO; + } } if (ret == 0) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 02e4dd82f754..2ddb98b5c90f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1186,10 +1186,9 @@ void i915_trace_irq_get(struct drm_device *dev, u32 seqno) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; struct intel_ring_buffer *ring = LP_RING(dev_priv); - if (dev_priv->trace_irq_seqno == 0) - ring->irq_get(ring); - - dev_priv->trace_irq_seqno = seqno; + if (dev_priv->trace_irq_seqno == 0 && + ring->irq_get(ring)) + dev_priv->trace_irq_seqno = seqno; } static int i915_wait_irq(struct drm_device * dev, int irq_nr) @@ -1211,10 +1210,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr) if (master_priv->sarea_priv) master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; - ring->irq_get(ring); - DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, - READ_BREADCRUMB(dev_priv) >= irq_nr); - ring->irq_put(ring); + ret = -ENODEV; + if (ring->irq_get(ring)) { + DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ, + READ_BREADCRUMB(dev_priv) >= irq_nr); + ring->irq_put(ring); + } if (ret == -EBUSY) { DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 74b99718a1fb..a3fd993e0de0 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -327,25 +327,28 @@ ring_get_seqno(struct intel_ring_buffer *ring) return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } -static void +static bool render_ring_get_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - if (dev->irq_enabled && ++ring->irq_refcount == 1) { + if (!dev->irq_enabled) + return false; + + if (atomic_inc_return(&ring->irq_refcount) == 1) { drm_i915_private_t *dev_priv = dev->dev_private; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - if (HAS_PCH_SPLIT(dev)) ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); else i915_enable_irq(dev_priv, I915_USER_INTERRUPT); - spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } + + return true; } static void @@ -353,8 +356,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; - BUG_ON(dev->irq_enabled && ring->irq_refcount == 0); - if (dev->irq_enabled && --ring->irq_refcount == 0) { + if (atomic_dec_and_test(&ring->irq_refcount)) { drm_i915_private_t *dev_priv = dev->dev_private; unsigned long irqflags; @@ -417,12 +419,15 @@ ring_add_request(struct intel_ring_buffer *ring, return 0; } -static void +static bool ring_get_irq(struct intel_ring_buffer *ring, u32 flag) { struct drm_device *dev = ring->dev; - if (dev->irq_enabled && ++ring->irq_refcount == 1) { + if (!dev->irq_enabled) + return false; + + if (atomic_inc_return(&ring->irq_refcount) == 1) { drm_i915_private_t *dev_priv = dev->dev_private; unsigned long irqflags; @@ -430,6 +435,8 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag) 
ironlake_enable_graphics_irq(dev_priv, flag); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } + + return true; } static void @@ -437,7 +444,7 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag) { struct drm_device *dev = ring->dev; - if (dev->irq_enabled && --ring->irq_refcount == 0) { + if (atomic_dec_and_test(&ring->irq_refcount)) { drm_i915_private_t *dev_priv = dev->dev_private; unsigned long irqflags; @@ -447,16 +454,15 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag) } } - -static void +static bool bsd_ring_get_irq(struct intel_ring_buffer *ring) { - ring_get_irq(ring, GT_BSD_USER_INTERRUPT); + return ring_get_irq(ring, GT_BSD_USER_INTERRUPT); } static void bsd_ring_put_irq(struct intel_ring_buffer *ring) { - ring_put_irq(ring, GT_BSD_USER_INTERRUPT); + ring_put_irq(ring, GT_BSD_USER_INTERRUPT); } static int @@ -846,16 +852,16 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, return 0; } -static void +static bool gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) { - ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); + return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); } static void gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) { - ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); + ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); } /* ring buffer for Video Codec for Gen6+ */ @@ -876,16 +882,16 @@ static const struct intel_ring_buffer gen6_bsd_ring = { /* Blitter support (SandyBridge+) */ -static void +static bool blt_ring_get_irq(struct intel_ring_buffer *ring) { - ring_get_irq(ring, GT_BLT_USER_INTERRUPT); + return ring_get_irq(ring, GT_BLT_USER_INTERRUPT); } static void blt_ring_put_irq(struct intel_ring_buffer *ring) { - ring_put_irq(ring, GT_BLT_USER_INTERRUPT); + ring_put_irq(ring, GT_BLT_USER_INTERRUPT); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 9652e4600b5e..8e2e357ad6ee 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -54,8 +54,8 @@ struct intel_ring_buffer { u32 irq_seqno; /* last seq seem at irq time */ u32 waiting_seqno; u32 sync_seqno[I915_NUM_RINGS-1]; - u32 irq_refcount; - void (*irq_get)(struct intel_ring_buffer *ring); + atomic_t irq_refcount; + bool __must_check (*irq_get)(struct intel_ring_buffer *ring); void (*irq_put)(struct intel_ring_buffer *ring); int (*init)(struct intel_ring_buffer *ring); -- cgit v1.2.3 From b5ba177d8d71f011c23b1cabec99fdaddae65c4d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Dec 2010 12:17:15 +0000 Subject: drm/i915: Poll for seqno completion if IRQ is disabled Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=32288 Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_gem.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 726c2ccd674c..5a0fbe59dd5b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2000,7 +2000,6 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, trace_i915_gem_request_wait_begin(dev, seqno); ring->waiting_seqno = seqno; - ret = -ENODEV; if (ring->irq_get(ring)) { if (interruptible) ret = wait_event_interruptible(ring->irq_queue, @@ -2012,7 +2011,10 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, || atomic_read(&dev_priv->mm.wedged)); ring->irq_put(ring); - } + } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring), + seqno) || + atomic_read(&dev_priv->mm.wedged), 3000)) + ret = 
-EBUSY; ring->waiting_seqno = 0; trace_i915_gem_request_wait_end(dev, seqno); -- cgit v1.2.3 From 1b894b59247728b02d6363d458088cf438f5ec92 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Dec 2010 20:04:54 +0000 Subject: drm/i915: Pass clock limits down to PLL matcher As we already know the limits for the hardware clock, pass it down rather than recomputing them for each match. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 17c213fef0ec..f2d50141e0c2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -642,26 +642,23 @@ static const intel_limit_t intel_limits_ironlake_display_port = { .find_pll = intel_find_pll_ironlake_dp, }; -static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) +static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, + int refclk) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; const intel_limit_t *limit; - int refclk = 120; if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100) - refclk = 100; - if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) { /* LVDS dual channel */ - if (refclk == 100) + if (refclk == 100000) limit = &intel_limits_ironlake_dual_lvds_100m; else limit = &intel_limits_ironlake_dual_lvds; } else { - if (refclk == 100) + if (refclk == 100000) limit = &intel_limits_ironlake_single_lvds_100m; else limit = &intel_limits_ironlake_single_lvds; @@ -702,13 +699,13 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) return limit; } -static const intel_limit_t *intel_limit(struct drm_crtc *crtc) +static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) { struct drm_device *dev = crtc->dev; const intel_limit_t *limit; if (HAS_PCH_SPLIT(dev)) - limit = intel_ironlake_limit(crtc); + limit = intel_ironlake_limit(crtc, refclk); else if (IS_G4X(dev)) { limit = intel_g4x_limit(crtc); } else if (IS_PINEVIEW(dev)) { @@ -773,11 +770,10 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type) * the given connectors. */ -static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) +static bool intel_PLL_is_valid(struct drm_device *dev, + const intel_limit_t *limit, + const intel_clock_t *clock) { - const intel_limit_t *limit = intel_limit (crtc); - struct drm_device *dev = crtc->dev; - if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) INTELPllInvalid ("p1 out of range\n"); if (clock->p < limit->p.min || limit->p.max < clock->p) @@ -849,8 +845,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int this_err; intel_clock(dev, refclk, &clock); - - if (!intel_PLL_is_valid(crtc, &clock)) + if (!intel_PLL_is_valid(dev, limit, + &clock)) continue; this_err = abs(clock.dot - target); @@ -912,9 +908,11 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int this_err; intel_clock(dev, refclk, &clock); - if (!intel_PLL_is_valid(crtc, &clock)) + if (!intel_PLL_is_valid(dev, limit, + &clock)) continue; - this_err = abs(clock.dot - target) ; + + this_err = abs(clock.dot - target); if (this_err < err_most) { *best_clock = clock; err_most = this_err; @@ -3655,7 +3653,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, * refclk, or FALSE. 
The returned values represent the clock equation: * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ - limit = intel_limit(crtc); + limit = intel_limit(crtc, refclk); ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); if (!ok) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); -- cgit v1.2.3 From c6df541c00e53a4fdff7a130d4365f848075adcc Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 15 Dec 2010 09:56:50 +0000 Subject: Revert "drm/i915: Avoid using PIPE_CONTROL on Ironlake" Restore PIPE_CONTROL once again just for Ironlake, as it appears that MI_USER_INTERRUPT does not have the same coherency guarantees, that is on Ironlake the interrupt following a GPU write is not guaranteed to arrive after the write is coherent from the CPU, as it does on the other generations. Reported-by: Zhenyu Wang Reported-by: Shuang He Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=32402 Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_irq.c | 3 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 162 +++++++++++++++++++++++++++++++- 2 files changed, 162 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2ddb98b5c90f..e4a2e2c3dbe3 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -349,7 +349,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) READ_BREADCRUMB(dev_priv); } - if (gt_iir & GT_USER_INTERRUPT) + if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) notify_ring(dev, &dev_priv->ring[RCS]); if (gt_iir & bsd_usr_interrupt) notify_ring(dev, &dev_priv->ring[VCS]); @@ -1558,6 +1558,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) else render_irqs = GT_USER_INTERRUPT | + GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; I915_WRITE(GTIER, render_irqs); POSTING_READ(GTIER); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index a3fd993e0de0..56bc95c056dd 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -209,6 +209,78 @@ static int init_ring_common(struct intel_ring_buffer *ring) return 0; } +/* + * 965+ support PIPE_CONTROL commands, which provide finer grained control + * over cache flushing. 
+ */ +struct pipe_control { + struct drm_i915_gem_object *obj; + volatile u32 *cpu_page; + u32 gtt_offset; +}; + +static int +init_pipe_control(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc; + struct drm_i915_gem_object *obj; + int ret; + + if (ring->private) + return 0; + + pc = kmalloc(sizeof(*pc), GFP_KERNEL); + if (!pc) + return -ENOMEM; + + obj = i915_gem_alloc_object(ring->dev, 4096); + if (obj == NULL) { + DRM_ERROR("Failed to allocate seqno page\n"); + ret = -ENOMEM; + goto err; + } + obj->agp_type = AGP_USER_CACHED_MEMORY; + + ret = i915_gem_object_pin(obj, 4096, true); + if (ret) + goto err_unref; + + pc->gtt_offset = obj->gtt_offset; + pc->cpu_page = kmap(obj->pages[0]); + if (pc->cpu_page == NULL) + goto err_unpin; + + pc->obj = obj; + ring->private = pc; + return 0; + +err_unpin: + i915_gem_object_unpin(obj); +err_unref: + drm_gem_object_unreference(&obj->base); +err: + kfree(pc); + return ret; +} + +static void +cleanup_pipe_control(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc = ring->private; + struct drm_i915_gem_object *obj; + + if (!ring->private) + return; + + obj = pc->obj; + kunmap(obj->pages[0]); + i915_gem_object_unpin(obj); + drm_gem_object_unreference(&obj->base); + + kfree(pc); + ring->private = NULL; +} + static int init_render_ring(struct intel_ring_buffer *ring) { struct drm_device *dev = ring->dev; @@ -222,9 +294,24 @@ static int init_render_ring(struct intel_ring_buffer *ring) I915_WRITE(MI_MODE, mode); } + if (INTEL_INFO(dev)->gen >= 6) { + } else if (IS_GEN5(dev)) { + ret = init_pipe_control(ring); + if (ret) + return ret; + } + return ret; } +static void render_ring_cleanup(struct intel_ring_buffer *ring) +{ + if (!ring->private) + return; + + cleanup_pipe_control(ring); +} + static void update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno) { @@ -299,6 +386,65 @@ intel_ring_sync(struct intel_ring_buffer *ring, return 0; } +#define PIPE_CONTROL_FLUSH(ring__, addr__) \ +do { \ + intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ + PIPE_CONTROL_DEPTH_STALL | 2); \ + intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ + intel_ring_emit(ring__, 0); \ + intel_ring_emit(ring__, 0); \ +} while (0) + +static int +pc_render_add_request(struct intel_ring_buffer *ring, + u32 *result) +{ + struct drm_device *dev = ring->dev; + u32 seqno = i915_gem_get_seqno(dev); + struct pipe_control *pc = ring->private; + u32 scratch_addr = pc->gtt_offset + 128; + int ret; + + /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently + * incoherent with writes to memory, i.e. completely fubar, + * so we need to use PIPE_NOTIFY instead. + * + * However, we also need to workaround the qword write + * incoherence by flushing the 6 PIPE_NOTIFY buffers out to + * memory before requesting an interrupt. 
+ */ + ret = intel_ring_begin(ring, 32); + if (ret) + return ret; + + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; /* write to separate cachelines */ + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + scratch_addr += 128; + PIPE_CONTROL_FLUSH(ring, scratch_addr); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | + PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | + PIPE_CONTROL_NOTIFY); + intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(ring, seqno); + intel_ring_emit(ring, 0); + intel_ring_advance(ring); + + *result = seqno; + return 0; +} + static int render_ring_add_request(struct intel_ring_buffer *ring, u32 *result) @@ -327,6 +473,13 @@ ring_get_seqno(struct intel_ring_buffer *ring) return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } +static u32 +pc_render_get_seqno(struct intel_ring_buffer *ring) +{ + struct pipe_control *pc = ring->private; + return pc->cpu_page[0]; +} + static bool render_ring_get_irq(struct intel_ring_buffer *ring) { @@ -342,7 +495,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) ironlake_enable_graphics_irq(dev_priv, - GT_USER_INTERRUPT); + GT_PIPE_NOTIFY | GT_USER_INTERRUPT); else i915_enable_irq(dev_priv, I915_USER_INTERRUPT); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -363,7 +516,8 @@ render_ring_put_irq(struct intel_ring_buffer *ring) spin_lock_irqsave(&dev_priv->irq_lock, irqflags); if (HAS_PCH_SPLIT(dev)) ironlake_disable_graphics_irq(dev_priv, - GT_USER_INTERRUPT); + GT_USER_INTERRUPT | + GT_PIPE_NOTIFY); else i915_disable_irq(dev_priv, I915_USER_INTERRUPT); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -776,6 +930,7 @@ static const struct intel_ring_buffer render_ring = { .irq_get = render_ring_get_irq, .irq_put = render_ring_put_irq, .dispatch_execbuffer = render_ring_dispatch_execbuffer, + .cleanup = render_ring_cleanup, }; /* ring buffer for bit-stream decoder */ @@ -1010,6 +1165,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev) *ring = render_ring; if (INTEL_INFO(dev)->gen >= 6) { ring->add_request = gen6_add_request; + } else if (IS_GEN5(dev)) { + ring->add_request = pc_render_add_request; + ring->get_seqno = pc_render_get_seqno; } if (!I915_NEED_GFX_HWS(dev)) { -- cgit v1.2.3 From b7f1de289c50beb4998611ba5373e539efd0f79f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Dec 2010 16:09:31 +0000 Subject: drm/i915: Wait for vblank before unpinning old fb Be paranoid and ensure that the vblank has passed and the scanout has switched to the new fb, before unpinning the old one and possibly tearing down its PTEs. 
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f2d50141e0c2..840bfc3ae719 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1626,8 +1626,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return ret; } - if (old_fb) + if (old_fb) { + intel_wait_for_vblank(dev, intel_crtc->pipe); i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); + } mutex_unlock(&dev->struct_mutex); -- cgit v1.2.3 From 1398261a2e84c537c409259cfe9db3d0abcd9f99 Mon Sep 17 00:00:00 2001 From: Yuanhan Liu Date: Wed, 15 Dec 2010 15:42:31 +0800 Subject: drm/i915: Add self-refresh support on Sandybridge Add support for memory self-refresh on Sandybridge, which now supports 3 levels of watermarks; the source of the latency values for the watermarks has changed as well. On Sandybridge, the LP0 WM value is no longer hardcoded: all the latency values must now be extracted from the MCHBAR SSKPD register. The MCHBAR base address has changed, too. For the WM values, if any calculated watermark value is larger than the maximum value that can be programmed into the associated watermark register, that watermark must be disabled. Signed-off-by: Yuanhan Liu [ickle: remove duplicate compute routines and fixup for checkpatch] Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_reg.h | 40 +++++ drivers/gpu/drm/i915/intel_display.c | 310 +++++++++++++++++++++++++++++++++-- 3 files changed, 334 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 22821994b35a..864e75d762e6 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -933,7 +933,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) drm_i915_private_t *dev_priv = dev->dev_private; bool sr_enabled = false; - if (IS_GEN5(dev)) + if (HAS_PCH_SPLIT(dev)) sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index b284c13fd632..61ef98db9ff0 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -596,6 +596,8 @@ #define ILK_DISPLAY_CHICKEN1 0x42000 #define ILK_FBCQ_DIS (1<<22) +#define ILK_PABSTRETCH_DIS (1<<21) + /* * GPIO regs @@ -955,6 +957,8 @@ */ #define MCHBAR_MIRROR_BASE 0x10000 +#define MCHBAR_MIRROR_BASE_SNB 0x140000 + /** 915-945 and GM965 MCH register controlling DRAM channel access */ #define DCC 0x10200 #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) @@ -2346,6 +2350,40 @@ #define ILK_FIFO_LINE_SIZE 64 +/* define the WM info on Sandybridge */ +#define SNB_DISPLAY_FIFO 128 +#define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */ +#define SNB_DISPLAY_DFTWM 8 +#define SNB_CURSOR_FIFO 32 +#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */ +#define SNB_CURSOR_DFTWM 8 + +#define SNB_DISPLAY_SR_FIFO 512 +#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */ +#define SNB_DISPLAY_DFT_SRWM 0x3f +#define SNB_CURSOR_SR_FIFO 64 +#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */ +#define SNB_CURSOR_DFT_SRWM 8 + +#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */ + +#define SNB_FIFO_LINE_SIZE 64 + + +/* the register from which we read all kinds of latency values */ +#define SSKPD 0x5d10 +#define
SSKPD_WM_MASK 0x3f +#define SSKPD_WM0_SHIFT 0 +#define SSKPD_WM1_SHIFT 8 +#define SSKPD_WM2_SHIFT 16 +#define SSKPD_WM3_SHIFT 24 + +#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK) +#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT) +#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT) +#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT) +#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT) + /* * The two pipe frame counter registers are not synchronized, so * reading a stable value is somewhat tricky. The following code @@ -2651,6 +2689,8 @@ #define ILK_VSDPFD_FULL (1<<21) #define ILK_DSPCLK_GATE 0x42020 #define ILK_DPARB_CLK_GATE (1<<5) +#define ILK_DPFD_CLK_GATE (1<<7) + /* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */ #define ILK_CLK_FBC (1<<7) #define ILK_DPFC_DIS1 (1<<8) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 840bfc3ae719..eaf2bc6b537d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2845,6 +2845,39 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = { ILK_FIFO_LINE_SIZE }; +static struct intel_watermark_params sandybridge_display_wm_info = { + SNB_DISPLAY_FIFO, + SNB_DISPLAY_MAXWM, + SNB_DISPLAY_DFTWM, + 2, + SNB_FIFO_LINE_SIZE +}; + +static struct intel_watermark_params sandybridge_cursor_wm_info = { + SNB_CURSOR_FIFO, + SNB_CURSOR_MAXWM, + SNB_CURSOR_DFTWM, + 2, + SNB_FIFO_LINE_SIZE +}; + +static struct intel_watermark_params sandybridge_display_srwm_info = { + SNB_DISPLAY_SR_FIFO, + SNB_DISPLAY_MAX_SRWM, + SNB_DISPLAY_DFT_SRWM, + 2, + SNB_FIFO_LINE_SIZE +}; + +static struct intel_watermark_params sandybridge_cursor_srwm_info = { + SNB_CURSOR_SR_FIFO, + SNB_CURSOR_MAX_SRWM, + SNB_CURSOR_DFT_SRWM, + 2, + SNB_FIFO_LINE_SIZE +}; + + /** * intel_calculate_wm - calculate watermark level * @clock_in_khz: pixel clock @@ -3378,6 +3411,10 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, static bool ironlake_compute_wm0(struct drm_device *dev, int pipe, + const struct intel_watermark_params *display, + int display_latency, + const struct intel_watermark_params *cursor, + int cursor_latency, int *plane_wm, int *cursor_wm) { @@ -3395,22 +3432,20 @@ static bool ironlake_compute_wm0(struct drm_device *dev, pixel_size = crtc->fb->bits_per_pixel / 8; /* Use the small buffer method to calculate plane watermark */ - entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000; - entries = DIV_ROUND_UP(entries, - ironlake_display_wm_info.cacheline_size); - *plane_wm = entries + ironlake_display_wm_info.guard_size; - if (*plane_wm > (int)ironlake_display_wm_info.max_wm) - *plane_wm = ironlake_display_wm_info.max_wm; + entries = ((clock * pixel_size / 1000) * display_latency * 100) / 1000; + entries = DIV_ROUND_UP(entries, display->cacheline_size); + *plane_wm = entries + display->guard_size; + if (*plane_wm > (int)display->max_wm) + *plane_wm = display->max_wm; /* Use the large buffer method to calculate cursor watermark */ line_time_us = ((htotal * 1000) / clock); - line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; + line_count = (cursor_latency * 100 / line_time_us + 1000) / 1000; entries = line_count * 64 * pixel_size; - entries = DIV_ROUND_UP(entries, - ironlake_cursor_wm_info.cacheline_size); - *cursor_wm = entries + ironlake_cursor_wm_info.guard_size; - if (*cursor_wm > ironlake_cursor_wm_info.max_wm) - 
*cursor_wm = ironlake_cursor_wm_info.max_wm; + entries = DIV_ROUND_UP(entries, cursor->cacheline_size); + *cursor_wm = entries + cursor->guard_size; + if (*cursor_wm > (int)cursor->max_wm) + *cursor_wm = (int)cursor->max_wm; return true; } @@ -3425,7 +3460,12 @@ static void ironlake_update_wm(struct drm_device *dev, int tmp; enabled = 0; - if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) { + if (ironlake_compute_wm0(dev, 0, + &ironlake_display_wm_info, + ILK_LP0_PLANE_LATENCY, + &ironlake_cursor_wm_info, + ILK_LP0_CURSOR_LATENCY, + &plane_wm, &cursor_wm)) { I915_WRITE(WM0_PIPEA_ILK, (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); DRM_DEBUG_KMS("FIFO watermarks for pipe A -" @@ -3434,7 +3474,12 @@ static void ironlake_update_wm(struct drm_device *dev, enabled++; } - if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) { + if (ironlake_compute_wm0(dev, 1, + &ironlake_display_wm_info, + ILK_LP0_PLANE_LATENCY, + &ironlake_cursor_wm_info, + ILK_LP0_CURSOR_LATENCY, + &plane_wm, &cursor_wm)) { I915_WRITE(WM0_PIPEB_ILK, (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); DRM_DEBUG_KMS("FIFO watermarks for pipe B -" @@ -3500,6 +3545,197 @@ static void ironlake_update_wm(struct drm_device *dev, /* XXX setup WM2 and WM3 */ } +/* + * Check the wm result. + * + * If any calculated watermark value is larger than the maximum value that + * can be programmed into the associated watermark register, that watermark + * must be disabled. + * + * Also return false if all of the watermark values are 0, which is what + * sandybridge_compute_srwm sets them to in order to indicate the latency + * is ZERO. + */ +static bool sandybridge_check_srwm(struct drm_device *dev, int level, + int fbc_wm, int display_wm, int cursor_wm) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," + " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); + + if (fbc_wm > SNB_FBC_MAX_SRWM) { + DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", + fbc_wm, SNB_FBC_MAX_SRWM, level); + + /* fbc has its own way to disable FBC WM */ + I915_WRITE(DISP_ARB_CTL, + I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); + return false; + } + + if (display_wm > SNB_DISPLAY_MAX_SRWM) { + DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", + display_wm, SNB_DISPLAY_MAX_SRWM, level); + return false; + } + + if (cursor_wm > SNB_CURSOR_MAX_SRWM) { + DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", + cursor_wm, SNB_CURSOR_MAX_SRWM, level); + return false; + } + + if (!(fbc_wm || display_wm || cursor_wm)) { + DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); + return false; + } + + return true; +} + +/* + * Compute watermark values of WM[1-3]. + */ +static bool sandybridge_compute_srwm(struct drm_device *dev, int level, + int hdisplay, int htotal, int pixel_size, + int clock, int latency_ns, int *fbc_wm, + int *display_wm, int *cursor_wm) +{ + + unsigned long line_time_us; + int small, large; + int entries; + int line_count, line_size; + + if (!latency_ns) { + *fbc_wm = *display_wm = *cursor_wm = 0; + return false; + } + + line_time_us = (htotal * 1000) / clock; + line_count = (latency_ns / line_time_us + 1000) / 1000; + line_size = hdisplay * pixel_size; + + /* Use the minimum of the small and large buffer method for primary */ + small = ((clock * pixel_size / 1000) * latency_ns) / 1000; + large = line_count * line_size; + + entries = DIV_ROUND_UP(min(small, large), + sandybridge_display_srwm_info.cacheline_size); +
*display_wm = entries + sandybridge_display_srwm_info.guard_size; + + /* + * Spec said: + * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 + */ + *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; + + /* calculate the self-refresh watermark for display cursor */ + entries = line_count * pixel_size * 64; + entries = DIV_ROUND_UP(entries, + sandybridge_cursor_srwm_info.cacheline_size); + *cursor_wm = entries + sandybridge_cursor_srwm_info.guard_size; + + return sandybridge_check_srwm(dev, level, + *fbc_wm, *display_wm, *cursor_wm); +} + +static void sandybridge_update_wm(struct drm_device *dev, + int planea_clock, int planeb_clock, + int hdisplay, int htotal, + int pixel_size) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int latency = SNB_READ_WM0_LATENCY(); + int fbc_wm, plane_wm, cursor_wm, enabled; + int clock; + + enabled = 0; + if (ironlake_compute_wm0(dev, 0, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + I915_WRITE(WM0_PIPEA_ILK, + (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); + DRM_DEBUG_KMS("FIFO watermarks for pipe A -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled++; + } + + if (ironlake_compute_wm0(dev, 1, + &sandybridge_display_wm_info, latency, + &sandybridge_cursor_wm_info, latency, + &plane_wm, &cursor_wm)) { + I915_WRITE(WM0_PIPEB_ILK, + (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); + DRM_DEBUG_KMS("FIFO watermarks for pipe B -" + " plane %d, cursor: %d\n", + plane_wm, cursor_wm); + enabled++; + } + + /* + * Calculate and update the self-refresh watermark only when one + * display plane is used. + * + * SNB supports 3 levels of watermarks. + * + * WM1/WM2/WM3 watermarks have to be enabled in the ascending order, + * and disabled in the descending order. + * + */ + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + if (enabled != 1) + return; + + clock = planea_clock ?
planea_clock : planeb_clock; + + /* WM1 */ + if (!sandybridge_compute_srwm(dev, 1, hdisplay, htotal, pixel_size, + clock, SNB_READ_WM1_LATENCY() * 500, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM1_LP_ILK, + WM1_LP_SR_EN | + (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM2 */ + if (!sandybridge_compute_srwm(dev, 2, + hdisplay, htotal, pixel_size, + clock, SNB_READ_WM2_LATENCY() * 500, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM2_LP_ILK, + WM2_LP_EN | + (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); + + /* WM3 */ + if (!sandybridge_compute_srwm(dev, 3, + hdisplay, htotal, pixel_size, + clock, SNB_READ_WM3_LATENCY() * 500, + &fbc_wm, &plane_wm, &cursor_wm)) + return; + + I915_WRITE(WM3_LP_ILK, + WM3_LP_EN | + (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | + (fbc_wm << WM1_LP_FBC_SHIFT) | + (plane_wm << WM1_LP_SR_SHIFT) | + cursor_wm); +} + /** * intel_update_watermarks - update FIFO watermark values based on current modes * @@ -5975,9 +6211,9 @@ void intel_enable_clock_gating(struct drm_device *dev) I915_WRITE(DISP_ARB_CTL, (I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS)); - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); } /* * Based on the document from hardware guys the following bits @@ -6010,6 +6246,38 @@ void intel_enable_clock_gating(struct drm_device *dev) _3D_CHICKEN2_WM_READ_PIPELINED); } + if (IS_GEN6(dev)) { + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); + + /* + * According to the spec the following bits should be + * set in order to enable memory self-refresh and fbc: + * The bit21 and bit22 of 0x42000 + * The bit21 and bit22 of 0x42004 + * The bit5 and bit7 of 0x42020 + * The bit14 of 0x70180 + * The bit14 of 0x71180 + */ + I915_WRITE(ILK_DISPLAY_CHICKEN1, + I915_READ(ILK_DISPLAY_CHICKEN1) | + ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_DPARB_GATE | ILK_VSDPFD_FULL); + I915_WRITE(ILK_DSPCLK_GATE, + I915_READ(ILK_DSPCLK_GATE) | + ILK_DPARB_CLK_GATE | + ILK_DPFD_CLK_GATE); + + I915_WRITE(DSPACNTR, + I915_READ(DSPACNTR) | + DISPPLANE_TRICKLE_FEED_DISABLE); + I915_WRITE(DSPBCNTR, + I915_READ(DSPBCNTR) | + DISPPLANE_TRICKLE_FEED_DISABLE); + } } else if (IS_G4X(dev)) { uint32_t dspclk_gate; I915_WRITE(RENCLK_GATE_D1, 0); @@ -6176,6 +6444,14 @@ static void intel_init_display(struct drm_device *dev) "Disable CxSR\n"); dev_priv->display.update_wm = NULL; } + } else if (IS_GEN6(dev)) { + if (SNB_READ_WM0_LATENCY()) { + dev_priv->display.update_wm = sandybridge_update_wm; + } else { + DRM_DEBUG_KMS("Failed to read display plane latency. " + "Disable CxSR\n"); + dev_priv->display.update_wm = NULL; + } } else dev_priv->display.update_wm = NULL; } else if (IS_PINEVIEW(dev)) { -- cgit v1.2.3 From 9c04f015ebc2cc2cca5a4a576deb82a311578edc Mon Sep 17 00:00:00 2001 From: Yuanhan Liu Date: Wed, 15 Dec 2010 15:42:32 +0800 Subject: drm/i915: Add frame buffer compression on Sandybridge Add frame buffer compression on Sandybridge. The method is similar to Ironlake, except that two new registers of type GTTMMADR must be written with the right fence info. 
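For illustration, the gen6 half of the change amounts to two extra register writes in ironlake_enable_fbc() (a sketch using the register names added by this patch; see the hunk below):

	if (IS_GEN6(dev)) {
		/* Tell the FBC unit which CPU fence covers the front
		 * buffer, so CPU writes through the fence are tracked... */
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
		/* ...and the Y offset of the scanout within that fence. */
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
	}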
Signed-off-by: Yuanhan Liu Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 4 ++-- drivers/gpu/drm/i915/i915_drv.c | 1 + drivers/gpu/drm/i915/i915_reg.h | 10 ++++++++++ drivers/gpu/drm/i915/intel_display.c | 8 +++++++- 4 files changed, 20 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index e9fb8953c606..3f7b20392e26 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1078,7 +1078,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) if (!cfb_base) goto err_fb; - if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) { + if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) { compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, 4096, 4096, 0); if (compressed_llb) @@ -1096,7 +1096,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) intel_disable_fbc(dev); dev_priv->compressed_fb = compressed_fb; - if (IS_IRONLAKE_M(dev)) + if (HAS_PCH_SPLIT(dev)) I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); else if (IS_GM45(dev)) { I915_WRITE(DPFC_CB_BASE, compressed_fb->start); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 5f20cd988612..bdb29b2a01ed 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -165,6 +165,7 @@ static const struct intel_device_info intel_sandybridge_d_info = { static const struct intel_device_info intel_sandybridge_m_info = { .gen = 6, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, + .has_fbc = 1, .has_bsd_ring = 1, .has_blt_ring = 1, }; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 61ef98db9ff0..820e9dfaadc7 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -599,6 +599,16 @@ #define ILK_PABSTRETCH_DIS (1<<21) +/* + * Framebuffer compression for Sandybridge + * + * The following two registers are of type GTTMMADR + */ +#define SNB_DPFC_CTL_SA 0x100100 +#define SNB_CPU_FENCE_ENABLE (1<<29) +#define DPFC_CPU_FENCE_OFFSET 0x100104 + + /* * GPIO regs */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index eaf2bc6b537d..8645a974a499 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1262,6 +1262,12 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) /* enable it... */ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + if (IS_GEN6(dev)) { + I915_WRITE(SNB_DPFC_CTL_SA, + SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); + } + DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); } @@ -6395,7 +6401,7 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.dpms = i9xx_crtc_dpms; if (I915_HAS_FBC(dev)) { - if (IS_IRONLAKE_M(dev)) { + if (HAS_PCH_SPLIT(dev)) { dev_priv->display.fbc_enabled = ironlake_fbc_enabled; dev_priv->display.enable_fbc = ironlake_enable_fbc; dev_priv->display.disable_fbc = ironlake_disable_fbc; -- cgit v1.2.3 From c45aadabb961501b3e64bc92d0bf12fdce26d37d Mon Sep 17 00:00:00 2001 From: Francisco Jerez Date: Thu, 16 Dec 2010 10:30:35 +1000 Subject: drm/nv50: fix a couple of vm init issues Fixes overwriting the first page table entry when testing that the PRAMIN BAR can be correctly read/written, and adds an additional bar flush after poking the BAR3 control regs. 
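As a sketch of the corrected readback test (mirroring the hunk below): the probe now goes through the channel's RAMIN object rather than through the BAR at offset 0 (poking the BAR is what clobbered the first page table entry), and the original word is restored afterwards:

	/* flush, then declare PRAMIN usable before the first access */
	dev_priv->engine.instmem.flush(dev);
	dev_priv->ramin_available = true;

	tmp = nv_ro32(chan->ramin, 0);	/* was: nv_ri32(dev, 0) */
	nv_wo32(chan->ramin, 0, ~tmp);
	if (nv_ro32(chan->ramin, 0) != ~tmp)
		NV_ERROR(dev, "PRAMIN readback failed\n");
	nv_wo32(chan->ramin, 0, tmp);	/* put the original word back */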
Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_instmem.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 38f30270cb9f..adac4da98f7e 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -182,16 +182,17 @@ nv50_instmem_init(struct drm_device *dev) nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12)); nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4)); - tmp = nv_ri32(dev, 0); - nv_wi32(dev, 0, ~tmp); - if (nv_ri32(dev, 0) != ~tmp) { + dev_priv->engine.instmem.flush(dev); + dev_priv->ramin_available = true; + + tmp = nv_ro32(chan->ramin, 0); + nv_wo32(chan->ramin, 0, ~tmp); + if (nv_ro32(chan->ramin, 0) != ~tmp) { NV_ERROR(dev, "PRAMIN readback failed\n"); ret = -EIO; goto error; } - nv_wi32(dev, 0, tmp); - - dev_priv->ramin_available = true; + nv_wo32(chan->ramin, 0, tmp); /* BAR1 */ ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, -- cgit v1.2.3 From 99ee7fac189893c90145a22b86bbcfdc98f69a9c Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 23 Nov 2010 11:47:49 +1000 Subject: drm/radeon: add initial tracepoint support. This adds bo-create and fence-seq tracking tracepoints. This is just an initial set to play around with; we should investigate what other tracepoints would be useful. Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/Makefile | 5 +- drivers/gpu/drm/radeon/radeon_fence.c | 4 ++ drivers/gpu/drm/radeon/radeon_object.c | 2 + drivers/gpu/drm/radeon/radeon_trace.h | 82 ++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_trace_points.c | 9 +++ 5 files changed, 101 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/radeon/radeon_trace.h create mode 100644 drivers/gpu/drm/radeon/radeon_trace_points.c diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 6cae4f2028d2..e97e6f842699 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -65,10 +65,13 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ - evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o + evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ + radeon_trace_points.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o radeon-$(CONFIG_ACPI) += radeon_acpi.o obj-$(CONFIG_DRM_RADEON)+= radeon.o + +CFLAGS_radeon_trace_points.o := -I$(src) \ No newline at end of file diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index daacb281dfaf..171b0b2e3a64 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -38,6 +38,7 @@ #include "drm.h" #include "radeon_reg.h" #include "radeon.h" +#include "radeon_trace.h" int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) { @@ -57,6 +58,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) } else radeon_fence_ring_emit(rdev, fence); + trace_radeon_fence_emit(rdev->ddev, fence->seq); fence->emited = true; list_del(&fence->list); list_add_tail(&fence->list, &rdev->fence_drv.emited); @@ -213,6 +215,7 @@ int
radeon_fence_wait(struct radeon_fence *fence, bool intr) retry: /* save current sequence used to check for GPU lockup */ seq = rdev->fence_drv.last_seq; + trace_radeon_fence_wait_begin(rdev->ddev, seq); if (intr) { radeon_irq_kms_sw_irq_get(rdev); r = wait_event_interruptible_timeout(rdev->fence_drv.queue, @@ -227,6 +230,7 @@ retry: radeon_fence_signaled(fence), timeout); radeon_irq_kms_sw_irq_put(rdev); } + trace_radeon_fence_wait_end(rdev->ddev, seq); if (unlikely(!radeon_fence_signaled(fence))) { /* we were interrupted for some reason and fence * isn't signaled yet, resume wait diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index a8594d289bcf..8bdf0ba2983a 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -34,6 +34,7 @@ #include <drm/drmP.h> #include "radeon_drm.h" #include "radeon.h" +#include "radeon_trace.h" int radeon_ttm_init(struct radeon_device *rdev); @@ -137,6 +138,7 @@ retry: list_add_tail(&bo->list, &rdev->gem.objects); mutex_unlock(&bo->rdev->gem.mutex); } + trace_radeon_bo_create(bo); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h new file mode 100644 index 000000000000..eafd8160a155 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_trace.h @@ -0,0 +1,82 @@ +#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _RADEON_TRACE_H_ + +#include <linux/stringify.h> +#include <linux/types.h> +#include <linux/tracepoint.h> + +#include <drm/drmP.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM radeon +#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM) +#define TRACE_INCLUDE_FILE radeon_trace + +TRACE_EVENT(radeon_bo_create, + TP_PROTO(struct radeon_bo *bo), + TP_ARGS(bo), + TP_STRUCT__entry( + __field(struct radeon_bo *, bo) + __field(u32, pages) + ), + + TP_fast_assign( + __entry->bo = bo; + __entry->pages = bo->tbo.num_pages; + ), + TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) +); + +DECLARE_EVENT_CLASS(radeon_fence_request, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, seqno) + ), + + TP_fast_assign( + __entry->dev = dev->primary->index; + __entry->seqno = seqno; + ), + + TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno) +); + +DEFINE_EVENT(radeon_fence_request, radeon_fence_emit, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno) +); + +DEFINE_EVENT(radeon_fence_request, radeon_fence_retire, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno) +); + +DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno) +); + +DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end, + + TP_PROTO(struct drm_device *dev, u32 seqno), + + TP_ARGS(dev, seqno) +); + +#endif + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c new file mode 100644 index 000000000000..8175993df84d --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_trace_points.c @@ -0,0 +1,9 @@ +/* Copyright Red Hat Inc 2010.
+ * Author : Dave Airlie + */ +#include <drm/drmP.h> +#include "radeon_drm.h" +#include "radeon.h" + +#define CREATE_TRACE_POINTS +#include "radeon_trace.h" -- cgit v1.2.3 From b921bae2eedc806b118a03d986cf0be9ffd3af40 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 16 Dec 2010 14:47:46 +1000 Subject: drm/ttm: delay freeing of old node during move_memcpy until after iounmap Drivers using their own implementation of io_mem_reserve/io_mem_free are likely to store the tracking information for the map in mem.mm_node, so it can't be freed while still mapped. Signed-off-by: Ben Skeggs Reviewed-by: Thomas Hellstrom Signed-off-by: Dave Airlie --- drivers/gpu/drm/ttm/ttm_bo_util.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index a89839f83f6c..77dbf408c0d0 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -370,7 +370,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, } mb(); out2: - ttm_bo_free_old_node(bo); old_copy = *old_mem; *old_mem = *new_mem; new_mem->mm_node = NULL; @@ -385,6 +384,7 @@ out1: ttm_mem_reg_iounmap(bdev, old_mem, new_iomap); out: ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); + ttm_bo_mem_put(bo, &old_copy); return ret; } EXPORT_SYMBOL(ttm_bo_move_memcpy); -- cgit v1.2.3 From b4183e301ac1dfaf93e3e92fd70a0c3203c5a27d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 15 Dec 2010 11:04:10 -0500 Subject: drm/radeon/kms: fix vram start calculation on ontario (v2) Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 6 ++++++ drivers/gpu/drm/radeon/evergreend.h | 1 + drivers/gpu/drm/radeon/rv770.c | 13 +------------ drivers/gpu/drm/radeon/rv770d.h | 1 - 4 files changed, 8 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 522d29b37007..f7d7477daffb 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1134,6 +1134,12 @@ static void evergreen_mc_program(struct radeon_device *rdev) rdev->mc.vram_end >> 12); } WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); + if (rdev->flags & RADEON_IS_IGP) { + tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; + tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; + tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20; + WREG32(MC_FUS_VM_FB_OFFSET, tmp); + } tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); WREG32(MC_VM_FB_LOCATION, tmp); diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 87fcaba76695..5b869ce86917 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -202,6 +202,7 @@ #define MC_VM_AGP_BOT 0x202C #define MC_VM_AGP_BASE 0x2030 #define MC_VM_FB_LOCATION 0x2024 +#define MC_FUS_VM_FB_OFFSET 0x2898 #define MC_VM_MB_L1_TLB0_CNTL 0x2234 #define MC_VM_MB_L1_TLB1_CNTL 0x2238 #define MC_VM_MB_L1_TLB2_CNTL 0x223C diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 7c2e0b19a558..645aa1fd7611 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -271,12 +271,6 @@ static void rv770_mc_program(struct radeon_device *rdev) rdev->mc.vram_end >> 12); } WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); - if (rdev->flags & RADEON_IS_IGP) { - tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; - tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; - tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20; -
WREG32(MC_FUS_VM_FB_OFFSET, tmp); - } tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); WREG32(MC_VM_FB_LOCATION, tmp); @@ -1074,12 +1068,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) mc->mc_vram_size >> 20, mc->vram_start, mc->vram_end, mc->real_vram_size >> 20); } else { - u64 base = 0; - if (rdev->flags & RADEON_IS_IGP) { - base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; - base |= RREG32(MC_FUS_VM_FB_OFFSET) & 0x00F00000; - } - radeon_vram_location(rdev, &rdev->mc, base); + radeon_vram_location(rdev, &rdev->mc, 0); rdev->mc.gtt_base_align = 0; radeon_gtt_location(rdev, mc); } diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 98f9ad256d3d..fc77e1e1a179 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -158,7 +158,6 @@ #define MC_VM_AGP_BOT 0x202C #define MC_VM_AGP_BASE 0x2030 #define MC_VM_FB_LOCATION 0x2024 -#define MC_FUS_VM_FB_OFFSET 0x2898 #define MC_VM_MB_L1_TLB0_CNTL 0x2234 #define MC_VM_MB_L1_TLB1_CNTL 0x2238 #define MC_VM_MB_L1_TLB2_CNTL 0x223C -- cgit v1.2.3 From b08ebe7e776e5be0271ed1e1bbb384e1f29dd117 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 3 Dec 2010 15:34:16 -0500 Subject: drm/radeon/kms: properly print ontario chip id Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_device.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index dd93c9c94144..3952cf3d0ee9 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -81,6 +81,7 @@ static const char radeon_family_name[][16] = { "JUNIPER", "CYPRESS", "HEMLOCK", + "PALM", "LAST", }; -- cgit v1.2.3 From 0af7e4dff50454905092d468e91c1ef92e10e6b4 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Wed, 8 Dec 2010 04:07:19 +0100 Subject: drm/i915: Add support for precise vblank timestamping (v2) v2: Change IS_IRONLAKE to IS_GEN5 to adapt to 2.6.37 This patch adds new functions for use by the drm core: .get_vblank_timestamp() provides a precise timestamp for the end of the most recent (or current) vblank interval of a given crtc, as needed for the DRI2 implementation of the OML_sync_control extension. It is a thin wrapper around the drm function drm_calc_vbltimestamp_from_scanoutpos() which does almost all the work. .get_scanout_position() provides the current horizontal and vertical video scanout position and "in vblank" status of a given crtc, as needed by the drm for use by drm_calc_vbltimestamp_from_scanoutpos(). The patch modifies the pageflip completion routine to use these precise vblank timestamps as the timestamps for pageflip completion events. This code has only been tested on an HP Mini netbook with an Atom processor and an Intel 945GME GPU. The codepath for (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) GPUs has not been tested so far due to lack of hardware.
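For intuition, the core helper reduces to a scanout-position-to-time conversion. Conceptually (a simplified sketch with illustrative variable names, not the exact drm_irq.c code):

	/* linedur_ns and pixeldur_ns are derived from the mode's dotclock
	 * and htotal; vpos is negative inside the vblank (see the
	 * corrective offset applied in i915_get_crtc_scanoutpos() below).
	 */
	delta_ns = (s64)vpos * linedur_ns + (s64)hpos * pixeldur_ns;
	etime = ktime_sub_ns(ktime_get(), delta_ns);

That is, the timestamp is "now" corrected by how far the beam has already advanced past the point being timestamped.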
Signed-off-by: Mario Kleiner Acked-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_drv.c | 2 + drivers/gpu/drm/i915/i915_drv.h | 7 +++ drivers/gpu/drm/i915/i915_irq.c | 86 ++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_display.c | 29 +++++++++--- 5 files changed, 119 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index bdb29b2a01ed..9eee6cf7901e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -652,6 +652,8 @@ static struct drm_driver driver = { .device_is_agp = i915_driver_device_is_agp, .enable_vblank = i915_enable_vblank, .disable_vblank = i915_disable_vblank, + .get_vblank_timestamp = i915_get_vblank_timestamp, + .get_scanout_position = i915_get_crtc_scanoutpos, .irq_preinstall = i915_driver_irq_preinstall, .irq_postinstall = i915_driver_irq_postinstall, .irq_uninstall = i915_driver_irq_uninstall, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 30780f2cab6f..53dfc8398a96 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1019,6 +1019,13 @@ void i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); void intel_enable_asle (struct drm_device *dev); +int i915_get_vblank_timestamp(struct drm_device *dev, int crtc, + int *max_error, + struct timeval *vblank_time, + unsigned flags); + +int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, + int *vpos, int *hpos); #ifdef CONFIG_DEBUG_FS extern void i915_destroy_error_state(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e4a2e2c3dbe3..adf983f01dda 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -248,6 +248,92 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) return I915_READ(reg); } +int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, + int *vpos, int *hpos) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u32 vbl = 0, position = 0; + int vbl_start, vbl_end, htotal, vtotal; + bool in_vbl = true; + int ret = 0; + + if (!i915_pipe_enabled(dev, pipe)) { + DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " + "pipe %d\n", pipe); + return 0; + } + + /* Get vtotal. */ + vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff); + + if (INTEL_INFO(dev)->gen >= 4) { + /* No obvious pixelcount register. Only query vertical + * scanout position from Display scan line register. + */ + position = I915_READ(PIPEDSL(pipe)); + + /* Decode into vertical scanout position. Don't have + * horizontal scanout position. + */ + *vpos = position & 0x1fff; + *hpos = 0; + } else { + /* Have access to pixelcount since start of frame. + * We can split this into vertical and horizontal + * scanout position. + */ + position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; + + htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff); + *vpos = position / htotal; + *hpos = position - (*vpos * htotal); + } + + /* Query vblank area. */ + vbl = I915_READ(VBLANK(pipe)); + + /* Test position against vblank region. */ + vbl_start = vbl & 0x1fff; + vbl_end = (vbl >> 16) & 0x1fff; + + if ((*vpos < vbl_start) || (*vpos > vbl_end)) + in_vbl = false; + + /* Inside "upper part" of vblank area? Apply corrective offset: */ + if (in_vbl && (*vpos >= vbl_start)) + *vpos = *vpos - vtotal; + + /* Readouts valid? 
*/ + if (vbl > 0) + ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; + + /* In vblank? */ + if (in_vbl) + ret |= DRM_SCANOUTPOS_INVBL; + + return ret; +} + +int i915_get_vblank_timestamp(struct drm_device *dev, int crtc, + int *max_error, + struct timeval *vblank_time, + unsigned flags) +{ + struct drm_crtc *drmcrtc; + + if (crtc < 0 || crtc >= dev->num_crtcs) { + DRM_ERROR("Invalid crtc %d\n", crtc); + return -EINVAL; + } + + /* Get drm_crtc to timestamp: */ + drmcrtc = intel_get_crtc_for_pipe(dev, crtc); + + /* Helper routine in DRM core does all the work: */ + return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, + vblank_time, flags, drmcrtc); +} + /* * Handle hotplug events outside the interrupt handler proper. */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 820e9dfaadc7..c2231f7d2d97 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2253,6 +2253,7 @@ #define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC) #define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF) #define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL) +#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL) #define DSPARB 0x70030 #define DSPARB_CSTART_MASK (0x7f << 7) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8645a974a499..0c201d684584 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5252,7 +5252,8 @@ static void intel_unpin_work_fn(struct work_struct *__work) } static void do_intel_finish_page_flip(struct drm_device *dev, - struct drm_crtc *crtc) + struct drm_crtc *crtc, + int called_before_vblirq) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -5274,19 +5275,33 @@ static void do_intel_finish_page_flip(struct drm_device *dev, } intel_crtc->unpin_work = NULL; - drm_vblank_put(dev, intel_crtc->pipe); if (work->event) { e = work->event; - do_gettimeofday(&now); - e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe); + e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &now); + + /* Called before vblank count and timestamps have + * been updated for the vblank interval of flip + * completion? Need to increment vblank count and + * add one videorefresh duration to returned timestamp + * to account for this. + */ + if (called_before_vblirq) { + e->event.sequence++; + now = ns_to_timeval(timeval_to_ns(&now) + + crtc->framedur_ns); + } + e->event.tv_sec = now.tv_sec; e->event.tv_usec = now.tv_usec; + list_add_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); } + drm_vblank_put(dev, intel_crtc->pipe); + spin_unlock_irqrestore(&dev->event_lock, flags); obj = work->old_fb_obj; @@ -5306,7 +5321,8 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - do_intel_finish_page_flip(dev, crtc); + /* Called after drm_handle_vblank has run for finish vblank. 
*/ + do_intel_finish_page_flip(dev, crtc, 0); } void intel_finish_page_flip_plane(struct drm_device *dev, int plane) @@ -5314,7 +5330,8 @@ void intel_finish_page_flip_plane(struct drm_device *dev, int plane) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; - do_intel_finish_page_flip(dev, crtc); + /* Called before drm_handle_vblank has run for finish vblank. */ + do_intel_finish_page_flip(dev, crtc, 1); } void intel_prepare_page_flip(struct drm_device *dev, int plane) -- cgit v1.2.3 From 49b14a5ca26fc18bafe33bd9704a1a1cea681fbf Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Thu, 9 Dec 2010 07:00:07 +0100 Subject: drm/i915: Add Guess-o-matic for pageflip timestamping. This patch changes the strategy for pageflip completion timestamping. It detects if the pageflip completion routine gets executed before or after drm_handle_vblank, and thereby decides if the returned vblank count and timestamp must be incremented by 1 frame(duration) or not. It compares the current system time at invocation against the current vblank timestamp. If the difference is more than 0.9 video refresh interval durations then it assumes the vblank timestamp and count are outdated and need to be incremented and does so. Otherwise it assumes a delayed pageflip irq and doesn't correct the timestamp and count. Advantage of this patch: Pageflip timestamping becomes more robust against implementation errors and is maintenance free for future GPU's. Disadvantage: A few dozen (hundred?) nsecs extra time spent in pageflip irq handler for each flip, compared to hard-coded per-gpu settings? Signed-off-by: Mario Kleiner Acked-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0c201d684584..fe6538297872 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5252,21 +5252,22 @@ static void intel_unpin_work_fn(struct work_struct *__work) } static void do_intel_finish_page_flip(struct drm_device *dev, - struct drm_crtc *crtc, - int called_before_vblirq) + struct drm_crtc *crtc) { drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_unpin_work *work; struct drm_i915_gem_object *obj; struct drm_pending_vblank_event *e; - struct timeval now; + struct timeval tnow, tvbl; unsigned long flags; /* Ignore early vblank irqs */ if (intel_crtc == NULL) return; + do_gettimeofday(&tnow); + spin_lock_irqsave(&dev->event_lock, flags); work = intel_crtc->unpin_work; if (work == NULL || !work->pending) { @@ -5278,22 +5279,29 @@ static void do_intel_finish_page_flip(struct drm_device *dev, if (work->event) { e = work->event; - e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &now); + e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); /* Called before vblank count and timestamps have * been updated for the vblank interval of flip * completion? Need to increment vblank count and * add one videorefresh duration to returned timestamp - * to account for this. + * to account for this. We assume this happened if we + * get called over 0.9 frame durations after the last + * timestamped vblank. 
+ * + * This calculation cannot be used with vrefresh rates + * below 5Hz (10Hz to be on the safe side) without + * promoting to 64 bit integers. */ - if (called_before_vblirq) { + if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) > + 9 * crtc->framedur_ns) { e->event.sequence++; - now = ns_to_timeval(timeval_to_ns(&now) + - crtc->framedur_ns); + tvbl = ns_to_timeval(timeval_to_ns(&tvbl) + + crtc->framedur_ns); } - e->event.tv_sec = now.tv_sec; - e->event.tv_usec = now.tv_usec; + e->event.tv_sec = tvbl.tv_sec; + e->event.tv_usec = tvbl.tv_usec; list_add_tail(&e->base.link, &e->base.file_priv->event_list); @@ -5321,8 +5329,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - /* Called after drm_handle_vblank has run for finish vblank. */ - do_intel_finish_page_flip(dev, crtc, 0); + do_intel_finish_page_flip(dev, crtc); } void intel_finish_page_flip_plane(struct drm_device *dev, int plane) @@ -5330,8 +5337,7 @@ void intel_finish_page_flip_plane(struct drm_device *dev, int plane) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; - /* Called before drm_handle_vblank has run for finish vblank. */ - do_intel_finish_page_flip(dev, crtc, 1); + do_intel_finish_page_flip(dev, crtc); } void intel_prepare_page_flip(struct drm_device *dev, int plane) -- cgit v1.2.3 From 6714afb108b4973a07413ec58cb300b6d90b8df4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 17 Dec 2010 04:10:51 +0000 Subject: drm/i915/sdvo: Border and stall select became test bits in gen5 This is even more important as those bits will be moved in the future. Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_sdvo.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 27e63abf2a73..6c0bb18a26e8 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1045,7 +1045,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, /* Set the SDVO control regs. */ if (INTEL_INFO(dev)->gen >= 4) { - sdvox = SDVO_BORDER_ENABLE; + sdvox = 0; + if (INTEL_INFO(dev)->gen < 5) + sdvox |= SDVO_BORDER_ENABLE; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) sdvox |= SDVO_VSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) @@ -1075,7 +1077,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; } - if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL) + if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL && + INTEL_INFO(dev)->gen < 5) sdvox |= SDVO_STALL_SELECT; intel_sdvo_write_sdvox(intel_sdvo, sdvox); } -- cgit v1.2.3 From 9c3d2f7ffac34c62fea0b73e607707168a6f09b1 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 17 Dec 2010 10:54:26 +0000 Subject: drm/i915: Enable EI mode for RCx decision making on Sandybridge And no, I have no idea what the difference is either, just that it is the recommendation.
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/intel_display.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index fe6538297872..c79bee4b4d56 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6141,6 +6141,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_RC6p_ENABLE | GEN6_RC_CTL_RC6_ENABLE | + GEN6_RC_CTL_EI_MODE(1) | GEN6_RC_CTL_HW_ENABLE); I915_WRITE(GEN6_RC_NORMAL_FREQ, -- cgit v1.2.3 From 3b8d8d91d51c7d15cda51052624169edf7b6dbc6 Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 17 Dec 2010 14:19:02 -0800 Subject: drm/i915: dynamic render p-state support for Sandy Bridge Add an interrupt handler for switching graphics frequencies and handling PM interrupts. This should allow for increased performance when busy and lower power consumption when idle. Signed-off-by: Jesse Barnes Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_debugfs.c | 54 ++++++++++++++++++++++++++++++------ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_irq.c | 47 +++++++++++++++++++++++++++++-- drivers/gpu/drm/i915/i915_reg.h | 8 +++++- drivers/gpu/drm/i915/i915_suspend.c | 9 ++++-- drivers/gpu/drm/i915/intel_display.c | 36 ++++++++++++++++++++---- drivers/gpu/drm/i915/intel_drv.h | 2 ++ 7 files changed, 137 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 864e75d762e6..92f75782c332 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -797,15 +797,51 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - u16 rgvswctl = I915_READ16(MEMSWCTL); - u16 rgvstat = I915_READ16(MEMSTAT_ILK); - - seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); - seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); - seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> - MEMSTAT_VID_SHIFT); - seq_printf(m, "Current P-state: %d\n", - (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); + + if (IS_GEN5(dev)) { + u16 rgvswctl = I915_READ16(MEMSWCTL); + u16 rgvstat = I915_READ16(MEMSTAT_ILK); + + seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); + seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f); + seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> + MEMSTAT_VID_SHIFT); + seq_printf(m, "Current P-state: %d\n", + (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); + } else if (IS_GEN6(dev)) { + u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); + u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); + u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); + int max_freq; + + /* RPSTAT1 is in the GT power well */ + __gen6_force_wake_get(dev_priv); + + seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); + seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); + seq_printf(m, "Render p-state ratio: %d\n", + (gt_perf_status & 0xff00) >> 8); + seq_printf(m, "Render p-state VID: %d\n", + gt_perf_status & 0xff); + seq_printf(m, "Render p-state limit: %d\n", + rp_state_limits & 0xff); + + max_freq = (rp_state_cap & 0xff0000) >> 16; + seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", + max_freq * 100); + + max_freq = (rp_state_cap & 0xff00) >> 8; + seq_printf(m, "Nominal 
(RP1) frequency: %dMHz\n", + max_freq * 100); + + max_freq = rp_state_cap & 0xff; + seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", + max_freq * 100); + + __gen6_force_wake_put(dev_priv); + } else { + seq_printf(m, "no P-state info available\n"); + } return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 53dfc8398a96..2a653cc80395 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1264,6 +1264,7 @@ extern void intel_disable_fbc(struct drm_device *dev); extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); extern bool intel_fbc_enabled(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_device *dev, u8 val); +extern void gen6_set_rps(struct drm_device *dev, u8 val); extern void intel_detect_pch (struct drm_device *dev); extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index adf983f01dda..0dadc025b77b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -397,11 +397,49 @@ static void notify_ring(struct drm_device *dev, jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); } +static void gen6_pm_irq_handler(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u8 new_delay = dev_priv->cur_delay; + u32 pm_iir; + + pm_iir = I915_READ(GEN6_PMIIR); + if (!pm_iir) + return; + + if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { + if (dev_priv->cur_delay != dev_priv->max_delay) + new_delay = dev_priv->cur_delay + 1; + if (new_delay > dev_priv->max_delay) + new_delay = dev_priv->max_delay; + } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) { + if (dev_priv->cur_delay != dev_priv->min_delay) + new_delay = dev_priv->cur_delay - 1; + if (new_delay < dev_priv->min_delay) { + new_delay = dev_priv->min_delay; + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, + I915_READ(GEN6_RP_INTERRUPT_LIMITS) | + ((new_delay << 16) & 0x3f0000)); + } else { + /* Make sure we continue to get down interrupts + * until we hit the minimum frequency */ + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, + I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000); + } + + } + + gen6_set_rps(dev, new_delay); + dev_priv->cur_delay = new_delay; + + I915_WRITE(GEN6_PMIIR, pm_iir); +} + static irqreturn_t ironlake_irq_handler(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = IRQ_NONE; - u32 de_iir, gt_iir, de_ier, pch_iir; + u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; u32 hotplug_mask; struct drm_i915_master_private *master_priv; u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; @@ -417,8 +455,10 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) de_iir = I915_READ(DEIIR); gt_iir = I915_READ(GTIIR); pch_iir = I915_READ(SDEIIR); + pm_iir = I915_READ(GEN6_PMIIR); - if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) + if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && + (!IS_GEN6(dev) || pm_iir == 0)) goto done; if (HAS_PCH_CPT(dev)) @@ -470,6 +510,9 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) i915_handle_rps_change(dev); } + if (IS_GEN6(dev)) + gen6_pm_irq_handler(dev); + /* should clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); I915_WRITE(GTIIR, gt_iir); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c2231f7d2d97..d60860ec8cf4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ 
b/drivers/gpu/drm/i915/i915_reg.h @@ -1188,6 +1188,10 @@ #define DDRMPLL1 0X12c20 #define PEG_BAND_GAP_DATA 0x14d68 +#define GEN6_GT_PERF_STATUS 0x145948 +#define GEN6_RP_STATE_LIMITS 0x145994 +#define GEN6_RP_STATE_CAP 0x145998 + /* * Logical Context regs */ @@ -3169,7 +3173,7 @@ #define FORCEWAKE 0xA18C #define FORCEWAKE_ACK 0x130090 -#define GEN6_RC_NORMAL_FREQ 0xA008 +#define GEN6_RPNSWREQ 0xA008 #define GEN6_TURBO_DISABLE (1<<31) #define GEN6_FREQUENCY(x) ((x)<<25) #define GEN6_OFFSET(x) ((x)<<19) @@ -3185,6 +3189,7 @@ #define GEN6_RC_CTL_HW_ENABLE (1<<31) #define GEN6_RP_DOWN_TIMEOUT 0xA010 #define GEN6_RP_INTERRUPT_LIMITS 0xA014 +#define GEN6_RPSTAT1 0xA01C #define GEN6_RP_CONTROL 0xA024 #define GEN6_RP_MEDIA_TURBO (1<<11) #define GEN6_RP_USE_NORMAL_FREQ (1<<9) @@ -3208,6 +3213,7 @@ #define GEN6_RC6_THRESHOLD 0xA0B8 #define GEN6_RC6p_THRESHOLD 0xA0BC #define GEN6_RC6pp_THRESHOLD 0xA0C0 +#define GEN6_PMINTRMSK 0xA168 #define GEN6_PMISR 0x44020 #define GEN6_PMIMR 0x44024 diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index a311809f3c80..f623efdb1151 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -817,8 +817,10 @@ int i915_save_state(struct drm_device *dev) dev_priv->saveIMR = I915_READ(IMR); } - if (HAS_PCH_SPLIT(dev)) + if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); + if (IS_GEN6(dev)) + gen6_disable_rps(dev); intel_disable_clock_gating(dev); @@ -867,11 +869,14 @@ int i915_restore_state(struct drm_device *dev) /* Clock gating state */ intel_enable_clock_gating(dev); - if (HAS_PCH_SPLIT(dev)) { + if (IS_IRONLAKE_M(dev)) { ironlake_enable_drps(dev); intel_init_emon(dev); } + if (IS_GEN6(dev)) + gen6_enable_rps(dev_priv); + /* Cache mode state */ I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index c79bee4b4d56..880659680d0a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -6021,6 +6021,25 @@ void ironlake_disable_drps(struct drm_device *dev) } +void gen6_set_rps(struct drm_device *dev, u8 val) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 swreq; + + swreq = (val & 0x3ff) << 25; + I915_WRITE(GEN6_RPNSWREQ, swreq); +} + +void gen6_disable_rps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + I915_WRITE(GEN6_RPNSWREQ, 1 << 31); + I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); + I915_WRITE(GEN6_PMIER, 0); + I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); +} + static unsigned long intel_pxfreq(u32 vidfreq) { unsigned long freq; @@ -6107,7 +6126,7 @@ void intel_init_emon(struct drm_device *dev) dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); } -static void gen6_enable_rc6(struct drm_i915_private *dev_priv) +void gen6_enable_rps(struct drm_i915_private *dev_priv) { int i; @@ -6120,7 +6139,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_RC_STATE, 0); __gen6_force_wake_get(dev_priv); - /* disable the counters and set determistic thresholds */ + /* disable the counters and set deterministic thresholds */ I915_WRITE(GEN6_RC_CONTROL, 0); I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); @@ -6144,7 +6163,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv) GEN6_RC_CTL_EI_MODE(1) | GEN6_RC_CTL_HW_ENABLE); - I915_WRITE(GEN6_RC_NORMAL_FREQ, + I915_WRITE(GEN6_RPNSWREQ, GEN6_FREQUENCY(10) | GEN6_OFFSET(0) | GEN6_AGGRESSIVE_TURBO); @@ -6189,6 +6208,9 @@ static void 
gen6_enable_rc6(struct drm_i915_private *dev_priv) GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_EI_EXPIRED); + I915_WRITE(GEN6_PMIMR, 0); + /* enable all PM interrupts */ + I915_WRITE(GEN6_PMINTRMSK, 0); __gen6_force_wake_put(dev_priv); } @@ -6381,9 +6403,6 @@ void intel_enable_clock_gating(struct drm_device *dev) I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); } } - - if (IS_GEN6(dev)) - gen6_enable_rc6(dev_priv); } void intel_disable_clock_gating(struct drm_device *dev) @@ -6657,6 +6676,9 @@ void intel_modeset_init(struct drm_device *dev) intel_init_emon(dev); } + if (IS_GEN6(dev)) + gen6_enable_rps(dev_priv); + INIT_WORK(&dev_priv->idle_work, intel_idle_update); setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, (unsigned long)dev); @@ -6690,6 +6712,8 @@ void intel_modeset_cleanup(struct drm_device *dev) if (IS_IRONLAKE_M(dev)) ironlake_disable_drps(dev); + if (IS_GEN6(dev)) + gen6_disable_rps(dev); intel_disable_clock_gating(dev); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index acdea6549ec4..d782ad9fd6db 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -298,6 +298,8 @@ extern void intel_enable_clock_gating(struct drm_device *dev); extern void intel_disable_clock_gating(struct drm_device *dev); extern void ironlake_enable_drps(struct drm_device *dev); extern void ironlake_disable_drps(struct drm_device *dev); +extern void gen6_enable_rps(struct drm_i915_private *dev_priv); +extern void gen6_disable_rps(struct drm_device *dev); extern void intel_init_emon(struct drm_device *dev); extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, -- cgit v1.2.3 From 72bfa19c8deb4d1db5ad068c34fd580cb295cbe8 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 19 Dec 2010 11:42:05 +0000 Subject: drm/i915: Allow the application to choose the constant addressing mode The relative-to-general state default is useless as it means having to rewrite the streaming kernels for each batch. Relative-to-surface is more useful, as that stream usually needs to be rewritten for each batch. And absolute addressing mode, vital if you start streaming state, is also only available by adjusting the register... 
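For reference, a hypothetical userspace snippet (not part of this patch; fd and execbuf are assumed to be an open DRM device and a struct drm_i915_gem_execbuffer2 being prepared) that probes for the new parameter and requests absolute addressing:

	drm_i915_getparam_t gp = { .param = I915_PARAM_HAS_EXEC_CONSTANTS };
	int has_constants = 0;

	gp.value = &has_constants;
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && has_constants) {
		/* The addressing mode lives in the low bits of the
		 * execbuffer2 flags; clear the field before setting it. */
		execbuf.flags &= ~I915_EXEC_CONSTANTS_MASK;
		execbuf.flags |= I915_EXEC_CONSTANTS_ABSOLUTE;
	}

Note the kernel only emits the MI_LOAD_REGISTER_IMM when the requested mode differs from the current one, so leaving the flags at the default costs nothing.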
Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_dma.c | 3 +++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem.c | 2 ++ drivers/gpu/drm/i915/i915_gem_execbuffer.c | 35 +++++++++++++++++++++++++++++- include/drm/i915_drm.h | 12 ++++++++++ 5 files changed, 52 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 3f7b20392e26..18746e6cb129 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -778,6 +778,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_COHERENT_RINGS: value = 1; break; + case I915_PARAM_HAS_EXEC_CONSTANTS: + value = INTEL_INFO(dev)->gen >= 4; + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2a653cc80395..aac1bf332f75 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -258,6 +258,7 @@ typedef struct drm_i915_private { const struct intel_device_info *info; int has_gem; + int relative_constants_mode; void __iomem *regs; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5a0fbe59dd5b..c79c0b62ef60 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -3735,6 +3735,8 @@ i915_gem_load(struct drm_device *dev) } } + dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; + /* Old X drivers will take 0-2 for front, back, depth buffers */ if (!drm_core_check_feature(dev, DRIVER_MODESET)) dev_priv->fence_reg_start = 3; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index fda0dc858a1f..61129e6759eb 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -957,7 +957,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct intel_ring_buffer *ring; u32 exec_start, exec_len; u32 seqno; - int ret, i; + int ret, mode, i; if (!i915_gem_check_execbuffer(args)) { DRM_ERROR("execbuf with invalid offset/length\n"); @@ -997,6 +997,39 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, return -EINVAL; } + mode = args->flags & I915_EXEC_CONSTANTS_MASK; + switch (mode) { + case I915_EXEC_CONSTANTS_REL_GENERAL: + case I915_EXEC_CONSTANTS_ABSOLUTE: + case I915_EXEC_CONSTANTS_REL_SURFACE: + if (ring == &dev_priv->ring[RCS] && + mode != dev_priv->relative_constants_mode) { + if (INTEL_INFO(dev)->gen < 4) + return -EINVAL; + + if (INTEL_INFO(dev)->gen > 5 && + mode == I915_EXEC_CONSTANTS_REL_SURFACE) + return -EINVAL; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, INSTPM); + intel_ring_emit(ring, + I915_EXEC_CONSTANTS_MASK << 16 | mode); + intel_ring_advance(ring); + + dev_priv->relative_constants_mode = mode; + } + break; + default: + DRM_ERROR("execbuf with unknown constants: %d\n", mode); + return -EINVAL; + } + if (args->buffer_count < 1) { DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); return -EINVAL; diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index a2776e2807a4..0039f1f97ad8 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -289,6 +289,7 @@ typedef struct drm_i915_irq_wait { #define I915_PARAM_HAS_BLT 11 #define I915_PARAM_HAS_RELAXED_FENCING 12 #define I915_PARAM_HAS_COHERENT_RINGS 13 +#define I915_PARAM_HAS_EXEC_CONSTANTS 14 
typedef struct drm_i915_getparam { int param; @@ -635,6 +636,17 @@ struct drm_i915_gem_execbuffer2 { #define I915_EXEC_RENDER (1<<0) #define I915_EXEC_BSD (2<<0) #define I915_EXEC_BLT (3<<0) + +/* Used for switching the constants addressing mode on gen4+ RENDER ring. + * Gen6+ only supports relative addressing to dynamic state (default) and + * absolute addressing. + * + * These flags are ignored for the BSD and BLT rings. + */ +#define I915_EXEC_CONSTANTS_MASK (3<<6) +#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ +#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) +#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ __u64 flags; __u64 rsvd1; __u64 rsvd2; -- cgit v1.2.3 From 5909a77ac62cc042f94bd262016cf468a2f96022 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 20 Dec 2010 09:45:15 +0000 Subject: drm/i915: Undo "Uncouple render/power ctx before suspending" Manual revert of 0cdab21f9a1fca50dd27e488839f5a6578e333b2, just to remove the call to disable the clock gating and power ctx before suspend. Peter Clifton bisected a suspend failure on his gme45 and found this to be the culprit. As this was intended to be a fix for a similar suspend failure for Ironlake (it didn't work), undoing this patch should have no other side-effects. Reported-and-tested-by: Peter Clifton Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_suspend.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index f623efdb1151..410772466fa7 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -822,7 +822,9 @@ int i915_save_state(struct drm_device *dev) if (IS_GEN6(dev)) gen6_disable_rps(dev); + /* XXX disabling the clock gating breaks suspend on gm45 intel_disable_clock_gating(dev); + */ /* Cache mode state */ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); -- cgit v1.2.3 From d095e23206e6291cbde4b2937d1316a42d9e6440 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 20 Dec 2010 12:26:26 +0300 Subject: drm/nouveau: sizeof() vs ARRAY_SIZE() ARRAY_SIZE() was intended here, sizeof() is too large.
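The bug class, reduced to a self-contained sketch (generic values, not the driver's actual table):

        static const int types[4] = { 1, 0, 0, 2 };

        /* sizeof(types) is 16 (bytes) while ARRAY_SIZE(types) is 4
         * (elements); bounds-checking an index against sizeof() lets
         * 4..15 through and reads past the end of the array. */
        if (type < ARRAY_SIZE(types) && types[type])
                return true;

The two only coincide for arrays of single-byte elements, which is what makes this easy to miss in review.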
Signed-off-by: Dan Carpenter Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_vram.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c index 47489ed0a5a8..58e98ad36347 100644 --- a/drivers/gpu/drm/nouveau/nv50_vram.c +++ b/drivers/gpu/drm/nouveau/nv50_vram.c @@ -42,7 +42,7 @@ nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags) { int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8; - if (likely(type < sizeof(types) && types[type])) + if (likely(type < ARRAY_SIZE(types) && types[type])) return true; return false; } -- cgit v1.2.3 From 910d1b3a8cec4fef20f11eb39e9ec6ac6e964b0e Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 21 Dec 2010 11:15:44 +1000 Subject: drm/nv50: fix smatch warning in nv50_vram.c Reported-by: Dan Carpenter Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_vm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c index eebab95f59b2..7939387f7f80 100644 --- a/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/drivers/gpu/drm/nouveau/nv50_vm.c @@ -91,7 +91,8 @@ void nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys) { - u32 block, i; + u32 block; + int i; phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0); pte <<= 3; -- cgit v1.2.3 From 147dc38e976f4dd6d888d585649e724a3e82a9b2 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 16 Dec 2010 13:32:36 +1000 Subject: drm/nv50: add missing license header to nv50_fbcon.c Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nv50_fbcon.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 6d38cb1488ae..791ded1c5c6d 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -1,3 +1,27 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_dma.h" -- cgit v1.2.3 From 57084d05379fe5c081d024006129b0565a11855f Mon Sep 17 00:00:00 2001 From: James Simmons Date: Mon, 20 Dec 2010 19:10:39 +0000 Subject: drm/fb: Don't expose mmio for fbdev emulation layer For the fbdev api if the struct fb_var_screeninfo accel_flags field is set to FB_ACCELF_TEXT then userland applications can not mmap the mmio region. Since it is a bad idea for DRM drivers to expose the mmio region via the fbdev layer we always set the accel_flags to prevent this. Please apply. Signed-off-by: James Simmons Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fb_helper.c | 3 +++ drivers/gpu/drm/i915/intel_fb.c | 6 +----- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 4 ---- drivers/gpu/drm/radeon/radeon_fb.c | 2 -- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 3 --- 5 files changed, 4 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index d2849e4ea4d0..aa377115483b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -985,6 +985,8 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; + info->fix.mmio_start = 0; + info->fix.mmio_len = 0; info->fix.type_aux = 0; info->fix.xpanstep = 1; /* doing it in hw */ info->fix.ypanstep = 1; /* doing it in hw */ @@ -1005,6 +1007,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe info->var.xres_virtual = fb->width; info->var.yres_virtual = fb->height; info->var.bits_per_pixel = fb->bits_per_pixel; + info->var.accel_flags = FB_ACCELF_TEXT; info->var.xoffset = 0; info->var.yoffset = 0; info->var.activate = FB_ACTIVATE_NOW; diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index af2a1dddc28e..ced3eef8da07 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -68,7 +68,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, struct drm_gem_object *fbo = NULL; struct drm_i915_gem_object *obj_priv; struct device *device = &dev->pdev->dev; - int size, ret, mmio_bar = IS_GEN2(dev) ? 
1 : 0; + int size, ret; /* we don't do packed 24bpp */ if (sizes->surface_bpp == 24) @@ -156,10 +156,6 @@ static int intelfb_create(struct intel_fbdev *ifbdev, drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); - /* FIXME: we really shouldn't expose mmio space at all */ - info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar); - info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar); - info->pixmap.size = 64*1024; info->pixmap.buf_align = 8; info->pixmap.access_align = 32; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index ea861c915149..9dbe1eff2ae5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -348,10 +348,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); - /* FIXME: we really shouldn't expose mmio space at all */ - info->fix.mmio_start = pci_resource_start(pdev, 1); - info->fix.mmio_len = pci_resource_len(pdev, 1); - /* Set aperture base/size for vesafb takeover */ info->apertures = dev_priv->apertures; if (!info->apertures) { diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index efa211898fe6..f7b4762fa6b6 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -247,8 +247,6 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev, info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; info->apertures->ranges[0].size = rdev->mc.real_vram_size; - info->fix.mmio_start = 0; - info->fix.mmio_len = 0; info->pixmap.size = 64*1024; info->pixmap.buf_align = 8; info->pixmap.access_align = 32; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 41d9a5b73c03..dd596f2bd6e8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -480,9 +480,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv) info->fix.smem_start = 0; info->fix.smem_len = fb_size; - info->fix.mmio_start = 0; - info->fix.mmio_len = 0; - info->pseudo_palette = par->pseudo_palette; info->screen_base = par->vmalloc; info->screen_size = fb_size; -- cgit v1.2.3 From 21719b398b0e49d21c0b9083cffd552ba28ed4af Mon Sep 17 00:00:00 2001 From: Tijl Coosemans Date: Tue, 21 Dec 2010 12:54:52 +1000 Subject: drm/radeon: Definition of R_0003C2_GENMO_WT seems wrong In drivers/gpu/drm/radeon/r100d.h R_0003C2_GENMO_WT is defined as 0x3C0. I think this should be 0x3C2. 
Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r100d.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h index b121b6c678d4..eab91760fae0 100644 --- a/drivers/gpu/drm/radeon/r100d.h +++ b/drivers/gpu/drm/radeon/r100d.h @@ -551,7 +551,7 @@ #define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) #define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) #define C_000360_CUR2_LOCK 0x7FFFFFFF -#define R_0003C2_GENMO_WT 0x0003C0 +#define R_0003C2_GENMO_WT 0x0003C2 #define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) #define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) #define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE -- cgit v1.2.3 From 48c36c8f9a3e881953bb72deb55623a53795a684 Mon Sep 17 00:00:00 2001 From: Marek Olšák Date: Sat, 18 Dec 2010 11:41:49 +0100 Subject: drm/radeon/kms: allow r500 US_FORMAT regs in the CS checker MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit add to the 2.8 bump for pageflip The purpose of these regs is to work around a TX hw bug in R520. Signed-off-by: Marek Olšák Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_drv.c | 2 +- drivers/gpu/drm/radeon/reg_srcs/rv515 | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index a92d2a5cea90..6fb1218f9d76 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -48,7 +48,7 @@ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs - * 2.8.0 - pageflip support + * 2.8.0 - pageflip support, r500 US_FORMAT regs. */ #define KMS_DRIVER_MAJOR 2 #define KMS_DRIVER_MINOR 8 diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index b3f9f1d92005..ef422bbacfc1 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 @@ -304,6 +304,22 @@ rv515 0x6d40 0x4630 US_CODE_ADDR 0x4634 US_CODE_RANGE 0x4638 US_CODE_OFFSET +0x4640 US_FORMAT0_0 +0x4644 US_FORMAT0_1 +0x4648 US_FORMAT0_2 +0x464C US_FORMAT0_3 +0x4650 US_FORMAT0_4 +0x4654 US_FORMAT0_5 +0x4658 US_FORMAT0_6 +0x465C US_FORMAT0_7 +0x4660 US_FORMAT0_8 +0x4664 US_FORMAT0_9 +0x4668 US_FORMAT0_10 +0x466C US_FORMAT0_11 +0x4670 US_FORMAT0_12 +0x4674 US_FORMAT0_13 +0x4678 US_FORMAT0_14 +0x467C US_FORMAT0_15 0x46A4 US_OUT_FMT_0 0x46A8 US_OUT_FMT_1 0x46AC US_OUT_FMT_2 -- cgit v1.2.3 From b61c99de8e7905916d1508ba7c9a9c08656e169d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 16 Dec 2010 18:40:29 -0500 Subject: drm/radeon/kms: fix DCE4.1 dig routing (v2) Works more like DCE4.0 despite what the docs say. This fixes blank screen issues when changing crtc routing due to incorrect crtc to dig mapping. v2: only two DIGx blocks, routing is hardcoded based on link. 
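Reduced to its essence, the selection the patch below converges on is (illustrative, not literal code):

        /* DCE4.1: only two DIG blocks; the block to use is fixed by
         * the link the connector is wired to, not by the crtc id. */
        if (ASIC_IS_DCE41(rdev))
                return dig->linkb ? 1 : 0;

while DCE4.0 keeps the per-transmitter mapping (0/1 for UNIPHY, 2/3 for UNIPHY1, 4/5 for UNIPHY2, with link B taking the odd encoder).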
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_encoders.c | 54 +++++++++++++++----------------- 1 file changed, 25 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index e4e64a80b58d..55b84b8e6b29 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -910,15 +910,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t else args.v3.ucLaneNum = 4; - if (ASIC_IS_DCE41(rdev)) { - args.v3.acConfig.ucEncoderSel = dig->dig_encoder; - if (dig->linkb) - args.v3.acConfig.ucLinkSel = 1; - } else { - if (dig->linkb) { - args.v3.acConfig.ucLinkSel = 1; - args.v3.acConfig.ucEncoderSel = 1; - } + if (dig->linkb) { + args.v3.acConfig.ucLinkSel = 1; + args.v3.acConfig.ucEncoderSel = 1; } /* Select the PLL for the PHY @@ -1535,32 +1529,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) struct radeon_encoder_atom_dig *dig; uint32_t dig_enc_in_use = 0; - /* on DCE41 and encoder can driver any phy so just crtc id */ - if (ASIC_IS_DCE41(rdev)) { - return radeon_crtc->crtc_id; - } - if (ASIC_IS_DCE4(rdev)) { dig = radeon_encoder->enc_priv; - switch (radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + if (ASIC_IS_DCE41(rdev)) { if (dig->linkb) return 1; else return 0; - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - if (dig->linkb) - return 3; - else - return 2; - break; - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - if (dig->linkb) - return 5; - else - return 4; - break; + } else { + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + if (dig->linkb) + return 1; + else + return 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + if (dig->linkb) + return 3; + else + return 2; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + if (dig->linkb) + return 5; + else + return 4; + break; + } } } -- cgit v1.2.3 From 7d6aa80c35bb1f706e2894100dcc51e19a83f913 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 20 Dec 2010 11:21:43 -0500 Subject: drm/radeon/kms/atom: clean up op_mask handler Readability cleanup and fix debugging output, no functional change. 
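For reference, the operation itself is untouched; in plain C it amounts to:

        /* AtomBIOS mask op: keep the bits selected by mask, OR in src. */
        dst = (dst & mask) | src;

the patch below only renames src1/src2 to mask/src and logs the fetched mask value directly.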
Reported-by: Frank Huang Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atom.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 8e421f644a54..9b0773b75e86 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -733,16 +733,16 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) { uint8_t attr = U8((*ptr)++); - uint32_t dst, src1, src2, saved; + uint32_t dst, mask, src, saved; int dptr = *ptr; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); - SDEBUG(" src1: "); - src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); - SDEBUG(" src2: "); - src2 = atom_get_src(ctx, attr, ptr); - dst &= src1; - dst |= src2; + mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); + SDEBUG(" mask: 0x%08x", mask); + SDEBUG(" src: "); + src = atom_get_src(ctx, attr, ptr); + dst &= mask; + dst |= src; SDEBUG(" dst: "); atom_put_dst(ctx, arg, attr, &dptr, dst, saved); } -- cgit v1.2.3 From 3ce05168907c9b1358492a73badb0ff1603fb81d Mon Sep 17 00:00:00 2001 From: David Fries Date: Sun, 12 Dec 2010 12:39:22 -0600 Subject: drm/kms: load fbcon from drm_kms_helper Kconfig says fbcon is required by drm_kms_helper. If radeon, fbcon, and drm_kms_helper are all modules, radeon is auto loaded (by PCI id?), drm_kms_helper is loaded because of the module dependency, but fbcon isn't loaded leaving the console unusable. Since fbcon is required and there isn't an explicit module dependency, request the module to be loaded from drm_kms_helper. Signed-off-by: David Fries Cc: David Airlie Cc: dri-devel@lists.freedesktop.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fb_helper.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index aa377115483b..5c4f9b9ecdc0 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1533,3 +1533,24 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) } EXPORT_SYMBOL(drm_fb_helper_hotplug_event); +/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EMBEDDED) + * but the module doesn't depend on any fb console symbols. At least + * attempt to load fbcon to avoid leaving the system without a usable console. + */ +#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EMBEDDED) +static int __init drm_fb_helper_modinit(void) +{ + const char *name = "fbcon"; + struct module *fbcon; + + mutex_lock(&module_mutex); + fbcon = find_module(name); + mutex_unlock(&module_mutex); + + if (!fbcon) + request_module_nowait(name); + return 0; +} + +module_init(drm_fb_helper_modinit); +#endif -- cgit v1.2.3 From 000fa7cf46479238f7b9f2d7763e41af5268b16a Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Fri, 10 Dec 2010 16:00:19 +0100 Subject: drm-vblank: Always return true vblank count of scheduled vblank event. This patch tries to make sure that the vbl.reply.sequence vblank count for a queued or emitted vblank event always corresponds to the true vblank count of queueing/emission, so the ddx can rely on the returned target_msc for consistency checks and implementation of swap_intervals in glXSwapBuffers(). Without this there is a small race-condition between the userspace ddx queueing a vblank event and the vblank counter incrementing before the event gets queued in the kernel. 
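A sketch of the consumer side (hypothetical DDX code, not from this patch), using libdrm's drmWaitVBlank():

        drmVBlank vbl;

        vbl.request.type = DRM_VBLANK_RELATIVE | DRM_VBLANK_EVENT;
        vbl.request.sequence = 1;                       /* aim at the next vblank */
        vbl.request.signal = (unsigned long)swap_data;  /* hypothetical cookie */
        drmWaitVBlank(fd, &vbl);

        /* reply.sequence used to echo a stale target if the counter
         * ticked while the request was in flight; it now reports the
         * count actually used for queueing, so it is safe to use for
         * swap interval bookkeeping. */
        target_msc = vbl.reply.sequence;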
Signed-off-by: Mario Kleiner Acked-by: Jesse Barnes Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_irq.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 4e82d0d3c378..55160d7c38b6 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -1086,15 +1086,18 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, e->event.sequence = vblwait->request.sequence; if ((seq - vblwait->request.sequence) <= (1 << 23)) { + e->event.sequence = seq; e->event.tv_sec = now.tv_sec; e->event.tv_usec = now.tv_usec; drm_vblank_put(dev, e->pipe); list_add_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); + vblwait->reply.sequence = seq; trace_drm_vblank_event_delivered(current->pid, pipe, vblwait->request.sequence); } else { list_add_tail(&e->base.link, &dev->vblank_event_list); + vblwait->reply.sequence = vblwait->request.sequence; } spin_unlock_irqrestore(&dev->event_lock, flags); -- cgit v1.2.3 From 7a868e18a4907dc8f1f05d99bcb9fd3fa8881ee4 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 8 Dec 2010 22:13:05 -0500 Subject: drm/radeon/kms: use LCD physical size from vbios tables if available Some systems have the LCD width and height in mm available in the LCD info table. Use this info if there is no EDID to provide it. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_atombios.c | 3 +++ drivers/gpu/drm/radeon/radeon_connectors.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index ac882639b3ed..35c5ff09040d 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1500,6 +1500,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct if (misc & ATOM_DOUBLE_CLOCK_MODE) lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; + lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize; + lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize; + /* set crtc values */ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 3bef9f6d66fd..f3ba066ded20 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -472,6 +472,9 @@ static int radeon_lvds_get_modes(struct drm_connector *connector) if (mode) { ret = 1; drm_mode_probed_add(connector, mode); + /* add the width/height from vbios tables if available */ + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; /* add scaled modes */ radeon_add_common_modes(encoder, connector); } -- cgit v1.2.3 From c324acd5032f516b8188da99d2ce05cf8d1294d6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 8 Dec 2010 22:13:06 -0500 Subject: drm/radeon/kms: parse the extended LCD info block This block may contain various additional LCD info such as physical size and a stored EDID. 
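Schematically, the parser added below walks the table's variable-length records by their leading type byte (condensed; known-but-uninteresting record types are skipped by their struct size, unknown ones abort the walk):

        while (*record != ATOM_RECORD_END_TYPE) {
                switch (*record) {
                case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
                        /* validate and keep the embedded EDID */
                        record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
                        break;
                case LCD_PANEL_RESOLUTION_RECORD_TYPE:
                        /* panel physical size in mm */
                        record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
                        break;
                /* ... remaining record types as in the diff ... */
                }
        }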
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_atombios.c | 53 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_combios.c | 3 +- drivers/gpu/drm/radeon/radeon_display.c | 14 +++++++-- drivers/gpu/drm/radeon/radeon_mode.h | 2 +- 4 files changed, 67 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 35c5ff09040d..d6c611eee204 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1515,6 +1515,59 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct else lvds->linkb = false; + /* parse the lcd record table */ + if (lvds_info->info.usModePatchTableOffset) { + ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; + ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; + bool bad_record = false; + u8 *record = (u8 *)(mode_info->atom_context->bios + + data_offset + + lvds_info->info.usModePatchTableOffset); + while (*record != ATOM_RECORD_END_TYPE) { + switch (*record) { + case LCD_MODE_PATCH_RECORD_MODE_TYPE: + record += sizeof(ATOM_PATCH_RECORD_MODE); + break; + case LCD_RTS_RECORD_TYPE: + record += sizeof(ATOM_LCD_RTS_RECORD); + break; + case LCD_CAP_RECORD_TYPE: + record += sizeof(ATOM_LCD_MODE_CONTROL_CAP); + break; + case LCD_FAKE_EDID_PATCH_RECORD_TYPE: + fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; + if (fake_edid_record->ucFakeEDIDLength) { + struct edid *edid; + int edid_size = + max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength); + edid = kmalloc(edid_size, GFP_KERNEL); + if (edid) { + memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0], + fake_edid_record->ucFakeEDIDLength); + + if (drm_edid_is_valid(edid)) + rdev->mode_info.bios_hardcoded_edid = edid; + else + kfree(edid); + } + } + record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD); + break; + case LCD_PANEL_RESOLUTION_RECORD_TYPE: + panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; + lvds->native_mode.width_mm = panel_res_record->usHSize; + lvds->native_mode.height_mm = panel_res_record->usVSize; + record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD); + break; + default: + DRM_ERROR("Bad LCD record %d\n", *record); + bad_record = true; + break; + } + if (bad_record) + break; + } + } } return lvds; } diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 3bddea5b5295..111a844c1ecb 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -471,8 +471,9 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) return true; } +/* this is used for atom LCDs as well */ struct edid * -radeon_combios_get_hardcoded_edid(struct radeon_device *rdev) +radeon_bios_get_hardcoded_edid(struct radeon_device *rdev) { if (rdev->mode_info.bios_hardcoded_edid) return rdev->mode_info.bios_hardcoded_edid; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 7b17e639ab32..acebbc76c2f9 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -679,9 +679,17 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) if (!radeon_connector->edid) { radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); } - /* some servers provide a hardcoded edid in rom for KVMs */ - if (!radeon_connector->edid) - radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev); + + if 
(!radeon_connector->edid) { + if (rdev->is_atom_bios) { + /* some laptops provide a hardcoded edid in rom for LCDs */ + if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) || + (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP))) + radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); + } else + /* some servers provide a hardcoded edid in rom for KVMs */ + radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); + } if (radeon_connector->edid) { drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index f406f02bf14e..fd185f783a31 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -566,7 +566,7 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); extern struct edid * -radeon_combios_get_hardcoded_edid(struct radeon_device *rdev); +radeon_bios_get_hardcoded_edid(struct radeon_device *rdev); extern bool radeon_atom_get_clock_info(struct drm_device *dev); extern bool radeon_combios_get_clock_info(struct drm_device *dev); extern struct radeon_encoder_atom_dig * -- cgit v1.2.3 From 3ee0128140eed7d32b785a335099a2ec38258283 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 15 Dec 2010 11:04:39 +1000 Subject: drm/nouveau: modify vm to accomodate dual page tables for nvc0 Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_vm.c | 59 ++++++++++++++++------------------ drivers/gpu/drm/nouveau/nouveau_vm.h | 16 ++++----- drivers/gpu/drm/nouveau/nv50_instmem.c | 16 ++++----- drivers/gpu/drm/nouveau/nv50_vm.c | 49 ++++++++++++++-------------- 4 files changed, 65 insertions(+), 75 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c index 07ab1749cf7d..b023a64c27d8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vm.c +++ b/drivers/gpu/drm/nouveau/nouveau_vm.c @@ -32,6 +32,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram) { struct nouveau_vm *vm = vma->vm; struct nouveau_mm_node *r; + int big = vma->node->type != vm->spg_shift; u32 offset = vma->node->offset + (delta >> 12); u32 bits = vma->node->type - 12; u32 pde = (offset >> vm->pgt_bits) - vm->fpde; @@ -44,7 +45,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram) u32 num = r->length >> bits; while (num) { - struct nouveau_gpuobj *pgt = vm->pgt[pde].obj; + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big]; end = (pte + num); if (unlikely(end >= max)) @@ -76,6 +77,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length, dma_addr_t *list) { struct nouveau_vm *vm = vma->vm; + int big = vma->node->type != vm->spg_shift; u32 offset = vma->node->offset + (delta >> 12); u32 bits = vma->node->type - 12; u32 num = length >> vma->node->type; @@ -85,7 +87,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length, u32 end, len; while (num) { - struct nouveau_gpuobj *pgt = vm->pgt[pde].obj; + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big]; end = (pte + num); if (unlikely(end >= max)) @@ -110,6 +112,7 @@ void nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length) { struct nouveau_vm *vm = vma->vm; + int big = vma->node->type != vm->spg_shift; u32 offset = vma->node->offset + (delta >> 12); u32 bits = 
vma->node->type - 12; u32 num = length >> vma->node->type; @@ -119,7 +122,7 @@ nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length) u32 end, len; while (num) { - struct nouveau_gpuobj *pgt = vm->pgt[pde].obj; + struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big]; end = (pte + num); if (unlikely(end >= max)) @@ -146,7 +149,7 @@ nouveau_vm_unmap(struct nouveau_vma *vma) } static void -nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde) +nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde) { struct nouveau_vm_pgd *vpgd; struct nouveau_vm_pgt *vpgt; @@ -155,16 +158,16 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, u32 fpde, u32 lpde) for (pde = fpde; pde <= lpde; pde++) { vpgt = &vm->pgt[pde - vm->fpde]; - if (--vpgt->refcount) + if (--vpgt->refcount[big]) continue; + pgt = vpgt->obj[big]; + vpgt->obj[big] = NULL; + list_for_each_entry(vpgd, &vm->pgd_list, head) { - vm->unmap_pgt(vpgd->obj, pde); + vm->map_pgt(vpgd->obj, pde, vpgt->obj); } - pgt = vpgt->obj; - vpgt->obj = NULL; - mutex_unlock(&vm->mm->mutex); nouveau_gpuobj_ref(NULL, &pgt); mutex_lock(&vm->mm->mutex); @@ -177,6 +180,7 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type) struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; struct nouveau_vm_pgd *vpgd; struct nouveau_gpuobj *pgt; + int big = (type != vm->spg_shift); u32 pgt_size; int ret; @@ -191,19 +195,18 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type) return ret; /* someone beat us to filling the PDE while we didn't have the lock */ - if (unlikely(vpgt->refcount++)) { + if (unlikely(vpgt->refcount[big]++)) { mutex_unlock(&vm->mm->mutex); nouveau_gpuobj_ref(NULL, &pgt); mutex_lock(&vm->mm->mutex); return 0; } + vpgt->obj[big] = pgt; list_for_each_entry(vpgd, &vm->pgd_list, head) { - vm->map_pgt(vpgd->obj, type, pde, pgt); + vm->map_pgt(vpgd->obj, pde, vpgt->obj); } - vpgt->page_shift = type; - vpgt->obj = pgt; return 0; } @@ -227,16 +230,17 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift, lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; for (pde = fpde; pde <= lpde; pde++) { struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde]; + int big = (vma->node->type != vm->spg_shift); - if (likely(vpgt->refcount)) { - vpgt->refcount++; + if (likely(vpgt->refcount[big])) { + vpgt->refcount[big]++; continue; } ret = nouveau_vm_map_pgt(vm, pde, vma->node->type); if (ret) { if (pde != fpde) - nouveau_vm_unmap_pgt(vm, fpde, pde - 1); + nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1); nouveau_mm_put(vm->mm, vma->node); mutex_unlock(&vm->mm->mutex); vma->node = NULL; @@ -263,21 +267,20 @@ nouveau_vm_put(struct nouveau_vma *vma) lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits; mutex_lock(&vm->mm->mutex); + nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde); nouveau_mm_put(vm->mm, vma->node); vma->node = NULL; - nouveau_vm_unmap_pgt(vm, fpde, lpde); mutex_unlock(&vm->mm->mutex); } int nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset, - u8 pgt_bits, u8 spg_shift, u8 lpg_shift, struct nouveau_vm **pvm) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_vm *vm; u64 mm_length = (offset + length) - mm_offset; - u32 block; + u32 block, pgt_bits; int ret; vm = kzalloc(sizeof(*vm), GFP_KERNEL); @@ -286,11 +289,13 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset, if (dev_priv->card_type == NV_50) { vm->map_pgt = nv50_vm_map_pgt; - vm->unmap_pgt = nv50_vm_unmap_pgt; vm->map = 
nv50_vm_map; vm->map_sg = nv50_vm_map_sg; vm->unmap = nv50_vm_unmap; vm->flush = nv50_vm_flush; + vm->spg_shift = 12; + vm->lpg_shift = 16; + pgt_bits = 29; } else { kfree(vm); return -ENOSYS; @@ -308,8 +313,6 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset, vm->dev = dev; vm->refcount = 1; vm->pgt_bits = pgt_bits - 12; - vm->spg_shift = spg_shift; - vm->lpg_shift = lpg_shift; block = (1 << pgt_bits); if (length < block) @@ -342,16 +345,8 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd) nouveau_gpuobj_ref(pgd, &vpgd->obj); mutex_lock(&vm->mm->mutex); - for (i = vm->fpde; i <= vm->lpde; i++) { - struct nouveau_vm_pgt *vpgt = &vm->pgt[i - vm->fpde]; - - if (!vpgt->obj) { - vm->unmap_pgt(pgd, i); - continue; - } - - vm->map_pgt(pgd, vpgt->page_shift, i, vpgt->obj); - } + for (i = vm->fpde; i <= vm->lpde; i++) + vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj); list_add(&vpgd->head, &vm->pgd_list); mutex_unlock(&vm->mm->mutex); return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h index b6755cfa7b71..105b6f65f19d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vm.h +++ b/drivers/gpu/drm/nouveau/nouveau_vm.h @@ -31,9 +31,8 @@ #include "nouveau_mm.h" struct nouveau_vm_pgt { - struct nouveau_gpuobj *obj; - u32 page_shift; - u32 refcount; + struct nouveau_gpuobj *obj[2]; + u32 refcount[2]; }; struct nouveau_vm_pgd { @@ -65,9 +64,8 @@ struct nouveau_vm { u8 spg_shift; u8 lpg_shift; - void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 type, u32 pde, - struct nouveau_gpuobj *pgt); - void (*unmap_pgt)(struct nouveau_gpuobj *pgd, u32 pde); + void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde, + struct nouveau_gpuobj *pgt[2]); void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *, struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *, @@ -78,7 +76,6 @@ struct nouveau_vm { /* nouveau_vm.c */ int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset, - u8 pgt_bits, u8 spg_shift, u8 lpg_shift, struct nouveau_vm **); int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **, struct nouveau_gpuobj *pgd); @@ -93,9 +90,8 @@ void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length, dma_addr_t *); /* nv50_vm.c */ -void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde, - struct nouveau_gpuobj *pgt); -void nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde); +void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, + struct nouveau_gpuobj *pgt[2]); void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *, struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *, diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index adac4da98f7e..2e1b1cd19a4b 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -151,20 +151,19 @@ nv50_instmem_init(struct drm_device *dev) /* BAR3 */ ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE, - 29, 12, 16, &dev_priv->bar3_vm); + &dev_priv->bar3_vm); if (ret) goto error; ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8, 0x1000, NVOBJ_FLAG_DONT_MAP | NVOBJ_FLAG_ZERO_ALLOC, - &dev_priv->bar3_vm->pgt[0].obj); + &dev_priv->bar3_vm->pgt[0].obj[0]); if (ret) goto error; - dev_priv->bar3_vm->pgt[0].page_shift = 12; - dev_priv->bar3_vm->pgt[0].refcount = 1; + dev_priv->bar3_vm->pgt[0].refcount[0] = 1; - 
nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj); + nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]); ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan); if (ret) @@ -195,8 +194,7 @@ nv50_instmem_init(struct drm_device *dev) nv_wo32(chan->ramin, 0, tmp); /* BAR1 */ - ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, - 29, 12, 16, &vm); + ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm); if (ret) goto error; @@ -220,7 +218,7 @@ nv50_instmem_init(struct drm_device *dev) * to catch "NULL pointer" references */ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL, - 29, 12, 16, &dev_priv->chan_vm); + &dev_priv->chan_vm); if (ret) return ret; @@ -258,7 +256,7 @@ nv50_instmem_takedown(struct drm_device *dev) dev_priv->channels.ptr[127] = 0; nv50_channel_del(&dev_priv->channels.ptr[0]); - nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj); + nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]); nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL); if (dev_priv->ramin_heap.free_stack.next) diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c index 7939387f7f80..38e523e10995 100644 --- a/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/drivers/gpu/drm/nouveau/nv50_vm.c @@ -28,39 +28,40 @@ #include "nouveau_vm.h" void -nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 type, u32 pde, - struct nouveau_gpuobj *pgt) +nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, + struct nouveau_gpuobj *pgt[2]) { struct drm_nouveau_private *dev_priv = pgd->dev->dev_private; - u32 coverage = (pgt->size >> 3) << type; - u64 phys; - - phys = pgt->vinst; - phys |= 0x01; /* present */ - phys |= (type == 12) ? 0x02 : 0x00; /* 4KiB pages */ - if (dev_priv->vram_sys_base) { - phys += dev_priv->vram_sys_base; - phys |= 0x30; + u64 phys = 0xdeadcafe00000000ULL; + u32 coverage = 0; + + if (pgt[0]) { + phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */ + coverage = (pgt[0]->size >> 3) << 12; + } else + if (pgt[1]) { + phys = 0x00000001 | pgt[1]->vinst; /* present */ + coverage = (pgt[1]->size >> 3) << 16; } - if (coverage <= 32 * 1024 * 1024) - phys |= 0x60; - else if (coverage <= 64 * 1024 * 1024) - phys |= 0x40; - else if (coverage < 128 * 1024 * 1024) - phys |= 0x20; + if (phys & 1) { + if (dev_priv->vram_sys_base) { + phys += dev_priv->vram_sys_base; + phys |= 0x30; + } + + if (coverage <= 32 * 1024 * 1024) + phys |= 0x60; + else if (coverage <= 64 * 1024 * 1024) + phys |= 0x40; + else if (coverage < 128 * 1024 * 1024) + phys |= 0x20; + } nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys)); nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys)); } -void -nv50_vm_unmap_pgt(struct nouveau_gpuobj *pgd, u32 pde) -{ - nv_wo32(pgd, (pde * 8) + 0, 0x00000000); - nv_wo32(pgd, (pde * 8) + 4, 0xdeadcafe); -} - static inline u64 nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, u64 phys, u32 memtype, u32 target) -- cgit v1.2.3 From 4c74eb7ff276813ee73943a3756b295675fb2865 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 10 Nov 2010 14:10:04 +1000 Subject: drm/nvc0: import initial vm backend Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 2 +- drivers/gpu/drm/nouveau/nouveau_bo.c | 5 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nouveau_mem.c | 4 ++ drivers/gpu/drm/nouveau/nouveau_vm.c | 31 +++++++-- drivers/gpu/drm/nouveau/nouveau_vm.h | 10 +++ drivers/gpu/drm/nouveau/nvc0_vm.c | 123 ++++++++++++++++++++++++++++++++++ 7 files changed, 169 insertions(+), 7 deletions(-) create mode 
100644 drivers/gpu/drm/nouveau/nvc0_vm.c diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index b1d8941e04d8..e89d89593af1 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -28,7 +28,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv10_gpio.o nv50_gpio.o \ nv50_calc.o \ nv04_pm.o nv50_pm.o nva3_pm.o \ - nv50_vram.o nv50_vm.o + nv50_vram.o nv50_vm.o nvc0_vm.o nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 42d1ad62b381..d17ffea3c617 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -909,8 +909,9 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) break; } - ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, 12, - NV_MEM_ACCESS_RW, &vram->bar_vma); + ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, + vram->page_shift, NV_MEM_ACCESS_RW, + &vram->bar_vma); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 8f13906185b2..57da219fb18b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -69,6 +69,7 @@ struct nouveau_vram { struct drm_device *dev; struct nouveau_vma bar_vma; + u8 page_shift; struct list_head regions; u32 memtype; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 224181193a1f..07be1dd04530 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -731,6 +731,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, if (ret) return ret; + node->page_shift = 12; + if (nvbo->vma.node) + node->page_shift = nvbo->vma.node->type; + mem->mm_node = node; mem->start = node->offset >> PAGE_SHIFT; return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c index b023a64c27d8..97d82aedf86b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vm.c +++ b/drivers/gpu/drm/nouveau/nouveau_vm.c @@ -295,7 +295,34 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset, vm->flush = nv50_vm_flush; vm->spg_shift = 12; vm->lpg_shift = 16; + pgt_bits = 29; + block = (1 << pgt_bits); + if (length < block) + block = length; + + } else + if (dev_priv->card_type == NV_C0) { + vm->map_pgt = nvc0_vm_map_pgt; + vm->map = nvc0_vm_map; + vm->map_sg = nvc0_vm_map_sg; + vm->unmap = nvc0_vm_unmap; + vm->flush = nvc0_vm_flush; + vm->spg_shift = 12; + vm->lpg_shift = 17; + pgt_bits = 27; + + /* Should be 4096 everywhere, this is a hack that's + * currently necessary to avoid an elusive bug that + * causes corruption when mixing small/large pages + */ + if (length < (1ULL << 40)) + block = 4096; + else { + block = (1 << pgt_bits); + if (length < block) + block = length; + } } else { kfree(vm); return -ENOSYS; @@ -314,10 +341,6 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset, vm->refcount = 1; vm->pgt_bits = pgt_bits - 12; - block = (1 << pgt_bits); - if (length < block) - block = length; - ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12, block >> 12); if (ret) { diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h index 105b6f65f19d..e1193515771b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vm.h +++ b/drivers/gpu/drm/nouveau/nouveau_vm.h @@ -100,4 
+100,14 @@ void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt); void nv50_vm_flush(struct nouveau_vm *); void nv50_vm_flush_engine(struct drm_device *, int engine); +/* nvc0_vm.c */ +void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde, + struct nouveau_gpuobj *pgt[2]); +void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *, + struct nouveau_vram *, u32 pte, u32 cnt, u64 phys); +void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *, + u32 pte, dma_addr_t *, u32 cnt); +void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt); +void nvc0_vm_flush(struct nouveau_vm *); + #endif diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c new file mode 100644 index 000000000000..4b9251bb0ff4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_vm.c @@ -0,0 +1,123 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" + +#include "nouveau_drv.h" +#include "nouveau_vm.h" + +void +nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index, + struct nouveau_gpuobj *pgt[2]) +{ + u32 pde[2] = { 0, 0 }; + + if (pgt[0]) + pde[1] = 0x00000001 | (pgt[0]->vinst >> 8); + if (pgt[1]) + pde[0] = 0x00000001 | (pgt[1]->vinst >> 8); + + nv_wo32(pgd, (index * 8) + 0, pde[0]); + nv_wo32(pgd, (index * 8) + 4, pde[1]); +} + +static inline u64 +nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target) +{ + phys >>= 8; + + phys |= 0x00000001; /* present */ +// if (vma->access & NV_MEM_ACCESS_SYS) +// phys |= 0x00000002; + + phys |= ((u64)target << 32); + phys |= ((u64)memtype << 36); + + return phys; +} + +void +nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys) +{ + u32 next = 1 << (vma->node->type - 8); + + phys = nvc0_vm_addr(vma, phys, mem->memtype, 0); + pte <<= 3; + while (cnt--) { + nv_wo32(pgt, pte + 0, lower_32_bits(phys)); + nv_wo32(pgt, pte + 4, upper_32_bits(phys)); + phys += next; + pte += 8; + } +} + +void +nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt, + u32 pte, dma_addr_t *list, u32 cnt) +{ + pte <<= 3; + while (cnt--) { + u64 phys = nvc0_vm_addr(vma, *list++, 0, 5); + nv_wo32(pgt, pte + 0, lower_32_bits(phys)); + nv_wo32(pgt, pte + 4, upper_32_bits(phys)); + pte += 8; + } +} + +void +nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt) +{ + pte <<= 3; + while (cnt--) { + nv_wo32(pgt, pte + 0, 0x00000000); + nv_wo32(pgt, pte + 4, 0x00000000); + pte += 8; + } +} + +void +nvc0_vm_flush(struct nouveau_vm *vm) +{ + struct drm_nouveau_private *dev_priv = vm->dev->dev_private; + struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; + struct drm_device *dev = vm->dev; + struct nouveau_vm_pgd *vpgd; + u32 r100c80, engine; + + pinstmem->flush(vm->dev); + + if (vm == dev_priv->chan_vm) + engine = 1; + else + engine = 5; + + list_for_each_entry(vpgd, &vm->pgd_list, head) { + r100c80 = nv_rd32(dev, 0x100c80); + nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8); + nv_wr32(dev, 0x100cbc, 0x80000000 | engine); + if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80)) + NV_ERROR(dev, "vm flush timeout eng %d\n", engine); + } +} -- cgit v1.2.3 From 8984e046153eb1d6b0b24626169f9c6e58232e1b Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Mon, 15 Nov 2010 11:48:33 +1000 Subject: drm/nvc0: initial vm implementation, use for bar1/bar3 management Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 3 +- drivers/gpu/drm/nouveau/nouveau_bo.c | 18 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 8 +- drivers/gpu/drm/nouveau/nouveau_mem.c | 3 - drivers/gpu/drm/nouveau/nouveau_mm.h | 5 + drivers/gpu/drm/nouveau/nouveau_state.c | 16 +- drivers/gpu/drm/nouveau/nvc0_instmem.c | 326 ++++++++++++++------------------ drivers/gpu/drm/nouveau/nvc0_vram.c | 91 +++++++++ 8 files changed, 266 insertions(+), 204 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nvc0_vram.c diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index e89d89593af1..d9d22ffff81e 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -28,7 +28,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv10_gpio.o nv50_gpio.o \ nv50_calc.o \ nv04_pm.o nv50_pm.o nva3_pm.o \ - nv50_vram.o nv50_vm.o nvc0_vm.o + nv50_vram.o nvc0_vram.o \ + nv50_vm.o nvc0_vm.o nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o 
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index d17ffea3c617..6f3096a50297 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -120,6 +120,9 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, align >>= PAGE_SHIFT; if (!nvbo->no_vm && dev_priv->chan_vm) { + if (dev_priv->card_type == NV_C0) + page_shift = 12; + ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift, NV_MEM_ACCESS_RW, &nvbo->vma); if (ret) { @@ -413,7 +416,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_VRAM: - if (dev_priv->card_type == NV_50) { + if (dev_priv->card_type >= NV_50) { man->func = &nouveau_vram_manager; man->io_reserve_fastpath = false; man->use_io_reserve_lru = true; @@ -901,6 +904,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) case TTM_PL_VRAM: { struct nouveau_vram *vram = mem->mm_node; + u8 page_shift; if (!dev_priv->bar1_vm) { mem->bus.offset = mem->start << PAGE_SHIFT; @@ -909,8 +913,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) break; } + if (dev_priv->card_type == NV_C0) + page_shift = vram->page_shift; + else + page_shift = 12; + ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, - vram->page_shift, NV_MEM_ACCESS_RW, + page_shift, NV_MEM_ACCESS_RW, &vram->bar_vma); if (ret) return ret; @@ -921,8 +930,9 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) return ret; } - mem->bus.offset = vram->bar_vma.offset; - mem->bus.offset -= 0x0020000000ULL; + mem->bus.offset = vram->bar_vma.offset; + if (dev_priv->card_type == NV_50) /*XXX*/ + mem->bus.offset -= 0x0020000000ULL; mem->bus.base = pci_resource_start(dev->pdev, 1); mem->bus.is_iomem = true; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 57da219fb18b..c1e85c4c7ac2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -842,6 +842,9 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_fence *fence); extern const struct ttm_mem_type_manager_func nouveau_vram_manager; +/* nvc0_vram.c */ +extern const struct ttm_mem_type_manager_func nvc0_vram_manager; + /* nouveau_notifier.c */ extern int nouveau_notifier_init_channel(struct nouveau_channel *); extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); @@ -1229,11 +1232,6 @@ extern int nvc0_instmem_init(struct drm_device *); extern void nvc0_instmem_takedown(struct drm_device *); extern int nvc0_instmem_suspend(struct drm_device *); extern void nvc0_instmem_resume(struct drm_device *); -extern int nvc0_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align); -extern void nvc0_instmem_put(struct nouveau_gpuobj *); -extern int nvc0_instmem_map(struct nouveau_gpuobj *); -extern void nvc0_instmem_unmap(struct nouveau_gpuobj *); -extern void nvc0_instmem_flush(struct drm_device *); /* nv04_mc.c */ extern int nv04_mc_init(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 07be1dd04530..69044eb104bb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -255,9 +255,6 @@ nouveau_mem_detect(struct drm_device *dev) if (dev_priv->card_type < NV_50) { dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); 
dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; - } else { - dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; - dev_priv->vram_size *= nv_rd32(dev, 0x121c74); } if (dev_priv->vram_size) diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h index 250e642de0a7..af3844933036 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mm.h +++ b/drivers/gpu/drm/nouveau/nouveau_mm.h @@ -59,4 +59,9 @@ int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc, void nv50_vram_del(struct drm_device *, struct nouveau_vram **); bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags); +int nvc0_vram_init(struct drm_device *); +int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin, + u32 memtype, struct nouveau_vram **); +bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags); + #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 8eac943e8fd2..813790f4c7c7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -464,11 +464,11 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.takedown = nvc0_instmem_takedown; engine->instmem.suspend = nvc0_instmem_suspend; engine->instmem.resume = nvc0_instmem_resume; - engine->instmem.get = nvc0_instmem_get; - engine->instmem.put = nvc0_instmem_put; - engine->instmem.map = nvc0_instmem_map; - engine->instmem.unmap = nvc0_instmem_unmap; - engine->instmem.flush = nvc0_instmem_flush; + engine->instmem.get = nv50_instmem_get; + engine->instmem.put = nv50_instmem_put; + engine->instmem.map = nv50_instmem_map; + engine->instmem.unmap = nv50_instmem_unmap; + engine->instmem.flush = nv84_instmem_flush; engine->mc.init = nv50_mc_init; engine->mc.takedown = nv50_mc_takedown; engine->timer.init = nv04_timer_init; @@ -509,8 +509,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->gpio.irq_enable = nv50_gpio_irq_enable; engine->crypt.init = nouveau_stub_init; engine->crypt.takedown = nouveau_stub_takedown; - engine->vram.init = nouveau_mem_detect; - engine->vram.flags_valid = nouveau_mem_flags_valid; + engine->vram.init = nvc0_vram_init; + engine->vram.get = nvc0_vram_new; + engine->vram.put = nv50_vram_del; + engine->vram.flags_valid = nvc0_vram_flags_valid; break; default: NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 39232085193d..211099736553 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c @@ -25,233 +25,191 @@ #include "drmP.h" #include "nouveau_drv.h" +#include "nouveau_vm.h" -struct nvc0_gpuobj_node { - struct nouveau_bo *vram; - struct drm_mm_node *ramin; - u32 align; +struct nvc0_instmem_priv { + struct nouveau_gpuobj *bar1_pgd; + struct nouveau_channel *bar1; + struct nouveau_gpuobj *bar3_pgd; + struct nouveau_channel *bar3; }; int -nvc0_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) +nvc0_instmem_suspend(struct drm_device *dev) { - struct drm_device *dev = gpuobj->dev; - struct nvc0_gpuobj_node *node = NULL; - int ret; - - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (!node) - return -ENOMEM; - node->align = align; - - ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM, - 0, 0x0000, true, false, &node->vram); - if (ret) { - NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); - return ret; - } - - ret = nouveau_bo_pin(node->vram, 
TTM_PL_FLAG_VRAM); - if (ret) { - NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); - nouveau_bo_ref(NULL, &node->vram); - return ret; - } + struct drm_nouveau_private *dev_priv = dev->dev_private; - gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT; - gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT; - gpuobj->node = node; + dev_priv->ramin_available = false; return 0; } void -nvc0_instmem_put(struct nouveau_gpuobj *gpuobj) +nvc0_instmem_resume(struct drm_device *dev) { - struct nvc0_gpuobj_node *node; - - node = gpuobj->node; - gpuobj->node = NULL; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv; - nouveau_bo_unpin(node->vram); - nouveau_bo_ref(NULL, &node->vram); - kfree(node); + nv_mask(dev, 0x100c80, 0x00000001, 0x00000000); + nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12); + nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12); + dev_priv->ramin_available = true; } -int -nvc0_instmem_map(struct nouveau_gpuobj *gpuobj) +static void +nvc0_channel_del(struct nouveau_channel **pchan) { - struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; - struct nvc0_gpuobj_node *node = gpuobj->node; - struct drm_device *dev = gpuobj->dev; - struct drm_mm_node *ramin = NULL; - u32 pte, pte_end; - u64 vram; - - do { - if (drm_mm_pre_get(&dev_priv->ramin_heap)) - return -ENOMEM; - - spin_lock(&dev_priv->ramin_lock); - ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size, - node->align, 0); - if (ramin == NULL) { - spin_unlock(&dev_priv->ramin_lock); - return -ENOMEM; - } - - ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align); - spin_unlock(&dev_priv->ramin_lock); - } while (ramin == NULL); - - pte = (ramin->start >> 12) << 1; - pte_end = ((ramin->size >> 12) << 1) + pte; - vram = gpuobj->vinst; - - NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", - ramin->start, pte, pte_end); - NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); - - while (pte < pte_end) { - nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1); - nv_wr32(dev, 0x702004 + (pte * 8), 0); - vram += 4096; - pte++; - } - dev_priv->engine.instmem.flush(dev); + struct nouveau_channel *chan; - if (1) { - u32 chan = nv_rd32(dev, 0x1700) << 16; - nv_wr32(dev, 0x100cb8, (chan + 0x1000) >> 8); - nv_wr32(dev, 0x100cbc, 0x80000005); - } + chan = *pchan; + *pchan = NULL; + if (!chan) + return; - node->ramin = ramin; - gpuobj->pinst = ramin->start; - return 0; + nouveau_vm_ref(NULL, &chan->vm, NULL); + if (chan->ramin_heap.free_stack.next) + drm_mm_takedown(&chan->ramin_heap); + nouveau_gpuobj_ref(NULL, &chan->ramin); + kfree(chan); } -void -nvc0_instmem_unmap(struct nouveau_gpuobj *gpuobj) +static int +nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm, + struct nouveau_channel **pchan, + struct nouveau_gpuobj *pgd, u64 vm_size) { - struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; - struct nvc0_gpuobj_node *node = gpuobj->node; - u32 pte, pte_end; + struct nouveau_channel *chan; + int ret; - if (!node->ramin || !dev_priv->ramin_available) - return; + chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (!chan) + return -ENOMEM; + chan->dev = dev; - pte = (node->ramin->start >> 12) << 1; - pte_end = ((node->ramin->size >> 12) << 1) + pte; + ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin); + if (ret) { + nvc0_channel_del(&chan); + return ret; + } - while (pte < pte_end) { - nv_wr32(gpuobj->dev, 0x702000 + (pte * 8), 
0); - nv_wr32(gpuobj->dev, 0x702004 + (pte * 8), 0); - pte++; + ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000); + if (ret) { + nvc0_channel_del(&chan); + return ret; } - dev_priv->engine.instmem.flush(gpuobj->dev); - spin_lock(&dev_priv->ramin_lock); - drm_mm_put_block(node->ramin); - node->ramin = NULL; - spin_unlock(&dev_priv->ramin_lock); -} + ret = nouveau_vm_ref(vm, &chan->vm, NULL); + if (ret) { + nvc0_channel_del(&chan); + return ret; + } -void -nvc0_instmem_flush(struct drm_device *dev) -{ - nv_wr32(dev, 0x070000, 1); - if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) - NV_ERROR(dev, "PRAMIN flush timeout\n"); + nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst)); + nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst)); + nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1)); + nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1)); + + *pchan = chan; + return 0; } int -nvc0_instmem_suspend(struct drm_device *dev) +nvc0_instmem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - u32 *buf; - int i; + struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; + struct pci_dev *pdev = dev->pdev; + struct nvc0_instmem_priv *priv; + struct nouveau_vm *vm = NULL; + int ret; - dev_priv->susres.ramin_copy = vmalloc(65536); - if (!dev_priv->susres.ramin_copy) + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) return -ENOMEM; - buf = dev_priv->susres.ramin_copy; - - for (i = 0; i < 65536; i += 4) - buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i); + pinstmem->priv = priv; + + /* BAR3 VM */ + ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0, + &dev_priv->bar3_vm); + if (ret) + goto error; + + ret = nouveau_gpuobj_new(dev, NULL, + (pci_resource_len(pdev, 3) >> 12) * 8, 0, + NVOBJ_FLAG_DONT_MAP | + NVOBJ_FLAG_ZERO_ALLOC, + &dev_priv->bar3_vm->pgt[0].obj[0]); + if (ret) + goto error; + dev_priv->bar3_vm->pgt[0].refcount[0] = 1; + + nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]); + + ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, + NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd); + if (ret) + goto error; + + ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd); + if (ret) + goto error; + nouveau_vm_ref(NULL, &vm, NULL); + + ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3, + priv->bar3_pgd, pci_resource_len(dev->pdev, 3)); + if (ret) + goto error; + + /* BAR1 VM */ + ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm); + if (ret) + goto error; + + ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, + NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd); + if (ret) + goto error; + + ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd); + if (ret) + goto error; + nouveau_vm_ref(NULL, &vm, NULL); + + ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1, + priv->bar1_pgd, pci_resource_len(dev->pdev, 1)); + if (ret) + goto error; + + nvc0_instmem_resume(dev); return 0; +error: + nvc0_instmem_takedown(dev); + return ret; } void -nvc0_instmem_resume(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - u32 *buf = dev_priv->susres.ramin_copy; - u64 chan; - int i; - - chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; - nv_wr32(dev, 0x001700, chan >> 16); - - for (i = 0; i < 65536; i += 4) - nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]); - vfree(dev_priv->susres.ramin_copy); - dev_priv->susres.ramin_copy = NULL; - - nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12)); -} - -int -nvc0_instmem_init(struct drm_device *dev) +nvc0_instmem_takedown(struct 
drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - u64 chan, pgt3, imem, lim3 = dev_priv->ramin_size - 1; - int ret, i; - - dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024; - chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram; - imem = 4096 + 4096 + 32768; - - nv_wr32(dev, 0x001700, chan >> 16); - - /* channel setup */ - nv_wr32(dev, 0x700200, lower_32_bits(chan + 0x1000)); - nv_wr32(dev, 0x700204, upper_32_bits(chan + 0x1000)); - nv_wr32(dev, 0x700208, lower_32_bits(lim3)); - nv_wr32(dev, 0x70020c, upper_32_bits(lim3)); - - /* point pgd -> pgt */ - nv_wr32(dev, 0x701000, 0); - nv_wr32(dev, 0x701004, ((chan + 0x2000) >> 8) | 1); - - /* point pgt -> physical vram for channel */ - pgt3 = 0x2000; - for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4096, pgt3 += 8) { - nv_wr32(dev, 0x700000 + pgt3, ((chan + i) >> 8) | 1); - nv_wr32(dev, 0x700004 + pgt3, 0); - } + struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv; + struct nouveau_vm *vm = NULL; - /* clear rest of pgt */ - for (; i < dev_priv->ramin_size; i += 4096, pgt3 += 8) { - nv_wr32(dev, 0x700000 + pgt3, 0); - nv_wr32(dev, 0x700004 + pgt3, 0); - } + nvc0_instmem_suspend(dev); - /* point bar3 at the channel */ - nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12)); + nv_wr32(dev, 0x1704, 0x00000000); + nv_wr32(dev, 0x1714, 0x00000000); - /* Global PRAMIN heap */ - ret = drm_mm_init(&dev_priv->ramin_heap, imem, - dev_priv->ramin_size - imem); - if (ret) { - NV_ERROR(dev, "Failed to init RAMIN heap\n"); - return -ENOMEM; - } + nvc0_channel_del(&priv->bar1); + nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd); + nouveau_gpuobj_ref(NULL, &priv->bar1_pgd); - return 0; -} + nvc0_channel_del(&priv->bar3); + nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL); + nouveau_vm_ref(NULL, &vm, priv->bar3_pgd); + nouveau_gpuobj_ref(NULL, &priv->bar3_pgd); + nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]); + nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL); -void -nvc0_instmem_takedown(struct drm_device *dev) -{ + dev_priv->engine.instmem.priv = NULL; + kfree(priv); } diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c new file mode 100644 index 000000000000..41fcae5ffba4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvc0_vram.c @@ -0,0 +1,91 @@ +/* + * Copyright 2010 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_mm.h" + +bool +nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags) +{ + if (likely(!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))) + return true; + return false; +} + +int +nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin, + u32 type, struct nouveau_vram **pvram) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; + struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM]; + struct nouveau_mm *mm = man->priv; + struct nouveau_mm_node *r; + struct nouveau_vram *vram; + int ret; + + size >>= 12; + align >>= 12; + ncmin >>= 12; + + vram = kzalloc(sizeof(*vram), GFP_KERNEL); + if (!vram) + return -ENOMEM; + + INIT_LIST_HEAD(&vram->regions); + vram->dev = dev_priv->dev; + vram->memtype = type; + vram->size = size; + + mutex_lock(&mm->mutex); + do { + ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r); + if (ret) { + mutex_unlock(&mm->mutex); + nv50_vram_del(dev, &vram); + return ret; + } + + list_add_tail(&r->rl_entry, &vram->regions); + size -= r->length; + } while (size); + mutex_unlock(&mm->mutex); + + r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry); + vram->offset = (u64)r->offset << 12; + *pvram = vram; + return 0; +} + +int +nvc0_vram_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20; + dev_priv->vram_size *= nv_rd32(dev, 0x121c74); + dev_priv->vram_rblock_size = 4096; + return 0; +} -- cgit v1.2.3 From 99805566d93d69b7daf2373aba9fae52a62396f4 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 16 Nov 2010 14:58:42 +1000 Subject: drm/nvc0: create shared channel vm Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_instmem.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 211099736553..c09091749054 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c @@ -32,6 +32,7 @@ struct nvc0_instmem_priv { struct nouveau_channel *bar1; struct nouveau_gpuobj *bar3_pgd; struct nouveau_channel *bar3; + struct nouveau_gpuobj *chan_pgd; }; int @@ -179,6 +180,18 @@ nvc0_instmem_init(struct drm_device *dev) if (ret) goto error; + /* channel vm */ + ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm); + if (ret) + goto error; + + ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd); + if (ret) + goto error; + + nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd); + nouveau_vm_ref(NULL, &vm, NULL); + nvc0_instmem_resume(dev); return 0; error: @@ -198,6 +211,9 @@ nvc0_instmem_takedown(struct drm_device *dev) nv_wr32(dev, 0x1704, 0x00000000); nv_wr32(dev, 0x1714, 0x00000000); + nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd); + nouveau_gpuobj_ref(NULL, &priv->chan_pgd); + nvc0_channel_del(&priv->bar1); nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd); nouveau_gpuobj_ref(NULL, &priv->bar1_pgd); -- cgit v1.2.3 From 587107b690f567690982fb504047e7b0a6590344 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 24 Nov 2010 10:07:21 +1000 Subject: drm/nvc0: reject the notifier_alloc ioctl Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_notifier.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 
a050b7b69782..fe29d604b820 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -164,10 +164,15 @@ int nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_notifierobj_alloc *na = data; struct nouveau_channel *chan; int ret; + /* completely unnecessary for these chipsets... */ + if (unlikely(dev_priv->card_type >= NV_C0)) + return -EINVAL; + chan = nouveau_channel_get(dev, file_priv, na->channel); if (IS_ERR(chan)) return PTR_ERR(chan); -- cgit v1.2.3 From 7460d70355eb568817296d6d3364f72d72eeba70 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 24 Nov 2010 10:10:23 +1000 Subject: drm/nvc0: gpuobj_new need only check validity and init the relevant engine Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_object.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 55c9fdcfa67f..c64fd971b867 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -643,10 +643,13 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class) found: switch (oc->engine) { case NVOBJ_ENGINE_SW: - ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj); - if (ret) - return ret; - goto insert; + if (dev_priv->card_type < NV_C0) { + ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj); + if (ret) + return ret; + goto insert; + } + break; case NVOBJ_ENGINE_GR: if (dev_priv->card_type >= NV_50 && !chan->ramin_grctx) { struct nouveau_pgraph_engine *pgraph = @@ -669,6 +672,10 @@ found: break; } + /* we're done if this is fermi */ + if (dev_priv->card_type >= NV_C0) + return 0; + ret = nouveau_gpuobj_new(dev, chan, nouveau_gpuobj_class_instmem_size(dev, class), 16, -- cgit v1.2.3 From effd6e066f406277254ad7603cf2c7465c4f114b Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 24 Nov 2010 10:15:05 +1000 Subject: drm/nvc0: implement channel structure initialisation Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_object.c | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index c64fd971b867..d77b1fcd19d4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -779,6 +779,25 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); + if (dev_priv->card_type == NV_C0) { + struct nouveau_vm *vm = dev_priv->chan_vm; + struct nouveau_vm_pgd *vpgd; + + ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, + &chan->ramin); + if (ret) + return ret; + + nouveau_vm_ref(vm, &chan->vm, NULL); + + vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head); + nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst)); + nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst)); + nv_wo32(chan->ramin, 0x0208, 0xffffffff); + nv_wo32(chan->ramin, 0x020c, 0x000000ff); + return 0; + } + /* Allocate a chunk of memory for per-channel object storage */ ret = nouveau_gpuobj_channel_init_pramin(chan); if (ret) { @@ -786,7 +805,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, return ret; } - /* NV50/NVC0 VM + /* NV50 VM * - Allocate per-channel page-directory * - Link with shared channel VM */ @@ -884,9 
+903,6 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); - if (!chan->ramht) - return; - nouveau_ramht_ref(NULL, &chan->ramht, chan); nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd); -- cgit v1.2.3 From 5216782bf8c195de3befe0742a877c987dd3c4fd Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 24 Nov 2010 10:18:28 +1000 Subject: drm/nvc0: skip dma object creation for drm channel Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_state.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 813790f4c7c7..d5b17b6ccd3a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -552,6 +552,10 @@ nouveau_card_init_channel(struct drm_device *dev) if (ret) return ret; + /* no dma objects on fermi... */ + if (dev_priv->card_type >= NV_C0) + goto out_done; + ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->vram_size, NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM, @@ -576,6 +580,7 @@ nouveau_card_init_channel(struct drm_device *dev) if (ret) goto out_err; +out_done: mutex_unlock(&dev_priv->channel->mutex); return 0; -- cgit v1.2.3 From 96545299d7405d4c0f44b727718e263653fc11aa Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 24 Nov 2010 10:26:24 +1000 Subject: drm/nvc0: fix channel dma init paths Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 20 ++++++++++++++++---- drivers/gpu/drm/nouveau/nouveau_dma.c | 22 ++++++++++++++++------ drivers/gpu/drm/nouveau/nouveau_dma.h | 6 ++++++ 3 files changed, 38 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 6f37995aee2d..e37977d02463 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -38,9 +38,14 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) int ret; if (dev_priv->card_type >= NV_50) { - ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, - (1ULL << 40), NV_MEM_ACCESS_RO, - NV_MEM_TARGET_VM, &pushbuf); + if (dev_priv->card_type < NV_C0) { + ret = nouveau_gpuobj_dma_new(chan, + NV_CLASS_DMA_IN_MEMORY, 0, + (1ULL << 40), + NV_MEM_ACCESS_RO, + NV_MEM_TARGET_VM, + &pushbuf); + } chan->pushbuf_base = pb->bo.offset; } else if (pb->bo.mem.mem_type == TTM_PL_TT) { @@ -71,7 +76,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) nouveau_gpuobj_ref(pushbuf, &chan->pushbuf); nouveau_gpuobj_ref(NULL, &pushbuf); - return 0; + return ret; } static struct nouveau_bo * @@ -99,6 +104,13 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev) return NULL; } + ret = nouveau_bo_map(pushbuf); + if (ret) { + nouveau_bo_unpin(pushbuf); + nouveau_bo_ref(NULL, &pushbuf); + return NULL; + } + return pushbuf; } diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 6ff77cedc008..65699bfaaaea 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -36,7 +36,7 @@ nouveau_dma_pre_init(struct nouveau_channel *chan) struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct nouveau_bo *pushbuf = chan->pushbuf_bo; - if (dev_priv->card_type == NV_50) { + if (dev_priv->card_type >= NV_50) { const int ib_size = pushbuf->bo.mem.size / 2; chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2; @@ -61,6 +61,21 @@ nouveau_dma_init(struct nouveau_channel *chan) 
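/* The NV_C0 path added just below binds the 0x9039 (M2MF) class with the
 * BEGIN_NVC0() helper this patch introduces in nouveau_dma.h:
 * header = (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2).
 * A worked example, assuming NvSubM2MF == 0: */
u32 header = (2 << 28) | (1 << 16) | (0 << 13) | (0x0000 >> 2);
/* header == 0x20010000; the OUT_RING() that follows it supplies the one
 * data word, 0x00009039, for method 0x0000 on subchannel 0. */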
struct drm_nouveau_private *dev_priv = dev->dev_private; int ret, i; + if (dev_priv->card_type >= NV_C0) { + ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039); + if (ret) + return ret; + + ret = RING_SPACE(chan, 2); + if (ret) + return ret; + + BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1); + OUT_RING (chan, 0x00009039); + FIRE_RING (chan); + return 0; + } + /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */ ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ? 0x0039 : 0x5039); @@ -72,11 +87,6 @@ nouveau_dma_init(struct nouveau_channel *chan) if (ret) return ret; - /* Map push buffer */ - ret = nouveau_bo_map(chan->pushbuf_bo); - if (ret) - return ret; - /* Insert NOPS for NOUVEAU_DMA_SKIPS */ ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); if (ret) diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h index d578c21d3c8d..c118a331b5bc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.h +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h @@ -124,6 +124,12 @@ OUT_RING(struct nouveau_channel *chan, int data) extern void OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords); +static inline void +BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size) +{ + OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2)); +} + static inline void BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size) { -- cgit v1.2.3 From 529c4959129c7c425aaf3d5b6acc63edf76827ad Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 24 Nov 2010 10:30:22 +1000 Subject: drm/nvc0: implement fencing Just simple REF_CNT fencing for the moment. Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_fence.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 374a9793b85f..88b2f29ca3e4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -32,7 +32,8 @@ #include "nouveau_dma.h" #define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10) -#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17) +#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \ + nouveau_private(dev)->card_type < NV_C0) struct nouveau_fence { struct nouveau_channel *channel; @@ -139,6 +140,7 @@ nouveau_fence_emit(struct nouveau_fence *fence) { struct nouveau_channel *chan = fence->channel; struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; int ret; ret = RING_SPACE(chan, 2); @@ -159,8 +161,15 @@ nouveau_fence_emit(struct nouveau_fence *fence) list_add_tail(&fence->entry, &chan->fence.pending); spin_unlock(&chan->fence.lock); - BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 
0x0050 : 0x0150, 1); - OUT_RING(chan, fence->sequence); + if (USE_REFCNT(dev)) { + if (dev_priv->card_type < NV_C0) + BEGIN_RING(chan, NvSubSw, 0x0050, 1); + else + BEGIN_NVC0(chan, 2, NvSubSw, 0x0050, 1); + } else { + BEGIN_RING(chan, NvSubSw, 0x0150, 1); + } + OUT_RING (chan, fence->sequence); FIRE_RING(chan); return 0; @@ -445,11 +454,14 @@ nouveau_fence_channel_init(struct nouveau_channel *chan) if (ret) return ret; - ret = RING_SPACE(chan, 2); - if (ret) - return ret; - BEGIN_RING(chan, NvSubSw, 0, 1); - OUT_RING(chan, NvSw); + /* we leave subchannel empty for nvc0 */ + if (dev_priv->card_type < NV_C0) { + ret = RING_SPACE(chan, 2); + if (ret) + return ret; + BEGIN_RING(chan, NvSubSw, 0, 1); + OUT_RING(chan, NvSw); + } /* Create a DMA object for the shared cross-channel sync area. */ if (USE_SEMA(dev)) { -- cgit v1.2.3 From b2b099388fa76f0be25431794d369d251a4002dd Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 24 Nov 2010 10:47:15 +1000 Subject: drm/nvc0: implement pfifo engine hooks Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 + drivers/gpu/drm/nouveau/nvc0_fifo.c | 359 ++++++++++++++++++++++++++++++++++ 2 files changed, 361 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index c1e85c4c7ac2..6a2b3359f368 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -240,6 +240,7 @@ struct nouveau_channel { /* PFIFO context */ struct nouveau_gpuobj *ramfc; struct nouveau_gpuobj *cache; + void *fifo_priv; /* PGRAPH context */ /* XXX may be merge 2 pointers as private data ??? */ @@ -337,6 +338,7 @@ struct nouveau_fb_engine { }; struct nouveau_fifo_engine { + void *priv; int channels; struct nouveau_gpuobj *playlist[2]; diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c index 890c2b95fbc1..82a4ded5dae8 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fifo.c +++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c @@ -25,6 +25,48 @@ #include "drmP.h" #include "nouveau_drv.h" +#include "nouveau_mm.h" + +static void nvc0_fifo_isr(struct drm_device *); + +struct nvc0_fifo_priv { + struct nouveau_gpuobj *playlist[2]; + int cur_playlist; + struct nouveau_vma user_vma; +}; + +struct nvc0_fifo_chan { + struct nouveau_bo *user; + struct nouveau_gpuobj *ramfc; +}; + +static void +nvc0_fifo_playlist_update(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nvc0_fifo_priv *priv = pfifo->priv; + struct nouveau_gpuobj *cur; + int i, p; + + cur = priv->playlist[priv->cur_playlist]; + priv->cur_playlist = !priv->cur_playlist; + + for (i = 0, p = 0; i < 128; i++) { + if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1)) + continue; + nv_wo32(cur, p + 0, i); + nv_wo32(cur, p + 4, 0x00000004); + p += 8; + } + pinstmem->flush(dev); + + nv_wr32(dev, 0x002270, cur->vinst >> 12); + nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3)); + if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000)) + NV_ERROR(dev, "PFIFO - playlist update failed\n"); +} void nvc0_fifo_disable(struct drm_device *dev) @@ -57,12 +99,135 @@ nvc0_fifo_channel_id(struct drm_device *dev) int nvc0_fifo_create_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; + struct nouveau_fifo_engine *pfifo = 
&dev_priv->engine.fifo; + struct nvc0_fifo_priv *priv = pfifo->priv; + struct nvc0_fifo_chan *fifoch; + u64 ib_virt, user_vinst; + int ret; + + chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL); + if (!chan->fifo_priv) + return -ENOMEM; + fifoch = chan->fifo_priv; + + /* allocate vram for control regs, map into polling area */ + ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM, + 0, 0, true, true, &fifoch->user); + if (ret) + goto error; + + ret = nouveau_bo_pin(fifoch->user, TTM_PL_FLAG_VRAM); + if (ret) { + nouveau_bo_ref(NULL, &fifoch->user); + goto error; + } + + user_vinst = fifoch->user->bo.mem.start << PAGE_SHIFT; + + ret = nouveau_bo_map(fifoch->user); + if (ret) { + nouveau_bo_unpin(fifoch->user); + nouveau_bo_ref(NULL, &fifoch->user); + goto error; + } + + nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000, + fifoch->user->bo.mem.mm_node); + + chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) + + priv->user_vma.offset + (chan->id * 0x1000), + PAGE_SIZE); + if (!chan->user) { + ret = -ENOMEM; + goto error; + } + + ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4; + + /* zero channel regs */ + nouveau_bo_wr32(fifoch->user, 0x0040/4, 0); + nouveau_bo_wr32(fifoch->user, 0x0044/4, 0); + nouveau_bo_wr32(fifoch->user, 0x0048/4, 0); + nouveau_bo_wr32(fifoch->user, 0x004c/4, 0); + nouveau_bo_wr32(fifoch->user, 0x0050/4, 0); + nouveau_bo_wr32(fifoch->user, 0x0058/4, 0); + nouveau_bo_wr32(fifoch->user, 0x005c/4, 0); + nouveau_bo_wr32(fifoch->user, 0x0060/4, 0); + nouveau_bo_wr32(fifoch->user, 0x0088/4, 0); + nouveau_bo_wr32(fifoch->user, 0x008c/4, 0); + + /* ramfc */ + ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst, + chan->ramin->vinst, 0x100, + NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc); + if (ret) + goto error; + + nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(user_vinst)); + nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(user_vinst)); + nv_wo32(fifoch->ramfc, 0x10, 0x0000face); + nv_wo32(fifoch->ramfc, 0x30, 0xfffff902); + nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt)); + nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 | + upper_32_bits(ib_virt)); + nv_wo32(fifoch->ramfc, 0x54, 0x00000002); + nv_wo32(fifoch->ramfc, 0x84, 0x20400000); + nv_wo32(fifoch->ramfc, 0x94, 0x30000001); + nv_wo32(fifoch->ramfc, 0x9c, 0x00000100); + nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f); + nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f); + nv_wo32(fifoch->ramfc, 0xac, 0x0000001f); + nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000); + nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */ + nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */ + pinstmem->flush(dev); + + nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 | + (chan->ramin->vinst >> 12)); + nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001); + nvc0_fifo_playlist_update(dev); return 0; + +error: + pfifo->destroy_context(chan); + return ret; } void nvc0_fifo_destroy_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + struct nvc0_fifo_chan *fifoch; + + nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000); + nv_wr32(dev, 0x002634, chan->id); + if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id)) + NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634)); + + nvc0_fifo_playlist_update(dev); + + nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000); + + if (chan->user) { + iounmap(chan->user); + chan->user = NULL; + } + + fifoch = chan->fifo_priv; + chan->fifo_priv = NULL; + if (!fifoch) + return; + + nouveau_gpuobj_ref(NULL, &fifoch->ramfc); + if (fifoch->user) { + 
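/* A worked example (all values assumed) of the RAMFC 0x4c word programmed
 * in nvc0_fifo_create_context() above: log2 of the indirect-buffer entry
 * count lands above bit 16, with virtual-address bits 39:32 in the low
 * byte. */
u64 ib_virt    = 0x0000002020100000ULL;  /* assumed IB virtual address   */
u32 ib_entries = 512;                    /* i.e. chan->dma.ib_max == 511 */
u32 word = (ilog2(ib_entries) << 16) | (u32)(ib_virt >> 32);
/* drm_order(512) == ilog2(512) == 9, so word == 0x00090020 here */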
nouveau_bo_unmap(fifoch->user); + nouveau_bo_unpin(fifoch->user); + nouveau_bo_ref(NULL, &fifoch->user); + } + kfree(fifoch); } int @@ -77,14 +242,208 @@ nvc0_fifo_unload_context(struct drm_device *dev) return 0; } +static void +nvc0_fifo_destroy(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nvc0_fifo_priv *priv; + + priv = pfifo->priv; + if (!priv) + return; + + nouveau_vm_put(&priv->user_vma); + nouveau_gpuobj_ref(NULL, &priv->playlist[1]); + nouveau_gpuobj_ref(NULL, &priv->playlist[0]); + kfree(priv); +} + void nvc0_fifo_takedown(struct drm_device *dev) { + nv_wr32(dev, 0x002140, 0x00000000); + nvc0_fifo_destroy(dev); +} + +static int +nvc0_fifo_create(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nvc0_fifo_priv *priv; + int ret; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + pfifo->priv = priv; + + ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0, + &priv->playlist[0]); + if (ret) + goto error; + + ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0, + &priv->playlist[1]); + if (ret) + goto error; + + ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000, + 12, NV_MEM_ACCESS_RW, &priv->user_vma); + if (ret) + goto error; + + nouveau_irq_register(dev, 8, nvc0_fifo_isr); + NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */ + return 0; + +error: + nvc0_fifo_destroy(dev); + return ret; } int nvc0_fifo_init(struct drm_device *dev) { + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; + struct nvc0_fifo_priv *priv; + int ret, i; + + if (!pfifo->priv) { + ret = nvc0_fifo_create(dev); + if (ret) + return ret; + } + priv = pfifo->priv; + + /* reset PFIFO, enable all available PSUBFIFO areas */ + nv_mask(dev, 0x000200, 0x00000100, 0x00000000); + nv_mask(dev, 0x000200, 0x00000100, 0x00000100); + nv_wr32(dev, 0x000204, 0xffffffff); + nv_wr32(dev, 0x002204, 0xffffffff); + + /* assign engines to subfifos */ + nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */ + nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */ + nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */ + nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */ + nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */ + nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */ + + /* PSUBFIFO[n] */ + for (i = 0; i < 3; i++) { + nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); + nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ + nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */ + } + + nv_mask(dev, 0x002200, 0x00000001, 0x00000001); + nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12); + + nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */ + nv_wr32(dev, 0x002100, 0xffffffff); + nv_wr32(dev, 0x002140, 0xbfffffff); return 0; } +struct nouveau_enum nvc0_fifo_fault_unit[] = { + { 0, "PGRAPH" }, + { 3, "PEEPHOLE" }, + { 4, "BAR1" }, + { 5, "BAR3" }, + { 7, "PFIFO" }, + {} +}; + +struct nouveau_enum nvc0_fifo_fault_reason[] = { + { 0, "PT_NOT_PRESENT" }, + { 1, "PT_TOO_SHORT" }, + { 2, "PAGE_NOT_PRESENT" }, + { 3, "VM_LIMIT_EXCEEDED" }, + {} +}; + +struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = { +/* { 0x00008000, "" } seen with null ib push */ + { 0x00200000, "ILLEGAL_MTHD" }, + { 0x00800000, "EMPTY_SUBC" }, + {} +}; + +static void +nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit) +{ 
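/* The reads below reconstruct the 40-bit faulting address from two 32-bit
 * halves; with assumed register values: */
u32 ex_valo = 0x00200000, ex_vahi = 0x00000001;
u64 ex_addr = (u64)ex_vahi << 32 | ex_valo;  /* == 0x0000000100200000 */
/* stat bit 7 selects "write" over "read", and stat & 0xf indexes
 * nvc0_fifo_fault_reason[].  Note that nvc0_fifo_isr_subfifo_intr()
 * further below masks subc with 0x00070000 but prints it unshifted,
 * unlike the PGRAPH ISR later in the series, which shifts down by 16. */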
+ u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10)); + u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10)); + u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10)); + u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10)); + + NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [", + (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo); + nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f); + printk("] from "); + nouveau_enum_print(nvc0_fifo_fault_unit, unit); + printk(" on channel 0x%010llx\n", (u64)inst << 12); +} + +static void +nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) +{ + u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000)); + u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000)); + u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000)); + u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f; + u32 subc = (addr & 0x00070000); + u32 mthd = (addr & 0x00003ffc); + + NV_INFO(dev, "PSUBFIFO %d:", unit); + nouveau_bitfield_print(nvc0_fifo_subfifo_intr, stat); + NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n", + unit, chid, subc, mthd, data); + + nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008); + nv_wr32(dev, 0x040108 + (unit * 0x2000), stat); +} + +static void +nvc0_fifo_isr(struct drm_device *dev) +{ + u32 stat = nv_rd32(dev, 0x002100); + + if (stat & 0x10000000) { + u32 units = nv_rd32(dev, 0x00259c); + u32 u = units; + + while (u) { + int i = ffs(u) - 1; + nvc0_fifo_isr_vm_fault(dev, i); + u &= ~(1 << i); + } + + nv_wr32(dev, 0x00259c, units); + stat &= ~0x10000000; + } + + if (stat & 0x20000000) { + u32 units = nv_rd32(dev, 0x0025a0); + u32 u = units; + + while (u) { + int i = ffs(u) - 1; + nvc0_fifo_isr_subfifo_intr(dev, i); + u &= ~(1 << i); + } + + nv_wr32(dev, 0x0025a0, units); + stat &= ~0x20000000; + } + + if (stat) { + NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat); + nv_wr32(dev, 0x002100, stat); + } + + nv_wr32(dev, 0x2140, 0); +} -- cgit v1.2.3 From 966a5b7daa15e152bc2e1f0407939cc721fb5995 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 24 Nov 2010 10:49:02 +1000 Subject: drm/nvc0: implement pgraph engine hooks Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Makefile | 2 +- drivers/gpu/drm/nouveau/nouveau_bo.c | 2 +- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nvc0_graph.c | 709 +++++++- drivers/gpu/drm/nouveau/nvc0_graph.h | 66 + drivers/gpu/drm/nouveau/nvc0_grctx.c | 2854 +++++++++++++++++++++++++++++++++ 6 files changed, 3631 insertions(+), 3 deletions(-) create mode 100644 drivers/gpu/drm/nouveau/nvc0_graph.h create mode 100644 drivers/gpu/drm/nouveau/nvc0_grctx.c diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index d9d22ffff81e..141771beb8e6 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -18,7 +18,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o \ nv40_graph.o nv50_graph.o nvc0_graph.o \ - nv40_grctx.o nv50_grctx.o \ + nv40_grctx.o nv50_grctx.o nvc0_grctx.o \ nv84_crypt.o \ nv04_instmem.o nv50_instmem.o nvc0_instmem.o \ nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \ diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 6f3096a50297..5dc639e0c969 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -839,7 +839,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, } /* Software copy if 
the card isn't up and running yet. */ - if (!dev_priv->channel) { + if (!dev_priv->channel || dev_priv->card_type == NV_C0) { ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); goto out; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 6a2b3359f368..e81575687354 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -365,6 +365,7 @@ struct nouveau_pgraph_engine { bool accel_blocked; bool registered; int grctx_size; + void *priv; /* NV2x/NV3x context table (0x400780) */ struct nouveau_gpuobj *ctx_table; diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index 717a5177a8d8..cf2f6aa920b4 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -22,9 +22,16 @@ * Authors: Ben Skeggs */ +#include + #include "drmP.h" #include "nouveau_drv.h" +#include "nouveau_mm.h" +#include "nvc0_graph.h" + +static void nvc0_graph_isr(struct drm_device *); +static int nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan); void nvc0_graph_fifo_access(struct drm_device *dev, bool enabled) @@ -37,39 +44,739 @@ nvc0_graph_channel(struct drm_device *dev) return NULL; } +static int +nvc0_graph_construct_context(struct nouveau_channel *chan) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; + struct nvc0_graph_chan *grch = chan->pgraph_ctx; + struct drm_device *dev = chan->dev; + int ret, i; + u32 *ctx; + + ctx = kmalloc(priv->grctx_size, GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + nvc0_graph_load_context(chan); + + nv_wo32(grch->grctx, 0x1c, 1); + nv_wo32(grch->grctx, 0x20, 0); + nv_wo32(grch->grctx, 0x28, 0); + nv_wo32(grch->grctx, 0x2c, 0); + dev_priv->engine.instmem.flush(dev); + + ret = nvc0_grctx_generate(chan); + if (ret) { + kfree(ctx); + return ret; + } + + ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst); + if (ret) { + kfree(ctx); + return ret; + } + + for (i = 0; i < priv->grctx_size; i += 4) + ctx[i / 4] = nv_ro32(grch->grctx, i); + + priv->grctx_vals = ctx; + return 0; +} + +static int +nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan) +{ + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; + struct nvc0_graph_chan *grch = chan->pgraph_ctx; + struct drm_device *dev = chan->dev; + int i = 0, gpc, tp, ret; + u32 magic; + + ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM, + &grch->unk408004); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM, + &grch->unk40800c); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM, + &grch->unk418810); + if (ret) + return ret; + + ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM, + &grch->mmio); + if (ret) + return ret; + + + nv_wo32(grch->mmio, i++ * 4, 0x00408004); + nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8); + nv_wo32(grch->mmio, i++ * 4, 0x00408008); + nv_wo32(grch->mmio, i++ * 4, 0x80000018); + + nv_wo32(grch->mmio, i++ * 4, 0x0040800c); + nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8); + nv_wo32(grch->mmio, i++ * 4, 0x00408010); + nv_wo32(grch->mmio, i++ * 4, 0x80000000); + + nv_wo32(grch->mmio, i++ * 4, 0x00418810); + nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12); + nv_wo32(grch->mmio, i++ * 4, 0x00419848); + 
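/* The stream being built here is flat register/value pairs: word 2n holds
 * a register offset, word 2n+1 the value to load, and mmio_nr ends up
 * i / 2.  A hypothetical helper (the patch open-codes it) makes the shape
 * explicit: */
static int mmio_append(struct nouveau_gpuobj *mmio, int i, u32 reg, u32 val)
{
	nv_wo32(mmio, i++ * 4, reg); /* even word: register offset */
	nv_wo32(mmio, i++ * 4, val); /* odd word: value to load    */
	return i;
}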
nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12); + + nv_wo32(grch->mmio, i++ * 4, 0x00419004); + nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8); + nv_wo32(grch->mmio, i++ * 4, 0x00419008); + nv_wo32(grch->mmio, i++ * 4, 0x00000000); + + nv_wo32(grch->mmio, i++ * 4, 0x00418808); + nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8); + nv_wo32(grch->mmio, i++ * 4, 0x0041880c); + nv_wo32(grch->mmio, i++ * 4, 0x80000018); + + magic = 0x02180000; + nv_wo32(grch->mmio, i++ * 4, 0x00405830); + nv_wo32(grch->mmio, i++ * 4, magic); + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) { + u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800); + nv_wo32(grch->mmio, i++ * 4, reg); + nv_wo32(grch->mmio, i++ * 4, magic); + } + } + + grch->mmio_nr = i / 2; + return 0; +} + int nvc0_graph_create_context(struct nouveau_channel *chan) { + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nvc0_graph_priv *priv = pgraph->priv; + struct nvc0_graph_chan *grch; + struct drm_device *dev = chan->dev; + struct nouveau_gpuobj *grctx; + int ret, i; + + chan->pgraph_ctx = kzalloc(sizeof(*grch), GFP_KERNEL); + if (!chan->pgraph_ctx) + return -ENOMEM; + grch = chan->pgraph_ctx; + + ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256, + NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC, + &grch->grctx); + if (ret) + goto error; + chan->ramin_grctx = grch->grctx; + grctx = grch->grctx; + + ret = nvc0_graph_create_context_mmio_list(chan); + if (ret) + goto error; + + nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4); + nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst)); + pinstmem->flush(dev); + + if (!priv->grctx_vals) { + ret = nvc0_graph_construct_context(chan); + if (ret) + goto error; + } + + for (i = 0; i < priv->grctx_size; i += 4) + nv_wo32(grctx, i, priv->grctx_vals[i / 4]); + + nv_wo32(grctx, 0xf4, 0); + nv_wo32(grctx, 0xf8, 0); + nv_wo32(grctx, 0x10, grch->mmio_nr); + nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst)); + nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst)); + nv_wo32(grctx, 0x1c, 1); + nv_wo32(grctx, 0x20, 0); + nv_wo32(grctx, 0x28, 0); + nv_wo32(grctx, 0x2c, 0); + pinstmem->flush(dev); return 0; + +error: + pgraph->destroy_context(chan); + return ret; } void nvc0_graph_destroy_context(struct nouveau_channel *chan) { + struct nvc0_graph_chan *grch; + + grch = chan->pgraph_ctx; + chan->pgraph_ctx = NULL; + if (!grch) + return; + + nouveau_gpuobj_ref(NULL, &grch->mmio); + nouveau_gpuobj_ref(NULL, &grch->unk418810); + nouveau_gpuobj_ref(NULL, &grch->unk40800c); + nouveau_gpuobj_ref(NULL, &grch->unk408004); + nouveau_gpuobj_ref(NULL, &grch->grctx); + chan->ramin_grctx = NULL; } int nvc0_graph_load_context(struct nouveau_channel *chan) { + struct drm_device *dev = chan->dev; + + nv_wr32(dev, 0x409840, 0x00000030); + nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12); + nv_wr32(dev, 0x409504, 0x00000003); + if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010)) + NV_ERROR(dev, "PGRAPH: load_ctx timeout\n"); + + printk(KERN_ERR "load_ctx 0x%08x\n", nv_rd32(dev, 0x409b00)); + return 0; +} + +static int +nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan) +{ + nv_wr32(dev, 0x409840, 0x00000003); + nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12); + nv_wr32(dev, 0x409504, 0x00000009); + if (!nv_wait(dev, 0x409800, 
0x00000001, 0x00000000)) { + NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n"); + return -EBUSY; + } + return 0; } int nvc0_graph_unload_context(struct drm_device *dev) { - return 0; + u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12; + return nvc0_graph_unload_context_to(dev, inst); +} + +static void +nvc0_graph_destroy(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nvc0_graph_priv *priv; + + priv = pgraph->priv; + if (!priv) + return; + + nouveau_irq_unregister(dev, 12); + + nouveau_gpuobj_ref(NULL, &priv->unk4188b8); + nouveau_gpuobj_ref(NULL, &priv->unk4188b4); + + if (priv->grctx_vals) + kfree(priv->grctx_vals); + kfree(priv); } void nvc0_graph_takedown(struct drm_device *dev) { + nvc0_graph_destroy(dev); +} + +static int +nvc0_graph_create(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nvc0_graph_priv *priv; + int ret, gpc, i; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + pgraph->priv = priv; + + ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4); + if (ret) + goto error; + + ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8); + if (ret) + goto error; + + for (i = 0; i < 0x1000; i += 4) { + nv_wo32(priv->unk4188b4, i, 0x00000010); + nv_wo32(priv->unk4188b8, i, 0x00000010); + } + + priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f; + priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16; + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608)); + priv->tp_total += priv->tp_nr[gpc]; + } + + /*XXX: these need figuring out... 
*/ + switch (dev_priv->chipset) { + case 0xc0: + if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */ + priv->magic_not_rop_nr = 0x07; + priv->magic419bd0 = 0x0a360000; + priv->magic419be4 = 0x04c33a54; + /* filled values up to tp_total, the rest 0 */ + priv->magicgpc980[0] = 0x22111000; + priv->magicgpc980[1] = 0x00000233; + priv->magicgpc980[2] = 0x00000000; + priv->magicgpc980[3] = 0x00000000; + priv->magicgpc918 = 0x000ba2e9; + } else + if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */ + priv->magic_not_rop_nr = 0x05; + priv->magic419bd0 = 0x043c0000; + priv->magic419be4 = 0x09041208; + priv->magicgpc980[0] = 0x11110000; + priv->magicgpc980[1] = 0x00233222; + priv->magicgpc980[2] = 0x00000000; + priv->magicgpc980[3] = 0x00000000; + priv->magicgpc918 = 0x00092493; + } else + if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */ + priv->magic_not_rop_nr = 0x06; + priv->magic419bd0 = 0x023e0000; + priv->magic419be4 = 0x10414104; + priv->magicgpc980[0] = 0x11110000; + priv->magicgpc980[1] = 0x03332222; + priv->magicgpc980[2] = 0x00000000; + priv->magicgpc980[3] = 0x00000000; + priv->magicgpc918 = 0x00088889; + } + break; + case 0xc3: /* 450, 4/0/0/0, 2 */ + priv->magic_not_rop_nr = 0x03; + priv->magic419bd0 = 0x00500000; + priv->magic419be4 = 0x00000000; + priv->magicgpc980[0] = 0x00003210; + priv->magicgpc980[1] = 0x00000000; + priv->magicgpc980[2] = 0x00000000; + priv->magicgpc980[3] = 0x00000000; + priv->magicgpc918 = 0x00200000; + break; + case 0xc4: /* 460, 3/4/0/0, 4 */ + priv->magic_not_rop_nr = 0x01; + priv->magic419bd0 = 0x045c0000; + priv->magic419be4 = 0x09041208; + priv->magicgpc980[0] = 0x02321100; + priv->magicgpc980[1] = 0x00000000; + priv->magicgpc980[2] = 0x00000000; + priv->magicgpc980[3] = 0x00000000; + priv->magicgpc918 = 0x00124925; + break; + } + + if (!priv->magic419bd0) { + NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n", + priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2], + priv->tp_nr[3], priv->rop_nr); + /* use 0xc3's values... 
*/ + priv->magic_not_rop_nr = 0x03; + priv->magic419bd0 = 0x00500000; + priv->magic419be4 = 0x00000000; + priv->magicgpc980[0] = 0x00003210; + priv->magicgpc980[1] = 0x00000000; + priv->magicgpc980[2] = 0x00000000; + priv->magicgpc980[3] = 0x00000000; + priv->magicgpc918 = 0x00200000; + } + + nouveau_irq_register(dev, 12, nvc0_graph_isr); + NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */ + NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */ + NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */ + NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */ + return 0; + +error: + nvc0_graph_destroy(dev); + return ret; +} + +static void +nvc0_graph_init_obj418880(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nvc0_graph_priv *priv = pgraph->priv; + int i; + + nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000); + nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000); + for (i = 0; i < 4; i++) + nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000); + nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8); + nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8); +} + +static void +nvc0_graph_init_regs(struct drm_device *dev) +{ + nv_wr32(dev, 0x400080, 0x003083c2); + nv_wr32(dev, 0x400088, 0x00006fe7); + nv_wr32(dev, 0x40008c, 0x00000000); + nv_wr32(dev, 0x400090, 0x00000030); + nv_wr32(dev, 0x40013c, 0x013901f7); + nv_wr32(dev, 0x400140, 0x00000100); + nv_wr32(dev, 0x400144, 0x00000000); + nv_wr32(dev, 0x400148, 0x00000110); + nv_wr32(dev, 0x400138, 0x00000000); + nv_wr32(dev, 0x400130, 0x00000000); + nv_wr32(dev, 0x400134, 0x00000000); + nv_wr32(dev, 0x400124, 0x00000002); +} + +static void +nvc0_graph_init_gpc_0(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; + int gpc; + + // TP ROP UNKVAL(magic_not_rop_nr) + // 450: 4/0/0/0 2 3 + // 460: 3/4/0/0 4 1 + // 465: 3/4/4/0 4 7 + // 470: 3/3/4/4 5 5 + // 480: 3/4/4/4 6 6 + + // magicgpc918 + // 450: 00200000 00000000001000000000000000000000 + // 460: 00124925 00000000000100100100100100100101 + // 465: 000ba2e9 00000000000010111010001011101001 + // 470: 00092493 00000000000010010010010010010011 + // 480: 00088889 00000000000010001000100010001001 + + /* filled values up to tp_total, remainder 0 */ + // 450: 00003210 00000000 00000000 00000000 + // 460: 02321100 00000000 00000000 00000000 + // 465: 22111000 00000233 00000000 00000000 + // 470: 11110000 00233222 00000000 00000000 + // 480: 11110000 03332222 00000000 00000000 + + nv_wr32(dev, GPC_BCAST(0x0980), priv->magicgpc980[0]); + nv_wr32(dev, GPC_BCAST(0x0984), priv->magicgpc980[1]); + nv_wr32(dev, GPC_BCAST(0x0988), priv->magicgpc980[2]); + nv_wr32(dev, GPC_BCAST(0x098c), priv->magicgpc980[3]); + + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 | + priv->tp_nr[gpc]); + nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total); + nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918); + } + + nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918); + nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr); +} + +static void +nvc0_graph_init_units(struct drm_device *dev) +{ + nv_wr32(dev, 0x409c24, 0x000f0000); + nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */ + nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */ + nv_wr32(dev, 0x408030, 0xc0000000); + nv_wr32(dev, 0x40601c, 0xc0000000); + nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */ + nv_wr32(dev, 0x406018, 0xc0000000); + 
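/* An observation on the magicgpc918 table above (the patch does not state
 * it): every value listed matches 2^23 divided by the total TP count,
 * rounded up: */
u32 magicgpc918 = DIV_ROUND_UP(1 << 23, 7); /* 7 TPs (460) -> 0x00124925 */
/* likewise 4 -> 0x00200000, 11 -> 0x000ba2e9, 14 -> 0x00092493 and
 * 15 -> 0x00088889, matching all five configurations in the comment. */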
nv_wr32(dev, 0x405840, 0xc0000000); + nv_wr32(dev, 0x405844, 0x00ffffff); + nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008); + nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000); +} + +static void +nvc0_graph_init_gpc_1(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; + int gpc, tp; + + for (gpc = 0; gpc < priv->gpc_nr; gpc++) { + nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000); + nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000); + nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000); + nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000); + for (tp = 0; tp < priv->tp_nr[gpc]; tp++) { + nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff); + nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff); + nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000); + nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000); + nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000); + nv_wr32(dev, TP_UNIT(gpc, tp, 0xe44), 0x001ffffe); + nv_wr32(dev, TP_UNIT(gpc, tp, 0xe4c), 0x0000000f); + } + nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff); + nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff); + } +} + +static void +nvc0_graph_init_rop(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; + int rop; + + for (rop = 0; rop < priv->rop_nr; rop++) { + nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000); + nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000); + nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff); + nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff); + } +} + +static int +nvc0_fuc_load_fw(struct drm_device *dev, u32 fuc_base, + const char *code_fw, const char *data_fw) +{ + const struct firmware *fw; + char name[32]; + int ret, i; + + snprintf(name, sizeof(name), "nouveau/%s", data_fw); + ret = request_firmware(&fw, name, &dev->pdev->dev); + if (ret) { + NV_ERROR(dev, "failed to load %s\n", data_fw); + return ret; + } + + nv_wr32(dev, fuc_base + 0x01c0, 0x01000000); + for (i = 0; i < fw->size / 4; i++) + nv_wr32(dev, fuc_base + 0x01c4, ((u32 *)fw->data)[i]); + release_firmware(fw); + + snprintf(name, sizeof(name), "nouveau/%s", code_fw); + ret = request_firmware(&fw, name, &dev->pdev->dev); + if (ret) { + NV_ERROR(dev, "failed to load %s\n", code_fw); + return ret; + } + + nv_wr32(dev, fuc_base + 0x0180, 0x01000000); + for (i = 0; i < fw->size / 4; i++) { + if ((i & 0x3f) == 0) + nv_wr32(dev, fuc_base + 0x0188, i >> 6); + nv_wr32(dev, fuc_base + 0x0184, ((u32 *)fw->data)[i]); + } + release_firmware(fw); + + return 0; +} + +static int +nvc0_graph_init_ctxctl(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv; + u32 r000260; + int ret; + + /* load fuc microcode */ + r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); + ret = nvc0_fuc_load_fw(dev, 0x409000, "fuc409c", "fuc409d"); + if (ret == 0) + nvc0_fuc_load_fw(dev, 0x41a000, "fuc41ac", "fuc41ad"); + nv_wr32(dev, 0x000260, r000260); + + if (ret) + return ret; + + /* start both of them running */ + nv_wr32(dev, 0x409840, 0xffffffff); + nv_wr32(dev, 0x41a10c, 0x00000000); + nv_wr32(dev, 0x40910c, 0x00000000); + nv_wr32(dev, 0x41a100, 0x00000002); + nv_wr32(dev, 0x409100, 0x00000002); + if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001)) + NV_INFO(dev, "0x409800 wait failed\n"); + + nv_wr32(dev, 0x409840, 0xffffffff); + nv_wr32(dev, 0x409500, 0x7fffffff); + nv_wr32(dev, 0x409504, 0x00000021); + + nv_wr32(dev, 
0x409840, 0xffffffff); + nv_wr32(dev, 0x409500, 0x00000000); + nv_wr32(dev, 0x409504, 0x00000010); + if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) { + NV_ERROR(dev, "fuc09 req 0x10 timeout\n"); + return -EBUSY; + } + priv->grctx_size = nv_rd32(dev, 0x409800); + + nv_wr32(dev, 0x409840, 0xffffffff); + nv_wr32(dev, 0x409500, 0x00000000); + nv_wr32(dev, 0x409504, 0x00000016); + if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) { + NV_ERROR(dev, "fuc09 req 0x16 timeout\n"); + return -EBUSY; + } + + nv_wr32(dev, 0x409840, 0xffffffff); + nv_wr32(dev, 0x409500, 0x00000000); + nv_wr32(dev, 0x409504, 0x00000025); + if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) { + NV_ERROR(dev, "fuc09 req 0x25 timeout\n"); + return -EBUSY; + } + + return 0; } int nvc0_graph_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nvc0_graph_priv *priv; + int ret; + dev_priv->engine.graph.accel_blocked = true; + + switch (dev_priv->chipset) { + case 0xc0: + case 0xc3: + case 0xc4: + break; + default: + NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n"); + if (nouveau_noaccel != 0) + return 0; + break; + } + + nv_mask(dev, 0x000200, 0x18001000, 0x00000000); + nv_mask(dev, 0x000200, 0x18001000, 0x18001000); + + if (!pgraph->priv) { + ret = nvc0_graph_create(dev); + if (ret) + return ret; + } + priv = pgraph->priv; + + nvc0_graph_init_obj418880(dev); + nvc0_graph_init_regs(dev); + //nvc0_graph_init_unitplemented_magics(dev); + nvc0_graph_init_gpc_0(dev); + //nvc0_graph_init_unitplemented_c242(dev); + + nv_wr32(dev, 0x400500, 0x00010001); + nv_wr32(dev, 0x400100, 0xffffffff); + nv_wr32(dev, 0x40013c, 0xffffffff); + + nvc0_graph_init_units(dev); + nvc0_graph_init_gpc_1(dev); + nvc0_graph_init_rop(dev); + + nv_wr32(dev, 0x400108, 0xffffffff); + nv_wr32(dev, 0x400138, 0xffffffff); + nv_wr32(dev, 0x400118, 0xffffffff); + nv_wr32(dev, 0x400130, 0xffffffff); + nv_wr32(dev, 0x40011c, 0xffffffff); + nv_wr32(dev, 0x400134, 0xffffffff); + nv_wr32(dev, 0x400054, 0x34ce3464); + + ret = nvc0_graph_init_ctxctl(dev); + if (ret) + return ret; + + dev_priv->engine.graph.accel_blocked = false; return 0; } +static struct nouveau_enum nvc0_graph_data_error[] = { + { 5, "INVALID_ENUM" }, + {} +}; + +static int +nvc0_graph_isr_chid(struct drm_device *dev, u64 inst) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan; + unsigned long flags; + int i; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + for (i = 0; i < dev_priv->engine.fifo.channels; i++) { + chan = dev_priv->channels.ptr[i]; + if (!chan || !chan->ramin) + continue; + + if (inst == chan->ramin->vinst) + break; + } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return i; +} + +static void +nvc0_graph_isr(struct drm_device *dev) +{ + u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12; + u32 chid = nvc0_graph_isr_chid(dev, inst); + u32 stat = nv_rd32(dev, 0x400100); + u32 addr = nv_rd32(dev, 0x400704); + u32 mthd = (addr & 0x00003ffc); + u32 subc = (addr & 0x00070000) >> 16; + u32 data = nv_rd32(dev, 0x400708); + u32 code = nv_rd32(dev, 0x400110); + u32 class = nv_rd32(dev, 0x404200 + (subc * 4)); + + if (stat & 0x00000010) { + NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] subc %d " + "class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, inst, subc, class, mthd, data); + nv_wr32(dev, 0x400100, 0x00000010); + stat &= ~0x00000010; + } + + if (stat & 0x00100000) { + 
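/* The addr word read from 0x400704 at the top of this ISR packs the
 * subchannel in bits 18:16 and the method offset in bits 13:2.  A worked
 * example with an assumed value: */
u32 ex_addr = 0x00020104;                    /* assumed 0x400704 read */
u32 ex_subc = (ex_addr & 0x00070000) >> 16;  /* == 2                  */
u32 ex_mthd = (ex_addr & 0x00003ffc);        /* == 0x0104             */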
+static void
+nvc0_graph_isr(struct drm_device *dev)
+{
+	u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+	u32 chid = nvc0_graph_isr_chid(dev, inst);
+	u32 stat = nv_rd32(dev, 0x400100);
+	u32 addr = nv_rd32(dev, 0x400704);
+	u32 mthd = (addr & 0x00003ffc);
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 data = nv_rd32(dev, 0x400708);
+	u32 code = nv_rd32(dev, 0x400110);
+	u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
+
+	if (stat & 0x00000010) {
+		NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] subc %d "
+			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
+			chid, inst, subc, class, mthd, data);
+		nv_wr32(dev, 0x400100, 0x00000010);
+		stat &= ~0x00000010;
+	}
+
+	if (stat & 0x00100000) {
+		NV_INFO(dev, "PGRAPH: DATA_ERROR [");
+		nouveau_enum_print(nvc0_graph_data_error, code);
+		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
+		       "mthd 0x%04x data 0x%08x\n",
+			chid, inst, subc, class, mthd, data);
+		nv_wr32(dev, 0x400100, 0x00100000);
+		stat &= ~0x00100000;
+	}
+
+	if (stat & 0x00080000) {
+		u32 ustat = nv_rd32(dev, 0x409c18);
+
+		NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);
+
+		nv_wr32(dev, 0x409c20, ustat);
+		nv_wr32(dev, 0x400100, 0x00080000);
+		stat &= ~0x00080000;
+	}
+
+	if (stat) {
+		NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
+		nv_wr32(dev, 0x400100, stat);
+	}
+
+	nv_wr32(dev, 0x400500, 0x00010001);
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
new file mode 100644
index 000000000000..1e1f24f3fd34
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NVC0_GRAPH_H__
+#define __NVC0_GRAPH_H__
+
+#define GPC_MAX 4
+#define TP_MAX 32
+
+#define ROP_BCAST(r)   (0x408800 + (r))
+#define ROP_UNIT(u,r)  (0x410000 + (u) * 0x400 + (r))
+#define GPC_BCAST(r)   (0x418000 + (r))
+#define GPC_UNIT(t,r)  (0x500000 + (t) * 0x8000 + (r))
+#define TP_UNIT(t,m,r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
+
+struct nvc0_graph_priv {
+	u8 gpc_nr;
+	u8 rop_nr;
+	u8 tp_nr[GPC_MAX];
+	u8 tp_total;
+
+	u32 grctx_size;
+	u32 *grctx_vals;
+	struct nouveau_gpuobj *unk4188b4;
+	struct nouveau_gpuobj *unk4188b8;
+
+	u8 magic_not_rop_nr;
+	u32 magic419bd0;
+	u32 magic419be4;
+	u32 magicgpc980[4];
+	u32 magicgpc918;
+};
+
+struct nvc0_graph_chan {
+	struct nouveau_gpuobj *grctx;
+	struct nouveau_gpuobj *unk408004; /* 0x418810 too */
+	struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
+	struct nouveau_gpuobj *unk418810; /* 0x419848 too */
+	struct nouveau_gpuobj *mmio;
+	int mmio_nr;
+};
+
+int nvc0_grctx_generate(struct nouveau_channel *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
new file mode 100644
index 000000000000..88fa6211ac19
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -0,0 +1,2854 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_mm.h" +#include "nvc0_graph.h" + +static void +nv_icmd(struct drm_device *dev, u32 icmd, u32 data) +{ + nv_wr32(dev, 0x400204, data); + nv_wr32(dev, 0x400200, icmd); + while (nv_rd32(dev, 0x400700) & 2) {} +} + +static void +nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data) +{ + nv_wr32(dev, 0x40448c, data); + nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class); +} + +static void +nvc0_grctx_generate_9097(struct drm_device *dev) +{ + nv_mthd(dev, 0x9097, 0x0800, 0x00000000); + nv_mthd(dev, 0x9097, 0x0840, 0x00000000); + nv_mthd(dev, 0x9097, 0x0880, 0x00000000); + nv_mthd(dev, 0x9097, 0x08c0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0900, 0x00000000); + nv_mthd(dev, 0x9097, 0x0940, 0x00000000); + nv_mthd(dev, 0x9097, 0x0980, 0x00000000); + nv_mthd(dev, 0x9097, 0x09c0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0804, 0x00000000); + nv_mthd(dev, 0x9097, 0x0844, 0x00000000); + nv_mthd(dev, 0x9097, 0x0884, 0x00000000); + nv_mthd(dev, 0x9097, 0x08c4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0904, 0x00000000); + nv_mthd(dev, 0x9097, 0x0944, 0x00000000); + nv_mthd(dev, 0x9097, 0x0984, 0x00000000); + nv_mthd(dev, 0x9097, 0x09c4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0808, 0x00000400); + nv_mthd(dev, 0x9097, 0x0848, 0x00000400); + nv_mthd(dev, 0x9097, 0x0888, 0x00000400); + nv_mthd(dev, 0x9097, 0x08c8, 0x00000400); + nv_mthd(dev, 0x9097, 0x0908, 0x00000400); + nv_mthd(dev, 0x9097, 0x0948, 0x00000400); + nv_mthd(dev, 0x9097, 0x0988, 0x00000400); + nv_mthd(dev, 0x9097, 0x09c8, 0x00000400); + nv_mthd(dev, 0x9097, 0x080c, 0x00000300); + nv_mthd(dev, 0x9097, 0x084c, 0x00000300); + nv_mthd(dev, 0x9097, 0x088c, 0x00000300); + nv_mthd(dev, 0x9097, 0x08cc, 0x00000300); + nv_mthd(dev, 0x9097, 0x090c, 0x00000300); + nv_mthd(dev, 0x9097, 0x094c, 0x00000300); + nv_mthd(dev, 0x9097, 0x098c, 0x00000300); + nv_mthd(dev, 0x9097, 0x09cc, 0x00000300); + nv_mthd(dev, 0x9097, 0x0810, 0x000000cf); + nv_mthd(dev, 0x9097, 0x0850, 0x00000000); + nv_mthd(dev, 0x9097, 0x0890, 0x00000000); + nv_mthd(dev, 0x9097, 0x08d0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0910, 0x00000000); + nv_mthd(dev, 0x9097, 0x0950, 0x00000000); + nv_mthd(dev, 0x9097, 0x0990, 0x00000000); + nv_mthd(dev, 0x9097, 0x09d0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0814, 0x00000040); + nv_mthd(dev, 0x9097, 0x0854, 0x00000040); + nv_mthd(dev, 0x9097, 0x0894, 0x00000040); + 
nv_mthd(dev, 0x9097, 0x08d4, 0x00000040); + nv_mthd(dev, 0x9097, 0x0914, 0x00000040); + nv_mthd(dev, 0x9097, 0x0954, 0x00000040); + nv_mthd(dev, 0x9097, 0x0994, 0x00000040); + nv_mthd(dev, 0x9097, 0x09d4, 0x00000040); + nv_mthd(dev, 0x9097, 0x0818, 0x00000001); + nv_mthd(dev, 0x9097, 0x0858, 0x00000001); + nv_mthd(dev, 0x9097, 0x0898, 0x00000001); + nv_mthd(dev, 0x9097, 0x08d8, 0x00000001); + nv_mthd(dev, 0x9097, 0x0918, 0x00000001); + nv_mthd(dev, 0x9097, 0x0958, 0x00000001); + nv_mthd(dev, 0x9097, 0x0998, 0x00000001); + nv_mthd(dev, 0x9097, 0x09d8, 0x00000001); + nv_mthd(dev, 0x9097, 0x081c, 0x00000000); + nv_mthd(dev, 0x9097, 0x085c, 0x00000000); + nv_mthd(dev, 0x9097, 0x089c, 0x00000000); + nv_mthd(dev, 0x9097, 0x08dc, 0x00000000); + nv_mthd(dev, 0x9097, 0x091c, 0x00000000); + nv_mthd(dev, 0x9097, 0x095c, 0x00000000); + nv_mthd(dev, 0x9097, 0x099c, 0x00000000); + nv_mthd(dev, 0x9097, 0x09dc, 0x00000000); + nv_mthd(dev, 0x9097, 0x0820, 0x00000000); + nv_mthd(dev, 0x9097, 0x0860, 0x00000000); + nv_mthd(dev, 0x9097, 0x08a0, 0x00000000); + nv_mthd(dev, 0x9097, 0x08e0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0920, 0x00000000); + nv_mthd(dev, 0x9097, 0x0960, 0x00000000); + nv_mthd(dev, 0x9097, 0x09a0, 0x00000000); + nv_mthd(dev, 0x9097, 0x09e0, 0x00000000); + nv_mthd(dev, 0x9097, 0x2700, 0x00000000); + nv_mthd(dev, 0x9097, 0x2720, 0x00000000); + nv_mthd(dev, 0x9097, 0x2740, 0x00000000); + nv_mthd(dev, 0x9097, 0x2760, 0x00000000); + nv_mthd(dev, 0x9097, 0x2780, 0x00000000); + nv_mthd(dev, 0x9097, 0x27a0, 0x00000000); + nv_mthd(dev, 0x9097, 0x27c0, 0x00000000); + nv_mthd(dev, 0x9097, 0x27e0, 0x00000000); + nv_mthd(dev, 0x9097, 0x2704, 0x00000000); + nv_mthd(dev, 0x9097, 0x2724, 0x00000000); + nv_mthd(dev, 0x9097, 0x2744, 0x00000000); + nv_mthd(dev, 0x9097, 0x2764, 0x00000000); + nv_mthd(dev, 0x9097, 0x2784, 0x00000000); + nv_mthd(dev, 0x9097, 0x27a4, 0x00000000); + nv_mthd(dev, 0x9097, 0x27c4, 0x00000000); + nv_mthd(dev, 0x9097, 0x27e4, 0x00000000); + nv_mthd(dev, 0x9097, 0x2708, 0x00000000); + nv_mthd(dev, 0x9097, 0x2728, 0x00000000); + nv_mthd(dev, 0x9097, 0x2748, 0x00000000); + nv_mthd(dev, 0x9097, 0x2768, 0x00000000); + nv_mthd(dev, 0x9097, 0x2788, 0x00000000); + nv_mthd(dev, 0x9097, 0x27a8, 0x00000000); + nv_mthd(dev, 0x9097, 0x27c8, 0x00000000); + nv_mthd(dev, 0x9097, 0x27e8, 0x00000000); + nv_mthd(dev, 0x9097, 0x270c, 0x00000000); + nv_mthd(dev, 0x9097, 0x272c, 0x00000000); + nv_mthd(dev, 0x9097, 0x274c, 0x00000000); + nv_mthd(dev, 0x9097, 0x276c, 0x00000000); + nv_mthd(dev, 0x9097, 0x278c, 0x00000000); + nv_mthd(dev, 0x9097, 0x27ac, 0x00000000); + nv_mthd(dev, 0x9097, 0x27cc, 0x00000000); + nv_mthd(dev, 0x9097, 0x27ec, 0x00000000); + nv_mthd(dev, 0x9097, 0x2710, 0x00014000); + nv_mthd(dev, 0x9097, 0x2730, 0x00014000); + nv_mthd(dev, 0x9097, 0x2750, 0x00014000); + nv_mthd(dev, 0x9097, 0x2770, 0x00014000); + nv_mthd(dev, 0x9097, 0x2790, 0x00014000); + nv_mthd(dev, 0x9097, 0x27b0, 0x00014000); + nv_mthd(dev, 0x9097, 0x27d0, 0x00014000); + nv_mthd(dev, 0x9097, 0x27f0, 0x00014000); + nv_mthd(dev, 0x9097, 0x2714, 0x00000040); + nv_mthd(dev, 0x9097, 0x2734, 0x00000040); + nv_mthd(dev, 0x9097, 0x2754, 0x00000040); + nv_mthd(dev, 0x9097, 0x2774, 0x00000040); + nv_mthd(dev, 0x9097, 0x2794, 0x00000040); + nv_mthd(dev, 0x9097, 0x27b4, 0x00000040); + nv_mthd(dev, 0x9097, 0x27d4, 0x00000040); + nv_mthd(dev, 0x9097, 0x27f4, 0x00000040); + nv_mthd(dev, 0x9097, 0x1c00, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c10, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c20, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c30, 
0x00000000); + nv_mthd(dev, 0x9097, 0x1c40, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c50, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c60, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c70, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c80, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c90, 0x00000000); + nv_mthd(dev, 0x9097, 0x1ca0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cb0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cc0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cd0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1ce0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cf0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c04, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c14, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c24, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c34, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c44, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c54, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c64, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c74, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c84, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c94, 0x00000000); + nv_mthd(dev, 0x9097, 0x1ca4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cb4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cc4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cd4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1ce4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cf4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c08, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c18, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c28, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c38, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c48, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c58, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c68, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c78, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c88, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c98, 0x00000000); + nv_mthd(dev, 0x9097, 0x1ca8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cb8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cc8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cd8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1ce8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cf8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c0c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c1c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c2c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c3c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c4c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c5c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c6c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c7c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c8c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1c9c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cac, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cbc, 0x00000000); + nv_mthd(dev, 0x9097, 0x1ccc, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cdc, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cec, 0x00000000); + nv_mthd(dev, 0x9097, 0x1cfc, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d00, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d10, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d20, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d30, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d40, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d50, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d60, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d70, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d80, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d90, 0x00000000); + nv_mthd(dev, 0x9097, 0x1da0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1db0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1dc0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1dd0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1de0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1df0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d04, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d14, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d24, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d34, 0x00000000); + nv_mthd(dev, 
0x9097, 0x1d44, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d54, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d64, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d74, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d84, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d94, 0x00000000); + nv_mthd(dev, 0x9097, 0x1da4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1db4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1dc4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1dd4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1de4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1df4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d08, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d18, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d28, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d38, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d48, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d58, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d68, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d78, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d88, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d98, 0x00000000); + nv_mthd(dev, 0x9097, 0x1da8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1db8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1dc8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1dd8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1de8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1df8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d0c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d1c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d2c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d3c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d4c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d5c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d6c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d7c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d8c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1d9c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1dac, 0x00000000); + nv_mthd(dev, 0x9097, 0x1dbc, 0x00000000); + nv_mthd(dev, 0x9097, 0x1dcc, 0x00000000); + nv_mthd(dev, 0x9097, 0x1ddc, 0x00000000); + nv_mthd(dev, 0x9097, 0x1dec, 0x00000000); + nv_mthd(dev, 0x9097, 0x1dfc, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f00, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f08, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f10, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f18, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f20, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f28, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f30, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f38, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f40, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f48, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f50, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f58, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f60, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f68, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f70, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f78, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f04, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f0c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f14, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f1c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f24, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f2c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f34, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f3c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f44, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f4c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f54, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f5c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f64, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f6c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f74, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f7c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f80, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f88, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f90, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f98, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fa0, 0x00000000); + 
nv_mthd(dev, 0x9097, 0x1fa8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fb0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fb8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fc0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fc8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fd0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fd8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fe0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fe8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1ff0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1ff8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f84, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f8c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f94, 0x00000000); + nv_mthd(dev, 0x9097, 0x1f9c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fa4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fac, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fb4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fbc, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fc4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fcc, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fd4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fdc, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fe4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1fec, 0x00000000); + nv_mthd(dev, 0x9097, 0x1ff4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1ffc, 0x00000000); + nv_mthd(dev, 0x9097, 0x2200, 0x00000022); + nv_mthd(dev, 0x9097, 0x2210, 0x00000022); + nv_mthd(dev, 0x9097, 0x2220, 0x00000022); + nv_mthd(dev, 0x9097, 0x2230, 0x00000022); + nv_mthd(dev, 0x9097, 0x2240, 0x00000022); + nv_mthd(dev, 0x9097, 0x2000, 0x00000000); + nv_mthd(dev, 0x9097, 0x2040, 0x00000011); + nv_mthd(dev, 0x9097, 0x2080, 0x00000020); + nv_mthd(dev, 0x9097, 0x20c0, 0x00000030); + nv_mthd(dev, 0x9097, 0x2100, 0x00000040); + nv_mthd(dev, 0x9097, 0x2140, 0x00000051); + nv_mthd(dev, 0x9097, 0x200c, 0x00000001); + nv_mthd(dev, 0x9097, 0x204c, 0x00000001); + nv_mthd(dev, 0x9097, 0x208c, 0x00000001); + nv_mthd(dev, 0x9097, 0x20cc, 0x00000001); + nv_mthd(dev, 0x9097, 0x210c, 0x00000001); + nv_mthd(dev, 0x9097, 0x214c, 0x00000001); + nv_mthd(dev, 0x9097, 0x2010, 0x00000000); + nv_mthd(dev, 0x9097, 0x2050, 0x00000000); + nv_mthd(dev, 0x9097, 0x2090, 0x00000001); + nv_mthd(dev, 0x9097, 0x20d0, 0x00000002); + nv_mthd(dev, 0x9097, 0x2110, 0x00000003); + nv_mthd(dev, 0x9097, 0x2150, 0x00000004); + nv_mthd(dev, 0x9097, 0x0380, 0x00000000); + nv_mthd(dev, 0x9097, 0x03a0, 0x00000000); + nv_mthd(dev, 0x9097, 0x03c0, 0x00000000); + nv_mthd(dev, 0x9097, 0x03e0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0384, 0x00000000); + nv_mthd(dev, 0x9097, 0x03a4, 0x00000000); + nv_mthd(dev, 0x9097, 0x03c4, 0x00000000); + nv_mthd(dev, 0x9097, 0x03e4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0388, 0x00000000); + nv_mthd(dev, 0x9097, 0x03a8, 0x00000000); + nv_mthd(dev, 0x9097, 0x03c8, 0x00000000); + nv_mthd(dev, 0x9097, 0x03e8, 0x00000000); + nv_mthd(dev, 0x9097, 0x038c, 0x00000000); + nv_mthd(dev, 0x9097, 0x03ac, 0x00000000); + nv_mthd(dev, 0x9097, 0x03cc, 0x00000000); + nv_mthd(dev, 0x9097, 0x03ec, 0x00000000); + nv_mthd(dev, 0x9097, 0x0700, 0x00000000); + nv_mthd(dev, 0x9097, 0x0710, 0x00000000); + nv_mthd(dev, 0x9097, 0x0720, 0x00000000); + nv_mthd(dev, 0x9097, 0x0730, 0x00000000); + nv_mthd(dev, 0x9097, 0x0704, 0x00000000); + nv_mthd(dev, 0x9097, 0x0714, 0x00000000); + nv_mthd(dev, 0x9097, 0x0724, 0x00000000); + nv_mthd(dev, 0x9097, 0x0734, 0x00000000); + nv_mthd(dev, 0x9097, 0x0708, 0x00000000); + nv_mthd(dev, 0x9097, 0x0718, 0x00000000); + nv_mthd(dev, 0x9097, 0x0728, 0x00000000); + nv_mthd(dev, 0x9097, 0x0738, 0x00000000); + nv_mthd(dev, 0x9097, 0x2800, 0x00000000); + nv_mthd(dev, 0x9097, 0x2804, 0x00000000); + nv_mthd(dev, 0x9097, 0x2808, 
0x00000000); + nv_mthd(dev, 0x9097, 0x280c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2810, 0x00000000); + nv_mthd(dev, 0x9097, 0x2814, 0x00000000); + nv_mthd(dev, 0x9097, 0x2818, 0x00000000); + nv_mthd(dev, 0x9097, 0x281c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2820, 0x00000000); + nv_mthd(dev, 0x9097, 0x2824, 0x00000000); + nv_mthd(dev, 0x9097, 0x2828, 0x00000000); + nv_mthd(dev, 0x9097, 0x282c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2830, 0x00000000); + nv_mthd(dev, 0x9097, 0x2834, 0x00000000); + nv_mthd(dev, 0x9097, 0x2838, 0x00000000); + nv_mthd(dev, 0x9097, 0x283c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2840, 0x00000000); + nv_mthd(dev, 0x9097, 0x2844, 0x00000000); + nv_mthd(dev, 0x9097, 0x2848, 0x00000000); + nv_mthd(dev, 0x9097, 0x284c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2850, 0x00000000); + nv_mthd(dev, 0x9097, 0x2854, 0x00000000); + nv_mthd(dev, 0x9097, 0x2858, 0x00000000); + nv_mthd(dev, 0x9097, 0x285c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2860, 0x00000000); + nv_mthd(dev, 0x9097, 0x2864, 0x00000000); + nv_mthd(dev, 0x9097, 0x2868, 0x00000000); + nv_mthd(dev, 0x9097, 0x286c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2870, 0x00000000); + nv_mthd(dev, 0x9097, 0x2874, 0x00000000); + nv_mthd(dev, 0x9097, 0x2878, 0x00000000); + nv_mthd(dev, 0x9097, 0x287c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2880, 0x00000000); + nv_mthd(dev, 0x9097, 0x2884, 0x00000000); + nv_mthd(dev, 0x9097, 0x2888, 0x00000000); + nv_mthd(dev, 0x9097, 0x288c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2890, 0x00000000); + nv_mthd(dev, 0x9097, 0x2894, 0x00000000); + nv_mthd(dev, 0x9097, 0x2898, 0x00000000); + nv_mthd(dev, 0x9097, 0x289c, 0x00000000); + nv_mthd(dev, 0x9097, 0x28a0, 0x00000000); + nv_mthd(dev, 0x9097, 0x28a4, 0x00000000); + nv_mthd(dev, 0x9097, 0x28a8, 0x00000000); + nv_mthd(dev, 0x9097, 0x28ac, 0x00000000); + nv_mthd(dev, 0x9097, 0x28b0, 0x00000000); + nv_mthd(dev, 0x9097, 0x28b4, 0x00000000); + nv_mthd(dev, 0x9097, 0x28b8, 0x00000000); + nv_mthd(dev, 0x9097, 0x28bc, 0x00000000); + nv_mthd(dev, 0x9097, 0x28c0, 0x00000000); + nv_mthd(dev, 0x9097, 0x28c4, 0x00000000); + nv_mthd(dev, 0x9097, 0x28c8, 0x00000000); + nv_mthd(dev, 0x9097, 0x28cc, 0x00000000); + nv_mthd(dev, 0x9097, 0x28d0, 0x00000000); + nv_mthd(dev, 0x9097, 0x28d4, 0x00000000); + nv_mthd(dev, 0x9097, 0x28d8, 0x00000000); + nv_mthd(dev, 0x9097, 0x28dc, 0x00000000); + nv_mthd(dev, 0x9097, 0x28e0, 0x00000000); + nv_mthd(dev, 0x9097, 0x28e4, 0x00000000); + nv_mthd(dev, 0x9097, 0x28e8, 0x00000000); + nv_mthd(dev, 0x9097, 0x28ec, 0x00000000); + nv_mthd(dev, 0x9097, 0x28f0, 0x00000000); + nv_mthd(dev, 0x9097, 0x28f4, 0x00000000); + nv_mthd(dev, 0x9097, 0x28f8, 0x00000000); + nv_mthd(dev, 0x9097, 0x28fc, 0x00000000); + nv_mthd(dev, 0x9097, 0x2900, 0x00000000); + nv_mthd(dev, 0x9097, 0x2904, 0x00000000); + nv_mthd(dev, 0x9097, 0x2908, 0x00000000); + nv_mthd(dev, 0x9097, 0x290c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2910, 0x00000000); + nv_mthd(dev, 0x9097, 0x2914, 0x00000000); + nv_mthd(dev, 0x9097, 0x2918, 0x00000000); + nv_mthd(dev, 0x9097, 0x291c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2920, 0x00000000); + nv_mthd(dev, 0x9097, 0x2924, 0x00000000); + nv_mthd(dev, 0x9097, 0x2928, 0x00000000); + nv_mthd(dev, 0x9097, 0x292c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2930, 0x00000000); + nv_mthd(dev, 0x9097, 0x2934, 0x00000000); + nv_mthd(dev, 0x9097, 0x2938, 0x00000000); + nv_mthd(dev, 0x9097, 0x293c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2940, 0x00000000); + nv_mthd(dev, 0x9097, 0x2944, 0x00000000); + nv_mthd(dev, 0x9097, 0x2948, 0x00000000); + nv_mthd(dev, 
0x9097, 0x294c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2950, 0x00000000); + nv_mthd(dev, 0x9097, 0x2954, 0x00000000); + nv_mthd(dev, 0x9097, 0x2958, 0x00000000); + nv_mthd(dev, 0x9097, 0x295c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2960, 0x00000000); + nv_mthd(dev, 0x9097, 0x2964, 0x00000000); + nv_mthd(dev, 0x9097, 0x2968, 0x00000000); + nv_mthd(dev, 0x9097, 0x296c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2970, 0x00000000); + nv_mthd(dev, 0x9097, 0x2974, 0x00000000); + nv_mthd(dev, 0x9097, 0x2978, 0x00000000); + nv_mthd(dev, 0x9097, 0x297c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2980, 0x00000000); + nv_mthd(dev, 0x9097, 0x2984, 0x00000000); + nv_mthd(dev, 0x9097, 0x2988, 0x00000000); + nv_mthd(dev, 0x9097, 0x298c, 0x00000000); + nv_mthd(dev, 0x9097, 0x2990, 0x00000000); + nv_mthd(dev, 0x9097, 0x2994, 0x00000000); + nv_mthd(dev, 0x9097, 0x2998, 0x00000000); + nv_mthd(dev, 0x9097, 0x299c, 0x00000000); + nv_mthd(dev, 0x9097, 0x29a0, 0x00000000); + nv_mthd(dev, 0x9097, 0x29a4, 0x00000000); + nv_mthd(dev, 0x9097, 0x29a8, 0x00000000); + nv_mthd(dev, 0x9097, 0x29ac, 0x00000000); + nv_mthd(dev, 0x9097, 0x29b0, 0x00000000); + nv_mthd(dev, 0x9097, 0x29b4, 0x00000000); + nv_mthd(dev, 0x9097, 0x29b8, 0x00000000); + nv_mthd(dev, 0x9097, 0x29bc, 0x00000000); + nv_mthd(dev, 0x9097, 0x29c0, 0x00000000); + nv_mthd(dev, 0x9097, 0x29c4, 0x00000000); + nv_mthd(dev, 0x9097, 0x29c8, 0x00000000); + nv_mthd(dev, 0x9097, 0x29cc, 0x00000000); + nv_mthd(dev, 0x9097, 0x29d0, 0x00000000); + nv_mthd(dev, 0x9097, 0x29d4, 0x00000000); + nv_mthd(dev, 0x9097, 0x29d8, 0x00000000); + nv_mthd(dev, 0x9097, 0x29dc, 0x00000000); + nv_mthd(dev, 0x9097, 0x29e0, 0x00000000); + nv_mthd(dev, 0x9097, 0x29e4, 0x00000000); + nv_mthd(dev, 0x9097, 0x29e8, 0x00000000); + nv_mthd(dev, 0x9097, 0x29ec, 0x00000000); + nv_mthd(dev, 0x9097, 0x29f0, 0x00000000); + nv_mthd(dev, 0x9097, 0x29f4, 0x00000000); + nv_mthd(dev, 0x9097, 0x29f8, 0x00000000); + nv_mthd(dev, 0x9097, 0x29fc, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a00, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a20, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a40, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a60, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a80, 0x00000000); + nv_mthd(dev, 0x9097, 0x0aa0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ac0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ae0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b00, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b20, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b40, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b60, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b80, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ba0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0bc0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0be0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a04, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a24, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a44, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a64, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a84, 0x00000000); + nv_mthd(dev, 0x9097, 0x0aa4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ac4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ae4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b04, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b24, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b44, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b64, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b84, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ba4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0bc4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0be4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a08, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a28, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a48, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a68, 0x00000000); + 
nv_mthd(dev, 0x9097, 0x0a88, 0x00000000); + nv_mthd(dev, 0x9097, 0x0aa8, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ac8, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ae8, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b08, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b28, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b48, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b68, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b88, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ba8, 0x00000000); + nv_mthd(dev, 0x9097, 0x0bc8, 0x00000000); + nv_mthd(dev, 0x9097, 0x0be8, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a0c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a2c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a4c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a6c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a8c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0aac, 0x00000000); + nv_mthd(dev, 0x9097, 0x0acc, 0x00000000); + nv_mthd(dev, 0x9097, 0x0aec, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b0c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b2c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b4c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b6c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b8c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0bac, 0x00000000); + nv_mthd(dev, 0x9097, 0x0bcc, 0x00000000); + nv_mthd(dev, 0x9097, 0x0bec, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a10, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a30, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a50, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a70, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a90, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ab0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ad0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0af0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b10, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b30, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b50, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b70, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b90, 0x00000000); + nv_mthd(dev, 0x9097, 0x0bb0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0bd0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0bf0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a14, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a34, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a54, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a74, 0x00000000); + nv_mthd(dev, 0x9097, 0x0a94, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ab4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ad4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0af4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b14, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b34, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b54, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b74, 0x00000000); + nv_mthd(dev, 0x9097, 0x0b94, 0x00000000); + nv_mthd(dev, 0x9097, 0x0bb4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0bd4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0bf4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c00, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c10, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c20, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c30, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c40, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c50, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c60, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c70, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c80, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c90, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ca0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0cb0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0cc0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0cd0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ce0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0cf0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c04, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c14, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c24, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c34, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c44, 
0x00000000); + nv_mthd(dev, 0x9097, 0x0c54, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c64, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c74, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c84, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c94, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ca4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0cb4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0cc4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0cd4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ce4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0cf4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c08, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c18, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c28, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c38, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c48, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c58, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c68, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c78, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c88, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c98, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ca8, 0x00000000); + nv_mthd(dev, 0x9097, 0x0cb8, 0x00000000); + nv_mthd(dev, 0x9097, 0x0cc8, 0x00000000); + nv_mthd(dev, 0x9097, 0x0cd8, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ce8, 0x00000000); + nv_mthd(dev, 0x9097, 0x0cf8, 0x00000000); + nv_mthd(dev, 0x9097, 0x0c0c, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0c1c, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0c2c, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0c3c, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0c4c, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0c5c, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0c6c, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0c7c, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0c8c, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0c9c, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0cac, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0cbc, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0ccc, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0cdc, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0cec, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0cfc, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0d00, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d08, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d10, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d18, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d20, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d28, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d30, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d38, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d04, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d0c, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d14, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d1c, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d24, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d2c, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d34, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d3c, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e00, 0x00000000); + nv_mthd(dev, 0x9097, 0x0e10, 0x00000000); + nv_mthd(dev, 0x9097, 0x0e20, 0x00000000); + nv_mthd(dev, 0x9097, 0x0e30, 0x00000000); + nv_mthd(dev, 0x9097, 0x0e40, 0x00000000); + nv_mthd(dev, 0x9097, 0x0e50, 0x00000000); + nv_mthd(dev, 0x9097, 0x0e60, 0x00000000); + nv_mthd(dev, 0x9097, 0x0e70, 0x00000000); + nv_mthd(dev, 0x9097, 0x0e80, 0x00000000); + nv_mthd(dev, 0x9097, 0x0e90, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ea0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0eb0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ec0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ed0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ee0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ef0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0e04, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e14, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e24, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e34, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e44, 0xffff0000); + nv_mthd(dev, 
0x9097, 0x0e54, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e64, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e74, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e84, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e94, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0ea4, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0eb4, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0ec4, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0ed4, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0ee4, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0ef4, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e08, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e18, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e28, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e38, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e48, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e58, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e68, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e78, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e88, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0e98, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0ea8, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0eb8, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0ec8, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0ed8, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0ee8, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0ef8, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d40, 0x00000000); + nv_mthd(dev, 0x9097, 0x0d48, 0x00000000); + nv_mthd(dev, 0x9097, 0x0d50, 0x00000000); + nv_mthd(dev, 0x9097, 0x0d58, 0x00000000); + nv_mthd(dev, 0x9097, 0x0d44, 0x00000000); + nv_mthd(dev, 0x9097, 0x0d4c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0d54, 0x00000000); + nv_mthd(dev, 0x9097, 0x0d5c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1e00, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e20, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e40, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e60, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e80, 0x00000001); + nv_mthd(dev, 0x9097, 0x1ea0, 0x00000001); + nv_mthd(dev, 0x9097, 0x1ec0, 0x00000001); + nv_mthd(dev, 0x9097, 0x1ee0, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e04, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e24, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e44, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e64, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e84, 0x00000001); + nv_mthd(dev, 0x9097, 0x1ea4, 0x00000001); + nv_mthd(dev, 0x9097, 0x1ec4, 0x00000001); + nv_mthd(dev, 0x9097, 0x1ee4, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e08, 0x00000002); + nv_mthd(dev, 0x9097, 0x1e28, 0x00000002); + nv_mthd(dev, 0x9097, 0x1e48, 0x00000002); + nv_mthd(dev, 0x9097, 0x1e68, 0x00000002); + nv_mthd(dev, 0x9097, 0x1e88, 0x00000002); + nv_mthd(dev, 0x9097, 0x1ea8, 0x00000002); + nv_mthd(dev, 0x9097, 0x1ec8, 0x00000002); + nv_mthd(dev, 0x9097, 0x1ee8, 0x00000002); + nv_mthd(dev, 0x9097, 0x1e0c, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e2c, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e4c, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e6c, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e8c, 0x00000001); + nv_mthd(dev, 0x9097, 0x1eac, 0x00000001); + nv_mthd(dev, 0x9097, 0x1ecc, 0x00000001); + nv_mthd(dev, 0x9097, 0x1eec, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e10, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e30, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e50, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e70, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e90, 0x00000001); + nv_mthd(dev, 0x9097, 0x1eb0, 0x00000001); + nv_mthd(dev, 0x9097, 0x1ed0, 0x00000001); + nv_mthd(dev, 0x9097, 0x1ef0, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e14, 0x00000002); + nv_mthd(dev, 0x9097, 0x1e34, 0x00000002); + nv_mthd(dev, 0x9097, 0x1e54, 0x00000002); + nv_mthd(dev, 0x9097, 0x1e74, 0x00000002); + nv_mthd(dev, 0x9097, 0x1e94, 0x00000002); + nv_mthd(dev, 0x9097, 0x1eb4, 0x00000002); + 
nv_mthd(dev, 0x9097, 0x1ed4, 0x00000002); + nv_mthd(dev, 0x9097, 0x1ef4, 0x00000002); + nv_mthd(dev, 0x9097, 0x1e18, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e38, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e58, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e78, 0x00000001); + nv_mthd(dev, 0x9097, 0x1e98, 0x00000001); + nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001); + nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001); + nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001); + nv_mthd(dev, 0x9097, 0x3400, 0x00000000); + nv_mthd(dev, 0x9097, 0x3404, 0x00000000); + nv_mthd(dev, 0x9097, 0x3408, 0x00000000); + nv_mthd(dev, 0x9097, 0x340c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3410, 0x00000000); + nv_mthd(dev, 0x9097, 0x3414, 0x00000000); + nv_mthd(dev, 0x9097, 0x3418, 0x00000000); + nv_mthd(dev, 0x9097, 0x341c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3420, 0x00000000); + nv_mthd(dev, 0x9097, 0x3424, 0x00000000); + nv_mthd(dev, 0x9097, 0x3428, 0x00000000); + nv_mthd(dev, 0x9097, 0x342c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3430, 0x00000000); + nv_mthd(dev, 0x9097, 0x3434, 0x00000000); + nv_mthd(dev, 0x9097, 0x3438, 0x00000000); + nv_mthd(dev, 0x9097, 0x343c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3440, 0x00000000); + nv_mthd(dev, 0x9097, 0x3444, 0x00000000); + nv_mthd(dev, 0x9097, 0x3448, 0x00000000); + nv_mthd(dev, 0x9097, 0x344c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3450, 0x00000000); + nv_mthd(dev, 0x9097, 0x3454, 0x00000000); + nv_mthd(dev, 0x9097, 0x3458, 0x00000000); + nv_mthd(dev, 0x9097, 0x345c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3460, 0x00000000); + nv_mthd(dev, 0x9097, 0x3464, 0x00000000); + nv_mthd(dev, 0x9097, 0x3468, 0x00000000); + nv_mthd(dev, 0x9097, 0x346c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3470, 0x00000000); + nv_mthd(dev, 0x9097, 0x3474, 0x00000000); + nv_mthd(dev, 0x9097, 0x3478, 0x00000000); + nv_mthd(dev, 0x9097, 0x347c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3480, 0x00000000); + nv_mthd(dev, 0x9097, 0x3484, 0x00000000); + nv_mthd(dev, 0x9097, 0x3488, 0x00000000); + nv_mthd(dev, 0x9097, 0x348c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3490, 0x00000000); + nv_mthd(dev, 0x9097, 0x3494, 0x00000000); + nv_mthd(dev, 0x9097, 0x3498, 0x00000000); + nv_mthd(dev, 0x9097, 0x349c, 0x00000000); + nv_mthd(dev, 0x9097, 0x34a0, 0x00000000); + nv_mthd(dev, 0x9097, 0x34a4, 0x00000000); + nv_mthd(dev, 0x9097, 0x34a8, 0x00000000); + nv_mthd(dev, 0x9097, 0x34ac, 0x00000000); + nv_mthd(dev, 0x9097, 0x34b0, 0x00000000); + nv_mthd(dev, 0x9097, 0x34b4, 0x00000000); + nv_mthd(dev, 0x9097, 0x34b8, 0x00000000); + nv_mthd(dev, 0x9097, 0x34bc, 0x00000000); + nv_mthd(dev, 0x9097, 0x34c0, 0x00000000); + nv_mthd(dev, 0x9097, 0x34c4, 0x00000000); + nv_mthd(dev, 0x9097, 0x34c8, 0x00000000); + nv_mthd(dev, 0x9097, 0x34cc, 0x00000000); + nv_mthd(dev, 0x9097, 0x34d0, 0x00000000); + nv_mthd(dev, 0x9097, 0x34d4, 0x00000000); + nv_mthd(dev, 0x9097, 0x34d8, 0x00000000); + nv_mthd(dev, 0x9097, 0x34dc, 0x00000000); + nv_mthd(dev, 0x9097, 0x34e0, 0x00000000); + nv_mthd(dev, 0x9097, 0x34e4, 0x00000000); + nv_mthd(dev, 0x9097, 0x34e8, 0x00000000); + nv_mthd(dev, 0x9097, 0x34ec, 0x00000000); + nv_mthd(dev, 0x9097, 0x34f0, 0x00000000); + nv_mthd(dev, 0x9097, 0x34f4, 0x00000000); + nv_mthd(dev, 0x9097, 0x34f8, 0x00000000); + nv_mthd(dev, 0x9097, 0x34fc, 0x00000000); + nv_mthd(dev, 0x9097, 0x3500, 0x00000000); + nv_mthd(dev, 0x9097, 0x3504, 0x00000000); + nv_mthd(dev, 0x9097, 0x3508, 0x00000000); + nv_mthd(dev, 0x9097, 0x350c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3510, 0x00000000); + nv_mthd(dev, 0x9097, 0x3514, 0x00000000); + nv_mthd(dev, 0x9097, 0x3518, 
0x00000000); + nv_mthd(dev, 0x9097, 0x351c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3520, 0x00000000); + nv_mthd(dev, 0x9097, 0x3524, 0x00000000); + nv_mthd(dev, 0x9097, 0x3528, 0x00000000); + nv_mthd(dev, 0x9097, 0x352c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3530, 0x00000000); + nv_mthd(dev, 0x9097, 0x3534, 0x00000000); + nv_mthd(dev, 0x9097, 0x3538, 0x00000000); + nv_mthd(dev, 0x9097, 0x353c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3540, 0x00000000); + nv_mthd(dev, 0x9097, 0x3544, 0x00000000); + nv_mthd(dev, 0x9097, 0x3548, 0x00000000); + nv_mthd(dev, 0x9097, 0x354c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3550, 0x00000000); + nv_mthd(dev, 0x9097, 0x3554, 0x00000000); + nv_mthd(dev, 0x9097, 0x3558, 0x00000000); + nv_mthd(dev, 0x9097, 0x355c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3560, 0x00000000); + nv_mthd(dev, 0x9097, 0x3564, 0x00000000); + nv_mthd(dev, 0x9097, 0x3568, 0x00000000); + nv_mthd(dev, 0x9097, 0x356c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3570, 0x00000000); + nv_mthd(dev, 0x9097, 0x3574, 0x00000000); + nv_mthd(dev, 0x9097, 0x3578, 0x00000000); + nv_mthd(dev, 0x9097, 0x357c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3580, 0x00000000); + nv_mthd(dev, 0x9097, 0x3584, 0x00000000); + nv_mthd(dev, 0x9097, 0x3588, 0x00000000); + nv_mthd(dev, 0x9097, 0x358c, 0x00000000); + nv_mthd(dev, 0x9097, 0x3590, 0x00000000); + nv_mthd(dev, 0x9097, 0x3594, 0x00000000); + nv_mthd(dev, 0x9097, 0x3598, 0x00000000); + nv_mthd(dev, 0x9097, 0x359c, 0x00000000); + nv_mthd(dev, 0x9097, 0x35a0, 0x00000000); + nv_mthd(dev, 0x9097, 0x35a4, 0x00000000); + nv_mthd(dev, 0x9097, 0x35a8, 0x00000000); + nv_mthd(dev, 0x9097, 0x35ac, 0x00000000); + nv_mthd(dev, 0x9097, 0x35b0, 0x00000000); + nv_mthd(dev, 0x9097, 0x35b4, 0x00000000); + nv_mthd(dev, 0x9097, 0x35b8, 0x00000000); + nv_mthd(dev, 0x9097, 0x35bc, 0x00000000); + nv_mthd(dev, 0x9097, 0x35c0, 0x00000000); + nv_mthd(dev, 0x9097, 0x35c4, 0x00000000); + nv_mthd(dev, 0x9097, 0x35c8, 0x00000000); + nv_mthd(dev, 0x9097, 0x35cc, 0x00000000); + nv_mthd(dev, 0x9097, 0x35d0, 0x00000000); + nv_mthd(dev, 0x9097, 0x35d4, 0x00000000); + nv_mthd(dev, 0x9097, 0x35d8, 0x00000000); + nv_mthd(dev, 0x9097, 0x35dc, 0x00000000); + nv_mthd(dev, 0x9097, 0x35e0, 0x00000000); + nv_mthd(dev, 0x9097, 0x35e4, 0x00000000); + nv_mthd(dev, 0x9097, 0x35e8, 0x00000000); + nv_mthd(dev, 0x9097, 0x35ec, 0x00000000); + nv_mthd(dev, 0x9097, 0x35f0, 0x00000000); + nv_mthd(dev, 0x9097, 0x35f4, 0x00000000); + nv_mthd(dev, 0x9097, 0x35f8, 0x00000000); + nv_mthd(dev, 0x9097, 0x35fc, 0x00000000); + nv_mthd(dev, 0x9097, 0x030c, 0x00000001); + nv_mthd(dev, 0x9097, 0x1944, 0x00000000); + nv_mthd(dev, 0x9097, 0x1514, 0x00000000); + nv_mthd(dev, 0x9097, 0x0d68, 0x0000ffff); + nv_mthd(dev, 0x9097, 0x121c, 0x0fac6881); + nv_mthd(dev, 0x9097, 0x0fac, 0x00000001); + nv_mthd(dev, 0x9097, 0x1538, 0x00000001); + nv_mthd(dev, 0x9097, 0x0fe0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0fe4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0fe8, 0x00000014); + nv_mthd(dev, 0x9097, 0x0fec, 0x00000040); + nv_mthd(dev, 0x9097, 0x0ff0, 0x00000000); + nv_mthd(dev, 0x9097, 0x179c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1228, 0x00000400); + nv_mthd(dev, 0x9097, 0x122c, 0x00000300); + nv_mthd(dev, 0x9097, 0x1230, 0x00010001); + nv_mthd(dev, 0x9097, 0x07f8, 0x00000000); + nv_mthd(dev, 0x9097, 0x15b4, 0x00000001); + nv_mthd(dev, 0x9097, 0x15cc, 0x00000000); + nv_mthd(dev, 0x9097, 0x1534, 0x00000000); + nv_mthd(dev, 0x9097, 0x0fb0, 0x00000000); + nv_mthd(dev, 0x9097, 0x15d0, 0x00000000); + nv_mthd(dev, 0x9097, 0x153c, 0x00000000); + nv_mthd(dev, 
0x9097, 0x16b4, 0x00000003); + nv_mthd(dev, 0x9097, 0x0fbc, 0x0000ffff); + nv_mthd(dev, 0x9097, 0x0fc0, 0x0000ffff); + nv_mthd(dev, 0x9097, 0x0fc4, 0x0000ffff); + nv_mthd(dev, 0x9097, 0x0fc8, 0x0000ffff); + nv_mthd(dev, 0x9097, 0x0df8, 0x00000000); + nv_mthd(dev, 0x9097, 0x0dfc, 0x00000000); + nv_mthd(dev, 0x9097, 0x1948, 0x00000000); + nv_mthd(dev, 0x9097, 0x1970, 0x00000001); + nv_mthd(dev, 0x9097, 0x161c, 0x000009f0); + nv_mthd(dev, 0x9097, 0x0dcc, 0x00000010); + nv_mthd(dev, 0x9097, 0x163c, 0x00000000); + nv_mthd(dev, 0x9097, 0x15e4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1160, 0x25e00040); + nv_mthd(dev, 0x9097, 0x1164, 0x25e00040); + nv_mthd(dev, 0x9097, 0x1168, 0x25e00040); + nv_mthd(dev, 0x9097, 0x116c, 0x25e00040); + nv_mthd(dev, 0x9097, 0x1170, 0x25e00040); + nv_mthd(dev, 0x9097, 0x1174, 0x25e00040); + nv_mthd(dev, 0x9097, 0x1178, 0x25e00040); + nv_mthd(dev, 0x9097, 0x117c, 0x25e00040); + nv_mthd(dev, 0x9097, 0x1180, 0x25e00040); + nv_mthd(dev, 0x9097, 0x1184, 0x25e00040); + nv_mthd(dev, 0x9097, 0x1188, 0x25e00040); + nv_mthd(dev, 0x9097, 0x118c, 0x25e00040); + nv_mthd(dev, 0x9097, 0x1190, 0x25e00040); + nv_mthd(dev, 0x9097, 0x1194, 0x25e00040); + nv_mthd(dev, 0x9097, 0x1198, 0x25e00040); + nv_mthd(dev, 0x9097, 0x119c, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11a0, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11a4, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11a8, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11ac, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11b0, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11b4, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11b8, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11bc, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11c0, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11c4, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11c8, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11cc, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11d0, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11d4, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11d8, 0x25e00040); + nv_mthd(dev, 0x9097, 0x11dc, 0x25e00040); + nv_mthd(dev, 0x9097, 0x1880, 0x00000000); + nv_mthd(dev, 0x9097, 0x1884, 0x00000000); + nv_mthd(dev, 0x9097, 0x1888, 0x00000000); + nv_mthd(dev, 0x9097, 0x188c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1890, 0x00000000); + nv_mthd(dev, 0x9097, 0x1894, 0x00000000); + nv_mthd(dev, 0x9097, 0x1898, 0x00000000); + nv_mthd(dev, 0x9097, 0x189c, 0x00000000); + nv_mthd(dev, 0x9097, 0x18a0, 0x00000000); + nv_mthd(dev, 0x9097, 0x18a4, 0x00000000); + nv_mthd(dev, 0x9097, 0x18a8, 0x00000000); + nv_mthd(dev, 0x9097, 0x18ac, 0x00000000); + nv_mthd(dev, 0x9097, 0x18b0, 0x00000000); + nv_mthd(dev, 0x9097, 0x18b4, 0x00000000); + nv_mthd(dev, 0x9097, 0x18b8, 0x00000000); + nv_mthd(dev, 0x9097, 0x18bc, 0x00000000); + nv_mthd(dev, 0x9097, 0x18c0, 0x00000000); + nv_mthd(dev, 0x9097, 0x18c4, 0x00000000); + nv_mthd(dev, 0x9097, 0x18c8, 0x00000000); + nv_mthd(dev, 0x9097, 0x18cc, 0x00000000); + nv_mthd(dev, 0x9097, 0x18d0, 0x00000000); + nv_mthd(dev, 0x9097, 0x18d4, 0x00000000); + nv_mthd(dev, 0x9097, 0x18d8, 0x00000000); + nv_mthd(dev, 0x9097, 0x18dc, 0x00000000); + nv_mthd(dev, 0x9097, 0x18e0, 0x00000000); + nv_mthd(dev, 0x9097, 0x18e4, 0x00000000); + nv_mthd(dev, 0x9097, 0x18e8, 0x00000000); + nv_mthd(dev, 0x9097, 0x18ec, 0x00000000); + nv_mthd(dev, 0x9097, 0x18f0, 0x00000000); + nv_mthd(dev, 0x9097, 0x18f4, 0x00000000); + nv_mthd(dev, 0x9097, 0x18f8, 0x00000000); + nv_mthd(dev, 0x9097, 0x18fc, 0x00000000); + nv_mthd(dev, 0x9097, 0x0f84, 0x00000000); + nv_mthd(dev, 0x9097, 0x0f88, 0x00000000); + nv_mthd(dev, 0x9097, 0x17c8, 0x00000000); + nv_mthd(dev, 0x9097, 0x17cc, 0x00000000); + 
nv_mthd(dev, 0x9097, 0x17d0, 0x000000ff); + nv_mthd(dev, 0x9097, 0x17d4, 0xffffffff); + nv_mthd(dev, 0x9097, 0x17d8, 0x00000002); + nv_mthd(dev, 0x9097, 0x17dc, 0x00000000); + nv_mthd(dev, 0x9097, 0x15f4, 0x00000000); + nv_mthd(dev, 0x9097, 0x15f8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1434, 0x00000000); + nv_mthd(dev, 0x9097, 0x1438, 0x00000000); + nv_mthd(dev, 0x9097, 0x0d74, 0x00000000); + nv_mthd(dev, 0x9097, 0x0dec, 0x00000001); + nv_mthd(dev, 0x9097, 0x13a4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1318, 0x00000001); + nv_mthd(dev, 0x9097, 0x1644, 0x00000000); + nv_mthd(dev, 0x9097, 0x0748, 0x00000000); + nv_mthd(dev, 0x9097, 0x0de8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1648, 0x00000000); + nv_mthd(dev, 0x9097, 0x12a4, 0x00000000); + nv_mthd(dev, 0x9097, 0x1120, 0x00000000); + nv_mthd(dev, 0x9097, 0x1124, 0x00000000); + nv_mthd(dev, 0x9097, 0x1128, 0x00000000); + nv_mthd(dev, 0x9097, 0x112c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1118, 0x00000000); + nv_mthd(dev, 0x9097, 0x164c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1658, 0x00000000); + nv_mthd(dev, 0x9097, 0x1910, 0x00000290); + nv_mthd(dev, 0x9097, 0x1518, 0x00000000); + nv_mthd(dev, 0x9097, 0x165c, 0x00000001); + nv_mthd(dev, 0x9097, 0x1520, 0x00000000); + nv_mthd(dev, 0x9097, 0x1604, 0x00000000); + nv_mthd(dev, 0x9097, 0x1570, 0x00000000); + nv_mthd(dev, 0x9097, 0x13b0, 0x3f800000); + nv_mthd(dev, 0x9097, 0x13b4, 0x3f800000); + nv_mthd(dev, 0x9097, 0x020c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1670, 0x30201000); + nv_mthd(dev, 0x9097, 0x1674, 0x70605040); + nv_mthd(dev, 0x9097, 0x1678, 0xb8a89888); + nv_mthd(dev, 0x9097, 0x167c, 0xf8e8d8c8); + nv_mthd(dev, 0x9097, 0x166c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1680, 0x00ffff00); + nv_mthd(dev, 0x9097, 0x12d0, 0x00000003); + nv_mthd(dev, 0x9097, 0x12d4, 0x00000002); + nv_mthd(dev, 0x9097, 0x1684, 0x00000000); + nv_mthd(dev, 0x9097, 0x1688, 0x00000000); + nv_mthd(dev, 0x9097, 0x0dac, 0x00001b02); + nv_mthd(dev, 0x9097, 0x0db0, 0x00001b02); + nv_mthd(dev, 0x9097, 0x0db4, 0x00000000); + nv_mthd(dev, 0x9097, 0x168c, 0x00000000); + nv_mthd(dev, 0x9097, 0x15bc, 0x00000000); + nv_mthd(dev, 0x9097, 0x156c, 0x00000000); + nv_mthd(dev, 0x9097, 0x187c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1110, 0x00000001); + nv_mthd(dev, 0x9097, 0x0dc0, 0x00000000); + nv_mthd(dev, 0x9097, 0x0dc4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0dc8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1234, 0x00000000); + nv_mthd(dev, 0x9097, 0x1690, 0x00000000); + nv_mthd(dev, 0x9097, 0x12ac, 0x00000001); + nv_mthd(dev, 0x9097, 0x02c4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0790, 0x00000000); + nv_mthd(dev, 0x9097, 0x0794, 0x00000000); + nv_mthd(dev, 0x9097, 0x0798, 0x00000000); + nv_mthd(dev, 0x9097, 0x079c, 0x00000000); + nv_mthd(dev, 0x9097, 0x07a0, 0x00000000); + nv_mthd(dev, 0x9097, 0x077c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1000, 0x00000010); + nv_mthd(dev, 0x9097, 0x10fc, 0x00000000); + nv_mthd(dev, 0x9097, 0x1290, 0x00000000); + nv_mthd(dev, 0x9097, 0x0218, 0x00000010); + nv_mthd(dev, 0x9097, 0x12d8, 0x00000000); + nv_mthd(dev, 0x9097, 0x12dc, 0x00000010); + nv_mthd(dev, 0x9097, 0x0d94, 0x00000001); + nv_mthd(dev, 0x9097, 0x155c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1560, 0x00000000); + nv_mthd(dev, 0x9097, 0x1564, 0x00001fff); + nv_mthd(dev, 0x9097, 0x1574, 0x00000000); + nv_mthd(dev, 0x9097, 0x1578, 0x00000000); + nv_mthd(dev, 0x9097, 0x157c, 0x003fffff); + nv_mthd(dev, 0x9097, 0x1354, 0x00000000); + nv_mthd(dev, 0x9097, 0x1664, 0x00000000); + nv_mthd(dev, 0x9097, 0x1610, 0x00000012); + nv_mthd(dev, 0x9097, 0x1608, 
0x00000000); + nv_mthd(dev, 0x9097, 0x160c, 0x00000000); + nv_mthd(dev, 0x9097, 0x162c, 0x00000003); + nv_mthd(dev, 0x9097, 0x0210, 0x00000000); + nv_mthd(dev, 0x9097, 0x0320, 0x00000000); + nv_mthd(dev, 0x9097, 0x0324, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0328, 0x3f800000); + nv_mthd(dev, 0x9097, 0x032c, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0330, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0334, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0338, 0x3f800000); + nv_mthd(dev, 0x9097, 0x0750, 0x00000000); + nv_mthd(dev, 0x9097, 0x0760, 0x39291909); + nv_mthd(dev, 0x9097, 0x0764, 0x79695949); + nv_mthd(dev, 0x9097, 0x0768, 0xb9a99989); + nv_mthd(dev, 0x9097, 0x076c, 0xf9e9d9c9); + nv_mthd(dev, 0x9097, 0x0770, 0x30201000); + nv_mthd(dev, 0x9097, 0x0774, 0x70605040); + nv_mthd(dev, 0x9097, 0x0778, 0x00009080); + nv_mthd(dev, 0x9097, 0x0780, 0x39291909); + nv_mthd(dev, 0x9097, 0x0784, 0x79695949); + nv_mthd(dev, 0x9097, 0x0788, 0xb9a99989); + nv_mthd(dev, 0x9097, 0x078c, 0xf9e9d9c9); + nv_mthd(dev, 0x9097, 0x07d0, 0x30201000); + nv_mthd(dev, 0x9097, 0x07d4, 0x70605040); + nv_mthd(dev, 0x9097, 0x07d8, 0x00009080); + nv_mthd(dev, 0x9097, 0x037c, 0x00000001); + nv_mthd(dev, 0x9097, 0x0740, 0x00000000); + nv_mthd(dev, 0x9097, 0x0744, 0x00000000); + nv_mthd(dev, 0x9097, 0x2600, 0x00000000); + nv_mthd(dev, 0x9097, 0x1918, 0x00000000); + nv_mthd(dev, 0x9097, 0x191c, 0x00000900); + nv_mthd(dev, 0x9097, 0x1920, 0x00000405); + nv_mthd(dev, 0x9097, 0x1308, 0x00000001); + nv_mthd(dev, 0x9097, 0x1924, 0x00000000); + nv_mthd(dev, 0x9097, 0x13ac, 0x00000000); + nv_mthd(dev, 0x9097, 0x192c, 0x00000001); + nv_mthd(dev, 0x9097, 0x193c, 0x00002c1c); + nv_mthd(dev, 0x9097, 0x0d7c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0f8c, 0x00000000); + nv_mthd(dev, 0x9097, 0x02c0, 0x00000001); + nv_mthd(dev, 0x9097, 0x1510, 0x00000000); + nv_mthd(dev, 0x9097, 0x1940, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ff4, 0x00000000); + nv_mthd(dev, 0x9097, 0x0ff8, 0x00000000); + nv_mthd(dev, 0x9097, 0x194c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1950, 0x00000000); + nv_mthd(dev, 0x9097, 0x1968, 0x00000000); + nv_mthd(dev, 0x9097, 0x1590, 0x0000003f); + nv_mthd(dev, 0x9097, 0x07e8, 0x00000000); + nv_mthd(dev, 0x9097, 0x07ec, 0x00000000); + nv_mthd(dev, 0x9097, 0x07f0, 0x00000000); + nv_mthd(dev, 0x9097, 0x07f4, 0x00000000); + nv_mthd(dev, 0x9097, 0x196c, 0x00000011); + nv_mthd(dev, 0x9097, 0x197c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0fcc, 0x00000000); + nv_mthd(dev, 0x9097, 0x0fd0, 0x00000000); + nv_mthd(dev, 0x9097, 0x02d8, 0x00000040); + nv_mthd(dev, 0x9097, 0x1980, 0x00000080); + nv_mthd(dev, 0x9097, 0x1504, 0x00000080); + nv_mthd(dev, 0x9097, 0x1984, 0x00000000); + nv_mthd(dev, 0x9097, 0x0300, 0x00000001); + nv_mthd(dev, 0x9097, 0x13a8, 0x00000000); + nv_mthd(dev, 0x9097, 0x12ec, 0x00000000); + nv_mthd(dev, 0x9097, 0x1310, 0x00000000); + nv_mthd(dev, 0x9097, 0x1314, 0x00000001); + nv_mthd(dev, 0x9097, 0x1380, 0x00000000); + nv_mthd(dev, 0x9097, 0x1384, 0x00000001); + nv_mthd(dev, 0x9097, 0x1388, 0x00000001); + nv_mthd(dev, 0x9097, 0x138c, 0x00000001); + nv_mthd(dev, 0x9097, 0x1390, 0x00000001); + nv_mthd(dev, 0x9097, 0x1394, 0x00000000); + nv_mthd(dev, 0x9097, 0x139c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1398, 0x00000000); + nv_mthd(dev, 0x9097, 0x1594, 0x00000000); + nv_mthd(dev, 0x9097, 0x1598, 0x00000001); + nv_mthd(dev, 0x9097, 0x159c, 0x00000001); + nv_mthd(dev, 0x9097, 0x15a0, 0x00000001); + nv_mthd(dev, 0x9097, 0x15a4, 0x00000001); + nv_mthd(dev, 0x9097, 0x0f54, 0x00000000); + nv_mthd(dev, 0x9097, 0x0f58, 0x00000000); + nv_mthd(dev, 
0x9097, 0x0f5c, 0x00000000); + nv_mthd(dev, 0x9097, 0x19bc, 0x00000000); + nv_mthd(dev, 0x9097, 0x0f9c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0fa0, 0x00000000); + nv_mthd(dev, 0x9097, 0x12cc, 0x00000000); + nv_mthd(dev, 0x9097, 0x12e8, 0x00000000); + nv_mthd(dev, 0x9097, 0x130c, 0x00000001); + nv_mthd(dev, 0x9097, 0x1360, 0x00000000); + nv_mthd(dev, 0x9097, 0x1364, 0x00000000); + nv_mthd(dev, 0x9097, 0x1368, 0x00000000); + nv_mthd(dev, 0x9097, 0x136c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1370, 0x00000000); + nv_mthd(dev, 0x9097, 0x1374, 0x00000000); + nv_mthd(dev, 0x9097, 0x1378, 0x00000000); + nv_mthd(dev, 0x9097, 0x137c, 0x00000000); + nv_mthd(dev, 0x9097, 0x133c, 0x00000001); + nv_mthd(dev, 0x9097, 0x1340, 0x00000001); + nv_mthd(dev, 0x9097, 0x1344, 0x00000002); + nv_mthd(dev, 0x9097, 0x1348, 0x00000001); + nv_mthd(dev, 0x9097, 0x134c, 0x00000001); + nv_mthd(dev, 0x9097, 0x1350, 0x00000002); + nv_mthd(dev, 0x9097, 0x1358, 0x00000001); + nv_mthd(dev, 0x9097, 0x12e4, 0x00000000); + nv_mthd(dev, 0x9097, 0x131c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1320, 0x00000000); + nv_mthd(dev, 0x9097, 0x1324, 0x00000000); + nv_mthd(dev, 0x9097, 0x1328, 0x00000000); + nv_mthd(dev, 0x9097, 0x19c0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1140, 0x00000000); + nv_mthd(dev, 0x9097, 0x19c4, 0x00000000); + nv_mthd(dev, 0x9097, 0x19c8, 0x00001500); + nv_mthd(dev, 0x9097, 0x135c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0f90, 0x00000000); + nv_mthd(dev, 0x9097, 0x19e0, 0x00000001); + nv_mthd(dev, 0x9097, 0x19e4, 0x00000001); + nv_mthd(dev, 0x9097, 0x19e8, 0x00000001); + nv_mthd(dev, 0x9097, 0x19ec, 0x00000001); + nv_mthd(dev, 0x9097, 0x19f0, 0x00000001); + nv_mthd(dev, 0x9097, 0x19f4, 0x00000001); + nv_mthd(dev, 0x9097, 0x19f8, 0x00000001); + nv_mthd(dev, 0x9097, 0x19fc, 0x00000001); + nv_mthd(dev, 0x9097, 0x19cc, 0x00000001); + nv_mthd(dev, 0x9097, 0x15b8, 0x00000000); + nv_mthd(dev, 0x9097, 0x1a00, 0x00001111); + nv_mthd(dev, 0x9097, 0x1a04, 0x00000000); + nv_mthd(dev, 0x9097, 0x1a08, 0x00000000); + nv_mthd(dev, 0x9097, 0x1a0c, 0x00000000); + nv_mthd(dev, 0x9097, 0x1a10, 0x00000000); + nv_mthd(dev, 0x9097, 0x1a14, 0x00000000); + nv_mthd(dev, 0x9097, 0x1a18, 0x00000000); + nv_mthd(dev, 0x9097, 0x1a1c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0d6c, 0xffff0000); + nv_mthd(dev, 0x9097, 0x0d70, 0xffff0000); + nv_mthd(dev, 0x9097, 0x10f8, 0x00001010); + nv_mthd(dev, 0x9097, 0x0d80, 0x00000000); + nv_mthd(dev, 0x9097, 0x0d84, 0x00000000); + nv_mthd(dev, 0x9097, 0x0d88, 0x00000000); + nv_mthd(dev, 0x9097, 0x0d8c, 0x00000000); + nv_mthd(dev, 0x9097, 0x0d90, 0x00000000); + nv_mthd(dev, 0x9097, 0x0da0, 0x00000000); + nv_mthd(dev, 0x9097, 0x1508, 0x80000000); + nv_mthd(dev, 0x9097, 0x150c, 0x40000000); + nv_mthd(dev, 0x9097, 0x1668, 0x00000000); + nv_mthd(dev, 0x9097, 0x0318, 0x00000008); + nv_mthd(dev, 0x9097, 0x031c, 0x00000008); + nv_mthd(dev, 0x9097, 0x0d9c, 0x00000001); + nv_mthd(dev, 0x9097, 0x07dc, 0x00000000); + nv_mthd(dev, 0x9097, 0x074c, 0x00000055); + nv_mthd(dev, 0x9097, 0x1420, 0x00000003); + nv_mthd(dev, 0x9097, 0x17bc, 0x00000000); + nv_mthd(dev, 0x9097, 0x17c0, 0x00000000); + nv_mthd(dev, 0x9097, 0x17c4, 0x00000001); + nv_mthd(dev, 0x9097, 0x1008, 0x00000008); + nv_mthd(dev, 0x9097, 0x100c, 0x00000040); + nv_mthd(dev, 0x9097, 0x1010, 0x0000012c); + nv_mthd(dev, 0x9097, 0x0d60, 0x00000040); + nv_mthd(dev, 0x9097, 0x075c, 0x00000003); + nv_mthd(dev, 0x9097, 0x1018, 0x00000020); + nv_mthd(dev, 0x9097, 0x101c, 0x00000001); + nv_mthd(dev, 0x9097, 0x1020, 0x00000020); + nv_mthd(dev, 0x9097, 0x1024, 0x00000001); + 
nv_mthd(dev, 0x9097, 0x1444, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1448, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x144c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0360, 0x20164010);
+	nv_mthd(dev, 0x9097, 0x0364, 0x00000020);
+	nv_mthd(dev, 0x9097, 0x0368, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0de4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0204, 0x00000006);
+	nv_mthd(dev, 0x9097, 0x0208, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x02cc, 0x003fffff);
+	nv_mthd(dev, 0x9097, 0x02d0, 0x00000c48);
+	nv_mthd(dev, 0x9097, 0x1220, 0x00000005);
+	nv_mthd(dev, 0x9097, 0x0fdc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f98, 0x00300008);
+	nv_mthd(dev, 0x9097, 0x1284, 0x04000080);
+	nv_mthd(dev, 0x9097, 0x1450, 0x00300008);
+	nv_mthd(dev, 0x9097, 0x1454, 0x04000080);
+	nv_mthd(dev, 0x9097, 0x0214, 0x00000000);
+	/* in trace, right after 0x90c0, not here */
+	nv_mthd(dev, 0x9097, 0x3410, 0x80002006);
+}
+
+static void
+nvc0_grctx_generate_902d(struct drm_device *dev)
+{
+	nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
+	nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
+	nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
+	nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
+	nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
+	nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
+	nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
+	nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
+	nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
+	nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
+	nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
+	nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
+	nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
+	nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
+	nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
+	nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
+	nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
+}
+
+static void
+nvc0_grctx_generate_9039(struct drm_device *dev)
+{
+	nv_mthd(dev, 0x9039, 0x030c, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0310, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0314, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0320, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0238, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x023c, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0318, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x031c, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_90c0(struct drm_device *dev)
+{
+	nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x276c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x278c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
+	nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
+	nv_mthd(dev, 0x90c0, 0x02c4, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0790, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0794, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0798, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x079c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x07a0, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x077c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0204, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0208, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x020c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0214, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x024c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0d94, 0x00000001);
+	nv_mthd(dev, 0x90c0, 0x1608, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x160c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x1664, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_dispatch(struct drm_device *dev)
+{
+	int i;
+
+	nv_wr32(dev, 0x404004, 0x00000000);
+	nv_wr32(dev, 0x404008, 0x00000000);
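+	/*
+	 * Neither the DISPATCH block in this function nor the per-class
+	 * method defaults above are documented anywhere; judging by the
+	 * "in trace" note above, they appear to have been captured from
+	 * an mmio trace of the binary driver, so treat them as opaque
+	 * golden state rather than values that can be derived.
+	 */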
+	nv_wr32(dev, 0x40400c, 0x00000000);
+	nv_wr32(dev, 0x404010, 0x00000000);
+	nv_wr32(dev, 0x404014, 0x00000000);
+	nv_wr32(dev, 0x404018, 0x00000000);
+	nv_wr32(dev, 0x40401c, 0x00000000);
+	nv_wr32(dev, 0x404020, 0x00000000);
+	nv_wr32(dev, 0x404024, 0x00000000);
+	nv_wr32(dev, 0x404028, 0x00000000);
+	nv_wr32(dev, 0x40402c, 0x00000000);
+	nv_wr32(dev, 0x404044, 0x00000000);
+	nv_wr32(dev, 0x404094, 0x00000000);
+	nv_wr32(dev, 0x404098, 0x00000000);
+	nv_wr32(dev, 0x40409c, 0x00000000);
+	nv_wr32(dev, 0x4040a0, 0x00000000);
+	nv_wr32(dev, 0x4040a4, 0x00000000);
+	nv_wr32(dev, 0x4040a8, 0x00000000);
+	nv_wr32(dev, 0x4040ac, 0x00000000);
+	nv_wr32(dev, 0x4040b0, 0x00000000);
+	nv_wr32(dev, 0x4040b4, 0x00000000);
+	nv_wr32(dev, 0x4040b8, 0x00000000);
+	nv_wr32(dev, 0x4040bc, 0x00000000);
+	nv_wr32(dev, 0x4040c0, 0x00000000);
+	nv_wr32(dev, 0x4040c4, 0x00000000);
+	nv_wr32(dev, 0x4040c8, 0xf0000087);
+	nv_wr32(dev, 0x4040d4, 0x00000000);
+	nv_wr32(dev, 0x4040d8, 0x00000000);
+	nv_wr32(dev, 0x4040dc, 0x00000000);
+	nv_wr32(dev, 0x4040e0, 0x00000000);
+	nv_wr32(dev, 0x4040e4, 0x00000000);
+	nv_wr32(dev, 0x4040e8, 0x00001000);
+	nv_wr32(dev, 0x4040f8, 0x00000000);
+	nv_wr32(dev, 0x404130, 0x00000000);
+	nv_wr32(dev, 0x404134, 0x00000000);
+	nv_wr32(dev, 0x404138, 0x20000040);
+	nv_wr32(dev, 0x404150, 0x0000002e);
+	nv_wr32(dev, 0x404154, 0x00000400);
+	nv_wr32(dev, 0x404158, 0x00000200);
+	nv_wr32(dev, 0x404164, 0x00000055);
+	nv_wr32(dev, 0x404168, 0x00000000);
+	nv_wr32(dev, 0x404174, 0x00000000);
+	nv_wr32(dev, 0x404178, 0x00000000);
+	nv_wr32(dev, 0x40417c, 0x00000000);
+	for (i = 0; i < 8; i++)
+		nv_wr32(dev, 0x404200 + (i * 4), 0x00000000); /* subc */
+}
+
+static void
+nvc0_grctx_generate_macro(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x404404, 0x00000000);
+	nv_wr32(dev, 0x404408, 0x00000000);
+	nv_wr32(dev, 0x40440c, 0x00000000);
+	nv_wr32(dev, 0x404410, 0x00000000);
+	nv_wr32(dev, 0x404414, 0x00000000);
+	nv_wr32(dev, 0x404418, 0x00000000);
+	nv_wr32(dev, 0x40441c, 0x00000000);
+	nv_wr32(dev, 0x404420, 0x00000000);
+	nv_wr32(dev, 0x404424, 0x00000000);
+	nv_wr32(dev, 0x404428, 0x00000000);
+	nv_wr32(dev, 0x40442c, 0x00000000);
+	nv_wr32(dev, 0x404430, 0x00000000);
+	nv_wr32(dev, 0x404434, 0x00000000);
+	nv_wr32(dev, 0x404438, 0x00000000);
+	nv_wr32(dev, 0x404460, 0x00000000);
+	nv_wr32(dev, 0x404464, 0x00000000);
+	nv_wr32(dev, 0x404468, 0x00ffffff);
+	nv_wr32(dev, 0x40446c, 0x00000000);
+	nv_wr32(dev, 0x404480, 0x00000001);
+	nv_wr32(dev, 0x404498, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_m2mf(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x404604, 0x00000015);
+	nv_wr32(dev, 0x404608, 0x00000000);
+	nv_wr32(dev, 0x40460c, 0x00002e00);
+	nv_wr32(dev, 0x404610, 0x00000100);
+	nv_wr32(dev, 0x404618, 0x00000000);
+	nv_wr32(dev, 0x40461c, 0x00000000);
+	nv_wr32(dev, 0x404620, 0x00000000);
+	nv_wr32(dev, 0x404624, 0x00000000);
+	nv_wr32(dev, 0x404628, 0x00000000);
+	nv_wr32(dev, 0x40462c, 0x00000000);
+	nv_wr32(dev, 0x404630, 0x00000000);
+	nv_wr32(dev, 0x404634, 0x00000000);
+	nv_wr32(dev, 0x404638, 0x00000004);
+	nv_wr32(dev, 0x40463c, 0x00000000);
+	nv_wr32(dev, 0x404640, 0x00000000);
+	nv_wr32(dev, 0x404644, 0x00000000);
+	nv_wr32(dev, 0x404648, 0x00000000);
+	nv_wr32(dev, 0x40464c, 0x00000000);
+	nv_wr32(dev, 0x404650, 0x00000000);
+	nv_wr32(dev, 0x404654, 0x00000000);
+	nv_wr32(dev, 0x404658, 0x00000000);
+	nv_wr32(dev, 0x40465c, 0x007f0100);
+	nv_wr32(dev, 0x404660, 0x00000000);
+	nv_wr32(dev, 0x404664, 0x00000000);
+	nv_wr32(dev, 0x404668, 0x00000000);
+	nv_wr32(dev, 0x40466c, 0x00000000);
+	nv_wr32(dev, 0x404670, 0x00000000);
+	nv_wr32(dev, 0x404674, 0x00000000);
+	nv_wr32(dev, 0x404678, 0x00000000);
+	nv_wr32(dev, 0x40467c, 0x00000002);
+	nv_wr32(dev, 0x404680, 0x00000000);
+	nv_wr32(dev, 0x404684, 0x00000000);
+	nv_wr32(dev, 0x404688, 0x00000000);
+	nv_wr32(dev, 0x40468c, 0x00000000);
+	nv_wr32(dev, 0x404690, 0x00000000);
+	nv_wr32(dev, 0x404694, 0x00000000);
+	nv_wr32(dev, 0x404698, 0x00000000);
+	nv_wr32(dev, 0x40469c, 0x00000000);
+	nv_wr32(dev, 0x4046a0, 0x007f0080);
+	nv_wr32(dev, 0x4046a4, 0x00000000);
+	nv_wr32(dev, 0x4046a8, 0x00000000);
+	nv_wr32(dev, 0x4046ac, 0x00000000);
+	nv_wr32(dev, 0x4046b0, 0x00000000);
+	nv_wr32(dev, 0x4046b4, 0x00000000);
+	nv_wr32(dev, 0x4046b8, 0x00000000);
+	nv_wr32(dev, 0x4046bc, 0x00000000);
+	nv_wr32(dev, 0x4046c0, 0x00000000);
+	nv_wr32(dev, 0x4046c4, 0x00000000);
+	nv_wr32(dev, 0x4046c8, 0x00000000);
+	nv_wr32(dev, 0x4046cc, 0x00000000);
+	nv_wr32(dev, 0x4046d0, 0x00000000);
+	nv_wr32(dev, 0x4046d4, 0x00000000);
+	nv_wr32(dev, 0x4046d8, 0x00000000);
+	nv_wr32(dev, 0x4046dc, 0x00000000);
+	nv_wr32(dev, 0x4046e0, 0x00000000);
+	nv_wr32(dev, 0x4046e4, 0x00000000);
+	nv_wr32(dev, 0x4046e8, 0x00000000);
+	nv_wr32(dev, 0x4046f0, 0x00000000);
+	nv_wr32(dev, 0x4046f4, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk47xx(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x404700, 0x00000000);
+	nv_wr32(dev, 0x404704, 0x00000000);
+	nv_wr32(dev, 0x404708, 0x00000000);
+	nv_wr32(dev, 0x40470c, 0x00000000);
+	nv_wr32(dev, 0x404710, 0x00000000);
+	nv_wr32(dev, 0x404714, 0x00000000);
+	nv_wr32(dev, 0x404718, 0x00000000);
+	nv_wr32(dev, 0x40471c, 0x00000000);
+	nv_wr32(dev, 0x404720, 0x00000000);
+	nv_wr32(dev, 0x404724, 0x00000000);
+	nv_wr32(dev, 0x404728, 0x00000000);
+	nv_wr32(dev, 0x40472c, 0x00000000);
+	nv_wr32(dev, 0x404730, 0x00000000);
+	nv_wr32(dev, 0x404734, 0x00000100);
+	nv_wr32(dev, 0x404738, 0x00000000);
+	nv_wr32(dev, 0x40473c, 0x00000000);
+	nv_wr32(dev, 0x404740, 0x00000000);
+	nv_wr32(dev, 0x404744, 0x00000000);
+	nv_wr32(dev, 0x404748, 0x00000000);
+	nv_wr32(dev, 0x40474c, 0x00000000);
+	nv_wr32(dev, 0x404750, 0x00000000);
+	nv_wr32(dev, 0x404754, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk58xx(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x405800, 0x078000bf);
+	nv_wr32(dev, 0x405830, 0x02180000);
+	nv_wr32(dev, 0x405834, 0x00000000);
+	nv_wr32(dev, 0x405838, 0x00000000);
+	nv_wr32(dev, 0x405854, 0x00000000);
+	nv_wr32(dev, 0x405870, 0x00000001);
+	nv_wr32(dev, 0x405874, 0x00000001);
+	nv_wr32(dev, 0x405878, 0x00000001);
+	nv_wr32(dev, 0x40587c, 0x00000001);
+	nv_wr32(dev, 0x405a00, 0x00000000);
+	nv_wr32(dev, 0x405a04, 0x00000000);
+	nv_wr32(dev, 0x405a18, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk60xx(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x406020, 0x000103c1);
+	nv_wr32(dev, 0x406028, 0x00000001);
+	nv_wr32(dev, 0x40602c, 0x00000001);
+	nv_wr32(dev, 0x406030, 0x00000001);
+	nv_wr32(dev, 0x406034, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_unk64xx(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x4064a8, 0x00000000);
+	nv_wr32(dev, 0x4064ac, 0x00003fff);
+	nv_wr32(dev, 0x4064b4, 0x00000000);
+	nv_wr32(dev, 0x4064b8, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk78xx(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x407804, 0x00000023);
+	nv_wr32(dev, 0x40780c, 0x0a418820);
+	nv_wr32(dev, 0x407810, 0x062080e6);
+	nv_wr32(dev, 0x407814, 0x020398a4);
+	nv_wr32(dev, 0x407818, 0x0e629062);
+	nv_wr32(dev, 0x40781c, 0x0a418820);
+	nv_wr32(dev, 0x407820, 0x000000e6);
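+	/*
+	 * The 0x40780c-0x407820 words above look like placeholders: the
+	 * same packed routing tables show up at 0x418b08 and 0x419b00,
+	 * and nvc0_grctx_generate() later overwrites all three copies
+	 * with values computed from the probed layout using, for TP t
+	 * assigned to GPC g:
+	 *
+	 *	data[t / 6] |= g << ((t % 6) * 5);
+	 *
+	 * i.e. six 5-bit GPC indices packed into each 32-bit word.
+	 */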
+	nv_wr32(dev, 0x4078bc, 0x00000103);
+}
+
+static void
+nvc0_grctx_generate_unk80xx(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x408000, 0x00000000);
+	nv_wr32(dev, 0x408004, 0x00000000);
+	nv_wr32(dev, 0x408008, 0x00000018);
+	nv_wr32(dev, 0x40800c, 0x00000000);
+	nv_wr32(dev, 0x408010, 0x00000000);
+	nv_wr32(dev, 0x408014, 0x00000069);
+	nv_wr32(dev, 0x408018, 0xe100e100);
+	nv_wr32(dev, 0x408064, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_rop(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	// ROPC_BROADCAST
+	nv_wr32(dev, 0x408800, 0x02802a3c);
+	nv_wr32(dev, 0x408804, 0x00000040);
+	nv_wr32(dev, 0x408808, 0x0003e00d);
+	switch (dev_priv->chipset) {
+	case 0xc0:
+		nv_wr32(dev, 0x408900, 0x0080b801);
+		break;
+	case 0xc3:
+	case 0xc4:
+		nv_wr32(dev, 0x408900, 0x3080b801);
+		break;
+	}
+	nv_wr32(dev, 0x408904, 0x02000001);
+	nv_wr32(dev, 0x408908, 0x00c80929);
+	nv_wr32(dev, 0x40890c, 0x00000000);
+	nv_wr32(dev, 0x408980, 0x0000011d);
+}
+
+static void
+nvc0_grctx_generate_gpc(struct drm_device *dev)
+{
+	int i;
+
+	// GPC_BROADCAST
+	nv_wr32(dev, 0x418380, 0x00000016);
+	nv_wr32(dev, 0x418400, 0x38004e00);
+	nv_wr32(dev, 0x418404, 0x71e0ffff);
+	nv_wr32(dev, 0x418408, 0x00000000);
+	nv_wr32(dev, 0x41840c, 0x00001008);
+	nv_wr32(dev, 0x418410, 0x0fff0fff);
+	nv_wr32(dev, 0x418414, 0x00200fff);
+	nv_wr32(dev, 0x418450, 0x00000000);
+	nv_wr32(dev, 0x418454, 0x00000000);
+	nv_wr32(dev, 0x418458, 0x00000000);
+	nv_wr32(dev, 0x41845c, 0x00000000);
+	nv_wr32(dev, 0x418460, 0x00000000);
+	nv_wr32(dev, 0x418464, 0x00000000);
+	nv_wr32(dev, 0x418468, 0x00000001);
+	nv_wr32(dev, 0x41846c, 0x00000000);
+	nv_wr32(dev, 0x418470, 0x00000000);
+	nv_wr32(dev, 0x418600, 0x0000001f);
+	nv_wr32(dev, 0x418684, 0x0000000f);
+	nv_wr32(dev, 0x418700, 0x00000002);
+	nv_wr32(dev, 0x418704, 0x00000080);
+	nv_wr32(dev, 0x418708, 0x00000000);
+	nv_wr32(dev, 0x41870c, 0x07c80000);
+	nv_wr32(dev, 0x418710, 0x00000000);
+	nv_wr32(dev, 0x418800, 0x0006860a);
+	nv_wr32(dev, 0x418808, 0x00000000);
+	nv_wr32(dev, 0x41880c, 0x00000000);
+	nv_wr32(dev, 0x418810, 0x00000000);
+	nv_wr32(dev, 0x418828, 0x00008442);
+	nv_wr32(dev, 0x418830, 0x00000001);
+	nv_wr32(dev, 0x4188d8, 0x00000008);
+	nv_wr32(dev, 0x4188e0, 0x01000000);
+	nv_wr32(dev, 0x4188e8, 0x00000000);
+	nv_wr32(dev, 0x4188ec, 0x00000000);
+	nv_wr32(dev, 0x4188f0, 0x00000000);
+	nv_wr32(dev, 0x4188f4, 0x00000000);
+	nv_wr32(dev, 0x4188f8, 0x00000000);
+	nv_wr32(dev, 0x4188fc, 0x00100000);
+	nv_wr32(dev, 0x41891c, 0x00ff00ff);
+	nv_wr32(dev, 0x418924, 0x00000000);
+	nv_wr32(dev, 0x418928, 0x00ffff00);
+	nv_wr32(dev, 0x41892c, 0x0000ff00);
+	for (i = 0; i < 8; i++) {
+		nv_wr32(dev, 0x418a00 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a04 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a08 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a0c + (i * 0x20), 0x00010000);
+		nv_wr32(dev, 0x418a10 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
+	}
+	nv_wr32(dev, 0x418b00, 0x00000000);
+	nv_wr32(dev, 0x418b08, 0x0a418820);
+	nv_wr32(dev, 0x418b0c, 0x062080e6);
+	nv_wr32(dev, 0x418b10, 0x020398a4);
+	nv_wr32(dev, 0x418b14, 0x0e629062);
+	nv_wr32(dev, 0x418b18, 0x0a418820);
+	nv_wr32(dev, 0x418b1c, 0x000000e6);
+	nv_wr32(dev, 0x418bb8, 0x00000103);
+	nv_wr32(dev, 0x418c08, 0x00000001);
+	nv_wr32(dev, 0x418c10, 0x00000000);
+	nv_wr32(dev, 0x418c14, 0x00000000);
+	nv_wr32(dev, 0x418c18, 0x00000000);
+	nv_wr32(dev, 0x418c1c, 0x00000000);
+	nv_wr32(dev, 0x418c20, 0x00000000);
+	nv_wr32(dev, 0x418c24, 0x00000000);
+	nv_wr32(dev, 0x418c28, 0x00000000);
+	nv_wr32(dev, 0x418c2c, 0x00000000);
+	nv_wr32(dev, 0x418c80, 0x20200004);
+	nv_wr32(dev, 0x418c8c, 0x00000001);
+	nv_wr32(dev, 0x419000, 0x00000780);
+	nv_wr32(dev, 0x419004, 0x00000000);
+	nv_wr32(dev, 0x419008, 0x00000000);
+	nv_wr32(dev, 0x419014, 0x00000004);
+}
+
+static void
+nvc0_grctx_generate_tp(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	// GPC_BROADCAST.TP_BROADCAST
+	nv_wr32(dev, 0x419848, 0x00000000);
+	nv_wr32(dev, 0x419864, 0x0000012a);
+	nv_wr32(dev, 0x419888, 0x00000000);
+	nv_wr32(dev, 0x419a00, 0x000001f0);
+	nv_wr32(dev, 0x419a04, 0x00000001);
+	nv_wr32(dev, 0x419a08, 0x00000023);
+	nv_wr32(dev, 0x419a0c, 0x00020000);
+	nv_wr32(dev, 0x419a10, 0x00000000);
+	nv_wr32(dev, 0x419a14, 0x00000200);
+	nv_wr32(dev, 0x419a1c, 0x00000000);
+	nv_wr32(dev, 0x419a20, 0x00000800);
+	if (dev_priv->chipset != 0xc0)
+		nv_wr32(dev, 0x00419ac4, 0x0007f440); // 0xc3
+	nv_wr32(dev, 0x419b00, 0x0a418820);
+	nv_wr32(dev, 0x419b04, 0x062080e6);
+	nv_wr32(dev, 0x419b08, 0x020398a4);
+	nv_wr32(dev, 0x419b0c, 0x0e629062);
+	nv_wr32(dev, 0x419b10, 0x0a418820);
+	nv_wr32(dev, 0x419b14, 0x000000e6);
+	nv_wr32(dev, 0x419bd0, 0x00900103);
+	nv_wr32(dev, 0x419be0, 0x00000001);
+	nv_wr32(dev, 0x419be4, 0x00000000);
+	nv_wr32(dev, 0x419c00, 0x00000002);
+	nv_wr32(dev, 0x419c04, 0x00000006);
+	nv_wr32(dev, 0x419c08, 0x00000002);
+	nv_wr32(dev, 0x419c20, 0x00000000);
+	nv_wr32(dev, 0x419cbc, 0x28137606);
+	nv_wr32(dev, 0x419ce8, 0x00000000);
+	nv_wr32(dev, 0x419cf4, 0x00000183);
+	nv_wr32(dev, 0x419d20, 0x02180000);
+	nv_wr32(dev, 0x419d24, 0x00001fff);
+	nv_wr32(dev, 0x419e04, 0x00000000);
+	nv_wr32(dev, 0x419e08, 0x00000000);
+	nv_wr32(dev, 0x419e0c, 0x00000000);
+	nv_wr32(dev, 0x419e10, 0x00000002);
+	nv_wr32(dev, 0x419e44, 0x001beff2);
+	nv_wr32(dev, 0x419e48, 0x00000000);
+	nv_wr32(dev, 0x419e4c, 0x0000000f);
+	nv_wr32(dev, 0x419e50, 0x00000000);
+	nv_wr32(dev, 0x419e54, 0x00000000);
+	nv_wr32(dev, 0x419e58, 0x00000000);
+	nv_wr32(dev, 0x419e5c, 0x00000000);
+	nv_wr32(dev, 0x419e60, 0x00000000);
+	nv_wr32(dev, 0x419e64, 0x00000000);
+	nv_wr32(dev, 0x419e68, 0x00000000);
+	nv_wr32(dev, 0x419e6c, 0x00000000);
+	nv_wr32(dev, 0x419e70, 0x00000000);
+	nv_wr32(dev, 0x419e74, 0x00000000);
+	nv_wr32(dev, 0x419e78, 0x00000000);
+	nv_wr32(dev, 0x419e7c, 0x00000000);
+	nv_wr32(dev, 0x419e80, 0x00000000);
+	nv_wr32(dev, 0x419e84, 0x00000000);
+	nv_wr32(dev, 0x419e88, 0x00000000);
+	nv_wr32(dev, 0x419e8c, 0x00000000);
+	nv_wr32(dev, 0x419e90, 0x00000000);
+	nv_wr32(dev, 0x419e98, 0x00000000);
+	if (dev_priv->chipset != 0xc0)
+		nv_wr32(dev, 0x419ee0, 0x00011110);
+	nv_wr32(dev, 0x419f50, 0x00000000);
+	nv_wr32(dev, 0x419f54, 0x00000000);
+	if (dev_priv->chipset != 0xc0)
+		nv_wr32(dev, 0x419f58, 0x00000000);
+}
+
+int
+nvc0_grctx_generate(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+	struct drm_device *dev = chan->dev;
+	int i, gpc, tp, id;
+	u32 r000260;
+
+	r000260 = nv_rd32(dev, 0x000260);
+	nv_wr32(dev, 0x000260, r000260 & ~1);
+	nv_wr32(dev, 0x400208, 0x00000000);
+
+	nvc0_grctx_generate_dispatch(dev);
+	nvc0_grctx_generate_macro(dev);
+	nvc0_grctx_generate_m2mf(dev);
+	nvc0_grctx_generate_unk47xx(dev);
+	nvc0_grctx_generate_unk58xx(dev);
+	nvc0_grctx_generate_unk60xx(dev);
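+	/*
+	 * The helpers above and below only replay static per-unit
+	 * defaults (the ROP, GPC and TP broadcast registers included);
+	 * the per-chip fixups based on the probed GPC/TP configuration
+	 * all happen further down in this function.
+	 */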
+	nvc0_grctx_generate_unk64xx(dev);
+	nvc0_grctx_generate_unk78xx(dev);
+	nvc0_grctx_generate_unk80xx(dev);
+	nvc0_grctx_generate_rop(dev);
+	nvc0_grctx_generate_gpc(dev);
+	nvc0_grctx_generate_tp(dev);
+
+	nv_wr32(dev, 0x404154, 0x00000000);
+
+	/* fuc "mmio list" writes */
+	for (i = 0; i < grch->mmio_nr * 8; i += 8) {
+		u32 reg = nv_ro32(grch->mmio, i + 0);
+		nv_wr32(dev, reg, nv_ro32(grch->mmio, i + 4));
+	}
+
+	/* assign each present TP a sequential context id, visiting the
+	 * same TP index on every GPC before moving to the next index */
+	for (tp = 0, id = 0; tp < 4; tp++) {
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+			if (tp < priv->tp_nr[gpc]) {
+				nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
+				nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
+				nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
+				nv_wr32(dev, TP_UNIT(gpc, tp, 0x088), id);
+				id++;
+			}
+
+			nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tp_nr[gpc]);
+			nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tp_nr[gpc]);
+		}
+	}
+
+	nv_wr32(dev, 0x406028, 0x00000443);
+	nv_wr32(dev, 0x405870, 0x00000443);
+	nv_wr32(dev, 0x40602c, 0x00000000);
+	nv_wr32(dev, 0x405874, 0x00000000);
+	nv_wr32(dev, 0x406030, 0x00000000);
+	nv_wr32(dev, 0x405878, 0x00000000);
+	nv_wr32(dev, 0x406034, 0x00000000);
+	nv_wr32(dev, 0x40587c, 0x00000000);
+
+	if (1) {
+		const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 };
+		u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
+		u8 tpnr[GPC_MAX];
+		u8 data[32];
+
+		memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+		memset(data, 0x1f, sizeof(data));
+
+		/* distribute the TPs across the GPCs round-robin for the
+		 * 0x4060a8 mapping tables */
+		gpc = -1;
+		for (tp = 0; tp < priv->tp_total; tp++) {
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpnr[gpc]);
+			tpnr[gpc]--;
+			data[tp] = gpc;
+		}
+
+		for (i = 0; i < max / 4; i++)
+			nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
+	}
+
+	if (1) {
+		u32 data[6] = {};
+		u8 tpnr[GPC_MAX];
+
+		memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+
+		for (tp = 0; tp < priv->tp_total; tp++) {
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpnr[gpc]);
+			tpnr[gpc]--;
+
+			data[tp / 6] |= gpc << ((tp % 6) * 5);
+		}
+
+		for (; tp < 32; tp++)
+			data[tp / 6] |= 7 << ((tp % 6) * 5);
+
+		// GPC_BROADCAST
+		nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) |
+				       priv->magic_not_rop_nr);
+		for (i = 0; i < 6; i++)
+			nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
+
+		// GPC_BROADCAST.TP_BROADCAST
+		nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) |
+				       priv->magic_not_rop_nr |
+				       priv->magic419bd0);
+		nv_wr32(dev, 0x419be4, priv->magic419be4);
+		for (i = 0; i < 6; i++)
+			nv_wr32(dev, 0x419b00 + (i * 4), data[i]);
+
+		// UNK78xx
+		nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) |
+				       priv->magic_not_rop_nr);
+		for (i = 0; i < 6; i++)
+			nv_wr32(dev, 0x40780c + (i * 4), data[i]);
+	}
+
+	if (1) {
+		u32 tp_mask = 0, tp_set = 0;
+		u8 tpnr[GPC_MAX];
+
+		memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+			tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
+
+		gpc = -1;
+		for (i = 0, gpc = -1; i < 32; i++) {
+			int ltp = i * (priv->tp_total - 1) / 32;
+
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpnr[gpc]);
+			tp = priv->tp_nr[gpc] - tpnr[gpc]--;
+
+			tp_set |= 1 << ((gpc * 8) + tp);
+
+			do {
+				nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
+				tp_set ^= tp_mask;
+				nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
+				tp_set ^= tp_mask;
+			} while (ltp == (++i * (priv->tp_total - 1) / 32));
+			i--;
+		}
+	}
+
+	nv_wr32(dev, 0x400208, 0x80000000);
+
+	nv_icmd(dev, 0x00001000, 0x00000004);
+	nv_icmd(dev, 0x000000a9, 0x0000ffff);
+	nv_icmd(dev, 0x00000038, 0x0fac6881);
+	nv_icmd(dev, 0x0000003d, 0x00000001);
+	nv_icmd(dev, 0x000000e8, 0x00000400);
+	nv_icmd(dev, 0x000000e9, 0x00000400);
+	nv_icmd(dev, 0x000000ea,
0x00000400); + nv_icmd(dev, 0x000000eb, 0x00000400); + nv_icmd(dev, 0x000000ec, 0x00000400); + nv_icmd(dev, 0x000000ed, 0x00000400); + nv_icmd(dev, 0x000000ee, 0x00000400); + nv_icmd(dev, 0x000000ef, 0x00000400); + nv_icmd(dev, 0x00000078, 0x00000300); + nv_icmd(dev, 0x00000079, 0x00000300); + nv_icmd(dev, 0x0000007a, 0x00000300); + nv_icmd(dev, 0x0000007b, 0x00000300); + nv_icmd(dev, 0x0000007c, 0x00000300); + nv_icmd(dev, 0x0000007d, 0x00000300); + nv_icmd(dev, 0x0000007e, 0x00000300); + nv_icmd(dev, 0x0000007f, 0x00000300); + nv_icmd(dev, 0x00000050, 0x00000011); + nv_icmd(dev, 0x00000058, 0x00000008); + nv_icmd(dev, 0x00000059, 0x00000008); + nv_icmd(dev, 0x0000005a, 0x00000008); + nv_icmd(dev, 0x0000005b, 0x00000008); + nv_icmd(dev, 0x0000005c, 0x00000008); + nv_icmd(dev, 0x0000005d, 0x00000008); + nv_icmd(dev, 0x0000005e, 0x00000008); + nv_icmd(dev, 0x0000005f, 0x00000008); + nv_icmd(dev, 0x00000208, 0x00000001); + nv_icmd(dev, 0x00000209, 0x00000001); + nv_icmd(dev, 0x0000020a, 0x00000001); + nv_icmd(dev, 0x0000020b, 0x00000001); + nv_icmd(dev, 0x0000020c, 0x00000001); + nv_icmd(dev, 0x0000020d, 0x00000001); + nv_icmd(dev, 0x0000020e, 0x00000001); + nv_icmd(dev, 0x0000020f, 0x00000001); + nv_icmd(dev, 0x00000081, 0x00000001); + nv_icmd(dev, 0x00000085, 0x00000004); + nv_icmd(dev, 0x00000088, 0x00000400); + nv_icmd(dev, 0x00000090, 0x00000300); + nv_icmd(dev, 0x00000098, 0x00001001); + nv_icmd(dev, 0x000000e3, 0x00000001); + nv_icmd(dev, 0x000000da, 0x00000001); + nv_icmd(dev, 0x000000f8, 0x00000003); + nv_icmd(dev, 0x000000fa, 0x00000001); + nv_icmd(dev, 0x0000009f, 0x0000ffff); + nv_icmd(dev, 0x000000a0, 0x0000ffff); + nv_icmd(dev, 0x000000a1, 0x0000ffff); + nv_icmd(dev, 0x000000a2, 0x0000ffff); + nv_icmd(dev, 0x000000b1, 0x00000001); + nv_icmd(dev, 0x000000b2, 0x00000000); + nv_icmd(dev, 0x000000b3, 0x00000000); + nv_icmd(dev, 0x000000b4, 0x00000000); + nv_icmd(dev, 0x000000b5, 0x00000000); + nv_icmd(dev, 0x000000b6, 0x00000000); + nv_icmd(dev, 0x000000b7, 0x00000000); + nv_icmd(dev, 0x000000b8, 0x00000000); + nv_icmd(dev, 0x000000b9, 0x00000000); + nv_icmd(dev, 0x000000ba, 0x00000000); + nv_icmd(dev, 0x000000bb, 0x00000000); + nv_icmd(dev, 0x000000bc, 0x00000000); + nv_icmd(dev, 0x000000bd, 0x00000000); + nv_icmd(dev, 0x000000be, 0x00000000); + nv_icmd(dev, 0x000000bf, 0x00000000); + nv_icmd(dev, 0x000000c0, 0x00000000); + nv_icmd(dev, 0x000000c1, 0x00000000); + nv_icmd(dev, 0x000000c2, 0x00000000); + nv_icmd(dev, 0x000000c3, 0x00000000); + nv_icmd(dev, 0x000000c4, 0x00000000); + nv_icmd(dev, 0x000000c5, 0x00000000); + nv_icmd(dev, 0x000000c6, 0x00000000); + nv_icmd(dev, 0x000000c7, 0x00000000); + nv_icmd(dev, 0x000000c8, 0x00000000); + nv_icmd(dev, 0x000000c9, 0x00000000); + nv_icmd(dev, 0x000000ca, 0x00000000); + nv_icmd(dev, 0x000000cb, 0x00000000); + nv_icmd(dev, 0x000000cc, 0x00000000); + nv_icmd(dev, 0x000000cd, 0x00000000); + nv_icmd(dev, 0x000000ce, 0x00000000); + nv_icmd(dev, 0x000000cf, 0x00000000); + nv_icmd(dev, 0x000000d0, 0x00000000); + nv_icmd(dev, 0x000000d1, 0x00000000); + nv_icmd(dev, 0x000000d2, 0x00000000); + nv_icmd(dev, 0x000000d3, 0x00000000); + nv_icmd(dev, 0x000000d4, 0x00000000); + nv_icmd(dev, 0x000000d5, 0x00000000); + nv_icmd(dev, 0x000000d6, 0x00000000); + nv_icmd(dev, 0x000000d7, 0x00000000); + nv_icmd(dev, 0x000000d8, 0x00000000); + nv_icmd(dev, 0x000000d9, 0x00000000); + nv_icmd(dev, 0x00000210, 0x00000040); + nv_icmd(dev, 0x00000211, 0x00000040); + nv_icmd(dev, 0x00000212, 0x00000040); + nv_icmd(dev, 0x00000213, 0x00000040); + nv_icmd(dev, 
0x00000214, 0x00000040); + nv_icmd(dev, 0x00000215, 0x00000040); + nv_icmd(dev, 0x00000216, 0x00000040); + nv_icmd(dev, 0x00000217, 0x00000040); + nv_icmd(dev, 0x00000218, 0x0000c080); + nv_icmd(dev, 0x00000219, 0x0000c080); + nv_icmd(dev, 0x0000021a, 0x0000c080); + nv_icmd(dev, 0x0000021b, 0x0000c080); + nv_icmd(dev, 0x0000021c, 0x0000c080); + nv_icmd(dev, 0x0000021d, 0x0000c080); + nv_icmd(dev, 0x0000021e, 0x0000c080); + nv_icmd(dev, 0x0000021f, 0x0000c080); + nv_icmd(dev, 0x000000ad, 0x0000013e); + nv_icmd(dev, 0x000000e1, 0x00000010); + nv_icmd(dev, 0x00000290, 0x00000000); + nv_icmd(dev, 0x00000291, 0x00000000); + nv_icmd(dev, 0x00000292, 0x00000000); + nv_icmd(dev, 0x00000293, 0x00000000); + nv_icmd(dev, 0x00000294, 0x00000000); + nv_icmd(dev, 0x00000295, 0x00000000); + nv_icmd(dev, 0x00000296, 0x00000000); + nv_icmd(dev, 0x00000297, 0x00000000); + nv_icmd(dev, 0x00000298, 0x00000000); + nv_icmd(dev, 0x00000299, 0x00000000); + nv_icmd(dev, 0x0000029a, 0x00000000); + nv_icmd(dev, 0x0000029b, 0x00000000); + nv_icmd(dev, 0x0000029c, 0x00000000); + nv_icmd(dev, 0x0000029d, 0x00000000); + nv_icmd(dev, 0x0000029e, 0x00000000); + nv_icmd(dev, 0x0000029f, 0x00000000); + nv_icmd(dev, 0x000003b0, 0x00000000); + nv_icmd(dev, 0x000003b1, 0x00000000); + nv_icmd(dev, 0x000003b2, 0x00000000); + nv_icmd(dev, 0x000003b3, 0x00000000); + nv_icmd(dev, 0x000003b4, 0x00000000); + nv_icmd(dev, 0x000003b5, 0x00000000); + nv_icmd(dev, 0x000003b6, 0x00000000); + nv_icmd(dev, 0x000003b7, 0x00000000); + nv_icmd(dev, 0x000003b8, 0x00000000); + nv_icmd(dev, 0x000003b9, 0x00000000); + nv_icmd(dev, 0x000003ba, 0x00000000); + nv_icmd(dev, 0x000003bb, 0x00000000); + nv_icmd(dev, 0x000003bc, 0x00000000); + nv_icmd(dev, 0x000003bd, 0x00000000); + nv_icmd(dev, 0x000003be, 0x00000000); + nv_icmd(dev, 0x000003bf, 0x00000000); + nv_icmd(dev, 0x000002a0, 0x00000000); + nv_icmd(dev, 0x000002a1, 0x00000000); + nv_icmd(dev, 0x000002a2, 0x00000000); + nv_icmd(dev, 0x000002a3, 0x00000000); + nv_icmd(dev, 0x000002a4, 0x00000000); + nv_icmd(dev, 0x000002a5, 0x00000000); + nv_icmd(dev, 0x000002a6, 0x00000000); + nv_icmd(dev, 0x000002a7, 0x00000000); + nv_icmd(dev, 0x000002a8, 0x00000000); + nv_icmd(dev, 0x000002a9, 0x00000000); + nv_icmd(dev, 0x000002aa, 0x00000000); + nv_icmd(dev, 0x000002ab, 0x00000000); + nv_icmd(dev, 0x000002ac, 0x00000000); + nv_icmd(dev, 0x000002ad, 0x00000000); + nv_icmd(dev, 0x000002ae, 0x00000000); + nv_icmd(dev, 0x000002af, 0x00000000); + nv_icmd(dev, 0x00000420, 0x00000000); + nv_icmd(dev, 0x00000421, 0x00000000); + nv_icmd(dev, 0x00000422, 0x00000000); + nv_icmd(dev, 0x00000423, 0x00000000); + nv_icmd(dev, 0x00000424, 0x00000000); + nv_icmd(dev, 0x00000425, 0x00000000); + nv_icmd(dev, 0x00000426, 0x00000000); + nv_icmd(dev, 0x00000427, 0x00000000); + nv_icmd(dev, 0x00000428, 0x00000000); + nv_icmd(dev, 0x00000429, 0x00000000); + nv_icmd(dev, 0x0000042a, 0x00000000); + nv_icmd(dev, 0x0000042b, 0x00000000); + nv_icmd(dev, 0x0000042c, 0x00000000); + nv_icmd(dev, 0x0000042d, 0x00000000); + nv_icmd(dev, 0x0000042e, 0x00000000); + nv_icmd(dev, 0x0000042f, 0x00000000); + nv_icmd(dev, 0x000002b0, 0x00000000); + nv_icmd(dev, 0x000002b1, 0x00000000); + nv_icmd(dev, 0x000002b2, 0x00000000); + nv_icmd(dev, 0x000002b3, 0x00000000); + nv_icmd(dev, 0x000002b4, 0x00000000); + nv_icmd(dev, 0x000002b5, 0x00000000); + nv_icmd(dev, 0x000002b6, 0x00000000); + nv_icmd(dev, 0x000002b7, 0x00000000); + nv_icmd(dev, 0x000002b8, 0x00000000); + nv_icmd(dev, 0x000002b9, 0x00000000); + nv_icmd(dev, 0x000002ba, 0x00000000); + 
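+	/*
+	 * The first argument of nv_icmd() is a state-bucket index, not
+	 * an mmio offset (note how small and densely packed the values
+	 * are); each call appears to seed one slot of default state
+	 * through PGRAPH's init-command ("icmd") interface.
+	 */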
nv_icmd(dev, 0x000002bb, 0x00000000); + nv_icmd(dev, 0x000002bc, 0x00000000); + nv_icmd(dev, 0x000002bd, 0x00000000); + nv_icmd(dev, 0x000002be, 0x00000000); + nv_icmd(dev, 0x000002bf, 0x00000000); + nv_icmd(dev, 0x00000430, 0x00000000); + nv_icmd(dev, 0x00000431, 0x00000000); + nv_icmd(dev, 0x00000432, 0x00000000); + nv_icmd(dev, 0x00000433, 0x00000000); + nv_icmd(dev, 0x00000434, 0x00000000); + nv_icmd(dev, 0x00000435, 0x00000000); + nv_icmd(dev, 0x00000436, 0x00000000); + nv_icmd(dev, 0x00000437, 0x00000000); + nv_icmd(dev, 0x00000438, 0x00000000); + nv_icmd(dev, 0x00000439, 0x00000000); + nv_icmd(dev, 0x0000043a, 0x00000000); + nv_icmd(dev, 0x0000043b, 0x00000000); + nv_icmd(dev, 0x0000043c, 0x00000000); + nv_icmd(dev, 0x0000043d, 0x00000000); + nv_icmd(dev, 0x0000043e, 0x00000000); + nv_icmd(dev, 0x0000043f, 0x00000000); + nv_icmd(dev, 0x000002c0, 0x00000000); + nv_icmd(dev, 0x000002c1, 0x00000000); + nv_icmd(dev, 0x000002c2, 0x00000000); + nv_icmd(dev, 0x000002c3, 0x00000000); + nv_icmd(dev, 0x000002c4, 0x00000000); + nv_icmd(dev, 0x000002c5, 0x00000000); + nv_icmd(dev, 0x000002c6, 0x00000000); + nv_icmd(dev, 0x000002c7, 0x00000000); + nv_icmd(dev, 0x000002c8, 0x00000000); + nv_icmd(dev, 0x000002c9, 0x00000000); + nv_icmd(dev, 0x000002ca, 0x00000000); + nv_icmd(dev, 0x000002cb, 0x00000000); + nv_icmd(dev, 0x000002cc, 0x00000000); + nv_icmd(dev, 0x000002cd, 0x00000000); + nv_icmd(dev, 0x000002ce, 0x00000000); + nv_icmd(dev, 0x000002cf, 0x00000000); + nv_icmd(dev, 0x000004d0, 0x00000000); + nv_icmd(dev, 0x000004d1, 0x00000000); + nv_icmd(dev, 0x000004d2, 0x00000000); + nv_icmd(dev, 0x000004d3, 0x00000000); + nv_icmd(dev, 0x000004d4, 0x00000000); + nv_icmd(dev, 0x000004d5, 0x00000000); + nv_icmd(dev, 0x000004d6, 0x00000000); + nv_icmd(dev, 0x000004d7, 0x00000000); + nv_icmd(dev, 0x000004d8, 0x00000000); + nv_icmd(dev, 0x000004d9, 0x00000000); + nv_icmd(dev, 0x000004da, 0x00000000); + nv_icmd(dev, 0x000004db, 0x00000000); + nv_icmd(dev, 0x000004dc, 0x00000000); + nv_icmd(dev, 0x000004dd, 0x00000000); + nv_icmd(dev, 0x000004de, 0x00000000); + nv_icmd(dev, 0x000004df, 0x00000000); + nv_icmd(dev, 0x00000720, 0x00000000); + nv_icmd(dev, 0x00000721, 0x00000000); + nv_icmd(dev, 0x00000722, 0x00000000); + nv_icmd(dev, 0x00000723, 0x00000000); + nv_icmd(dev, 0x00000724, 0x00000000); + nv_icmd(dev, 0x00000725, 0x00000000); + nv_icmd(dev, 0x00000726, 0x00000000); + nv_icmd(dev, 0x00000727, 0x00000000); + nv_icmd(dev, 0x00000728, 0x00000000); + nv_icmd(dev, 0x00000729, 0x00000000); + nv_icmd(dev, 0x0000072a, 0x00000000); + nv_icmd(dev, 0x0000072b, 0x00000000); + nv_icmd(dev, 0x0000072c, 0x00000000); + nv_icmd(dev, 0x0000072d, 0x00000000); + nv_icmd(dev, 0x0000072e, 0x00000000); + nv_icmd(dev, 0x0000072f, 0x00000000); + nv_icmd(dev, 0x000008c0, 0x00000000); + nv_icmd(dev, 0x000008c1, 0x00000000); + nv_icmd(dev, 0x000008c2, 0x00000000); + nv_icmd(dev, 0x000008c3, 0x00000000); + nv_icmd(dev, 0x000008c4, 0x00000000); + nv_icmd(dev, 0x000008c5, 0x00000000); + nv_icmd(dev, 0x000008c6, 0x00000000); + nv_icmd(dev, 0x000008c7, 0x00000000); + nv_icmd(dev, 0x000008c8, 0x00000000); + nv_icmd(dev, 0x000008c9, 0x00000000); + nv_icmd(dev, 0x000008ca, 0x00000000); + nv_icmd(dev, 0x000008cb, 0x00000000); + nv_icmd(dev, 0x000008cc, 0x00000000); + nv_icmd(dev, 0x000008cd, 0x00000000); + nv_icmd(dev, 0x000008ce, 0x00000000); + nv_icmd(dev, 0x000008cf, 0x00000000); + nv_icmd(dev, 0x00000890, 0x00000000); + nv_icmd(dev, 0x00000891, 0x00000000); + nv_icmd(dev, 0x00000892, 0x00000000); + nv_icmd(dev, 0x00000893, 
0x00000000); + nv_icmd(dev, 0x00000894, 0x00000000); + nv_icmd(dev, 0x00000895, 0x00000000); + nv_icmd(dev, 0x00000896, 0x00000000); + nv_icmd(dev, 0x00000897, 0x00000000); + nv_icmd(dev, 0x00000898, 0x00000000); + nv_icmd(dev, 0x00000899, 0x00000000); + nv_icmd(dev, 0x0000089a, 0x00000000); + nv_icmd(dev, 0x0000089b, 0x00000000); + nv_icmd(dev, 0x0000089c, 0x00000000); + nv_icmd(dev, 0x0000089d, 0x00000000); + nv_icmd(dev, 0x0000089e, 0x00000000); + nv_icmd(dev, 0x0000089f, 0x00000000); + nv_icmd(dev, 0x000008e0, 0x00000000); + nv_icmd(dev, 0x000008e1, 0x00000000); + nv_icmd(dev, 0x000008e2, 0x00000000); + nv_icmd(dev, 0x000008e3, 0x00000000); + nv_icmd(dev, 0x000008e4, 0x00000000); + nv_icmd(dev, 0x000008e5, 0x00000000); + nv_icmd(dev, 0x000008e6, 0x00000000); + nv_icmd(dev, 0x000008e7, 0x00000000); + nv_icmd(dev, 0x000008e8, 0x00000000); + nv_icmd(dev, 0x000008e9, 0x00000000); + nv_icmd(dev, 0x000008ea, 0x00000000); + nv_icmd(dev, 0x000008eb, 0x00000000); + nv_icmd(dev, 0x000008ec, 0x00000000); + nv_icmd(dev, 0x000008ed, 0x00000000); + nv_icmd(dev, 0x000008ee, 0x00000000); + nv_icmd(dev, 0x000008ef, 0x00000000); + nv_icmd(dev, 0x000008a0, 0x00000000); + nv_icmd(dev, 0x000008a1, 0x00000000); + nv_icmd(dev, 0x000008a2, 0x00000000); + nv_icmd(dev, 0x000008a3, 0x00000000); + nv_icmd(dev, 0x000008a4, 0x00000000); + nv_icmd(dev, 0x000008a5, 0x00000000); + nv_icmd(dev, 0x000008a6, 0x00000000); + nv_icmd(dev, 0x000008a7, 0x00000000); + nv_icmd(dev, 0x000008a8, 0x00000000); + nv_icmd(dev, 0x000008a9, 0x00000000); + nv_icmd(dev, 0x000008aa, 0x00000000); + nv_icmd(dev, 0x000008ab, 0x00000000); + nv_icmd(dev, 0x000008ac, 0x00000000); + nv_icmd(dev, 0x000008ad, 0x00000000); + nv_icmd(dev, 0x000008ae, 0x00000000); + nv_icmd(dev, 0x000008af, 0x00000000); + nv_icmd(dev, 0x000008f0, 0x00000000); + nv_icmd(dev, 0x000008f1, 0x00000000); + nv_icmd(dev, 0x000008f2, 0x00000000); + nv_icmd(dev, 0x000008f3, 0x00000000); + nv_icmd(dev, 0x000008f4, 0x00000000); + nv_icmd(dev, 0x000008f5, 0x00000000); + nv_icmd(dev, 0x000008f6, 0x00000000); + nv_icmd(dev, 0x000008f7, 0x00000000); + nv_icmd(dev, 0x000008f8, 0x00000000); + nv_icmd(dev, 0x000008f9, 0x00000000); + nv_icmd(dev, 0x000008fa, 0x00000000); + nv_icmd(dev, 0x000008fb, 0x00000000); + nv_icmd(dev, 0x000008fc, 0x00000000); + nv_icmd(dev, 0x000008fd, 0x00000000); + nv_icmd(dev, 0x000008fe, 0x00000000); + nv_icmd(dev, 0x000008ff, 0x00000000); + nv_icmd(dev, 0x0000094c, 0x000000ff); + nv_icmd(dev, 0x0000094d, 0xffffffff); + nv_icmd(dev, 0x0000094e, 0x00000002); + nv_icmd(dev, 0x000002ec, 0x00000001); + nv_icmd(dev, 0x00000303, 0x00000001); + nv_icmd(dev, 0x000002e6, 0x00000001); + nv_icmd(dev, 0x00000466, 0x00000052); + nv_icmd(dev, 0x00000301, 0x3f800000); + nv_icmd(dev, 0x00000304, 0x30201000); + nv_icmd(dev, 0x00000305, 0x70605040); + nv_icmd(dev, 0x00000306, 0xb8a89888); + nv_icmd(dev, 0x00000307, 0xf8e8d8c8); + nv_icmd(dev, 0x0000030a, 0x00ffff00); + nv_icmd(dev, 0x0000030b, 0x0000001a); + nv_icmd(dev, 0x0000030c, 0x00000001); + nv_icmd(dev, 0x00000318, 0x00000001); + nv_icmd(dev, 0x00000340, 0x00000000); + nv_icmd(dev, 0x00000375, 0x00000001); + nv_icmd(dev, 0x00000351, 0x00000100); + nv_icmd(dev, 0x0000037d, 0x00000006); + nv_icmd(dev, 0x000003a0, 0x00000002); + nv_icmd(dev, 0x000003aa, 0x00000001); + nv_icmd(dev, 0x000003a9, 0x00000001); + nv_icmd(dev, 0x00000380, 0x00000001); + nv_icmd(dev, 0x00000360, 0x00000040); + nv_icmd(dev, 0x00000366, 0x00000000); + nv_icmd(dev, 0x00000367, 0x00000000); + nv_icmd(dev, 0x00000368, 0x00001fff); + nv_icmd(dev, 
0x00000370, 0x00000000); + nv_icmd(dev, 0x00000371, 0x00000000); + nv_icmd(dev, 0x00000372, 0x003fffff); + nv_icmd(dev, 0x0000037a, 0x00000012); + nv_icmd(dev, 0x000005e0, 0x00000022); + nv_icmd(dev, 0x000005e1, 0x00000022); + nv_icmd(dev, 0x000005e2, 0x00000022); + nv_icmd(dev, 0x000005e3, 0x00000022); + nv_icmd(dev, 0x000005e4, 0x00000022); + nv_icmd(dev, 0x00000619, 0x00000003); + nv_icmd(dev, 0x00000811, 0x00000003); + nv_icmd(dev, 0x00000812, 0x00000004); + nv_icmd(dev, 0x00000813, 0x00000006); + nv_icmd(dev, 0x00000814, 0x00000008); + nv_icmd(dev, 0x00000815, 0x0000000b); + nv_icmd(dev, 0x00000800, 0x00000001); + nv_icmd(dev, 0x00000801, 0x00000001); + nv_icmd(dev, 0x00000802, 0x00000001); + nv_icmd(dev, 0x00000803, 0x00000001); + nv_icmd(dev, 0x00000804, 0x00000001); + nv_icmd(dev, 0x00000805, 0x00000001); + nv_icmd(dev, 0x00000632, 0x00000001); + nv_icmd(dev, 0x00000633, 0x00000002); + nv_icmd(dev, 0x00000634, 0x00000003); + nv_icmd(dev, 0x00000635, 0x00000004); + nv_icmd(dev, 0x00000654, 0x3f800000); + nv_icmd(dev, 0x00000657, 0x3f800000); + nv_icmd(dev, 0x00000655, 0x3f800000); + nv_icmd(dev, 0x00000656, 0x3f800000); + nv_icmd(dev, 0x000006cd, 0x3f800000); + nv_icmd(dev, 0x000007f5, 0x3f800000); + nv_icmd(dev, 0x000007dc, 0x39291909); + nv_icmd(dev, 0x000007dd, 0x79695949); + nv_icmd(dev, 0x000007de, 0xb9a99989); + nv_icmd(dev, 0x000007df, 0xf9e9d9c9); + nv_icmd(dev, 0x000007e8, 0x00003210); + nv_icmd(dev, 0x000007e9, 0x00007654); + nv_icmd(dev, 0x000007ea, 0x00000098); + nv_icmd(dev, 0x000007ec, 0x39291909); + nv_icmd(dev, 0x000007ed, 0x79695949); + nv_icmd(dev, 0x000007ee, 0xb9a99989); + nv_icmd(dev, 0x000007ef, 0xf9e9d9c9); + nv_icmd(dev, 0x000007f0, 0x00003210); + nv_icmd(dev, 0x000007f1, 0x00007654); + nv_icmd(dev, 0x000007f2, 0x00000098); + nv_icmd(dev, 0x000005a5, 0x00000001); + nv_icmd(dev, 0x00000980, 0x00000000); + nv_icmd(dev, 0x00000981, 0x00000000); + nv_icmd(dev, 0x00000982, 0x00000000); + nv_icmd(dev, 0x00000983, 0x00000000); + nv_icmd(dev, 0x00000984, 0x00000000); + nv_icmd(dev, 0x00000985, 0x00000000); + nv_icmd(dev, 0x00000986, 0x00000000); + nv_icmd(dev, 0x00000987, 0x00000000); + nv_icmd(dev, 0x00000988, 0x00000000); + nv_icmd(dev, 0x00000989, 0x00000000); + nv_icmd(dev, 0x0000098a, 0x00000000); + nv_icmd(dev, 0x0000098b, 0x00000000); + nv_icmd(dev, 0x0000098c, 0x00000000); + nv_icmd(dev, 0x0000098d, 0x00000000); + nv_icmd(dev, 0x0000098e, 0x00000000); + nv_icmd(dev, 0x0000098f, 0x00000000); + nv_icmd(dev, 0x00000990, 0x00000000); + nv_icmd(dev, 0x00000991, 0x00000000); + nv_icmd(dev, 0x00000992, 0x00000000); + nv_icmd(dev, 0x00000993, 0x00000000); + nv_icmd(dev, 0x00000994, 0x00000000); + nv_icmd(dev, 0x00000995, 0x00000000); + nv_icmd(dev, 0x00000996, 0x00000000); + nv_icmd(dev, 0x00000997, 0x00000000); + nv_icmd(dev, 0x00000998, 0x00000000); + nv_icmd(dev, 0x00000999, 0x00000000); + nv_icmd(dev, 0x0000099a, 0x00000000); + nv_icmd(dev, 0x0000099b, 0x00000000); + nv_icmd(dev, 0x0000099c, 0x00000000); + nv_icmd(dev, 0x0000099d, 0x00000000); + nv_icmd(dev, 0x0000099e, 0x00000000); + nv_icmd(dev, 0x0000099f, 0x00000000); + nv_icmd(dev, 0x000009a0, 0x00000000); + nv_icmd(dev, 0x000009a1, 0x00000000); + nv_icmd(dev, 0x000009a2, 0x00000000); + nv_icmd(dev, 0x000009a3, 0x00000000); + nv_icmd(dev, 0x000009a4, 0x00000000); + nv_icmd(dev, 0x000009a5, 0x00000000); + nv_icmd(dev, 0x000009a6, 0x00000000); + nv_icmd(dev, 0x000009a7, 0x00000000); + nv_icmd(dev, 0x000009a8, 0x00000000); + nv_icmd(dev, 0x000009a9, 0x00000000); + nv_icmd(dev, 0x000009aa, 0x00000000); + 
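+	/*
+	 * Long uniform runs such as the 0x0980-0x09ff block here are
+	 * presumably array-valued state, one slot per element, which is
+	 * why the same default is repeated for every index in the range.
+	 */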
nv_icmd(dev, 0x000009ab, 0x00000000); + nv_icmd(dev, 0x000009ac, 0x00000000); + nv_icmd(dev, 0x000009ad, 0x00000000); + nv_icmd(dev, 0x000009ae, 0x00000000); + nv_icmd(dev, 0x000009af, 0x00000000); + nv_icmd(dev, 0x000009b0, 0x00000000); + nv_icmd(dev, 0x000009b1, 0x00000000); + nv_icmd(dev, 0x000009b2, 0x00000000); + nv_icmd(dev, 0x000009b3, 0x00000000); + nv_icmd(dev, 0x000009b4, 0x00000000); + nv_icmd(dev, 0x000009b5, 0x00000000); + nv_icmd(dev, 0x000009b6, 0x00000000); + nv_icmd(dev, 0x000009b7, 0x00000000); + nv_icmd(dev, 0x000009b8, 0x00000000); + nv_icmd(dev, 0x000009b9, 0x00000000); + nv_icmd(dev, 0x000009ba, 0x00000000); + nv_icmd(dev, 0x000009bb, 0x00000000); + nv_icmd(dev, 0x000009bc, 0x00000000); + nv_icmd(dev, 0x000009bd, 0x00000000); + nv_icmd(dev, 0x000009be, 0x00000000); + nv_icmd(dev, 0x000009bf, 0x00000000); + nv_icmd(dev, 0x000009c0, 0x00000000); + nv_icmd(dev, 0x000009c1, 0x00000000); + nv_icmd(dev, 0x000009c2, 0x00000000); + nv_icmd(dev, 0x000009c3, 0x00000000); + nv_icmd(dev, 0x000009c4, 0x00000000); + nv_icmd(dev, 0x000009c5, 0x00000000); + nv_icmd(dev, 0x000009c6, 0x00000000); + nv_icmd(dev, 0x000009c7, 0x00000000); + nv_icmd(dev, 0x000009c8, 0x00000000); + nv_icmd(dev, 0x000009c9, 0x00000000); + nv_icmd(dev, 0x000009ca, 0x00000000); + nv_icmd(dev, 0x000009cb, 0x00000000); + nv_icmd(dev, 0x000009cc, 0x00000000); + nv_icmd(dev, 0x000009cd, 0x00000000); + nv_icmd(dev, 0x000009ce, 0x00000000); + nv_icmd(dev, 0x000009cf, 0x00000000); + nv_icmd(dev, 0x000009d0, 0x00000000); + nv_icmd(dev, 0x000009d1, 0x00000000); + nv_icmd(dev, 0x000009d2, 0x00000000); + nv_icmd(dev, 0x000009d3, 0x00000000); + nv_icmd(dev, 0x000009d4, 0x00000000); + nv_icmd(dev, 0x000009d5, 0x00000000); + nv_icmd(dev, 0x000009d6, 0x00000000); + nv_icmd(dev, 0x000009d7, 0x00000000); + nv_icmd(dev, 0x000009d8, 0x00000000); + nv_icmd(dev, 0x000009d9, 0x00000000); + nv_icmd(dev, 0x000009da, 0x00000000); + nv_icmd(dev, 0x000009db, 0x00000000); + nv_icmd(dev, 0x000009dc, 0x00000000); + nv_icmd(dev, 0x000009dd, 0x00000000); + nv_icmd(dev, 0x000009de, 0x00000000); + nv_icmd(dev, 0x000009df, 0x00000000); + nv_icmd(dev, 0x000009e0, 0x00000000); + nv_icmd(dev, 0x000009e1, 0x00000000); + nv_icmd(dev, 0x000009e2, 0x00000000); + nv_icmd(dev, 0x000009e3, 0x00000000); + nv_icmd(dev, 0x000009e4, 0x00000000); + nv_icmd(dev, 0x000009e5, 0x00000000); + nv_icmd(dev, 0x000009e6, 0x00000000); + nv_icmd(dev, 0x000009e7, 0x00000000); + nv_icmd(dev, 0x000009e8, 0x00000000); + nv_icmd(dev, 0x000009e9, 0x00000000); + nv_icmd(dev, 0x000009ea, 0x00000000); + nv_icmd(dev, 0x000009eb, 0x00000000); + nv_icmd(dev, 0x000009ec, 0x00000000); + nv_icmd(dev, 0x000009ed, 0x00000000); + nv_icmd(dev, 0x000009ee, 0x00000000); + nv_icmd(dev, 0x000009ef, 0x00000000); + nv_icmd(dev, 0x000009f0, 0x00000000); + nv_icmd(dev, 0x000009f1, 0x00000000); + nv_icmd(dev, 0x000009f2, 0x00000000); + nv_icmd(dev, 0x000009f3, 0x00000000); + nv_icmd(dev, 0x000009f4, 0x00000000); + nv_icmd(dev, 0x000009f5, 0x00000000); + nv_icmd(dev, 0x000009f6, 0x00000000); + nv_icmd(dev, 0x000009f7, 0x00000000); + nv_icmd(dev, 0x000009f8, 0x00000000); + nv_icmd(dev, 0x000009f9, 0x00000000); + nv_icmd(dev, 0x000009fa, 0x00000000); + nv_icmd(dev, 0x000009fb, 0x00000000); + nv_icmd(dev, 0x000009fc, 0x00000000); + nv_icmd(dev, 0x000009fd, 0x00000000); + nv_icmd(dev, 0x000009fe, 0x00000000); + nv_icmd(dev, 0x000009ff, 0x00000000); + nv_icmd(dev, 0x00000468, 0x00000004); + nv_icmd(dev, 0x0000046c, 0x00000001); + nv_icmd(dev, 0x00000470, 0x00000000); + nv_icmd(dev, 0x00000471, 
0x00000000); + nv_icmd(dev, 0x00000472, 0x00000000); + nv_icmd(dev, 0x00000473, 0x00000000); + nv_icmd(dev, 0x00000474, 0x00000000); + nv_icmd(dev, 0x00000475, 0x00000000); + nv_icmd(dev, 0x00000476, 0x00000000); + nv_icmd(dev, 0x00000477, 0x00000000); + nv_icmd(dev, 0x00000478, 0x00000000); + nv_icmd(dev, 0x00000479, 0x00000000); + nv_icmd(dev, 0x0000047a, 0x00000000); + nv_icmd(dev, 0x0000047b, 0x00000000); + nv_icmd(dev, 0x0000047c, 0x00000000); + nv_icmd(dev, 0x0000047d, 0x00000000); + nv_icmd(dev, 0x0000047e, 0x00000000); + nv_icmd(dev, 0x0000047f, 0x00000000); + nv_icmd(dev, 0x00000480, 0x00000000); + nv_icmd(dev, 0x00000481, 0x00000000); + nv_icmd(dev, 0x00000482, 0x00000000); + nv_icmd(dev, 0x00000483, 0x00000000); + nv_icmd(dev, 0x00000484, 0x00000000); + nv_icmd(dev, 0x00000485, 0x00000000); + nv_icmd(dev, 0x00000486, 0x00000000); + nv_icmd(dev, 0x00000487, 0x00000000); + nv_icmd(dev, 0x00000488, 0x00000000); + nv_icmd(dev, 0x00000489, 0x00000000); + nv_icmd(dev, 0x0000048a, 0x00000000); + nv_icmd(dev, 0x0000048b, 0x00000000); + nv_icmd(dev, 0x0000048c, 0x00000000); + nv_icmd(dev, 0x0000048d, 0x00000000); + nv_icmd(dev, 0x0000048e, 0x00000000); + nv_icmd(dev, 0x0000048f, 0x00000000); + nv_icmd(dev, 0x00000490, 0x00000000); + nv_icmd(dev, 0x00000491, 0x00000000); + nv_icmd(dev, 0x00000492, 0x00000000); + nv_icmd(dev, 0x00000493, 0x00000000); + nv_icmd(dev, 0x00000494, 0x00000000); + nv_icmd(dev, 0x00000495, 0x00000000); + nv_icmd(dev, 0x00000496, 0x00000000); + nv_icmd(dev, 0x00000497, 0x00000000); + nv_icmd(dev, 0x00000498, 0x00000000); + nv_icmd(dev, 0x00000499, 0x00000000); + nv_icmd(dev, 0x0000049a, 0x00000000); + nv_icmd(dev, 0x0000049b, 0x00000000); + nv_icmd(dev, 0x0000049c, 0x00000000); + nv_icmd(dev, 0x0000049d, 0x00000000); + nv_icmd(dev, 0x0000049e, 0x00000000); + nv_icmd(dev, 0x0000049f, 0x00000000); + nv_icmd(dev, 0x000004a0, 0x00000000); + nv_icmd(dev, 0x000004a1, 0x00000000); + nv_icmd(dev, 0x000004a2, 0x00000000); + nv_icmd(dev, 0x000004a3, 0x00000000); + nv_icmd(dev, 0x000004a4, 0x00000000); + nv_icmd(dev, 0x000004a5, 0x00000000); + nv_icmd(dev, 0x000004a6, 0x00000000); + nv_icmd(dev, 0x000004a7, 0x00000000); + nv_icmd(dev, 0x000004a8, 0x00000000); + nv_icmd(dev, 0x000004a9, 0x00000000); + nv_icmd(dev, 0x000004aa, 0x00000000); + nv_icmd(dev, 0x000004ab, 0x00000000); + nv_icmd(dev, 0x000004ac, 0x00000000); + nv_icmd(dev, 0x000004ad, 0x00000000); + nv_icmd(dev, 0x000004ae, 0x00000000); + nv_icmd(dev, 0x000004af, 0x00000000); + nv_icmd(dev, 0x000004b0, 0x00000000); + nv_icmd(dev, 0x000004b1, 0x00000000); + nv_icmd(dev, 0x000004b2, 0x00000000); + nv_icmd(dev, 0x000004b3, 0x00000000); + nv_icmd(dev, 0x000004b4, 0x00000000); + nv_icmd(dev, 0x000004b5, 0x00000000); + nv_icmd(dev, 0x000004b6, 0x00000000); + nv_icmd(dev, 0x000004b7, 0x00000000); + nv_icmd(dev, 0x000004b8, 0x00000000); + nv_icmd(dev, 0x000004b9, 0x00000000); + nv_icmd(dev, 0x000004ba, 0x00000000); + nv_icmd(dev, 0x000004bb, 0x00000000); + nv_icmd(dev, 0x000004bc, 0x00000000); + nv_icmd(dev, 0x000004bd, 0x00000000); + nv_icmd(dev, 0x000004be, 0x00000000); + nv_icmd(dev, 0x000004bf, 0x00000000); + nv_icmd(dev, 0x000004c0, 0x00000000); + nv_icmd(dev, 0x000004c1, 0x00000000); + nv_icmd(dev, 0x000004c2, 0x00000000); + nv_icmd(dev, 0x000004c3, 0x00000000); + nv_icmd(dev, 0x000004c4, 0x00000000); + nv_icmd(dev, 0x000004c5, 0x00000000); + nv_icmd(dev, 0x000004c6, 0x00000000); + nv_icmd(dev, 0x000004c7, 0x00000000); + nv_icmd(dev, 0x000004c8, 0x00000000); + nv_icmd(dev, 0x000004c9, 0x00000000); + nv_icmd(dev, 
0x000004ca, 0x00000000); + nv_icmd(dev, 0x000004cb, 0x00000000); + nv_icmd(dev, 0x000004cc, 0x00000000); + nv_icmd(dev, 0x000004cd, 0x00000000); + nv_icmd(dev, 0x000004ce, 0x00000000); + nv_icmd(dev, 0x000004cf, 0x00000000); + nv_icmd(dev, 0x00000510, 0x3f800000); + nv_icmd(dev, 0x00000511, 0x3f800000); + nv_icmd(dev, 0x00000512, 0x3f800000); + nv_icmd(dev, 0x00000513, 0x3f800000); + nv_icmd(dev, 0x00000514, 0x3f800000); + nv_icmd(dev, 0x00000515, 0x3f800000); + nv_icmd(dev, 0x00000516, 0x3f800000); + nv_icmd(dev, 0x00000517, 0x3f800000); + nv_icmd(dev, 0x00000518, 0x3f800000); + nv_icmd(dev, 0x00000519, 0x3f800000); + nv_icmd(dev, 0x0000051a, 0x3f800000); + nv_icmd(dev, 0x0000051b, 0x3f800000); + nv_icmd(dev, 0x0000051c, 0x3f800000); + nv_icmd(dev, 0x0000051d, 0x3f800000); + nv_icmd(dev, 0x0000051e, 0x3f800000); + nv_icmd(dev, 0x0000051f, 0x3f800000); + nv_icmd(dev, 0x00000520, 0x000002b6); + nv_icmd(dev, 0x00000529, 0x00000001); + nv_icmd(dev, 0x00000530, 0xffff0000); + nv_icmd(dev, 0x00000531, 0xffff0000); + nv_icmd(dev, 0x00000532, 0xffff0000); + nv_icmd(dev, 0x00000533, 0xffff0000); + nv_icmd(dev, 0x00000534, 0xffff0000); + nv_icmd(dev, 0x00000535, 0xffff0000); + nv_icmd(dev, 0x00000536, 0xffff0000); + nv_icmd(dev, 0x00000537, 0xffff0000); + nv_icmd(dev, 0x00000538, 0xffff0000); + nv_icmd(dev, 0x00000539, 0xffff0000); + nv_icmd(dev, 0x0000053a, 0xffff0000); + nv_icmd(dev, 0x0000053b, 0xffff0000); + nv_icmd(dev, 0x0000053c, 0xffff0000); + nv_icmd(dev, 0x0000053d, 0xffff0000); + nv_icmd(dev, 0x0000053e, 0xffff0000); + nv_icmd(dev, 0x0000053f, 0xffff0000); + nv_icmd(dev, 0x00000585, 0x0000003f); + nv_icmd(dev, 0x00000576, 0x00000003); + nv_icmd(dev, 0x00000586, 0x00000040); + nv_icmd(dev, 0x00000582, 0x00000080); + nv_icmd(dev, 0x00000583, 0x00000080); + nv_icmd(dev, 0x000005c2, 0x00000001); + nv_icmd(dev, 0x00000638, 0x00000001); + nv_icmd(dev, 0x00000639, 0x00000001); + nv_icmd(dev, 0x0000063a, 0x00000002); + nv_icmd(dev, 0x0000063b, 0x00000001); + nv_icmd(dev, 0x0000063c, 0x00000001); + nv_icmd(dev, 0x0000063d, 0x00000002); + nv_icmd(dev, 0x0000063e, 0x00000001); + nv_icmd(dev, 0x000008b8, 0x00000001); + nv_icmd(dev, 0x000008b9, 0x00000001); + nv_icmd(dev, 0x000008ba, 0x00000001); + nv_icmd(dev, 0x000008bb, 0x00000001); + nv_icmd(dev, 0x000008bc, 0x00000001); + nv_icmd(dev, 0x000008bd, 0x00000001); + nv_icmd(dev, 0x000008be, 0x00000001); + nv_icmd(dev, 0x000008bf, 0x00000001); + nv_icmd(dev, 0x00000900, 0x00000001); + nv_icmd(dev, 0x00000901, 0x00000001); + nv_icmd(dev, 0x00000902, 0x00000001); + nv_icmd(dev, 0x00000903, 0x00000001); + nv_icmd(dev, 0x00000904, 0x00000001); + nv_icmd(dev, 0x00000905, 0x00000001); + nv_icmd(dev, 0x00000906, 0x00000001); + nv_icmd(dev, 0x00000907, 0x00000001); + nv_icmd(dev, 0x00000908, 0x00000002); + nv_icmd(dev, 0x00000909, 0x00000002); + nv_icmd(dev, 0x0000090a, 0x00000002); + nv_icmd(dev, 0x0000090b, 0x00000002); + nv_icmd(dev, 0x0000090c, 0x00000002); + nv_icmd(dev, 0x0000090d, 0x00000002); + nv_icmd(dev, 0x0000090e, 0x00000002); + nv_icmd(dev, 0x0000090f, 0x00000002); + nv_icmd(dev, 0x00000910, 0x00000001); + nv_icmd(dev, 0x00000911, 0x00000001); + nv_icmd(dev, 0x00000912, 0x00000001); + nv_icmd(dev, 0x00000913, 0x00000001); + nv_icmd(dev, 0x00000914, 0x00000001); + nv_icmd(dev, 0x00000915, 0x00000001); + nv_icmd(dev, 0x00000916, 0x00000001); + nv_icmd(dev, 0x00000917, 0x00000001); + nv_icmd(dev, 0x00000918, 0x00000001); + nv_icmd(dev, 0x00000919, 0x00000001); + nv_icmd(dev, 0x0000091a, 0x00000001); + nv_icmd(dev, 0x0000091b, 0x00000001); + 
nv_icmd(dev, 0x0000091c, 0x00000001); + nv_icmd(dev, 0x0000091d, 0x00000001); + nv_icmd(dev, 0x0000091e, 0x00000001); + nv_icmd(dev, 0x0000091f, 0x00000001); + nv_icmd(dev, 0x00000920, 0x00000002); + nv_icmd(dev, 0x00000921, 0x00000002); + nv_icmd(dev, 0x00000922, 0x00000002); + nv_icmd(dev, 0x00000923, 0x00000002); + nv_icmd(dev, 0x00000924, 0x00000002); + nv_icmd(dev, 0x00000925, 0x00000002); + nv_icmd(dev, 0x00000926, 0x00000002); + nv_icmd(dev, 0x00000927, 0x00000002); + nv_icmd(dev, 0x00000928, 0x00000001); + nv_icmd(dev, 0x00000929, 0x00000001); + nv_icmd(dev, 0x0000092a, 0x00000001); + nv_icmd(dev, 0x0000092b, 0x00000001); + nv_icmd(dev, 0x0000092c, 0x00000001); + nv_icmd(dev, 0x0000092d, 0x00000001); + nv_icmd(dev, 0x0000092e, 0x00000001); + nv_icmd(dev, 0x0000092f, 0x00000001); + nv_icmd(dev, 0x00000648, 0x00000001); + nv_icmd(dev, 0x00000649, 0x00000001); + nv_icmd(dev, 0x0000064a, 0x00000001); + nv_icmd(dev, 0x0000064b, 0x00000001); + nv_icmd(dev, 0x0000064c, 0x00000001); + nv_icmd(dev, 0x0000064d, 0x00000001); + nv_icmd(dev, 0x0000064e, 0x00000001); + nv_icmd(dev, 0x0000064f, 0x00000001); + nv_icmd(dev, 0x00000650, 0x00000001); + nv_icmd(dev, 0x00000658, 0x0000000f); + nv_icmd(dev, 0x000007ff, 0x0000000a); + nv_icmd(dev, 0x0000066a, 0x40000000); + nv_icmd(dev, 0x0000066b, 0x10000000); + nv_icmd(dev, 0x0000066c, 0xffff0000); + nv_icmd(dev, 0x0000066d, 0xffff0000); + nv_icmd(dev, 0x000007af, 0x00000008); + nv_icmd(dev, 0x000007b0, 0x00000008); + nv_icmd(dev, 0x000007f6, 0x00000001); + nv_icmd(dev, 0x000006b2, 0x00000055); + nv_icmd(dev, 0x000007ad, 0x00000003); + nv_icmd(dev, 0x00000937, 0x00000001); + nv_icmd(dev, 0x00000971, 0x00000008); + nv_icmd(dev, 0x00000972, 0x00000040); + nv_icmd(dev, 0x00000973, 0x0000012c); + nv_icmd(dev, 0x0000097c, 0x00000040); + nv_icmd(dev, 0x00000979, 0x00000003); + nv_icmd(dev, 0x00000975, 0x00000020); + nv_icmd(dev, 0x00000976, 0x00000001); + nv_icmd(dev, 0x00000977, 0x00000020); + nv_icmd(dev, 0x00000978, 0x00000001); + nv_icmd(dev, 0x00000957, 0x00000003); + nv_icmd(dev, 0x0000095e, 0x20164010); + nv_icmd(dev, 0x0000095f, 0x00000020); + nv_icmd(dev, 0x00000683, 0x00000006); + nv_icmd(dev, 0x00000685, 0x003fffff); + nv_icmd(dev, 0x00000687, 0x00000c48); + nv_icmd(dev, 0x000006a0, 0x00000005); + nv_icmd(dev, 0x00000840, 0x00300008); + nv_icmd(dev, 0x00000841, 0x04000080); + nv_icmd(dev, 0x00000842, 0x00300008); + nv_icmd(dev, 0x00000843, 0x04000080); + nv_icmd(dev, 0x00000818, 0x00000000); + nv_icmd(dev, 0x00000819, 0x00000000); + nv_icmd(dev, 0x0000081a, 0x00000000); + nv_icmd(dev, 0x0000081b, 0x00000000); + nv_icmd(dev, 0x0000081c, 0x00000000); + nv_icmd(dev, 0x0000081d, 0x00000000); + nv_icmd(dev, 0x0000081e, 0x00000000); + nv_icmd(dev, 0x0000081f, 0x00000000); + nv_icmd(dev, 0x00000848, 0x00000000); + nv_icmd(dev, 0x00000849, 0x00000000); + nv_icmd(dev, 0x0000084a, 0x00000000); + nv_icmd(dev, 0x0000084b, 0x00000000); + nv_icmd(dev, 0x0000084c, 0x00000000); + nv_icmd(dev, 0x0000084d, 0x00000000); + nv_icmd(dev, 0x0000084e, 0x00000000); + nv_icmd(dev, 0x0000084f, 0x00000000); + nv_icmd(dev, 0x00000850, 0x00000000); + nv_icmd(dev, 0x00000851, 0x00000000); + nv_icmd(dev, 0x00000852, 0x00000000); + nv_icmd(dev, 0x00000853, 0x00000000); + nv_icmd(dev, 0x00000854, 0x00000000); + nv_icmd(dev, 0x00000855, 0x00000000); + nv_icmd(dev, 0x00000856, 0x00000000); + nv_icmd(dev, 0x00000857, 0x00000000); + nv_icmd(dev, 0x00000738, 0x00000000); + nv_icmd(dev, 0x000006aa, 0x00000001); + nv_icmd(dev, 0x000006ab, 0x00000002); + nv_icmd(dev, 0x000006ac, 
0x00000080); + nv_icmd(dev, 0x000006ad, 0x00000100); + nv_icmd(dev, 0x000006ae, 0x00000100); + nv_icmd(dev, 0x000006b1, 0x00000011); + nv_icmd(dev, 0x000006bb, 0x000000cf); + nv_icmd(dev, 0x000006ce, 0x2a712488); + nv_icmd(dev, 0x00000739, 0x4085c000); + nv_icmd(dev, 0x0000073a, 0x00000080); + nv_icmd(dev, 0x00000786, 0x80000100); + nv_icmd(dev, 0x0000073c, 0x00010100); + nv_icmd(dev, 0x0000073d, 0x02800000); + nv_icmd(dev, 0x00000787, 0x000000cf); + nv_icmd(dev, 0x0000078c, 0x00000008); + nv_icmd(dev, 0x00000792, 0x00000001); + nv_icmd(dev, 0x00000794, 0x00000001); + nv_icmd(dev, 0x00000795, 0x00000001); + nv_icmd(dev, 0x00000796, 0x00000001); + nv_icmd(dev, 0x00000797, 0x000000cf); + nv_icmd(dev, 0x00000836, 0x00000001); + nv_icmd(dev, 0x0000079a, 0x00000002); + nv_icmd(dev, 0x00000833, 0x04444480); + nv_icmd(dev, 0x000007a1, 0x00000001); + nv_icmd(dev, 0x000007a3, 0x00000001); + nv_icmd(dev, 0x000007a4, 0x00000001); + nv_icmd(dev, 0x000007a5, 0x00000001); + nv_icmd(dev, 0x00000831, 0x00000004); + nv_icmd(dev, 0x0000080c, 0x00000002); + nv_icmd(dev, 0x0000080d, 0x00000100); + nv_icmd(dev, 0x0000080e, 0x00000100); + nv_icmd(dev, 0x0000080f, 0x00000001); + nv_icmd(dev, 0x00000823, 0x00000002); + nv_icmd(dev, 0x00000824, 0x00000100); + nv_icmd(dev, 0x00000825, 0x00000100); + nv_icmd(dev, 0x00000826, 0x00000001); + nv_icmd(dev, 0x0000095d, 0x00000001); + nv_icmd(dev, 0x0000082b, 0x00000004); + nv_icmd(dev, 0x00000942, 0x00010001); + nv_icmd(dev, 0x00000943, 0x00000001); + nv_icmd(dev, 0x00000944, 0x00000022); + nv_icmd(dev, 0x000007c5, 0x00010001); + nv_icmd(dev, 0x00000834, 0x00000001); + nv_icmd(dev, 0x000007c7, 0x00000001); + nv_icmd(dev, 0x0000c1b0, 0x0000000f); + nv_icmd(dev, 0x0000c1b1, 0x0000000f); + nv_icmd(dev, 0x0000c1b2, 0x0000000f); + nv_icmd(dev, 0x0000c1b3, 0x0000000f); + nv_icmd(dev, 0x0000c1b4, 0x0000000f); + nv_icmd(dev, 0x0000c1b5, 0x0000000f); + nv_icmd(dev, 0x0000c1b6, 0x0000000f); + nv_icmd(dev, 0x0000c1b7, 0x0000000f); + nv_icmd(dev, 0x0000c1b8, 0x0fac6881); + nv_icmd(dev, 0x0000c1b9, 0x00fac688); + nv_icmd(dev, 0x0001e100, 0x00000001); + nv_icmd(dev, 0x00001000, 0x00000002); + nv_icmd(dev, 0x000006aa, 0x00000001); + nv_icmd(dev, 0x000006ad, 0x00000100); + nv_icmd(dev, 0x000006ae, 0x00000100); + nv_icmd(dev, 0x000006b1, 0x00000011); + nv_icmd(dev, 0x0000078c, 0x00000008); + nv_icmd(dev, 0x00000792, 0x00000001); + nv_icmd(dev, 0x00000794, 0x00000001); + nv_icmd(dev, 0x00000795, 0x00000001); + nv_icmd(dev, 0x00000796, 0x00000001); + nv_icmd(dev, 0x00000797, 0x000000cf); + nv_icmd(dev, 0x0000079a, 0x00000002); + nv_icmd(dev, 0x00000833, 0x04444480); + nv_icmd(dev, 0x000007a1, 0x00000001); + nv_icmd(dev, 0x000007a3, 0x00000001); + nv_icmd(dev, 0x000007a4, 0x00000001); + nv_icmd(dev, 0x000007a5, 0x00000001); + nv_icmd(dev, 0x00000831, 0x00000004); + nv_icmd(dev, 0x0001e100, 0x00000001); + nv_icmd(dev, 0x00001000, 0x00000014); + nv_icmd(dev, 0x00000351, 0x00000100); + nv_icmd(dev, 0x00000957, 0x00000003); + nv_icmd(dev, 0x0000095d, 0x00000001); + nv_icmd(dev, 0x0000082b, 0x00000004); + nv_icmd(dev, 0x00000942, 0x00010001); + nv_icmd(dev, 0x00000943, 0x00000001); + nv_icmd(dev, 0x000007c5, 0x00010001); + nv_icmd(dev, 0x00000834, 0x00000001); + nv_icmd(dev, 0x000007c7, 0x00000001); + nv_icmd(dev, 0x0001e100, 0x00000001); + nv_icmd(dev, 0x00001000, 0x00000001); + nv_icmd(dev, 0x0000080c, 0x00000002); + nv_icmd(dev, 0x0000080d, 0x00000100); + nv_icmd(dev, 0x0000080e, 0x00000100); + nv_icmd(dev, 0x0000080f, 0x00000001); + nv_icmd(dev, 0x00000823, 0x00000002); + nv_icmd(dev, 
0x00000824, 0x00000100);
+	nv_icmd(dev, 0x00000825, 0x00000100);
+	nv_icmd(dev, 0x00000826, 0x00000001);
+	nv_icmd(dev, 0x0001e100, 0x00000001);
+	nv_wr32(dev, 0x400208, 0x00000000);
+	nv_wr32(dev, 0x404154, 0x00000400);
+
+	nvc0_grctx_generate_9097(dev);
+	nvc0_grctx_generate_902d(dev);
+	nvc0_grctx_generate_9039(dev);
+	nvc0_grctx_generate_90c0(dev);
+
+	nv_wr32(dev, 0x000260, r000260);
+	return 0;
+}
--
cgit v1.2.3


From ddbaf79a8b047dcccf766d0518626cdc0f43d58e Mon Sep 17 00:00:00 2001
From: Ben Skeggs
Date: Wed, 24 Nov 2010 10:52:43 +1000
Subject: drm/nvc0: implement fbcon acceleration

Signed-off-by: Ben Skeggs
---
 drivers/gpu/drm/nouveau/Makefile        |   5 +-
 drivers/gpu/drm/nouveau/nouveau_fbcon.c |  24 ++-
 drivers/gpu/drm/nouveau/nouveau_fbcon.h |   6 +
 drivers/gpu/drm/nouveau/nvc0_fbcon.c    | 271 ++++++++++++++++++++++++++++++++
 4 files changed, 300 insertions(+), 6 deletions(-)
 create mode 100644 drivers/gpu/drm/nouveau/nvc0_fbcon.c

diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 141771beb8e6..e12c97fd8db8 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -22,9 +22,10 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
 	nv84_crypt.o \
 	nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
 	nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
-	nv50_cursor.o nv50_display.o nv50_fbcon.o \
+	nv50_cursor.o nv50_display.o \
 	nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
-	nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
+	nv04_crtc.o nv04_display.o nv04_cursor.o \
+	nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
 	nv10_gpio.o nv50_gpio.o \
 	nv50_calc.o \
 	nv04_pm.o nv50_pm.o nva3_pm.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index ea861c915149..326eeda6ad7f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -68,6 +68,8 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 		else
 		if (dev_priv->card_type < NV_C0)
 			ret = nv50_fbcon_fillrect(info, rect);
+		else
+			ret = nvc0_fbcon_fillrect(info, rect);
 		mutex_unlock(&dev_priv->channel->mutex);
 	}
 
@@ -98,6 +100,8 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
 		else
 		if (dev_priv->card_type < NV_C0)
 			ret = nv50_fbcon_copyarea(info, image);
+		else
+			ret = nvc0_fbcon_copyarea(info, image);
 		mutex_unlock(&dev_priv->channel->mutex);
 	}
 
@@ -128,6 +132,8 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 		else
 		if (dev_priv->card_type < NV_C0)
 			ret = nv50_fbcon_imageblit(info, image);
+		else
+			ret = nvc0_fbcon_imageblit(info, image);
 		mutex_unlock(&dev_priv->channel->mutex);
 	}
 
@@ -163,10 +169,18 @@ nouveau_fbcon_sync(struct fb_info *info)
 		return 0;
 	}
 
-	BEGIN_RING(chan, 0, 0x0104, 1);
-	OUT_RING(chan, 0);
-	BEGIN_RING(chan, 0, 0x0100, 1);
-	OUT_RING(chan, 0);
+	if (dev_priv->card_type >= NV_C0) {
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
+		OUT_RING (chan, 0);
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
+		OUT_RING (chan, 0);
+	} else {
+		BEGIN_RING(chan, 0, 0x0104, 1);
+		OUT_RING (chan, 0);
+		BEGIN_RING(chan, 0, 0x0100, 1);
+		OUT_RING (chan, 0);
+	}
+
 	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
 	FIRE_RING(chan);
 	mutex_unlock(&chan->mutex);
@@ -374,6 +388,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
 		else
 		if (dev_priv->card_type < NV_C0)
 			ret = nv50_fbcon_accel_init(info);
+		else
+			ret = nvc0_fbcon_accel_init(info);
 
 		if (ret == 0)
 			info->fbops = &nouveau_fbcon_ops;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 6b933f2c3a5b..b73c29f87fc3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -44,11 +44,17 @@ int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
 int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
 int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv04_fbcon_accel_init(struct fb_info *info);
+
 int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
 int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
 int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv50_fbcon_accel_init(struct fb_info *info);
+int nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nvc0_fbcon_accel_init(struct fb_info *info);
+
 void nouveau_fbcon_gpu_lockup(struct fb_info *info);
 
 int nouveau_fbcon_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
new file mode 100644
index 000000000000..cbb4a1ae20b1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_mm.h"
+
+int
+nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
+
+	ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+	if (ret)
+		return ret;
+
+	if (rect->rop != ROP_COPY) {
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+		OUT_RING (chan, 1);
+	}
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0588, 1);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+		OUT_RING (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+	else
+		OUT_RING (chan, rect->color);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0600, 4);
+	OUT_RING (chan, rect->dx);
+	OUT_RING (chan, rect->dy);
+	OUT_RING (chan, rect->dx + rect->width);
+	OUT_RING (chan, rect->dy + rect->height);
+	if (rect->rop != ROP_COPY) {
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+		OUT_RING (chan, 3);
+	}
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
+
+	ret = RING_SPACE(chan, 12);
+	if (ret)
+		return ret;
+
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0110, 1);
+	OUT_RING (chan, 0);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x08b0, 4);
+	OUT_RING (chan, region->dx);
+	OUT_RING (chan, region->dy);
+	OUT_RING (chan, region->width);
+	OUT_RING (chan, region->height);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x08d0, 4);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, region->sx);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, region->sy);
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	uint32_t width, dwords, *data = (uint32_t *)image->data;
+	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+	uint32_t *palette = info->pseudo_palette;
+	int ret;
+
+	if (image->depth != 1)
+		return -ENODEV;
+
+	ret = RING_SPACE(chan, 11);
+	if (ret)
+		return ret;
+
+	width = ALIGN(image->width, 32);
+	dwords = (width * image->height) >> 5;
+
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0814, 2);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+		OUT_RING (chan, palette[image->bg_color] | mask);
+		OUT_RING (chan, palette[image->fg_color] | mask);
+	} else {
+		OUT_RING (chan, image->bg_color);
+		OUT_RING (chan, image->fg_color);
+	}
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0838, 2);
+	OUT_RING (chan, image->width);
+	OUT_RING (chan, image->height);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0850, 4);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, image->dx);
+	OUT_RING (chan, 0);
+	OUT_RING (chan, image->dy);
+
+	while (dwords) {
+		int push = dwords > 2047 ?
2047 : dwords; + + ret = RING_SPACE(chan, push + 1); + if (ret) + return ret; + + dwords -= push; + + BEGIN_NVC0(chan, 6, NvSub2D, 0x0860, push); + OUT_RINGp(chan, data, push); + data += push; + } + + FIRE_RING(chan); + return 0; +} + +int +nvc0_fbcon_accel_init(struct fb_info *info) +{ + struct nouveau_fbdev *nfbdev = info->par; + struct drm_device *dev = nfbdev->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = dev_priv->channel; + struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo; + int ret, format; + + ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d); + if (ret) + return ret; + + switch (info->var.bits_per_pixel) { + case 8: + format = 0xf3; + break; + case 15: + format = 0xf8; + break; + case 16: + format = 0xe8; + break; + case 32: + switch (info->var.transp.length) { + case 0: /* depth 24 */ + case 8: /* depth 32, just use 24.. */ + format = 0xe6; + break; + case 2: /* depth 30 */ + format = 0xd1; + break; + default: + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + ret = RING_SPACE(chan, 60); + if (ret) { + WARN_ON(1); + nouveau_fbcon_gpu_lockup(info); + return ret; + } + + printk(KERN_ERR "fb vma 0x%010llx\n", nvbo->vma.offset); + + BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1); + OUT_RING (chan, 0x0000902d); + BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2); + OUT_RING (chan, upper_32_bits(chan->notifier_bo->bo.offset)); + OUT_RING (chan, lower_32_bits(chan->notifier_bo->bo.offset)); + BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1); + OUT_RING (chan, 0); + BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1); + OUT_RING (chan, 1); + BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1); + OUT_RING (chan, 3); + BEGIN_NVC0(chan, 2, NvSub2D, 0x02a0, 1); + OUT_RING (chan, 0x55); + BEGIN_NVC0(chan, 2, NvSub2D, 0x08c0, 4); + OUT_RING (chan, 0); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, 1); + BEGIN_NVC0(chan, 2, NvSub2D, 0x0580, 2); + OUT_RING (chan, 4); + OUT_RING (chan, format); + BEGIN_NVC0(chan, 2, NvSub2D, 0x02e8, 2); + OUT_RING (chan, 2); + OUT_RING (chan, 1); + + BEGIN_NVC0(chan, 2, NvSub2D, 0x0804, 1); + OUT_RING (chan, format); + BEGIN_NVC0(chan, 2, NvSub2D, 0x0800, 1); + OUT_RING (chan, 1); + BEGIN_NVC0(chan, 2, NvSub2D, 0x0808, 3); + OUT_RING (chan, 0); + OUT_RING (chan, 0); + OUT_RING (chan, 1); + BEGIN_NVC0(chan, 2, NvSub2D, 0x081c, 1); + OUT_RING (chan, 1); + BEGIN_NVC0(chan, 2, NvSub2D, 0x0840, 4); + OUT_RING (chan, 0); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, 1); + BEGIN_NVC0(chan, 2, NvSub2D, 0x0200, 10); + OUT_RING (chan, format); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, info->fix.line_length); + OUT_RING (chan, info->var.xres_virtual); + OUT_RING (chan, info->var.yres_virtual); + OUT_RING (chan, upper_32_bits(nvbo->vma.offset)); + OUT_RING (chan, lower_32_bits(nvbo->vma.offset)); + BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10); + OUT_RING (chan, format); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, 1); + OUT_RING (chan, 0); + OUT_RING (chan, info->fix.line_length); + OUT_RING (chan, info->var.xres_virtual); + OUT_RING (chan, info->var.yres_virtual); + OUT_RING (chan, upper_32_bits(nvbo->vma.offset)); + OUT_RING (chan, lower_32_bits(nvbo->vma.offset)); + FIRE_RING (chan); + + return 0; +} + -- cgit v1.2.3 From 6d86951a45013ac5b060c5e6307b11b7c685c76f Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 8 Dec 2010 11:19:30 +1000 Subject: drm/nvc0: initial support for tiled buffer objects Signed-off-by: Ben Skeggs --- 
drivers/gpu/drm/nouveau/nouveau_bo.c | 3 -- drivers/gpu/drm/nouveau/nouveau_dma.h | 3 +- drivers/gpu/drm/nouveau/nv50_crtc.c | 18 +++++++----- drivers/gpu/drm/nouveau/nv50_evo.c | 53 ++++++++++++++++++++++++++--------- drivers/gpu/drm/nouveau/nvc0_vram.c | 10 ++++++- 5 files changed, 62 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 5dc639e0c969..74d0ef41118d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -120,9 +120,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, align >>= PAGE_SHIFT; if (!nvbo->no_vm && dev_priv->chan_vm) { - if (dev_priv->card_type == NV_C0) - page_shift = 12; - ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift, NV_MEM_ACCESS_RW, &nvbo->vma); if (ret) { diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h index c118a331b5bc..c36f1763feaa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.h +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h @@ -77,7 +77,8 @@ enum { /* G80+ display objects */ NvEvoVRAM = 0x01000000, NvEvoFB16 = 0x01000001, - NvEvoFB32 = 0x01000002 + NvEvoFB32 = 0x01000002, + NvEvoVRAM_LP = 0x01000003 }; #define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039 diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 2c346f797285..9023c4dbb449 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -115,15 +115,16 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked) OUT_RING(evo, 0); BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1); if (dev_priv->chipset != 0x50) - if (nv_crtc->fb.tile_flags == 0x7a00) + if (nv_crtc->fb.tile_flags == 0x7a00 || + nv_crtc->fb.tile_flags == 0xfe00) OUT_RING(evo, NvEvoFB32); else if (nv_crtc->fb.tile_flags == 0x7000) OUT_RING(evo, NvEvoFB16); else - OUT_RING(evo, NvEvoVRAM); + OUT_RING(evo, NvEvoVRAM_LP); else - OUT_RING(evo, NvEvoVRAM); + OUT_RING(evo, NvEvoVRAM_LP); } nv_crtc->fb.blanked = blanked; @@ -555,13 +556,14 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, return ret; BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1); - if (nv_crtc->fb.tile_flags == 0x7a00) + if (nv_crtc->fb.tile_flags == 0x7a00 || + nv_crtc->fb.tile_flags == 0xfe00) OUT_RING(evo, NvEvoFB32); else if (nv_crtc->fb.tile_flags == 0x7000) OUT_RING(evo, NvEvoFB16); else - OUT_RING(evo, NvEvoVRAM); + OUT_RING(evo, NvEvoVRAM_LP); } ret = RING_SPACE(evo, 12); @@ -575,8 +577,10 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, if (!nv_crtc->fb.tile_flags) { OUT_RING(evo, drm_fb->pitch | (1 << 20)); } else { - OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | - fb->nvbo->tile_mode); + u32 tile_mode = fb->nvbo->tile_mode; + if (dev_priv->card_type >= NV_C0) + tile_mode >>= 4; + OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode); } if (dev_priv->chipset == 0x50) OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format); diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c index 887b2a97e2a2..14e24e906ee8 100644 --- a/drivers/gpu/drm/nouveau/nv50_evo.c +++ b/drivers/gpu/drm/nouveau/nv50_evo.c @@ -53,7 +53,8 @@ nv50_evo_channel_del(struct nouveau_channel **pevo) int nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name, - u32 tile_flags, u32 magic_flags, u32 offset, u32 limit) + u32 tile_flags, u32 magic_flags, u32 offset, u32 limit, + u32 flags5) { struct drm_nouveau_private *dev_priv = evo->dev->dev_private; struct drm_device *dev = evo->dev; @@ 
-70,10 +71,7 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name, nv_wo32(obj, 8, offset); nv_wo32(obj, 12, 0x00000000); nv_wo32(obj, 16, 0x00000000); - if (dev_priv->card_type < NV_C0) - nv_wo32(obj, 20, 0x00010000); - else - nv_wo32(obj, 20, 0x00020000); + nv_wo32(obj, 20, flags5); dev_priv->engine.instmem.flush(dev); ret = nouveau_ramht_insert(evo, name, obj); @@ -264,9 +262,31 @@ nv50_evo_create(struct drm_device *dev) } /* create some default objects for the scanout memtypes we support */ + if (dev_priv->card_type >= NV_C0) { + ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19, + 0, 0xffffffff, 0x00000000); + if (ret) { + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + + ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19, + 0, dev_priv->vram_size, 0x00020000); + if (ret) { + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + + ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19, + 0, dev_priv->vram_size, 0x00000000); + if (ret) { + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + } else if (dev_priv->chipset != 0x50) { ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19, - 0, 0xffffffff); + 0, 0xffffffff, 0x00010000); if (ret) { nv50_evo_channel_del(&dev_priv->evo); return ret; @@ -274,18 +294,25 @@ nv50_evo_create(struct drm_device *dev) ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19, - 0, 0xffffffff); + 0, 0xffffffff, 0x00010000); if (ret) { nv50_evo_channel_del(&dev_priv->evo); return ret; } - } - ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19, - 0, dev_priv->vram_size); - if (ret) { - nv50_evo_channel_del(&dev_priv->evo); - return ret; + ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19, + 0, dev_priv->vram_size, 0x00010000); + if (ret) { + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } + + ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19, + 0, dev_priv->vram_size, 0x00010000); + if (ret) { + nv50_evo_channel_del(&dev_priv->evo); + return ret; + } } return 0; diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c index 41fcae5ffba4..858eda5dedd1 100644 --- a/drivers/gpu/drm/nouveau/nvc0_vram.c +++ b/drivers/gpu/drm/nouveau/nvc0_vram.c @@ -29,8 +29,16 @@ bool nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags) { - if (likely(!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))) + switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) { + case 0x0000: + case 0xfe00: + case 0xdb00: + case 0x1100: return true; + default: + break; + } + return false; } -- cgit v1.2.3 From 183720b8af5301e2eab7f3163f03133c5a6ad6da Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 9 Dec 2010 15:17:10 +1000 Subject: drm/nvc0: accelerate ttm buffer moves Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_bo.c | 57 +++++++++++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 74d0ef41118d..a7fae26f4654 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -514,6 +514,58 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, return chan->vram_handle; } +static int +nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); + u64 src_offset = old_mem->start << PAGE_SHIFT; + u64 dst_offset = 
new_mem->start << PAGE_SHIFT; + u32 page_count = new_mem->num_pages; + int ret; + + if (!nvbo->no_vm) { + if (old_mem->mem_type == TTM_PL_VRAM) + src_offset = nvbo->vma.offset; + else + src_offset += dev_priv->gart_info.aper_base; + + if (new_mem->mem_type == TTM_PL_VRAM) + dst_offset = nvbo->vma.offset; + else + dst_offset += dev_priv->gart_info.aper_base; + } + + page_count = new_mem->num_pages; + while (page_count) { + int line_count = (page_count > 2047) ? 2047 : page_count; + + ret = RING_SPACE(chan, 12); + if (ret) + return ret; + + BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2); + OUT_RING (chan, upper_32_bits(dst_offset)); + OUT_RING (chan, lower_32_bits(dst_offset)); + BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6); + OUT_RING (chan, upper_32_bits(src_offset)); + OUT_RING (chan, lower_32_bits(src_offset)); + OUT_RING (chan, PAGE_SIZE); /* src_pitch */ + OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ + OUT_RING (chan, PAGE_SIZE); /* line_length */ + OUT_RING (chan, line_count); + BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1); + OUT_RING (chan, 0x00100110); + + page_count -= line_count; + src_offset += (PAGE_SIZE * line_count); + dst_offset += (PAGE_SIZE * line_count); + } + + return 0; +} + static int nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) @@ -690,7 +742,10 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, if (dev_priv->card_type < NV_50) ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem); else + if (dev_priv->card_type < NV_C0) ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem); + else + ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem); if (ret == 0) { ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, @@ -836,7 +891,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, } /* Software copy if the card isn't up and running yet. */ - if (!dev_priv->channel || dev_priv->card_type == NV_C0) { + if (!dev_priv->channel) { ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); goto out; } -- cgit v1.2.3 From 880981e49b90568ebb53428e231b43b8c1049ef9 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 21 Dec 2010 16:16:13 -0500 Subject: drm/radeon/kms: disable bo moves using the blitter Blitting from vram to gart is problematic at the moment. Use the CPU for now to avoid buffer corruption. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f7d7477daffb..499122c8ccc9 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2782,6 +2782,11 @@ static int evergreen_startup(struct radeon_device *rdev) rdev->asic->copy = NULL; dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); } + /* XXX: ontario has problems blitting to gart at the moment */ + if (rdev->family == CHIP_PALM) { + rdev->asic->copy = NULL; + rdev->mc.active_vram_size = rdev->mc.visible_vram_size; + } /* allocate wb buffer */ r = radeon_wb_init(rdev); -- cgit v1.2.3 From 204663c48711ddceee09df46269cd34d49d1f7be Mon Sep 17 00:00:00 2001 From: Marek Olšák Date: Tue, 21 Dec 2010 21:27:34 +0100 Subject: drm/radeon/kms: add ARGB2101010 colorbuffer support for r500 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This should be part of DRM 2.8.0. 
Signed-off-by: Marek Olšák Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r300.c | 7 +++++++ drivers/gpu/drm/radeon/radeon_drv.c | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index cde1d3480d93..36b4f7b48d6a 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -787,6 +787,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p, case 15: track->cb[i].cpp = 2; break; + case 5: + if (p->rdev->family < CHIP_RV515) { + DRM_ERROR("Invalid color buffer format (%d)!\n", + ((idx_value >> 21) & 0xF)); + return -EINVAL; + } + /* Pass through. */ case 6: track->cb[i].cpp = 4; break; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 6fb1218f9d76..520b776b09f4 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -48,7 +48,7 @@ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs - * 2.8.0 - pageflip support, r500 US_FORMAT regs. + * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf */ #define KMS_DRIVER_MAJOR 2 #define KMS_DRIVER_MINOR 8 -- cgit v1.2.3 From c42988012ad9c1807b7c7a5ff855cd630094989b Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sat, 25 Dec 2010 16:17:32 +0000 Subject: drm/nouveau: Only select ACPI_VIDEO if its dependencies are met CONFIG_ACPI_VIDEO depends on more than just CONFIG_ACPI, so add those dependencies to the Kconfig select condition. The case where some dependencies fail to be satisfied should be handled correctly, because in that case the ACPI_VIDEO symbols we use are converted into static-inline stubs. Signed-off-by: Ben Hutchings Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 72730e9ca06c..21d6c29c2d21 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -10,7 +10,7 @@ config DRM_NOUVEAU select FB select FRAMEBUFFER_CONSOLE if !EMBEDDED select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT - select ACPI_VIDEO if ACPI + select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT help Choose this option for open-source nVidia support. -- cgit v1.2.3 From 8e91182bbdb8591be4835907d825a5b9a80495a9 Mon Sep 17 00:00:00 2001 From: Michel Hermier Date: Sat, 25 Dec 2010 16:58:56 +0100 Subject: drm/nouveau: Validate channel indices passed from userspace. While hacking on libdrm for improvements, I triggered a kernel crash related to the fact that the NOUVEAU_NOTIFIEROBJ_ALLOC ioctl calls nouveau_channel_get with an unchecked channel index. The patch ensures that the channel index is unsigned and validates its value in nouveau_channel_get.
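The shape of the check is the standard one for any ioctl-supplied table index: compare it, as an unsigned quantity, against the table size before dereferencing, which rejects both negative and out-of-range values in a single test. A minimal sketch of the pattern (hypothetical helper, not the driver's code; the real fix is in the diff below):

	static struct nouveau_channel *
	channel_lookup(struct nouveau_channel **table, unsigned int id,
		       unsigned int max)
	{
		/* an unsigned compare also catches "negative" ids from userspace */
		if (id >= max)
			return ERR_PTR(-EINVAL);
		return table[id];
	}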
Signed-off-by: Michel Hermier Signed-off-by: Francisco Jerez Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index e37977d02463..4d2f19420922 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -253,6 +253,9 @@ nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id) struct nouveau_channel *chan; unsigned long flags; + if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR)) + return ERR_PTR(-EINVAL); + spin_lock_irqsave(&dev_priv->channels.lock, flags); chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]); spin_unlock_irqrestore(&dev_priv->channels.lock, flags); -- cgit v1.2.3 From b1cd916ab96e5fcd16e2009410af2e8edfae2962 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 24 Dec 2010 09:33:33 +1000 Subject: drm/nvc0: kill off a couple more magics Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_graph.c | 14 +------------- drivers/gpu/drm/nouveau/nvc0_graph.h | 2 -- drivers/gpu/drm/nouveau/nvc0_grctx.c | 24 ++++++++++++++++++++---- 3 files changed, 21 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index cf2f6aa920b4..f03c17a10ab9 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -334,8 +334,6 @@ nvc0_graph_create(struct drm_device *dev) case 0xc0: if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */ priv->magic_not_rop_nr = 0x07; - priv->magic419bd0 = 0x0a360000; - priv->magic419be4 = 0x04c33a54; /* filled values up to tp_total, the rest 0 */ priv->magicgpc980[0] = 0x22111000; priv->magicgpc980[1] = 0x00000233; @@ -345,8 +343,6 @@ nvc0_graph_create(struct drm_device *dev) } else if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */ priv->magic_not_rop_nr = 0x05; - priv->magic419bd0 = 0x043c0000; - priv->magic419be4 = 0x09041208; priv->magicgpc980[0] = 0x11110000; priv->magicgpc980[1] = 0x00233222; priv->magicgpc980[2] = 0x00000000; @@ -355,8 +351,6 @@ nvc0_graph_create(struct drm_device *dev) } else if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */ priv->magic_not_rop_nr = 0x06; - priv->magic419bd0 = 0x023e0000; - priv->magic419be4 = 0x10414104; priv->magicgpc980[0] = 0x11110000; priv->magicgpc980[1] = 0x03332222; priv->magicgpc980[2] = 0x00000000; @@ -366,8 +360,6 @@ nvc0_graph_create(struct drm_device *dev) break; case 0xc3: /* 450, 4/0/0/0, 2 */ priv->magic_not_rop_nr = 0x03; - priv->magic419bd0 = 0x00500000; - priv->magic419be4 = 0x00000000; priv->magicgpc980[0] = 0x00003210; priv->magicgpc980[1] = 0x00000000; priv->magicgpc980[2] = 0x00000000; @@ -376,8 +368,6 @@ nvc0_graph_create(struct drm_device *dev) break; case 0xc4: /* 460, 3/4/0/0, 4 */ priv->magic_not_rop_nr = 0x01; - priv->magic419bd0 = 0x045c0000; - priv->magic419be4 = 0x09041208; priv->magicgpc980[0] = 0x02321100; priv->magicgpc980[1] = 0x00000000; priv->magicgpc980[2] = 0x00000000; @@ -386,14 +376,12 @@ nvc0_graph_create(struct drm_device *dev) break; } - if (!priv->magic419bd0) { + if (!priv->magic_not_rop_nr) { NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n", priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2], priv->tp_nr[3], priv->rop_nr); /* use 0xc3's values... 
*/ priv->magic_not_rop_nr = 0x03; - priv->magic419bd0 = 0x00500000; - priv->magic419be4 = 0x00000000; priv->magicgpc980[0] = 0x00003210; priv->magicgpc980[1] = 0x00000000; priv->magicgpc980[2] = 0x00000000; diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h index 1e1f24f3fd34..40e26f9c56c4 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.h +++ b/drivers/gpu/drm/nouveau/nvc0_graph.h @@ -46,8 +46,6 @@ struct nvc0_graph_priv { struct nouveau_gpuobj *unk4188b8; u8 magic_not_rop_nr; - u32 magic419bd0; - u32 magic419be4; u32 magicgpc980[4]; u32 magicgpc918; }; diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c index 88fa6211ac19..fddfab73a58d 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grctx.c +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c @@ -1875,9 +1875,11 @@ nvc0_grctx_generate(struct nouveau_channel *chan) } if (1) { - u32 data[6] = {}; + u32 data[6] = {}, data2[2] = {}; u8 tpnr[GPC_MAX]; + u8 shift, ntpcv; + /* calculate first set of magics */ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); for (tp = 0; tp < priv->tp_total; tp++) { @@ -1892,6 +1894,20 @@ nvc0_grctx_generate(struct nouveau_channel *chan) for (; tp < 32; tp++) data[tp / 6] |= 7 << ((tp % 6) * 5); + /* and the second... */ + shift = 0; + ntpcv = priv->tp_total; + while (!(ntpcv & (1 << 4))) { + ntpcv <<= 1; + shift++; + } + + data2[0] = (ntpcv << 16); + data2[0] |= (shift << 21); + data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24); + for (i = 1; i < 7; i++) + data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5); + // GPC_BROADCAST nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) | priv->magic_not_rop_nr); @@ -1900,9 +1916,9 @@ nvc0_grctx_generate(struct nouveau_channel *chan) // GPC_BROADCAST.TP_BROADCAST nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) | - priv->magic_not_rop_nr | - priv->magic419bd0); - nv_wr32(dev, 0x419be4, priv->magic419be4); + priv->magic_not_rop_nr | + data2[0]); + nv_wr32(dev, 0x419be4, data2[1]); for (i = 0; i < 6; i++) nv_wr32(dev, 0x419b00 + (i * 4), data[i]); -- cgit v1.2.3 From 93d0cd7b9e4855c12b3bb5c9367872476c78fef9 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Wed, 29 Dec 2010 10:41:20 +1000 Subject: drm/nvc0: nuke left-over debug messages Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_fbcon.c | 2 -- drivers/gpu/drm/nouveau/nvc0_graph.c | 1 - 2 files changed, 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index cbb4a1ae20b1..fa5d4c234383 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c @@ -200,8 +200,6 @@ nvc0_fbcon_accel_init(struct fb_info *info) return ret; } - printk(KERN_ERR "fb vma 0x%010llx\n", nvbo->vma.offset); - BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1); OUT_RING (chan, 0x0000902d); BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2); diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index f03c17a10ab9..c28bc7b04d27 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -244,7 +244,6 @@ nvc0_graph_load_context(struct nouveau_channel *chan) if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010)) NV_ERROR(dev, "PGRAPH: load_ctx timeout\n"); - printk(KERN_ERR "load_ctx 0x%08x\n", nv_rd32(dev, 0x409b00)); return 0; } -- cgit v1.2.3 From eae5e7f304222ee795936e9466110a9d5d5ec558 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 30 Dec 2010 11:40:07 +1000 Subject: drm/nvc0: parse a couple more PGRAPH_INTR Signed-off-by: Ben Skeggs --- 
drivers/gpu/drm/nouveau/nvc0_graph.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index c28bc7b04d27..8352d6b92cca 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -740,6 +740,14 @@ nvc0_graph_isr(struct drm_device *dev) stat &= ~0x00000010; } + if (stat & 0x00000020) { + NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d " + "class 0x%04x mthd 0x%04x data 0x%08x\n", + chid, inst, subc, class, mthd, data); + nv_wr32(dev, 0x400100, 0x00000020); + stat &= ~0x00000020; + } + if (stat & 0x00100000) { NV_INFO(dev, "PGRAPH: DATA_ERROR ["); nouveau_enum_print(nvc0_graph_data_error, code); @@ -750,6 +758,14 @@ nvc0_graph_isr(struct drm_device *dev) stat &= ~0x00100000; } + if (stat & 0x00200000) { + u32 trap = nv_rd32(dev, 0x400108); + NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap); + nv_wr32(dev, 0x400108, trap); + nv_wr32(dev, 0x400100, 0x00200000); + stat &= ~0x00200000; + } + if (stat & 0x00080000) { u32 ustat = nv_rd32(dev, 0x409c18); -- cgit v1.2.3 From 6effe39364f1212aa57e1b5f0bd0f388ebfe9b24 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 30 Dec 2010 11:48:03 +1000 Subject: drm/nv50: sync up gr data error names with rnn, use for nvc0 also Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_drv.h | 1 + drivers/gpu/drm/nouveau/nv50_graph.c | 49 ++++++++++++++++++++++++++++++----- drivers/gpu/drm/nouveau/nvc0_graph.c | 7 +---- 3 files changed, 44 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e81575687354..6d749b792088 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -1189,6 +1189,7 @@ extern int nv50_graph_unload_context(struct drm_device *); extern int nv50_grctx_init(struct nouveau_grctx *); extern void nv50_graph_tlb_flush(struct drm_device *dev); extern void nv86_graph_tlb_flush(struct drm_device *dev); +extern struct nouveau_enum nv50_data_error_names[]; /* nvc0_graph.c */ extern int nvc0_graph_init(struct drm_device *); diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index c510e74acf4d..2d7ea75a09d4 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -554,13 +554,48 @@ static struct nouveau_bitfield nv50_graph_trap_ccache[] = { }; /* There must be a *lot* of these. Will take some time to gather them up. 
*/ -static struct nouveau_enum nv50_data_error_names[] = { - { 4, "INVALID_VALUE" }, - { 5, "INVALID_ENUM" }, - { 8, "INVALID_OBJECT" }, - { 0xc, "INVALID_BITFIELD" }, - { 0x28, "MP_NO_REG_SPACE" }, - { 0x2b, "MP_BLOCK_SIZE_MISMATCH" }, +struct nouveau_enum nv50_data_error_names[] = { + { 0x00000003, "INVALID_QUERY_OR_TEXTURE" }, + { 0x00000004, "INVALID_VALUE" }, + { 0x00000005, "INVALID_ENUM" }, + { 0x00000008, "INVALID_OBJECT" }, + { 0x00000009, "READ_ONLY_OBJECT" }, + { 0x0000000a, "SUPERVISOR_OBJECT" }, + { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT" }, + { 0x0000000c, "INVALID_BITFIELD" }, + { 0x0000000d, "BEGIN_END_ACTIVE" }, + { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT" }, + { 0x0000000f, "VIEWPORT_ID_NEEDS_GP" }, + { 0x00000010, "RT_DOUBLE_BIND" }, + { 0x00000011, "RT_TYPES_MISMATCH" }, + { 0x00000012, "RT_LINEAR_WITH_ZETA" }, + { 0x00000015, "FP_TOO_FEW_REGS" }, + { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH" }, + { 0x00000017, "RT_LINEAR_WITH_MSAA" }, + { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT" }, + { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT" }, + { 0x0000001a, "RT_INVALID_ALIGNMENT" }, + { 0x0000001b, "SAMPLER_OVER_LIMIT" }, + { 0x0000001c, "TEXTURE_OVER_LIMIT" }, + { 0x0000001e, "GP_TOO_MANY_OUTPUTS" }, + { 0x0000001f, "RT_BPP128_WITH_MS8" }, + { 0x00000021, "Z_OUT_OF_BOUNDS" }, + { 0x00000023, "XY_OUT_OF_BOUNDS" }, + { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED" }, + { 0x00000028, "CP_NO_REG_SPACE_STRIPED" }, + { 0x00000029, "CP_NO_REG_SPACE_PACKED" }, + { 0x0000002a, "CP_NOT_ENOUGH_WARPS" }, + { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH" }, + { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS" }, + { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS" }, + { 0x0000002e, "CP_NO_BLOCKDIM_LATCH" }, + { 0x00000031, "ENG2D_FORMAT_MISMATCH" }, + { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP" }, + { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT" }, + { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT" }, + { 0x00000046, "LAYER_ID_NEEDS_GP" }, + { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT" }, + { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT" }, {} }; diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index 8352d6b92cca..43b44a4b3ab8 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -693,11 +693,6 @@ nvc0_graph_init(struct drm_device *dev) return 0; } -static struct nouveau_enum nvc0_graph_data_error[] = { - { 5, "INVALID_ENUM" }, - {} -}; - static int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst) { @@ -750,7 +745,7 @@ nvc0_graph_isr(struct drm_device *dev) if (stat & 0x00100000) { NV_INFO(dev, "PGRAPH: DATA_ERROR ["); - nouveau_enum_print(nvc0_graph_data_error, code); + nouveau_enum_print(nv50_data_error_names, code); printk("] ch %d [0x%010llx] subc %d class 0x%04x " "mthd 0x%04x data 0x%08x\n", chid, inst, subc, class, mthd, data); -- cgit v1.2.3 From 2a55c9a7ff2a863f05a9b9c8519ef81737d4a3ef Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 30 Dec 2010 11:53:48 +1000 Subject: drm/nvc0: reserve only subc 0 for kernel use The current 3D driver expects this behaviour. While this could be changed, there's no compelling reason to reserve more than one subchannel for the DRM. If we ever need to use an object other than M2MF, we can just re-bind subchannel 0 as required.
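Re-binding is cheap: on Fermi, method 0x0000 on a subchannel binds an object to it, exactly as the nvc0 fbcon acceleration code earlier in this series does for its 2D class. A minimal sketch of re-binding subchannel 0 (class number illustrative; assumes the object was created with nouveau_gpuobj_gr_new() beforehand):

	ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, 2, 0 /* subc 0 */, 0x0000, 1);
		OUT_RING (chan, 0x902d);	/* e.g. swap M2MF out for 2D */
	}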
Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 22 ++++++++++++++-------- drivers/gpu/drm/nouveau/nouveau_fence.c | 2 +- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 4d2f19420922..a57a1d2f3a11 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -446,14 +446,20 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, else init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; - init->subchan[0].handle = NvM2MF; - if (dev_priv->card_type < NV_50) - init->subchan[0].grclass = 0x0039; - else - init->subchan[0].grclass = 0x5039; - init->subchan[1].handle = NvSw; - init->subchan[1].grclass = NV_SW; - init->nr_subchan = 2; + if (dev_priv->card_type < NV_C0) { + init->subchan[0].handle = NvM2MF; + if (dev_priv->card_type < NV_50) + init->subchan[0].grclass = 0x0039; + else + init->subchan[0].grclass = 0x5039; + init->subchan[1].handle = NvSw; + init->subchan[1].grclass = NV_SW; + init->nr_subchan = 2; + } else { + init->subchan[0].handle = 0x9039; + init->subchan[0].grclass = 0x9039; + init->nr_subchan = 1; + } /* Named memory object area */ ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 88b2f29ca3e4..221b8462ea37 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -165,7 +165,7 @@ nouveau_fence_emit(struct nouveau_fence *fence) if (dev_priv->card_type < NV_C0) BEGIN_RING(chan, NvSubSw, 0x0050, 1); else - BEGIN_NVC0(chan, 2, NvSubSw, 0x0050, 1); + BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1); } else { BEGIN_RING(chan, NvSubSw, 0x0150, 1); } -- cgit v1.2.3 From ec9c0883ea3aa90d6c6d6b16a64d0986f40baff0 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Fri, 31 Dec 2010 12:10:49 +1000 Subject: drm/nvc0/pfifo: support for chipsets with only one PSUBFIFO (0xc1) Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_fifo.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c index 82a4ded5dae8..e6f92c541dba 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fifo.c +++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c @@ -33,6 +33,7 @@ struct nvc0_fifo_priv { struct nouveau_gpuobj *playlist[2]; int cur_playlist; struct nouveau_vma user_vma; + int spoon_nr; }; struct nvc0_fifo_chan { @@ -324,13 +325,18 @@ nvc0_fifo_init(struct drm_device *dev) nv_wr32(dev, 0x000204, 0xffffffff); nv_wr32(dev, 0x002204, 0xffffffff); + priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204)); + NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr); + /* assign engines to subfifos */ - nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */ - nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */ - nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */ - nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */ - nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */ - nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */ + if (priv->spoon_nr >= 3) { + nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */ + nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */ + nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */ + nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */ + nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */ + nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */ + } /* PSUBFIFO[n] */ for (i = 0; i < 3; i++) { -- cgit v1.2.3 From 680a48720f55d2f21cbf45c1f7ca38d6b73471dc Mon Sep 17 
00:00:00 2001 From: Ben Skeggs Date: Fri, 31 Dec 2010 12:21:56 +1000 Subject: drm/nvc0/pgraph: more unit names Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_grctx.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c index fddfab73a58d..a45cdc494502 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grctx.c +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c @@ -1557,7 +1557,7 @@ nvc0_grctx_generate_unk47xx(struct drm_device *dev) } static void -nvc0_grctx_generate_unk58xx(struct drm_device *dev) +nvc0_grctx_generate_shaders(struct drm_device *dev) { nv_wr32(dev, 0x405800, 0x078000bf); nv_wr32(dev, 0x405830, 0x02180000); @@ -1593,7 +1593,7 @@ nvc0_grctx_generate_unk64xx(struct drm_device *dev) } static void -nvc0_grctx_generate_unk78xx(struct drm_device *dev) +nvc0_grctx_generate_tpbus(struct drm_device *dev) { nv_wr32(dev, 0x407804, 0x00000023); nv_wr32(dev, 0x40780c, 0x0a418820); @@ -1606,7 +1606,7 @@ nvc0_grctx_generate_unk78xx(struct drm_device *dev) } static void -nvc0_grctx_generate_unk80xx(struct drm_device *dev) +nvc0_grctx_generate_ccache(struct drm_device *dev) { nv_wr32(dev, 0x408000, 0x00000000); nv_wr32(dev, 0x408004, 0x00000000); @@ -1811,11 +1811,11 @@ nvc0_grctx_generate(struct nouveau_channel *chan) nvc0_grctx_generate_macro(dev); nvc0_grctx_generate_m2mf(dev); nvc0_grctx_generate_unk47xx(dev); - nvc0_grctx_generate_unk58xx(dev); + nvc0_grctx_generate_shaders(dev); nvc0_grctx_generate_unk60xx(dev); nvc0_grctx_generate_unk64xx(dev); - nvc0_grctx_generate_unk78xx(dev); - nvc0_grctx_generate_unk80xx(dev); + nvc0_grctx_generate_tpbus(dev); + nvc0_grctx_generate_ccache(dev); nvc0_grctx_generate_rop(dev); nvc0_grctx_generate_gpc(dev); nvc0_grctx_generate_tp(dev); -- cgit v1.2.3 From 47a44d27ca246b815de2b4223daf86070315fb8d Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Sat, 1 Jan 2011 12:26:23 +1000 Subject: drm/nvc0/pgraph: fix 0x406028/0x405870 init Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_grctx.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c index a45cdc494502..b9e68b2d30aa 100644 --- a/drivers/gpu/drm/nouveau/nvc0_grctx.c +++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c @@ -1801,7 +1801,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan) struct nvc0_graph_chan *grch = chan->pgraph_ctx; struct drm_device *dev = chan->dev; int i, gpc, tp, id; - u32 r000260; + u32 r000260, tmp; r000260 = nv_rd32(dev, 0x000260); nv_wr32(dev, 0x000260, r000260 & ~1); @@ -1843,8 +1843,12 @@ nvc0_grctx_generate(struct nouveau_channel *chan) } } - nv_wr32(dev, 0x406028, 0x00000443); - nv_wr32(dev, 0x405870, 0x00000443); + tmp = 0; + for (i = 0; i < priv->gpc_nr; i++) + tmp |= priv->tp_nr[i] << (i * 4); + nv_wr32(dev, 0x406028, tmp); + nv_wr32(dev, 0x405870, tmp); + nv_wr32(dev, 0x40602c, 0x00000000); nv_wr32(dev, 0x405874, 0x00000000); nv_wr32(dev, 0x406030, 0x00000000); -- cgit v1.2.3 From 17db7042b7ff77a4ae8b83ab42ec8286a9715a48 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 21 Dec 2010 16:05:39 -0500 Subject: drm/radeon/kms: implement gpu lockup check for evergreen Now that soft reset works, we can add this. 
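As a rough sketch of how the hook is consumed (illustrative caller only; the evergreen implementation itself is in the diff below), the idea is that a stalled fence wait asks the ASIC whether the GPU has genuinely locked up before escalating to a soft reset:

	/* hypothetical consumer: escalate to a reset only on a real lockup */
	static int example_handle_fence_timeout(struct radeon_device *rdev)
	{
		if (!radeon_gpu_is_lockup(rdev))	/* -> evergreen_gpu_is_lockup() */
			return -EBUSY;	/* GPU is still making progress, keep waiting */
		return radeon_gpu_reset(rdev);
	}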
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 26 ++++++++++++++++++++++++-- drivers/gpu/drm/radeon/radeon.h | 1 + 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index ec641ce64764..3ae63ceb9c33 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2085,8 +2085,30 @@ int evergreen_mc_init(struct radeon_device *rdev) bool evergreen_gpu_is_lockup(struct radeon_device *rdev) { - /* FIXME: implement for evergreen */ - return false; + u32 srbm_status; + u32 grbm_status; + u32 grbm_status_se0, grbm_status_se1; + struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup; + int r; + + srbm_status = RREG32(SRBM_STATUS); + grbm_status = RREG32(GRBM_STATUS); + grbm_status_se0 = RREG32(GRBM_STATUS_SE0); + grbm_status_se1 = RREG32(GRBM_STATUS_SE1); + if (!(grbm_status & GUI_ACTIVE)) { + r100_gpu_lockup_update(lockup, &rdev->cp); + return false; + } + /* force CP activities */ + r = radeon_ring_lock(rdev, 2); + if (!r) { + /* PACKET2 NOP */ + radeon_ring_write(rdev, 0x80000000); + radeon_ring_write(rdev, 0x80000000); + radeon_ring_unlock_commit(rdev); + } + rdev->cp.rptr = RREG32(CP_RB_RPTR); + return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp); } static int evergreen_gpu_soft_reset(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 431d4186ddf0..d2697f8f2da8 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1031,6 +1031,7 @@ struct evergreen_asic { unsigned tiling_npipes; unsigned tiling_group_size; unsigned tile_config; + struct r100_gpu_lockup lockup; }; union radeon_asic_config { -- cgit v1.2.3 From 6c2df40ec00e52a5fb0c691b0e79324b9015aaa4 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 6 Dec 2010 12:30:31 +1000 Subject: vga_switcheroo: print the IGD/DIS flag in the debugfs output. We really want to see this so we can confirm that we pick the right one. Signed-off-by: Dave Airlie --- drivers/gpu/vga/vga_switcheroo.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index c8768f38511e..0a8678fe4d64 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -174,7 +174,8 @@ static int vga_switcheroo_show(struct seq_file *m, void *v) int i; mutex_lock(&vgasr_mutex); for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { - seq_printf(m, "%d:%c:%s:%s\n", i, + seq_printf(m, "%d:%s:%c:%s:%s\n", i, + vgasr_priv.clients[i].id == VGA_SWITCHEROO_DIS ? "DIS" : "IGD", vgasr_priv.clients[i].active ? '+' : ' ', vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off", pci_name(vgasr_priv.clients[i].pdev)); -- cgit v1.2.3 From 5cfb3c3a1013e7fca54c3624871755cdfd960b3b Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 6 Dec 2010 12:31:50 +1000 Subject: vga_switcheroo: make power switch handler optional At least on the nvidia mux the power switch seems to be executed by the ACPI PS0/PS3 methods, so there is no need to do it explicitly.
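In practice this means a platform whose ACPI power handling already covers the GPUs can register a mux-only handler; a hypothetical example (struct fields as used in vga_switcheroo.c below):

	static struct vga_switcheroo_handler example_handler = {
		.switchto	= example_switchto,
		/* .power_state left NULL: ACPI powers the cards implicitly */
	};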
Signed-off-by: Dave Airlie --- drivers/gpu/vga/vga_switcheroo.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 0a8678fe4d64..c3c213b74478 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -191,9 +191,8 @@ static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file) static int vga_switchon(struct vga_switcheroo_client *client) { - int ret; - - ret = vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON); + if (vgasr_priv.handler->power_state) + vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON); /* call the driver callback to turn on device */ client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON); client->pwr_state = VGA_SWITCHEROO_ON; @@ -204,7 +203,8 @@ static int vga_switchoff(struct vga_switcheroo_client *client) { /* call the driver callback to turn off device */ client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF); - vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF); + if (vgasr_priv.handler->power_state) + vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF); client->pwr_state = VGA_SWITCHEROO_OFF; return 0; } -- cgit v1.2.3 From 851ab954daee0d8328e239a468835e36e6215182 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 6 Dec 2010 12:35:52 +1000 Subject: vga_switcheroo: add debugging mux switch option. This allows the mux to be switched from userspace by writing the MIGD/MDIS commands to the switch debugfs file. Signed-off-by: Dave Airlie --- drivers/gpu/vga/vga_switcheroo.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index c3c213b74478..463691a1650e 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -266,6 +266,7 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, const char *pdev_name; int i, ret; bool delay = false, can_switch; + bool just_mux = false; int client_id = -1; struct vga_switcheroo_client *client = NULL; @@ -320,6 +321,15 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, if (strncmp(usercmd, "DIS", 3) == 0) client_id = VGA_SWITCHEROO_DIS; + if (strncmp(usercmd, "MIGD", 3) == 0) { + just_mux = true; + client_id = VGA_SWITCHEROO_IGD; + } + if (strncmp(usercmd, "MDIS", 3) == 0) { + just_mux = true; + client_id = VGA_SWITCHEROO_DIS; + } + if (client_id == -1) goto out; @@ -331,6 +341,12 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, } vgasr_priv.delayed_switch_active = false; + + if (just_mux) { + ret = vgasr_priv.handler->switchto(client_id); + goto out; + } + /* okay we want a switch - test if devices are willing to switch */ can_switch = true; for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { -- cgit v1.2.3 From 5ccb377feaaff3daa1e9a179534bbd7550d78af6 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 7 Dec 2010 13:56:26 +1000 Subject: drm/nouveau: add delayed switch complete callback. This just adds the callback for the delayed switch mechanism.
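The lastclose hook runs when the final userspace client drops the device, which is the point at which a deferred switch can safely complete; a sketch of the pattern (nouveau's actual one-line version is in the diff below):

	/* hypothetical driver lastclose: a safe point to finish a delayed switch */
	static void example_lastclose(struct drm_device *dev)
	{
		vga_switcheroo_process_delayed_switch();
	}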
Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nouveau_state.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index d5b17b6ccd3a..db4b410f4a8c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -1054,6 +1054,7 @@ err_out: void nouveau_lastclose(struct drm_device *dev) { + vga_switcheroo_process_delayed_switch(); } int nouveau_unload(struct drm_device *dev) -- cgit v1.2.3 From d1fbd923da0f982f12b61a44e74c2e1f74c6ef56 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 6 Dec 2010 12:56:44 +1000 Subject: nouveau/acpi: improve detection of what is IGD and what is DIS. This improves the IGD/DIS picking: first, an Intel vendor ID is taken to mean the integrated GPU; failing that, a device on bus 0 is treated as integrated. There may be a more correct way to do this, but I've no idea what it is. Signed-off-by: Dave Airlie --- drivers/gpu/drm/nouveau/nouveau_acpi.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 119152606e4c..a54238058dc5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -130,10 +130,15 @@ static int nouveau_dsm_init(void) static int nouveau_dsm_get_client_id(struct pci_dev *pdev) { - if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) + /* easy option one - intel vendor ID means Integrated */ + if (pdev->vendor == PCI_VENDOR_ID_INTEL) return VGA_SWITCHEROO_IGD; - else - return VGA_SWITCHEROO_DIS; + + /* is this device on Bus 0? - this may need improving */ + if (pdev->bus->number == 0) + return VGA_SWITCHEROO_IGD; + + return VGA_SWITCHEROO_DIS; } static struct vga_switcheroo_handler nouveau_dsm_handler = { -- cgit v1.2.3 From 8d608aa6295242fe4c4b6105b8c59c6a5b232d89 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 7 Dec 2010 08:57:57 +1000 Subject: vga_switcheroo: add reprobe hook for fbcon to recheck connected outputs. This adds a hook after the mux is switched for the driver to reprobe the connected outputs.
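A client opts in through the new middle argument to vga_switcheroo_register_client(); a hypothetical client is sketched here (helper names illustrative -- nouveau's real hook, which kicks its fbcon output-poll handler, is in the diff below):

	static void example_reprobe(struct pci_dev *pdev)
	{
		struct drm_device *dev = pci_get_drvdata(pdev);

		/* the mux now points at us; re-check which outputs are connected */
		drm_helper_hpd_irq_event(dev);
	}

and at registration time:

	vga_switcheroo_register_client(pdev, example_set_state,
				       example_reprobe, example_can_switch);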
Signed-off-by: Dave Airlie --- drivers/gpu/drm/i915/i915_dma.c | 1 + drivers/gpu/drm/nouveau/nouveau_state.c | 7 +++++++ drivers/gpu/drm/radeon/radeon_device.c | 1 + drivers/gpu/vga/vga_switcheroo.c | 6 ++++++ include/linux/vga_switcheroo.h | 1 + 5 files changed, 16 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 8cb7f93732c2..ec1f650f6fab 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1231,6 +1231,7 @@ static int i915_load_modeset_init(struct drm_device *dev) ret = vga_switcheroo_register_client(dev->pdev, i915_switcheroo_set_state, + NULL, i915_switcheroo_can_switch); if (ret) goto cleanup_vga_client; diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index db4b410f4a8c..1b87eee22fa9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -605,6 +605,12 @@ static void nouveau_switcheroo_set_state(struct pci_dev *pdev, } } +static void nouveau_switcheroo_reprobe(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + nouveau_fbcon_output_poll_changed(dev); +} + static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); @@ -625,6 +631,7 @@ nouveau_card_init(struct drm_device *dev) vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state, + nouveau_switcheroo_reprobe, nouveau_switcheroo_can_switch); /* Initialise internal driver API hooks */ diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 96f0eb484ce6..1a1017f0d9db 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -779,6 +779,7 @@ int radeon_device_init(struct radeon_device *rdev, vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); vga_switcheroo_register_client(rdev->pdev, radeon_switcheroo_set_state, + NULL, radeon_switcheroo_can_switch); r = radeon_init(rdev); diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 463691a1650e..2b8e1d25e8d0 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -33,6 +33,7 @@ struct vga_switcheroo_client { struct fb_info *fb_info; int pwr_state; void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state); + void (*reprobe)(struct pci_dev *pdev); bool (*can_switch)(struct pci_dev *pdev); int id; bool active; @@ -103,6 +104,7 @@ static void vga_switcheroo_enable(void) int vga_switcheroo_register_client(struct pci_dev *pdev, void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state), + void (*reprobe)(struct pci_dev *pdev), bool (*can_switch)(struct pci_dev *pdev)) { int index; @@ -117,6 +119,7 @@ int vga_switcheroo_register_client(struct pci_dev *pdev, vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON; vgasr_priv.clients[index].pdev = pdev; vgasr_priv.clients[index].set_gpu_state = set_gpu_state; + vgasr_priv.clients[index].reprobe = reprobe; vgasr_priv.clients[index].can_switch = can_switch; vgasr_priv.clients[index].id = -1; if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) @@ -251,6 +254,9 @@ static int vga_switchto(struct vga_switcheroo_client *new_client) if (ret) return ret; + if (new_client->reprobe) + new_client->reprobe(new_client->pdev); + if (active->pwr_state == VGA_SWITCHEROO_ON) vga_switchoff(active); diff --git 
a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index ae9ab13b963d..f80fa9de5b82 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -33,6 +33,7 @@ struct vga_switcheroo_handler { void vga_switcheroo_unregister_client(struct pci_dev *dev); int vga_switcheroo_register_client(struct pci_dev *dev, void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state), + void (*reprobe)(struct pci_dev *dev), bool (*can_switch)(struct pci_dev *dev)); void vga_switcheroo_client_fb_set(struct pci_dev *dev, -- cgit v1.2.3 From 5bcf719b7db0f9366cedaf102b081f99b1c325ae Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 7 Dec 2010 09:20:40 +1000 Subject: drm/switcheroo: track state of switch in drivers. We need to track the state of the switch in drivers, so that after s/r we don't resume the card we've explicitly switched off before. Also don't allow a userspace open to occur if we've switched the gpu off. Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fops.c | 2 ++ drivers/gpu/drm/i915/i915_dma.c | 4 ++++ drivers/gpu/drm/i915/i915_drv.c | 12 +++++++++++- drivers/gpu/drm/nouveau/nouveau_drv.c | 6 ++++++ drivers/gpu/drm/nouveau/nouveau_drv.h | 2 ++ drivers/gpu/drm/nouveau/nouveau_state.c | 4 ++++ drivers/gpu/drm/radeon/radeon.h | 1 - drivers/gpu/drm/radeon/radeon_device.c | 12 ++++++------ drivers/gpu/drm/radeon/radeon_kms.c | 4 ---- include/drm/drmP.h | 6 +++++- 10 files changed, 40 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index a39794bac04b..2ec7d48fc4a8 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -236,6 +236,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp, return -EBUSY; /* No exclusive opens */ if (!drm_cpu_valid()) return -EINVAL; + if (dev->switch_power_state != DRM_SWITCH_POWER_ON) + return -EINVAL; DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index ec1f650f6fab..0568dbdc10ef 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1151,12 +1151,16 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_ pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; if (state == VGA_SWITCHEROO_ON) { printk(KERN_INFO "i915: switched on\n"); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; /* i915 resume handler doesn't set to D0 */ pci_set_power_state(dev->pdev, PCI_D0); i915_resume(dev); + dev->switch_power_state = DRM_SWITCH_POWER_ON; } else { printk(KERN_ERR "i915: switched off\n"); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; i915_suspend(dev, pmm); + dev->switch_power_state = DRM_SWITCH_POWER_OFF; } } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9eee6cf7901e..872493331988 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -271,6 +271,8 @@ static int i915_drm_freeze(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + drm_kms_helper_poll_disable(dev); + pci_save_state(dev->pdev); /* If KMS is active, we do the leavevt stuff here */ @@ -307,7 +309,9 @@ int i915_suspend(struct drm_device *dev, pm_message_t state) if (state.event == PM_EVENT_PRETHAW) return 0; - drm_kms_helper_poll_disable(dev); + + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; error = i915_drm_freeze(dev); if (error) @@ -361,6 +365,9 @@ int i915_resume(struct drm_device *dev) { 
int ret; + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + if (pci_enable_device(dev->pdev)) return -EIO; @@ -569,6 +576,9 @@ static int i915_pm_suspend(struct device *dev) return -ENODEV; } + if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + error = i915_drm_freeze(drm_dev); if (error) return error; diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index bb170570938b..13bb672a16f4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -171,6 +171,9 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) if (pm_state.event == PM_EVENT_PRETHAW) return 0; + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + NV_INFO(dev, "Disabling fbcon acceleration...\n"); nouveau_fbcon_save_disable_accel(dev); @@ -254,6 +257,9 @@ nouveau_pci_resume(struct pci_dev *pdev) struct drm_crtc *crtc; int ret, i; + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) + return 0; + nouveau_fbcon_save_disable_accel(dev); NV_INFO(dev, "We're back, enabling device...\n"); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index e81575687354..e59f5bcab1ad 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -753,6 +753,8 @@ struct drm_nouveau_private { struct nouveau_fbdev *nfbdev; struct apertures_struct *apertures; + + bool powered_down; }; static inline struct drm_nouveau_private * diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 1b87eee22fa9..a54fc431fe98 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -596,12 +596,16 @@ static void nouveau_switcheroo_set_state(struct pci_dev *pdev, pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; if (state == VGA_SWITCHEROO_ON) { printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; nouveau_pci_resume(pdev); drm_kms_helper_poll_enable(dev); + dev->switch_power_state = DRM_SWITCH_POWER_ON; } else { printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_kms_helper_poll_disable(dev); nouveau_pci_suspend(pdev, pmm); + dev->switch_power_state = DRM_SWITCH_POWER_OFF; } } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index d2697f8f2da8..140eaceab279 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1167,7 +1167,6 @@ struct radeon_device { uint8_t audio_status_bits; uint8_t audio_category_code; - bool powered_down; struct notifier_block acpi_nb; /* only one userspace can use Hyperz features at a time */ struct drm_file *hyperz_filp; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 1a1017f0d9db..4ee0c53b28a7 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -642,20 +642,20 @@ void radeon_check_arguments(struct radeon_device *rdev) static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); - struct radeon_device *rdev = dev->dev_private; pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; if (state == VGA_SWITCHEROO_ON) { printk(KERN_INFO "radeon: switched on\n"); /* don't suspend or resume card normally */ - rdev->powered_down = false; + dev->switch_power_state = 
DRM_SWITCH_POWER_CHANGING; radeon_resume_kms(dev); + dev->switch_power_state = DRM_SWITCH_POWER_ON; drm_kms_helper_poll_enable(dev); } else { printk(KERN_INFO "radeon: switched off\n"); drm_kms_helper_poll_disable(dev); + dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; radeon_suspend_kms(dev, pmm); - /* don't suspend or resume card normally */ - rdev->powered_down = true; + dev->switch_power_state = DRM_SWITCH_POWER_OFF; } } @@ -842,7 +842,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) } rdev = dev->dev_private; - if (rdev->powered_down) + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; /* turn off display hw */ @@ -900,7 +900,7 @@ int radeon_resume_kms(struct drm_device *dev) struct drm_connector *connector; struct radeon_device *rdev = dev->dev_private; - if (rdev->powered_down) + if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; acquire_console_sem(); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 4bf423ca4c12..b2686334d46b 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -203,10 +203,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) */ int radeon_driver_firstopen_kms(struct drm_device *dev) { - struct radeon_device *rdev = dev->dev_private; - - if (rdev->powered_down) - return -EINVAL; return 0; } diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 0f14f94ed8f4..a4694c610330 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1121,9 +1121,13 @@ struct drm_device { spinlock_t object_name_lock; struct idr object_name_idr; /*@} */ - + int switch_power_state; }; +#define DRM_SWITCH_POWER_ON 0 +#define DRM_SWITCH_POWER_OFF 1 +#define DRM_SWITCH_POWER_CHANGING 2 + static __inline__ int drm_core_check_feature(struct drm_device *dev, int feature) { -- cgit v1.2.3 From 66b37c6777c4686f121fe4a83176e535a7f4b1af Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 7 Dec 2010 14:24:25 +1000 Subject: vga_switcheroo: split switching into two stages. stage 1: turn card on, switch boot vga pointer. stage 2: switch fbs, switch mux and power off old card. 
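To make the split concrete, the two call sites end up looking roughly like this (a sketch of the resulting control flow, not a literal hunk): an immediate switch runs both stages back to back, while a delayed switch runs stage 1 up front and leaves stage 2 to vga_switcheroo_process_delayed_switch() once the outgoing client goes idle.

	/* immediate switch: both stages in one go */
	ret = vga_switchto_stage1(client);	/* power on new card, move boot VGA */
	if (ret == 0)
		ret = vga_switchto_stage2(client);	/* move fbs, flip mux, power off old card */

	/* delayed switch: stage 1 now; stage 2 runs later from
	 * vga_switcheroo_process_delayed_switch() */
	ret = vga_switchto_stage1(client);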
Signed-off-by: Dave Airlie --- drivers/gpu/vga/vga_switcheroo.c | 45 ++++++++++++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 2b8e1d25e8d0..d2d8543686d3 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -212,7 +212,8 @@ static int vga_switchoff(struct vga_switcheroo_client *client) return 0; } -static int vga_switchto(struct vga_switcheroo_client *new_client) +/* stage one happens before delay */ +static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) { int ret; int i; @@ -239,10 +240,28 @@ static int vga_switchto(struct vga_switcheroo_client *new_client) vga_switchon(new_client); /* swap shadow resource to denote boot VGA device has changed so X starts on new device */ - active->active = false; - active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW; new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; + return 0; +} + +/* post delay */ +static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) +{ + int ret; + int i; + struct vga_switcheroo_client *active = NULL; + + for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { + if (vgasr_priv.clients[i].active == true) { + active = &vgasr_priv.clients[i]; + break; + } + } + if (!active) + return 0; + + active->active = false; if (new_client->fb_info) { struct fb_event event; @@ -368,18 +387,22 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, if (can_switch == true) { pdev_name = pci_name(client->pdev); - ret = vga_switchto(client); + ret = vga_switchto_stage1(client); + if (ret) + printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret); + + ret = vga_switchto_stage2(client); if (ret) - printk(KERN_ERR "vga_switcheroo: switching failed %d\n", ret); + printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret); + } else { printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id); vgasr_priv.delayed_switch_active = true; vgasr_priv.delayed_client_id = client_id; - /* we should at least power up the card to - make the switch faster */ - if (client->pwr_state == VGA_SWITCHEROO_OFF) - vga_switchon(client); + ret = vga_switchto_stage1(client); + if (ret) + printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret); } out: @@ -461,9 +484,9 @@ int vga_switcheroo_process_delayed_switch(void) goto err; pdev_name = pci_name(client->pdev); - ret = vga_switchto(client); + ret = vga_switchto_stage2(client); if (ret) - printk(KERN_ERR "vga_switcheroo: delayed switching failed %d\n", ret); + printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret); vgasr_priv.delayed_switch_active = false; err = 0; -- cgit v1.2.3 From 2f299d5de02da3ffb1f9e1a05c91dcd1173ebd3c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 4 Jan 2011 17:42:20 -0500 Subject: drm/radeon/kms: adjust quirk for acer laptop Acer laptop (TravelMate 5730G) has an HDMI connector on the laptop and a DVI connector on the docking station and both share the same encoder, hpd pin, and ddc line. The bios connector table reflects this and is technically correct, however, we drop the DVI connector here since xrandr has no concept of encoders (only crtcs and connectors) and will try and drive both connectors with different crtcs which isn't possible on the hardware side and leaves no crtcs for LVDS or VGA. 
Fixes: https://bugs.freedesktop.org/show_bug.cgi?id=32732 Signed-off-by: Alex Deucher Cc: stable@kernel.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_atombios.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 45bc750e9ae2..6317d2c9e6d9 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -313,7 +313,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, uint16_t *line_mux, struct radeon_hpd *hpd) { - struct radeon_device *rdev = dev->dev_private; /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x791e) && @@ -425,21 +424,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } } - /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */ + /* Acer laptop (Acer TravelMate 5730G) has an HDMI port + * on the laptop and a DVI port on the docking station and + * both share the same encoder, hpd pin, and ddc line. + * So while the bios table is technically correct, + * we drop the DVI port here since xrandr has no concept of + * encoders and will try and drive both connectors + * with different crtcs which isn't possible on the hardware + * side and leaves no crtcs for LVDS or VGA. + */ if ((dev->pdev->device == 0x95c4) && (dev->pdev->subsystem_vendor == 0x1025) && (dev->pdev->subsystem_device == 0x013c)) { - struct radeon_gpio_rec gpio; - if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { - gpio = radeon_lookup_gpio(rdev, 6); - *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); + /* actually it's a DVI-D port not DVI-I */ *connector_type = DRM_MODE_CONNECTOR_DVID; - } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && - (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { - gpio = radeon_lookup_gpio(rdev, 7); - *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); + return false; } } -- cgit v1.2.3 From 9eba4a93ce520a627e876b0d1851d4f78a701c2b Mon Sep 17 00:00:00 2001 From: Marek Olšák Date: Wed, 5 Jan 2011 05:46:48 +0100 Subject: drm/radeon/kms: manage r300 CMASK RAM access and allow CMASK clear MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The CMASK RAM is for colorbuffer compression (used in conjunction with MSAA). Only one user (filp) can access it. The CMASK RAM access is managed in the same way as Hyper-Z, but there is a separate ioctl, because an app that uses MSAA does not necessarily have to use zbuffering. 
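For illustration, userspace would request CMASK access through the new info request the same way it does for Hyper-Z; a hedged sketch, assuming the usual libdrm drmCommandWriteRead() plumbing and eliding error handling:

	uint32_t value = 1;	/* 1 requests the rights, 0 revokes them */
	struct drm_radeon_info info = {
		.request = RADEON_INFO_WANT_CMASK,
		.value = (uintptr_t)&value,	/* kernel writes the result back here */
	};

	drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
	if (value != 1) {
		/* some other filp already owns the CMASK RAM */
	}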
Signed-off-by: Marek Olšák Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r300.c | 9 +++++++++ drivers/gpu/drm/radeon/r300d.h | 1 + drivers/gpu/drm/radeon/radeon.h | 3 ++- drivers/gpu/drm/radeon/radeon_drv.c | 2 +- drivers/gpu/drm/radeon/radeon_kms.c | 39 +++++++++++++++++++++++++------------ include/drm/radeon_drm.h | 1 + 6 files changed, 41 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 36b4f7b48d6a..23fee54c3b75 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -745,6 +745,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p, break; case 0x4E00: /* RB3D_CCTL */ + if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */ + p->rdev->cmask_filp != p->filp) { + DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n"); + return -EINVAL; + } track->num_cb = ((idx_value >> 5) & 0x3) + 1; break; case 0x4E38: @@ -1206,6 +1211,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p, if (p->rdev->hyperz_filp != p->filp) return -EINVAL; break; + case PACKET3_3D_CLEAR_CMASK: + if (p->rdev->cmask_filp != p->filp) + return -EINVAL; + break; case PACKET3_NOP: break; default: diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h index 0c036c60d9df..1f519a5ffb8c 100644 --- a/drivers/gpu/drm/radeon/r300d.h +++ b/drivers/gpu/drm/radeon/r300d.h @@ -54,6 +54,7 @@ #define PACKET3_3D_DRAW_IMMD_2 0x35 #define PACKET3_3D_DRAW_INDX_2 0x36 #define PACKET3_3D_CLEAR_HIZ 0x37 +#define PACKET3_3D_CLEAR_CMASK 0x38 #define PACKET3_BITBLT_MULTI 0x9B #define PACKET0(reg, n) (CP_PACKET0 | \ diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 140eaceab279..a835d95021d1 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1168,8 +1168,9 @@ struct radeon_device { uint8_t audio_category_code; struct notifier_block acpi_nb; - /* only one userspace can use Hyperz features at a time */ + /* only one userspace can use Hyperz features or CMASK at a time */ struct drm_file *hyperz_filp; + struct drm_file *cmask_filp; /* i2c buses */ struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS]; }; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 18225f21ee0f..be5cb4f28c29 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -48,7 +48,7 @@ * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) * 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs - * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf + * 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK */ #define KMS_DRIVER_MAJOR 2 #define KMS_DRIVER_MINOR 8 diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index b2686334d46b..28a53e4a925f 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -96,9 +96,27 @@ out: return r; } +static void radeon_set_filp_rights(struct drm_device *dev, + struct drm_file **owner, + struct drm_file *applier, + uint32_t *value) +{ + mutex_lock(&dev->struct_mutex); + if (*value == 1) { + /* wants rights */ + if (!*owner) + *owner = applier; + } else if (*value == 0) { + /* revokes rights */ + if (*owner == applier) + *owner = NULL; + } + *value = *owner == applier ? 
1 : 0; + mutex_unlock(&dev->struct_mutex); +} /* - * Userspace get informations ioctl + * Userspace get information ioctl */ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { @@ -173,18 +191,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value); return -EINVAL; } - mutex_lock(&dev->struct_mutex); - if (value == 1) { - /* wants hyper-z */ - if (!rdev->hyperz_filp) - rdev->hyperz_filp = filp; - } else if (value == 0) { - /* revokes hyper-z */ - if (rdev->hyperz_filp == filp) - rdev->hyperz_filp = NULL; + radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value); + break; + case RADEON_INFO_WANT_CMASK: + /* The same logic as Hyper-Z. */ + if (value >= 2) { + DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value); + return -EINVAL; } - value = rdev->hyperz_filp == filp ? 1 : 0; - mutex_unlock(&dev->struct_mutex); + radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value); break; default: DRM_DEBUG_KMS("Invalid request %d\n", info->request); diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index 10f8b53bdd40..e95a86b8b689 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h @@ -906,6 +906,7 @@ struct drm_radeon_cs { #define RADEON_INFO_ACCEL_WORKING2 0x05 #define RADEON_INFO_TILING_CONFIG 0x06 #define RADEON_INFO_WANT_HYPERZ 0x07 +#define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */ struct drm_radeon_info { uint32_t request; -- cgit v1.2.3 From af5dd83b873efd4e1477f2265b6fa15a825aff26 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 6 Jan 2011 13:04:32 +1000 Subject: vga_switcheroo: fix build with non switcheroo enabled path. Signed-off-by: Dave Airlie --- include/linux/vga_switcheroo.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h index f80fa9de5b82..4b9a7f596f92 100644 --- a/include/linux/vga_switcheroo.h +++ b/include/linux/vga_switcheroo.h @@ -49,6 +49,7 @@ int vga_switcheroo_process_delayed_switch(void); static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} static inline int vga_switcheroo_register_client(struct pci_dev *dev, void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state), + void (*reprobe)(struct pci_dev *dev), bool (*can_switch)(struct pci_dev *dev)) { return 0; } static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {} static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; } -- cgit v1.2.3 From 32c87fca2fac490e34a9fa900b45f2fbb4faacf9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Jan 2011 14:49:32 +0100 Subject: drm/radeon: use system_wq instead of dev_priv->wq With cmwq, there's no reason for radeon to use a dedicated workqueue. Drop dev_priv->wq and use system_wq instead. Because radeon_driver_irq_uninstall_kms() may be called from unsleepable context, the work items can't be flushed from there. Instead, init and flush from radeon_irq_kms_init/fini(). While at it, simplify canceling/flushing of rdev->pm.dynpm_idle_work. Always initialize and sync cancel instead of being unnecessarily smart about it. 
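The conversion itself is mechanical; an illustrative before/after fragment of the pattern applied throughout:

	/* before: dedicated workqueue, with cancel and flush as two steps */
	queue_work(rdev->wq, &rdev->hotplug_work);
	cancel_delayed_work(&rdev->pm.dynpm_idle_work);
	flush_workqueue(rdev->wq);

	/* after: system_wq via the schedule_*() helpers, and one
	 * cancel_delayed_work_sync() that also waits for a running instance */
	schedule_work(&rdev->hotplug_work);
	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);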
Signed-off-by: Tejun Heo Acked-by: Alex Deucher Cc: dri-devel@lists.freedesktop.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 2 +- drivers/gpu/drm/radeon/r100.c | 2 +- drivers/gpu/drm/radeon/r600.c | 2 +- drivers/gpu/drm/radeon/radeon.h | 1 - drivers/gpu/drm/radeon/radeon_device.c | 6 ----- drivers/gpu/drm/radeon/radeon_irq_kms.c | 5 ++-- drivers/gpu/drm/radeon/radeon_pm.c | 47 +++++++++++---------------------- drivers/gpu/drm/radeon/rs600.c | 2 +- 8 files changed, 23 insertions(+), 44 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 3ae63ceb9c33..eaf4fba90b72 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2756,7 +2756,7 @@ restart_ih: if (wptr != rdev->ih.wptr) goto restart_ih; if (queue_hotplug) - queue_work(rdev->wq, &rdev->hotplug_work); + schedule_work(&rdev->hotplug_work); rdev->ih.rptr = rptr; WREG32(IH_RB_RPTR, rdev->ih.rptr); spin_unlock_irqrestore(&rdev->ih.lock, flags); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 300b4a64d8fe..f637595b14e1 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -682,7 +682,7 @@ int r100_irq_process(struct radeon_device *rdev) /* reset gui idle ack. the status bit is broken */ rdev->irq.gui_idle_acked = false; if (queue_hotplug) - queue_work(rdev->wq, &rdev->hotplug_work); + schedule_work(&rdev->hotplug_work); if (rdev->msi_enabled) { switch (rdev->family) { case CHIP_RS400: diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index f95ca5b44396..279794c391e9 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3442,7 +3442,7 @@ restart_ih: if (wptr != rdev->ih.wptr) goto restart_ih; if (queue_hotplug) - queue_work(rdev->wq, &rdev->hotplug_work); + schedule_work(&rdev->hotplug_work); rdev->ih.rptr = rptr; WREG32(IH_RB_RPTR, rdev->ih.rptr); spin_unlock_irqrestore(&rdev->ih.lock, flags); diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index a835d95021d1..aff8080026a1 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1152,7 +1152,6 @@ struct radeon_device { struct r700_vram_scratch vram_scratch; int msi_enabled; /* msi enabled */ struct r600_ih ih; /* r6/700 interrupt ring */ - struct workqueue_struct *wq; struct work_struct hotplug_work; int num_crtc; /* number of crtcs */ struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 4ee0c53b28a7..44cf0d707006 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -710,11 +710,6 @@ int radeon_device_init(struct radeon_device *rdev, init_waitqueue_head(&rdev->irq.vblank_queue); init_waitqueue_head(&rdev->irq.idle_queue); - /* setup workqueue */ - rdev->wq = create_workqueue("radeon"); - if (rdev->wq == NULL) - return -ENOMEM; - /* Set asic functions */ r = radeon_asic_init(rdev); if (r) @@ -813,7 +808,6 @@ void radeon_device_fini(struct radeon_device *rdev) /* evict vram memory */ radeon_bo_evict_vram(rdev); radeon_fini(rdev); - destroy_workqueue(rdev->wq); vga_switcheroo_unregister_client(rdev->pdev); vga_client_register(rdev->pdev, NULL, NULL, NULL); if (rdev->rio_mem) diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index c6861bb751ad..a289646e8aa4 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c 
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -64,8 +64,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev) struct radeon_device *rdev = dev->dev_private; unsigned i; - INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); - /* Disable *all* interrupts */ rdev->irq.sw_int = false; rdev->irq.gui_idle = false; @@ -114,6 +112,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev) { int r = 0; + INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); + spin_lock_init(&rdev->irq.sw_lock); r = drm_vblank_init(rdev->ddev, rdev->num_crtc); if (r) { @@ -152,6 +152,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) if (rdev->msi_enabled) pci_disable_msi(rdev->pdev); } + flush_work_sync(&rdev->hotplug_work); } void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 4de7776bd1c5..0afd26ccccfa 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -405,20 +405,13 @@ static ssize_t radeon_set_pm_method(struct device *dev, rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; mutex_unlock(&rdev->pm.mutex); } else if (strncmp("profile", buf, strlen("profile")) == 0) { - bool flush_wq = false; - mutex_lock(&rdev->pm.mutex); - if (rdev->pm.pm_method == PM_METHOD_DYNPM) { - cancel_delayed_work(&rdev->pm.dynpm_idle_work); - flush_wq = true; - } /* disable dynpm */ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; rdev->pm.pm_method = PM_METHOD_PROFILE; mutex_unlock(&rdev->pm.mutex); - if (flush_wq) - flush_workqueue(rdev->wq); + cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); } else { DRM_ERROR("invalid power method!\n"); goto fail; @@ -524,18 +517,14 @@ static void radeon_hwmon_fini(struct radeon_device *rdev) void radeon_pm_suspend(struct radeon_device *rdev) { - bool flush_wq = false; - mutex_lock(&rdev->pm.mutex); if (rdev->pm.pm_method == PM_METHOD_DYNPM) { - cancel_delayed_work(&rdev->pm.dynpm_idle_work); if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; - flush_wq = true; } mutex_unlock(&rdev->pm.mutex); - if (flush_wq) - flush_workqueue(rdev->wq); + + cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); } void radeon_pm_resume(struct radeon_device *rdev) @@ -550,8 +539,8 @@ void radeon_pm_resume(struct radeon_device *rdev) if (rdev->pm.pm_method == PM_METHOD_DYNPM && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; - queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, - msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); + schedule_delayed_work(&rdev->pm.dynpm_idle_work, + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); } mutex_unlock(&rdev->pm.mutex); radeon_pm_compute_clocks(rdev); @@ -585,6 +574,9 @@ int radeon_pm_init(struct radeon_device *rdev) ret = radeon_hwmon_init(rdev); if (ret) return ret; + + INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); + if (rdev->pm.num_power_states > 1) { /* where's the best place to put these? 
*/ ret = device_create_file(rdev->dev, &dev_attr_power_profile); @@ -598,8 +590,6 @@ int radeon_pm_init(struct radeon_device *rdev) rdev->acpi_nb.notifier_call = radeon_acpi_event; register_acpi_notifier(&rdev->acpi_nb); #endif - INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); - if (radeon_debugfs_pm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for PM!\n"); } @@ -613,25 +603,20 @@ int radeon_pm_init(struct radeon_device *rdev) void radeon_pm_fini(struct radeon_device *rdev) { if (rdev->pm.num_power_states > 1) { - bool flush_wq = false; - mutex_lock(&rdev->pm.mutex); if (rdev->pm.pm_method == PM_METHOD_PROFILE) { rdev->pm.profile = PM_PROFILE_DEFAULT; radeon_pm_update_profile(rdev); radeon_pm_set_clocks(rdev); } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { - /* cancel work */ - cancel_delayed_work(&rdev->pm.dynpm_idle_work); - flush_wq = true; /* reset default clocks */ rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; radeon_pm_set_clocks(rdev); } mutex_unlock(&rdev->pm.mutex); - if (flush_wq) - flush_workqueue(rdev->wq); + + cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); device_remove_file(rdev->dev, &dev_attr_power_profile); device_remove_file(rdev->dev, &dev_attr_power_method); @@ -690,12 +675,12 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) radeon_pm_get_dynpm_state(rdev); radeon_pm_set_clocks(rdev); - queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, - msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); + schedule_delayed_work(&rdev->pm.dynpm_idle_work, + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; - queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, - msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); + schedule_delayed_work(&rdev->pm.dynpm_idle_work, + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); } } else { /* count == 0 */ @@ -800,8 +785,8 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work) radeon_pm_set_clocks(rdev); } - queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, - msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); + schedule_delayed_work(&rdev->pm.dynpm_idle_work, + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); } mutex_unlock(&rdev->pm.mutex); ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 9a85b1614c86..b4192acaab5f 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -692,7 +692,7 @@ int rs600_irq_process(struct radeon_device *rdev) /* reset gui idle ack. 
the status bit is broken */ rdev->irq.gui_idle_acked = false; if (queue_hotplug) - queue_work(rdev->wq, &rdev->hotplug_work); + schedule_work(&rdev->hotplug_work); if (rdev->msi_enabled) { switch (rdev->family) { case CHIP_RS600: -- cgit v1.2.3 From f598aa7593427ffe3a61e7767c34bd695a5e7ed0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 4 Jan 2011 00:43:39 -0500 Subject: drm/radeon/kms: add quirk for Mac Radeon HD 2600 card MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reported-by: 屋国遥 Signed-off-by: Alex Deucher Cc: stable@kernel.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_atombios.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 6317d2c9e6d9..e4f7e3e82a50 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -387,6 +387,17 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, *line_mux = 0x90; } + /* mac rv630 */ + if ((dev->pdev->device == 0x9588) && + (dev->pdev->subsystem_vendor == 0x106b) && + (dev->pdev->subsystem_device == 0x00a6)) { + if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) && + (*connector_type == DRM_MODE_CONNECTOR_DVII)) { + *connector_type = DRM_MODE_CONNECTOR_9PinDIN; + *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1; + } + } + /* ASUS HD 3600 XT board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x9598) && (dev->pdev->subsystem_vendor == 0x1043) && -- cgit v1.2.3 From eeb9cc015f91ff08453040dd5b2fde0dbaac90d3 Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Thu, 6 Jan 2011 22:10:15 +1000 Subject: drm/nvc0: fix init without firmware present Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nvc0_graph.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c index 43b44a4b3ab8..5feacd5d5fa4 100644 --- a/drivers/gpu/drm/nouveau/nvc0_graph.c +++ b/drivers/gpu/drm/nouveau/nvc0_graph.c @@ -584,7 +584,7 @@ nvc0_graph_init_ctxctl(struct drm_device *dev) r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000); ret = nvc0_fuc_load_fw(dev, 0x409000, "fuc409c", "fuc409d"); if (ret == 0) - nvc0_fuc_load_fw(dev, 0x41a000, "fuc41ac", "fuc41ad"); + ret = nvc0_fuc_load_fw(dev, 0x41a000, "fuc41ac", "fuc41ad"); nv_wr32(dev, 0x000260, r000260); if (ret) @@ -686,10 +686,8 @@ nvc0_graph_init(struct drm_device *dev) nv_wr32(dev, 0x400054, 0x34ce3464); ret = nvc0_graph_init_ctxctl(dev); - if (ret) - return ret; - - dev_priv->engine.graph.accel_blocked = false; + if (ret == 0) + dev_priv->engine.graph.accel_blocked = false; return 0; } -- cgit v1.2.3 From dfe63bb0ad9810db13aab0058caba97866e0a681 Mon Sep 17 00:00:00 2001 From: James Simmons Date: Thu, 23 Dec 2010 16:40:37 +0000 Subject: drm: Update fbdev fb_fix_screeninfo If you change the color depth via fbset or some other framebuffer-aware userland application, struct fb_fix_screeninfo is not updated with the new information. This patch fixes this issue. Also, the function is changed to just pass in struct drm_framebuffer so that in the future we could use more fields.
I'm hoping some day fix->smem* could be set here :-) Signed-off-by: James Simmons Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_fb_helper.c | 41 +++++++++++++++++---------------- drivers/gpu/drm/i915/intel_fb.c | 1 - drivers/gpu/drm/nouveau/nouveau_fbcon.c | 1 - drivers/gpu/drm/radeon/radeon_fb.c | 2 -- include/drm/drm_fb_helper.h | 3 --- 5 files changed, 21 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 5c4f9b9ecdc0..0307d601f5e5 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -607,6 +607,25 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) } EXPORT_SYMBOL(drm_fb_helper_fini); +void drm_fb_helper_fill_fix(struct fb_info *info, struct drm_framebuffer *fb) +{ + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.visual = fb->depth == 8 ? FB_VISUAL_PSEUDOCOLOR : + FB_VISUAL_TRUECOLOR; + info->fix.mmio_start = 0; + info->fix.mmio_len = 0; + info->fix.type_aux = 0; + info->fix.xpanstep = 1; /* doing it in hw */ + info->fix.ypanstep = 1; /* doing it in hw */ + info->fix.ywrapstep = 0; + info->fix.accel = FB_ACCEL_NONE; + info->fix.type_aux = 0; + + info->fix.line_length = fb->pitch; + return; +} +EXPORT_SYMBOL(drm_fb_helper_fill_fix); + static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, u16 regno, struct fb_info *info) { @@ -816,6 +835,7 @@ int drm_fb_helper_set_par(struct fb_info *info) mutex_unlock(&dev->mode_config.mutex); return ret; } + drm_fb_helper_fill_fix(info, fb_helper->fb); } mutex_unlock(&dev->mode_config.mutex); @@ -953,6 +973,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, if (new_fb) { info->var.pixclock = 0; + drm_fb_helper_fill_fix(info, fb_helper->fb); if (register_framebuffer(info) < 0) { return -EINVAL; } @@ -979,26 +1000,6 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, } EXPORT_SYMBOL(drm_fb_helper_single_fb_probe); -void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, - uint32_t depth) -{ - info->fix.type = FB_TYPE_PACKED_PIXELS; - info->fix.visual = depth == 8 ? 
FB_VISUAL_PSEUDOCOLOR : - FB_VISUAL_TRUECOLOR; - info->fix.mmio_start = 0; - info->fix.mmio_len = 0; - info->fix.type_aux = 0; - info->fix.xpanstep = 1; /* doing it in hw */ - info->fix.ypanstep = 1; /* doing it in hw */ - info->fix.ywrapstep = 0; - info->fix.accel = FB_ACCEL_NONE; - info->fix.type_aux = 0; - - info->fix.line_length = pitch; - return; -} -EXPORT_SYMBOL(drm_fb_helper_fill_fix); - void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, uint32_t fb_width, uint32_t fb_height) { diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 67738f32dfd4..701e830d0012 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -150,7 +150,6 @@ static int intelfb_create(struct intel_fbdev *ifbdev, // memset(info->screen_base, 0, size); - drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); info->pixmap.size = 64*1024; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 6d56a54b6e2e..a26d04740c88 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -359,7 +359,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev, info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo); info->screen_size = size; - drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height); /* Set aperture base/size for vesafb takeover */ diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 66324b5bb5ba..ca32e9c1e91d 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -225,8 +225,6 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev, strcpy(info->fix.id, "radeondrmfb"); - drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); - info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &radeonfb_ops; diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index f22e7fe4b6db..aac27bd56e89 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -121,9 +121,6 @@ int drm_fb_helper_setcolreg(unsigned regno, void drm_fb_helper_restore(void); void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, uint32_t fb_width, uint32_t fb_height); -void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, - uint32_t depth); - int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info); bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper); -- cgit v1.2.3 From e457acaed4c56a6831b82365e17e8f42f1ee129c Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 4 Jan 2011 12:41:37 +1000 Subject: drm/nouveau: create grctx on the fly on all chipsets Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_channel.c | 10 ---------- drivers/gpu/drm/nouveau/nouveau_object.c | 3 ++- drivers/gpu/drm/nouveau/nv40_fifo.c | 1 - drivers/gpu/drm/nouveau/nv40_graph.c | 12 ++++++++++++ 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index a57a1d2f3a11..3960d66d7aba 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -121,7 +121,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, uint32_t vram_handle, uint32_t gart_handle) { struct drm_nouveau_private 
*dev_priv = dev->dev_private; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_channel *chan; unsigned long flags; @@ -202,15 +201,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, /* disable the fifo caches */ pfifo->reassign(dev, false); - /* Create a graphics context for new channel */ - if (dev_priv->card_type < NV_50) { - ret = pgraph->create_context(chan); - if (ret) { - nouveau_channel_put(&chan); - return ret; - } - } - /* Construct inital RAMFC for new channel */ ret = pfifo->create_context(chan); if (ret) { diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index d77b1fcd19d4..30b6544467ca 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -651,7 +651,8 @@ found: } break; case NVOBJ_ENGINE_GR: - if (dev_priv->card_type >= NV_50 && !chan->ramin_grctx) { + if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) || + (dev_priv->card_type < NV_20 && !chan->pgraph_ctx)) { struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index c86e4d4e9b96..49b9a35a9cd6 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c @@ -64,7 +64,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan) NV_PFIFO_CACHE1_BIG_ENDIAN | #endif 0x30000000 /* no idea.. */); - nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4); nv_wi32(dev, fc + 60, 0x0001FFFF); /* enable the fifo dma operation */ diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 0618846a97ce..19ef92a0375a 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -62,6 +62,7 @@ nv40_graph_create_context(struct nouveau_channel *chan) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; struct nouveau_grctx ctx = {}; + unsigned long flags; int ret; ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16, @@ -76,6 +77,17 @@ nv40_graph_create_context(struct nouveau_channel *chan) nv40_grctx_init(&ctx); nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst); + + /* init grctx pointer in ramfc, and on PFIFO if channel is + * already active there + */ + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + nv_wo32(chan->ramfc, 0x38, chan->ramin_grctx->pinst >> 4); + nv_mask(dev, 0x002500, 0x00000001, 0x00000000); + if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id) + nv_wr32(dev, 0x0032e0, chan->ramin_grctx->pinst >> 4); + nv_mask(dev, 0x002500, 0x00000001, 0x00000001); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); return 0; } -- cgit v1.2.3 From 3313e3d4333ccbf8bd7c816775cfe9aca623bd8a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 18:49:34 -0500 Subject: drm/radeon/kms: add pcie get/set lane support for r6xx/r7xx/evergreen Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/r300.c | 5 +- drivers/gpu/drm/radeon/r600.c | 118 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon.h | 8 +++ drivers/gpu/drm/radeon/radeon_asic.c | 14 ++--- drivers/gpu/drm/radeon/radeon_asic.h | 2 + drivers/gpu/drm/radeon/radeon_reg.h | 9 +++ 6 files changed, 145 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 
23fee54c3b75..fae5e709f270 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -558,10 +558,7 @@ int rv370_get_pcie_lanes(struct radeon_device *rdev) /* FIXME wait for idle */ - if (rdev->family < CHIP_R600) - link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); - else - link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); + link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { case RADEON_PCIE_LC_LINK_WIDTH_X0: diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 279794c391e9..60ad8c03081a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3531,3 +3531,121 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) } else WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); } + +void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes) +{ + u32 link_width_cntl, mask, target_reg; + + if (rdev->flags & RADEON_IS_IGP) + return; + + if (!(rdev->flags & RADEON_IS_PCIE)) + return; + + /* x2 cards have a special sequence */ + if (ASIC_IS_X2(rdev)) + return; + + /* FIXME wait for idle */ + + switch (lanes) { + case 0: + mask = RADEON_PCIE_LC_LINK_WIDTH_X0; + break; + case 1: + mask = RADEON_PCIE_LC_LINK_WIDTH_X1; + break; + case 2: + mask = RADEON_PCIE_LC_LINK_WIDTH_X2; + break; + case 4: + mask = RADEON_PCIE_LC_LINK_WIDTH_X4; + break; + case 8: + mask = RADEON_PCIE_LC_LINK_WIDTH_X8; + break; + case 12: + mask = RADEON_PCIE_LC_LINK_WIDTH_X12; + break; + case 16: + default: + mask = RADEON_PCIE_LC_LINK_WIDTH_X16; + break; + } + + link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); + + if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) == + (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT)) + return; + + if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS) + return; + + link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK | + RADEON_PCIE_LC_RECONFIG_NOW | + R600_PCIE_LC_RENEGOTIATE_EN | + R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE); + link_width_cntl |= mask; + + WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); + + /* some northbridges can renegotiate the link rather than requiring + * a complete re-config. + * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.) 
+ */ + if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT) + link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT; + else + link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE; + + WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl | + RADEON_PCIE_LC_RECONFIG_NOW)); + + if (rdev->family >= CHIP_RV770) + target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX; + else + target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX; + + /* wait for lane set to complete */ + link_width_cntl = RREG32(target_reg); + while (link_width_cntl == 0xffffffff) + link_width_cntl = RREG32(target_reg); + +} + +int r600_get_pcie_lanes(struct radeon_device *rdev) +{ + u32 link_width_cntl; + + if (rdev->flags & RADEON_IS_IGP) + return 0; + + if (!(rdev->flags & RADEON_IS_PCIE)) + return 0; + + /* x2 cards have a special sequence */ + if (ASIC_IS_X2(rdev)) + return 0; + + /* FIXME wait for idle */ + + link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); + + switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { + case RADEON_PCIE_LC_LINK_WIDTH_X0: + return 0; + case RADEON_PCIE_LC_LINK_WIDTH_X1: + return 1; + case RADEON_PCIE_LC_LINK_WIDTH_X2: + return 2; + case RADEON_PCIE_LC_LINK_WIDTH_X4: + return 4; + case RADEON_PCIE_LC_LINK_WIDTH_X8: + return 8; + case RADEON_PCIE_LC_LINK_WIDTH_X16: + default: + return 16; + } +} + diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index aff8080026a1..e9fb64c1e20b 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1317,6 +1317,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); (rdev->family == CHIP_RV410) || \ (rdev->family == CHIP_RS400) || \ (rdev->family == CHIP_RS480)) +#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \ + (rdev->ddev->pdev->device == 0x9443) || \ + (rdev->ddev->pdev->device == 0x944B) || \ + (rdev->ddev->pdev->device == 0x9506) || \ + (rdev->ddev->pdev->device == 0x9509) || \ + (rdev->ddev->pdev->device == 0x950F) || \ + (rdev->ddev->pdev->device == 0x689C) || \ + (rdev->ddev->pdev->device == 0x689D)) #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \ (rdev->family == CHIP_RS690) || \ diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 3d73fe484f42..53c62404795d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -94,7 +94,7 @@ static void radeon_register_accessor_init(struct radeon_device *rdev) rdev->mc_rreg = &rs600_mc_rreg; rdev->mc_wreg = &rs600_mc_wreg; } - if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) { + if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) { rdev->pciep_rreg = &r600_pciep_rreg; rdev->pciep_wreg = &r600_pciep_wreg; } @@ -631,8 +631,8 @@ static struct radeon_asic r600_asic = { .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, - .get_pcie_lanes = &rv370_get_pcie_lanes, - .set_pcie_lanes = NULL, + .get_pcie_lanes = &r600_get_pcie_lanes, + .set_pcie_lanes = &r600_set_pcie_lanes, .set_clock_gating = NULL, .set_surface_reg = r600_set_surface_reg, .clear_surface_reg = r600_clear_surface_reg, @@ -725,8 +725,8 @@ static struct radeon_asic rv770_asic = { .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, 
.set_memory_clock = &radeon_atom_set_memory_clock, - .get_pcie_lanes = &rv370_get_pcie_lanes, - .set_pcie_lanes = NULL, + .get_pcie_lanes = &r600_get_pcie_lanes, + .set_pcie_lanes = &r600_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, .set_surface_reg = r600_set_surface_reg, .clear_surface_reg = r600_clear_surface_reg, @@ -772,8 +772,8 @@ static struct radeon_asic evergreen_asic = { .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, - .get_pcie_lanes = NULL, - .set_pcie_lanes = NULL, + .get_pcie_lanes = &r600_get_pcie_lanes, + .set_pcie_lanes = &r600_set_pcie_lanes, .set_clock_gating = NULL, .set_surface_reg = r600_set_surface_reg, .clear_surface_reg = r600_clear_surface_reg, diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 4970eda1bd41..9ac71b8d1b9d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -284,6 +284,8 @@ extern void r600_pm_misc(struct radeon_device *rdev); extern void r600_pm_init_profile(struct radeon_device *rdev); extern void rs780_pm_init_profile(struct radeon_device *rdev); extern void r600_pm_get_dynpm_state(struct radeon_device *rdev); +extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes); +extern int r600_get_pcie_lanes(struct radeon_device *rdev); /* * rv770,rv730,rv710,rv740 diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 0a310b7f71c3..3369ce984af1 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h @@ -320,6 +320,15 @@ # define RADEON_PCIE_LC_RECONFIG_NOW (1 << 8) # define RADEON_PCIE_LC_RECONFIG_LATER (1 << 9) # define RADEON_PCIE_LC_SHORT_RECONFIG_EN (1 << 10) +# define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) +# define R600_PCIE_LC_RENEGOTIATION_SUPPORT (1 << 9) +# define R600_PCIE_LC_RENEGOTIATE_EN (1 << 10) +# define R600_PCIE_LC_SHORT_RECONFIG_EN (1 << 11) +# define R600_PCIE_LC_UPCONFIGURE_SUPPORT (1 << 12) +# define R600_PCIE_LC_UPCONFIGURE_DIS (1 << 13) + +#define R600_TARGET_AND_CURRENT_PROFILE_INDEX 0x70c +#define R700_TARGET_AND_CURRENT_PROFILE_INDEX 0x66c #define RADEON_CACHE_CNTL 0x1724 #define RADEON_CACHE_LINE 0x0f0c /* PCI */ -- cgit v1.2.3 From 9e46a48df24f9698b34d28385b320c529851e5f7 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 18:49:35 -0500 Subject: drm/radeon/kms: add support for gen2 pcie link speeds Supported on rv6xx/r7xx/evergreen. Cards come up in gen1 mode. 
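All three per-family enable functions share the same gating checks before touching the link; roughly (a condensed sketch of the common prologue, see the code below for the exact per-family conditions):

	if (rdev->flags & RADEON_IS_IGP)	/* IGPs have no PCIE link to retrain */
		return;
	if (!(rdev->flags & RADEON_IS_PCIE))	/* PCI/AGP parts */
		return;
	if (ASIC_IS_X2(rdev))			/* X2 boards need a special sequence */
		return;

	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2) {
		/* ... program LC_GEN2_EN_STRAP to bring the link up in gen2 ... */
	}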
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 53 +++++++++++++++++++ drivers/gpu/drm/radeon/evergreend.h | 38 ++++++++++++++ drivers/gpu/drm/radeon/r600.c | 102 ++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/r600d.h | 39 ++++++++++++++ drivers/gpu/drm/radeon/radeon.h | 2 + drivers/gpu/drm/radeon/rv770.c | 76 +++++++++++++++++++++++++++ drivers/gpu/drm/radeon/rv770d.h | 38 ++++++++++++++ 7 files changed, 348 insertions(+) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index eaf4fba90b72..11344c76e216 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -39,6 +39,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); +static void evergreen_pcie_gen2_enable(struct radeon_device *rdev); void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc) { @@ -2767,6 +2768,9 @@ static int evergreen_startup(struct radeon_device *rdev) { int r; + /* enable pcie gen2 link */ + evergreen_pcie_gen2_enable(rdev); + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { r = r600_init_microcode(rdev); if (r) { @@ -3049,3 +3053,52 @@ void evergreen_fini(struct radeon_device *rdev) rdev->bios = NULL; radeon_dummy_page_fini(rdev); } + +static void evergreen_pcie_gen2_enable(struct radeon_device *rdev) +{ + u32 link_width_cntl, speed_cntl; + + if (rdev->flags & RADEON_IS_IGP) + return; + + if (!(rdev->flags & RADEON_IS_PCIE)) + return; + + /* x2 cards have a special sequence */ + if (ASIC_IS_X2(rdev)) + return; + + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) || + (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { + + link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); + link_width_cntl &= ~LC_UPCONFIGURE_DIS; + WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); + + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; + WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); + + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT; + WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); + + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT; + WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); + + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + speed_cntl |= LC_GEN2_EN_STRAP; + WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); + + } else { + link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); + /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ + if (1) + link_width_cntl |= LC_UPCONFIGURE_DIS; + else + link_width_cntl &= ~LC_UPCONFIGURE_DIS; + WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); + } +} diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 94140e142abb..b8da323f15c8 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -581,6 +581,44 @@ # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) # define DC_HPDx_EN (1 << 28) +/* PCIE link stuff */ +#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ +#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ +# define LC_LINK_WIDTH_SHIFT 0 +# define LC_LINK_WIDTH_MASK 0x7 +# define LC_LINK_WIDTH_X0 0 +# define LC_LINK_WIDTH_X1 1 +# define LC_LINK_WIDTH_X2 2 +# define LC_LINK_WIDTH_X4 3 +# define LC_LINK_WIDTH_X8 4 +# define LC_LINK_WIDTH_X16 6 +# define LC_LINK_WIDTH_RD_SHIFT 4 +# define 
LC_LINK_WIDTH_RD_MASK 0x70 +# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) +# define LC_RECONFIG_NOW (1 << 8) +# define LC_RENEGOTIATION_SUPPORT (1 << 9) +# define LC_RENEGOTIATE_EN (1 << 10) +# define LC_SHORT_RECONFIG_EN (1 << 11) +# define LC_UPCONFIGURE_SUPPORT (1 << 12) +# define LC_UPCONFIGURE_DIS (1 << 13) +#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */ +# define LC_GEN2_EN_STRAP (1 << 0) +# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1) +# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5) +# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6) +# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) +# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 +# define LC_CURRENT_DATA_RATE (1 << 11) +# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) +# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) +# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) +# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24) +#define MM_CFGREGS_CNTL 0x544c +# define MM_WR_TO_CFG_EN (1 << 3) +#define LINK_CNTL2 0x88 /* F0 */ +# define TARGET_LINK_SPEED_MASK (0xf << 0) +# define SELECTABLE_DEEMPHASIS (1 << 6) + /* * PM4 */ diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 60ad8c03081a..6b50716267c0 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -94,6 +94,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev); void r600_gpu_init(struct radeon_device *rdev); void r600_fini(struct radeon_device *rdev); void r600_irq_disable(struct radeon_device *rdev); +static void r600_pcie_gen2_enable(struct radeon_device *rdev); /* get temperature in millidegrees */ u32 rv6xx_get_temp(struct radeon_device *rdev) @@ -2379,6 +2380,9 @@ int r600_startup(struct radeon_device *rdev) { int r; + /* enable pcie gen2 link */ + r600_pcie_gen2_enable(rdev); + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { r = r600_init_microcode(rdev); if (r) { @@ -3649,3 +3653,101 @@ int r600_get_pcie_lanes(struct radeon_device *rdev) } } +static void r600_pcie_gen2_enable(struct radeon_device *rdev) +{ + u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; + u16 link_cntl2; + + if (rdev->flags & RADEON_IS_IGP) + return; + + if (!(rdev->flags & RADEON_IS_PCIE)) + return; + + /* x2 cards have a special sequence */ + if (ASIC_IS_X2(rdev)) + return; + + /* only RV6xx+ chips are supported */ + if (rdev->family <= CHIP_R600) + return; + + /* 55 nm r6xx asics */ + if ((rdev->family == CHIP_RV670) || + (rdev->family == CHIP_RV620) || + (rdev->family == CHIP_RV635)) { + /* advertise upconfig capability */ + link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); + link_width_cntl &= ~LC_UPCONFIGURE_DIS; + WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); + link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); + if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { + lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; + link_width_cntl &= ~(LC_LINK_WIDTH_MASK | + LC_RECONFIG_ARC_MISSING_ESCAPE); + link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN; + WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); + } else { + link_width_cntl |= LC_UPCONFIGURE_DIS; + WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); + } + } + + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && + (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { + + /* 55 nm r6xx asics */ + if ((rdev->family == CHIP_RV670) || + (rdev->family == CHIP_RV620) || + (rdev->family == CHIP_RV635)) { + WREG32(MM_CFGREGS_CNTL, 0x8); + link_cntl2 = 
RREG32(0x4088); + WREG32(MM_CFGREGS_CNTL, 0); + /* not supported yet */ + if (link_cntl2 & SELECTABLE_DEEMPHASIS) + return; + } + + speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK; + speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT); + speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK; + speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE; + speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE; + WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); + + tmp = RREG32(0x541c); + WREG32(0x541c, tmp | 0x8); + WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN); + link_cntl2 = RREG16(0x4088); + link_cntl2 &= ~TARGET_LINK_SPEED_MASK; + link_cntl2 |= 0x2; + WREG16(0x4088, link_cntl2); + WREG32(MM_CFGREGS_CNTL, 0); + + if ((rdev->family == CHIP_RV670) || + (rdev->family == CHIP_RV620) || + (rdev->family == CHIP_RV635)) { + training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL); + training_cntl &= ~LC_POINT_7_PLUS_EN; + WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl); + } else { + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; + WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); + } + + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + speed_cntl |= LC_GEN2_EN_STRAP; + WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); + + } else { + link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); + /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ + if (1) + link_width_cntl |= LC_UPCONFIGURE_DIS; + else + link_width_cntl &= ~LC_UPCONFIGURE_DIS; + WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); + } +} diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index c89cfa8e0c05..a5d898b4bad2 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -737,6 +737,45 @@ # define DxGRPH_PFLIP_INT_MASK (1 << 0) # define DxGRPH_PFLIP_INT_TYPE (1 << 8) +/* PCIE link stuff */ +#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ +# define LC_POINT_7_PLUS_EN (1 << 6) +#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ +# define LC_LINK_WIDTH_SHIFT 0 +# define LC_LINK_WIDTH_MASK 0x7 +# define LC_LINK_WIDTH_X0 0 +# define LC_LINK_WIDTH_X1 1 +# define LC_LINK_WIDTH_X2 2 +# define LC_LINK_WIDTH_X4 3 +# define LC_LINK_WIDTH_X8 4 +# define LC_LINK_WIDTH_X16 6 +# define LC_LINK_WIDTH_RD_SHIFT 4 +# define LC_LINK_WIDTH_RD_MASK 0x70 +# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) +# define LC_RECONFIG_NOW (1 << 8) +# define LC_RENEGOTIATION_SUPPORT (1 << 9) +# define LC_RENEGOTIATE_EN (1 << 10) +# define LC_SHORT_RECONFIG_EN (1 << 11) +# define LC_UPCONFIGURE_SUPPORT (1 << 12) +# define LC_UPCONFIGURE_DIS (1 << 13) +#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */ +# define LC_GEN2_EN_STRAP (1 << 0) +# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1) +# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5) +# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6) +# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) +# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 +# define LC_CURRENT_DATA_RATE (1 << 11) +# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) +# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) +# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) +# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24) +#define MM_CFGREGS_CNTL 0x544c +# define MM_WR_TO_CFG_EN (1 << 3) +#define LINK_CNTL2 0x88 /* F0 */ +# define TARGET_LINK_SPEED_MASK (0xf << 0) +# define SELECTABLE_DEEMPHASIS (1 << 6) + /* * PM4 */ diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index e9fb64c1e20b..3e635c651900 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ 
b/drivers/gpu/drm/radeon/radeon.h @@ -1244,6 +1244,8 @@ static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v) */ #define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) #define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) +#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg)) +#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg)) #define RREG32(reg) r100_mm_rreg(rdev, (reg)) #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 645aa1fd7611..3a264aa3a79a 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -41,6 +41,7 @@ static void rv770_gpu_init(struct radeon_device *rdev); void rv770_fini(struct radeon_device *rdev); +static void rv770_pcie_gen2_enable(struct radeon_device *rdev); u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) { @@ -1124,6 +1125,9 @@ static int rv770_startup(struct radeon_device *rdev) { int r; + /* enable pcie gen2 link */ + rv770_pcie_gen2_enable(rdev); + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { r = r600_init_microcode(rdev); if (r) { @@ -1362,3 +1366,75 @@ void rv770_fini(struct radeon_device *rdev) rdev->bios = NULL; radeon_dummy_page_fini(rdev); } + +static void rv770_pcie_gen2_enable(struct radeon_device *rdev) +{ + u32 link_width_cntl, lanes, speed_cntl, tmp; + u16 link_cntl2; + + if (rdev->flags & RADEON_IS_IGP) + return; + + if (!(rdev->flags & RADEON_IS_PCIE)) + return; + + /* x2 cards have a special sequence */ + if (ASIC_IS_X2(rdev)) + return; + + /* advertise upconfig capability */ + link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); + link_width_cntl &= ~LC_UPCONFIGURE_DIS; + WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); + link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); + if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { + lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; + link_width_cntl &= ~(LC_LINK_WIDTH_MASK | + LC_RECONFIG_ARC_MISSING_ESCAPE); + link_width_cntl |= lanes | LC_RECONFIG_NOW | + LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT; + WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); + } else { + link_width_cntl |= LC_UPCONFIGURE_DIS; + WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); + } + + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && + (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { + + tmp = RREG32(0x541c); + WREG32(0x541c, tmp | 0x8); + WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN); + link_cntl2 = RREG16(0x4088); + link_cntl2 &= ~TARGET_LINK_SPEED_MASK; + link_cntl2 |= 0x2; + WREG16(0x4088, link_cntl2); + WREG32(MM_CFGREGS_CNTL, 0); + + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; + WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); + + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT; + WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); + + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT; + WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); + + speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); + speed_cntl |= LC_GEN2_EN_STRAP; + WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); + + } else { + link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); + /* XXX: only disable it if 
gen1 bridge vendor == 0x111d or 0x1106 */ + if (1) + link_width_cntl |= LC_UPCONFIGURE_DIS; + else + link_width_cntl &= ~LC_UPCONFIGURE_DIS; + WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); + } +} diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index fc77e1e1a179..abc8cf5a3672 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -360,4 +360,42 @@ #define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c #define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c +/* PCIE link stuff */ +#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ +#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ +# define LC_LINK_WIDTH_SHIFT 0 +# define LC_LINK_WIDTH_MASK 0x7 +# define LC_LINK_WIDTH_X0 0 +# define LC_LINK_WIDTH_X1 1 +# define LC_LINK_WIDTH_X2 2 +# define LC_LINK_WIDTH_X4 3 +# define LC_LINK_WIDTH_X8 4 +# define LC_LINK_WIDTH_X16 6 +# define LC_LINK_WIDTH_RD_SHIFT 4 +# define LC_LINK_WIDTH_RD_MASK 0x70 +# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7) +# define LC_RECONFIG_NOW (1 << 8) +# define LC_RENEGOTIATION_SUPPORT (1 << 9) +# define LC_RENEGOTIATE_EN (1 << 10) +# define LC_SHORT_RECONFIG_EN (1 << 11) +# define LC_UPCONFIGURE_SUPPORT (1 << 12) +# define LC_UPCONFIGURE_DIS (1 << 13) +#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */ +# define LC_GEN2_EN_STRAP (1 << 0) +# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1) +# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5) +# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6) +# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8) +# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3 +# define LC_CURRENT_DATA_RATE (1 << 11) +# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14) +# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21) +# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23) +# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24) +#define MM_CFGREGS_CNTL 0x544c +# define MM_WR_TO_CFG_EN (1 << 3) +#define LINK_CNTL2 0x88 /* F0 */ +# define TARGET_LINK_SPEED_MASK (0xf << 0) +# define SELECTABLE_DEEMPHASIS (1 << 6) + #endif -- cgit v1.2.3 From c46cb4dabddeda851d0d5faee64484f3783053e3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 19:12:37 -0500 Subject: drm/radeon/kms: set the MSB of the HDP slice size Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 11344c76e216..b5bc7d038fd9 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1147,7 +1147,7 @@ static void evergreen_mc_program(struct radeon_device *rdev) tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); WREG32(MC_VM_FB_LOCATION, tmp); WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); - WREG32(HDP_NONSURFACE_INFO, (2 << 7)); + WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); if (rdev->flags & RADEON_IS_AGP) { WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); -- cgit v1.2.3 From 32171d2297daa2fe90d6ef41c5424ca9ac5bd797 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 19:13:32 -0500 Subject: drm/radeon/kms: fix some typos in evergreen pm4 defines Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreend.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index b8da323f15c8..36d32d83d866 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ 
b/drivers/gpu/drm/radeon/evergreend.h @@ -648,7 +648,7 @@ #define PACKET3_NOP 0x10 #define PACKET3_SET_BASE 0x11 #define PACKET3_CLEAR_STATE 0x12 -#define PACKET3_INDIRECT_BUFFER_SIZE 0x13 +#define PACKET3_INDEX_BUFFER_SIZE 0x13 #define PACKET3_DISPATCH_DIRECT 0x15 #define PACKET3_DISPATCH_INDIRECT 0x16 #define PACKET3_INDIRECT_BUFFER_END 0x17 @@ -689,14 +689,14 @@ # define PACKET3_CB8_DEST_BASE_ENA (1 << 15) # define PACKET3_CB9_DEST_BASE_ENA (1 << 16) # define PACKET3_CB10_DEST_BASE_ENA (1 << 17) -# define PACKET3_CB11_DEST_BASE_ENA (1 << 17) +# define PACKET3_CB11_DEST_BASE_ENA (1 << 18) # define PACKET3_FULL_CACHE_ENA (1 << 20) # define PACKET3_TC_ACTION_ENA (1 << 23) # define PACKET3_VC_ACTION_ENA (1 << 24) # define PACKET3_CB_ACTION_ENA (1 << 25) # define PACKET3_DB_ACTION_ENA (1 << 26) # define PACKET3_SH_ACTION_ENA (1 << 27) -# define PACKET3_SMX_ACTION_ENA (1 << 28) +# define PACKET3_SX_ACTION_ENA (1 << 28) #define PACKET3_ME_INITIALIZE 0x44 #define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16) #define PACKET3_COND_WRITE 0x45 -- cgit v1.2.3 From 633b91643f50779897bcab5e50d08a4fe4ba9f13 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:11 -0500 Subject: drm/radeon/kms: clean up ASIC_IS_DCE41() macro only fusion asics are dce4.1 Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon.h | 3 ++- drivers/gpu/drm/radeon/radeon_encoders.c | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3e635c651900..396e30703e7a 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1335,7 +1335,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) -#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM)) +#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \ + (rdev->flags & RADEON_IS_IGP)) /* * BIOS helpers. 
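Why the IGP flag belongs inside the macro: the family enum is ordered, and the next patch in this series inserts the discrete Northern Islands chips (CHIP_BARTS/TURKS/CAICOS) above CHIP_PALM, so a bare family >= CHIP_PALM test would start matching discrete boards that are not DCE4.1 parts. With the flag folded in, the call sites below lose their repeated qualifier. An illustrative sketch of the effect at one call site (condensed from the radeon_encoders.c hunks that follow):

    /* before: every caller had to repeat the IGP test */
    if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP))
            action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT;

    /* after: the IGP test lives in the macro itself */
    if (ASIC_IS_DCE41(rdev))
            action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT;
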
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 55b84b8e6b29..c83ad890e4d2 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -1329,7 +1329,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) switch (mode) { case DRM_MODE_DPMS_ON: default: - if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP)) + if (ASIC_IS_DCE41(rdev)) action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT; else action = ATOM_ENABLE; @@ -1337,7 +1337,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP)) + if (ASIC_IS_DCE41(rdev)) action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT; else action = ATOM_DISABLE; @@ -1663,7 +1663,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, } if (ext_encoder) { - if (ASIC_IS_DCE41(rdev) && (rdev->flags & RADEON_IS_IGP)) { + if (ASIC_IS_DCE41(rdev)) { atombios_external_encoder_setup(encoder, ext_encoder, EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT); atombios_external_encoder_setup(encoder, ext_encoder, -- cgit v1.2.3 From 1fe183050f71ba183fe2d693bbef5fa605723043 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:12 -0500 Subject: drm/radeon/kms: add NI chip families Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon.h | 1 + drivers/gpu/drm/radeon/radeon_device.c | 3 +++ drivers/gpu/drm/radeon/radeon_family.h | 3 +++ 3 files changed, 7 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 396e30703e7a..73730fd1d0af 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1337,6 +1337,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) #define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \ (rdev->flags & RADEON_IS_IGP)) +#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS)) /* * BIOS helpers. 
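Because these DCE-level checks are plain family comparisons, they nest: any chip that passes ASIC_IS_DCE5() also passes ASIC_IS_DCE4() and ASIC_IS_DCE3(). Generation-specific code paths therefore have to test newest-first, which is the dispatch shape the later patches in this series rely on. A condensed sketch of the idiom (illustrative only):

    /* dispatch must go newest-first: a DCE5 part also
     * satisfies the DCE4 check */
    if (ASIC_IS_DCE5(rdev)) {
            /* Northern Islands specific path */
    } else if (ASIC_IS_DCE4(rdev)) {
            /* Evergreen path */
    } else {
            /* older asics */
    }
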
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 44cf0d707006..e35343007229 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -82,6 +82,9 @@ static const char radeon_family_name[][16] = { "CYPRESS", "HEMLOCK", "PALM", + "BARTS", + "TURKS", + "CAICOS", "LAST", }; diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index 4c222d5437d1..1ca55eb09ad3 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h @@ -81,6 +81,9 @@ enum radeon_family { CHIP_CYPRESS, CHIP_HEMLOCK, CHIP_PALM, + CHIP_BARTS, + CHIP_TURKS, + CHIP_CAICOS, CHIP_LAST, }; -- cgit v1.2.3 From 936b27cce8fdb8ca50a593714963862586285094 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:13 -0500 Subject: drm/radeon/kms: update display watermark calculations for DCE5 Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index b5bc7d038fd9..9c990c3f877f 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -401,16 +401,28 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, case 0: case 4: default: - return 3840 * 2; + if (ASIC_IS_DCE5(rdev)) + return 4096 * 2; + else + return 3840 * 2; case 1: case 5: - return 5760 * 2; + if (ASIC_IS_DCE5(rdev)) + return 6144 * 2; + else + return 5760 * 2; case 2: case 6: - return 7680 * 2; + if (ASIC_IS_DCE5(rdev)) + return 8192 * 2; + else + return 7680 * 2; case 3: case 7: - return 1920 * 2; + if (ASIC_IS_DCE5(rdev)) + return 2048 * 2; + else + return 1920 * 2; } } -- cgit v1.2.3 From 881dd74ea731067f8fc81608e3a8914fdd66bc6d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:14 -0500 Subject: drm/radeon/kms: DCE5 supports 16k display surfaces Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_display.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index acebbc76c2f9..30d867c85825 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1119,7 +1119,10 @@ int radeon_modeset_init(struct radeon_device *rdev) rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs; - if (ASIC_IS_AVIVO(rdev)) { + if (ASIC_IS_DCE5(rdev)) { + rdev->ddev->mode_config.max_width = 16384; + rdev->ddev->mode_config.max_height = 16384; + } else if (ASIC_IS_AVIVO(rdev)) { rdev->ddev->mode_config.max_width = 8192; rdev->ddev->mode_config.max_height = 8192; } else { -- cgit v1.2.3 From f82b3ddc5fac044a28ab841bfd4ae48e2e43a21b Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:15 -0500 Subject: drm/radeon/kms: DCE5 atom SetPixelClock updates Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atombios_crtc.c | 31 ++++++++++++++++++++++++++++--- drivers/gpu/drm/radeon/radeon_atombios.c | 25 +++++++++++++++++++------ 2 files changed, 47 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 9fbabaa6ee44..b3e5e7549008 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -673,9 +673,14 @@ union 
set_pixel_clock { PIXEL_CLOCK_PARAMETERS_V2 v2; PIXEL_CLOCK_PARAMETERS_V3 v3; PIXEL_CLOCK_PARAMETERS_V5 v5; + PIXEL_CLOCK_PARAMETERS_V6 v6; }; -static void atombios_crtc_set_dcpll(struct drm_crtc *crtc) +/* on DCE5, make sure the voltage is high enough to support the + * required disp clk. + */ +static void atombios_crtc_set_dcpll(struct drm_crtc *crtc, + u32 dispclk) { struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; @@ -698,9 +703,16 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc) * SetPixelClock provides the dividers */ args.v5.ucCRTC = ATOM_CRTC_INVALID; - args.v5.usPixelClock = rdev->clock.default_dispclk; + args.v5.usPixelClock = dispclk; args.v5.ucPpll = ATOM_DCPLL; break; + case 6: + /* if the default dcpll clock is specified, + * SetPixelClock provides the dividers + */ + args.v6.ulDispEngClkFreq = dispclk; + args.v6.ucPpll = ATOM_DCPLL; + break; default: DRM_ERROR("Unknown table version %d %d\n", frev, crev); return; @@ -784,6 +796,18 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc, args.v5.ucEncoderMode = encoder_mode; args.v5.ucPpll = pll_id; break; + case 6: + args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id; + args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10); + args.v6.ucRefDiv = ref_div; + args.v6.usFbDiv = cpu_to_le16(fb_div); + args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); + args.v6.ucPostDiv = post_div; + args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */ + args.v6.ucTransmitterID = encoder_id; + args.v6.ucEncoderMode = encoder_mode; + args.v6.ucPpll = pll_id; + break; default: DRM_ERROR("Unknown table version %d %d\n", frev, crev); return; @@ -1377,7 +1401,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, rdev->clock.default_dispclk); if (ss_enabled) atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss); - atombios_crtc_set_dcpll(crtc); + /* XXX: DCE5, make sure voltage, dispclk is high enough */ + atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk); if (ss_enabled) atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss); } diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index e4f7e3e82a50..11573d085e61 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1086,6 +1086,7 @@ union firmware_info { ATOM_FIRMWARE_INFO_V1_3 info_13; ATOM_FIRMWARE_INFO_V1_4 info_14; ATOM_FIRMWARE_INFO_V2_1 info_21; + ATOM_FIRMWARE_INFO_V2_2 info_22; }; bool radeon_atom_get_clock_info(struct drm_device *dev) @@ -1160,8 +1161,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) *p2pll = *p1pll; /* system clock */ - spll->reference_freq = - le16_to_cpu(firmware_info->info.usReferenceClock); + if (ASIC_IS_DCE4(rdev)) + spll->reference_freq = + le16_to_cpu(firmware_info->info_21.usCoreReferenceClock); + else + spll->reference_freq = + le16_to_cpu(firmware_info->info.usReferenceClock); spll->reference_div = 0; spll->pll_out_min = @@ -1183,8 +1188,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input); /* memory clock */ - mpll->reference_freq = - le16_to_cpu(firmware_info->info.usReferenceClock); + if (ASIC_IS_DCE4(rdev)) + mpll->reference_freq = + le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock); + else + mpll->reference_freq = + le16_to_cpu(firmware_info->info.usReferenceClock); mpll->reference_div = 0; mpll->pll_out_min = @@ -1213,8 +1222,12 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) if 
(ASIC_IS_DCE4(rdev)) { rdev->clock.default_dispclk = le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); - if (rdev->clock.default_dispclk == 0) - rdev->clock.default_dispclk = 60000; /* 600 Mhz */ + if (rdev->clock.default_dispclk == 0) { + if (ASIC_IS_DCE5(rdev)) + rdev->clock.default_dispclk = 54000; /* 540 Mhz */ + else + rdev->clock.default_dispclk = 60000; /* 600 Mhz */ + } rdev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); } -- cgit v1.2.3 From a572eaa3726968555451ba301ff8c61e90e8c278 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:16 -0500 Subject: drm/radeon/kms: DCE5 atom spread spectrum updates Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/atombios_crtc.c | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index b3e5e7549008..b0ab185b86f6 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -403,6 +403,7 @@ union atom_enable_ss { ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2; ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2; + ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3; }; static void atombios_crtc_program_ss(struct drm_crtc *crtc, @@ -417,7 +418,30 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc, memset(&args, 0, sizeof(args)); - if (ASIC_IS_DCE4(rdev)) { + if (ASIC_IS_DCE5(rdev)) { + args.v3.usSpreadSpectrumAmountFrac = 0; + args.v3.ucSpreadSpectrumType = ss->type; + switch (pll_id) { + case ATOM_PPLL1: + args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; + args.v3.usSpreadSpectrumAmount = ss->amount; + args.v3.usSpreadSpectrumStep = ss->step; + break; + case ATOM_PPLL2: + args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL; + args.v3.usSpreadSpectrumAmount = ss->amount; + args.v3.usSpreadSpectrumStep = ss->step; + break; + case ATOM_DCPLL: + args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL; + args.v3.usSpreadSpectrumAmount = 0; + args.v3.usSpreadSpectrumStep = 0; + break; + case ATOM_PPLL_INVALID: + return; + } + args.v2.ucEnable = enable; + } else if (ASIC_IS_DCE4(rdev)) { args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); args.v2.ucSpreadSpectrumType = ss->type; switch (pll_id) { -- cgit v1.2.3 From a001182af807e2e0e1eb497dc5418d1220406d9b Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:17 -0500 Subject: drm/radeon/kms: DCE5 atom transmitter control updates Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_encoders.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index c83ad890e4d2..76835b0397ab 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -712,7 +712,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) * - 2 DIG encoder blocks. * DIG1/2 can drive UNIPHY0/1/2 link A or link B * - * DCE 4.0 + * DCE 4.0/5.0 * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B). * Supports up to 6 digital outputs * - 6 DIG encoder blocks. 
@@ -829,6 +829,7 @@ union dig_transmitter_control { DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3; + DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4; }; void @@ -923,10 +924,18 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); pll_id = radeon_crtc->pll_id; } - if (is_dp && rdev->clock.dp_extclk) - args.v3.acConfig.ucRefClkSource = 2; /* external src */ - else - args.v3.acConfig.ucRefClkSource = pll_id; + + if (ASIC_IS_DCE5(rdev)) { + if (is_dp && rdev->clock.dp_extclk) + args.v4.acConfig.ucRefClkSource = 3; /* external src */ + else + args.v4.acConfig.ucRefClkSource = pll_id; + } else { + if (is_dp && rdev->clock.dp_extclk) + args.v3.acConfig.ucRefClkSource = 2; /* external src */ + else + args.v3.acConfig.ucRefClkSource = pll_id; + } switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: -- cgit v1.2.3 From badbb57b93adda990b4e2420ddfdf834504a217e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:18 -0500 Subject: drm/radeon/kms: DCE5 atom dig encoder updates Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_encoders.c | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 76835b0397ab..989ba26135b8 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -743,6 +743,7 @@ union dig_encoder_control { DIG_ENCODER_CONTROL_PS_ALLOCATION v1; DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; + DIG_ENCODER_CONTROL_PARAMETERS_V4 v4; }; void @@ -758,6 +759,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) uint8_t frev, crev; int dp_clock = 0; int dp_lane_count = 0; + int hpd_id = RADEON_HPD_NONE; if (connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -766,6 +768,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) dp_clock = dig_connector->dp_clock; dp_lane_count = dig_connector->dp_lane_count; + hpd_id = radeon_connector->hpd.hpd; } /* no dig encoder assigned */ @@ -790,19 +793,36 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); - if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) { - if (dp_clock == 270000) - args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; + if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) || + (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) args.v1.ucLaneNum = dp_lane_count; - } else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_encoder->pixel_clock > 165000) args.v1.ucLaneNum = 8; else args.v1.ucLaneNum = 4; - if (ASIC_IS_DCE4(rdev)) { + if (ASIC_IS_DCE5(rdev)) { + if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) || + (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) { + if (dp_clock == 270000) + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ; + else if (dp_clock == 540000) + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ; + } + args.v4.acConfig.ucDigSel = dig->dig_encoder; + args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR; + if (hpd_id == RADEON_HPD_NONE) + args.v4.ucHPD_ID = 0; + else + args.v4.ucHPD_ID = hpd_id + 1; + } else 
if (ASIC_IS_DCE4(rdev)) { + if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000)) + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ; args.v3.acConfig.ucDigSel = dig->dig_encoder; args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; } else { + if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000)) + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; @@ -1538,6 +1558,7 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) struct radeon_encoder_atom_dig *dig; uint32_t dig_enc_in_use = 0; + /* DCE4/5 */ if (ASIC_IS_DCE4(rdev)) { dig = radeon_encoder->enc_priv; if (ASIC_IS_DCE41(rdev)) { -- cgit v1.2.3 From 69c74525c26a1b71116d9bd3a136bf1314a3e8cd Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:19 -0500 Subject: drm/radeon/kms: dac dpms updates for DCE5 The DAC1OutputControl table was removed for DCE5. DAC1EncoderControl now handles everything. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_encoders.c | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 989ba26135b8..2e1d720fca06 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -1227,6 +1227,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; int index = 0; bool is_dig = false; + bool is_dce5_dac = false; memset(&args, 0, sizeof(args)); @@ -1265,12 +1266,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: - if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) - index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); - else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) - index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); - else - index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); + if (ASIC_IS_DCE5(rdev)) + is_dce5_dac = true; + else { + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) + index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); + else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) + index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); + else + index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); + } break; case ENCODER_OBJECT_ID_INTERNAL_DAC2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: @@ -1329,6 +1334,17 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); break; } + } else if (is_dce5_dac) { + switch (mode) { + case DRM_MODE_DPMS_ON: + atombios_dac_setup(encoder, ATOM_ENABLE); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + atombios_dac_setup(encoder, ATOM_DISABLE); + break; + } } else { switch (mode) { case DRM_MODE_DPMS_ON: -- cgit v1.2.3 From d07f4e83506712f31ad661415ea28d536a70a939 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:20 -0500 Subject: drm/radeon/kms: dvo dpms updates for DCE5 The DVOOutputControl table was removed for DCE5. DVOEncoderControl now handles everything. 
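The shape of the change mirrors the DAC patch above: the dpms handler grows a flag for the DCE5 case and routes it through the encoder control table instead of the removed output control table. A condensed sketch of the resulting flow in radeon_atom_encoder_dpms() (simplified from the diff below):

    case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
            if (ASIC_IS_DCE5(rdev))
                    is_dce5_dvo = true;     /* no DVOOutputControl on DCE5 */
            else if (ASIC_IS_DCE3(rdev))
                    is_dig = true;
            else
                    index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
            break;

    /* later, when applying the mode: */
    if (is_dce5_dvo)
            atombios_dvo_setup(encoder, (mode == DRM_MODE_DPMS_ON) ?
                               ATOM_ENABLE : ATOM_DISABLE);
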
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_encoders.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 2e1d720fca06..3866c648423b 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -1228,6 +1228,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) int index = 0; bool is_dig = false; bool is_dce5_dac = false; + bool is_dce5_dvo = false; memset(&args, 0, sizeof(args)); @@ -1250,7 +1251,9 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - if (ASIC_IS_DCE3(rdev)) + if (ASIC_IS_DCE5(rdev)) + is_dce5_dvo = true; + else if (ASIC_IS_DCE3(rdev)) is_dig = true; else index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); @@ -1345,6 +1348,17 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) atombios_dac_setup(encoder, ATOM_DISABLE); break; } + } else if (is_dce5_dvo) { + switch (mode) { + case DRM_MODE_DPMS_ON: + atombios_dvo_setup(encoder, ATOM_ENABLE); + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + atombios_dvo_setup(encoder, ATOM_DISABLE); + break; + } } else { switch (mode) { case DRM_MODE_DPMS_ON: -- cgit v1.2.3 From 36868bda88b92ce8a9aa8b3ee2e0d1e0de09cc19 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:21 -0500 Subject: drm/radeon/kms: parse DCE5 encoder caps when setting up encoders Needed to tell which DIG encoders are HBR2 capable for DP 1.2. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_atombios.c | 44 +++++++++++++++++++++++++------- drivers/gpu/drm/radeon/radeon_encoders.c | 6 ++++- drivers/gpu/drm/radeon/radeon_mode.h | 1 + 3 files changed, 41 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 11573d085e61..a2dfe257079c 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -37,7 +37,7 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, extern void radeon_link_encoder_connector(struct drm_device *dev); extern void radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, - uint32_t supported_device); + uint32_t supported_device, u16 caps); /* from radeon_connector.c */ extern void @@ -537,6 +537,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) u16 size, data_offset; u8 frev, crev; ATOM_CONNECTOR_OBJECT_TABLE *con_obj; + ATOM_ENCODER_OBJECT_TABLE *enc_obj; ATOM_OBJECT_TABLE *router_obj; ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; ATOM_OBJECT_HEADER *obj_header; @@ -561,6 +562,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *) (ctx->bios + data_offset + le16_to_cpu(obj_header->usConnectorObjectTableOffset)); + enc_obj = (ATOM_ENCODER_OBJECT_TABLE *) + (ctx->bios + data_offset + + le16_to_cpu(obj_header->usEncoderObjectTableOffset)); router_obj = (ATOM_OBJECT_TABLE *) (ctx->bios + data_offset + le16_to_cpu(obj_header->usRouterObjectTableOffset)); @@ -666,14 +670,35 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; if (grph_obj_type == 
GRAPH_OBJECT_TYPE_ENCODER) { - u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]); - - radeon_add_atom_encoder(dev, - encoder_obj, - le16_to_cpu - (path-> - usDeviceTag)); + for (k = 0; k < enc_obj->ucNumberOfObjects; k++) { + u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID); + if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) { + ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) + (ctx->bios + data_offset + + le16_to_cpu(enc_obj->asObjects[k].usRecordOffset)); + ATOM_ENCODER_CAP_RECORD *cap_record; + u16 caps = 0; + while (record->ucRecordType > 0 && + record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) { + switch (record->ucRecordType) { + case ATOM_ENCODER_CAP_RECORD_TYPE: + cap_record =(ATOM_ENCODER_CAP_RECORD *) + record; + caps = le16_to_cpu(cap_record->usEncoderCap); + break; + } + record = (ATOM_COMMON_RECORD_HEADER *) + ((char *)record + record->ucRecordSize); + } + radeon_add_atom_encoder(dev, + encoder_obj, + le16_to_cpu + (path-> + usDeviceTag), + caps); + } + } } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { for (k = 0; k < router_obj->ucNumberOfObjects; k++) { u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID); @@ -1007,7 +1032,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct radeon_get_encoder_enum(dev, (1 << i), dac), - (1 << i)); + (1 << i), + 0); else radeon_add_legacy_encoder(dev, radeon_get_encoder_enum(dev, diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 3866c648423b..e7a948f60587 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -2046,7 +2046,10 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) } void -radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device) +radeon_add_atom_encoder(struct drm_device *dev, + uint32_t encoder_enum, + uint32_t supported_device, + u16 caps) { struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; @@ -2089,6 +2092,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t radeon_encoder->rmx_type = RMX_OFF; radeon_encoder->underscan_type = UNDERSCAN_OFF; radeon_encoder->is_ext_encoder = false; + radeon_encoder->caps = caps; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index fd185f783a31..12bdeab91c86 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -379,6 +379,7 @@ struct radeon_encoder { int hdmi_audio_workaround; int hdmi_buffer_status; bool is_ext_encoder; + u16 caps; }; struct radeon_connector_atom_dig { -- cgit v1.2.3 From 4fddba1fd9df94caaa03956bf36e1a887a1c92a5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:22 -0500 Subject: drm/radeon/kms: handle NI thermal controller Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon.h | 1 + drivers/gpu/drm/radeon/radeon_atombios.c | 6 ++++++ drivers/gpu/drm/radeon/radeon_pm.c | 1 + 3 files changed, 8 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 73730fd1d0af..5598f9559a64 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -739,6 +739,7 @@ enum radeon_int_thermal_type { THERMAL_TYPE_RV770, THERMAL_TYPE_EVERGREEN, THERMAL_TYPE_SUMO, + THERMAL_TYPE_NI, }; struct 
radeon_voltage {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index a2dfe257079c..03f1c9a10ba4 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1891,6 +1891,7 @@ static const char *pp_lib_thermal_controller_names[] = {
 	"Evergreen",
 	"emc2103",
 	"Sumo",
+	"Northern Islands",
 };
 
 union power_info {
@@ -2154,6 +2155,11 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
 			 (controller->ucFanParameters &
 			  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
 		rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
+	} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
+		DRM_INFO("Internal thermal controller %s fan control\n",
+			 (controller->ucFanParameters &
+			  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+		rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
 	} else if ((controller->ucType ==
 		    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
 		   (controller->ucType ==
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 0afd26ccccfa..7ad2e1a6991d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -440,6 +440,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
 		temp = rv770_get_temp(rdev);
 		break;
 	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_NI:
 		temp = evergreen_get_temp(rdev);
 		break;
 	case THERMAL_TYPE_SUMO:
-- 
cgit v1.2.3


From c901bcddd09560b78f0a5f8081b86745cc553edf Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Thu, 6 Jan 2011 21:19:23 -0500
Subject: drm/radeon/kms: add disabled vbios accessor for NI asics

Some systems disable the vbios on secondary cards or cards that have
been posted. This code re-enables the vbios so the driver can load it.
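The new helper follows the same save/modify/restore pattern as the existing r700 and r600 accessors: latch the ROM and VGA control registers, enable the ROM and take the display out of VGA mode, read the image, then restore everything. In outline (condensed from the diff below; the D1/D2 VGA and render control writes are elided here):

    bus_cntl = RREG32(R600_BUS_CNTL);                       /* save state */
    rom_cntl = RREG32(R600_ROM_CNTL);
    WREG32(R600_BUS_CNTL, bus_cntl & ~R600_BIOS_ROM_DIS);   /* enable the rom */
    WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
    r = radeon_read_bios(rdev);                             /* fetch the image */
    WREG32(R600_BUS_CNTL, bus_cntl);                        /* restore state */
    WREG32(R600_ROM_CNTL, rom_cntl);
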
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_bios.c | 41 ++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 8f2c7b50dcf5..1aba85cad1a8 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -131,6 +131,45 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) return true; } +static bool ni_read_disabled_bios(struct radeon_device *rdev) +{ + u32 bus_cntl; + u32 d1vga_control; + u32 d2vga_control; + u32 vga_render_control; + u32 rom_cntl; + bool r; + + bus_cntl = RREG32(R600_BUS_CNTL); + d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); + d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); + vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); + rom_cntl = RREG32(R600_ROM_CNTL); + + /* enable the rom */ + WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); + /* Disable VGA mode */ + WREG32(AVIVO_D1VGA_CONTROL, + (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | + AVIVO_DVGA_CONTROL_TIMING_SELECT))); + WREG32(AVIVO_D2VGA_CONTROL, + (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | + AVIVO_DVGA_CONTROL_TIMING_SELECT))); + WREG32(AVIVO_VGA_RENDER_CONTROL, + (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); + WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE); + + r = radeon_read_bios(rdev); + + /* restore regs */ + WREG32(R600_BUS_CNTL, bus_cntl); + WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); + WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); + WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); + WREG32(R600_ROM_CNTL, rom_cntl); + return r; +} + static bool r700_read_disabled_bios(struct radeon_device *rdev) { uint32_t viph_control; @@ -416,6 +455,8 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev) { if (rdev->flags & RADEON_IS_IGP) return igp_read_bios_from_vram(rdev); + else if (rdev->family >= CHIP_BARTS) + return ni_read_disabled_bios(rdev); else if (rdev->family >= CHIP_RV770) return r700_read_disabled_bios(rdev); else if (rdev->family >= CHIP_R600) -- cgit v1.2.3 From adb68fa2f79fbfb49a920c1b69d607a3ab4f985b Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:24 -0500 Subject: drm/radeon/kms: fill gpu init for NI asics Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 71 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 9c990c3f877f..6a73867cf25c 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1384,11 +1384,14 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, case CHIP_CEDAR: case CHIP_REDWOOD: case CHIP_PALM: + case CHIP_TURKS: + case CHIP_CAICOS: force_no_swizzle = false; break; case CHIP_CYPRESS: case CHIP_HEMLOCK: case CHIP_JUNIPER: + case CHIP_BARTS: default: force_no_swizzle = true; break; @@ -1502,6 +1505,7 @@ static void evergreen_program_channel_remap(struct radeon_device *rdev) switch (rdev->family) { case CHIP_HEMLOCK: case CHIP_CYPRESS: + case CHIP_BARTS: tcp_chan_steer_lo = 0x54763210; tcp_chan_steer_hi = 0x0000ba98; break; @@ -1509,6 +1513,8 @@ static void evergreen_program_channel_remap(struct radeon_device *rdev) case CHIP_REDWOOD: case CHIP_CEDAR: case CHIP_PALM: + case CHIP_TURKS: + case CHIP_CAICOS: default: tcp_chan_steer_lo = 0x76543210; tcp_chan_steer_hi = 0x0000ba98; @@ -1648,6 +1654,69 @@ 
static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.max_hw_contexts = 4; rdev->config.evergreen.sq_num_cf_insts = 1; + rdev->config.evergreen.sc_prim_fifo_size = 0x40; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + case CHIP_BARTS: + rdev->config.evergreen.num_ses = 2; + rdev->config.evergreen.max_pipes = 4; + rdev->config.evergreen.max_tile_pipes = 8; + rdev->config.evergreen.max_simds = 7; + rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 248; + rdev->config.evergreen.max_gs_threads = 32; + rdev->config.evergreen.max_stack_entries = 512; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 256; + rdev->config.evergreen.sx_max_export_pos_size = 64; + rdev->config.evergreen.sx_max_export_smx_size = 192; + rdev->config.evergreen.max_hw_contexts = 8; + rdev->config.evergreen.sq_num_cf_insts = 2; + + rdev->config.evergreen.sc_prim_fifo_size = 0x100; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + case CHIP_TURKS: + rdev->config.evergreen.num_ses = 1; + rdev->config.evergreen.max_pipes = 4; + rdev->config.evergreen.max_tile_pipes = 4; + rdev->config.evergreen.max_simds = 6; + rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 248; + rdev->config.evergreen.max_gs_threads = 32; + rdev->config.evergreen.max_stack_entries = 256; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 256; + rdev->config.evergreen.sx_max_export_pos_size = 64; + rdev->config.evergreen.sx_max_export_smx_size = 192; + rdev->config.evergreen.max_hw_contexts = 8; + rdev->config.evergreen.sq_num_cf_insts = 2; + + rdev->config.evergreen.sc_prim_fifo_size = 0x100; + rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; + rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; + break; + case CHIP_CAICOS: + rdev->config.evergreen.num_ses = 1; + rdev->config.evergreen.max_pipes = 4; + rdev->config.evergreen.max_tile_pipes = 2; + rdev->config.evergreen.max_simds = 2; + rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; + rdev->config.evergreen.max_gprs = 256; + rdev->config.evergreen.max_threads = 192; + rdev->config.evergreen.max_gs_threads = 16; + rdev->config.evergreen.max_stack_entries = 256; + rdev->config.evergreen.sx_num_of_sets = 4; + rdev->config.evergreen.sx_max_export_size = 128; + rdev->config.evergreen.sx_max_export_pos_size = 32; + rdev->config.evergreen.sx_max_export_smx_size = 96; + rdev->config.evergreen.max_hw_contexts = 4; + rdev->config.evergreen.sq_num_cf_insts = 1; + rdev->config.evergreen.sc_prim_fifo_size = 0x40; rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; @@ -1931,6 +2000,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) switch (rdev->family) { case CHIP_CEDAR: case CHIP_PALM: + case CHIP_CAICOS: /* no vertex cache */ sq_config &= ~VC_ENABLE; break; @@ -1990,6 +2060,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) switch (rdev->family) { case CHIP_CEDAR: case CHIP_PALM: + case CHIP_CAICOS: vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); break; default: -- cgit v1.2.3 From 03f40090555bd3de9fc80aa4e805ac7fa9c39dfe Mon Sep 
17 00:00:00 2001
From: Alex Deucher
Date: Thu, 6 Jan 2011 21:19:26 -0500
Subject: drm/radeon/kms: adjust default clock/vddc tracking for pm on DCE5

NI chips no longer load the MC ucode in the asic_init sequence so the
asic comes up in a basic mode with low engine/memory clocks and a
voltage. Once the MC ucode is loaded by the driver the card can be
programmed to its proper default clocks and voltage. As such, the
default clocks in the firmware info table are the post clocks, not the
default running clocks. Track the default post clocks and default
running clocks separately to handle this.

Signed-off-by: Alex Deucher
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/radeon/radeon.h          |  3 +++
 drivers/gpu/drm/radeon/radeon_atombios.c | 25 ++++++++++++++++---------
 drivers/gpu/drm/radeon/radeon_pm.c       | 18 ++++++++++--------
 3 files changed, 29 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5598f9559a64..8c62b2f58923 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -823,6 +823,9 @@ struct radeon_pm {
 	u32 current_sclk;
 	u32 current_mclk;
 	u32 current_vddc;
+	u32 default_sclk;
+	u32 default_mclk;
+	u32 default_vddc;
 	struct radeon_i2c_chan *i2c_bus;
 	/* selected pm method */
 	enum radeon_pm_method pm_method;
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 03f1c9a10ba4..1573202a6418 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2249,15 +2249,22 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
 		rdev->pm.default_power_state_index = state_index;
 		rdev->pm.power_state[state_index].default_clock_mode =
 			&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
-		/* patch the table values with the default slck/mclk from firmware info */
-		for (j = 0; j < mode_index; j++) {
-			rdev->pm.power_state[state_index].clock_info[j].mclk =
-				rdev->clock.default_mclk;
-			rdev->pm.power_state[state_index].clock_info[j].sclk =
-				rdev->clock.default_sclk;
-			if (vddc)
-				rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
-					vddc;
+		if (ASIC_IS_DCE5(rdev)) {
+			/* NI chips post without MC ucode, so default clocks are strobe mode only */
+			rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
+			rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
+			rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+		} else {
+			/* patch the table values with the default slck/mclk from firmware info */
+			for (j = 0; j < mode_index; j++) {
+				rdev->pm.power_state[state_index].clock_info[j].mclk =
+					rdev->clock.default_mclk;
+
rdev->pm.power_state[state_index].clock_info[j].sclk = + rdev->clock.default_sclk; + if (vddc) + rdev->pm.power_state[state_index].clock_info[j].voltage.voltage = + vddc; + } } } } diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 7ad2e1a6991d..9052d1e3a5fe 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -167,13 +167,13 @@ static void radeon_set_power_state(struct radeon_device *rdev) if (radeon_gui_idle(rdev)) { sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. clock_info[rdev->pm.requested_clock_mode_index].sclk; - if (sclk > rdev->clock.default_sclk) - sclk = rdev->clock.default_sclk; + if (sclk > rdev->pm.default_sclk) + sclk = rdev->pm.default_sclk; mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. clock_info[rdev->pm.requested_clock_mode_index].mclk; - if (mclk > rdev->clock.default_mclk) - mclk = rdev->clock.default_mclk; + if (mclk > rdev->pm.default_mclk) + mclk = rdev->pm.default_mclk; /* upvolt before raising clocks, downvolt after lowering clocks */ if (sclk < rdev->pm.current_sclk) @@ -534,8 +534,8 @@ void radeon_pm_resume(struct radeon_device *rdev) mutex_lock(&rdev->pm.mutex); rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; rdev->pm.current_clock_mode_index = 0; - rdev->pm.current_sclk = rdev->clock.default_sclk; - rdev->pm.current_mclk = rdev->clock.default_mclk; + rdev->pm.current_sclk = rdev->pm.default_sclk; + rdev->pm.current_mclk = rdev->pm.default_mclk; rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; if (rdev->pm.pm_method == PM_METHOD_DYNPM && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { @@ -558,6 +558,8 @@ int radeon_pm_init(struct radeon_device *rdev) rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; rdev->pm.dynpm_can_upclock = true; rdev->pm.dynpm_can_downclock = true; + rdev->pm.default_sclk = rdev->clock.default_sclk; + rdev->pm.default_mclk = rdev->clock.default_mclk; rdev->pm.current_sclk = rdev->clock.default_sclk; rdev->pm.current_mclk = rdev->clock.default_mclk; rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; @@ -804,9 +806,9 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; - seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); + seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); - seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); + seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); if (rdev->asic->get_memory_clock) seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); if (rdev->pm.current_vddc) -- cgit v1.2.3 From 7d52785d2ac1a07bc012b76cb465c9a01d830a32 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:27 -0500 Subject: drm/radeon/kms: always use writeback/events for fences on NI Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_device.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e35343007229..26091d602b84 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -228,6 +228,11 @@ int radeon_wb_init(struct radeon_device *rdev) 
rdev->wb.use_event = true; } } + /* always use writeback/events on NI */ + if (ASIC_IS_DCE5(rdev)) { + rdev->wb.enabled = true; + rdev->wb.use_event = true; + } dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis"); -- cgit v1.2.3 From ff5b8562d965687261968d02762f9ae73e80a948 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:28 -0500 Subject: drm/radeon/kms: add bo blit support for NI Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen_blit_kms.c | 69 ++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index 2ccd1f0545fe..b758dc7f2f2c 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -148,7 +148,8 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30); if ((rdev->family == CHIP_CEDAR) || - (rdev->family == CHIP_PALM)) + (rdev->family == CHIP_PALM) || + (rdev->family == CHIP_CAICOS)) cp_set_surface_sync(rdev, PACKET3_TC_ACTION_ENA, 48, gpu_addr); else @@ -353,10 +354,74 @@ set_default_state(struct radeon_device *rdev) num_hs_stack_entries = 42; num_ls_stack_entries = 42; break; + case CHIP_BARTS: + num_ps_gprs = 93; + num_vs_gprs = 46; + num_temp_gprs = 4; + num_gs_gprs = 31; + num_es_gprs = 31; + num_hs_gprs = 23; + num_ls_gprs = 23; + num_ps_threads = 128; + num_vs_threads = 20; + num_gs_threads = 20; + num_es_threads = 20; + num_hs_threads = 20; + num_ls_threads = 20; + num_ps_stack_entries = 85; + num_vs_stack_entries = 85; + num_gs_stack_entries = 85; + num_es_stack_entries = 85; + num_hs_stack_entries = 85; + num_ls_stack_entries = 85; + break; + case CHIP_TURKS: + num_ps_gprs = 93; + num_vs_gprs = 46; + num_temp_gprs = 4; + num_gs_gprs = 31; + num_es_gprs = 31; + num_hs_gprs = 23; + num_ls_gprs = 23; + num_ps_threads = 128; + num_vs_threads = 20; + num_gs_threads = 20; + num_es_threads = 20; + num_hs_threads = 20; + num_ls_threads = 20; + num_ps_stack_entries = 42; + num_vs_stack_entries = 42; + num_gs_stack_entries = 42; + num_es_stack_entries = 42; + num_hs_stack_entries = 42; + num_ls_stack_entries = 42; + break; + case CHIP_CAICOS: + num_ps_gprs = 93; + num_vs_gprs = 46; + num_temp_gprs = 4; + num_gs_gprs = 31; + num_es_gprs = 31; + num_hs_gprs = 23; + num_ls_gprs = 23; + num_ps_threads = 128; + num_vs_threads = 10; + num_gs_threads = 10; + num_es_threads = 10; + num_hs_threads = 10; + num_ls_threads = 10; + num_ps_stack_entries = 42; + num_vs_stack_entries = 42; + num_gs_stack_entries = 42; + num_es_stack_entries = 42; + num_hs_stack_entries = 42; + num_ls_stack_entries = 42; + break; } if ((rdev->family == CHIP_CEDAR) || - (rdev->family == CHIP_PALM)) + (rdev->family == CHIP_PALM) || + (rdev->family == CHIP_CAICOS)) sq_config = 0; else sq_config = VC_ENABLE; -- cgit v1.2.3 From 58c2e9f02ad09b4375eb11c1f65565a83310deca Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:29 -0500 Subject: drm/radeon/kms: add ni_reg.h This adds some new NI (northern islands) specific display register defines. 
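These are field-packing helpers: each *_MODE(x) macro shifts a small mode value into its slot in the control word, so one register write configures a whole pipeline stage for both the graphics and overlay planes. The DCE5 LUT patch at the end of this series uses them exactly this way; a minimal usage sketch (taken from that patch):

    /* bypass the input CSC and feed both graphics and overlay
     * planes through the legacy LUT */
    WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
           (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
            NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
    WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
           (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
            NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
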
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/ni_reg.h | 86 +++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_reg.h | 1 + 2 files changed, 87 insertions(+) create mode 100644 drivers/gpu/drm/radeon/ni_reg.h diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h new file mode 100644 index 000000000000..5db7b7d6feb0 --- /dev/null +++ b/drivers/gpu/drm/radeon/ni_reg.h @@ -0,0 +1,86 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Alex Deucher + */ +#ifndef __NI_REG_H__ +#define __NI_REG_H__ + +/* northern islands - DCE5 */ + +#define NI_INPUT_GAMMA_CONTROL 0x6840 +# define NI_GRPH_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 0) +# define NI_INPUT_GAMMA_USE_LUT 0 +# define NI_INPUT_GAMMA_BYPASS 1 +# define NI_INPUT_GAMMA_SRGB_24 2 +# define NI_INPUT_GAMMA_XVYCC_222 3 +# define NI_OVL_INPUT_GAMMA_MODE(x) (((x) & 0x3) << 4) + +#define NI_PRESCALE_GRPH_CONTROL 0x68b4 +# define NI_GRPH_PRESCALE_BYPASS (1 << 4) + +#define NI_PRESCALE_OVL_CONTROL 0x68c4 +# define NI_OVL_PRESCALE_BYPASS (1 << 4) + +#define NI_INPUT_CSC_CONTROL 0x68d4 +# define NI_INPUT_CSC_GRPH_MODE(x) (((x) & 0x3) << 0) +# define NI_INPUT_CSC_BYPASS 0 +# define NI_INPUT_CSC_PROG_COEFF 1 +# define NI_INPUT_CSC_PROG_SHARED_MATRIXA 2 +# define NI_INPUT_CSC_OVL_MODE(x) (((x) & 0x3) << 4) + +#define NI_OUTPUT_CSC_CONTROL 0x68f0 +# define NI_OUTPUT_CSC_GRPH_MODE(x) (((x) & 0x7) << 0) +# define NI_OUTPUT_CSC_BYPASS 0 +# define NI_OUTPUT_CSC_TV_RGB 1 +# define NI_OUTPUT_CSC_YCBCR_601 2 +# define NI_OUTPUT_CSC_YCBCR_709 3 +# define NI_OUTPUT_CSC_PROG_COEFF 4 +# define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB 5 +# define NI_OUTPUT_CSC_OVL_MODE(x) (((x) & 0x7) << 4) + +#define NI_DEGAMMA_CONTROL 0x6960 +# define NI_GRPH_DEGAMMA_MODE(x) (((x) & 0x3) << 0) +# define NI_DEGAMMA_BYPASS 0 +# define NI_DEGAMMA_SRGB_24 1 +# define NI_DEGAMMA_XVYCC_222 2 +# define NI_OVL_DEGAMMA_MODE(x) (((x) & 0x3) << 4) +# define NI_ICON_DEGAMMA_MODE(x) (((x) & 0x3) << 8) +# define NI_CURSOR_DEGAMMA_MODE(x) (((x) & 0x3) << 12) + +#define NI_GAMUT_REMAP_CONTROL 0x6964 +# define NI_GRPH_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 0) +# define NI_GAMUT_REMAP_BYPASS 0 +# define NI_GAMUT_REMAP_PROG_COEFF 1 +# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA 2 +# define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB 3 +# define NI_OVL_GAMUT_REMAP_MODE(x) (((x) & 0x3) << 4) + +#define NI_REGAMMA_CONTROL 0x6a80 +# define 
NI_GRPH_REGAMMA_MODE(x) (((x) & 0x7) << 0) +# define NI_REGAMMA_BYPASS 0 +# define NI_REGAMMA_SRGB_24 1 +# define NI_REGAMMA_XVYCC_222 2 +# define NI_REGAMMA_PROG_A 3 +# define NI_REGAMMA_PROG_B 4 +# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4) + +#endif diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 3369ce984af1..3cd4dace57c7 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h @@ -55,6 +55,7 @@ #include "r500_reg.h" #include "r600_reg.h" #include "evergreen_reg.h" +#include "ni_reg.h" #define RADEON_MC_AGP_LOCATION 0x014c #define RADEON_MC_AGP_START_MASK 0x0000FFFF -- cgit v1.2.3 From fee298fd6f3d8bde5fc2912192d026c78ba81982 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:30 -0500 Subject: drm/radeon/kms: add support for DCE5 display LUTs The hardware supports advanced user defined color management but at the moment, there is no infrastructure in place to take advantage of it so for now we just support the legacy LUTs. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_display.c | 68 +++++++++++++++++++++++++++++++-- 1 file changed, 65 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 30d867c85825..d26dabf878d9 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -68,7 +68,7 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc) WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); } -static void evergreen_crtc_load_lut(struct drm_crtc *crtc) +static void dce4_crtc_load_lut(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; @@ -98,6 +98,66 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc) } } +static void dce5_crtc_load_lut(struct drm_crtc *crtc) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + int i; + + DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); + + WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset, + (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) | + NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS))); + WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset, + NI_GRPH_PRESCALE_BYPASS); + WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset, + NI_OVL_PRESCALE_BYPASS); + WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset, + (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) | + NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT))); + + WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0); + + WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); + WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); + WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); + + WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); + WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); + WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); + + WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0); + WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007); + + WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0); + for (i = 0; i < 256; i++) { + WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset, + (radeon_crtc->lut_r[i] << 20) 
| + (radeon_crtc->lut_g[i] << 10) | + (radeon_crtc->lut_b[i] << 0)); + } + + WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset, + (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | + NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | + NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) | + NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS))); + WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset, + (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) | + NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS))); + WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset, + (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) | + NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS))); + WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset, + (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) | + NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS))); + /* XXX match this to the depth of the crtc fmt block, move to modeset? */ + WREG32(0x6940 + radeon_crtc->crtc_offset, 0); + +} + static void legacy_crtc_load_lut(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); @@ -130,8 +190,10 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc) if (!crtc->enabled) return; - if (ASIC_IS_DCE4(rdev)) - evergreen_crtc_load_lut(crtc); + if (ASIC_IS_DCE5(rdev)) + dce5_crtc_load_lut(crtc); + else if (ASIC_IS_DCE4(rdev)) + dce4_crtc_load_lut(crtc); else if (ASIC_IS_AVIVO(rdev)) avivo_crtc_load_lut(crtc); else -- cgit v1.2.3 From 0af62b0168043896a042b005ff88caa77dd94d04 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:31 -0500 Subject: drm/radeon/kms: add ucode loader for NI The MC ucode is no longer loaded by the vbios tables as on previous asics. It now must be loaded by the driver. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/Makefile | 2 +- drivers/gpu/drm/radeon/evergreen.c | 21 ++- drivers/gpu/drm/radeon/ni.c | 316 +++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/nid.h | 41 +++++ drivers/gpu/drm/radeon/radeon.h | 4 + 5 files changed, 380 insertions(+), 4 deletions(-) create mode 100644 drivers/gpu/drm/radeon/ni.c create mode 100644 drivers/gpu/drm/radeon/nid.h diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index e97e6f842699..e47eecfc2df4 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -66,7 +66,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ - radeon_trace_points.o + radeon_trace_points.o ni.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index c40e5ad251dd..4fb2101ee476 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2855,12 +2855,27 @@ static int evergreen_startup(struct radeon_device *rdev) /* enable pcie gen2 link */ evergreen_pcie_gen2_enable(rdev); - if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { - r = r600_init_microcode(rdev); + if (ASIC_IS_DCE5(rdev)) { + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { + r = ni_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } + r = btc_mc_load_microcode(rdev); if (r) { - DRM_ERROR("Failed to load firmware!\n"); + DRM_ERROR("Failed to load MC firmware!\n"); return r; } + } else { + if (!rdev->me_fw 
|| !rdev->pfp_fw || !rdev->rlc_fw) { + r = r600_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } } evergreen_mc_program(rdev); diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c new file mode 100644 index 000000000000..5e0bef80ad7f --- /dev/null +++ b/drivers/gpu/drm/radeon/ni.c @@ -0,0 +1,316 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Alex Deucher + */ +#include <linux/firmware.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include "drmP.h" +#include "radeon.h" +#include "radeon_asic.h" +#include "radeon_drm.h" +#include "nid.h" +#include "atom.h" +#include "ni_reg.h" + +#define EVERGREEN_PFP_UCODE_SIZE 1120 +#define EVERGREEN_PM4_UCODE_SIZE 1376 +#define EVERGREEN_RLC_UCODE_SIZE 768 +#define BTC_MC_UCODE_SIZE 6024 + +/* Firmware Names */ +MODULE_FIRMWARE("radeon/BARTS_pfp.bin"); +MODULE_FIRMWARE("radeon/BARTS_me.bin"); +MODULE_FIRMWARE("radeon/BARTS_mc.bin"); +MODULE_FIRMWARE("radeon/BTC_rlc.bin"); +MODULE_FIRMWARE("radeon/TURKS_pfp.bin"); +MODULE_FIRMWARE("radeon/TURKS_me.bin"); +MODULE_FIRMWARE("radeon/TURKS_mc.bin"); +MODULE_FIRMWARE("radeon/CAICOS_pfp.bin"); +MODULE_FIRMWARE("radeon/CAICOS_me.bin"); +MODULE_FIRMWARE("radeon/CAICOS_mc.bin"); + +#define BTC_IO_MC_REGS_SIZE 29 + +static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { + {0x00000077, 0xff010100}, + {0x00000078, 0x00000000}, + {0x00000079, 0x00001434}, + {0x0000007a, 0xcc08ec08}, + {0x0000007b, 0x00040000}, + {0x0000007c, 0x000080c0}, + {0x0000007d, 0x09000000}, + {0x0000007e, 0x00210404}, + {0x00000081, 0x08a8e800}, + {0x00000082, 0x00030444}, + {0x00000083, 0x00000000}, + {0x00000085, 0x00000001}, + {0x00000086, 0x00000002}, + {0x00000087, 0x48490000}, + {0x00000088, 0x20244647}, + {0x00000089, 0x00000005}, + {0x0000008b, 0x66030000}, + {0x0000008c, 0x00006603}, + {0x0000008d, 0x00000100}, + {0x0000008f, 0x00001c0a}, + {0x00000090, 0xff000001}, + {0x00000094, 0x00101101}, + {0x00000095, 0x00000fff}, + {0x00000096, 0x00116fff}, + {0x00000097, 0x60010000}, + {0x00000098, 0x10010000}, + {0x00000099, 0x00006000}, + {0x0000009a, 0x00001000}, + {0x0000009f, 0x00946a00} +}; + +static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { + {0x00000077, 0xff010100}, + {0x00000078, 0x00000000}, + {0x00000079, 0x00001434}, + {0x0000007a, 0xcc08ec08}, + {0x0000007b, 0x00040000}, + {0x0000007c, 0x000080c0}, + {0x0000007d, 0x09000000}, + {0x0000007e, 0x00210404}, + {0x00000081, 0x08a8e800}, +
{0x00000082, 0x00030444}, + {0x00000083, 0x00000000}, + {0x00000085, 0x00000001}, + {0x00000086, 0x00000002}, + {0x00000087, 0x48490000}, + {0x00000088, 0x20244647}, + {0x00000089, 0x00000005}, + {0x0000008b, 0x66030000}, + {0x0000008c, 0x00006603}, + {0x0000008d, 0x00000100}, + {0x0000008f, 0x00001c0a}, + {0x00000090, 0xff000001}, + {0x00000094, 0x00101101}, + {0x00000095, 0x00000fff}, + {0x00000096, 0x00116fff}, + {0x00000097, 0x60010000}, + {0x00000098, 0x10010000}, + {0x00000099, 0x00006000}, + {0x0000009a, 0x00001000}, + {0x0000009f, 0x00936a00} +}; + +static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = { + {0x00000077, 0xff010100}, + {0x00000078, 0x00000000}, + {0x00000079, 0x00001434}, + {0x0000007a, 0xcc08ec08}, + {0x0000007b, 0x00040000}, + {0x0000007c, 0x000080c0}, + {0x0000007d, 0x09000000}, + {0x0000007e, 0x00210404}, + {0x00000081, 0x08a8e800}, + {0x00000082, 0x00030444}, + {0x00000083, 0x00000000}, + {0x00000085, 0x00000001}, + {0x00000086, 0x00000002}, + {0x00000087, 0x48490000}, + {0x00000088, 0x20244647}, + {0x00000089, 0x00000005}, + {0x0000008b, 0x66030000}, + {0x0000008c, 0x00006603}, + {0x0000008d, 0x00000100}, + {0x0000008f, 0x00001c0a}, + {0x00000090, 0xff000001}, + {0x00000094, 0x00101101}, + {0x00000095, 0x00000fff}, + {0x00000096, 0x00116fff}, + {0x00000097, 0x60010000}, + {0x00000098, 0x10010000}, + {0x00000099, 0x00006000}, + {0x0000009a, 0x00001000}, + {0x0000009f, 0x00916a00} +}; + +int btc_mc_load_microcode(struct radeon_device *rdev) +{ + const __be32 *fw_data; + u32 mem_type, running, blackout = 0; + u32 *io_mc_regs; + int i; + + if (!rdev->mc_fw) + return -EINVAL; + + switch (rdev->family) { + case CHIP_BARTS: + io_mc_regs = (u32 *)&barts_io_mc_regs; + break; + case CHIP_TURKS: + io_mc_regs = (u32 *)&turks_io_mc_regs; + break; + case CHIP_CAICOS: + default: + io_mc_regs = (u32 *)&caicos_io_mc_regs; + break; + } + + mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT; + running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; + + if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) { + if (running) { + blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); + WREG32(MC_SHARED_BLACKOUT_CNTL, 1); + } + + /* reset the engine and set to writable */ + WREG32(MC_SEQ_SUP_CNTL, 0x00000008); + WREG32(MC_SEQ_SUP_CNTL, 0x00000010); + + /* load mc io regs */ + for (i = 0; i < BTC_IO_MC_REGS_SIZE; i++) { + WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]); + WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); + } + /* load the MC ucode */ + fw_data = (const __be32 *)rdev->mc_fw->data; + for (i = 0; i < BTC_MC_UCODE_SIZE; i++) + WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++)); + + /* put the engine back into the active state */ + WREG32(MC_SEQ_SUP_CNTL, 0x00000008); + WREG32(MC_SEQ_SUP_CNTL, 0x00000004); + WREG32(MC_SEQ_SUP_CNTL, 0x00000001); + + /* wait for training to complete */ + while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)) + udelay(10); + + if (running) + WREG32(MC_SHARED_BLACKOUT_CNTL, blackout); + } + + return 0; +} + +int ni_init_microcode(struct radeon_device *rdev) +{ + struct platform_device *pdev; + const char *chip_name; + const char *rlc_chip_name; + size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size; + char fw_name[30]; + int err; + + DRM_DEBUG("\n"); + + pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); + err = IS_ERR(pdev); + if (err) { + printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); + return -EINVAL; + } + + switch (rdev->family) { + case CHIP_BARTS: + chip_name = 
"BARTS"; + rlc_chip_name = "BTC"; + break; + case CHIP_TURKS: + chip_name = "TURKS"; + rlc_chip_name = "BTC"; + break; + case CHIP_CAICOS: + chip_name = "CAICOS"; + rlc_chip_name = "BTC"; + break; + default: BUG(); + } + + pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; + me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; + rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; + mc_req_size = BTC_MC_UCODE_SIZE * 4; + + DRM_INFO("Loading %s Microcode\n", chip_name); + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); + err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); + if (err) + goto out; + if (rdev->pfp_fw->size != pfp_req_size) { + printk(KERN_ERR + "ni_cp: Bogus length %zu in firmware \"%s\"\n", + rdev->pfp_fw->size, fw_name); + err = -EINVAL; + goto out; + } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); + err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev); + if (err) + goto out; + if (rdev->me_fw->size != me_req_size) { + printk(KERN_ERR + "ni_cp: Bogus length %zu in firmware \"%s\"\n", + rdev->me_fw->size, fw_name); + err = -EINVAL; + } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); + err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); + if (err) + goto out; + if (rdev->rlc_fw->size != rlc_req_size) { + printk(KERN_ERR + "ni_rlc: Bogus length %zu in firmware \"%s\"\n", + rdev->rlc_fw->size, fw_name); + err = -EINVAL; + } + + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev); + if (err) + goto out; + if (rdev->mc_fw->size != mc_req_size) { + printk(KERN_ERR + "ni_mc: Bogus length %zu in firmware \"%s\"\n", + rdev->mc_fw->size, fw_name); + err = -EINVAL; + } +out: + platform_device_unregister(pdev); + + if (err) { + if (err != -EINVAL) + printk(KERN_ERR + "ni_cp: Failed to load firmware \"%s\"\n", + fw_name); + release_firmware(rdev->pfp_fw); + rdev->pfp_fw = NULL; + release_firmware(rdev->me_fw); + rdev->me_fw = NULL; + release_firmware(rdev->rlc_fw); + rdev->rlc_fw = NULL; + release_firmware(rdev->mc_fw); + rdev->mc_fw = NULL; + } + return err; +} + diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h new file mode 100644 index 000000000000..f7b445390e02 --- /dev/null +++ b/drivers/gpu/drm/radeon/nid.h @@ -0,0 +1,41 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Alex Deucher + */ +#ifndef NI_H +#define NI_H + +#define MC_SHARED_BLACKOUT_CNTL 0x20ac +#define MC_SEQ_SUP_CNTL 0x28c8 +#define RUN_MASK (1 << 0) +#define MC_SEQ_SUP_PGM 0x28cc +#define MC_IO_PAD_CNTL_D0 0x29d0 +#define MEM_FALL_OUT_CMD (1 << 8) +#define MC_SEQ_MISC0 0x2a00 +#define MC_SEQ_MISC0_GDDR5_SHIFT 28 +#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000 +#define MC_SEQ_MISC0_GDDR5_VALUE 5 +#define MC_SEQ_IO_DEBUG_INDEX 0x2a44 +#define MC_SEQ_IO_DEBUG_DATA 0x2a48 + +#endif + diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 8c62b2f58923..da21105488da 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1152,6 +1152,7 @@ struct radeon_device { const struct firmware *me_fw; /* all family ME firmware */ const struct firmware *pfp_fw; /* r6/700 PFP firmware */ const struct firmware *rlc_fw; /* r6/700 RLC firmware */ + const struct firmware *mc_fw; /* NI MC firmware */ struct r600_blit r600_blit; struct r700_vram_scratch vram_scratch; int msi_enabled; /* msi enabled */ @@ -1561,6 +1562,9 @@ extern int evergreen_irq_set(struct radeon_device *rdev); extern int evergreen_blit_init(struct radeon_device *rdev); extern void evergreen_blit_fini(struct radeon_device *rdev); +extern int ni_init_microcode(struct radeon_device *rdev); +extern int btc_mc_load_microcode(struct radeon_device *rdev); + /* radeon_acpi.c */ #if defined(CONFIG_ACPI) extern int radeon_acpi_init(struct radeon_device *rdev); -- cgit v1.2.3 From ed18a3603f5b466e0300fc5e0c349dbcce376861 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:32 -0500 Subject: drm/radeon/kms/ni: load default sclk/mclk/vddc at pm init The vbios only partially initializes the memory controller on NI, so now we need to load the MC ucode in the driver and set the default clocks once the ucode is loaded. 
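The rdev->mc_fw test below is the important part: if ni_init_microcode() failed, the memory controller is still only partially programmed, and forcing a memory reclock could leave the card unstable, so the defaults are only applied once the ucode is known to be on the chip. A sketch of the pattern this patch adds in both radeon_pm_init() and radeon_pm_resume(); the helper name is hypothetical and not in the patch, while the radeon_* setters are the driver's existing entry points:

/* Hypothetical consolidation of the duplicated block added below. */
static void ni_pm_set_default_clocks(struct radeon_device *rdev)
{
	/* Without the MC ucode the memory controller is only partially
	 * initialized by the vbios, so leave the clocks alone. */
	if (!ASIC_IS_DCE5(rdev) || !rdev->mc_fw)
		return;

	if (rdev->pm.default_vddc)
		radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
	if (rdev->pm.default_sclk)
		radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
	if (rdev->pm.default_mclk)
		radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
}

The patch itself open-codes this block at both call sites; factoring it out as above would keep the init and resume paths in sync.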
Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_pm.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 9052d1e3a5fe..3b1b2bf9cdd5 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -530,6 +530,15 @@ void radeon_pm_suspend(struct radeon_device *rdev) void radeon_pm_resume(struct radeon_device *rdev) { + /* set up the default clocks if the MC ucode is loaded */ + if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { + if (rdev->pm.default_vddc) + radeon_atom_set_voltage(rdev, rdev->pm.default_vddc); + if (rdev->pm.default_sclk) + radeon_set_engine_clock(rdev, rdev->pm.default_sclk); + if (rdev->pm.default_mclk) + radeon_set_memory_clock(rdev, rdev->pm.default_mclk); + } /* asic init will reset the default power state */ mutex_lock(&rdev->pm.mutex); rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; @@ -571,6 +580,15 @@ int radeon_pm_init(struct radeon_device *rdev) radeon_combios_get_power_modes(rdev); radeon_pm_print_states(rdev); radeon_pm_init_profile(rdev); + /* set up the default clocks if the MC ucode is loaded */ + if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) { + if (rdev->pm.default_vddc) + radeon_atom_set_voltage(rdev, rdev->pm.default_vddc); + if (rdev->pm.default_sclk) + radeon_set_engine_clock(rdev, rdev->pm.default_sclk); + if (rdev->pm.default_mclk) + radeon_set_memory_clock(rdev, rdev->pm.default_mclk); + } } /* set up the internal thermal sensor if applicable */ -- cgit v1.2.3 From a43b7665de7b1adbda5ce19d57cb65add0982c8f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:33 -0500 Subject: drm/radeon/kms: add radeon_asic struct for NI asics Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_asic.c | 51 ++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 53c62404795d..3a1b16186224 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -836,6 +836,52 @@ static struct radeon_asic sumo_asic = { .pm_get_dynpm_state = &r600_pm_get_dynpm_state, }; +static struct radeon_asic btc_asic = { + .init = &evergreen_init, + .fini = &evergreen_fini, + .suspend = &evergreen_suspend, + .resume = &evergreen_resume, + .cp_commit = &r600_cp_commit, + .gpu_is_lockup = &evergreen_gpu_is_lockup, + .asic_reset = &evergreen_asic_reset, + .vga_set_state = &r600_vga_set_state, + .gart_tlb_flush = &evergreen_pcie_gart_tlb_flush, + .gart_set_page = &rs600_gart_set_page, + .ring_test = &r600_ring_test, + .ring_ib_execute = &r600_ring_ib_execute, + .irq_set = &evergreen_irq_set, + .irq_process = &evergreen_irq_process, + .get_vblank_counter = &evergreen_get_vblank_counter, + .fence_ring_emit = &r600_fence_ring_emit, + .cs_parse = &evergreen_cs_parse, + .copy_blit = &evergreen_copy_blit, + .copy_dma = &evergreen_copy_blit, + .copy = &evergreen_copy_blit, + .get_engine_clock = &radeon_atom_get_engine_clock, + .set_engine_clock = &radeon_atom_set_engine_clock, + .get_memory_clock = &radeon_atom_get_memory_clock, + .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = NULL, + .set_pcie_lanes = NULL, + .set_clock_gating = NULL, + .set_surface_reg = r600_set_surface_reg, + .clear_surface_reg = r600_clear_surface_reg, + .bandwidth_update = &evergreen_bandwidth_update, + .hpd_init = &evergreen_hpd_init, + 
.hpd_fini = &evergreen_hpd_fini, + .hpd_sense = &evergreen_hpd_sense, + .hpd_set_polarity = &evergreen_hpd_set_polarity, + .gui_idle = &r600_gui_idle, + .pm_misc = &evergreen_pm_misc, + .pm_prepare = &evergreen_pm_prepare, + .pm_finish = &evergreen_pm_finish, + .pm_init_profile = &r600_pm_init_profile, + .pm_get_dynpm_state = &r600_pm_get_dynpm_state, + .pre_page_flip = &evergreen_pre_page_flip, + .page_flip = &evergreen_page_flip, + .post_page_flip = &evergreen_post_page_flip, +}; + int radeon_asic_init(struct radeon_device *rdev) { radeon_register_accessor_init(rdev); @@ -923,6 +969,11 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_PALM: rdev->asic = &sumo_asic; break; + case CHIP_BARTS: + case CHIP_TURKS: + case CHIP_CAICOS: + rdev->asic = &btc_asic; + break; default: /* FIXME: not supported yet */ return -EINVAL; -- cgit v1.2.3 From 0d1014a2f2d1ad660caafae6f62796a93307867f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:34 -0500 Subject: drm/radeon/kms: don't enable pcie gen2 on NI yet Still needs to be implemented. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/evergreen.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4fb2101ee476..7fe8ebdcdc0e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2853,7 +2853,8 @@ static int evergreen_startup(struct radeon_device *rdev) int r; /* enable pcie gen2 link */ - evergreen_pcie_gen2_enable(rdev); + if (!ASIC_IS_DCE5(rdev)) + evergreen_pcie_gen2_enable(rdev); if (ASIC_IS_DCE5(rdev)) { if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { -- cgit v1.2.3 From 2b2fd604bda01bfea9f1657ca468c134448e03a5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 6 Jan 2011 21:19:35 -0500 Subject: drm/radeon/kms: add NI pci ids Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- include/drm/drm_pciids.h | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index e6b28a39942f..fe29ae328bd9 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -142,6 +142,42 @@ {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6723, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6724, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6725, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6726, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6727, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6728, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_BARTS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6743, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6745, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6746, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6763, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6764, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6765, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6766, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ -- cgit v1.2.3 From fea6f330cdd18f79d50bcdfbedb96d929a173e0d Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 7 Jan 2011 08:12:27 +0300 Subject: vga_switcheroo: comparing too few characters in strncmp() This is a copy-and-paste bug. We should be comparing 4 characters here instead of 3. 
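With a length of 3, strncmp() only compares the first three characters, so the trailing 'D'/'S' that distinguishes the mux-only commands is never examined and any write beginning with "MIG" or "MDI" selects the mux path. A small stand-alone demonstration of the prefix semantics (illustrative only, not part of the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* n = 3 stops before the fourth character, so "MIGD" is matched
	 * by any command that merely starts with "MIG". */
	printf("%d\n", strncmp("MIGRATE", "MIGD", 3));	/* 0: false match */
	printf("%d\n", strncmp("MIGRATE", "MIGD", 4));	/* > 0: rejected */

	/* The "DIS" command really is three characters, which is where the
	 * copied length presumably came from. */
	printf("%d\n", strncmp("DIS", "DIS", 3));	/* 0: intended match */
	return 0;
}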
Signed-off-by: Dan Carpenter Signed-off-by: Dave Airlie --- drivers/gpu/vga/vga_switcheroo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index d2d8543686d3..e01cacba685f 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -346,11 +346,11 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, if (strncmp(usercmd, "DIS", 3) == 0) client_id = VGA_SWITCHEROO_DIS; - if (strncmp(usercmd, "MIGD", 3) == 0) { + if (strncmp(usercmd, "MIGD", 4) == 0) { just_mux = true; client_id = VGA_SWITCHEROO_IGD; } - if (strncmp(usercmd, "MDIS", 3) == 0) { + if (strncmp(usercmd, "MDIS", 4) == 0) { just_mux = true; client_id = VGA_SWITCHEROO_DIS; } -- cgit v1.2.3 From 187f3da3d98a1740d71e1f5d4fb3c68df09ecca8 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 28 Nov 2010 19:06:09 +0100 Subject: radeon: consolidate asic-specific function decls for pre-r600 Move them to radeon_asic.h together with the other asic specific stuff. Signed-off-by: Daniel Vetter Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon.h | 59 ------------------------------------ drivers/gpu/drm/radeon/radeon_asic.h | 53 ++++++++++++++++++++++++++++++-- 2 files changed, 50 insertions(+), 62 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index da21105488da..e9486630a467 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1449,65 +1449,6 @@ extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc extern int radeon_resume_kms(struct drm_device *dev); extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); -/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ -extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp); -extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp); - -/* rv200,rv250,rv280 */ -extern void r200_set_safe_registers(struct radeon_device *rdev); - -/* r300,r350,rv350,rv370,rv380 */ -extern void r300_set_reg_safe(struct radeon_device *rdev); -extern void r300_mc_program(struct radeon_device *rdev); -extern void r300_mc_init(struct radeon_device *rdev); -extern void r300_clock_startup(struct radeon_device *rdev); -extern int r300_mc_wait_for_idle(struct radeon_device *rdev); -extern int rv370_pcie_gart_init(struct radeon_device *rdev); -extern void rv370_pcie_gart_fini(struct radeon_device *rdev); -extern int rv370_pcie_gart_enable(struct radeon_device *rdev); -extern void rv370_pcie_gart_disable(struct radeon_device *rdev); - -/* r420,r423,rv410 */ -extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); -extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); -extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); -extern void r420_pipes_init(struct radeon_device *rdev); - -/* rv515 */ -struct rv515_mc_save { - u32 d1vga_control; - u32 d2vga_control; - u32 vga_render_control; - u32 vga_hdp_control; - u32 d1crtc_control; - u32 d2crtc_control; -}; -extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev); -extern void rv515_vga_render_disable(struct radeon_device *rdev); -extern void rv515_set_safe_registers(struct radeon_device *rdev); -extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save); -extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save); 
-extern void rv515_clock_startup(struct radeon_device *rdev); -extern void rv515_debugfs(struct radeon_device *rdev); -extern int rv515_suspend(struct radeon_device *rdev); - -/* rs400 */ -extern int rs400_gart_init(struct radeon_device *rdev); -extern int rs400_gart_enable(struct radeon_device *rdev); -extern void rs400_gart_adjust_size(struct radeon_device *rdev); -extern void rs400_gart_disable(struct radeon_device *rdev); -extern void rs400_gart_fini(struct radeon_device *rdev); - -/* rs600 */ -extern void rs600_set_safe_registers(struct radeon_device *rdev); -extern int rs600_irq_set(struct radeon_device *rdev); -extern void rs600_irq_disable(struct radeon_device *rdev); - -/* rs690, rs740 */ -extern void rs690_line_buffer_adjust(struct radeon_device *rdev, - struct drm_display_mode *mode1, - struct drm_display_mode *mode2); - /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ extern bool r600_card_posted(struct radeon_device *rdev); extern void r600_cp_stop(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 9ac71b8d1b9d..e01f07718539 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -102,6 +102,11 @@ int r100_pci_gart_enable(struct radeon_device *rdev); void r100_pci_gart_disable(struct radeon_device *rdev); int r100_debugfs_mc_info_init(struct radeon_device *rdev); int r100_gui_wait_for_idle(struct radeon_device *rdev); +void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, + struct radeon_cp *cp); +bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, + struct r100_gpu_lockup *lockup, + struct radeon_cp *cp); void r100_ib_fini(struct radeon_device *rdev); int r100_ib_init(struct radeon_device *rdev); void r100_irq_disable(struct radeon_device *rdev); @@ -138,10 +143,11 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc); * r200,rv250,rs300,rv280 */ extern int r200_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_pages, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_pages, struct radeon_fence *fence); +void r200_set_safe_registers(struct radeon_device *rdev); /* * r300,r350,rv350,rv380 @@ -162,6 +168,15 @@ extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); extern int rv370_get_pcie_lanes(struct radeon_device *rdev); +extern void r300_set_reg_safe(struct radeon_device *rdev); +extern void r300_mc_program(struct radeon_device *rdev); +extern void r300_mc_init(struct radeon_device *rdev); +extern void r300_clock_startup(struct radeon_device *rdev); +extern int r300_mc_wait_for_idle(struct radeon_device *rdev); +extern int rv370_pcie_gart_init(struct radeon_device *rdev); +extern void rv370_pcie_gart_fini(struct radeon_device *rdev); +extern int rv370_pcie_gart_enable(struct radeon_device *rdev); +extern void rv370_pcie_gart_disable(struct radeon_device *rdev); /* * r420,r423,rv410 @@ -171,6 +186,10 @@ extern void r420_fini(struct radeon_device *rdev); extern int r420_suspend(struct radeon_device *rdev); extern int r420_resume(struct radeon_device *rdev); extern void r420_pm_init_profile(struct radeon_device *rdev); +extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); +extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); +extern int r420_debugfs_pipes_info_init(struct 
radeon_device *rdev); +extern void r420_pipes_init(struct radeon_device *rdev); /* * rs400,rs480 @@ -183,6 +202,12 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev); int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); +int rs400_gart_init(struct radeon_device *rdev); +int rs400_gart_enable(struct radeon_device *rdev); +void rs400_gart_adjust_size(struct radeon_device *rdev); +void rs400_gart_disable(struct radeon_device *rdev); +void rs400_gart_fini(struct radeon_device *rdev); + /* * rs600. @@ -194,6 +219,7 @@ extern int rs600_suspend(struct radeon_device *rdev); extern int rs600_resume(struct radeon_device *rdev); int rs600_irq_set(struct radeon_device *rdev); int rs600_irq_process(struct radeon_device *rdev); +void rs600_irq_disable(struct radeon_device *rdev); u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); void rs600_gart_tlb_flush(struct radeon_device *rdev); int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); @@ -211,6 +237,8 @@ extern void rs600_pm_finish(struct radeon_device *rdev); extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc); extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc); +void rs600_set_safe_registers(struct radeon_device *rdev); + /* * rs690,rs740 @@ -222,10 +250,21 @@ int rs690_suspend(struct radeon_device *rdev); uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs690_bandwidth_update(struct radeon_device *rdev); +void rs690_line_buffer_adjust(struct radeon_device *rdev, + struct drm_display_mode *mode1, + struct drm_display_mode *mode2); /* * rv515 */ +struct rv515_mc_save { + u32 d1vga_control; + u32 d2vga_control; + u32 vga_render_control; + u32 vga_hdp_control; + u32 d1crtc_control; + u32 d2crtc_control; +}; int rv515_init(struct radeon_device *rdev); void rv515_fini(struct radeon_device *rdev); uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); @@ -236,6 +275,14 @@ void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rv515_bandwidth_update(struct radeon_device *rdev); int rv515_resume(struct radeon_device *rdev); int rv515_suspend(struct radeon_device *rdev); +void rv515_bandwidth_avivo_update(struct radeon_device *rdev); +void rv515_vga_render_disable(struct radeon_device *rdev); +void rv515_set_safe_registers(struct radeon_device *rdev); +void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save); +void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save); +void rv515_clock_startup(struct radeon_device *rdev); +void rv515_debugfs(struct radeon_device *rdev); + /* * r520,rv530,rv560,rv570,r580 -- cgit v1.2.3 From 07cfe0e7a820ecad078c04e9c2a102521709145d Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Thu, 6 Jan 2011 20:29:53 +0100 Subject: drm/nouveau: fix hwmon device binding Bind the hwmon structs to nouveau device kobj. This makes sure the hwmon files are created in the device subdir in line with all other hwmon drivers. 
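At this point the hwmon core has no way to register attributes together with the class device, so drivers call hwmon_device_register() and then create the attribute group themselves; the convention this fix restores is to create the group on the underlying PCI device, so the files appear under /sys/class/hwmon/hwmonN/device/ alongside every other driver's sensors, which is where libsensors looks for them. A sketch of that idiom under the stated assumption (generic example, not nouveau's actual code; the example_* names are hypothetical):

/* Register a hwmon class device and hang the attributes off the parent. */
static struct device *hwmon_dev;

static int example_hwmon_init(struct pci_dev *pdev,
			      const struct attribute_group *grp)
{
	int ret;

	hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(hwmon_dev))
		return PTR_ERR(hwmon_dev);

	/* Create the files on the parent device, not the class device. */
	ret = sysfs_create_group(&pdev->dev.kobj, grp);
	if (ret)
		hwmon_device_unregister(hwmon_dev);
	return ret;
}

static void example_hwmon_fini(struct pci_dev *pdev,
			       const struct attribute_group *grp)
{
	/* The group must be removed from the same kobject it was created
	 * on, so any teardown path has to mirror the init-time choice. */
	sysfs_remove_group(&pdev->dev.kobj, grp);
	hwmon_device_unregister(hwmon_dev);
}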
Signed-off-by: Lucas Stach Signed-off-by: Ben Skeggs --- drivers/gpu/drm/nouveau/nouveau_pm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index d93814160bcf..fb846a3fef15 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -422,8 +422,7 @@ nouveau_hwmon_init(struct drm_device *dev) return ret; } dev_set_drvdata(hwmon_dev, dev); - ret = sysfs_create_group(&hwmon_dev->kobj, - &hwmon_attrgroup); + ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); if (ret) { NV_ERROR(dev, "Unable to create hwmon sysfs file: %d\n", ret); -- cgit v1.2.3 From 0ba41e449fd0f45f5b29c1009020ab1b298bedda Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 8 Jan 2011 15:10:41 +0000 Subject: drm: Restore the old_fb upon modeset failure ... or else we may end up disabling the wrong framebuffer, leading to an OOPS, e.g: [ 6033.229012] kernel BUG at drivers/gpu/drm/i915/i915_gem.c:3271! [ 6033.229012] invalid opcode: 0000 [#1] SMP [ 6033.229012] last sysfs file: /sys/devices/virtual/backlight/acpi_video0/uevent [ 6033.229012] Modules linked in: sunrpc cpufreq_ondemand acpi_cpufreq mperf snd_hda_codec_analog snd_hda_intel snd_hda_codec snd_hwdep snd_seq snd_seq_device snd_pcm snd_timer thinkpad_acpi ppdev snd r852 sm_common iTCO_wdt uvcvideo i2c_i801 iTCO_vendor_support microcode wmi nand videodev nand_ids nand_ecc snd_page_alloc parport_pc parport mtd soundcore joydev v4l1_compat pcspkr uinput ipv6 sdhci_pci sdhci mmc_core yenta_socket i915 drm_kms_helper drm i2c_algo_bit i2c_core video output [last unloaded: scsi_wait_scan] [ 6033.229012] [ 6033.229012] Pid: 4834, comm: Xorg Not tainted 2.6.37-rc8+ #25 7661BL5/7661BL5 [ 6033.229012] EIP: 0060:[] EFLAGS: 00013246 CPU: 0 [ 6033.229012] EIP is at i915_gem_object_unpin+0x23/0x76 [i915] [ 6033.229012] EAX: f68a4000 EBX: f6831f00 ECX: 000600fa EDX: f68a8000 [ 6033.229012] ESI: f68a4014 EDI: f68a42b8 EBP: f2169c44 ESP: f2169c3c [ 6033.229012] DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 [ 6033.229012] Process Xorg (pid: 4834, ti=f2168000 task=f21c8000 task.ti=f2168000) [ 6033.229012] Stack: [ 6033.229012] f3a84800 f68a4014 f2169c54 f87045d8 f3a84800 f872d9a8 f2169c68 f7fd8091 [ 6033.229012] f3b952a4 00000000 f68a414c f2169cf0 f7fd9377 00000000 00000000 f7fd98b0 [ 6033.229012] f7fd9f4e 0000000f f7f328a0 00000000 00000000 00000000 f2169ca4 f68a414c [ 6033.229012] Call Trace: [ 6033.229012] [] ? intel_crtc_disable+0x36/0x41 [i915] [ 6033.229012] [] ? drm_helper_disable_unused_functions+0xcd/0xf9 [drm_kms_helper] [ 6033.229012] [] ? drm_crtc_helper_set_config+0x62a/0x7f7 [drm_kms_helper] [ 6033.229012] [] ? __slab_free+0x1b/0xa4 [ 6033.229012] [] ? drm_fb_helper_initial_config+0x466/0x497 [drm_kms_helper] [ 6033.229012] [] ? drm_fb_helper_restore+0x10/0x2a [drm_kms_helper] [ 6033.229012] [] ? i915_driver_lastclose+0x2a/0x57 [i915] [ 6033.229012] [] ? drm_lastclose+0x45/0x23e [drm] [ 6033.229012] [] ? 
drm_release+0x462/0x4d7 [drm] Signed-off-by: Chris Wilson Cc: stable@kernel.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/drm_crtc_helper.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 8de333aeeca3..952b3d4fb2a6 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -661,6 +661,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) old_fb)) { DRM_ERROR("failed to set mode on [CRTC:%d]\n", set->crtc->base.id); + set->crtc->fb = old_fb; ret = -EINVAL; goto fail; } @@ -675,8 +676,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) set->crtc->fb = set->fb; ret = crtc_funcs->mode_set_base(set->crtc, set->x, set->y, old_fb); - if (ret != 0) + if (ret != 0) { + set->crtc->fb = old_fb; goto fail; + } } DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); for (i = 0; i < set->num_connectors; i++) { -- cgit v1.2.3 From c268cd36cc8fd543f58aee0392c78bb92e54edcb Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 17 Dec 2010 10:27:25 -0500 Subject: drm/radeon/kms: only enable hdmi features if the monitor supports audio This avoids some issues in the current hdmi audio code for monitors without audio. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_encoders.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index e7a948f60587..8fd184286c0b 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -641,7 +641,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) switch (connector->connector_type) { case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ - if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (drm_detect_monitor_audio(radeon_connector->edid)) { /* fix me */ if (ASIC_IS_DCE4(rdev)) return ATOM_ENCODER_MODE_DVI; @@ -655,7 +655,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_HDMIA: default: - if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + if (drm_detect_monitor_audio(radeon_connector->edid)) { /* fix me */ if (ASIC_IS_DCE4(rdev)) return ATOM_ENCODER_MODE_DVI; @@ -673,7 +673,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) return ATOM_ENCODER_MODE_DP; - else if (drm_detect_hdmi_monitor(radeon_connector->edid)) { + else if (drm_detect_monitor_audio(radeon_connector->edid)) { /* fix me */ if (ASIC_IS_DCE4(rdev)) return ATOM_ENCODER_MODE_DVI; -- cgit v1.2.3 From 56bec7c009872ef33fe452ea75fecba481351b44 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 5 Jan 2011 11:57:52 -0500 Subject: drm/radeon/kms: disable underscan by default Lots of HDMI TVs overscan the incoming image by default. The underscan option was added as a way to compensate for that by underscanning the image so that the edges would not be cut off on an overscanning TV. However, the TV provides no way of knowing whether it is overscanning or not. If the user has disabled overscan on their TV or has a TV that does not overscan, you will get black bars around the edges of your screen. 
Prior to the patch we got complaints and bug reports from users with overscanning TVs, now with the patch, we get lots of complaints and bug reports from users with non-overscanning TVs. There's no good default, but on average there have been more users complaining about it being on by default than off, so change it to off. This will probably result in a new deluge of overscanning TV user complaints. There's no winning. Signed-off-by: Alex Deucher Signed-off-by: Dave Airlie --- drivers/gpu/drm/radeon/radeon_connectors.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 5b00f92a50a2..22b7e3dc0eca 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1219,7 +1219,7 @@ radeon_add_atom_connector(struct drm_device *dev, if (ASIC_IS_AVIVO(rdev)) { drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, - UNDERSCAN_AUTO); + UNDERSCAN_OFF); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); @@ -1259,7 +1259,7 @@ radeon_add_atom_connector(struct drm_device *dev, if (ASIC_IS_AVIVO(rdev)) { drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, - UNDERSCAN_AUTO); + UNDERSCAN_OFF); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); @@ -1302,7 +1302,7 @@ radeon_add_atom_connector(struct drm_device *dev, if (ASIC_IS_AVIVO(rdev)) { drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_property, - UNDERSCAN_AUTO); + UNDERSCAN_OFF); drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.underscan_hborder_property, 0); -- cgit v1.2.3
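One closing note on the HDMI audio-detection change earlier in this series: drm_detect_hdmi_monitor() only answers whether the sink is HDMI rather than DVI, while drm_detect_monitor_audio() additionally requires the EDID to advertise audio support, which is what the encoder-mode decision actually needs. The information lives in the EDID's CEA-861 extension block; a stand-alone sketch of the "basic audio" test, with the tag and bit position taken from CEA-861 rather than copied from the DRM helpers:

#include <stdbool.h>
#include <stdint.h>

#define CEA_EXT_TAG		0x02		/* CEA-861 extension block tag */
#define CEA_BASIC_AUDIO		(1 << 6)	/* byte 3: sink supports basic audio */

/* Does a 128-byte EDID extension block advertise basic audio?  Real sinks
 * may also describe audio via CEA audio data blocks, which the kernel
 * helper walks as well; this sketch checks only the global flag. */
static bool cea_ext_has_basic_audio(const uint8_t ext[128])
{
	return ext[0] == CEA_EXT_TAG && (ext[3] & CEA_BASIC_AUDIO);
}

int main(void)
{
	uint8_t ext[128] = { [0] = CEA_EXT_TAG, [3] = CEA_BASIC_AUDIO };
	return cea_ext_has_basic_audio(ext) ? 0 : 1;
}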