author     Daniel Vetter <daniel.vetter@ffwll.ch>    2012-06-01 10:49:16 +0200
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2012-06-01 10:52:54 +0200
commit     e269f90f3d3f7c70cf661c660bf445597261f54a (patch)
tree       f09685517e9efa7d675dad8fd13694157ce50521 /drivers/gpu/drm/i915/i915_irq.c
parent     drm/i915: simplify sysfs setup code (diff)
parent     radeon: add radeon prime vmap support. (diff)
Merge remote-tracking branch 'airlied/drm-prime-vmap' into drm-intel-next-queued
We need the latest dma-buf code from Dave Airlie so that we can improve
the backing storage handling code in drm/i915 with Chris Wilson's
unbound tracking and stolen-memory-backed GEM object code.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_irq.c')
-rw-r--r--   drivers/gpu/drm/i915/i915_irq.c   37
1 file changed, 7 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6553dcc2ca79..0e876646d769 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -350,8 +350,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
 						    rps_work);
-	u8 new_delay = dev_priv->cur_delay;
 	u32 pm_iir, pm_imr;
+	u8 new_delay;
 
 	spin_lock_irq(&dev_priv->rps_lock);
 	pm_iir = dev_priv->pm_iir;
@@ -360,41 +360,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	I915_WRITE(GEN6_PMIMR, 0);
 	spin_unlock_irq(&dev_priv->rps_lock);
 
-	if (!pm_iir)
+	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
 		return;
 
 	mutex_lock(&dev_priv->dev->struct_mutex);
-	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-		if (dev_priv->cur_delay != dev_priv->max_delay)
-			new_delay = dev_priv->cur_delay + 1;
-		if (new_delay > dev_priv->max_delay)
-			new_delay = dev_priv->max_delay;
-	} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
-		gen6_gt_force_wake_get(dev_priv);
-		if (dev_priv->cur_delay != dev_priv->min_delay)
-			new_delay = dev_priv->cur_delay - 1;
-		if (new_delay < dev_priv->min_delay) {
-			new_delay = dev_priv->min_delay;
-			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
-				   ((new_delay << 16) & 0x3f0000));
-		} else {
-			/* Make sure we continue to get down interrupts
-			 * until we hit the minimum frequency */
-			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
-		}
-		gen6_gt_force_wake_put(dev_priv);
-	}
+
+	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+		new_delay = dev_priv->cur_delay + 1;
+	else
+		new_delay = dev_priv->cur_delay - 1;
 
 	gen6_set_rps(dev_priv->dev, new_delay);
-	dev_priv->cur_delay = new_delay;
 
-	/*
-	 * rps_lock not held here because clearing is non-destructive. There is
-	 * an *extremely* unlikely race with gen6_rps_enable() that is prevented
-	 * by holding struct_mutex for the duration of the write.
-	 */
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
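For context, the i915_irq.c hunk above removes the per-direction clamping and force-wake handling from the RPS work handler and keeps only the choice of direction, which is only safe if the callee keeps the requested delay within [min_delay, max_delay]. The following is a minimal standalone sketch of that split, assuming the clamping responsibility lives in the set-frequency helper; rps_state, clamp_and_set_rps and handle_rps_event are hypothetical stand-ins for illustration, not the kernel's actual gen6_set_rps implementation.

/*
 * Standalone sketch (not kernel code): the work-handler side only picks a
 * direction, the set-frequency side clamps. Assumes cur_delay starts inside
 * the valid [min_delay, max_delay] range.
 */
#include <stdio.h>

struct rps_state {
	unsigned char cur_delay;
	unsigned char min_delay;
	unsigned char max_delay;
};

/* Hypothetical stand-in for the set-frequency helper: clamps before applying. */
static void clamp_and_set_rps(struct rps_state *rps, unsigned char new_delay)
{
	if (new_delay > rps->max_delay)
		new_delay = rps->max_delay;
	if (new_delay < rps->min_delay)
		new_delay = rps->min_delay;

	rps->cur_delay = new_delay;	/* hardware write omitted in this sketch */
}

/* Mirrors the simplified worker: step up or down, let the callee clamp. */
static void handle_rps_event(struct rps_state *rps, int up_event)
{
	unsigned char new_delay;

	if (up_event)
		new_delay = rps->cur_delay + 1;
	else
		new_delay = rps->cur_delay - 1;

	clamp_and_set_rps(rps, new_delay);
}

int main(void)
{
	struct rps_state rps = { .cur_delay = 7, .min_delay = 5, .max_delay = 9 };

	handle_rps_event(&rps, 1);	/* up   -> 8 */
	handle_rps_event(&rps, 1);	/* up   -> 9 */
	handle_rps_event(&rps, 1);	/* up   -> clamped at 9 */
	handle_rps_event(&rps, 0);	/* down -> 8 */

	printf("cur_delay = %d\n", rps.cur_delay);
	return 0;
}

Compiled and run, this prints cur_delay = 8: the out-of-range request is absorbed by the clamp in the callee rather than by the event handler, which is the shape the simplified worker relies on.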