Diffstat (limited to 'drivers/gpu/drm/i915'):

 drivers/gpu/drm/i915/i915_dma.c         |   9
 drivers/gpu/drm/i915/i915_drv.c         |  33
 drivers/gpu/drm/i915/i915_drv.h         |  13
 drivers/gpu/drm/i915/i915_gem.c         |  11
 drivers/gpu/drm/i915/i915_gem_userptr.c | 409
 drivers/gpu/drm/i915/i915_irq.c         |  33
 drivers/gpu/drm/i915/i915_reg.h         |  12
 drivers/gpu/drm/i915/intel_bios.c       |   2
 drivers/gpu/drm/i915/intel_crt.c        |   9
 drivers/gpu/drm/i915/intel_display.c    |  73
 drivers/gpu/drm/i915/intel_dp.c         |  92
 drivers/gpu/drm/i915/intel_drv.h        |   9
 drivers/gpu/drm/i915/intel_hdmi.c       |   7
 drivers/gpu/drm/i915/intel_lvds.c       |   2
 drivers/gpu/drm/i915/intel_panel.c      |   8
 drivers/gpu/drm/i915/intel_ringbuffer.c |  66
 drivers/gpu/drm/i915/intel_tv.c         |  21
 17 files changed, 500 insertions(+), 309 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2e7f03ad5ee2..9933c26017ed 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1336,12 +1336,17 @@ static int i915_load_modeset_init(struct drm_device *dev)

 	intel_power_domains_init_hw(dev_priv);

+	/*
+	 * We enable some interrupt sources in our postinstall hooks, so mark
+	 * interrupts as enabled _before_ actually enabling them to avoid
+	 * special cases in our ordering checks.
+	 */
+	dev_priv->pm._irqs_disabled = false;
+
 	ret = drm_irq_install(dev, dev->pdev->irq);
 	if (ret)
 		goto cleanup_gem_stolen;

-	dev_priv->pm._irqs_disabled = false;
-
 	/* Important: The output setup functions called by modeset_init need
 	 * working irqs for e.g. gmbus and dp aux transfers. */
 	intel_modeset_init(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ec96f9a9724c..e27cdbe9d524 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -494,6 +494,36 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
 	return true;
 }

+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
+{
+	spin_lock_irq(&dev_priv->irq_lock);
+
+	dev_priv->long_hpd_port_mask = 0;
+	dev_priv->short_hpd_port_mask = 0;
+	dev_priv->hpd_event_bits = 0;
+
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	cancel_work_sync(&dev_priv->dig_port_work);
+	cancel_work_sync(&dev_priv->hotplug_work);
+	cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
+}
+
+static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_encoder *encoder;
+
+	drm_modeset_lock_all(dev);
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+		if (intel_encoder->suspend)
+			intel_encoder->suspend(intel_encoder);
+	}
+	drm_modeset_unlock_all(dev);
+}
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -538,6 +568,9 @@ static int i915_drm_freeze(struct drm_device *dev)
 		flush_delayed_work(&dev_priv->rps.delayed_resume_work);

 		intel_runtime_pm_disable_interrupts(dev);
+		intel_hpd_cancel_work(dev_priv);
+
+		intel_suspend_encoders(dev_priv);

 		intel_suspend_gt_powersave(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4412f6a4383b..3524306d8cfb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -184,6 +184,7 @@ enum hpd_pin {
 		if ((1 << (domain)) & (mask))

 struct drm_i915_private;
+struct i915_mm_struct;
 struct i915_mmu_object;

 enum intel_dpll_id {
@@ -1458,7 +1459,7 @@ struct drm_i915_private {
 		} hpd_mark;
 	} hpd_stats[HPD_NUM_PINS];
 	u32 hpd_event_bits;
-	struct timer_list hotplug_reenable_timer;
+	struct delayed_work hotplug_reenable_work;

 	struct i915_fbc fbc;
 	struct i915_drrs drrs;
@@ -1506,9 +1507,8 @@ struct drm_i915_private {
 	struct i915_gtt gtt; /* VM representing the global address space */

 	struct i915_gem_mm mm;
-#if defined(CONFIG_MMU_NOTIFIER)
-	DECLARE_HASHTABLE(mmu_notifiers, 7);
-#endif
+	DECLARE_HASHTABLE(mm_structs, 7);
+	struct mutex mm_lock;

 	/* Kernel Modesetting */

@@ -1814,8 +1814,8 @@ struct drm_i915_gem_object {
 		unsigned workers :4;
 #define I915_GEM_USERPTR_MAX_WORKERS 15

-		struct mm_struct *mm;
-		struct i915_mmu_object *mn;
+		struct i915_mm_struct *mm;
+		struct i915_mmu_object *mmu_object;
 		struct work_struct *work;
 	} userptr;
 };
@@ -2178,6 +2178,7 @@ extern unsigned long
 i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);

 extern void intel_console_resume(struct work_struct *work);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ba7f5c6bb50d..ad55b06a3cb1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1590,10 +1590,13 @@ unlock:
 out:
 	switch (ret) {
 	case -EIO:
-		/* If this -EIO is due to a gpu hang, give the reset code a
-		 * chance to clean up the mess. Otherwise return the proper
-		 * SIGBUS. */
-		if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+		/*
+		 * We eat errors when the gpu is terminally wedged to avoid
+		 * userspace unduly crashing (gl has no provisions for mmaps to
+		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
+		 * and so needs to be reported.
+		 */
+		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
 			ret = VM_FAULT_SIGBUS;
 			break;
 		}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index fe69fc837d9e..d38413997379 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -32,6 +32,15 @@
 #include <linux/mempolicy.h>
 #include <linux/swap.h>

+struct i915_mm_struct {
+	struct mm_struct *mm;
+	struct drm_device *dev;
+	struct i915_mmu_notifier *mn;
+	struct hlist_node node;
+	struct kref kref;
+	struct work_struct work;
+};
+
 #if defined(CONFIG_MMU_NOTIFIER)
 #include <linux/interval_tree.h>

@@ -41,16 +50,12 @@ struct i915_mmu_notifier {
 	struct mmu_notifier mn;
 	struct rb_root objects;
 	struct list_head linear;
-	struct drm_device *dev;
-	struct mm_struct *mm;
-	struct work_struct work;
-	unsigned long count;
 	unsigned long serial;
 	bool has_linear;
 };

 struct i915_mmu_object {
-	struct i915_mmu_notifier *mmu;
+	struct i915_mmu_notifier *mn;
 	struct interval_tree_node it;
 	struct list_head link;
 	struct drm_i915_gem_object *obj;
@@ -96,18 +101,18 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
 				      unsigned long start,
 				      unsigned long end)
 {
-	struct i915_mmu_object *mmu;
+	struct i915_mmu_object *mo;
 	unsigned long serial;

 restart:
 	serial = mn->serial;
-	list_for_each_entry(mmu, &mn->linear, link) {
+	list_for_each_entry(mo, &mn->linear, link) {
 		struct drm_i915_gem_object *obj;

-		if (mmu->it.last < start || mmu->it.start > end)
+		if (mo->it.last < start || mo->it.start > end)
 			continue;

-		obj = mmu->obj;
+		obj = mo->obj;
 		drm_gem_object_reference(&obj->base);
 		spin_unlock(&mn->lock);

@@ -160,130 +165,47 @@ static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
 };

 static struct i915_mmu_notifier *
-__i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_mmu_notifier *mmu;
-
-	/* Protected by dev->struct_mutex */
-	hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
-		if (mmu->mm == mm)
-			return mmu;
-
-	return NULL;
-}
-
-static struct i915_mmu_notifier *
-i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
+i915_mmu_notifier_create(struct mm_struct *mm)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_mmu_notifier *mmu;
+	struct i915_mmu_notifier *mn;
 	int ret;

-	lockdep_assert_held(&dev->struct_mutex);
-
-	mmu = __i915_mmu_notifier_lookup(dev, mm);
- if (mmu) - return mmu; - - mmu = kmalloc(sizeof(*mmu), GFP_KERNEL); - if (mmu == NULL) + mn = kmalloc(sizeof(*mn), GFP_KERNEL); + if (mn == NULL) return ERR_PTR(-ENOMEM); - spin_lock_init(&mmu->lock); - mmu->dev = dev; - mmu->mn.ops = &i915_gem_userptr_notifier; - mmu->mm = mm; - mmu->objects = RB_ROOT; - mmu->count = 0; - mmu->serial = 1; - INIT_LIST_HEAD(&mmu->linear); - mmu->has_linear = false; - - /* Protected by mmap_sem (write-lock) */ - ret = __mmu_notifier_register(&mmu->mn, mm); + spin_lock_init(&mn->lock); + mn->mn.ops = &i915_gem_userptr_notifier; + mn->objects = RB_ROOT; + mn->serial = 1; + INIT_LIST_HEAD(&mn->linear); + mn->has_linear = false; + + /* Protected by mmap_sem (write-lock) */ + ret = __mmu_notifier_register(&mn->mn, mm); if (ret) { - kfree(mmu); + kfree(mn); return ERR_PTR(ret); } - /* Protected by dev->struct_mutex */ - hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm); - return mmu; + return mn; } -static void -__i915_mmu_notifier_destroy_worker(struct work_struct *work) +static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn) { - struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work); - mmu_notifier_unregister(&mmu->mn, mmu->mm); - kfree(mmu); -} - -static void -__i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu) -{ - lockdep_assert_held(&mmu->dev->struct_mutex); - - /* Protected by dev->struct_mutex */ - hash_del(&mmu->node); - - /* Our lock ordering is: mmap_sem, mmu_notifier_scru, struct_mutex. - * We enter the function holding struct_mutex, therefore we need - * to drop our mutex prior to calling mmu_notifier_unregister in - * order to prevent lock inversion (and system-wide deadlock) - * between the mmap_sem and struct-mutex. Hence we defer the - * unregistration to a workqueue where we hold no locks. - */ - INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker); - schedule_work(&mmu->work); -} - -static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu) -{ - if (++mmu->serial == 0) - mmu->serial = 1; -} - -static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu) -{ - struct i915_mmu_object *mn; - - list_for_each_entry(mn, &mmu->linear, link) - if (mn->is_linear) - return true; - - return false; -} - -static void -i915_mmu_notifier_del(struct i915_mmu_notifier *mmu, - struct i915_mmu_object *mn) -{ - lockdep_assert_held(&mmu->dev->struct_mutex); - - spin_lock(&mmu->lock); - list_del(&mn->link); - if (mn->is_linear) - mmu->has_linear = i915_mmu_notifier_has_linear(mmu); - else - interval_tree_remove(&mn->it, &mmu->objects); - __i915_mmu_notifier_update_serial(mmu); - spin_unlock(&mmu->lock); - - /* Protected against _add() by dev->struct_mutex */ - if (--mmu->count == 0) - __i915_mmu_notifier_destroy(mmu); + if (++mn->serial == 0) + mn->serial = 1; } static int -i915_mmu_notifier_add(struct i915_mmu_notifier *mmu, - struct i915_mmu_object *mn) +i915_mmu_notifier_add(struct drm_device *dev, + struct i915_mmu_notifier *mn, + struct i915_mmu_object *mo) { struct interval_tree_node *it; int ret; - ret = i915_mutex_lock_interruptible(mmu->dev); + ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; @@ -291,11 +213,11 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu, * remove the objects from the interval tree) before we do * the check for overlapping objects. 
*/ - i915_gem_retire_requests(mmu->dev); + i915_gem_retire_requests(dev); - spin_lock(&mmu->lock); - it = interval_tree_iter_first(&mmu->objects, - mn->it.start, mn->it.last); + spin_lock(&mn->lock); + it = interval_tree_iter_first(&mn->objects, + mo->it.start, mo->it.last); if (it) { struct drm_i915_gem_object *obj; @@ -312,86 +234,122 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu, obj = container_of(it, struct i915_mmu_object, it)->obj; if (!obj->userptr.workers) - mmu->has_linear = mn->is_linear = true; + mn->has_linear = mo->is_linear = true; else ret = -EAGAIN; } else - interval_tree_insert(&mn->it, &mmu->objects); + interval_tree_insert(&mo->it, &mn->objects); if (ret == 0) { - list_add(&mn->link, &mmu->linear); - __i915_mmu_notifier_update_serial(mmu); + list_add(&mo->link, &mn->linear); + __i915_mmu_notifier_update_serial(mn); } - spin_unlock(&mmu->lock); - mutex_unlock(&mmu->dev->struct_mutex); + spin_unlock(&mn->lock); + mutex_unlock(&dev->struct_mutex); return ret; } +static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn) +{ + struct i915_mmu_object *mo; + + list_for_each_entry(mo, &mn->linear, link) + if (mo->is_linear) + return true; + + return false; +} + +static void +i915_mmu_notifier_del(struct i915_mmu_notifier *mn, + struct i915_mmu_object *mo) +{ + spin_lock(&mn->lock); + list_del(&mo->link); + if (mo->is_linear) + mn->has_linear = i915_mmu_notifier_has_linear(mn); + else + interval_tree_remove(&mo->it, &mn->objects); + __i915_mmu_notifier_update_serial(mn); + spin_unlock(&mn->lock); +} + static void i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) { - struct i915_mmu_object *mn; + struct i915_mmu_object *mo; - mn = obj->userptr.mn; - if (mn == NULL) + mo = obj->userptr.mmu_object; + if (mo == NULL) return; - i915_mmu_notifier_del(mn->mmu, mn); - obj->userptr.mn = NULL; + i915_mmu_notifier_del(mo->mn, mo); + kfree(mo); + + obj->userptr.mmu_object = NULL; +} + +static struct i915_mmu_notifier * +i915_mmu_notifier_find(struct i915_mm_struct *mm) +{ + if (mm->mn == NULL) { + down_write(&mm->mm->mmap_sem); + mutex_lock(&to_i915(mm->dev)->mm_lock); + if (mm->mn == NULL) + mm->mn = i915_mmu_notifier_create(mm->mm); + mutex_unlock(&to_i915(mm->dev)->mm_lock); + up_write(&mm->mm->mmap_sem); + } + return mm->mn; } static int i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, unsigned flags) { - struct i915_mmu_notifier *mmu; - struct i915_mmu_object *mn; + struct i915_mmu_notifier *mn; + struct i915_mmu_object *mo; int ret; if (flags & I915_USERPTR_UNSYNCHRONIZED) return capable(CAP_SYS_ADMIN) ? 
0 : -EPERM; - down_write(&obj->userptr.mm->mmap_sem); - ret = i915_mutex_lock_interruptible(obj->base.dev); - if (ret == 0) { - mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm); - if (!IS_ERR(mmu)) - mmu->count++; /* preemptive add to act as a refcount */ - else - ret = PTR_ERR(mmu); - mutex_unlock(&obj->base.dev->struct_mutex); - } - up_write(&obj->userptr.mm->mmap_sem); - if (ret) - return ret; + if (WARN_ON(obj->userptr.mm == NULL)) + return -EINVAL; - mn = kzalloc(sizeof(*mn), GFP_KERNEL); - if (mn == NULL) { - ret = -ENOMEM; - goto destroy_mmu; - } + mn = i915_mmu_notifier_find(obj->userptr.mm); + if (IS_ERR(mn)) + return PTR_ERR(mn); - mn->mmu = mmu; - mn->it.start = obj->userptr.ptr; - mn->it.last = mn->it.start + obj->base.size - 1; - mn->obj = obj; + mo = kzalloc(sizeof(*mo), GFP_KERNEL); + if (mo == NULL) + return -ENOMEM; - ret = i915_mmu_notifier_add(mmu, mn); - if (ret) - goto free_mn; + mo->mn = mn; + mo->it.start = obj->userptr.ptr; + mo->it.last = mo->it.start + obj->base.size - 1; + mo->obj = obj; - obj->userptr.mn = mn; + ret = i915_mmu_notifier_add(obj->base.dev, mn, mo); + if (ret) { + kfree(mo); + return ret; + } + + obj->userptr.mmu_object = mo; return 0; +} + +static void +i915_mmu_notifier_free(struct i915_mmu_notifier *mn, + struct mm_struct *mm) +{ + if (mn == NULL) + return; -free_mn: + mmu_notifier_unregister(&mn->mn, mm); kfree(mn); -destroy_mmu: - mutex_lock(&obj->base.dev->struct_mutex); - if (--mmu->count == 0) - __i915_mmu_notifier_destroy(mmu); - mutex_unlock(&obj->base.dev->struct_mutex); - return ret; } #else @@ -413,15 +371,114 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, return 0; } + +static void +i915_mmu_notifier_free(struct i915_mmu_notifier *mn, + struct mm_struct *mm) +{ +} + #endif +static struct i915_mm_struct * +__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real) +{ + struct i915_mm_struct *mm; + + /* Protected by dev_priv->mm_lock */ + hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real) + if (mm->mm == real) + return mm; + + return NULL; +} + +static int +i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct i915_mm_struct *mm; + int ret = 0; + + /* During release of the GEM object we hold the struct_mutex. This + * precludes us from calling mmput() at that time as that may be + * the last reference and so call exit_mmap(). exit_mmap() will + * attempt to reap the vma, and if we were holding a GTT mmap + * would then call drm_gem_vm_close() and attempt to reacquire + * the struct mutex. So in order to avoid that recursion, we have + * to defer releasing the mm reference until after we drop the + * struct_mutex, i.e. we need to schedule a worker to do the clean + * up. 
+ */ + mutex_lock(&dev_priv->mm_lock); + mm = __i915_mm_struct_find(dev_priv, current->mm); + if (mm == NULL) { + mm = kmalloc(sizeof(*mm), GFP_KERNEL); + if (mm == NULL) { + ret = -ENOMEM; + goto out; + } + + kref_init(&mm->kref); + mm->dev = obj->base.dev; + + mm->mm = current->mm; + atomic_inc(¤t->mm->mm_count); + + mm->mn = NULL; + + /* Protected by dev_priv->mm_lock */ + hash_add(dev_priv->mm_structs, + &mm->node, (unsigned long)mm->mm); + } else + kref_get(&mm->kref); + + obj->userptr.mm = mm; +out: + mutex_unlock(&dev_priv->mm_lock); + return ret; +} + +static void +__i915_mm_struct_free__worker(struct work_struct *work) +{ + struct i915_mm_struct *mm = container_of(work, typeof(*mm), work); + i915_mmu_notifier_free(mm->mn, mm->mm); + mmdrop(mm->mm); + kfree(mm); +} + +static void +__i915_mm_struct_free(struct kref *kref) +{ + struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref); + + /* Protected by dev_priv->mm_lock */ + hash_del(&mm->node); + mutex_unlock(&to_i915(mm->dev)->mm_lock); + + INIT_WORK(&mm->work, __i915_mm_struct_free__worker); + schedule_work(&mm->work); +} + +static void +i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj) +{ + if (obj->userptr.mm == NULL) + return; + + kref_put_mutex(&obj->userptr.mm->kref, + __i915_mm_struct_free, + &to_i915(obj->base.dev)->mm_lock); + obj->userptr.mm = NULL; +} + struct get_pages_work { struct work_struct work; struct drm_i915_gem_object *obj; struct task_struct *task; }; - #if IS_ENABLED(CONFIG_SWIOTLB) #define swiotlb_active() swiotlb_nr_tbl() #else @@ -479,7 +536,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) if (pvec == NULL) pvec = drm_malloc_ab(num_pages, sizeof(struct page *)); if (pvec != NULL) { - struct mm_struct *mm = obj->userptr.mm; + struct mm_struct *mm = obj->userptr.mm->mm; down_read(&mm->mmap_sem); while (pinned < num_pages) { @@ -545,7 +602,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) pvec = NULL; pinned = 0; - if (obj->userptr.mm == current->mm) { + if (obj->userptr.mm->mm == current->mm) { pvec = kmalloc(num_pages*sizeof(struct page *), GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); if (pvec == NULL) { @@ -651,17 +708,13 @@ static void i915_gem_userptr_release(struct drm_i915_gem_object *obj) { i915_gem_userptr_release__mmu_notifier(obj); - - if (obj->userptr.mm) { - mmput(obj->userptr.mm); - obj->userptr.mm = NULL; - } + i915_gem_userptr_release__mm_struct(obj); } static int i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) { - if (obj->userptr.mn) + if (obj->userptr.mmu_object) return 0; return i915_gem_userptr_init__mmu_notifier(obj, 0); @@ -736,7 +789,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file return -ENODEV; } - /* Allocate the new object */ obj = i915_gem_object_alloc(dev); if (obj == NULL) return -ENOMEM; @@ -754,8 +806,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file * at binding. This means that we need to hook into the mmu_notifier * in order to detect if the mmu is destroyed. 
*/ - ret = -ENOMEM; - if ((obj->userptr.mm = get_task_mm(current))) + ret = i915_gem_userptr_init__mm_struct(obj); + if (ret == 0) ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags); if (ret == 0) ret = drm_gem_handle_create(file, &obj->base, &handle); @@ -772,9 +824,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file int i915_gem_init_userptr(struct drm_device *dev) { -#if defined(CONFIG_MMU_NOTIFIER) struct drm_i915_private *dev_priv = to_i915(dev); - hash_init(dev_priv->mmu_notifiers); -#endif + mutex_init(&dev_priv->mm_lock); + hash_init(dev_priv->mm_structs); return 0; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 390ccc2a3096..0050ee9470f1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1189,8 +1189,8 @@ static void i915_hotplug_work_func(struct work_struct *work) * some connectors */ if (hpd_disabled) { drm_kms_helper_poll_enable(dev); - mod_timer(&dev_priv->hotplug_reenable_timer, - jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); + mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work, + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); } spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -1213,11 +1213,6 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_kms_helper_hotplug_event(dev); } -static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv) -{ - del_timer_sync(&dev_priv->hotplug_reenable_timer); -} - static void ironlake_rps_change_irq_handler(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3892,8 +3887,6 @@ static void gen8_irq_uninstall(struct drm_device *dev) if (!dev_priv) return; - intel_hpd_irq_uninstall(dev_priv); - gen8_irq_reset(dev); } @@ -3908,8 +3901,6 @@ static void valleyview_irq_uninstall(struct drm_device *dev) I915_WRITE(VLV_MASTER_IER, 0); - intel_hpd_irq_uninstall(dev_priv); - for_each_pipe(pipe) I915_WRITE(PIPESTAT(pipe), 0xffff); @@ -3988,8 +3979,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev) if (!dev_priv) return; - intel_hpd_irq_uninstall(dev_priv); - ironlake_irq_reset(dev); } @@ -4360,8 +4349,6 @@ static void i915_irq_uninstall(struct drm_device * dev) struct drm_i915_private *dev_priv = dev->dev_private; int pipe; - intel_hpd_irq_uninstall(dev_priv); - if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -4598,8 +4585,6 @@ static void i965_irq_uninstall(struct drm_device * dev) if (!dev_priv) return; - intel_hpd_irq_uninstall(dev_priv); - I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -4615,14 +4600,18 @@ static void i965_irq_uninstall(struct drm_device * dev) I915_WRITE(IIR, I915_READ(IIR)); } -static void intel_hpd_irq_reenable(unsigned long data) +static void intel_hpd_irq_reenable(struct work_struct *work) { - struct drm_i915_private *dev_priv = (struct drm_i915_private *)data; + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), + hotplug_reenable_work.work); struct drm_device *dev = dev_priv->dev; struct drm_mode_config *mode_config = &dev->mode_config; unsigned long irqflags; int i; + intel_runtime_pm_get(dev_priv); + spin_lock_irqsave(&dev_priv->irq_lock, irqflags); for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { struct drm_connector *connector; @@ -4648,6 +4637,8 @@ static void intel_hpd_irq_reenable(unsigned long data) if (dev_priv->display.hpd_irq_setup) 
dev_priv->display.hpd_irq_setup(dev); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + + intel_runtime_pm_put(dev_priv); } void intel_irq_init(struct drm_device *dev) @@ -4670,8 +4661,8 @@ void intel_irq_init(struct drm_device *dev) setup_timer(&dev_priv->gpu_error.hangcheck_timer, i915_hangcheck_elapsed, (unsigned long) dev); - setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable, - (unsigned long) dev_priv); + INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, + intel_hpd_irq_reenable); pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e4d7607da2c4..f29b44c86a2f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -334,16 +334,20 @@ #define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) -#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) + +#define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2)) +#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) #define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) -#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) -#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) +#define BLT_WRITE_A (2<<20) +#define BLT_WRITE_RGB (1<<20) +#define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A) #define BLT_DEPTH_8 (0<<24) #define BLT_DEPTH_16_565 (1<<24) #define BLT_DEPTH_16_1555 (2<<24) #define BLT_DEPTH_32 (3<<24) -#define BLT_ROP_GXCOPY (0xcc<<16) +#define BLT_ROP_SRC_COPY (0xcc<<16) +#define BLT_ROP_COLOR_COPY (0xf0<<16) #define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ #define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index a66955037e4e..eee79e1c3222 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1123,7 +1123,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) } } -static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) +static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) { DRM_DEBUG_KMS("Falling back to manually reading VBT from " "VBIOS ROM for %s\n", diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 2efaf8e8d9c4..9212e6504e0f 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -699,16 +699,21 @@ intel_crt_detect(struct drm_connector *connector, bool force) goto out; } + drm_modeset_acquire_init(&ctx, 0); + /* for pre-945g platforms use load detect */ if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) { if (intel_crt_detect_ddc(connector)) status = connector_status_connected; else status = intel_crt_load_detect(crt); - intel_release_load_detect_pipe(connector, &tmp, &ctx); + intel_release_load_detect_pipe(connector, &tmp); } else status = connector_status_unknown; + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + out: intel_display_power_put(dev_priv, power_domain); return status; @@ -799,7 +804,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { .destroy = intel_encoder_destroy, }; -static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id) +static int intel_no_crt_dmi_callback(const struct dmi_system_id *id) { DRM_INFO("Skipping CRT initialization 
for %s\n", id->ident); return 1; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 018fb7222f60..d8324c69fa86 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2233,6 +2233,15 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, if (need_vtd_wa(dev) && alignment < 256 * 1024) alignment = 256 * 1024; + /* + * Global gtt pte registers are special registers which actually forward + * writes to a chunk of system memory. Which means that there is no risk + * that the register values disappear as soon as we call + * intel_runtime_pm_put(), so it is correct to wrap only the + * pin/unpin/fence and not more. + */ + intel_runtime_pm_get(dev_priv); + dev_priv->mm.interruptible = false; ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); if (ret) @@ -2250,12 +2259,14 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, i915_gem_object_pin_fence(obj); dev_priv->mm.interruptible = true; + intel_runtime_pm_put(dev_priv); return 0; err_unpin: i915_gem_object_unpin_from_display_plane(obj); err_interruptible: dev_priv->mm.interruptible = true; + intel_runtime_pm_put(dev_priv); return ret; } @@ -4188,10 +4199,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) intel_set_pch_fifo_underrun_reporting(dev, pipe, false); intel_disable_pipe(dev_priv, pipe); - - if (intel_crtc->config.dp_encoder_is_mst) - intel_ddi_set_vc_payload_alloc(crtc, false); - ironlake_pfit_disable(intel_crtc); for_each_encoder_on_crtc(dev, crtc, encoder) @@ -4256,6 +4263,9 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false); intel_disable_pipe(dev_priv, pipe); + if (intel_crtc->config.dp_encoder_is_mst) + intel_ddi_set_vc_payload_alloc(crtc, false); + intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); ironlake_pfit_disable(intel_crtc); @@ -8240,6 +8250,15 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, goto fail_locked; } + /* + * Global gtt pte registers are special registers which actually + * forward writes to a chunk of system memory. Which means that + * there is no risk that the register values disappear as soon + * as we call intel_runtime_pm_put(), so it is correct to wrap + * only the pin/unpin/fence and not more. + */ + intel_runtime_pm_get(dev_priv); + /* Note that the w/a also requires 2 PTE of padding following * the bo. We currently fill all unused PTE with the shadow * page and so we should always have valid PTE following the @@ -8252,16 +8271,20 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL); if (ret) { DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n"); + intel_runtime_pm_put(dev_priv); goto fail_locked; } ret = i915_gem_object_put_fence(obj); if (ret) { DRM_DEBUG_KMS("failed to release fence for cursor"); + intel_runtime_pm_put(dev_priv); goto fail_unpin; } addr = i915_gem_obj_ggtt_offset(obj); + + intel_runtime_pm_put(dev_priv); } else { int align = IS_I830(dev) ? 
16 * 1024 : 256; ret = i915_gem_object_attach_phys(obj, align); @@ -8462,8 +8485,6 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, connector->base.id, connector->name, encoder->base.id, encoder->name); - drm_modeset_acquire_init(ctx, 0); - retry: ret = drm_modeset_lock(&config->connection_mutex, ctx); if (ret) @@ -8502,10 +8523,14 @@ retry: i++; if (!(encoder->possible_crtcs & (1 << i))) continue; - if (!possible_crtc->enabled) { - crtc = possible_crtc; - break; - } + if (possible_crtc->enabled) + continue; + /* This can occur when applying the pipe A quirk on resume. */ + if (to_intel_crtc(possible_crtc)->new_enabled) + continue; + + crtc = possible_crtc; + break; } /* @@ -8574,15 +8599,11 @@ fail_unlock: goto retry; } - drm_modeset_drop_locks(ctx); - drm_modeset_acquire_fini(ctx); - return false; } void intel_release_load_detect_pipe(struct drm_connector *connector, - struct intel_load_detect_pipe *old, - struct drm_modeset_acquire_ctx *ctx) + struct intel_load_detect_pipe *old) { struct intel_encoder *intel_encoder = intel_attached_encoder(connector); @@ -8606,17 +8627,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, drm_framebuffer_unreference(old->release_fb); } - goto unlock; return; } /* Switch crtc and encoder back off if necessary */ if (old->dpms_mode != DRM_MODE_DPMS_ON) connector->funcs->dpms(connector, old->dpms_mode); - -unlock: - drm_modeset_drop_locks(ctx); - drm_modeset_acquire_fini(ctx); } static int i9xx_pll_refclk(struct drm_device *dev, @@ -11700,8 +11716,8 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, }; const struct drm_rect clip = { /* integer pixels */ - .x2 = intel_crtc->config.pipe_src_w, - .y2 = intel_crtc->config.pipe_src_h, + .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, + .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0, }; bool visible; int ret; @@ -12488,6 +12504,9 @@ static struct intel_quirk intel_quirks[] = { /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, + /* Acer C720 Chromebook (Core i3 4005U) */ + { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, + /* Toshiba CB35 Chromebook (Celeron 2955U) */ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, @@ -12659,7 +12678,7 @@ static void intel_enable_pipe_a(struct drm_device *dev) struct intel_connector *connector; struct drm_connector *crt = NULL; struct intel_load_detect_pipe load_detect_temp; - struct drm_modeset_acquire_ctx ctx; + struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; /* We can't just switch on the pipe A, we need to set things up with a * proper mode and output configuration. As a gross hack, enable pipe A @@ -12676,10 +12695,8 @@ static void intel_enable_pipe_a(struct drm_device *dev) if (!crt) return; - if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx)) - intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx); - - + if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx)) + intel_release_load_detect_pipe(crt, &load_detect_temp); } static bool @@ -13112,7 +13129,7 @@ void intel_modeset_cleanup(struct drm_device *dev) * experience fancy races otherwise. 
*/ drm_irq_uninstall(dev); - cancel_work_sync(&dev_priv->hotplug_work); + intel_hpd_cancel_work(dev_priv); dev_priv->pm._irqs_disabled = true; /* diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index ee3942f0b068..fdff1d420c14 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1631,6 +1631,10 @@ static void intel_dp_get_config(struct intel_encoder *encoder, pipe_config->adjusted_mode.flags |= flags; + if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) && + tmp & DP_COLOR_RANGE_16_235) + pipe_config->limited_color_range = true; + pipe_config->has_dp_encoder = true; intel_dp_get_m_n(crtc, pipe_config); @@ -3553,6 +3557,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) if (WARN_ON(!intel_encoder->base.crtc)) return; + if (!to_intel_crtc(intel_encoder->base.crtc)->active) + return; + /* Try to read receiver status if the link appears to be up */ if (!intel_dp_get_link_status(intel_dp, link_status)) { return; @@ -3658,24 +3665,12 @@ ironlake_dp_detect(struct intel_dp *intel_dp) return intel_dp_detect_dpcd(intel_dp); } -static enum drm_connector_status -g4x_dp_detect(struct intel_dp *intel_dp) +static int g4x_digital_port_connected(struct drm_device *dev, + struct intel_digital_port *intel_dig_port) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); uint32_t bit; - /* Can't disconnect eDP, but you can close the lid... */ - if (is_edp(intel_dp)) { - enum drm_connector_status status; - - status = intel_panel_detect(dev); - if (status == connector_status_unknown) - status = connector_status_connected; - return status; - } - if (IS_VALLEYVIEW(dev)) { switch (intel_dig_port->port) { case PORT_B: @@ -3688,7 +3683,7 @@ g4x_dp_detect(struct intel_dp *intel_dp) bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; break; default: - return connector_status_unknown; + return -EINVAL; } } else { switch (intel_dig_port->port) { @@ -3702,11 +3697,36 @@ g4x_dp_detect(struct intel_dp *intel_dp) bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; break; default: - return connector_status_unknown; + return -EINVAL; } } if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) + return 0; + return 1; +} + +static enum drm_connector_status +g4x_dp_detect(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + int ret; + + /* Can't disconnect eDP, but you can close the lid... 
*/ + if (is_edp(intel_dp)) { + enum drm_connector_status status; + + status = intel_panel_detect(dev); + if (status == connector_status_unknown) + status = connector_status_connected; + return status; + } + + ret = g4x_digital_port_connected(dev, intel_dig_port); + if (ret == -EINVAL) + return connector_status_unknown; + else if (ret == 0) return connector_status_disconnected; return intel_dp_detect_dpcd(intel_dp); @@ -4003,6 +4023,16 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) kfree(intel_dig_port); } +static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + + if (!is_edp(intel_dp)) + return; + + edp_panel_vdd_off_sync(intel_dp); +} + static void intel_dp_encoder_reset(struct drm_encoder *encoder) { intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder)); @@ -4037,18 +4067,30 @@ bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) { struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_encoder *intel_encoder = &intel_dig_port->base; struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - int ret; + enum intel_display_power_domain power_domain; + bool ret = true; + if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port, long_hpd ? "long" : "short"); + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + if (long_hpd) { - if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) - goto mst_fail; + + if (HAS_PCH_SPLIT(dev)) { + if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) + goto mst_fail; + } else { + if (g4x_digital_port_connected(dev, intel_dig_port) != 1) + goto mst_fail; + } if (!intel_dp_get_dpcd(intel_dp)) { goto mst_fail; @@ -4061,8 +4103,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) } else { if (intel_dp->is_mst) { - ret = intel_dp_check_mst_status(intel_dp); - if (ret == -EINVAL) + if (intel_dp_check_mst_status(intel_dp) == -EINVAL) goto mst_fail; } @@ -4076,7 +4117,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) drm_modeset_unlock(&dev->mode_config.connection_mutex); } } - return false; + ret = false; + goto put_power; mst_fail: /* if we were in MST mode, and device is not there get out of MST mode */ if (intel_dp->is_mst) { @@ -4084,7 +4126,10 @@ mst_fail: intel_dp->is_mst = false; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); } - return true; +put_power: + intel_display_power_put(dev_priv, power_domain); + + return ret; } /* Return which DP Port should be selected for Transcoder DP control */ @@ -4722,6 +4767,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) intel_encoder->disable = intel_disable_dp; intel_encoder->get_hw_state = intel_dp_get_hw_state; intel_encoder->get_config = intel_dp_get_config; + intel_encoder->suspend = intel_dp_encoder_suspend; if (IS_CHERRYVIEW(dev)) { intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; intel_encoder->pre_enable = chv_pre_enable_dp; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 4b2664bd5b81..b8c8bbd8e5f9 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -153,6 +153,12 @@ struct intel_encoder { * be set correctly before calling this function. 
*/ void (*get_config)(struct intel_encoder *, struct intel_crtc_config *pipe_config); + /* + * Called during system suspend after all pending requests for the + * encoder are flushed (for example for DP AUX transactions) and + * device interrupts are disabled. + */ + void (*suspend)(struct intel_encoder *); int crtc_mask; enum hpd_pin hpd_pin; }; @@ -830,8 +836,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx); void intel_release_load_detect_pipe(struct drm_connector *connector, - struct intel_load_detect_pipe *old, - struct drm_modeset_acquire_ctx *ctx); + struct intel_load_detect_pipe *old); int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_i915_gem_object *obj, struct intel_engine_cs *pipelined); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index f9151f6641d9..ca34de7f6a7b 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -712,7 +712,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); - struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; u32 tmp, flags = 0; int dotclock; @@ -734,6 +735,10 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, if (tmp & HDMI_MODE_SELECT_HDMI) pipe_config->has_audio = true; + if (!HAS_PCH_SPLIT(dev) && + tmp & HDMI_COLOR_RANGE_16_235) + pipe_config->limited_color_range = true; + pipe_config->adjusted_mode.flags |= flags; if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 881361c0f27e..fdf40267249c 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -538,7 +538,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = { .destroy = intel_encoder_destroy, }; -static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) +static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id) { DRM_INFO("Skipping LVDS initialization for %s\n", id->ident); return 1; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 59b028f0b1e8..8e374449c6b5 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -801,7 +801,7 @@ static void pch_enable_backlight(struct intel_connector *connector) cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2); if (cpu_ctl2 & BLM_PWM_ENABLE) { - WARN(1, "cpu backlight already enabled\n"); + DRM_DEBUG_KMS("cpu backlight already enabled\n"); cpu_ctl2 &= ~BLM_PWM_ENABLE; I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2); } @@ -845,7 +845,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector) ctl = I915_READ(BLC_PWM_CTL); if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) { - WARN(1, "backlight already enabled\n"); + DRM_DEBUG_KMS("backlight already enabled\n"); I915_WRITE(BLC_PWM_CTL, 0); } @@ -876,7 +876,7 @@ static void i965_enable_backlight(struct intel_connector *connector) ctl2 = I915_READ(BLC_PWM_CTL2); if (ctl2 & BLM_PWM_ENABLE) { - WARN(1, "backlight already enabled\n"); + DRM_DEBUG_KMS("backlight already enabled\n"); ctl2 &= ~BLM_PWM_ENABLE; I915_WRITE(BLC_PWM_CTL2, ctl2); } @@ -910,7 +910,7 @@ static void vlv_enable_backlight(struct intel_connector *connector) ctl2 = 
I915_READ(VLV_BLC_PWM_CTL2(pipe)); if (ctl2 & BLM_PWM_ENABLE) { - WARN(1, "backlight already enabled\n"); + DRM_DEBUG_KMS("backlight already enabled\n"); ctl2 &= ~BLM_PWM_ENABLE; I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 16371a444426..47a126a0493f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -1363,54 +1363,66 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring, /* Just userspace ABI convention to limit the wa batch bo to a resonable size */ #define I830_BATCH_LIMIT (256*1024) +#define I830_TLB_ENTRIES (2) +#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) static int i830_dispatch_execbuffer(struct intel_engine_cs *ring, u64 offset, u32 len, unsigned flags) { + u32 cs_offset = ring->scratch.gtt_offset; int ret; - if (flags & I915_DISPATCH_PINNED) { - ret = intel_ring_begin(ring, 4); - if (ret) - return ret; + ret = intel_ring_begin(ring, 6); + if (ret) + return ret; - intel_ring_emit(ring, MI_BATCH_BUFFER); - intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); - intel_ring_emit(ring, offset + len - 8); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); - } else { - u32 cs_offset = ring->scratch.gtt_offset; + /* Evict the invalid PTE TLBs */ + intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA); + intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096); + intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */ + intel_ring_emit(ring, cs_offset); + intel_ring_emit(ring, 0xdeadbeef); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + if ((flags & I915_DISPATCH_PINNED) == 0) { if (len > I830_BATCH_LIMIT) return -ENOSPC; - ret = intel_ring_begin(ring, 9+3); + ret = intel_ring_begin(ring, 6 + 2); if (ret) return ret; - /* Blit the batch (which has now all relocs applied) to the stable batch - * scratch bo area (so that the CS never stumbles over its tlb - * invalidation bug) ... */ - intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | - XY_SRC_COPY_BLT_WRITE_ALPHA | - XY_SRC_COPY_BLT_WRITE_RGB); - intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024); + + /* Blit the batch (which has now all relocs applied) to the + * stable batch scratch bo area (so that the CS never + * stumbles over its tlb invalidation bug) ... + */ + intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA); + intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096); + intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096); intel_ring_emit(ring, cs_offset); - intel_ring_emit(ring, 0); intel_ring_emit(ring, 4096); intel_ring_emit(ring, offset); + intel_ring_emit(ring, MI_FLUSH); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); /* ... and execute it. */ - intel_ring_emit(ring, MI_BATCH_BUFFER); - intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); - intel_ring_emit(ring, cs_offset + len - 8); - intel_ring_advance(ring); + offset = cs_offset; } + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + + intel_ring_emit(ring, MI_BATCH_BUFFER); + intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE)); + intel_ring_emit(ring, offset + len - 8); + intel_ring_emit(ring, MI_NOOP); + intel_ring_advance(ring); + return 0; } @@ -2200,7 +2212,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) /* Workaround batchbuffer to combat CS tlb bug. */ if (HAS_BROKEN_CS_TLB(dev)) { - obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); + obj = i915_gem_alloc_object(dev, I830_WA_SIZE); if (obj == NULL) { DRM_ERROR("Failed to allocate batch bo\n"); return -ENOMEM; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index e211eef4b7e4..c14341ca3ef9 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder) struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + /* Prevents vblank waits from timing out in intel_tv_detect_type() */ + intel_wait_for_vblank(encoder->base.dev, + to_intel_crtc(encoder->base.crtc)->pipe); + I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); } @@ -1311,6 +1315,7 @@ intel_tv_detect(struct drm_connector *connector, bool force) { struct drm_display_mode mode; struct intel_tv *intel_tv = intel_attached_tv(connector); + enum drm_connector_status status; int type; DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", @@ -1323,16 +1328,24 @@ intel_tv_detect(struct drm_connector *connector, bool force) struct intel_load_detect_pipe tmp; struct drm_modeset_acquire_ctx ctx; + drm_modeset_acquire_init(&ctx, 0); + if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) { type = intel_tv_detect_type(intel_tv, connector); - intel_release_load_detect_pipe(connector, &tmp, &ctx); + intel_release_load_detect_pipe(connector, &tmp); + status = type < 0 ? + connector_status_disconnected : + connector_status_connected; } else - return connector_status_unknown; + status = connector_status_unknown; + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); } else return connector->status; - if (type < 0) - return connector_status_disconnected; + if (status != connector_status_connected) + return status; intel_tv->type = type; intel_tv_find_better_format(connector); |