author     Dave Airlie <airlied@redhat.com>  2010-10-26 01:23:22 +0200
committer  Dave Airlie <airlied@redhat.com>  2010-10-26 01:23:22 +0200
commit     e3ce8a0b277438591844847ac7c89a980b4cfa6d (patch)
tree       c9bf47675403a54be2e0c54df9357d2b9c65326b /drivers
parent     drm/ttm: Optimize delayed buffer destruction (diff)
parent     drm/i915: Move gpu_write_list to per-ring (diff)
Merge remote branch 'intel/drm-intel-next' of ../drm-next into drm-core-next
* 'intel/drm-intel-next' of ../drm-next: (63 commits)
drm/i915: Move gpu_write_list to per-ring
drm/i915: Invalidate the to-ring, flush the old-ring when updating domains
drm/i915/ringbuffer: Write the value passed in to the tail register
agp/intel: Restore valid PTE bit for Sandybridge after bdd3072
drm/i915: Fix flushing regression from 9af90d19f
drm/i915/sdvo: Remove unused encoding member
i915: enable AVI infoframe for intel_hdmi.c [v4]
drm/i915: Fix current fb blocking for page flip
drm/i915: IS_IRONLAKE is synonymous with gen == 5
drm/i915: Enable SandyBridge blitter ring
drm/i915/ringbuffer: Remove broken intel_fill_struct()
drm/i915/ringbuffer: Fix emit batch buffer regression from 8187a2b
drm/i915: Copy the updated reloc->presumed_offset back to the user
drm/i915: Track objects in global active list (as well as per-ring)
drm/i915: Simplify most HAS_BSD() checks
drm/i915: cache the last object lookup during pin_and_relocate()
drm/i915: Do interruptible mutex lock first to avoid locking for unreference
drivers: gpu: drm: i915: Fix a typo.
agp/intel: Also add B43.1 to list of supported devices
drm/i915: rearrange mutex acquisition for pread
...
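
Among the changes above, the new Sandybridge blitter ring is exposed to userspace through a new getparam, I915_PARAM_HAS_BLT (see the i915_getparam() hunk in the diff below). A minimal probe from userspace might look like the following sketch, assuming libdrm/kernel headers new enough to define the parameter:

/* Hedged sketch: ask the kernel whether the blitter ring exists. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* getparam ABI; pulls in drm.h */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	int has_blt = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_BLT,
		.value = &has_blt,
	};

	if (fd < 0)
		return 1;
	/* The new case in i915_getparam() answers with HAS_BLT(dev). */
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("blitter ring: %s\n", has_blt ? "present" : "absent");
	close(fd);
	return 0;
}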
Diffstat (limited to 'drivers')
26 files changed, 1970 insertions, 1365 deletions
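
The EDID changes at the top of the diff factor CEA-extension lookup into drm_find_cea_extension() and add drm_detect_monitor_audio(), whose kernel-doc spells out the policy: report audio support when the CEA block's 'basic audio' flag is set, or, failing that, when any CEA audio data block is present. A condensed sketch of the data-block walk behind the second half (the tag/length encoding is from CEA-861; the helper name here is illustrative, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* Byte 2 of a CEA extension is the offset of the detailed timings;
 * data blocks occupy bytes 4..end, each led by a header byte with
 * the tag in bits 7:5 and the payload length in bits 4:0. */
static bool cea_has_audio_block(const uint8_t *cea_ext)
{
	int i, end = cea_ext[2];

	for (i = 4; i < end; i += (cea_ext[i] & 0x1f) + 1)
		if ((cea_ext[i] >> 5) == 0x01)	/* AUDIO_BLOCK tag */
			return true;
	return false;
}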
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 5cd2221ab472..e72f49d52202 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -895,6 +895,7 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_G45_HB), ID(PCI_DEVICE_ID_INTEL_G41_HB), ID(PCI_DEVICE_ID_INTEL_B43_HB), + ID(PCI_DEVICE_ID_INTEL_B43_1_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 0c8ff6d8824b..6b6760ea2435 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1211,13 +1211,13 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry, u32 pte_flags; if (type_mask == AGP_USER_UNCACHED_MEMORY) - pte_flags = GEN6_PTE_UNCACHED; + pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { - pte_flags = GEN6_PTE_LLC; + pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; if (gfdt) pte_flags |= GEN6_PTE_GFDT; } else { /* set 'normal'/'cached' to LLC by default */ - pte_flags = GEN6_PTE_LLC_MLC; + pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; if (gfdt) pte_flags |= GEN6_PTE_GFDT; } diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index fd033ebbdf84..c1a26217a530 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1267,34 +1267,51 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, } #define HDMI_IDENTIFIER 0x000C03 +#define AUDIO_BLOCK 0x01 #define VENDOR_BLOCK 0x03 +#define EDID_BASIC_AUDIO (1 << 6) + /** - * drm_detect_hdmi_monitor - detect whether monitor is hdmi. - * @edid: monitor EDID information - * - * Parse the CEA extension according to CEA-861-B. - * Return true if HDMI, false if not or unknown. + * Search EDID for CEA extension block. */ -bool drm_detect_hdmi_monitor(struct edid *edid) +static u8 *drm_find_cea_extension(struct edid *edid) { - char *edid_ext = NULL; - int i, hdmi_id; - int start_offset, end_offset; - bool is_hdmi = false; + u8 *edid_ext = NULL; + int i; /* No EDID or EDID extensions */ if (edid == NULL || edid->extensions == 0) - goto end; + return NULL; /* Find CEA extension */ for (i = 0; i < edid->extensions; i++) { - edid_ext = (char *)edid + EDID_LENGTH * (i + 1); - /* This block is CEA extension */ - if (edid_ext[0] == 0x02) + edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1); + if (edid_ext[0] == CEA_EXT) break; } if (i == edid->extensions) + return NULL; + + return edid_ext; +} + +/** + * drm_detect_hdmi_monitor - detect whether monitor is hdmi. + * @edid: monitor EDID information + * + * Parse the CEA extension according to CEA-861-B. + * Return true if HDMI, false if not or unknown. + */ +bool drm_detect_hdmi_monitor(struct edid *edid) +{ + u8 *edid_ext; + int i, hdmi_id; + int start_offset, end_offset; + bool is_hdmi = false; + + edid_ext = drm_find_cea_extension(edid); + if (!edid_ext) goto end; /* Data block offset in CEA extension block */ @@ -1325,6 +1342,53 @@ end: EXPORT_SYMBOL(drm_detect_hdmi_monitor); /** + * drm_detect_monitor_audio - check monitor audio capability + * + * Monitor should have CEA extension block. + * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic + * audio' only. If there is any audio extension block and supported + * audio format, assume at least 'basic audio' support, even if 'basic + * audio' is not defined in EDID. 
+ * + */ +bool drm_detect_monitor_audio(struct edid *edid) +{ + u8 *edid_ext; + int i, j; + bool has_audio = false; + int start_offset, end_offset; + + edid_ext = drm_find_cea_extension(edid); + if (!edid_ext) + goto end; + + has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0); + + if (has_audio) { + DRM_DEBUG_KMS("Monitor has basic audio support\n"); + goto end; + } + + /* Data block offset in CEA extension block */ + start_offset = 4; + end_offset = edid_ext[2]; + + for (i = start_offset; i < end_offset; + i += ((edid_ext[i] & 0x1f) + 1)) { + if ((edid_ext[i] >> 5) == AUDIO_BLOCK) { + has_audio = true; + for (j = 1; j < (edid_ext[i] & 0x1f); j += 3) + DRM_DEBUG_KMS("CEA audio format %d\n", + (edid_ext[i + j] >> 3) & 0xf); + goto end; + } + } +end: + return has_audio; +} +EXPORT_SYMBOL(drm_detect_monitor_audio); + +/** * drm_add_edid_modes - add modes from EDID data, if available * @connector: connector we're probing * @edid: edid data diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index f6e98dd416c9..fdc833d5cc7b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -35,6 +35,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ i915-$(CONFIG_COMPAT) += i915_ioc32.o +i915-$(CONFIG_ACPI) += intel_acpi.o + obj-$(CONFIG_DRM_I915) += i915.o CFLAGS_i915_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index d598070fb279..7698983577d1 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -41,8 +41,7 @@ #if defined(CONFIG_DEBUG_FS) enum { - RENDER_LIST, - BSD_LIST, + ACTIVE_LIST, FLUSHING_LIST, INACTIVE_LIST, PINNED_LIST, @@ -72,7 +71,6 @@ static int i915_capabilities(struct seq_file *m, void *data) B(is_pineview); B(is_broadwater); B(is_crestline); - B(is_ironlake); B(has_fbc); B(has_rc6); B(has_pipe_cxsr); @@ -81,6 +79,8 @@ static int i915_capabilities(struct seq_file *m, void *data) B(has_overlay); B(overlay_needs_physical); B(supports_tv); + B(has_bsd_ring); + B(has_blt_ring); #undef B return 0; @@ -125,6 +125,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (fence: %d)", obj->fence_reg); if (obj->gtt_space != NULL) seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset); + if (obj->ring != NULL) + seq_printf(m, " (%s)", obj->ring->name); } static int i915_gem_object_list_info(struct seq_file *m, void *data) @@ -143,13 +145,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) return ret; switch (list) { - case RENDER_LIST: - seq_printf(m, "Render:\n"); - head = &dev_priv->render_ring.active_list; - break; - case BSD_LIST: - seq_printf(m, "BSD:\n"); - head = &dev_priv->bsd_ring.active_list; + case ACTIVE_LIST: + seq_printf(m, "Active:\n"); + head = &dev_priv->mm.active_list; break; case INACTIVE_LIST: seq_printf(m, "Inactive:\n"); @@ -173,7 +171,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) } total_obj_size = total_gtt_size = count = 0; - list_for_each_entry(obj_priv, head, list) { + list_for_each_entry(obj_priv, head, mm_list) { seq_printf(m, " "); describe_obj(m, obj_priv); seq_printf(m, "\n"); @@ -460,8 +458,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data) if (ret) return ret; - list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list, - list) { + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { obj = &obj_priv->base; if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { seq_printf(m, "--- 
gtt_offset = 0x%08x\n", @@ -797,7 +794,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) drm_i915_private_t *dev_priv = dev->dev_private; bool sr_enabled = false; - if (IS_IRONLAKE(dev)) + if (IS_GEN5(dev)) sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; @@ -1020,8 +1017,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor) static struct drm_info_list i915_debugfs_list[] = { {"i915_capabilities", i915_capabilities, 0, 0}, {"i915_gem_objects", i915_gem_object_info, 0}, - {"i915_gem_render_active", i915_gem_object_list_info, 0, (void *) RENDER_LIST}, - {"i915_gem_bsd_active", i915_gem_object_list_info, 0, (void *) BSD_LIST}, + {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST}, {"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST}, diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 251987307ebe..7a26f4dd21ae 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -132,8 +132,8 @@ static int i915_dma_cleanup(struct drm_device * dev) mutex_lock(&dev->struct_mutex); intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); - if (HAS_BSD(dev)) - intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring); mutex_unlock(&dev->struct_mutex); /* Clear the HWS virtual address at teardown */ @@ -499,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev, } - if (IS_G4X(dev) || IS_IRONLAKE(dev)) { + if (IS_G4X(dev) || IS_GEN5(dev)) { BEGIN_LP_RING(2); OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); OUT_RING(MI_NOOP); @@ -764,6 +764,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_BSD: value = HAS_BSD(dev); break; + case I915_PARAM_HAS_BLT: + value = HAS_BLT(dev); + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); @@ -1199,9 +1202,6 @@ static int i915_load_modeset_init(struct drm_device *dev, /* Basic memrange allocator for stolen space (aka mm.vram) */ drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size); - /* We're off and running w/KMS */ - dev_priv->mm.suspended = 0; - /* Let GEM Manage from end of prealloc space to end of aperture. * * However, leave one page at the end still bound to the scratch page. 
@@ -1235,7 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev, */ dev_priv->allow_batchbuffer = 1; - ret = intel_init_bios(dev); + ret = intel_parse_bios(dev); if (ret) DRM_INFO("failed to find VBIOS tables\n"); @@ -1244,6 +1244,8 @@ static int i915_load_modeset_init(struct drm_device *dev, if (ret) goto cleanup_ringbuffer; + intel_register_dsm_handler(); + ret = vga_switcheroo_register_client(dev->pdev, i915_switcheroo_set_state, i915_switcheroo_can_switch); @@ -1269,6 +1271,10 @@ static int i915_load_modeset_init(struct drm_device *dev, goto cleanup_irq; drm_kms_helper_poll_init(dev); + + /* We're off and running w/KMS */ + dev_priv->mm.suspended = 0; + return 0; cleanup_irq: @@ -1989,7 +1995,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { + if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = gm45_get_vblank_counter; } @@ -1999,6 +2005,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_setup_gmbus(dev); intel_opregion_setup(dev); + /* Make sure the bios did its job and set up vital registers */ + intel_setup_bios(dev); + i915_gem_load(dev); /* Init HWS */ @@ -2010,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (IS_PINEVIEW(dev)) i915_pineview_get_mem_freq(dev); - else if (IS_IRONLAKE(dev)) + else if (IS_GEN5(dev)) i915_ironlake_get_mem_freq(dev); /* On the 945G/GM, the chipset reports the MSI capability on the @@ -2063,9 +2072,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->mchdev_lock = &mchdev_lock; spin_unlock(&mchdev_lock); - /* XXX Prevent module unload due to memory corruption bugs. 
*/ - __module_get(THIS_MODULE); - return 0; out_workqueue_free: @@ -2134,9 +2140,6 @@ int i915_driver_unload(struct drm_device *dev) if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); - if (dev_priv->regs != NULL) - iounmap(dev_priv->regs); - intel_opregion_fini(dev); if (drm_core_check_feature(dev, DRIVER_MODESET)) { @@ -2153,8 +2156,14 @@ int i915_driver_unload(struct drm_device *dev) drm_mm_takedown(&dev_priv->mm.vram); intel_cleanup_overlay(dev); + + if (!I915_NEED_GFX_HWS(dev)) + i915_free_hws(dev); } + if (dev_priv->regs != NULL) + iounmap(dev_priv->regs); + intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c3decb2fef4b..8e632110c58f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -143,13 +143,13 @@ static const struct intel_device_info intel_pineview_info = { }; static const struct intel_device_info intel_ironlake_d_info = { - .gen = 5, .is_ironlake = 1, + .gen = 5, .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, .has_bsd_ring = 1, }; static const struct intel_device_info intel_ironlake_m_info = { - .gen = 5, .is_ironlake = 1, .is_mobile = 1, + .gen = 5, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, .has_bsd_ring = 1, }; @@ -158,12 +158,14 @@ static const struct intel_device_info intel_sandybridge_d_info = { .gen = 6, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, + .has_blt_ring = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { .gen = 6, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, + .has_blt_ring = 1, }; static const struct pci_device_id pciidlist[] = { /* aka */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 73ad8bff2c2a..2c2c19b6285e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -206,7 +206,6 @@ struct intel_device_info { u8 is_pineview : 1; u8 is_broadwater : 1; u8 is_crestline : 1; - u8 is_ironlake : 1; u8 has_fbc : 1; u8 has_rc6 : 1; u8 has_pipe_cxsr : 1; @@ -216,6 +215,7 @@ struct intel_device_info { u8 overlay_needs_physical : 1; u8 supports_tv : 1; u8 has_bsd_ring : 1; + u8 has_blt_ring : 1; }; enum no_fbc_reason { @@ -255,6 +255,7 @@ typedef struct drm_i915_private { struct pci_dev *bridge_dev; struct intel_ring_buffer render_ring; struct intel_ring_buffer bsd_ring; + struct intel_ring_buffer blt_ring; uint32_t next_seqno; drm_dma_handle_t *status_page_dmah; @@ -339,17 +340,18 @@ typedef struct drm_i915_private { unsigned int int_crt_support:1; unsigned int lvds_use_ssc:1; int lvds_ssc_freq; - struct { - u8 rate:4; - u8 lanes:4; - u8 preemphasis:4; - u8 vswing:4; - - u8 initialized:1; - u8 support:1; - u8 bpp:6; + int rate; + int lanes; + int preemphasis; + int vswing; + + bool initialized; + bool support; + int bpp; + struct edp_power_seq pps; } edp; + bool no_aux_handshake; struct notifier_block lid_notifier; @@ -547,6 +549,17 @@ typedef struct drm_i915_private { struct list_head shrink_list; /** + * List of objects currently involved in rendering. + * + * Includes buffers having the contents of their GPU caches + * flushed, not necessarily primitives. last_rendering_seqno + * represents when the rendering involved will be completed. + * + * A reference is held on the buffer while on this list. 
+ */ + struct list_head active_list; + + /** * List of objects which are not in the ringbuffer but which * still have a write_domain which needs to be flushed before * unbinding. @@ -558,15 +571,6 @@ typedef struct drm_i915_private { struct list_head flushing_list; /** - * List of objects currently pending a GPU write flush. - * - * All elements on this list will belong to either the - * active_list or flushing_list, last_rendering_seqno can - * be used to differentiate between the two elements. - */ - struct list_head gpu_write_list; - - /** * LRU list of objects which are not in the ringbuffer and * are ready to unbind, but are still in the GTT. * @@ -713,7 +717,8 @@ struct drm_i915_gem_object { struct drm_mm_node *gtt_space; /** This object's place on the active/flushing/inactive lists */ - struct list_head list; + struct list_head ring_list; + struct list_head mm_list; /** This object's place on GPU write list */ struct list_head gpu_write_list; /** This object's place on eviction list */ @@ -1136,6 +1141,15 @@ static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; } static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } #endif +/* intel_acpi.c */ +#ifdef CONFIG_ACPI +extern void intel_register_dsm_handler(void); +extern void intel_unregister_dsm_handler(void); +#else +static inline void intel_register_dsm_handler(void) { return; } +static inline void intel_unregister_dsm_handler(void) { return; } +#endif /* CONFIG_ACPI */ + /* modesetting */ extern void intel_modeset_init(struct drm_device *dev); extern void intel_modeset_cleanup(struct drm_device *dev); @@ -1268,7 +1282,6 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) -#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) @@ -1278,6 +1291,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) +#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) @@ -1289,8 +1303,8 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ IS_I915GM(dev))) #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) -#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) -#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) +#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) +#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) @@ -1302,9 +1316,8 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) -#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ - IS_GEN6(dev)) -#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev)) +#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev)) +#define HAS_PIPE_CONTROL(dev) 
(IS_GEN5(dev) || IS_GEN6(dev)) #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 100a7537980e..6c2618d884e7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -244,12 +244,17 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return -ENOMEM; ret = drm_gem_handle_create(file_priv, obj, &handle); - /* drop reference from allocate - handle holds it now */ - drm_gem_object_unreference_unlocked(obj); if (ret) { + drm_gem_object_release(obj); + i915_gem_info_remove_obj(dev->dev_private, obj->size); + kfree(obj); return ret; } + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference(obj); + trace_i915_gem_object_create(obj); + args->handle = handle; return 0; } @@ -260,19 +265,14 @@ fast_shmem_read(struct page **pages, char __user *data, int length) { - char __iomem *vaddr; - int unwritten; + char *vaddr; + int ret; vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); - if (vaddr == NULL) - return -ENOMEM; - unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length); + ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); kunmap_atomic(vaddr, KM_USER0); - if (unwritten) - return -EFAULT; - - return 0; + return ret; } static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) @@ -366,24 +366,10 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, loff_t offset, page_base; char __user *user_data; int page_offset, page_length; - int ret; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - ret = i915_gem_object_get_pages(obj, 0); - if (ret != 0) - goto fail_unlock; - - ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, - args->size); - if (ret != 0) - goto fail_put_pages; - obj_priv = to_intel_bo(obj); offset = args->offset; @@ -400,23 +386,17 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; - ret = fast_shmem_read(obj_priv->pages, - page_base, page_offset, - user_data, page_length); - if (ret) - goto fail_put_pages; + if (fast_shmem_read(obj_priv->pages, + page_base, page_offset, + user_data, page_length)) + return -EFAULT; remain -= page_length; user_data += page_length; offset += page_length; } -fail_put_pages: - i915_gem_object_put_pages(obj); -fail_unlock: - mutex_unlock(&dev->struct_mutex); - - return ret; + return 0; } static int @@ -477,33 +457,28 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; num_pages = last_data_page - first_data_page + 1; - user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); + user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); if (user_pages == NULL) return -ENOMEM; + mutex_unlock(&dev->struct_mutex); down_read(&mm->mmap_sem); pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, num_pages, 1, 0, user_pages, NULL); up_read(&mm->mmap_sem); + mutex_lock(&dev->struct_mutex); if (pinned_pages < num_pages) { ret = -EFAULT; - goto fail_put_user_pages; + goto out; } - do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto 
fail_put_user_pages; - - ret = i915_gem_object_get_pages_or_evict(obj); + ret = i915_gem_object_set_cpu_read_domain_range(obj, + args->offset, + args->size); if (ret) - goto fail_unlock; + goto out; - ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, - args->size); - if (ret != 0) - goto fail_put_pages; + do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); obj_priv = to_intel_bo(obj); offset = args->offset; @@ -548,11 +523,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, offset += page_length; } -fail_put_pages: - i915_gem_object_put_pages(obj); -fail_unlock: - mutex_unlock(&dev->struct_mutex); -fail_put_user_pages: +out: for (i = 0; i < pinned_pages; i++) { SetPageDirty(user_pages[i]); page_cache_release(user_pages[i]); @@ -576,9 +547,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; int ret = 0; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) - return -ENOENT; + if (obj == NULL) { + ret = -ENOENT; + goto unlock; + } obj_priv = to_intel_bo(obj); /* Bounds check source. */ @@ -597,17 +574,35 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, goto out; } - if (i915_gem_object_needs_bit17_swizzle(obj)) { - ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); - } else { - ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); - if (ret != 0) - ret = i915_gem_shmem_pread_slow(dev, obj, args, - file_priv); + ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr, + args->size); + if (ret) { + ret = -EFAULT; + goto out; } + ret = i915_gem_object_get_pages_or_evict(obj); + if (ret) + goto out; + + ret = i915_gem_object_set_cpu_read_domain_range(obj, + args->offset, + args->size); + if (ret) + goto out_put; + + ret = -EFAULT; + if (!i915_gem_object_needs_bit17_swizzle(obj)) + ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); + if (ret == -EFAULT) + ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); + +out_put: + i915_gem_object_put_pages(obj); out: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_unreference(obj); +unlock: + mutex_unlock(&dev->struct_mutex); return ret; } @@ -628,9 +623,7 @@ fast_user_write(struct io_mapping *mapping, unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, user_data, length); io_mapping_unmap_atomic(vaddr_atomic, KM_USER0); - if (unwritten) - return -EFAULT; - return 0; + return unwritten; } /* Here's the write path which can sleep for @@ -663,18 +656,14 @@ fast_shmem_write(struct page **pages, char __user *data, int length) { - char __iomem *vaddr; - unsigned long unwritten; + char *vaddr; + int ret; vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); - if (vaddr == NULL) - return -ENOMEM; - unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length); + ret = __copy_from_user_inatomic(vaddr + page_offset, data, length); kunmap_atomic(vaddr, KM_USER0); - if (unwritten) - return -EFAULT; - return 0; + return ret; } /** @@ -692,24 +681,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, loff_t offset, page_base; char __user *user_data; int page_offset, page_length; - int ret; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - ret = i915_gem_object_pin(obj, 0); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return 
ret; - } - ret = i915_gem_object_set_to_gtt_domain(obj, 1); - if (ret) - goto fail; - obj_priv = to_intel_bo(obj); offset = obj_priv->gtt_offset + args->offset; @@ -726,26 +701,21 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; - ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base, - page_offset, user_data, page_length); - /* If we get a fault while copying data, then (presumably) our * source page isn't available. Return the error and we'll * retry in the slow path. */ - if (ret) - goto fail; + if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, + page_offset, user_data, page_length)) + + return -EFAULT; remain -= page_length; user_data += page_length; offset += page_length; } -fail: - i915_gem_object_unpin(obj); - mutex_unlock(&dev->struct_mutex); - - return ret; + return 0; } /** @@ -782,30 +752,24 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE; num_pages = last_data_page - first_data_page + 1; - user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); + user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); if (user_pages == NULL) return -ENOMEM; + mutex_unlock(&dev->struct_mutex); down_read(&mm->mmap_sem); pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, num_pages, 0, 0, user_pages, NULL); up_read(&mm->mmap_sem); + mutex_lock(&dev->struct_mutex); if (pinned_pages < num_pages) { ret = -EFAULT; goto out_unpin_pages; } - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto out_unpin_pages; - - ret = i915_gem_object_pin(obj, 0); - if (ret) - goto out_unlock; - ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret) - goto out_unpin_object; + goto out_unpin_pages; obj_priv = to_intel_bo(obj); offset = obj_priv->gtt_offset + args->offset; @@ -841,10 +805,6 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, data_ptr += page_length; } -out_unpin_object: - i915_gem_object_unpin(obj); -out_unlock: - mutex_unlock(&dev->struct_mutex); out_unpin_pages: for (i = 0; i < pinned_pages; i++) page_cache_release(user_pages[i]); @@ -867,23 +827,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, loff_t offset, page_base; char __user *user_data; int page_offset, page_length; - int ret; user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - ret = i915_gem_object_get_pages(obj, 0); - if (ret != 0) - goto fail_unlock; - - ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret != 0) - goto fail_put_pages; - obj_priv = to_intel_bo(obj); offset = args->offset; obj_priv->dirty = 1; @@ -901,23 +848,17 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, if ((page_offset + remain) > PAGE_SIZE) page_length = PAGE_SIZE - page_offset; - ret = fast_shmem_write(obj_priv->pages, + if (fast_shmem_write(obj_priv->pages, page_base, page_offset, - user_data, page_length); - if (ret) - goto fail_put_pages; + user_data, page_length)) + return -EFAULT; remain -= page_length; user_data += page_length; offset += page_length; } -fail_put_pages: - i915_gem_object_put_pages(obj); -fail_unlock: - mutex_unlock(&dev->struct_mutex); - - return ret; + return 0; } /** @@ -955,32 +896,26 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, last_data_page = (data_ptr + 
args->size - 1) / PAGE_SIZE; num_pages = last_data_page - first_data_page + 1; - user_pages = drm_calloc_large(num_pages, sizeof(struct page *)); + user_pages = drm_malloc_ab(num_pages, sizeof(struct page *)); if (user_pages == NULL) return -ENOMEM; + mutex_unlock(&dev->struct_mutex); down_read(&mm->mmap_sem); pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, num_pages, 0, 0, user_pages, NULL); up_read(&mm->mmap_sem); + mutex_lock(&dev->struct_mutex); if (pinned_pages < num_pages) { ret = -EFAULT; - goto fail_put_user_pages; + goto out; } - do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); - - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto fail_put_user_pages; - - ret = i915_gem_object_get_pages_or_evict(obj); + ret = i915_gem_object_set_to_cpu_domain(obj, 1); if (ret) - goto fail_unlock; + goto out; - ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret != 0) - goto fail_put_pages; + do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); obj_priv = to_intel_bo(obj); offset = args->offset; @@ -1026,11 +961,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, offset += page_length; } -fail_put_pages: - i915_gem_object_put_pages(obj); -fail_unlock: - mutex_unlock(&dev->struct_mutex); -fail_put_user_pages: +out: for (i = 0; i < pinned_pages; i++) page_cache_release(user_pages[i]); drm_free_large(user_pages); @@ -1045,18 +976,25 @@ fail_put_user_pages: */ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) + struct drm_file *file) { struct drm_i915_gem_pwrite *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; int ret = 0; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) - return -ENOENT; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + obj = drm_gem_object_lookup(dev, file, args->handle); + if (obj == NULL) { + ret = -ENOENT; + goto unlock; + } obj_priv = to_intel_bo(obj); + /* Bounds check destination. */ if (args->offset > obj->size || args->size > obj->size - args->offset) { ret = -EINVAL; @@ -1073,6 +1011,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, goto out; } + ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr, + args->size); + if (ret) { + ret = -EFAULT; + goto out; + } + /* We can only do the GTT pwrite on untiled buffers, as otherwise * it would end up going through the fenced access, and we'll get * different detiling behavior between reading and writing. @@ -1080,32 +1025,47 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * perspective, requiring manual detiling by the client. 
*/ if (obj_priv->phys_obj) - ret = i915_gem_phys_pwrite(dev, obj, args, file_priv); + ret = i915_gem_phys_pwrite(dev, obj, args, file); else if (obj_priv->tiling_mode == I915_TILING_NONE && obj_priv->gtt_space && obj->write_domain != I915_GEM_DOMAIN_CPU) { - ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv); - if (ret == -EFAULT) { - ret = i915_gem_gtt_pwrite_slow(dev, obj, args, - file_priv); - } - } else if (i915_gem_object_needs_bit17_swizzle(obj)) { - ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv); + ret = i915_gem_object_pin(obj, 0); + if (ret) + goto out; + + ret = i915_gem_object_set_to_gtt_domain(obj, 1); + if (ret) + goto out_unpin; + + ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); + if (ret == -EFAULT) + ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file); + +out_unpin: + i915_gem_object_unpin(obj); } else { - ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); - if (ret == -EFAULT) { - ret = i915_gem_shmem_pwrite_slow(dev, obj, args, - file_priv); - } - } + ret = i915_gem_object_get_pages_or_evict(obj); + if (ret) + goto out; -#if WATCH_PWRITE - if (ret) - DRM_INFO("pwrite failed %d\n", ret); -#endif + ret = i915_gem_object_set_to_cpu_domain(obj, 1); + if (ret) + goto out_put; + + ret = -EFAULT; + if (!i915_gem_object_needs_bit17_swizzle(obj)) + ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); + if (ret == -EFAULT) + ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); + +out_put: + i915_gem_object_put_pages(obj); + } out: - drm_gem_object_unreference_unlocked(obj); + drm_gem_object_unreference(obj); +unlock: + mutex_unlock(&dev->struct_mutex); return ret; } @@ -1141,16 +1101,16 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, if (write_domain != 0 && read_domains != write_domain) return -EINVAL; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) - return -ENOENT; - obj_priv = to_intel_bo(obj); - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); + if (ret) return ret; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + ret = -ENOENT; + goto unlock; } + obj_priv = to_intel_bo(obj); intel_mark_busy(dev, obj); @@ -1179,9 +1139,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, /* Maintain LRU order of "inactive" objects */ if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) - list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); return ret; } @@ -1200,14 +1161,14 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) - return -ENOENT; - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); + if (ret) return ret; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + ret = -ENOENT; + goto unlock; } /* Pinned buffers may be scanout, so flush the cache */ @@ -1215,6 +1176,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, i915_gem_object_flush_cpu_write_domain(obj); drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); return ret; } @@ -1309,7 +1271,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } if (i915_gem_object_is_inactive(obj_priv)) - 
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + page_offset; @@ -1512,33 +1474,27 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, if (!(dev->driver->driver_features & DRIVER_GEM)) return -ENODEV; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) - return -ENOENT; - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); + if (ret) return ret; - } + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + ret = -ENOENT; + goto unlock; + } obj_priv = to_intel_bo(obj); if (obj_priv->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to mmap a purgeable buffer\n"); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; + ret = -EINVAL; + goto out; } - if (!obj_priv->mmap_offset) { ret = i915_gem_create_mmap_offset(obj); - if (ret) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } + if (ret) + goto out; } args->offset = obj_priv->mmap_offset; @@ -1549,17 +1505,15 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, */ if (!obj_priv->agp_mem) { ret = i915_gem_object_bind_to_gtt(obj, 0); - if (ret) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } + if (ret) + goto out; } +out: drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - - return 0; + return ret; } static void @@ -1611,6 +1565,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, struct intel_ring_buffer *ring) { struct drm_device *dev = obj->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t seqno = i915_gem_next_request_seqno(dev, ring); @@ -1624,7 +1579,8 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, } /* Move from whatever list we were on to the tail of execution. 
*/ - list_move_tail(&obj_priv->list, &ring->active_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list); + list_move_tail(&obj_priv->ring_list, &ring->active_list); obj_priv->last_rendering_seqno = seqno; } @@ -1636,7 +1592,8 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj) struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); BUG_ON(!obj_priv->active); - list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list); + list_del_init(&obj_priv->ring_list); obj_priv->last_rendering_seqno = 0; } @@ -1675,9 +1632,10 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if (obj_priv->pin_count != 0) - list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); else - list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); + list_del_init(&obj_priv->ring_list); BUG_ON(!list_empty(&obj_priv->gpu_write_list)); @@ -1699,12 +1657,11 @@ i915_gem_process_flushing_list(struct drm_device *dev, struct drm_i915_gem_object *obj_priv, *next; list_for_each_entry_safe(obj_priv, next, - &dev_priv->mm.gpu_write_list, + &ring->gpu_write_list, gpu_write_list) { struct drm_gem_object *obj = &obj_priv->base; - if (obj->write_domain & flush_domains && - obj_priv->ring == ring) { + if (obj->write_domain & flush_domains) { uint32_t old_write_domain = obj->write_domain; obj->write_domain = 0; @@ -1826,7 +1783,7 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, obj_priv = list_first_entry(&ring->active_list, struct drm_i915_gem_object, - list); + ring_list); obj_priv->base.write_domain = 0; list_del_init(&obj_priv->gpu_write_list); @@ -1841,8 +1798,8 @@ void i915_gem_reset(struct drm_device *dev) int i; i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); - if (HAS_BSD(dev)) - i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); + i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); + i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring); /* Remove anything from the flushing lists. 
The GPU cache is likely * to be lost on reset along with the data, so simply move the @@ -1851,7 +1808,7 @@ void i915_gem_reset(struct drm_device *dev) while (!list_empty(&dev_priv->mm.flushing_list)) { obj_priv = list_first_entry(&dev_priv->mm.flushing_list, struct drm_i915_gem_object, - list); + mm_list); obj_priv->base.write_domain = 0; list_del_init(&obj_priv->gpu_write_list); @@ -1863,7 +1820,7 @@ void i915_gem_reset(struct drm_device *dev) */ list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, - list) + mm_list) { obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; } @@ -1923,7 +1880,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev, obj_priv = list_first_entry(&ring->active_list, struct drm_i915_gem_object, - list); + ring_list); if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) break; @@ -1959,13 +1916,13 @@ i915_gem_retire_requests(struct drm_device *dev) */ list_for_each_entry_safe(obj_priv, tmp, &dev_priv->mm.deferred_free_list, - list) + mm_list) i915_gem_free_object_tail(&obj_priv->base); } i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); - if (HAS_BSD(dev)) - i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); + i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); + i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring); } static void @@ -1988,8 +1945,8 @@ i915_gem_retire_work_handler(struct work_struct *work) if (!dev_priv->mm.suspended && (!list_empty(&dev_priv->render_ring.request_list) || - (HAS_BSD(dev) && - !list_empty(&dev_priv->bsd_ring.request_list)))) + !list_empty(&dev_priv->bsd_ring.request_list) || + !list_empty(&dev_priv->blt_ring.request_list))) queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); mutex_unlock(&dev->struct_mutex); } @@ -2108,6 +2065,10 @@ i915_gem_flush(struct drm_device *dev, i915_gem_flush_ring(dev, file_priv, &dev_priv->bsd_ring, invalidate_domains, flush_domains); + if (flush_rings & RING_BLT) + i915_gem_flush_ring(dev, file_priv, + &dev_priv->blt_ring, + invalidate_domains, flush_domains); } } @@ -2194,10 +2155,11 @@ i915_gem_object_unbind(struct drm_gem_object *obj) BUG_ON(obj_priv->pages_refcount); i915_gem_info_remove_gtt(dev_priv, obj->size); - list_del_init(&obj_priv->list); + list_del_init(&obj_priv->mm_list); drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; + obj_priv->gtt_offset = 0; if (i915_gem_object_is_purgeable(obj_priv)) i915_gem_object_truncate(obj); @@ -2210,6 +2172,9 @@ i915_gem_object_unbind(struct drm_gem_object *obj) static int i915_ring_idle(struct drm_device *dev, struct intel_ring_buffer *ring) { + if (list_empty(&ring->gpu_write_list)) + return 0; + i915_gem_flush_ring(dev, NULL, ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); return i915_wait_request(dev, @@ -2226,8 +2191,8 @@ i915_gpu_idle(struct drm_device *dev) lists_empty = (list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && - (!HAS_BSD(dev) || - list_empty(&dev_priv->bsd_ring.active_list))); + list_empty(&dev_priv->bsd_ring.active_list) && + list_empty(&dev_priv->blt_ring.active_list)); if (lists_empty) return 0; @@ -2236,11 +2201,13 @@ i915_gpu_idle(struct drm_device *dev) if (ret) return ret; - if (HAS_BSD(dev)) { - ret = i915_ring_idle(dev, &dev_priv->bsd_ring); - if (ret) - return ret; - } + ret = i915_ring_idle(dev, &dev_priv->bsd_ring); + if (ret) + return ret; + + ret = i915_ring_idle(dev, &dev_priv->blt_ring); + if (ret) + return ret; return 0; } @@ -2691,12 +2658,9 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 
unsigned alignment) search_free: free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, obj->size, alignment, 0); - if (free_space != NULL) { + if (free_space != NULL) obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, alignment); - if (obj_priv->gtt_space != NULL) - obj_priv->gtt_offset = obj_priv->gtt_space->start; - } if (obj_priv->gtt_space == NULL) { /* If the gtt is empty and we're still having trouble * fitting our object in, we're out of memory. @@ -2739,7 +2703,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) obj_priv->agp_mem = drm_agp_bind_pages(dev, obj_priv->pages, obj->size >> PAGE_SHIFT, - obj_priv->gtt_offset, + obj_priv->gtt_space->start, obj_priv->agp_type); if (obj_priv->agp_mem == NULL) { i915_gem_object_put_pages(obj); @@ -2754,7 +2718,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) } /* keep track of bounds object by adding it to the inactive list */ - list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list); + list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); i915_gem_info_add_gtt(dev_priv, obj->size); /* Assert that the object is not currently in any GPU domain. As it @@ -2764,6 +2728,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); + obj_priv->gtt_offset = obj_priv->gtt_space->start; trace_i915_gem_object_bind(obj, obj_priv->gtt_offset); return 0; @@ -3115,7 +3080,8 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) * drm_agp_chipset_flush */ static void -i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) +i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, + struct intel_ring_buffer *ring) { struct drm_device *dev = obj->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -3124,9 +3090,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) uint32_t flush_domains = 0; uint32_t old_read_domains; - BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); - BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); - intel_mark_busy(dev, obj); /* @@ -3172,8 +3135,10 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) dev->invalidate_domains |= invalidate_domains; dev->flush_domains |= flush_domains; - if (obj_priv->ring) + if (flush_domains & I915_GEM_GPU_DOMAINS) dev_priv->mm.flush_rings |= obj_priv->ring->id; + if (invalidate_domains & I915_GEM_GPU_DOMAINS) + dev_priv->mm.flush_rings |= ring->id; trace_i915_gem_object_change_domain(obj, old_read_domains, @@ -3289,68 +3254,42 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, * Pin an object to the GTT and evaluate the relocations landing in it. 
*/ static int -i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, - struct drm_file *file_priv, - struct drm_i915_gem_exec_object2 *entry, - struct drm_i915_gem_relocation_entry *relocs) +i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj, + struct drm_file *file_priv, + struct drm_i915_gem_exec_object2 *entry) { - struct drm_device *dev = obj->dev; + struct drm_device *dev = obj->base.dev; drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - int i, ret; - void __iomem *reloc_page; - bool need_fence; - - need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj_priv->tiling_mode != I915_TILING_NONE; - - /* Check fence reg constraints and rebind if necessary */ - if (need_fence && - !i915_gem_object_fence_offset_ok(obj, - obj_priv->tiling_mode)) { - ret = i915_gem_object_unbind(obj); - if (ret) - return ret; - } + struct drm_i915_gem_relocation_entry __user *user_relocs; + struct drm_gem_object *target_obj = NULL; + uint32_t target_handle = 0; + int i, ret = 0; - /* Choose the GTT offset for our buffer and put it there. */ - ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); - if (ret) - return ret; + user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; + for (i = 0; i < entry->relocation_count; i++) { + struct drm_i915_gem_relocation_entry reloc; + uint32_t target_offset; - /* - * Pre-965 chips need a fence register set up in order to - * properly handle blits to/from tiled surfaces. - */ - if (need_fence) { - ret = i915_gem_object_get_fence_reg(obj, true); - if (ret != 0) { - i915_gem_object_unpin(obj); - return ret; + if (__copy_from_user_inatomic(&reloc, + user_relocs+i, + sizeof(reloc))) { + ret = -EFAULT; + break; } - dev_priv->fence_regs[obj_priv->fence_reg].gpu = true; - } + if (reloc.target_handle != target_handle) { + drm_gem_object_unreference(target_obj); - entry->offset = obj_priv->gtt_offset; + target_obj = drm_gem_object_lookup(dev, file_priv, + reloc.target_handle); + if (target_obj == NULL) { + ret = -ENOENT; + break; + } - /* Apply the relocations, using the GTT aperture to avoid cache - * flushing requirements. - */ - for (i = 0; i < entry->relocation_count; i++) { - struct drm_i915_gem_relocation_entry *reloc= &relocs[i]; - struct drm_gem_object *target_obj; - struct drm_i915_gem_object *target_obj_priv; - uint32_t reloc_val, reloc_offset; - uint32_t __iomem *reloc_entry; - - target_obj = drm_gem_object_lookup(obj->dev, file_priv, - reloc->target_handle); - if (target_obj == NULL) { - i915_gem_object_unpin(obj); - return -ENOENT; + target_handle = reloc.target_handle; } - target_obj_priv = to_intel_bo(target_obj); + target_offset = to_intel_bo(target_obj)->gtt_offset; #if WATCH_RELOC DRM_INFO("%s: obj %p offset %08x target %d " @@ -3358,136 +3297,202 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, "presumed %08x delta %08x\n", __func__, obj, - (int) reloc->offset, - (int) reloc->target_handle, - (int) reloc->read_domains, - (int) reloc->write_domain, - (int) target_obj_priv->gtt_offset, - (int) reloc->presumed_offset, - reloc->delta); + (int) reloc.offset, + (int) reloc.target_handle, + (int) reloc.read_domains, + (int) reloc.write_domain, + (int) target_offset, + (int) reloc.presumed_offset, + reloc.delta); #endif /* The target buffer should have appeared before us in the * exec_object list, so it should have a GTT space bound by now. 
*/ - if (target_obj_priv->gtt_space == NULL) { + if (target_offset == 0) { DRM_ERROR("No GTT space found for object %d\n", - reloc->target_handle); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + reloc.target_handle); + ret = -EINVAL; + break; } /* Validate that the target is in a valid r/w GPU domain */ - if (reloc->write_domain & (reloc->write_domain - 1)) { + if (reloc.write_domain & (reloc.write_domain - 1)) { DRM_ERROR("reloc with multiple write domains: " "obj %p target %d offset %d " "read %08x write %08x", - obj, reloc->target_handle, - (int) reloc->offset, - reloc->read_domains, - reloc->write_domain); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + obj, reloc.target_handle, + (int) reloc.offset, + reloc.read_domains, + reloc.write_domain); + ret = -EINVAL; + break; } - if (reloc->write_domain & I915_GEM_DOMAIN_CPU || - reloc->read_domains & I915_GEM_DOMAIN_CPU) { + if (reloc.write_domain & I915_GEM_DOMAIN_CPU || + reloc.read_domains & I915_GEM_DOMAIN_CPU) { DRM_ERROR("reloc with read/write CPU domains: " "obj %p target %d offset %d " "read %08x write %08x", - obj, reloc->target_handle, - (int) reloc->offset, - reloc->read_domains, - reloc->write_domain); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + obj, reloc.target_handle, + (int) reloc.offset, + reloc.read_domains, + reloc.write_domain); + ret = -EINVAL; + break; } - if (reloc->write_domain && target_obj->pending_write_domain && - reloc->write_domain != target_obj->pending_write_domain) { + if (reloc.write_domain && target_obj->pending_write_domain && + reloc.write_domain != target_obj->pending_write_domain) { DRM_ERROR("Write domain conflict: " "obj %p target %d offset %d " "new %08x old %08x\n", - obj, reloc->target_handle, - (int) reloc->offset, - reloc->write_domain, + obj, reloc.target_handle, + (int) reloc.offset, + reloc.write_domain, target_obj->pending_write_domain); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + ret = -EINVAL; + break; } - target_obj->pending_read_domains |= reloc->read_domains; - target_obj->pending_write_domain |= reloc->write_domain; + target_obj->pending_read_domains |= reloc.read_domains; + target_obj->pending_write_domain |= reloc.write_domain; /* If the relocation already has the right value in it, no * more work needs to be done. */ - if (target_obj_priv->gtt_offset == reloc->presumed_offset) { - drm_gem_object_unreference(target_obj); + if (target_offset == reloc.presumed_offset) continue; - } /* Check that the relocation address is valid... */ - if (reloc->offset > obj->size - 4) { + if (reloc.offset > obj->base.size - 4) { DRM_ERROR("Relocation beyond object bounds: " "obj %p target %d offset %d size %d.\n", - obj, reloc->target_handle, - (int) reloc->offset, (int) obj->size); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + obj, reloc.target_handle, + (int) reloc.offset, (int) obj->base.size); + ret = -EINVAL; + break; } - if (reloc->offset & 3) { + if (reloc.offset & 3) { DRM_ERROR("Relocation not 4-byte aligned: " "obj %p target %d offset %d.\n", - obj, reloc->target_handle, - (int) reloc->offset); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + obj, reloc.target_handle, + (int) reloc.offset); + ret = -EINVAL; + break; } /* and points to somewhere within the target object. 
*/ - if (reloc->delta >= target_obj->size) { + if (reloc.delta >= target_obj->size) { DRM_ERROR("Relocation beyond target object bounds: " "obj %p target %d delta %d size %d.\n", - obj, reloc->target_handle, - (int) reloc->delta, (int) target_obj->size); - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return -EINVAL; + obj, reloc.target_handle, + (int) reloc.delta, (int) target_obj->size); + ret = -EINVAL; + break; } - ret = i915_gem_object_set_to_gtt_domain(obj, 1); - if (ret != 0) { - drm_gem_object_unreference(target_obj); - i915_gem_object_unpin(obj); - return ret; + reloc.delta += target_offset; + if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { + uint32_t page_offset = reloc.offset & ~PAGE_MASK; + char *vaddr; + + vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT], KM_USER0); + *(uint32_t *)(vaddr + page_offset) = reloc.delta; + kunmap_atomic(vaddr, KM_USER0); + } else { + uint32_t __iomem *reloc_entry; + void __iomem *reloc_page; + + ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1); + if (ret) + break; + + /* Map the page containing the relocation we're going to perform. */ + reloc.offset += obj->gtt_offset; + reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, + reloc.offset & PAGE_MASK, + KM_USER0); + reloc_entry = (uint32_t __iomem *) + (reloc_page + (reloc.offset & ~PAGE_MASK)); + iowrite32(reloc.delta, reloc_entry); + io_mapping_unmap_atomic(reloc_page, KM_USER0); } - /* Map the page containing the relocation we're going to - * perform. - */ - reloc_offset = obj_priv->gtt_offset + reloc->offset; - reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, - (reloc_offset & - ~(PAGE_SIZE - 1)), - KM_USER0); - reloc_entry = (uint32_t __iomem *)(reloc_page + - (reloc_offset & (PAGE_SIZE - 1))); - reloc_val = target_obj_priv->gtt_offset + reloc->delta; - - writel(reloc_val, reloc_entry); - io_mapping_unmap_atomic(reloc_page, KM_USER0); - - /* The updated presumed offset for this entry will be - * copied back out to the user. - */ - reloc->presumed_offset = target_obj_priv->gtt_offset; + /* and update the user's relocation entry */ + reloc.presumed_offset = target_offset; + if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, + &reloc.presumed_offset, + sizeof(reloc.presumed_offset))) { + ret = -EFAULT; + break; + } + } + + drm_gem_object_unreference(target_obj); + return ret; +} + +static int +i915_gem_execbuffer_pin(struct drm_device *dev, + struct drm_file *file, + struct drm_gem_object **object_list, + struct drm_i915_gem_exec_object2 *exec_list, + int count) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret, i, retry; + + /* attempt to pin all of the buffers into the GTT */ + for (retry = 0; retry < 2; retry++) { + ret = 0; + for (i = 0; i < count; i++) { + struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; + struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]); + bool need_fence = + entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode != I915_TILING_NONE; + + /* Check fence reg constraints and rebind if necessary */ + if (need_fence && + !i915_gem_object_fence_offset_ok(&obj->base, + obj->tiling_mode)) { + ret = i915_gem_object_unbind(&obj->base); + if (ret) + break; + } + + ret = i915_gem_object_pin(&obj->base, entry->alignment); + if (ret) + break; + + /* + * Pre-965 chips need a fence register set up in order + * to properly handle blits to/from tiled surfaces. 
+ */ + if (need_fence) { + ret = i915_gem_object_get_fence_reg(&obj->base, true); + if (ret) { + i915_gem_object_unpin(&obj->base); + break; + } - drm_gem_object_unreference(target_obj); + dev_priv->fence_regs[obj->fence_reg].gpu = true; + } + + entry->offset = obj->gtt_offset; + } + + while (i--) + i915_gem_object_unpin(object_list[i]); + + if (ret == 0) + break; + + if (ret != -ENOSPC || retry) + return ret; + + ret = i915_gem_evict_everything(dev); + if (ret) + return ret; } return 0; @@ -3551,86 +3556,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) } static int -i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list, - uint32_t buffer_count, - struct drm_i915_gem_relocation_entry **relocs) -{ - uint32_t reloc_count = 0, reloc_index = 0, i; - int ret; - - *relocs = NULL; - for (i = 0; i < buffer_count; i++) { - if (reloc_count + exec_list[i].relocation_count < reloc_count) - return -EINVAL; - reloc_count += exec_list[i].relocation_count; - } - - *relocs = drm_calloc_large(reloc_count, sizeof(**relocs)); - if (*relocs == NULL) { - DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count); - return -ENOMEM; - } - - for (i = 0; i < buffer_count; i++) { - struct drm_i915_gem_relocation_entry __user *user_relocs; - - user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; - - ret = copy_from_user(&(*relocs)[reloc_index], - user_relocs, - exec_list[i].relocation_count * - sizeof(**relocs)); - if (ret != 0) { - drm_free_large(*relocs); - *relocs = NULL; - return -EFAULT; - } - - reloc_index += exec_list[i].relocation_count; - } - - return 0; -} - -static int -i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list, - uint32_t buffer_count, - struct drm_i915_gem_relocation_entry *relocs) -{ - uint32_t reloc_count = 0, i; - int ret = 0; - - if (relocs == NULL) - return 0; - - for (i = 0; i < buffer_count; i++) { - struct drm_i915_gem_relocation_entry __user *user_relocs; - int unwritten; - - user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; - - unwritten = copy_to_user(user_relocs, - &relocs[reloc_count], - exec_list[i].relocation_count * - sizeof(*relocs)); - - if (unwritten) { - ret = -EFAULT; - goto err; - } - - reloc_count += exec_list[i].relocation_count; - } - -err: - drm_free_large(relocs); - - return ret; -} - -static int -i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, - uint64_t exec_offset) +i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec, + uint64_t exec_offset) { uint32_t exec_start, exec_len; @@ -3647,43 +3574,32 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, } static int -i915_gem_wait_for_pending_flip(struct drm_device *dev, - struct drm_gem_object **object_list, - int count) +validate_exec_list(struct drm_i915_gem_exec_object2 *exec, + int count) { - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - DEFINE_WAIT(wait); - int i, ret = 0; + int i; - for (;;) { - prepare_to_wait(&dev_priv->pending_flip_queue, - &wait, TASK_INTERRUPTIBLE); - for (i = 0; i < count; i++) { - obj_priv = to_intel_bo(object_list[i]); - if (atomic_read(&obj_priv->pending_flip) > 0) - break; - } - if (i == count) - break; + for (i = 0; i < count; i++) { + char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; + size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry); - if (!signal_pending(current)) { - mutex_unlock(&dev->struct_mutex); - schedule(); - mutex_lock(&dev->struct_mutex); 
- continue; - } - ret = -ERESTARTSYS; - break; + if (!access_ok(VERIFY_READ, ptr, length)) + return -EFAULT; + + /* we may also need to update the presumed offsets */ + if (!access_ok(VERIFY_WRITE, ptr, length)) + return -EFAULT; + + if (fault_in_pages_readable(ptr, length)) + return -EFAULT; } - finish_wait(&dev_priv->pending_flip_queue, &wait); - return ret; + return 0; } static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, - struct drm_file *file_priv, + struct drm_file *file, struct drm_i915_gem_execbuffer2 *args, struct drm_i915_gem_exec_object2 *exec_list) { @@ -3692,12 +3608,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object *batch_obj; struct drm_i915_gem_object *obj_priv; struct drm_clip_rect *cliprects = NULL; - struct drm_i915_gem_relocation_entry *relocs = NULL; struct drm_i915_gem_request *request = NULL; - int ret, ret2, i, pinned = 0; + int ret, i, flips; uint64_t exec_offset; - uint32_t reloc_index; - int pin_tries, flips; struct intel_ring_buffer *ring = NULL; @@ -3705,18 +3618,37 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) return ret; + ret = validate_exec_list(exec_list, args->buffer_count); + if (ret) + return ret; + #if WATCH_EXEC DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", (int) args->buffers_ptr, args->buffer_count, args->batch_len); #endif - if (args->flags & I915_EXEC_BSD) { + switch (args->flags & I915_EXEC_RING_MASK) { + case I915_EXEC_DEFAULT: + case I915_EXEC_RENDER: + ring = &dev_priv->render_ring; + break; + case I915_EXEC_BSD: if (!HAS_BSD(dev)) { - DRM_ERROR("execbuf with wrong flag\n"); + DRM_ERROR("execbuf with invalid ring (BSD)\n"); return -EINVAL; } ring = &dev_priv->bsd_ring; - } else { - ring = &dev_priv->render_ring; + break; + case I915_EXEC_BLT: + if (!HAS_BLT(dev)) { + DRM_ERROR("execbuf with invalid ring (BLT)\n"); + return -EINVAL; + } + ring = &dev_priv->blt_ring; + break; + default: + DRM_ERROR("execbuf with unknown ring: %d\n", + (int)(args->flags & I915_EXEC_RING_MASK)); + return -EINVAL; } if (args->buffer_count < 1) { @@ -3757,11 +3689,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto pre_mutex_err; } - ret = i915_gem_get_relocs_from_user(exec_list, args->buffer_count, - &relocs); - if (ret != 0) - goto pre_mutex_err; - ret = i915_mutex_lock_interruptible(dev); if (ret) goto pre_mutex_err; @@ -3773,9 +3700,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } /* Look up object handles */ - flips = 0; for (i = 0; i < args->buffer_count; i++) { - object_list[i] = drm_gem_object_lookup(dev, file_priv, + object_list[i] = drm_gem_object_lookup(dev, file, exec_list[i].handle); if (object_list[i] == NULL) { DRM_ERROR("Invalid object handle %d at index %d\n", @@ -3796,76 +3722,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } obj_priv->in_execbuffer = true; - flips += atomic_read(&obj_priv->pending_flip); } - if (flips > 0) { - ret = i915_gem_wait_for_pending_flip(dev, object_list, - args->buffer_count); - if (ret) - goto err; - } - - /* Pin and relocate */ - for (pin_tries = 0; ; pin_tries++) { - ret = 0; - reloc_index = 0; - - for (i = 0; i < args->buffer_count; i++) { - object_list[i]->pending_read_domains = 0; - object_list[i]->pending_write_domain = 0; - ret = i915_gem_object_pin_and_relocate(object_list[i], - file_priv, - &exec_list[i], - &relocs[reloc_index]); - if (ret) - break; - pinned = i + 1; - reloc_index += exec_list[i].relocation_count; - } - /* success */ - if (ret == 0) - break; - - /* 
error other than GTT full, or we've already tried again */ - if (ret != -ENOSPC || pin_tries >= 1) { - if (ret != -ERESTARTSYS) { - unsigned long long total_size = 0; - int num_fences = 0; - for (i = 0; i < args->buffer_count; i++) { - obj_priv = to_intel_bo(object_list[i]); - - total_size += object_list[i]->size; - num_fences += - exec_list[i].flags & EXEC_OBJECT_NEEDS_FENCE && - obj_priv->tiling_mode != I915_TILING_NONE; - } - DRM_ERROR("Failed to pin buffer %d of %d, total %llu bytes, %d fences: %d\n", - pinned+1, args->buffer_count, - total_size, num_fences, - ret); - DRM_ERROR("%u objects [%u pinned, %u GTT], " - "%zu object bytes [%zu pinned], " - "%zu /%zu gtt bytes\n", - dev_priv->mm.object_count, - dev_priv->mm.pin_count, - dev_priv->mm.gtt_count, - dev_priv->mm.object_memory, - dev_priv->mm.pin_memory, - dev_priv->mm.gtt_memory, - dev_priv->mm.gtt_total); - } - goto err; - } - - /* unpin all of our buffers */ - for (i = 0; i < pinned; i++) - i915_gem_object_unpin(object_list[i]); - pinned = 0; + /* Move the objects en-masse into the GTT, evicting if necessary. */ + ret = i915_gem_execbuffer_pin(dev, file, + object_list, exec_list, + args->buffer_count); + if (ret) + goto err; - /* evict everyone we can from the aperture */ - ret = i915_gem_evict_everything(dev); - if (ret && ret != -ENOSPC) + /* The objects are in their final locations, apply the relocations. */ + for (i = 0; i < args->buffer_count; i++) { + struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); + obj->base.pending_read_domains = 0; + obj->base.pending_write_domain = 0; + ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]); + if (ret) goto err; } @@ -3878,9 +3750,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; - /* Sanity check the batch buffer, prior to moving objects */ - exec_offset = exec_list[args->buffer_count - 1].offset; - ret = i915_gem_check_execbuffer (args, exec_offset); + /* Sanity check the batch buffer */ + exec_offset = to_intel_bo(batch_obj)->gtt_offset; + ret = i915_gem_check_execbuffer(args, exec_offset); if (ret != 0) { DRM_ERROR("execbuf with invalid offset/length\n"); goto err; @@ -3898,7 +3770,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_gem_object *obj = object_list[i]; /* Compute new gpu domains and update invalidate/flush */ - i915_gem_object_set_to_gpu_domain(obj); + i915_gem_object_set_to_gpu_domain(obj, ring); } if (dev->invalidate_domains | dev->flush_domains) { @@ -3908,7 +3780,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, dev->invalidate_domains, dev->flush_domains); #endif - i915_gem_flush(dev, file_priv, + i915_gem_flush(dev, file, dev->invalidate_domains, dev->flush_domains, dev_priv->mm.flush_rings); @@ -3916,14 +3788,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; - struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t old_write_domain = obj->write_domain; - obj->write_domain = obj->pending_write_domain; - if (obj->write_domain) - list_move_tail(&obj_priv->gpu_write_list, - &dev_priv->mm.gpu_write_list); - trace_i915_gem_object_change_domain(obj, obj->read_domains, old_write_domain); @@ -3943,9 +3809,38 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ~0); #endif + /* Check for any pending flips. 
As we only maintain a flip queue depth + * of 1, we can simply insert a WAIT for the next display flip prior + * to executing the batch and avoid stalling the CPU. + */ + flips = 0; + for (i = 0; i < args->buffer_count; i++) { + if (object_list[i]->write_domain) + flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip); + } + if (flips) { + int plane, flip_mask; + + for (plane = 0; flips >> plane; plane++) { + if (((flips >> plane) & 1) == 0) + continue; + + if (plane) + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; + else + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; + + intel_ring_begin(dev, ring, 2); + intel_ring_emit(dev, ring, + MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(dev, ring, MI_NOOP); + intel_ring_advance(dev, ring); + } + } + /* Exec the batchbuffer */ ret = ring->dispatch_gem_execbuffer(dev, ring, args, - cliprects, exec_offset); + cliprects, exec_offset); if (ret) { DRM_ERROR("dispatch failed %d\n", ret); goto err; @@ -3959,18 +3854,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; - obj_priv = to_intel_bo(obj); i915_gem_object_move_to_active(obj, ring); + if (obj->write_domain) + list_move_tail(&to_intel_bo(obj)->gpu_write_list, + &ring->gpu_write_list); } - i915_add_request(dev, file_priv, request, ring); + i915_add_request(dev, file, request, ring); request = NULL; err: - for (i = 0; i < pinned; i++) - i915_gem_object_unpin(object_list[i]); - for (i = 0; i < args->buffer_count; i++) { if (object_list[i]) { obj_priv = to_intel_bo(object_list[i]); @@ -3982,20 +3876,6 @@ err: mutex_unlock(&dev->struct_mutex); pre_mutex_err: - /* Copy the updated relocations out regardless of current error - * state. Failure to update the relocs would mean that the next - * time userland calls execbuf, it would do so with presumed offset - * state that didn't match the actual object state. 
- */ - ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count, - relocs); - if (ret2 != 0) { - DRM_ERROR("Failed to copy relocations back out: %d\n", ret2); - - if (ret == 0) - ret = ret2; - } - drm_free_large(object_list); kfree(cliprects); kfree(request); @@ -4187,7 +4067,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) if (obj_priv->pin_count == 1) { i915_gem_info_add_pin(dev_priv, obj->size); if (!obj_priv->active) - list_move_tail(&obj_priv->list, + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); } @@ -4213,7 +4093,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj) */ if (obj_priv->pin_count == 0) { if (!obj_priv->active) - list_move_tail(&obj_priv->list, + list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); i915_gem_info_remove_pin(dev_priv, obj->size); } @@ -4229,44 +4109,36 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; int ret; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { - DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", - args->handle); - return -ENOENT; + ret = -ENOENT; + goto unlock; } obj_priv = to_intel_bo(obj); - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); - return ret; - } - if (obj_priv->madv != I915_MADV_WILLNEED) { DRM_ERROR("Attempting to pin a purgeable buffer\n"); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; + ret = -EINVAL; + goto out; } if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", args->handle); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; + ret = -EINVAL; + goto out; } obj_priv->user_pin_count++; obj_priv->pin_filp = file_priv; if (obj_priv->user_pin_count == 1) { ret = i915_gem_object_pin(obj, args->alignment); - if (ret != 0) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return ret; - } + if (ret) + goto out; } /* XXX - flush the CPU caches for pinned objects @@ -4274,10 +4146,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, */ i915_gem_object_flush_cpu_write_domain(obj); args->offset = obj_priv->gtt_offset; +out: drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - - return 0; + return ret; } int @@ -4289,27 +4162,22 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; int ret; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { - DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", - args->handle); - return -ENOENT; + ret = -ENOENT; + goto unlock; } - obj_priv = to_intel_bo(obj); - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); - return ret; - } - if (obj_priv->pin_filp != file_priv) { DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", args->handle); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - return -EINVAL; + ret = -EINVAL; + goto out; } obj_priv->user_pin_count--; if (obj_priv->user_pin_count == 0) { @@ -4317,9 +4185,11 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, i915_gem_object_unpin(obj); } +out: drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - return 0; + return ret; } 
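The pin, unpin, busy and madvise ioctl hunks in this part of the patch all apply the same restructuring: take struct_mutex before the handle lookup, then funnel every failure path through shared out:/unlock: labels instead of repeating the unreference/unlock pair on each early return. A minimal sketch of that shape, using only calls that appear in the hunks themselves; the args struct, its fields, and the example_do_work() helper are hypothetical stand-ins for the per-ioctl details:

struct drm_example_args {
	u32 handle;	/* hypothetical ioctl payload */
};

static int example_object_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_example_args *args = data;
	struct drm_gem_object *obj;
	int ret;

	/* Lock first, so the reference returned by the lookup is always
	 * taken and dropped under struct_mutex. */
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	ret = example_do_work(obj);	/* hypothetical per-ioctl body;
					 * errors fall through to out: */
out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

Compared with the removed code, this also drops the drm_gem_object_unreference_unlocked() special case: because the lookup now happens under the mutex, there is never a reference to drop without the lock held.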
int @@ -4331,25 +4201,22 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; int ret; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) { - DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", - args->handle); - return -ENOENT; - } - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); + if (ret) return ret; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) { + ret = -ENOENT; + goto unlock; } + obj_priv = to_intel_bo(obj); /* Count all active objects as busy, even if they are currently not used * by the gpu. Users of this interface expect objects to eventually * become non-busy without any further actions, therefore emit any * necessary flushes here. */ - obj_priv = to_intel_bo(obj); args->busy = obj_priv->active; if (args->busy) { /* Unconditionally flush objects, even when the gpu still uses this @@ -4373,8 +4240,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, } drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - return 0; + return ret; } int @@ -4401,26 +4269,20 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, return -EINVAL; } + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { - DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n", - args->handle); - return -ENOENT; + ret = -ENOENT; + goto unlock; } obj_priv = to_intel_bo(obj); - ret = i915_mutex_lock_interruptible(dev); - if (ret) { - drm_gem_object_unreference_unlocked(obj); - return ret; - } - if (obj_priv->pin_count) { - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); - - DRM_ERROR("Attempted i915_gem_madvise_ioctl() on a pinned object\n"); - return -EINVAL; + ret = -EINVAL; + goto out; } if (obj_priv->madv != __I915_MADV_PURGED) @@ -4433,10 +4295,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, args->retained = obj_priv->madv != __I915_MADV_PURGED; +out: drm_gem_object_unreference(obj); +unlock: mutex_unlock(&dev->struct_mutex); - - return 0; + return ret; } struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, @@ -4462,12 +4325,11 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, obj->agp_type = AGP_USER_MEMORY; obj->base.driver_private = NULL; obj->fence_reg = I915_FENCE_REG_NONE; - INIT_LIST_HEAD(&obj->list); + INIT_LIST_HEAD(&obj->mm_list); + INIT_LIST_HEAD(&obj->ring_list); INIT_LIST_HEAD(&obj->gpu_write_list); obj->madv = I915_MADV_WILLNEED; - trace_i915_gem_object_create(&obj->base); - return &obj->base; } @@ -4487,7 +4349,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj) ret = i915_gem_object_unbind(obj); if (ret == -ERESTARTSYS) { - list_move(&obj_priv->list, + list_move(&obj_priv->mm_list, &dev_priv->mm.deferred_free_list); return; } @@ -4527,10 +4389,7 @@ i915_gem_idle(struct drm_device *dev) mutex_lock(&dev->struct_mutex); - if (dev_priv->mm.suspended || - (dev_priv->render_ring.gem_object == NULL) || - (HAS_BSD(dev) && - dev_priv->bsd_ring.gem_object == NULL)) { + if (dev_priv->mm.suspended) { mutex_unlock(&dev->struct_mutex); return 0; } @@ -4651,10 +4510,18 @@ i915_gem_init_ringbuffer(struct drm_device *dev) goto cleanup_render_ring; } + if (HAS_BLT(dev)) { + ret = intel_init_blt_ring_buffer(dev); + if (ret) + goto cleanup_bsd_ring; + } + dev_priv->next_seqno = 1; return 0; +cleanup_bsd_ring: + 
intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); cleanup_render_ring: intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); cleanup_pipe_control: @@ -4669,8 +4536,8 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); - if (HAS_BSD(dev)) - intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); + intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring); if (HAS_PIPE_CONTROL(dev)) i915_gem_cleanup_pipe_control(dev); } @@ -4699,12 +4566,15 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, return ret; } + BUG_ON(!list_empty(&dev_priv->mm.active_list)); BUG_ON(!list_empty(&dev_priv->render_ring.active_list)); - BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list)); + BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list)); + BUG_ON(!list_empty(&dev_priv->blt_ring.active_list)); BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); - BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); + BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list)); + BUG_ON(!list_empty(&dev_priv->blt_ring.request_list)); mutex_unlock(&dev->struct_mutex); ret = drm_irq_install(dev); @@ -4746,24 +4616,29 @@ i915_gem_lastclose(struct drm_device *dev) DRM_ERROR("failed to idle hardware: %d\n", ret); } +static void +init_ring_lists(struct intel_ring_buffer *ring) +{ + INIT_LIST_HEAD(&ring->active_list); + INIT_LIST_HEAD(&ring->request_list); + INIT_LIST_HEAD(&ring->gpu_write_list); +} + void i915_gem_load(struct drm_device *dev) { int i; drm_i915_private_t *dev_priv = dev->dev_private; + INIT_LIST_HEAD(&dev_priv->mm.active_list); INIT_LIST_HEAD(&dev_priv->mm.flushing_list); - INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); INIT_LIST_HEAD(&dev_priv->mm.pinned_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); - INIT_LIST_HEAD(&dev_priv->render_ring.active_list); - INIT_LIST_HEAD(&dev_priv->render_ring.request_list); - if (HAS_BSD(dev)) { - INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list); - INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list); - } + init_ring_lists(&dev_priv->render_ring); + init_ring_lists(&dev_priv->bsd_ring); + init_ring_lists(&dev_priv->blt_ring); for (i = 0; i < 16; i++) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, @@ -5026,9 +4901,9 @@ i915_gpu_is_active(struct drm_device *dev) int lists_empty; lists_empty = list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->render_ring.active_list); - if (HAS_BSD(dev)) - lists_empty &= list_empty(&dev_priv->bsd_ring.active_list); + list_empty(&dev_priv->render_ring.active_list) && + list_empty(&dev_priv->bsd_ring.active_list) && + list_empty(&dev_priv->blt_ring.active_list); return !lists_empty; } @@ -5050,7 +4925,7 @@ i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) if (mutex_trylock(&dev->struct_mutex)) { list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, - list) + mm_list) cnt++; mutex_unlock(&dev->struct_mutex); } @@ -5076,7 +4951,7 @@ rescan: list_for_each_entry_safe(obj_priv, next_obj, &dev_priv->mm.inactive_list, - list) { + mm_list) { if (i915_gem_object_is_purgeable(obj_priv)) { i915_gem_object_unbind(&obj_priv->base); if (--nr_to_scan <= 0) @@ -5105,7 +4980,7 @@ rescan: 
list_for_each_entry_safe(obj_priv, next_obj, &dev_priv->mm.inactive_list, - list) { + mm_list) { if (nr_to_scan > 0) { i915_gem_object_unbind(&obj_priv->base); nr_to_scan--; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 3d7fbf32bb18..43a4013f53fa 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -31,49 +31,6 @@ #include "i915_drv.h" #include "i915_drm.h" -static struct drm_i915_gem_object * -i915_gem_next_active_object(struct drm_device *dev, - struct list_head **render_iter, - struct list_head **bsd_iter) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL; - - if (*render_iter != &dev_priv->render_ring.active_list) - render_obj = list_entry(*render_iter, - struct drm_i915_gem_object, - list); - - if (HAS_BSD(dev)) { - if (*bsd_iter != &dev_priv->bsd_ring.active_list) - bsd_obj = list_entry(*bsd_iter, - struct drm_i915_gem_object, - list); - - if (render_obj == NULL) { - *bsd_iter = (*bsd_iter)->next; - return bsd_obj; - } - - if (bsd_obj == NULL) { - *render_iter = (*render_iter)->next; - return render_obj; - } - - /* XXX can we handle seqno wrapping? */ - if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) { - *render_iter = (*render_iter)->next; - return render_obj; - } else { - *bsd_iter = (*bsd_iter)->next; - return bsd_obj; - } - } else { - *render_iter = (*render_iter)->next; - return render_obj; - } -} - static bool mark_free(struct drm_i915_gem_object *obj_priv, struct list_head *unwind) @@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv, return drm_mm_scan_add_block(obj_priv->gtt_space); } -#define i915_for_each_active_object(OBJ, R, B) \ - *(R) = dev_priv->render_ring.active_list.next; \ - *(B) = dev_priv->bsd_ring.active_list.next; \ - while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL) - int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment) { drm_i915_private_t *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; struct drm_i915_gem_object *obj_priv; - struct list_head *render_iter, *bsd_iter; int ret = 0; i915_gem_retire_requests(dev); @@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); /* First see if there is a large enough contiguous idle region... */ - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { if (mark_free(obj_priv, &unwind_list)) goto found; } /* Now merge in the soon-to-be-expired objects... */ - i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { /* Does the object require an outstanding flush? */ if (obj_priv->base.write_domain || obj_priv->pin_count) continue; @@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen } /* Finally add anything with a pending flush (in order of retirement) */ - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { if (obj_priv->pin_count) continue; if (mark_free(obj_priv, &unwind_list)) goto found; } - i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) { + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { if (! 
obj_priv->base.write_domain || obj_priv->pin_count) continue; @@ -215,8 +166,8 @@ i915_gem_evict_everything(struct drm_device *dev) lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && - (!HAS_BSD(dev) - || list_empty(&dev_priv->bsd_ring.active_list))); + list_empty(&dev_priv->bsd_ring.active_list) && + list_empty(&dev_priv->blt_ring.active_list)); if (lists_empty) return -ENOSPC; @@ -234,8 +185,8 @@ i915_gem_evict_everything(struct drm_device *dev) lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && list_empty(&dev_priv->render_ring.active_list) && - (!HAS_BSD(dev) - || list_empty(&dev_priv->bsd_ring.active_list))); + list_empty(&dev_priv->bsd_ring.active_list) && + list_empty(&dev_priv->blt_ring.active_list)); BUG_ON(!lists_empty); return 0; @@ -253,7 +204,7 @@ i915_gem_evict_inactive(struct drm_device *dev) obj = &list_first_entry(&dev_priv->mm.inactive_list, struct drm_i915_gem_object, - list)->base; + mm_list)->base; ret = i915_gem_object_unbind(obj); if (ret != 0) { diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 8c9ffc4768ee..af352de70be1 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -92,7 +92,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; - if (IS_IRONLAKE(dev) || IS_GEN6(dev)) { + if (IS_GEN5(dev) || IS_GEN6(dev)) { /* On Ironlake whatever DRAM config, GPU always do * same swizzling setup. */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 64c07c24e300..237b8bdb5994 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -293,13 +293,26 @@ static void i915_handle_rps_change(struct drm_device *dev) return; } +static void notify_ring(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 seqno = ring->get_seqno(dev, ring); + ring->irq_gem_seqno = seqno; + trace_i915_gem_request_complete(dev, seqno); + wake_up_all(&ring->irq_queue); + dev_priv->hangcheck_count = 0; + mod_timer(&dev_priv->hangcheck_timer, + jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); +} + static irqreturn_t ironlake_irq_handler(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; int ret = IRQ_NONE; u32 de_iir, gt_iir, de_ier, pch_iir; + u32 hotplug_mask; struct drm_i915_master_private *master_priv; - struct intel_ring_buffer *render_ring = &dev_priv->render_ring; u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; if (IS_GEN6(dev)) @@ -317,6 +330,11 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) goto done; + if (HAS_PCH_CPT(dev)) + hotplug_mask = SDE_HOTPLUG_MASK_CPT; + else + hotplug_mask = SDE_HOTPLUG_MASK; + ret = IRQ_HANDLED; if (dev->primary->master) { @@ -326,17 +344,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) READ_BREADCRUMB(dev_priv); } - if (gt_iir & GT_PIPE_NOTIFY) { - u32 seqno = render_ring->get_seqno(dev, render_ring); - render_ring->irq_gem_seqno = seqno; - trace_i915_gem_request_complete(dev, seqno); - wake_up_all(&dev_priv->render_ring.irq_queue); - dev_priv->hangcheck_count = 0; - mod_timer(&dev_priv->hangcheck_timer, - jiffies + 
msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); - } + if (gt_iir & GT_PIPE_NOTIFY) + notify_ring(dev, &dev_priv->render_ring); if (gt_iir & bsd_usr_interrupt) - wake_up_all(&dev_priv->bsd_ring.irq_queue); + notify_ring(dev, &dev_priv->bsd_ring); + if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT) + notify_ring(dev, &dev_priv->blt_ring); if (de_iir & DE_GSE) intel_opregion_gse_intr(dev); @@ -358,10 +371,8 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) drm_handle_vblank(dev, 1); /* check event from PCH */ - if ((de_iir & DE_PCH_EVENT) && - (pch_iir & SDE_HOTPLUG_MASK)) { + if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask)) queue_work(dev_priv->wq, &dev_priv->hotplug_work); - } if (de_iir & DE_PCU_EVENT) { I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); @@ -604,9 +615,7 @@ static void i915_capture_error_state(struct drm_device *dev) batchbuffer[0] = NULL; batchbuffer[1] = NULL; count = 0; - list_for_each_entry(obj_priv, - &dev_priv->render_ring.active_list, list) { - + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { struct drm_gem_object *obj = &obj_priv->base; if (batchbuffer[0] == NULL && @@ -623,7 +632,7 @@ static void i915_capture_error_state(struct drm_device *dev) } /* Scan the other lists for completeness for those bizarre errors. */ if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { struct drm_gem_object *obj = &obj_priv->base; if (batchbuffer[0] == NULL && @@ -641,7 +650,7 @@ static void i915_capture_error_state(struct drm_device *dev) } } if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { struct drm_gem_object *obj = &obj_priv->base; if (batchbuffer[0] == NULL && @@ -660,7 +669,7 @@ static void i915_capture_error_state(struct drm_device *dev) } /* We need to copy these to an anonymous buffer as the simplest - * method to avoid being overwritten by userpace. + * method to avoid being overwritten by userspace. 
*/ error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); if (batchbuffer[1] != batchbuffer[0]) @@ -682,8 +691,7 @@ static void i915_capture_error_state(struct drm_device *dev) if (error->active_bo) { int i = 0; - list_for_each_entry(obj_priv, - &dev_priv->render_ring.active_list, list) { + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { struct drm_gem_object *obj = &obj_priv->base; error->active_bo[i].size = obj->size; @@ -880,6 +888,8 @@ static void i915_handle_error(struct drm_device *dev, bool wedged) wake_up_all(&dev_priv->render_ring.irq_queue); if (HAS_BSD(dev)) wake_up_all(&dev_priv->bsd_ring.irq_queue); + if (HAS_BLT(dev)) + wake_up_all(&dev_priv->blt_ring.irq_queue); } queue_work(dev_priv->wq, &dev_priv->error_work); @@ -940,7 +950,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) unsigned long irqflags; int irq_received; int ret = IRQ_NONE; - struct intel_ring_buffer *render_ring = &dev_priv->render_ring; atomic_inc(&dev_priv->irq_received); @@ -1017,18 +1026,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) READ_BREADCRUMB(dev_priv); } - if (iir & I915_USER_INTERRUPT) { - u32 seqno = render_ring->get_seqno(dev, render_ring); - render_ring->irq_gem_seqno = seqno; - trace_i915_gem_request_complete(dev, seqno); - wake_up_all(&dev_priv->render_ring.irq_queue); - dev_priv->hangcheck_count = 0; - mod_timer(&dev_priv->hangcheck_timer, - jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); - } - + if (iir & I915_USER_INTERRUPT) + notify_ring(dev, &dev_priv->render_ring); if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) - wake_up_all(&dev_priv->bsd_ring.irq_queue); + notify_ring(dev, &dev_priv->bsd_ring); if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { intel_prepare_page_flip(dev, 0); @@ -1357,6 +1358,12 @@ void i915_hangcheck_elapsed(unsigned long data) missed_wakeup = true; } + if (dev_priv->blt_ring.waiting_gem_seqno && + waitqueue_active(&dev_priv->blt_ring.irq_queue)) { + wake_up_all(&dev_priv->blt_ring.irq_queue); + missed_wakeup = true; + } + if (missed_wakeup) DRM_ERROR("Hangcheck timer elapsed... 
GPU idle, missed IRQ.\n"); return; @@ -1431,8 +1438,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; - u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | - SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; + u32 hotplug_mask; dev_priv->irq_mask_reg = ~display_mask; dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; @@ -1443,8 +1449,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev) I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); (void) I915_READ(DEIER); - if (IS_GEN6(dev)) - render_mask = GT_PIPE_NOTIFY | GT_GEN6_BSD_USER_INTERRUPT; + if (IS_GEN6(dev)) { + render_mask = + GT_PIPE_NOTIFY | + GT_GEN6_BSD_USER_INTERRUPT | + GT_BLT_USER_INTERRUPT; + } dev_priv->gt_irq_mask_reg = ~render_mask; dev_priv->gt_irq_enable_reg = render_mask; @@ -1454,11 +1464,20 @@ static int ironlake_irq_postinstall(struct drm_device *dev) if (IS_GEN6(dev)) { I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT); + I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT); } I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); (void) I915_READ(GTIER); + if (HAS_PCH_CPT(dev)) { + hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | + SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ; + } else { + hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | + SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; + } + dev_priv->pch_irq_mask_reg = ~hotplug_mask; dev_priv->pch_irq_enable_reg = hotplug_mask; @@ -1515,9 +1534,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev) u32 error_mask; DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); - if (HAS_BSD(dev)) DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); + if (HAS_BLT(dev)) + DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue); dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d02de212e6ad..25ed911a3112 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -263,6 +263,7 @@ #define RENDER_RING_BASE 0x02000 #define BSD_RING_BASE 0x04000 #define GEN6_BSD_RING_BASE 0x12000 +#define BLT_RING_BASE 0x22000 #define RING_TAIL(base) ((base)+0x30) #define RING_HEAD(base) ((base)+0x34) #define RING_START(base) ((base)+0x38) @@ -661,13 +662,6 @@ #define LVDS 0x61180 #define LVDS_ON (1<<31) -#define ADPA 0x61100 -#define ADPA_DPMS_MASK (~(3<<10)) -#define ADPA_DPMS_ON (0<<10) -#define ADPA_DPMS_SUSPEND (1<<10) -#define ADPA_DPMS_STANDBY (2<<10) -#define ADPA_DPMS_OFF (3<<10) - /* Scratch pad debug 0 reg: */ #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 @@ -1200,6 +1194,7 @@ #define ADPA_DPMS_STANDBY (2<<10) #define ADPA_DPMS_OFF (3<<10) + /* Hotplug control (945+ only) */ #define PORT_HOTPLUG_EN 0x61110 #define HDMIB_HOTPLUG_INT_EN (1 << 29) @@ -1358,6 +1353,22 @@ #define LVDS_B0B3_POWER_DOWN (0 << 2) #define LVDS_B0B3_POWER_UP (3 << 2) +/* Video Data Island Packet control */ +#define VIDEO_DIP_DATA 0x61178 +#define VIDEO_DIP_CTL 0x61170 +#define VIDEO_DIP_ENABLE (1 << 31) +#define VIDEO_DIP_PORT_B (1 << 29) +#define VIDEO_DIP_PORT_C (2 << 29) +#define VIDEO_DIP_ENABLE_AVI (1 << 21) +#define VIDEO_DIP_ENABLE_VENDOR (2 << 21) +#define VIDEO_DIP_ENABLE_SPD (8 << 21) +#define VIDEO_DIP_SELECT_AVI (0 << 19) +#define VIDEO_DIP_SELECT_VENDOR (1 << 19) +#define 
VIDEO_DIP_SELECT_SPD (3 << 19) +#define VIDEO_DIP_FREQ_ONCE (0 << 16) +#define VIDEO_DIP_FREQ_VSYNC (1 << 16) +#define VIDEO_DIP_FREQ_2VSYNC (2 << 16) + /* Panel power sequencing */ #define PP_STATUS 0x61200 #define PP_ON (1 << 31) @@ -1373,6 +1384,9 @@ #define PP_SEQUENCE_ON (1 << 28) #define PP_SEQUENCE_OFF (2 << 28) #define PP_SEQUENCE_MASK 0x30000000 +#define PP_CYCLE_DELAY_ACTIVE (1 << 27) +#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3) +#define PP_SEQUENCE_STATE_MASK 0x0000000f #define PP_CONTROL 0x61204 #define POWER_TARGET_ON (1 << 0) #define PP_ON_DELAYS 0x61208 @@ -2564,6 +2578,7 @@ #define GT_USER_INTERRUPT (1 << 0) #define GT_BSD_USER_INTERRUPT (1 << 5) #define GT_GEN6_BSD_USER_INTERRUPT (1 << 12) +#define GT_BLT_USER_INTERRUPT (1 << 22) #define GTISR 0x44010 #define GTIMR 0x44014 @@ -2598,6 +2613,10 @@ #define SDE_PORTD_HOTPLUG_CPT (1 << 23) #define SDE_PORTC_HOTPLUG_CPT (1 << 22) #define SDE_PORTB_HOTPLUG_CPT (1 << 21) +#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ + SDE_PORTD_HOTPLUG_CPT | \ + SDE_PORTC_HOTPLUG_CPT | \ + SDE_PORTB_HOTPLUG_CPT) #define SDEISR 0xc4000 #define SDEIMR 0xc4004 @@ -2779,6 +2798,10 @@ #define FDI_RXA_CHICKEN 0xc200c #define FDI_RXB_CHICKEN 0xc2010 #define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) +#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN) + +#define SOUTH_DSPCLK_GATE_D 0xc2020 +#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) /* CPU: FDI_TX */ #define FDI_TXA_CTL 0x60100 diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c new file mode 100644 index 000000000000..65c88f9ba12c --- /dev/null +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -0,0 +1,286 @@ +/* + * Intel ACPI functions + * + * _DSM related code stolen from nouveau_acpi.c. + */ +#include <linux/pci.h> +#include <linux/acpi.h> +#include <linux/vga_switcheroo.h> +#include <acpi/acpi_drivers.h> + +#include "drmP.h" + +#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... 
*/ + +#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */ +#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */ + +static struct intel_dsm_priv { + acpi_handle dhandle; +} intel_dsm_priv; + +static const u8 intel_dsm_guid[] = { + 0xd3, 0x73, 0xd8, 0x7e, + 0xd0, 0xc2, + 0x4f, 0x4e, + 0xa8, 0x54, + 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c +}; + +static int intel_dsm(acpi_handle handle, int func, int arg) +{ + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_object_list input; + union acpi_object params[4]; + union acpi_object *obj; + u32 result; + int ret = 0; + + input.count = 4; + input.pointer = params; + params[0].type = ACPI_TYPE_BUFFER; + params[0].buffer.length = sizeof(intel_dsm_guid); + params[0].buffer.pointer = (char *)intel_dsm_guid; + params[1].type = ACPI_TYPE_INTEGER; + params[1].integer.value = INTEL_DSM_REVISION_ID; + params[2].type = ACPI_TYPE_INTEGER; + params[2].integer.value = func; + params[3].type = ACPI_TYPE_INTEGER; + params[3].integer.value = arg; + + ret = acpi_evaluate_object(handle, "_DSM", &input, &output); + if (ret) { + DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); + return ret; + } + + obj = (union acpi_object *)output.pointer; + + result = 0; + switch (obj->type) { + case ACPI_TYPE_INTEGER: + result = obj->integer.value; + break; + + case ACPI_TYPE_BUFFER: + if (obj->buffer.length == 4) { + result =(obj->buffer.pointer[0] | + (obj->buffer.pointer[1] << 8) | + (obj->buffer.pointer[2] << 16) | + (obj->buffer.pointer[3] << 24)); + break; + } + default: + ret = -EINVAL; + break; + } + if (result == 0x80000002) + ret = -ENODEV; + + kfree(output.pointer); + return ret; +} + +static char *intel_dsm_port_name(u8 id) +{ + switch (id) { + case 0: + return "Reserved"; + case 1: + return "Analog VGA"; + case 2: + return "LVDS"; + case 3: + return "Reserved"; + case 4: + return "HDMI/DVI_B"; + case 5: + return "HDMI/DVI_C"; + case 6: + return "HDMI/DVI_D"; + case 7: + return "DisplayPort_A"; + case 8: + return "DisplayPort_B"; + case 9: + return "DisplayPort_C"; + case 0xa: + return "DisplayPort_D"; + case 0xb: + case 0xc: + case 0xd: + return "Reserved"; + case 0xe: + return "WiDi"; + default: + return "bad type"; + } +} + +static char *intel_dsm_mux_type(u8 type) +{ + switch (type) { + case 0: + return "unknown"; + case 1: + return "No MUX, iGPU only"; + case 2: + return "No MUX, dGPU only"; + case 3: + return "MUXed between iGPU and dGPU"; + default: + return "bad type"; + } +} + +static void intel_dsm_platform_mux_info(void) +{ + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_object_list input; + union acpi_object params[4]; + union acpi_object *pkg; + int i, ret; + + input.count = 4; + input.pointer = params; + params[0].type = ACPI_TYPE_BUFFER; + params[0].buffer.length = sizeof(intel_dsm_guid); + params[0].buffer.pointer = (char *)intel_dsm_guid; + params[1].type = ACPI_TYPE_INTEGER; + params[1].integer.value = INTEL_DSM_REVISION_ID; + params[2].type = ACPI_TYPE_INTEGER; + params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO; + params[3].type = ACPI_TYPE_INTEGER; + params[3].integer.value = 0; + + ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input, + &output); + if (ret) { + DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); + goto out; + } + + pkg = (union acpi_object *)output.pointer; + + if (pkg->type == ACPI_TYPE_PACKAGE) { + union acpi_object *connector_count = &pkg->package.elements[0]; + DRM_DEBUG_DRIVER("MUX info connectors: %lld\n", + (unsigned long 
long)connector_count->integer.value); + for (i = 1; i < pkg->package.count; i++) { + union acpi_object *obj = &pkg->package.elements[i]; + union acpi_object *connector_id = + &obj->package.elements[0]; + union acpi_object *info = &obj->package.elements[1]; + DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n", + (unsigned long long)connector_id->integer.value); + DRM_DEBUG_DRIVER(" port id: %s\n", + intel_dsm_port_name(info->buffer.pointer[0])); + DRM_DEBUG_DRIVER(" display mux info: %s\n", + intel_dsm_mux_type(info->buffer.pointer[1])); + DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n", + intel_dsm_mux_type(info->buffer.pointer[2])); + DRM_DEBUG_DRIVER(" hpd mux info: %s\n", + intel_dsm_mux_type(info->buffer.pointer[3])); + } + } else { + DRM_ERROR("MUX INFO call failed\n"); + } + +out: + kfree(output.pointer); +} + +static int intel_dsm_switchto(enum vga_switcheroo_client_id id) +{ + return 0; +} + +static int intel_dsm_power_state(enum vga_switcheroo_client_id id, + enum vga_switcheroo_state state) +{ + return 0; +} + +static int intel_dsm_init(void) +{ + return 0; +} + +static int intel_dsm_get_client_id(struct pci_dev *pdev) +{ + if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) + return VGA_SWITCHEROO_IGD; + else + return VGA_SWITCHEROO_DIS; +} + +static struct vga_switcheroo_handler intel_dsm_handler = { + .switchto = intel_dsm_switchto, + .power_state = intel_dsm_power_state, + .init = intel_dsm_init, + .get_client_id = intel_dsm_get_client_id, +}; + +static bool intel_dsm_pci_probe(struct pci_dev *pdev) +{ + acpi_handle dhandle, intel_handle; + acpi_status status; + int ret; + + dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + if (!dhandle) + return false; + + status = acpi_get_handle(dhandle, "_DSM", &intel_handle); + if (ACPI_FAILURE(status)) { + DRM_DEBUG_KMS("no _DSM method for intel device\n"); + return false; + } + + ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0); + if (ret < 0) { + DRM_ERROR("failed to get supported _DSM functions\n"); + return false; + } + + intel_dsm_priv.dhandle = dhandle; + + intel_dsm_platform_mux_info(); + return true; +} + +static bool intel_dsm_detect(void) +{ + char acpi_method_name[255] = { 0 }; + struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; + struct pci_dev *pdev = NULL; + bool has_dsm = false; + int vga_count = 0; + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + vga_count++; + has_dsm |= intel_dsm_pci_probe(pdev); + } + + if (vga_count == 2 && has_dsm) { + acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); + DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n", + acpi_method_name); + return true; + } + + return false; +} + +void intel_register_dsm_handler(void) +{ + if (!intel_dsm_detect()) + return; + + vga_switcheroo_register_handler(&intel_dsm_handler); +} + +void intel_unregister_dsm_handler(void) +{ + vga_switcheroo_unregister_handler(); +} diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b1f73ac0f3fd..b0b1200ed650 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -24,6 +24,7 @@ * Eric Anholt <eric@anholt.net> * */ +#include <drm/drm_dp_helper.h> #include "drmP.h" #include "drm.h" #include "i915_drm.h" @@ -264,10 +265,10 @@ parse_general_features(struct drm_i915_private *dev_priv, dev_priv->lvds_use_ssc = general->enable_ssc; if (dev_priv->lvds_use_ssc) { - if (IS_I85X(dev_priv->dev)) + if (IS_I85X(dev)) dev_priv->lvds_ssc_freq = general->ssc_freq ? 
66 : 48; - else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev)) + else if (IS_GEN5(dev) || IS_GEN6(dev)) dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120; else @@ -413,6 +414,8 @@ static void parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { struct bdb_edp *edp; + struct edp_power_seq *edp_pps; + struct edp_link_params *edp_link_params; edp = find_section(bdb, BDB_EDP); if (!edp) { @@ -437,19 +440,54 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) break; } - dev_priv->edp.rate = edp->link_params[panel_type].rate; - dev_priv->edp.lanes = edp->link_params[panel_type].lanes; - dev_priv->edp.preemphasis = edp->link_params[panel_type].preemphasis; - dev_priv->edp.vswing = edp->link_params[panel_type].vswing; + /* Get the eDP sequencing and link info */ + edp_pps = &edp->power_seqs[panel_type]; + edp_link_params = &edp->link_params[panel_type]; - DRM_DEBUG_KMS("eDP vBIOS settings: bpp=%d, rate=%d, lanes=%d, preemphasis=%d, vswing=%d\n", - dev_priv->edp.bpp, - dev_priv->edp.rate, - dev_priv->edp.lanes, - dev_priv->edp.preemphasis, - dev_priv->edp.vswing); + dev_priv->edp.pps = *edp_pps; - dev_priv->edp.initialized = true; + dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 : + DP_LINK_BW_1_62; + switch (edp_link_params->lanes) { + case 0: + dev_priv->edp.lanes = 1; + break; + case 1: + dev_priv->edp.lanes = 2; + break; + case 3: + default: + dev_priv->edp.lanes = 4; + break; + } + switch (edp_link_params->preemphasis) { + case 0: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; + break; + case 1: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; + break; + case 2: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; + break; + case 3: + dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; + break; + } + switch (edp_link_params->vswing) { + case 0: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; + break; + case 1: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; + break; + case 2: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; + break; + case 3: + dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; + break; + } } static void @@ -539,7 +577,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) } /** - * intel_init_bios - initialize VBIOS settings & find VBT + * intel_parse_bios - find VBT and initialize settings from the BIOS * @dev: DRM device * * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers @@ -548,7 +586,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) * Returns 0 on success, nonzero on failure. */ bool -intel_init_bios(struct drm_device *dev) +intel_parse_bios(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct pci_dev *pdev = dev->pdev; @@ -609,3 +647,20 @@ intel_init_bios(struct drm_device *dev) return 0; } + +/* Ensure that vital registers have been initialised, even if the BIOS + * is absent or just failing to do its job. + */ +void intel_setup_bios(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* Set the Panel Power On/Off timings if uninitialized. 
*/ + if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) { + /* Set T2 to 40ms and T5 to 200ms */ + I915_WRITE(PP_ON_DELAYS, 0x019007d0); + + /* Set T3 to 35ms and Tx to 200ms */ + I915_WRITE(PP_OFF_DELAYS, 0x015e07d0); + } +} diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index e1a598f2a966..5f8e4edcbbb9 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -467,7 +467,8 @@ struct bdb_edp { struct edp_link_params link_params[16]; } __attribute__ ((packed)); -bool intel_init_bios(struct drm_device *dev); +void intel_setup_bios(struct drm_device *dev); +bool intel_parse_bios(struct drm_device *dev); /* * Driver<->VBIOS interaction occurs through scratch bits in diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 389fcd2aea1f..c55c77043357 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -191,7 +191,8 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); if (turn_off_dac) { - I915_WRITE(PCH_ADPA, temp); + /* Make sure hotplug is enabled */ + I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE); (void)I915_READ(PCH_ADPA); } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 96d08a9f3aaa..990f065374b2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -345,8 +345,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, static inline u32 /* units of 100MHz */ intel_fdi_link_freq(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; + if (IS_GEN5(dev)) { + struct drm_i915_private *dev_priv = dev->dev_private; + return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; + } else + return 27; } static const intel_limit_t intel_limits_i8xx_dvo = { @@ -932,10 +935,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; intel_clock_t clock; - /* return directly when it is eDP */ - if (HAS_eDP) - return true; - if (target < 200000) { clock.n = 1; clock.p1 = 2; @@ -1719,6 +1718,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) POSTING_READ(reg); udelay(150); + /* Ironlake workaround, enable clock pointer after FDI enable*/ + I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE); + reg = FDI_RX_IIR(pipe); for (tries = 0; tries < 5; tries++) { temp = I915_READ(reg); @@ -1764,6 +1766,28 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) DRM_ERROR("FDI train 2 fail!\n"); DRM_DEBUG_KMS("FDI train done\n"); + + /* enable normal train */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; + I915_WRITE(reg, temp); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_NORMAL_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_NONE; + } + I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); + + /* wait one idle pattern time */ + POSTING_READ(reg); + udelay(1000); } static const int const snb_b_fdi_train_param [] = { @@ -2002,8 +2026,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) /* Enable panel fitting for LVDS */ if (dev_priv->pch_pf_size && - 
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) - || HAS_eDP || intel_pch_has_edp(crtc))) { + (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { /* Force use of hard-coded filter coefficients * as some pre-programmed values are broken, * e.g. x201. @@ -2022,7 +2045,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) if ((temp & PIPECONF_ENABLE) == 0) { I915_WRITE(reg, temp | PIPECONF_ENABLE); POSTING_READ(reg); - udelay(100); + intel_wait_for_vblank(dev, intel_crtc->pipe); } /* configure and enable CPU plane */ @@ -2067,28 +2090,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); - /* enable normal train */ - reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; - I915_WRITE(reg, temp); - - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - if (HAS_PCH_CPT(dev)) { - temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_NORMAL_CPT; - } else { - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_NONE; - } - I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); - - /* wait one idle pattern time */ - POSTING_READ(reg); - udelay(100); - /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { @@ -2134,7 +2135,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; I915_WRITE(reg, temp | TRANS_ENABLE); if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) - DRM_ERROR("failed to enable transcoder\n"); + DRM_ERROR("failed to enable transcoder %d\n", pipe); intel_crtc_load_lut(crtc); intel_update_fbc(dev); @@ -2174,9 +2175,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) temp = I915_READ(reg); if (temp & PIPECONF_ENABLE) { I915_WRITE(reg, temp & ~PIPECONF_ENABLE); + POSTING_READ(reg); /* wait for cpu pipe off, pipe state */ - if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 50)) - DRM_ERROR("failed to turn off cpu pipe\n"); + intel_wait_for_pipe_off(dev, intel_crtc->pipe); } /* Disable PF */ @@ -2198,6 +2199,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) POSTING_READ(reg); udelay(100); + /* Ironlake workaround, disable clock pointer after downing FDI */ + I915_WRITE(FDI_RX_CHICKEN(pipe), + I915_READ(FDI_RX_CHICKEN(pipe) & + ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); + /* still set train pattern 1 */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); @@ -3623,7 +3629,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, refclk / 1000); } else if (!IS_GEN2(dev)) { refclk = 96000; - if (HAS_PCH_SPLIT(dev)) + if (HAS_PCH_SPLIT(dev) && + (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base))) refclk = 120000; /* 120Mhz refclk */ } else { refclk = 48000; @@ -3685,16 +3692,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* FDI link */ if (HAS_PCH_SPLIT(dev)) { int lane = 0, link_bw, bpp; - /* eDP doesn't require FDI link, so just set DP M/N + /* CPU eDP doesn't require FDI link, so just set DP M/N according to current link config */ - if (has_edp_encoder) { + if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) { target_clock = mode->clock; intel_edp_link_config(has_edp_encoder, &lane, &link_bw); } else { - /* DP over FDI requires target mode clock + /* [e]DP over FDI requires target mode clock instead of link clock */ - if (is_dp) + if (is_dp || 
intel_encoder_is_pch_edp(&has_edp_encoder->base)) target_clock = mode->clock; else target_clock = adjusted_mode->clock; @@ -3718,7 +3725,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, temp |= PIPE_8BPC; else temp |= PIPE_6BPC; - } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) { + } else if (has_edp_encoder) { switch (dev_priv->edp.bpp/3) { case 8: temp |= PIPE_8BPC; @@ -3794,13 +3801,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, POSTING_READ(PCH_DREF_CONTROL); udelay(200); + } + temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; + /* Enable CPU source on CPU attached eDP */ + if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { + if (dev_priv->lvds_use_ssc) + temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; + else + temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; } else { - temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; + /* Enable SSC on PCH eDP if needed */ + if (dev_priv->lvds_use_ssc) { + DRM_ERROR("enabling SSC on PCH\n"); + temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; + } } I915_WRITE(PCH_DREF_CONTROL, temp); + POSTING_READ(PCH_DREF_CONTROL); + udelay(200); } } @@ -3835,7 +3854,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } dpll |= DPLL_DVO_HIGH_SPEED; } - if (is_dp) + if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) dpll |= DPLL_DVO_HIGH_SPEED; /* compute bitmask from p1 value */ @@ -3934,7 +3953,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dpll_reg = DPLL(pipe); } - if (!has_edp_encoder) { + /* PCH eDP needs FDI, but CPU eDP does not */ + if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); @@ -4011,9 +4031,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } } - if (is_dp) + if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { intel_dp_set_m_n(crtc, mode, adjusted_mode); - else if (HAS_PCH_SPLIT(dev)) { + } else if (HAS_PCH_SPLIT(dev)) { /* For non-DP output, clear any trans DP clock recovery setting.*/ if (pipe == 0) { I915_WRITE(TRANSA_DATA_M1, 0); @@ -4028,7 +4048,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } } - if (!has_edp_encoder) { + if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { I915_WRITE(fp_reg, fp); I915_WRITE(dpll_reg, dpll); @@ -4122,29 +4142,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); - if (has_edp_encoder) { + if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { ironlake_set_pll_edp(crtc, adjusted_mode->clock); - } else { - /* enable FDI RX PLL too */ - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); - - POSTING_READ(reg); - udelay(200); - - /* enable FDI TX PLL too */ - reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); - - /* enable FDI RX PCDCLK */ - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - I915_WRITE(reg, temp | FDI_PCDCLK); - - POSTING_READ(reg); - udelay(200); } } @@ -4153,7 +4152,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, intel_wait_for_vblank(dev, pipe); - if (IS_IRONLAKE(dev)) { + if (IS_GEN5(dev)) { /* enable address swizzle for tiling buffer */ temp = I915_READ(DISP_ARB_CTL); I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); @@ -4992,11 +4991,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev, 
spin_unlock_irqrestore(&dev->event_lock, flags); - obj_priv = to_intel_bo(work->pending_flip_obj); - - /* Initial scanout buffer will have a 0 pending flip count */ - if ((atomic_read(&obj_priv->pending_flip) == 0) || - atomic_dec_and_test(&obj_priv->pending_flip)) + obj_priv = to_intel_bo(work->old_fb_obj); + atomic_clear_mask(1 << intel_crtc->plane, + &obj_priv->pending_flip.counter); + if (atomic_read(&obj_priv->pending_flip) == 0) wake_up(&dev_priv->pending_flip_queue); schedule_work(&work->work); @@ -5092,9 +5090,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, if (ret) goto cleanup_objs; - obj_priv = to_intel_bo(obj); - atomic_inc(&obj_priv->pending_flip); + /* Block clients from rendering to the new back buffer until + * the flip occurs and the object is no longer visible. + */ + atomic_add(1 << intel_crtc->plane, + &to_intel_bo(work->old_fb_obj)->pending_flip); + work->pending_flip_obj = obj; + obj_priv = to_intel_bo(obj); if (IS_GEN3(dev) || IS_GEN2(dev)) { u32 flip_mask; @@ -5736,7 +5739,7 @@ void intel_init_clock_gating(struct drm_device *dev) if (HAS_PCH_SPLIT(dev)) { uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; - if (IS_IRONLAKE(dev)) { + if (IS_GEN5(dev)) { /* Required for FBC */ dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE; /* Required for CxSR */ @@ -5750,13 +5753,20 @@ void intel_init_clock_gating(struct drm_device *dev) I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); /* + * On Ibex Peak and Cougar Point, we need to disable clock + * gating for the panel power sequencer or it will fail to + * start up when no ports are active. + */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); + + /* * According to the spec the following bits should be set in * order to enable memory self-refresh * The bit 22/21 of 0x42004 * The bit 5 of 0x42020 * The bit 15 of 0x45000 */ - if (IS_IRONLAKE(dev)) { + if (IS_GEN5(dev)) { I915_WRITE(ILK_DISPLAY_CHICKEN2, (I915_READ(ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); @@ -5932,7 +5942,7 @@ static void intel_init_display(struct drm_device *dev) /* For FIFO watermark updates */ if (HAS_PCH_SPLIT(dev)) { - if (IS_IRONLAKE(dev)) { + if (IS_GEN5(dev)) { if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) dev_priv->display.update_wm = ironlake_update_wm; else { @@ -6131,6 +6141,9 @@ void intel_modeset_cleanup(struct drm_device *dev) drm_kms_helper_poll_fini(dev); mutex_lock(&dev->struct_mutex); + intel_unregister_dsm_handler(); + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { /* Skip inactive CRTCs */ if (!crtc->fb) diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 152d94507b79..891f4f1d63b1 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -42,15 +42,13 @@ #define DP_LINK_CONFIGURATION_SIZE 9 -#define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP) -#define IS_PCH_eDP(i) ((i)->is_pch_edp) - struct intel_dp { struct intel_encoder base; uint32_t output_reg; uint32_t DP; uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; bool has_audio; + int force_audio; int dpms_mode; uint8_t link_bw; uint8_t lane_count; @@ -60,8 +58,35 @@ struct intel_dp { bool is_pch_edp; uint8_t train_set[4]; uint8_t link_status[DP_LINK_STATUS_SIZE]; + + struct drm_property *force_audio_property; }; +/** + * is_edp - is the given port attached to an eDP panel (either CPU or PCH) + * @intel_dp: DP struct + * + * If a CPU or PCH DP output is attached to an eDP panel, this function + * will return true, and false otherwise. 
+ */ +static bool is_edp(struct intel_dp *intel_dp) +{ + return intel_dp->base.type == INTEL_OUTPUT_EDP; +} + +/** + * is_pch_edp - is the port on the PCH and attached to an eDP panel? + * @intel_dp: DP struct + * + * Returns true if the given DP struct corresponds to a PCH DP port attached + * to an eDP panel, false otherwise. Helpful for determining whether we + * may need FDI resources for a given DP output or not. + */ +static bool is_pch_edp(struct intel_dp *intel_dp) +{ + return intel_dp->is_pch_edp; +} + static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) { return container_of(encoder, struct intel_dp, base.base); @@ -73,6 +98,25 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector) struct intel_dp, base); } +/** + * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? + * @encoder: DRM encoder + * + * Return true if @encoder corresponds to a PCH attached eDP panel. Needed + * by intel_display.c. + */ +bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) +{ + struct intel_dp *intel_dp; + + if (!encoder) + return false; + + intel_dp = enc_to_intel_dp(encoder); + + return is_pch_edp(intel_dp); +} + static void intel_dp_start_link_train(struct intel_dp *intel_dp); static void intel_dp_complete_link_train(struct intel_dp *intel_dp); static void intel_dp_link_down(struct intel_dp *intel_dp); @@ -138,7 +182,7 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi { struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + if (is_edp(intel_dp)) return (pixel_clock * dev_priv->edp.bpp + 7) / 8; else return pixel_clock * 3; @@ -160,8 +204,7 @@ intel_dp_mode_valid(struct drm_connector *connector, int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); int max_lanes = intel_dp_max_lane_count(intel_dp); - if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && - dev_priv->panel_fixed_mode) { + if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) return MODE_PANEL; @@ -171,7 +214,7 @@ intel_dp_mode_valid(struct drm_connector *connector, /* only refuse the mode on non eDP since we have seen some weird eDP panels which are outside spec tolerances but somehow work by magic */ - if (!IS_eDP(intel_dp) && + if (!is_edp(intel_dp) && (intel_dp_link_required(connector->dev, intel_dp, mode->clock) > intel_dp_max_data_rate(max_link_clock, max_lanes))) return MODE_CLOCK_HIGH; @@ -258,7 +301,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, * Note that PCH attached eDP panels should use a 125MHz input * clock divider. */ - if (IS_eDP(intel_dp) && !IS_PCH_eDP(intel_dp)) { + if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { if (IS_GEN6(dev)) aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ else @@ -530,8 +573,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 
1 : 0; static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; - if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && - dev_priv->panel_fixed_mode) { + if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, mode, adjusted_mode); @@ -542,6 +584,17 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, mode->clock = dev_priv->panel_fixed_mode->clock; } + /* Just use VBT values for eDP */ + if (is_edp(intel_dp)) { + intel_dp->lane_count = dev_priv->edp.lanes; + intel_dp->link_bw = dev_priv->edp.rate; + adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); + DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n", + intel_dp->link_bw, intel_dp->lane_count, + adjusted_mode->clock); + return true; + } + for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { for (clock = 0; clock <= max_clock; clock++) { int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); @@ -560,19 +613,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, } } - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { - /* okay we failed just pick the highest */ - intel_dp->lane_count = max_lane_count; - intel_dp->link_bw = bws[max_clock]; - adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); - DRM_DEBUG_KMS("Force picking display port link bw %02x lane " - "count %d clock %d\n", - intel_dp->link_bw, intel_dp->lane_count, - adjusted_mode->clock); - - return true; - } - return false; } @@ -609,25 +649,6 @@ intel_dp_compute_m_n(int bpp, intel_reduce_ratio(&m_n->link_m, &m_n->link_n); } -bool intel_pch_has_edp(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_mode_config *mode_config = &dev->mode_config; - struct drm_encoder *encoder; - - list_for_each_entry(encoder, &mode_config->encoder_list, head) { - struct intel_dp *intel_dp; - - if (encoder->crtc != crtc) - continue; - - intel_dp = enc_to_intel_dp(encoder); - if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) - return intel_dp->is_pch_edp; - } - return false; -} - void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -652,8 +673,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, intel_dp = enc_to_intel_dp(encoder); if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { lane_count = intel_dp->lane_count; - if (IS_PCH_eDP(intel_dp)) - bpp = dev_priv->edp.bpp; + break; + } else if (is_edp(intel_dp)) { + lane_count = dev_priv->edp.lanes; + bpp = dev_priv->edp.bpp; break; } } @@ -720,7 +743,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) intel_dp->DP |= DP_SYNC_VS_HIGH; - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; else intel_dp->DP |= DP_LINK_TRAIN_OFF; @@ -755,7 +778,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) intel_dp->DP |= DP_PIPEB_SELECT; - if (IS_eDP(intel_dp)) { + if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { /* don't miss out required setting for eDP */ intel_dp->DP |= DP_PLL_ENABLE; if (adjusted_mode->clock < 200000) @@ -766,10 +789,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, } /* Returns true if the panel was already on 
when called */ -static bool ironlake_edp_panel_on (struct drm_device *dev) +static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) { + struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp; + u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; if (I915_READ(PCH_PP_STATUS) & PP_ON) return true; @@ -781,19 +805,20 @@ static bool ironlake_edp_panel_on (struct drm_device *dev) I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); - pp |= POWER_TARGET_ON; + pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); /* Ouch. We need to wait here for some panels, like Dell e6510 * https://bugs.freedesktop.org/show_bug.cgi?id=29278i */ msleep(300); - if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000)) + if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, + 5000)) DRM_ERROR("panel on wait timed out: 0x%08x\n", I915_READ(PCH_PP_STATUS)); - pp &= ~(PANEL_UNLOCK_REGS); pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); @@ -804,7 +829,8 @@ static bool ironlake_edp_panel_on (struct drm_device *dev) static void ironlake_edp_panel_off (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp; + u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | + PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; pp = I915_READ(PCH_PP_CONTROL); @@ -815,12 +841,12 @@ static void ironlake_edp_panel_off (struct drm_device *dev) pp &= ~POWER_TARGET_ON; I915_WRITE(PCH_PP_CONTROL, pp); + POSTING_READ(PCH_PP_CONTROL); - if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000)) + if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) DRM_ERROR("panel off wait timed out: 0x%08x\n", I915_READ(PCH_PP_STATUS)); - /* Make sure VDD is enabled so DP AUX will work */ pp |= PANEL_POWER_RESET; /* restore panel reset bit */ I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); @@ -831,36 +857,19 @@ static void ironlake_edp_panel_off (struct drm_device *dev) msleep(300); } -static void ironlake_edp_panel_vdd_on(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp; - - pp = I915_READ(PCH_PP_CONTROL); - pp |= EDP_FORCE_VDD; - I915_WRITE(PCH_PP_CONTROL, pp); - POSTING_READ(PCH_PP_CONTROL); - msleep(300); -} - -static void ironlake_edp_panel_vdd_off(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp; - - pp = I915_READ(PCH_PP_CONTROL); - pp &= ~EDP_FORCE_VDD; - I915_WRITE(PCH_PP_CONTROL, pp); - POSTING_READ(PCH_PP_CONTROL); - msleep(300); -} - static void ironlake_edp_backlight_on (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 pp; DRM_DEBUG_KMS("\n"); + /* + * If we enable the backlight right away following a panel power + * on, we may see slight flicker as the panel syncs with the eDP + * link. So delay a bit to make sure the image is solid before + * allowing it to appear. 
+ */ + msleep(300); pp = I915_READ(PCH_PP_CONTROL); pp |= EDP_BLC_ENABLE; I915_WRITE(PCH_PP_CONTROL, pp); @@ -885,8 +894,10 @@ static void ironlake_edp_pll_on(struct drm_encoder *encoder) DRM_DEBUG_KMS("\n"); dpa_ctl = I915_READ(DP_A); - dpa_ctl &= ~DP_PLL_ENABLE; + dpa_ctl |= DP_PLL_ENABLE; I915_WRITE(DP_A, dpa_ctl); + POSTING_READ(DP_A); + udelay(200); } static void ironlake_edp_pll_off(struct drm_encoder *encoder) @@ -896,7 +907,7 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder) u32 dpa_ctl; dpa_ctl = I915_READ(DP_A); - dpa_ctl |= DP_PLL_ENABLE; + dpa_ctl &= ~DP_PLL_ENABLE; I915_WRITE(DP_A, dpa_ctl); POSTING_READ(DP_A); udelay(200); @@ -906,17 +917,16 @@ static void intel_dp_prepare(struct drm_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t dp_reg = I915_READ(intel_dp->output_reg); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { - ironlake_edp_panel_off(dev); + if (is_edp(intel_dp)) { ironlake_edp_backlight_off(dev); - ironlake_edp_panel_vdd_on(dev); - ironlake_edp_pll_on(encoder); + ironlake_edp_panel_on(intel_dp); + if (!is_pch_edp(intel_dp)) + ironlake_edp_pll_on(encoder); + else + ironlake_edp_pll_off(encoder); } - if (dp_reg & DP_PORT_EN) - intel_dp_link_down(intel_dp); + intel_dp_link_down(intel_dp); } static void intel_dp_commit(struct drm_encoder *encoder) @@ -926,14 +936,13 @@ static void intel_dp_commit(struct drm_encoder *encoder) intel_dp_start_link_train(intel_dp); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) - ironlake_edp_panel_on(dev); + if (is_edp(intel_dp)) + ironlake_edp_panel_on(intel_dp); intel_dp_complete_link_train(intel_dp); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + if (is_edp(intel_dp)) ironlake_edp_backlight_on(dev); - intel_dp->dpms_mode = DRM_MODE_DPMS_ON; } static void @@ -945,23 +954,22 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) uint32_t dp_reg = I915_READ(intel_dp->output_reg); if (mode != DRM_MODE_DPMS_ON) { - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { + if (is_edp(intel_dp)) ironlake_edp_backlight_off(dev); + intel_dp_link_down(intel_dp); + if (is_edp(intel_dp)) ironlake_edp_panel_off(dev); - } - if (dp_reg & DP_PORT_EN) - intel_dp_link_down(intel_dp); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) + if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) ironlake_edp_pll_off(encoder); } else { + if (is_edp(intel_dp)) + ironlake_edp_panel_on(intel_dp); if (!(dp_reg & DP_PORT_EN)) { intel_dp_start_link_train(intel_dp); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) - ironlake_edp_panel_on(dev); intel_dp_complete_link_train(intel_dp); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) - ironlake_edp_backlight_on(dev); } + if (is_edp(intel_dp)) + ironlake_edp_backlight_on(dev); } intel_dp->dpms_mode = mode; } @@ -1079,11 +1087,21 @@ intel_get_adjust_train(struct intel_dp *intel_dp) } static uint32_t -intel_dp_signal_levels(uint8_t train_set, int lane_count) +intel_dp_signal_levels(struct intel_dp *intel_dp) { - uint32_t signal_levels = 0; + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t signal_levels = 0; + u8 train_set = intel_dp->train_set[0]; + u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK; + u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK; + + if (is_edp(intel_dp)) { + vswing = dev_priv->edp.vswing; + preemphasis = dev_priv->edp.preemphasis; + } - switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + 
switch (vswing) { case DP_TRAIN_VOLTAGE_SWING_400: default: signal_levels |= DP_VOLTAGE_0_4; @@ -1098,7 +1116,7 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count) signal_levels |= DP_VOLTAGE_1_2; break; } - switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + switch (preemphasis) { case DP_TRAIN_PRE_EMPHASIS_0: default: signal_levels |= DP_PRE_EMPHASIS_0; @@ -1185,6 +1203,18 @@ intel_channel_eq_ok(struct intel_dp *intel_dp) } static bool +intel_dp_aux_handshake_required(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (is_edp(intel_dp) && dev_priv->no_aux_handshake) + return false; + + return true; +} + +static bool intel_dp_set_link_train(struct intel_dp *intel_dp, uint32_t dp_reg_value, uint8_t dp_train_pat) @@ -1196,6 +1226,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, I915_WRITE(intel_dp->output_reg, dp_reg_value); POSTING_READ(intel_dp->output_reg); + if (!intel_dp_aux_handshake_required(intel_dp)) + return true; + intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET, dp_train_pat); @@ -1228,13 +1261,14 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) POSTING_READ(intel_dp->output_reg); intel_wait_for_vblank(dev, intel_crtc->pipe); - /* Write the link configuration data */ - intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, - intel_dp->link_configuration, - DP_LINK_CONFIGURATION_SIZE); + if (intel_dp_aux_handshake_required(intel_dp)) + /* Write the link configuration data */ + intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, + intel_dp->link_configuration, + DP_LINK_CONFIGURATION_SIZE); DP |= DP_PORT_EN; - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) DP &= ~DP_LINK_TRAIN_MASK_CPT; else DP &= ~DP_LINK_TRAIN_MASK; @@ -1245,15 +1279,15 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) for (;;) { /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; - if (IS_GEN6(dev) && IS_eDP(intel_dp)) { + if (IS_GEN6(dev) && is_edp(intel_dp)) { signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); + signal_levels = intel_dp_signal_levels(intel_dp); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_PAT_1_CPT; else reg = DP | DP_LINK_TRAIN_PAT_1; @@ -1263,33 +1297,37 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) break; /* Set training pattern 1 */ - udelay(100); - if (!intel_dp_get_link_status(intel_dp)) - break; - - if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { - clock_recovery = true; + udelay(500); + if (!intel_dp_aux_handshake_required(intel_dp)) { break; - } + } else { + if (!intel_dp_get_link_status(intel_dp)) + break; - /* Check to see if we've tried the max voltage */ - for (i = 0; i < intel_dp->lane_count; i++) - if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) + if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { + clock_recovery = true; break; - if (i == intel_dp->lane_count) - break; + } - /* Check to see if we've tried the same voltage 5 times */ - if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { - ++tries; - if (tries == 5) + /* Check to see if we've tried 
the max voltage */ + for (i = 0; i < intel_dp->lane_count; i++) + if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) + break; + if (i == intel_dp->lane_count) break; - } else - tries = 0; - voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; - /* Compute new intel_dp->train_set as requested by target */ - intel_get_adjust_train(intel_dp); + /* Check to see if we've tried the same voltage 5 times */ + if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { + ++tries; + if (tries == 5) + break; + } else + tries = 0; + voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + + /* Compute new intel_dp->train_set as requested by target */ + intel_get_adjust_train(intel_dp); + } } intel_dp->DP = DP; @@ -1312,15 +1350,15 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ uint32_t signal_levels; - if (IS_GEN6(dev) && IS_eDP(intel_dp)) { + if (IS_GEN6(dev) && is_edp(intel_dp)) { signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); + signal_levels = intel_dp_signal_levels(intel_dp); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_PAT_2_CPT; else reg = DP | DP_LINK_TRAIN_PAT_2; @@ -1330,25 +1368,29 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) DP_TRAINING_PATTERN_2)) break; - udelay(400); - if (!intel_dp_get_link_status(intel_dp)) - break; + udelay(500); - if (intel_channel_eq_ok(intel_dp)) { - channel_eq = true; + if (!intel_dp_aux_handshake_required(intel_dp)) { break; - } + } else { + if (!intel_dp_get_link_status(intel_dp)) + break; - /* Try 5 times */ - if (tries > 5) - break; + if (intel_channel_eq_ok(intel_dp)) { + channel_eq = true; + break; + } - /* Compute new intel_dp->train_set as requested by target */ - intel_get_adjust_train(intel_dp); - ++tries; - } + /* Try 5 times */ + if (tries > 5) + break; - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) + /* Compute new intel_dp->train_set as requested by target */ + intel_get_adjust_train(intel_dp); + ++tries; + } + } + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_OFF_CPT; else reg = DP | DP_LINK_TRAIN_OFF; @@ -1368,14 +1410,14 @@ intel_dp_link_down(struct intel_dp *intel_dp) DRM_DEBUG_KMS("\n"); - if (IS_eDP(intel_dp)) { + if (is_edp(intel_dp)) { DP &= ~DP_PLL_ENABLE; I915_WRITE(intel_dp->output_reg, DP); POSTING_READ(intel_dp->output_reg); udelay(100); } - if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) { + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) { DP &= ~DP_LINK_TRAIN_MASK_CPT; I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); } else { @@ -1386,7 +1428,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) msleep(17); - if (IS_eDP(intel_dp)) + if (is_edp(intel_dp)) DP |= DP_LINK_TRAIN_OFF; I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); POSTING_READ(intel_dp->output_reg); @@ -1419,48 +1461,34 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) } static enum drm_connector_status -ironlake_dp_detect(struct drm_connector *connector) +ironlake_dp_detect(struct intel_dp *intel_dp) { - struct intel_dp *intel_dp = intel_attached_dp(connector); enum drm_connector_status status; - /* Panel needs power for AUX to work */ - if (IS_eDP(intel_dp) || 
IS_PCH_eDP(intel_dp)) - ironlake_edp_panel_vdd_on(connector->dev); + /* Can't disconnect eDP */ + if (is_edp(intel_dp)) + return connector_status_connected; + status = connector_status_disconnected; if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, - sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) - { + sizeof (intel_dp->dpcd)) + == sizeof(intel_dp->dpcd)) { if (intel_dp->dpcd[0] != 0) status = connector_status_connected; } DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) - ironlake_edp_panel_vdd_off(connector->dev); return status; } -/** - * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. - * - * \return true if DP port is connected. - * \return false if DP port is disconnected. - */ static enum drm_connector_status -intel_dp_detect(struct drm_connector *connector, bool force) +g4x_dp_detect(struct intel_dp *intel_dp) { - struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_device *dev = intel_dp->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t temp, bit; enum drm_connector_status status; - - intel_dp->has_audio = false; - - if (HAS_PCH_SPLIT(dev)) - return ironlake_dp_detect(connector); + uint32_t temp, bit; switch (intel_dp->output_reg) { case DP_B: @@ -1482,14 +1510,51 @@ intel_dp_detect(struct drm_connector *connector, bool force) return connector_status_disconnected; status = connector_status_disconnected; - if (intel_dp_aux_native_read(intel_dp, - 0x000, intel_dp->dpcd, + if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd, sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd)) { if (intel_dp->dpcd[0] != 0) status = connector_status_connected; } - return status; + + return status; +} + +/** + * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. + * + * \return true if DP port is connected. + * \return false if DP port is disconnected. 
+ */ +static enum drm_connector_status +intel_dp_detect(struct drm_connector *connector, bool force) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct drm_device *dev = intel_dp->base.base.dev; + enum drm_connector_status status; + struct edid *edid = NULL; + + intel_dp->has_audio = false; + + if (HAS_PCH_SPLIT(dev)) + status = ironlake_dp_detect(intel_dp); + else + status = g4x_dp_detect(intel_dp); + if (status != connector_status_connected) + return status; + + if (intel_dp->force_audio) { + intel_dp->has_audio = intel_dp->force_audio > 0; + } else { + edid = drm_get_edid(connector, &intel_dp->adapter); + if (edid) { + intel_dp->has_audio = drm_detect_monitor_audio(edid); + connector->display_info.raw_edid = NULL; + kfree(edid); + } + } + + return connector_status_connected; } static int intel_dp_get_modes(struct drm_connector *connector) @@ -1504,8 +1569,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) ret = intel_ddc_get_modes(connector, &intel_dp->adapter); if (ret) { - if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && - !dev_priv->panel_fixed_mode) { + if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) { struct drm_display_mode *newmode; list_for_each_entry(newmode, &connector->probed_modes, head) { @@ -1521,7 +1585,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) } /* if eDP has no EDID, try to use fixed panel mode from VBT */ - if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { + if (is_edp(intel_dp)) { if (dev_priv->panel_fixed_mode != NULL) { struct drm_display_mode *mode; mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); @@ -1532,6 +1596,46 @@ static int intel_dp_get_modes(struct drm_connector *connector) return 0; } +static int +intel_dp_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + int ret; + + ret = drm_connector_property_set_value(connector, property, val); + if (ret) + return ret; + + if (property == intel_dp->force_audio_property) { + if (val == intel_dp->force_audio) + return 0; + + intel_dp->force_audio = val; + + if (val > 0 && intel_dp->has_audio) + return 0; + if (val < 0 && !intel_dp->has_audio) + return 0; + + intel_dp->has_audio = val > 0; + goto done; + } + + return -EINVAL; + +done: + if (intel_dp->base.base.crtc) { + struct drm_crtc *crtc = intel_dp->base.base.crtc; + drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, + crtc->fb); + } + + return 0; +} + static void intel_dp_destroy (struct drm_connector *connector) { @@ -1561,6 +1665,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = intel_dp_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_dp_set_property, .destroy = intel_dp_destroy, }; @@ -1625,6 +1730,20 @@ bool intel_dpd_is_edp(struct drm_device *dev) return false; } +static void +intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + intel_dp->force_audio_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2); + if (intel_dp->force_audio_property) { + intel_dp->force_audio_property->values[0] = -1; + intel_dp->force_audio_property->values[1] = 1; + drm_connector_attach_property(connector, intel_dp->force_audio_property, 0); + } +} + void intel_dp_init(struct drm_device *dev, int output_reg) { @@ -1651,7 +1770,7 @@ intel_dp_init(struct drm_device *dev, int 
output_reg) if (intel_dpd_is_edp(dev)) intel_dp->is_pch_edp = true; - if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { + if (output_reg == DP_A || is_pch_edp(intel_dp)) { type = DRM_MODE_CONNECTOR_eDP; intel_encoder->type = INTEL_OUTPUT_EDP; } else { @@ -1672,7 +1791,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) else if (output_reg == DP_D || output_reg == PCH_DP_D) intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); - if (IS_eDP(intel_dp)) + if (is_edp(intel_dp)) intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); intel_encoder->crtc_mask = (1 << 0) | (1 << 1); @@ -1717,9 +1836,29 @@ intel_dp_init(struct drm_device *dev, int output_reg) intel_dp_i2c_init(intel_dp, intel_connector, name); + /* Cache some DPCD data in the eDP case */ + if (is_edp(intel_dp)) { + int ret; + bool was_on; + + was_on = ironlake_edp_panel_on(intel_dp); + ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, + intel_dp->dpcd, + sizeof(intel_dp->dpcd)); + if (ret == sizeof(intel_dp->dpcd)) { + if (intel_dp->dpcd[0] >= 0x11) + dev_priv->no_aux_handshake = intel_dp->dpcd[3] & + DP_NO_AUX_HANDSHAKE_LINK_TRAINING; + } else { + DRM_ERROR("failed to retrieve link info\n"); + } + if (!was_on) + ironlake_edp_panel_off(dev); + } + intel_encoder->hot_plug = intel_dp_hot_plug; - if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { + if (is_edp(intel_dp)) { /* initialize panel mode from VBT if available for eDP */ if (dev_priv->lfp_lvds_vbt_mode) { dev_priv->panel_fixed_mode = @@ -1731,6 +1870,8 @@ intel_dp_init(struct drm_device *dev, int output_reg) } } + intel_dp_add_properties(intel_dp, connector); + /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. 
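The "force_audio" connector property introduced above for DP (and mirrored for HDMI and SDVO in the following files) is a three-state range property: -1 forces audio off, 0 means follow EDID detection, and +1 forces audio on. A minimal standalone sketch of that resolution logic, using illustrative names that are not part of the patch:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only -- not part of the patch. Resolves the tri-state
 * "force_audio" property (-1 = force off, 0 = auto, +1 = force on)
 * against the audio capability detected from the monitor's EDID,
 * mirroring the checks in intel_dp_detect()/intel_hdmi_detect(). */
static bool resolve_has_audio(int force_audio, bool edid_has_audio)
{
	if (force_audio != 0)		/* explicit user override wins */
		return force_audio > 0;
	return edid_has_audio;		/* auto: follow EDID */
}

int main(void)
{
	printf("%d %d %d\n",
	       resolve_has_audio(-1, true),	/* 0: forced off */
	       resolve_has_audio(0, true),	/* 1: EDID says yes */
	       resolve_has_audio(1, false));	/* 1: forced on */
	return 0;
}

This is also why the add_properties helpers create a DRM range property with values[0] = -1 and values[1] = 1, and why the set_property handlers only trigger a mode set when the override actually changes the effective audio state.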
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 40e99bf27ff7..9af9f86a8765 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -178,6 +178,38 @@ struct intel_crtc { #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) +#define DIP_TYPE_AVI 0x82 +#define DIP_VERSION_AVI 0x2 +#define DIP_LEN_AVI 13 + +struct dip_infoframe { + uint8_t type; /* HB0 */ + uint8_t ver; /* HB1 */ + uint8_t len; /* HB2 - body len, not including checksum */ + uint8_t ecc; /* Header ECC */ + uint8_t checksum; /* PB0 */ + union { + struct { + /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */ + uint8_t Y_A_B_S; + /* PB2 - C 7:6, M 5:4, R 3:0 */ + uint8_t C_M_R; + /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */ + uint8_t ITC_EC_Q_SC; + /* PB4 - VIC 6:0 */ + uint8_t VIC; + /* PB5 - PR 3:0 */ + uint8_t PR; + /* PB6 to PB13 */ + uint16_t top_bar_end; + uint16_t bottom_bar_start; + uint16_t left_bar_end; + uint16_t right_bar_start; + } avi; + uint8_t payload[27]; + } __attribute__ ((packed)) body; +} __attribute__((packed)); + static inline struct drm_crtc * intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) { @@ -200,6 +232,7 @@ extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); extern void intel_crt_init(struct drm_device *dev); extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); +void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); extern bool intel_sdvo_init(struct drm_device *dev, int output_device); extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); @@ -209,9 +242,9 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); -extern bool intel_pch_has_edp(struct drm_crtc *crtc); extern bool intel_dpd_is_edp(struct drm_device *dev); extern void intel_edp_link_config (struct intel_encoder *, int *, int *); +extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); /* intel_panel.c */ extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 521622b9be7a..af2a1dddc28e 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -225,7 +225,7 @@ static void intel_fbdev_destroy(struct drm_device *dev, drm_framebuffer_cleanup(&ifb->base); if (ifb->obj) { - drm_gem_object_unreference(ifb->obj); + drm_gem_object_unreference_unlocked(ifb->obj); ifb->obj = NULL; } } diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 9fb9501f2d07..0d0273e7b029 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -42,6 +42,9 @@ struct intel_hdmi { u32 sdvox_reg; int ddc_bus; bool has_hdmi_sink; + bool has_audio; + int force_audio; + struct drm_property *force_audio_property; }; static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) @@ -55,6 +58,60 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) struct intel_hdmi, base); } +void intel_dip_infoframe_csum(struct dip_infoframe *avi_if) +{ + uint8_t *data = (uint8_t *)avi_if; + uint8_t sum = 0; + unsigned i; + + avi_if->checksum = 0; + avi_if->ecc = 0; + + for (i = 0; i < sizeof(*avi_if); i++) + sum += data[i]; + + avi_if->checksum = 0x100 - sum; +} + +static 
void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder) +{ + struct dip_infoframe avi_if = { + .type = DIP_TYPE_AVI, + .ver = DIP_VERSION_AVI, + .len = DIP_LEN_AVI, + }; + uint32_t *data = (uint32_t *)&avi_if; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + u32 port; + unsigned i; + + if (!intel_hdmi->has_hdmi_sink) + return; + + /* XXX first guess at handling video port, is this correct? */ + if (intel_hdmi->sdvox_reg == SDVOB) + port = VIDEO_DIP_PORT_B; + else if (intel_hdmi->sdvox_reg == SDVOC) + port = VIDEO_DIP_PORT_C; + else + return; + + I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | + VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC); + + intel_dip_infoframe_csum(&avi_if); + for (i = 0; i < sizeof(avi_if); i += 4) { + I915_WRITE(VIDEO_DIP_DATA, *data); + data++; + } + + I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port | + VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC | + VIDEO_DIP_ENABLE_AVI); +} + static void intel_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -72,10 +129,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) sdvox |= SDVO_HSYNC_ACTIVE_HIGH; - if (intel_hdmi->has_hdmi_sink) { + /* Required on CPT */ + if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev)) + sdvox |= HDMI_MODE_SELECT; + + if (intel_hdmi->has_audio) { sdvox |= SDVO_AUDIO_ENABLE; - if (HAS_PCH_CPT(dev)) - sdvox |= HDMI_MODE_SELECT; + sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC; } if (intel_crtc->pipe == 1) { @@ -87,6 +147,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, I915_WRITE(intel_hdmi->sdvox_reg, sdvox); POSTING_READ(intel_hdmi->sdvox_reg); + + intel_hdmi_set_avi_infoframe(encoder); } static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) @@ -154,6 +216,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) enum drm_connector_status status = connector_status_disconnected; intel_hdmi->has_hdmi_sink = false; + intel_hdmi->has_audio = false; edid = drm_get_edid(connector, &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); @@ -161,11 +224,17 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) if (edid->input & DRM_EDID_INPUT_DIGITAL) { status = connector_status_connected; intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid); + intel_hdmi->has_audio = drm_detect_monitor_audio(edid); } connector->display_info.raw_edid = NULL; kfree(edid); } + if (status == connector_status_connected) { + if (intel_hdmi->force_audio) + intel_hdmi->has_audio = intel_hdmi->force_audio > 0; + } + return status; } @@ -182,6 +251,46 @@ static int intel_hdmi_get_modes(struct drm_connector *connector) &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); } +static int +intel_hdmi_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); + int ret; + + ret = drm_connector_property_set_value(connector, property, val); + if (ret) + return ret; + + if (property == intel_hdmi->force_audio_property) { + if (val == intel_hdmi->force_audio) + return 0; + + intel_hdmi->force_audio = val; + + if (val > 0 && intel_hdmi->has_audio) + return 0; + if (val < 0 && !intel_hdmi->has_audio) + return 0; + + intel_hdmi->has_audio = val > 0; + goto done; + } + + return -EINVAL; + +done: + if (intel_hdmi->base.base.crtc) { + struct drm_crtc *crtc = 
intel_hdmi->base.base.crtc; + drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, + crtc->fb); + } + + return 0; +} + static void intel_hdmi_destroy(struct drm_connector *connector) { drm_sysfs_connector_remove(connector); @@ -201,6 +310,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .dpms = drm_helper_connector_dpms, .detect = intel_hdmi_detect, .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = intel_hdmi_set_property, .destroy = intel_hdmi_destroy, }; @@ -214,6 +324,20 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { .destroy = intel_encoder_destroy, }; +static void +intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + intel_hdmi->force_audio_property = + drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2); + if (intel_hdmi->force_audio_property) { + intel_hdmi->force_audio_property->values[0] = -1; + intel_hdmi->force_audio_property->values[1] = 1; + drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0); + } +} + void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -275,6 +399,8 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); + intel_hdmi_add_properties(intel_hdmi, connector); + intel_connector_attach_encoder(intel_connector, intel_encoder); drm_sysfs_connector_add(connector); diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 2449a74d4d80..2be4f728ed0c 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -155,6 +155,7 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) GPIOC, GPIOD, GPIOE, + 0, GPIOF, }; struct intel_gpio *gpio; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index d89b88791aac..09f2dc353ae2 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -119,12 +119,12 @@ render_ring_flush(struct drm_device *dev, } } -static void ring_set_tail(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 value) +static void ring_write_tail(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 value) { drm_i915_private_t *dev_priv = dev->dev_private; - I915_WRITE_TAIL(ring, ring->tail); + I915_WRITE_TAIL(ring, value); } u32 intel_ring_get_active_head(struct drm_device *dev, @@ -148,7 +148,7 @@ static int init_ring_common(struct drm_device *dev, /* Stop the ring if it's running. */ I915_WRITE_CTL(ring, 0); I915_WRITE_HEAD(ring, 0); - ring->set_tail(dev, ring, 0); + ring->write_tail(dev, ring, 0); /* Initialize the ring. 
*/ I915_WRITE_START(ring, obj_priv->gtt_offset); @@ -383,9 +383,9 @@ static int init_bsd_ring(struct drm_device *dev, } static u32 -bsd_ring_add_request(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 flush_domains) +ring_add_request(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 flush_domains) { u32 seqno; @@ -418,18 +418,18 @@ bsd_ring_put_user_irq(struct drm_device *dev, } static u32 -bsd_ring_get_seqno(struct drm_device *dev, - struct intel_ring_buffer *ring) +ring_status_page_get_seqno(struct drm_device *dev, + struct intel_ring_buffer *ring) { return intel_read_status_page(ring, I915_GEM_HWS_INDEX); } static int -bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) +ring_dispatch_gem_execbuffer(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) { uint32_t exec_start; exec_start = (uint32_t) exec_offset + exec->batch_start_offset; @@ -441,7 +441,6 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, return 0; } - static int render_ring_dispatch_gem_execbuffer(struct drm_device *dev, struct intel_ring_buffer *ring, @@ -476,7 +475,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, intel_ring_emit(dev, ring, exec_start + exec_len - 4); intel_ring_emit(dev, ring, 0); } else { - intel_ring_begin(dev, ring, 4); + intel_ring_begin(dev, ring, 2); if (INTEL_INFO(dev)->gen >= 4) { intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | (2 << 6) @@ -492,7 +491,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev, intel_ring_advance(dev, ring); } - if (IS_G4X(dev) || IS_IRONLAKE(dev)) { + if (IS_G4X(dev) || IS_GEN5(dev)) { intel_ring_begin(dev, ring, 2); intel_ring_emit(dev, ring, MI_FLUSH | MI_NO_WRITE_FLUSH | @@ -581,6 +580,7 @@ int intel_init_ring_buffer(struct drm_device *dev, ring->dev = dev; INIT_LIST_HEAD(&ring->active_list); INIT_LIST_HEAD(&ring->request_list); + INIT_LIST_HEAD(&ring->gpu_write_list); if (I915_NEED_GFX_HWS(dev)) { ret = init_status_page(dev, ring); @@ -707,7 +707,7 @@ int intel_wait_ring_buffer(struct drm_device *dev, master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; } - yield(); + msleep(1); } while (!time_after(jiffies, end)); trace_i915_ring_wait_end (dev); return -EBUSY; @@ -730,22 +730,7 @@ void intel_ring_advance(struct drm_device *dev, struct intel_ring_buffer *ring) { ring->tail &= ring->size - 1; - ring->set_tail(dev, ring, ring->tail); -} - -void intel_fill_struct(struct drm_device *dev, - struct intel_ring_buffer *ring, - void *data, - unsigned int len) -{ - unsigned int *virt = ring->virtual_start + ring->tail; - BUG_ON((len&~(4-1)) != 0); - intel_ring_begin(dev, ring, len/4); - memcpy(virt, data, len); - ring->tail += len; - ring->tail &= ring->size - 1; - ring->space -= len; - intel_ring_advance(dev, ring); + ring->write_tail(dev, ring, ring->tail); } static const struct intel_ring_buffer render_ring = { @@ -754,7 +739,7 @@ static const struct intel_ring_buffer render_ring = { .mmio_base = RENDER_RING_BASE, .size = 32 * PAGE_SIZE, .init = init_render_ring, - .set_tail = ring_set_tail, + .write_tail = ring_write_tail, .flush = render_ring_flush, .add_request = render_ring_add_request, .get_seqno = render_ring_get_seqno, @@ -771,19 +756,19 @@ static const struct intel_ring_buffer bsd_ring = { .mmio_base = BSD_RING_BASE, .size = 32 * PAGE_SIZE, .init = 
init_bsd_ring, - .set_tail = ring_set_tail, + .write_tail = ring_write_tail, .flush = bsd_ring_flush, - .add_request = bsd_ring_add_request, - .get_seqno = bsd_ring_get_seqno, + .add_request = ring_add_request, + .get_seqno = ring_status_page_get_seqno, .user_irq_get = bsd_ring_get_user_irq, .user_irq_put = bsd_ring_put_user_irq, - .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer, + .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer, }; -static void gen6_bsd_ring_set_tail(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 value) +static void gen6_bsd_ring_write_tail(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 value) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -804,10 +789,10 @@ static void gen6_bsd_ring_set_tail(struct drm_device *dev, GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); } -static void gen6_bsd_ring_flush(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 invalidate_domains, - u32 flush_domains) +static void gen6_ring_flush(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) { intel_ring_begin(dev, ring, 4); intel_ring_emit(dev, ring, MI_FLUSH_DW); @@ -818,11 +803,11 @@ static void gen6_bsd_ring_flush(struct drm_device *dev, } static int -gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev, - struct intel_ring_buffer *ring, - struct drm_i915_gem_execbuffer2 *exec, - struct drm_clip_rect *cliprects, - uint64_t exec_offset) +gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev, + struct intel_ring_buffer *ring, + struct drm_i915_gem_execbuffer2 *exec, + struct drm_clip_rect *cliprects, + uint64_t exec_offset) { uint32_t exec_start; @@ -845,13 +830,43 @@ static const struct intel_ring_buffer gen6_bsd_ring = { .mmio_base = GEN6_BSD_RING_BASE, .size = 32 * PAGE_SIZE, .init = init_bsd_ring, - .set_tail = gen6_bsd_ring_set_tail, - .flush = gen6_bsd_ring_flush, - .add_request = bsd_ring_add_request, - .get_seqno = bsd_ring_get_seqno, + .write_tail = gen6_bsd_ring_write_tail, + .flush = gen6_ring_flush, + .add_request = ring_add_request, + .get_seqno = ring_status_page_get_seqno, .user_irq_get = bsd_ring_get_user_irq, .user_irq_put = bsd_ring_put_user_irq, - .dispatch_gem_execbuffer = gen6_bsd_ring_dispatch_gem_execbuffer, + .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, +}; + +/* Blitter support (SandyBridge+) */ + +static void +blt_ring_get_user_irq(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + /* do nothing */ +} +static void +blt_ring_put_user_irq(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + /* do nothing */ +} + +static const struct intel_ring_buffer gen6_blt_ring = { + .name = "blt ring", + .id = RING_BLT, + .mmio_base = BLT_RING_BASE, + .size = 32 * PAGE_SIZE, + .init = init_ring_common, + .write_tail = ring_write_tail, + .flush = gen6_ring_flush, + .add_request = ring_add_request, + .get_seqno = ring_status_page_get_seqno, + .user_irq_get = blt_ring_get_user_irq, + .user_irq_put = blt_ring_put_user_irq, + .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, }; int intel_init_render_ring_buffer(struct drm_device *dev) @@ -881,3 +896,12 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); } + +int intel_init_blt_ring_buffer(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + dev_priv->blt_ring = gen6_blt_ring; + + return intel_init_ring_buffer(dev, &dev_priv->blt_ring); +}
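The ring refactor above replaces several near-identical BSD-only callbacks with shared helpers (ring_write_tail, ring_add_request, ring_status_page_get_seqno, gen6_ring_flush) and keeps a per-ring function table, so only rings with special needs override a hook: the Gen6 BSD ring, for instance, must toggle its sleep state around tail writes. A small self-contained sketch of this dispatch pattern (illustrative only, not the kernel API):

#include <stdint.h>
#include <stdio.h>

/* Illustrative per-ring ops table: a shared default plus a per-ring
 * override, analogous to .write_tail in the patch above. */
struct ring_ops {
	const char *name;
	void (*write_tail)(const struct ring_ops *ring, uint32_t value);
};

static void generic_write_tail(const struct ring_ops *ring, uint32_t value)
{
	printf("%s: tail <- 0x%x\n", ring->name, value);
}

static void gen6_bsd_write_tail(const struct ring_ops *ring, uint32_t value)
{
	printf("%s: disable sleep state\n", ring->name);  /* extra handshake */
	generic_write_tail(ring, value);
	printf("%s: re-enable sleep state\n", ring->name);
}

static const struct ring_ops render_ring = { "render", generic_write_tail };
static const struct ring_ops bsd_ring    = { "bsd",    gen6_bsd_write_tail };

int main(void)
{
	render_ring.write_tail(&render_ring, 64);	/* shared path */
	bsd_ring.write_tail(&bsd_ring, 64);		/* overridden path */
	return 0;
}

The same table makes the new SandyBridge blitter ring cheap to add: gen6_blt_ring reuses the generic helpers for everything and supplies only no-op IRQ hooks.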
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 9725f783db20..a05aff0e5764 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -22,6 +22,7 @@ struct intel_ring_buffer { enum intel_ring_id { RING_RENDER = 0x1, RING_BSD = 0x2, + RING_BLT = 0x4, } id; u32 mmio_base; unsigned long size; @@ -45,9 +46,9 @@ struct intel_ring_buffer { int (*init)(struct drm_device *dev, struct intel_ring_buffer *ring); - void (*set_tail)(struct drm_device *dev, - struct intel_ring_buffer *ring, - u32 value); + void (*write_tail)(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 value); void (*flush)(struct drm_device *dev, struct intel_ring_buffer *ring, u32 invalidate_domains, @@ -82,6 +83,15 @@ struct intel_ring_buffer { struct list_head request_list; /** + * List of objects currently pending a GPU write flush. + * + * All elements on this list will belong to either the + * active_list or flushing_list, last_rendering_seqno can + * be used to differentiate between the two elements. + */ + struct list_head gpu_write_list; + + /** * Do we have some not yet emitted requests outstanding? */ bool outstanding_lazy_request; @@ -116,10 +126,6 @@ static inline void intel_ring_emit(struct drm_device *dev, ring->tail += 4; } -void intel_fill_struct(struct drm_device *dev, - struct intel_ring_buffer *ring, - void *data, - unsigned int len); void intel_ring_advance(struct drm_device *dev, struct intel_ring_buffer *ring); @@ -128,6 +134,7 @@ u32 intel_ring_get_seqno(struct drm_device *dev, int intel_init_render_ring_buffer(struct drm_device *dev); int intel_init_bsd_ring_buffer(struct drm_device *dev); +int intel_init_blt_ring_buffer(struct drm_device *dev); u32 intel_ring_get_active_head(struct drm_device *dev, struct intel_ring_buffer *ring); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index a84224f37605..de158b76bcd5 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -107,6 +107,7 @@ struct intel_sdvo { * This is set if we treat the device as HDMI, instead of DVI. 
*/ bool is_hdmi; + bool has_audio; /** * This is set if we detect output of sdvo device as LVDS and @@ -119,12 +120,6 @@ struct intel_sdvo { */ struct drm_display_mode *sdvo_lvds_fixed_mode; - /* - * supported encoding mode, used to determine whether HDMI is - * supported - */ - struct intel_sdvo_encode encode; - /* DDC bus used by this SDVO encoder */ uint8_t ddc_bus; @@ -138,11 +133,15 @@ struct intel_sdvo_connector { /* Mark the type of connector */ uint16_t output_flag; + int force_audio; + /* This contains all current supported TV format */ u8 tv_format_supported[TV_FORMAT_NUM]; int format_supported_num; struct drm_property *tv_format; + struct drm_property *force_audio_property; + /* add the property for the SDVO-TV */ struct drm_property *left; struct drm_property *right; @@ -794,17 +793,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode, mode->flags |= DRM_MODE_FLAG_PVSYNC; } -static bool intel_sdvo_get_supp_encode(struct intel_sdvo *intel_sdvo, - struct intel_sdvo_encode *encode) +static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo) { - if (intel_sdvo_get_value(intel_sdvo, - SDVO_CMD_GET_SUPP_ENCODE, - encode, sizeof(*encode))) - return true; + struct intel_sdvo_encode encode; - /* non-support means DVI */ - memset(encode, 0, sizeof(*encode)); - return false; + return intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_SUPP_ENCODE, + &encode, sizeof(encode)); } static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo, @@ -849,115 +844,33 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo) } #endif -static bool intel_sdvo_set_hdmi_buf(struct intel_sdvo *intel_sdvo, - int index, - uint8_t *data, int8_t size, uint8_t tx_rate) -{ - uint8_t set_buf_index[2]; - - set_buf_index[0] = index; - set_buf_index[1] = 0; - - if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX, - set_buf_index, 2)) - return false; - - for (; size > 0; size -= 8) { - if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA, data, 8)) - return false; - - data += 8; - } - - return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1); -} - -static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size) -{ - uint8_t csum = 0; - int i; - - for (i = 0; i < size; i++) - csum += data[i]; - - return 0x100 - csum; -} - -#define DIP_TYPE_AVI 0x82 -#define DIP_VERSION_AVI 0x2 -#define DIP_LEN_AVI 13 - -struct dip_infoframe { - uint8_t type; - uint8_t version; - uint8_t len; - uint8_t checksum; - union { - struct { - /* Packet Byte #1 */ - uint8_t S:2; - uint8_t B:2; - uint8_t A:1; - uint8_t Y:2; - uint8_t rsvd1:1; - /* Packet Byte #2 */ - uint8_t R:4; - uint8_t M:2; - uint8_t C:2; - /* Packet Byte #3 */ - uint8_t SC:2; - uint8_t Q:2; - uint8_t EC:3; - uint8_t ITC:1; - /* Packet Byte #4 */ - uint8_t VIC:7; - uint8_t rsvd2:1; - /* Packet Byte #5 */ - uint8_t PR:4; - uint8_t rsvd3:4; - /* Packet Byte #6~13 */ - uint16_t top_bar_end; - uint16_t bottom_bar_start; - uint16_t left_bar_end; - uint16_t right_bar_start; - } avi; - struct { - /* Packet Byte #1 */ - uint8_t channel_count:3; - uint8_t rsvd1:1; - uint8_t coding_type:4; - /* Packet Byte #2 */ - uint8_t sample_size:2; /* SS0, SS1 */ - uint8_t sample_frequency:3; - uint8_t rsvd2:3; - /* Packet Byte #3 */ - uint8_t coding_type_private:5; - uint8_t rsvd3:3; - /* Packet Byte #4 */ - uint8_t channel_allocation; - /* Packet Byte #5 */ - uint8_t rsvd4:3; - uint8_t level_shift:4; - uint8_t downmix_inhibit:1; - } audio; - uint8_t payload[28]; - } __attribute__ ((packed)) u; -} 
@@ -1111,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 		return;
 
 	if (intel_sdvo->is_hdmi &&
-	    !intel_sdvo_set_avi_infoframe(intel_sdvo, mode))
+	    !intel_sdvo_set_avi_infoframe(intel_sdvo))
 		return;
 
 	if (intel_sdvo->is_tv &&
@@ -1150,7 +1063,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 	}
 	if (intel_crtc->pipe == 1)
 		sdvox |= SDVO_PIPE_B_SELECT;
-	if (intel_sdvo->is_hdmi)
+	if (intel_sdvo->has_audio)
 		sdvox |= SDVO_AUDIO_ENABLE;
 
 	if (INTEL_INFO(dev)->gen >= 4) {
@@ -1476,11 +1389,18 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
 		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
 			status = connector_status_connected;
 			intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
+			intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
 		}
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
-
+
+	if (status == connector_status_connected) {
+		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+		if (intel_sdvo_connector->force_audio)
+			intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
+	}
+
 	return status;
 }
 
@@ -1787,6 +1707,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
 	if (ret)
 		return ret;
 
+	if (property == intel_sdvo_connector->force_audio_property) {
+		if (val == intel_sdvo_connector->force_audio)
+			return 0;
+
+		intel_sdvo_connector->force_audio = val;
+
+		if (val > 0 && intel_sdvo->has_audio)
+			return 0;
+		if (val < 0 && !intel_sdvo->has_audio)
+			return 0;
+
+		intel_sdvo->has_audio = val > 0;
+		goto done;
+	}
+
 #define CHECK_PROPERTY(name, NAME) \
 	if (intel_sdvo_connector->name == property) { \
 		if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
@@ -2013,12 +1948,22 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
 }
 
 static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_sdvo *intel_sdvo, int device)
+intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
 {
-	return intel_sdvo_set_target_output(intel_sdvo,
-					    device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1) &&
-		intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
-				     &intel_sdvo->is_hdmi, 1);
+	int is_hdmi;
+
+	if (!intel_sdvo_check_supp_encode(intel_sdvo))
+		return false;
+
+	if (!intel_sdvo_set_target_output(intel_sdvo,
+					  device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
+		return false;
+
+	is_hdmi = 0;
+	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
+		return false;
+
+	return !!is_hdmi;
 }
 
 static u8
@@ -2078,6 +2023,21 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
 	drm_sysfs_connector_add(&connector->base.base);
 }
 
+static void
+intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
+{
+	struct drm_device *dev = connector->base.base.dev;
+
+	connector->force_audio_property =
+		drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
+	if (connector->force_audio_property) {
+		connector->force_audio_property->values[0] = -1;
+		connector->force_audio_property->values[1] = 1;
+		drm_connector_attach_property(&connector->base.base,
+					      connector->force_audio_property, 0);
+	}
+}
+
 static bool
 intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 {
@@ -2104,20 +2064,21 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
 	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
-	if (intel_sdvo_get_supp_encode(intel_sdvo, &intel_sdvo->encode)
-	    && intel_sdvo_get_digital_encoding_mode(intel_sdvo, device)
-	    && intel_sdvo->is_hdmi) {
+	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
 		/* enable hdmi encoding mode if supported */
 		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
 		intel_sdvo_set_colorimetry(intel_sdvo,
 					   SDVO_COLORIMETRY_RGB256);
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+		intel_sdvo->is_hdmi = true;
 	}
 
 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
 				       (1 << INTEL_ANALOG_CLONE_BIT));
 
 	intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
+
+	intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
+
 	return true;
 }
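The force_audio property created above is an ordinary DRM connector property, so userspace can reach it with standard libdrm calls: per the set_property handler, a positive value forces audio on, a negative value forces it off, and 0 defers to drm_detect_monitor_audio() on the next detect cycle. A hypothetical sketch (this helper, and the fd and connector id it takes, are illustrative assumptions, not part of the kernel change; note too that range property values travel as u64, so the negative bound relies on the same wrap-around encoding the driver stores in values[0]):

	/*
	 * Hypothetical userspace sketch: look up the "force_audio"
	 * property on a connector and set it.  fd and connector_id are
	 * assumed to come from drmOpen()/drmModeGetResources().
	 */
	#include <stdint.h>
	#include <string.h>
	#include <xf86drmMode.h>

	static int set_force_audio(int fd, uint32_t connector_id, int value)
	{
		drmModeConnectorPtr conn = drmModeGetConnector(fd, connector_id);
		int i, ret = -1;

		if (!conn)
			return -1;

		for (i = 0; i < conn->count_props; i++) {
			drmModePropertyPtr prop =
				drmModeGetProperty(fd, conn->props[i]);

			if (!prop)
				continue;
			if (strcmp(prop->name, "force_audio") == 0)
				ret = drmModeConnectorSetProperty(fd, connector_id,
								  prop->prop_id,
								  (uint64_t)(int64_t)value);
			drmModeFreeProperty(prop);
		}

		drmModeFreeConnector(conn);
		return ret;	/* -1 if the property was not found */
	}

Because intel_sdvo_set_property() only updates has_audio and then branches to done:, the override is applied through the driver's usual property-commit path; intel_sdvo_mode_set() subsequently sets SDVO_AUDIO_ENABLE from has_audio rather than from is_hdmi.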