From 1fd1c624362819ecc36db2458c6a972c48ae92d6 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 3 Jun 2009 07:26:58 +0000 Subject: drm/i915: Save/restore cursor state on suspend/resume. This may fix cursor corruption in X on resume, which would persist until the cursor was hidden and then shown again. V2: Also include the cursor control regs. Signed-off-by: Eric Anholt Reviewed-by: Jesse Barnes --- drivers/gpu/drm/i915/i915_drv.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/gpu/drm/i915/i915_drv.h') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c431fa54bbb5..fcaa5444daa0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -285,6 +285,13 @@ typedef struct drm_i915_private { u8 saveDACMASK; u8 saveCR[37]; uint64_t saveFENCE[16]; + u32 saveCURACNTR; + u32 saveCURAPOS; + u32 saveCURABASE; + u32 saveCURBCNTR; + u32 saveCURBPOS; + u32 saveCURBBASE; + u32 saveCURSIZE; struct { struct drm_mm gtt_space; @@ -642,6 +649,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev, void i915_gem_free_all_phys_object(struct drm_device *dev); int i915_gem_object_get_pages(struct drm_gem_object *obj); void i915_gem_object_put_pages(struct drm_gem_object *obj); +void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); /* i915_gem_tiling.c */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); -- cgit v1.2.3 From b962442e46a9340bdbc6711982c59ff0cc2b5afb Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 3 Jun 2009 07:27:35 +0000 Subject: drm/i915: Change GEM throttling to be 20ms like the comment says. keithp didn't like the original 20ms plan because a cooperative client could be starved by an uncooperative client. There may even have been problems with cooperative clients versus cooperative clients. So keithp changed throttle to just wait for the second to last seqno emitted by that client. It worked well, until we started getting more round-trips to the server due to DRI2 -- the server throttles in BlockHandler, and so if you did more than one round trip after finishing your frame, you'd end up unintentionally syncing to the swap. Fix this by keeping track of the client's requests, so the client can wait when it has an outstanding request over 20ms old. This should have non-starving behavior, good behavior in the presence of restarts, and less waiting. Improves high-settings openarena performance on my GM45 by 50%. 
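In rough C terms, the throttle now walks the per-client request list and only sleeps on requests that are more than 20ms old. A simplified sketch of the i915_gem_ring_throttle() hunk in the diff below (locking and the function wrapper omitted):

	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
	struct drm_i915_gem_request *request;
	int ret = 0;

	while (!list_empty(&i915_file_priv->mm.request_list)) {
		request = list_first_entry(&i915_file_priv->mm.request_list,
					   struct drm_i915_gem_request,
					   client_list);
		/* Oldest outstanding request is under 20ms old: nothing to wait for. */
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;
		/* Waiting retires the request, so the list shrinks each pass. */
		ret = i915_wait_request(dev, request->seqno);
		if (ret != 0)
			break;
	}
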
Signed-off-by: Eric Anholt Reviewed-by: Jesse Barnes --- drivers/gpu/drm/i915/i915_dma.c | 4 +-- drivers/gpu/drm/i915/i915_drv.h | 7 +++-- drivers/gpu/drm/i915/i915_gem.c | 69 ++++++++++++++++++++++++++++++++--------- 3 files changed, 61 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_drv.h') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 53d544552625..0c222c28b8c1 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1273,8 +1273,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) file_priv->driver_priv = i915_file_priv; - i915_file_priv->mm.last_gem_seqno = 0; - i915_file_priv->mm.last_gem_throttle_seqno = 0; + INIT_LIST_HEAD(&i915_file_priv->mm.request_list); return 0; } @@ -1311,6 +1310,7 @@ void i915_driver_lastclose(struct drm_device * dev) void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; + i915_gem_release(dev, file_priv); if (!drm_core_check_feature(dev, DRIVER_MODESET)) i915_mem_release(dev, file_priv, dev_priv->agp_heap); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fcaa5444daa0..e0fac5f62c69 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -498,13 +498,16 @@ struct drm_i915_gem_request { /** Time at which this request was emitted, in jiffies. */ unsigned long emitted_jiffies; + /** global list entry for this request */ struct list_head list; + + /** file_priv list entry for this request */ + struct list_head client_list; }; struct drm_i915_file_private { struct { - uint32_t last_gem_seqno; - uint32_t last_gem_throttle_seqno; + struct list_head request_list; } mm; }; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 39f5c658ef5e..3fbd8a0c40d1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1481,14 +1481,19 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) * Returned sequence numbers are nonzero on success. */ static uint32_t -i915_add_request(struct drm_device *dev, uint32_t flush_domains) +i915_add_request(struct drm_device *dev, struct drm_file *file_priv, + uint32_t flush_domains) { drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_file_private *i915_file_priv = NULL; struct drm_i915_gem_request *request; uint32_t seqno; int was_empty; RING_LOCALS; + if (file_priv != NULL) + i915_file_priv = file_priv->driver_priv; + request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER); if (request == NULL) return 0; @@ -1515,6 +1520,12 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains) request->emitted_jiffies = jiffies; was_empty = list_empty(&dev_priv->mm.request_list); list_add_tail(&request->list, &dev_priv->mm.request_list); + if (i915_file_priv) { + list_add_tail(&request->client_list, + &i915_file_priv->mm.request_list); + } else { + INIT_LIST_HEAD(&request->client_list); + } /* Associate any objects on the flushing list matching the write * domain we're flushing with our flush. 
@@ -1664,6 +1675,7 @@ i915_gem_retire_requests(struct drm_device *dev) i915_gem_retire_request(dev, request); list_del(&request->list); + list_del(&request->client_list); drm_free(request, sizeof(*request), DRM_MEM_DRIVER); } else break; @@ -1977,7 +1989,7 @@ i915_gem_evict_something(struct drm_device *dev) i915_gem_flush(dev, obj->write_domain, obj->write_domain); - i915_add_request(dev, obj->write_domain); + i915_add_request(dev, NULL, obj->write_domain); obj = NULL; continue; @@ -2248,7 +2260,7 @@ try_again: i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); - seqno = i915_add_request(dev, + seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); if (seqno == 0) return -ENOMEM; @@ -2452,7 +2464,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) /* Queue the GPU write cache flushing we need. */ i915_gem_flush(dev, 0, obj->write_domain); - seqno = i915_add_request(dev, obj->write_domain); + seqno = i915_add_request(dev, NULL, obj->write_domain); obj->write_domain = 0; i915_gem_object_move_to_active(obj, seqno); } @@ -3089,6 +3101,10 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev, /* Throttle our rendering by waiting until the ring has completed our requests * emitted over 20 msec ago. * + * Note that if we were to use the current jiffies each time around the loop, + * we wouldn't escape the function with any frames outstanding if the time to + * render a frame was over 20ms. + * * This should get us reasonable parallelism between CPU and GPU but also * relatively low latency when blocking on a particular request to finish. */ @@ -3097,15 +3113,25 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) { struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; int ret = 0; - uint32_t seqno; + unsigned long recent_enough = jiffies - msecs_to_jiffies(20); mutex_lock(&dev->struct_mutex); - seqno = i915_file_priv->mm.last_gem_throttle_seqno; - i915_file_priv->mm.last_gem_throttle_seqno = - i915_file_priv->mm.last_gem_seqno; - if (seqno) - ret = i915_wait_request(dev, seqno); + while (!list_empty(&i915_file_priv->mm.request_list)) { + struct drm_i915_gem_request *request; + + request = list_first_entry(&i915_file_priv->mm.request_list, + struct drm_i915_gem_request, + client_list); + + if (time_after_eq(request->emitted_jiffies, recent_enough)) + break; + + ret = i915_wait_request(dev, request->seqno); + if (ret != 0) + break; + } mutex_unlock(&dev->struct_mutex); + return ret; } @@ -3187,7 +3213,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; struct drm_i915_gem_execbuffer *args = data; struct drm_i915_gem_exec_object *exec_list = NULL; struct drm_gem_object **object_list = NULL; @@ -3363,7 +3388,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, dev->invalidate_domains, dev->flush_domains); if (dev->flush_domains) - (void)i915_add_request(dev, dev->flush_domains); + (void)i915_add_request(dev, file_priv, + dev->flush_domains); } for (i = 0; i < args->buffer_count; i++) { @@ -3412,9 +3438,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, * *some* interrupts representing completion of buffers that we can * wait on when trying to clear up gtt space). 
*/ - seqno = i915_add_request(dev, flush_domains); + seqno = i915_add_request(dev, file_priv, flush_domains); BUG_ON(seqno == 0); - i915_file_priv->mm.last_gem_seqno = seqno; for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; @@ -3802,7 +3827,7 @@ i915_gem_idle(struct drm_device *dev) */ i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); - seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU); + seqno = i915_add_request(dev, NULL, ~I915_GEM_DOMAIN_CPU); if (seqno == 0) { mutex_unlock(&dev->struct_mutex); @@ -4352,3 +4377,17 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, drm_agp_chipset_flush(dev); return 0; } + +void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv) +{ + struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; + + /* Clean up our request list when the client is going away, so that + * later retire_requests won't dereference our soon-to-be-gone + * file_priv. + */ + mutex_lock(&dev->struct_mutex); + while (!list_empty(&i915_file_priv->mm.request_list)) + list_del_init(i915_file_priv->mm.request_list.next); + mutex_unlock(&dev->struct_mutex); +} -- cgit v1.2.3 From 280da227c870a50f669de0c8d46bfb2c62da9995 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 5 Jun 2009 15:38:37 +0800 Subject: drm/i915: Add chipset/feature defines for for new chipsets Signed-off-by: Zhenyu Wang [anholt: dropped drm_pciids.h hunk to avoid loading an incomplete driver] Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_drv.h | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_drv.h') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e0fac5f62c69..ded9e786883e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -796,7 +796,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x2E02 || \ (dev)->pci_device == 0x2E12 || \ (dev)->pci_device == 0x2E22 || \ - (dev)->pci_device == 0x2E32) + (dev)->pci_device == 0x2E32 || \ + (dev)->pci_device == 0x0042 || \ + (dev)->pci_device == 0x0046) #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \ (dev)->pci_device == 0x2A12) @@ -818,20 +820,26 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x29D2 || \ (IS_IGD(dev))) +#define IS_IGDNG_D(dev) ((dev)->pci_device == 0x0042) +#define IS_IGDNG_M(dev) ((dev)->pci_device == 0x0046) +#define IS_IGDNG(dev) (IS_IGDNG_D(dev) || IS_IGDNG_M(dev)) + #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ - IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) + IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \ + IS_IGDNG(dev)) #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \ - IS_IGD(dev)) + IS_IGD(dev) || IS_IGDNG_M(dev)) -#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) +#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \ + IS_IGDNG(dev)) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. 
*/ #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \ IS_I915GM(dev))) -#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev)) +#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev)) #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) #define PRIMARY_RINGBUFFER_SIZE (128*1024) -- cgit v1.2.3 From 9b9d172d06b0f2d51cc9431e2c6c3055f0cf10ef Mon Sep 17 00:00:00 2001 From: yakui_zhao Date: Sun, 31 May 2009 17:17:17 +0800 Subject: drm/i915: parse VBT general definition block to get the SDVO device info The general definition block contains the child device tables, which include the SDVO device info. For example: device slave address, device dvo port, device type. We will get the info of SDVO device by parsing the general definition blocks. Only when a valid slave address is found, it is regarded as the SDVO device. And the info of DVO port and slave address is recorded. http://bugs.freedesktop.org/show_bug.cgi?id=20429 Signed-off-by: Zhao Yakui Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_drv.h | 8 ++++ drivers/gpu/drm/i915/intel_bios.c | 86 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 93 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/i915/i915_drv.h') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ded9e786883e..db81f5513daa 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -126,6 +126,13 @@ struct drm_i915_fence_reg { struct drm_gem_object *obj; }; +struct sdvo_device_mapping { + u8 dvo_port; + u8 slave_addr; + u8 dvo_wiring; + u8 initialized; +}; + typedef struct drm_i915_private { struct drm_device *dev; @@ -389,6 +396,7 @@ typedef struct drm_i915_private { /* storage for physical objects */ struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; } mm; + struct sdvo_device_mapping sdvo_mappings[2]; } drm_i915_private_t; /** driver private structure attached to each drm_gem_object */ diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 9d78cff33b24..754dd22fdd77 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -30,6 +30,8 @@ #include "i915_drv.h" #include "intel_bios.h" +#define SLAVE_ADDR1 0x70 +#define SLAVE_ADDR2 0x72 static void * find_section(struct bdb_header *bdb, int section_id) @@ -193,6 +195,88 @@ parse_general_features(struct drm_i915_private *dev_priv, } } +static void +parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, + struct bdb_header *bdb) +{ + struct sdvo_device_mapping *p_mapping; + struct bdb_general_definitions *p_defs; + struct child_device_config *p_child; + int i, child_device_num, count; + u16 block_size, *block_ptr; + + p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); + if (!p_defs) { + DRM_DEBUG("No general definition block is found\n"); + return; + } + /* judge whether the size of child device meets the requirements. + * If the child device size obtained from general definition block + * is different with sizeof(struct child_device_config), skip the + * parsing of sdvo device info + */ + if (p_defs->child_dev_size != sizeof(*p_child)) { + /* different child dev size . Ignore it */ + DRM_DEBUG("different child size is found. 
Invalid.\n"); + return; + } + /* get the block size of general definitions */ + block_ptr = (u16 *)((char *)p_defs - 2); + block_size = *block_ptr; + /* get the number of child device */ + child_device_num = (block_size - sizeof(*p_defs)) / + sizeof(*p_child); + count = 0; + for (i = 0; i < child_device_num; i++) { + p_child = &(p_defs->devices[i]); + if (!p_child->device_type) { + /* skip the device block if device type is invalid */ + continue; + } + if (p_child->slave_addr != SLAVE_ADDR1 && + p_child->slave_addr != SLAVE_ADDR2) { + /* + * If the slave address is neither 0x70 nor 0x72, + * it is not a SDVO device. Skip it. + */ + continue; + } + if (p_child->dvo_port != DEVICE_PORT_DVOB && + p_child->dvo_port != DEVICE_PORT_DVOC) { + /* skip the incorrect SDVO port */ + DRM_DEBUG("Incorrect SDVO port. Skip it \n"); + continue; + } + DRM_DEBUG("the SDVO device with slave addr %2x is found on " + "%s port\n", + p_child->slave_addr, + (p_child->dvo_port == DEVICE_PORT_DVOB) ? + "SDVOB" : "SDVOC"); + p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]); + if (!p_mapping->initialized) { + p_mapping->dvo_port = p_child->dvo_port; + p_mapping->slave_addr = p_child->slave_addr; + p_mapping->dvo_wiring = p_child->dvo_wiring; + p_mapping->initialized = 1; + } else { + DRM_DEBUG("Maybe one SDVO port is shared by " + "two SDVO device.\n"); + } + if (p_child->slave2_addr) { + /* Maybe this is a SDVO device with multiple inputs */ + /* And the mapping info is not added */ + DRM_DEBUG("there exists the slave2_addr. Maybe this " + "is a SDVO device with multiple inputs.\n"); + } + count++; + } + + if (!count) { + /* No SDVO device info is found */ + DRM_DEBUG("No SDVO device info is found in VBT\n"); + } + return; +} /** * intel_init_bios - initialize VBIOS settings & find VBT * @dev: DRM device @@ -242,7 +326,7 @@ intel_init_bios(struct drm_device *dev) parse_general_features(dev_priv, bdb); parse_lfp_panel_data(dev_priv, bdb); parse_sdvo_panel_data(dev_priv, bdb); - + parse_sdvo_device_mapping(dev_priv, bdb); pci_unmap_rom(pdev, bios); return 0; -- cgit v1.2.3 From d765898970f35acef960581f678b9da9d5c779fa Mon Sep 17 00:00:00 2001 From: Jesse Barnes Date: Fri, 5 Jun 2009 14:41:29 +0000 Subject: drm/i915: enable MCHBAR if needed Using the new PNP resource checking code, this patch allows the i915 driver to allocate MCHBAR space if needed and use the BAR to determine current memory settings. 
[apw@canonical.com: moved to the new generic PNP resource interface] Signed-off-by: Jesse Barnes Signed-off-by: Andy Whitcroft Signed-off-by: Eric Anholt failure to update-index after git-am --reject to hand-apply Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_drv.h | 2 + drivers/gpu/drm/i915/i915_gem_tiling.c | 145 +++++++++++++++++++++++++++++++++ 2 files changed, 147 insertions(+) (limited to 'drivers/gpu/drm/i915/i915_drv.h') diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index db81f5513daa..6a471458d61a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -150,6 +150,8 @@ typedef struct drm_i915_private { drm_local_map_t hws_map; struct drm_gem_object *hws_obj; + struct resource mch_res; + unsigned int cpp; int back_offset; int front_offset; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 07d976bf4931..9a05cadaa4ad 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -25,6 +25,8 @@ * */ +#include +#include #include "linux/string.h" #include "linux/bitops.h" #include "drmP.h" @@ -81,6 +83,143 @@ * to match what the GPU expects. */ +#define MCHBAR_I915 0x44 +#define MCHBAR_I965 0x48 +#define MCHBAR_SIZE (4*4096) + +#define DEVEN_REG 0x54 +#define DEVEN_MCHBAR_EN (1 << 28) + +/* Allocate space for the MCH regs if needed, return nonzero on error */ +static int +intel_alloc_mchbar_resource(struct drm_device *dev) +{ + struct pci_dev *bridge_dev; + drm_i915_private_t *dev_priv = dev->dev_private; + int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; + u32 temp_lo, temp_hi = 0; + u64 mchbar_addr; + int ret = 0; + + bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); + if (!bridge_dev) { + DRM_DEBUG("no bridge dev?!\n"); + ret = -ENODEV; + goto out; + } + + if (IS_I965G(dev)) + pci_read_config_dword(bridge_dev, reg + 4, &temp_hi); + pci_read_config_dword(bridge_dev, reg, &temp_lo); + mchbar_addr = ((u64)temp_hi << 32) | temp_lo; + + /* If ACPI doesn't have it, assume we need to allocate it ourselves */ + if (mchbar_addr && + pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { + ret = 0; + goto out_put; + } + + /* Get some space for it */ + ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res, + MCHBAR_SIZE, MCHBAR_SIZE, + PCIBIOS_MIN_MEM, + 0, pcibios_align_resource, + bridge_dev); + if (ret) { + DRM_DEBUG("failed bus alloc: %d\n", ret); + dev_priv->mch_res.start = 0; + goto out_put; + } + + if (IS_I965G(dev)) + pci_write_config_dword(bridge_dev, reg + 4, + upper_32_bits(dev_priv->mch_res.start)); + + pci_write_config_dword(bridge_dev, reg, + lower_32_bits(dev_priv->mch_res.start)); +out_put: + pci_dev_put(bridge_dev); +out: + return ret; +} + +/* Setup MCHBAR if possible, return true if we should disable it again */ +static bool +intel_setup_mchbar(struct drm_device *dev) +{ + struct pci_dev *bridge_dev; + int mchbar_reg = IS_I965G(dev) ? 
MCHBAR_I965 : MCHBAR_I915; + u32 temp; + bool need_disable = false, enabled; + + bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); + if (!bridge_dev) { + DRM_DEBUG("no bridge dev?!\n"); + goto out; + } + + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_read_config_dword(bridge_dev, DEVEN_REG, &temp); + enabled = !!(temp & DEVEN_MCHBAR_EN); + } else { + pci_read_config_dword(bridge_dev, mchbar_reg, &temp); + enabled = temp & 1; + } + + /* If it's already enabled, don't have to do anything */ + if (enabled) + goto out_put; + + if (intel_alloc_mchbar_resource(dev)) + goto out_put; + + need_disable = true; + + /* Space is allocated or reserved, so enable it. */ + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_write_config_dword(bridge_dev, DEVEN_REG, + temp | DEVEN_MCHBAR_EN); + } else { + pci_read_config_dword(bridge_dev, mchbar_reg, &temp); + pci_write_config_dword(bridge_dev, mchbar_reg, temp | 1); + } +out_put: + pci_dev_put(bridge_dev); +out: + return need_disable; +} + +static void +intel_teardown_mchbar(struct drm_device *dev, bool disable) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct pci_dev *bridge_dev; + int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; + u32 temp; + + bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); + if (!bridge_dev) { + DRM_DEBUG("no bridge dev?!\n"); + return; + } + + if (disable) { + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_read_config_dword(bridge_dev, DEVEN_REG, &temp); + temp &= ~DEVEN_MCHBAR_EN; + pci_write_config_dword(bridge_dev, DEVEN_REG, temp); + } else { + pci_read_config_dword(bridge_dev, mchbar_reg, &temp); + temp &= ~1; + pci_write_config_dword(bridge_dev, mchbar_reg, temp); + } + } + + if (dev_priv->mch_res.start) + release_resource(&dev_priv->mch_res); +} + /** * Detects bit 6 swizzling of address lookup between IGD access and CPU * access through main memory. @@ -91,6 +230,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + bool need_disable; if (!IS_I9XX(dev)) { /* As far as we know, the 865 doesn't have these bit 6 @@ -101,6 +241,9 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) } else if (IS_MOBILE(dev)) { uint32_t dcc; + /* Try to make sure MCHBAR is enabled before poking at it */ + need_disable = intel_setup_mchbar(dev); + /* On mobile 9xx chipsets, channel interleave by the CPU is * determined by DCC. For single-channel, neither the CPU * nor the GPU do swizzling. For dual channel interleaved, @@ -140,6 +283,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; } + + intel_teardown_mchbar(dev, need_disable); } else { /* The 965, G33, and newer, have a very flexible memory * configuration. It will enable dual-channel mode -- cgit v1.2.3 From 036a4a7d9272582fc7370359515d807393e2f728 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Mon, 8 Jun 2009 14:40:19 +0800 Subject: drm/i915: handle interrupt on new chipset Update interrupt handling methods for IGDNG with new registers for display and graphics interrupt functions. As we won't use irq-based vblank sync in dri2, so display interrupt on new chip will be used for hotplug only in future. 
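The patch keeps a cached copy of each interrupt mask register (gt_irq_mask_reg for GTIMR, irq_mask_reg reused for DEIMR), so unmasking an interrupt is a read-modify-write of the cached value followed by a posting read. A simplified sketch of the igdng_enable_graphics_irq() helper added below:

	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
		dev_priv->gt_irq_mask_reg &= ~mask;	/* clear bit = unmask */
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);		/* posting read to flush the write */
	}
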
Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_dma.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 5 ++ drivers/gpu/drm/i915/i915_gem.c | 5 +- drivers/gpu/drm/i915/i915_irq.c | 186 ++++++++++++++++++++++++++++++++++++++-- 4 files changed, 190 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/i915/i915_drv.h') diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 68e882cd9651..6bc716d13a52 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1162,7 +1162,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - if (IS_G4X(dev)) { + if (IS_G4X(dev) || IS_IGDNG(dev)) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = gm45_get_vblank_counter; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6a471458d61a..8ef6bcec211b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -167,6 +167,11 @@ typedef struct drm_i915_private { /** Cached value of IMR to avoid reads in updating the bitfield */ u32 irq_mask_reg; u32 pipestat[2]; + /** splitted irq regs for graphics and display engine on IGDNG, + irq_mask_reg is still used for display irq. */ + u32 gt_irq_mask_reg; + u32 gt_irq_enable_reg; + u32 de_irq_enable_reg; u32 hotplug_supported_mask; struct work_struct hotplug_work; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3fbd8a0c40d1..38e0f8301a14 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1714,7 +1714,10 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) BUG_ON(seqno == 0); if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { - ier = I915_READ(IER); + if (IS_IGDNG(dev)) + ier = I915_READ(DEIER) | I915_READ(GTIER); + else + ier = I915_READ(IER); if (!ier) { DRM_ERROR("something (likely vbetool) disabled " "interrupts, re-enabling\n"); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 701d6809deb7..b86b7b7130c6 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -57,6 +57,47 @@ #define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ DRM_I915_VBLANK_PIPE_B) +void +igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) +{ + if ((dev_priv->gt_irq_mask_reg & mask) != 0) { + dev_priv->gt_irq_mask_reg &= ~mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); + (void) I915_READ(GTIMR); + } +} + +static inline void +igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) +{ + if ((dev_priv->gt_irq_mask_reg & mask) != mask) { + dev_priv->gt_irq_mask_reg |= mask; + I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); + (void) I915_READ(GTIMR); + } +} + +/* For display hotplug interrupt */ +void +igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) +{ + if ((dev_priv->irq_mask_reg & mask) != 0) { + dev_priv->irq_mask_reg &= ~mask; + I915_WRITE(DEIMR, dev_priv->irq_mask_reg); + (void) I915_READ(DEIMR); + } +} + +static inline void +igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) +{ + if ((dev_priv->irq_mask_reg & mask) != mask) { + dev_priv->irq_mask_reg |= mask; + I915_WRITE(DEIMR, dev_priv->irq_mask_reg); + (void) I915_READ(DEIMR); + } +} + void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) { @@ -196,6 +237,47 @@ static void 
i915_hotplug_work_func(struct work_struct *work) drm_sysfs_hotplug_event(dev); } +irqreturn_t igdng_irq_handler(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + int ret = IRQ_NONE; + u32 de_iir, gt_iir; + u32 new_de_iir, new_gt_iir; + struct drm_i915_master_private *master_priv; + + de_iir = I915_READ(DEIIR); + gt_iir = I915_READ(GTIIR); + + for (;;) { + if (de_iir == 0 && gt_iir == 0) + break; + + ret = IRQ_HANDLED; + + I915_WRITE(DEIIR, de_iir); + new_de_iir = I915_READ(DEIIR); + I915_WRITE(GTIIR, gt_iir); + new_gt_iir = I915_READ(GTIIR); + + if (dev->primary->master) { + master_priv = dev->primary->master->driver_priv; + if (master_priv->sarea_priv) + master_priv->sarea_priv->last_dispatch = + READ_BREADCRUMB(dev_priv); + } + + if (gt_iir & GT_USER_INTERRUPT) { + dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); + DRM_WAKEUP(&dev_priv->irq_queue); + } + + de_iir = new_de_iir; + gt_iir = new_gt_iir; + } + + return ret; +} + irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) { struct drm_device *dev = (struct drm_device *) arg; @@ -212,6 +294,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) atomic_inc(&dev_priv->irq_received); + if (IS_IGDNG(dev)) + return igdng_irq_handler(dev); + iir = I915_READ(IIR); if (IS_I965G(dev)) { @@ -349,8 +434,12 @@ void i915_user_irq_get(struct drm_device *dev) unsigned long irqflags; spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) - i915_enable_irq(dev_priv, I915_USER_INTERRUPT); + if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { + if (IS_IGDNG(dev)) + igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); + else + i915_enable_irq(dev_priv, I915_USER_INTERRUPT); + } spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } @@ -361,8 +450,12 @@ void i915_user_irq_put(struct drm_device *dev) spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); - if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) - i915_disable_irq(dev_priv, I915_USER_INTERRUPT); + if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { + if (IS_IGDNG(dev)) + igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); + else + i915_disable_irq(dev_priv, I915_USER_INTERRUPT); + } spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); } @@ -455,6 +548,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) if (!(pipeconf & PIPEACONF_ENABLE)) return -EINVAL; + if (IS_IGDNG(dev)) + return 0; + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); if (IS_I965G(dev)) i915_enable_pipestat(dev_priv, pipe, @@ -474,6 +570,9 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; + if (IS_IGDNG(dev)) + return; + spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE | @@ -547,12 +646,65 @@ int i915_vblank_swap(struct drm_device *dev, void *data, /* drm_dma.h hooks */ +static void igdng_irq_preinstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + + I915_WRITE(HWSTAM, 0xeffe); + + /* XXX hotplug from PCH */ + + I915_WRITE(DEIMR, 0xffffffff); + I915_WRITE(DEIER, 0x0); + (void) I915_READ(DEIER); + + /* and GT */ + I915_WRITE(GTIMR, 0xffffffff); + I915_WRITE(GTIER, 0x0); + (void) I915_READ(GTIER); +} + +static int igdng_irq_postinstall(struct 
drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + /* enable kind of interrupts always enabled */ + u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */; + u32 render_mask = GT_USER_INTERRUPT; + + dev_priv->irq_mask_reg = ~display_mask; + dev_priv->de_irq_enable_reg = display_mask; + + /* should always can generate irq */ + I915_WRITE(DEIIR, I915_READ(DEIIR)); + I915_WRITE(DEIMR, dev_priv->irq_mask_reg); + I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); + (void) I915_READ(DEIER); + + /* user interrupt should be enabled, but masked initial */ + dev_priv->gt_irq_mask_reg = 0xffffffff; + dev_priv->gt_irq_enable_reg = render_mask; + + I915_WRITE(GTIIR, I915_READ(GTIIR)); + I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); + I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); + (void) I915_READ(GTIER); + + return 0; +} + void i915_driver_irq_preinstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; atomic_set(&dev_priv->irq_received, 0); + INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); + + if (IS_IGDNG(dev)) { + igdng_irq_preinstall(dev); + return; + } + if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -564,7 +716,6 @@ void i915_driver_irq_preinstall(struct drm_device * dev) I915_WRITE(IMR, 0xffffffff); I915_WRITE(IER, 0x0); (void) I915_READ(IER); - INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); } int i915_driver_irq_postinstall(struct drm_device *dev) @@ -572,8 +723,13 @@ int i915_driver_irq_postinstall(struct drm_device *dev) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; + DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); + dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; + if (IS_IGDNG(dev)) + return igdng_irq_postinstall(dev); + /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; @@ -613,11 +769,24 @@ int i915_driver_irq_postinstall(struct drm_device *dev) (void) I915_READ(IER); opregion_enable_asle(dev); - DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); return 0; } +static void igdng_irq_uninstall(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + I915_WRITE(HWSTAM, 0xffffffff); + + I915_WRITE(DEIMR, 0xffffffff); + I915_WRITE(DEIER, 0x0); + I915_WRITE(DEIIR, I915_READ(DEIIR)); + + I915_WRITE(GTIMR, 0xffffffff); + I915_WRITE(GTIER, 0x0); + I915_WRITE(GTIIR, I915_READ(GTIIR)); +} + void i915_driver_irq_uninstall(struct drm_device * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -627,6 +796,11 @@ void i915_driver_irq_uninstall(struct drm_device * dev) dev_priv->vblank_pipe = 0; + if (IS_IGDNG(dev)) { + igdng_irq_uninstall(dev); + return; + } + if (I915_HAS_HOTPLUG(dev)) { I915_WRITE(PORT_HOTPLUG_EN, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); -- cgit v1.2.3
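As an aside on the handler added above: igdng_irq_handler() follows the usual IIR ack pattern of reading DEIIR/GTIIR, handling the bits, writing the value back to clear those bits, and re-reading to pick up anything that arrived in the meantime. A condensed sketch of that loop (sarea bookkeeping and seqno update dropped):

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);

	while (de_iir != 0 || gt_iir != 0) {
		ret = IRQ_HANDLED;

		I915_WRITE(DEIIR, de_iir);	/* writing 1s acks those bits */
		new_de_iir = I915_READ(DEIIR);
		I915_WRITE(GTIIR, gt_iir);
		new_gt_iir = I915_READ(GTIIR);

		if (gt_iir & GT_USER_INTERRUPT)
			DRM_WAKEUP(&dev_priv->irq_queue);

		de_iir = new_de_iir;
		gt_iir = new_gt_iir;
	}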