Diffstat (limited to 'drivers/gpu/drm/radeon')
-rw-r--r--  drivers/gpu/drm/radeon/r600.c           | 10
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c        | 31
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c    |  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h    |  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  | 28
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c   |  1
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c          | 74
7 files changed, 136 insertions, 16 deletions
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 923f93647042..537e259b3837 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2646,7 +2646,7 @@ int r600_copy_blit(struct radeon_device *rdev,
* @num_gpu_pages: number of GPU pages to xfer
* @fence: radeon fence object
*
- * Copy GPU paging using the DMA engine (r6xx-r7xx).
+ * Copy GPU paging using the DMA engine (r6xx).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
@@ -2669,8 +2669,8 @@ int r600_copy_dma(struct radeon_device *rdev,
}

size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
- num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);
- r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+ r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
@@ -2693,8 +2693,8 @@ int r600_copy_dma(struct radeon_device *rdev,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
radeon_ring_write(ring, dst_offset & 0xfffffffc);
radeon_ring_write(ring, src_offset & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+ (upper_32_bits(src_offset) & 0xff)));
src_offset += cur_size_in_dw * 4;
dst_offset += cur_size_in_dw * 4;
}
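
Note: the r600.c hunks pack both 40-bit address high bytes into a single dword, shrinking the r6xx copy packet from 5 dwords to 4 (hence num_loops * 4) and sizing loops for at most 0xFFFE dwords per packet. A minimal sketch of the packing, with a hypothetical helper name that is not part of the patch:

    /* Illustrative only: how the merged 4th dword of the r6xx
     * DMA_PACKET_COPY is laid out. dst bits 39:32 land in dword
     * bits 23:16, src bits 39:32 in dword bits 7:0. */
    static inline u32 r6xx_dma_copy_hi_dword(u64 src_offset, u64 dst_offset)
    {
            return ((upper_32_bits(dst_offset) & 0xff) << 16) |
                    (upper_32_bits(src_offset) & 0xff);
    }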
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 9ea13d07cc55..03191a56eb44 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2677,16 +2677,29 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
}
p->idx += 7;
} else {
- src_offset = ib[idx+2];
- src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
- dst_offset = ib[idx+1];
- dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
-
- ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
- ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
- p->idx += 5;
+ if (p->family >= CHIP_RV770) {
+ src_offset = ib[idx+2];
+ src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ } else {
+ src_offset = ib[idx+2];
+ src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
+ dst_offset = ib[idx+1];
+ dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
+ p->idx += 4;
+ }
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
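
The CS parser now has to accept both packet layouts. A sketch of the two linear-copy forms it distinguishes, with hypothetical decode helpers that are not part of the patch:

    /* r7xx linear copy: 5 dwords, separate src/dst high dwords. */
    static void r7xx_decode_copy(const u32 *ib, int idx, u64 *src, u64 *dst)
    {
            *src = ib[idx+2] | ((u64)(ib[idx+4] & 0xff)) << 32;
            *dst = ib[idx+1] | ((u64)(ib[idx+3] & 0xff)) << 32;
    }

    /* r6xx linear copy: 4 dwords, both high bytes share ib[idx+3]. */
    static void r6xx_decode_copy(const u32 *ib, int idx, u64 *src, u64 *dst)
    {
            *src = ib[idx+2] | ((u64)(ib[idx+3] & 0xff)) << 32;
            /* bits 23:16 of ib[idx+3] are dst bits 39:32 */
            *dst = ib[idx+1] | ((u64)(ib[idx+3] & 0xff0000)) << 16;
    }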
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 596bcbe80ed0..9056fafb00ea 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1140,9 +1140,9 @@ static struct radeon_asic rv770_asic = {
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = &r600_copy_dma,
+ .dma = &rv770_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
- .copy = &r600_copy_dma,
+ .copy = &rv770_copy_dma,
.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
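
radeon_asic.c only repoints two callbacks; every BO move on rv770 funnels through this table, so these two lines are the whole switch to the new routine. The slots being repointed have roughly this shape (a paraphrased sketch of the copy sub-struct in radeon.h; field names are the ones visible above):

    struct {
            int (*blit)(struct radeon_device *rdev, uint64_t src_offset,
                        uint64_t dst_offset, unsigned num_gpu_pages,
                        struct radeon_fence **fence);
            u32 blit_ring_index;
            int (*dma)(struct radeon_device *rdev, uint64_t src_offset,
                       uint64_t dst_offset, unsigned num_gpu_pages,
                       struct radeon_fence **fence);
            u32 dma_ring_index;
            int (*copy)(struct radeon_device *rdev, uint64_t src_offset,
                        uint64_t dst_offset, unsigned num_gpu_pages,
                        struct radeon_fence **fence);
            u32 copy_ring_index;
    } copy;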
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5f4882cc2152..15d70e613076 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -403,6 +403,10 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
void r700_cp_stop(struct radeon_device *rdev);
void r700_cp_fini(struct radeon_device *rdev);
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);

/*
* evergreen
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index cd756262924d..edfc54e41842 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -897,6 +897,25 @@ static void radeon_check_arguments(struct radeon_device *rdev)
}

/**
+ * radeon_switcheroo_quirk_long_wakeup - return true if a longer d3 delay is
+ * needed for waking up.
+ *
+ * @pdev: pci dev pointer
+ */
+static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
+{
+
+ /* 6600m in a macbook pro */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+ pdev->subsystem_device == 0x00e2) {
+ printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
+ return true;
+ }
+
+ return false;
+}
+
+/**
* radeon_switcheroo_set_state - set switcheroo state
*
* @pdev: pci dev pointer
@@ -910,10 +929,19 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
+ unsigned d3_delay = dev->pdev->d3_delay;
+
printk(KERN_INFO "radeon: switched on\n");
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+ if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
+ dev->pdev->d3_delay = 20;
+
radeon_resume_kms(dev);
+
+ dev->pdev->d3_delay = d3_delay;
+
dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
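
The switcheroo hunks save pdev->d3_delay, raise it to 20 ms around resume when the quirk matches, and restore the saved value afterwards. Should more machines need this, the open-coded subsystem-ID compare could become a quirk table; a hypothetical sketch (pci_match_id() is the existing kernel API, the table and wrapper are not in the patch):

    static const struct pci_device_id radeon_long_wakeup_ids[] = {
            /* vendor, device, subvendor, subdevice: 6600m in a macbook pro */
            { PCI_ANY_ID, PCI_ANY_ID, PCI_VENDOR_ID_APPLE, 0x00e2 },
            { /* sentinel */ }
    };

    static bool radeon_needs_long_wakeup(struct pci_dev *pdev)
    {
            return pci_match_id(radeon_long_wakeup_ids, pdev) != NULL;
    }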
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index e09521858f64..26c23bb651c6 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -194,6 +194,7 @@ struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
bo = dma_buf->priv;
if (bo->gem_base.dev == dev) {
drm_gem_object_reference(&bo->gem_base);
+ dma_buf_put(dma_buf);
return &bo->gem_base;
}
}
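
The radeon_prime.c one-liner plugs a reference leak on self-import: the caller hands the import a dma_buf reference, and the normal path keeps it alive via the attachment, but the shortcut for one of our own BOs keeps only a GEM reference, so the dma_buf reference must be dropped. Roughly (an illustrative comment, not code from the patch):

    /*
     * Self-import flow after the fix:
     *   caller:  dma_buf reference taken when resolving the fd
     *   import:  drm_gem_object_reference(&bo->gem_base)  <- GEM ref kept
     *            dma_buf_put(dma_buf)                     <- buf ref dropped
     * Without the put, each self-import leaked one dma_buf reference.
     */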
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 87c979c4f721..1b2444f4d8f4 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -887,6 +887,80 @@ static int rv770_mc_init(struct radeon_device *rdev)
return 0;
}

+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFF)
+ cur_size_in_dw = 0xFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
static int rv770_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
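
rv770_copy_dma keeps the classic 5-dword packet with separate high dwords and the 0xFFFF per-packet cap that r7xx supports. How a caller might drive it through the asic table and wait for completion, as a hypothetical sketch (the helper is not part of the patch; radeon_fence_wait/radeon_fence_unref are the existing APIs):

    static int rv770_dma_copy_sync(struct radeon_device *rdev,
                                   u64 src, u64 dst, unsigned num_gpu_pages)
    {
            struct radeon_fence *fence = NULL;
            int r;

            /* dispatches to rv770_copy_dma via the table patched above */
            r = rdev->asic->copy.dma(rdev, src, dst, num_gpu_pages, &fence);
            if (r)
                    return r;

            /* block until the DMA engine signals the fence */
            r = radeon_fence_wait(fence, false);
            radeon_fence_unref(&fence);
            return r;
    }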