author      Ben Skeggs <bskeggs@redhat.com>    2013-02-14 00:28:37 +0100
committer   Ben Skeggs <bskeggs@redhat.com>    2013-02-20 07:00:51 +0100
commit      a34caf78f26bda63869471cb3f46f354f4658758 (patch)
tree        dc9b1ce9d37ea3d2f0c6a34f7e4c90cc5be3a8f0 /drivers/gpu/drm/nouveau/nv84_fence.c
parent      drm/nouveau/gpio/nve0: interrupt regs moved on kepler apparently (diff)
drm/nv84/fence: access fences with full virtual address, not offset
Allows most of the code to be shared between nv84/nvc0 implementations, and paves the way for doing emit/sync on non-VRAM buffers (multi-gpu, dma-buf).

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
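To make the change concrete, here is a minimal sketch of the new addressing scheme, using the structures that appear in the patch below; nv84_fence_addr() itself is a hypothetical helper, not part of the patch. Each channel owns a 16-byte fence slot inside one shared VRAM buffer, and that slot is now reached through the channel's own VM mapping of the buffer rather than through an offset into a per-channel DMA object.

/*
 * Minimal sketch (hypothetical helper, not in the patch): the semaphore
 * address handed to the hardware is a full virtual address built from the
 * channel's mapping of the shared fence buffer plus its 16-byte slot.
 */
static u64
nv84_fence_addr(struct nouveau_channel *chan)
{
	struct nv84_fence_chan *fctx = chan->fence;
	struct nouveau_fifo_chan *fifo = (void *)chan->object;

	return fctx->vma.offset + fifo->chid * 16;
}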
Diffstat (limited to 'drivers/gpu/drm/nouveau/nv84_fence.c')
-rw-r--r--   drivers/gpu/drm/nouveau/nv84_fence.c | 153
1 file changed, 99 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index e64e8154a5af..58c2401b18ff 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -23,6 +23,7 @@
*/
#include <core/object.h>
+#include <core/client.h>
#include <core/class.h>
#include <engine/fifo.h>
@@ -33,80 +34,96 @@
#include "nv50_display.h"
-struct nv84_fence_chan {
- struct nouveau_fence_chan base;
-};
-
-struct nv84_fence_priv {
- struct nouveau_fence_priv base;
- struct nouveau_gpuobj *mem;
-};
+u64
+nv84_fence_crtc(struct nouveau_channel *chan, int crtc)
+{
+ struct nv84_fence_chan *fctx = chan->fence;
+ return fctx->dispc_vma[crtc].offset;
+}
static int
nv84_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = fence->channel;
+ struct nv84_fence_chan *fctx = chan->fence;
struct nouveau_fifo_chan *fifo = (void *)chan->object;
- int ret = RING_SPACE(chan, 8);
+ u64 addr = fctx->vma.offset + fifo->chid * 16;
+ int ret;
+
+ ret = RING_SPACE(chan, 8);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, NvSema);
+ OUT_RING (chan, chan->vram);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
- OUT_RING (chan, upper_32_bits(fifo->chid * 16));
- OUT_RING (chan, lower_32_bits(fifo->chid * 16));
+ OUT_RING (chan, upper_32_bits(addr));
+ OUT_RING (chan, lower_32_bits(addr));
OUT_RING (chan, fence->sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
OUT_RING (chan, 0x00000000);
FIRE_RING (chan);
}
+
return ret;
}
-
static int
nv84_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
+ struct nv84_fence_chan *fctx = chan->fence;
struct nouveau_fifo_chan *fifo = (void *)prev->object;
- int ret = RING_SPACE(chan, 7);
+ u64 addr = fctx->vma.offset + fifo->chid * 16;
+ int ret;
+
+ ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, NvSema);
+ OUT_RING (chan, chan->vram);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(fifo->chid * 16));
- OUT_RING (chan, lower_32_bits(fifo->chid * 16));
+ OUT_RING (chan, upper_32_bits(addr));
+ OUT_RING (chan, lower_32_bits(addr));
OUT_RING (chan, fence->sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
FIRE_RING (chan);
}
+
return ret;
}
-static u32
+u32
nv84_fence_read(struct nouveau_channel *chan)
{
struct nouveau_fifo_chan *fifo = (void *)chan->object;
struct nv84_fence_priv *priv = chan->drm->fence;
- return nv_ro32(priv->mem, fifo->chid * 16);
+ return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
}
-static void
+void
nv84_fence_context_del(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->drm->dev;
+ struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx = chan->fence;
+ int i;
+
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
+ }
+
+ nouveau_bo_vma_del(priv->bo, &fctx->vma);
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
kfree(fctx);
}
-static int
+int
nv84_fence_context_new(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->drm->dev;
struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ struct nouveau_client *client = nouveau_client(fifo);
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
- struct nouveau_object *object;
int ret, i;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -115,43 +132,59 @@ nv84_fence_context_new(struct nouveau_channel *chan)
nouveau_fence_context_new(&fctx->base);
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = priv->mem->addr,
- .limit = priv->mem->addr +
- priv->mem->size - 1,
- }, sizeof(struct nv_dma_class),
- &object);
-
- /* dma objects for display sync channel semaphore blocks */
- for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
+ if (ret)
+ nv84_fence_context_del(chan);
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvEvoSema0 + i, 0x003d,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = bo->bo.offset,
- .limit = bo->bo.offset + 0xfff,
- }, sizeof(struct nv_dma_class),
- &object);
+ /* map display semaphore buffers into channel's vm */
+ for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
+ ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
}
- if (ret)
- nv84_fence_context_del(chan);
- nv_wo32(priv->mem, fifo->chid * 16, 0x00000000);
+ nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
return ret;
}
-static void
+bool
+nv84_fence_suspend(struct nouveau_drm *drm)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+ struct nv84_fence_priv *priv = drm->fence;
+ int i;
+
+ priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
+ if (priv->suspend) {
+ for (i = 0; i <= pfifo->max; i++)
+ priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
+ }
+
+ return priv->suspend != NULL;
+}
+
+void
+nv84_fence_resume(struct nouveau_drm *drm)
+{
+ struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+ struct nv84_fence_priv *priv = drm->fence;
+ int i;
+
+ if (priv->suspend) {
+ for (i = 0; i <= pfifo->max; i++)
+ nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
+ vfree(priv->suspend);
+ priv->suspend = NULL;
+ }
+}
+
+void
nv84_fence_destroy(struct nouveau_drm *drm)
{
struct nv84_fence_priv *priv = drm->fence;
- nouveau_gpuobj_ref(NULL, &priv->mem);
+ nouveau_bo_unmap(priv->bo);
+ if (priv->bo)
+ nouveau_bo_unpin(priv->bo);
+ nouveau_bo_ref(NULL, &priv->bo);
drm->fence = NULL;
kfree(priv);
}
@@ -161,7 +194,6 @@ nv84_fence_create(struct nouveau_drm *drm)
{
struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
struct nv84_fence_priv *priv;
- u32 chan = pfifo->max + 1;
int ret;
priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -169,6 +201,8 @@ nv84_fence_create(struct nouveau_drm *drm)
return -ENOMEM;
priv->base.dtor = nv84_fence_destroy;
+ priv->base.suspend = nv84_fence_suspend;
+ priv->base.resume = nv84_fence_resume;
priv->base.context_new = nv84_fence_context_new;
priv->base.context_del = nv84_fence_context_del;
priv->base.emit = nv84_fence_emit;
@@ -178,8 +212,19 @@ nv84_fence_create(struct nouveau_drm *drm)
init_waitqueue_head(&priv->base.waiting);
priv->base.uevent = true;
- ret = nouveau_gpuobj_new(drm->device, NULL, chan * 16, 0x1000, 0,
- &priv->mem);
+ ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+ TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
+ if (ret == 0) {
+ ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+ if (ret == 0) {
+ ret = nouveau_bo_map(priv->bo);
+ if (ret)
+ nouveau_bo_unpin(priv->bo);
+ }
+ if (ret)
+ nouveau_bo_ref(NULL, &priv->bo);
+ }
+
if (ret)
nv84_fence_destroy(drm);
return ret;
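A side note on the buffer accessors used in the patch: nouveau_bo_rd32() and nouveau_bo_wr32() index the CPU mapping in 32-bit words, which is why the byte offset chid * 16 is divided by 4 in nv84_fence_read() and nv84_fence_context_new(). A small sketch with hypothetical helper names (not part of the patch):

/*
 * Hypothetical helpers illustrating the word-indexed accessors: the
 * 16-byte per-channel slot starts at 32-bit word index (chid * 16) / 4.
 */
static u32
fence_slot_read(struct nv84_fence_priv *priv, int chid)
{
	return nouveau_bo_rd32(priv->bo, chid * 16 / 4);
}

static void
fence_slot_clear(struct nv84_fence_priv *priv, int chid)
{
	nouveau_bo_wr32(priv->bo, chid * 16 / 4, 0x00000000);
}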