Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c | 118
1 file changed, 83 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 350a8caa84c8..6b25e25f9eba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -27,43 +27,67 @@
 #include <subdev/top.h>
 
 void
-nvkm_mc_unk260(struct nvkm_mc *mc, u32 data)
+nvkm_mc_unk260(struct nvkm_device *device, u32 data)
 {
-	if (mc->func->unk260)
+	struct nvkm_mc *mc = device->mc;
+	if (likely(mc) && mc->func->unk260)
 		mc->func->unk260(mc, data);
 }
 
 void
-nvkm_mc_intr_unarm(struct nvkm_mc *mc)
+nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx, bool en)
 {
-	return mc->func->intr_unarm(mc);
+	struct nvkm_mc *mc = device->mc;
+	const struct nvkm_mc_map *map;
+	if (likely(mc) && mc->func->intr_mask) {
+		u32 mask = nvkm_top_intr_mask(device, devidx);
+		for (map = mc->func->intr; !mask && map->stat; map++) {
+			if (map->unit == devidx)
+				mask = map->stat;
+		}
+		mc->func->intr_mask(mc, mask, en ? mask : 0);
+	}
+}
+
+void
+nvkm_mc_intr_unarm(struct nvkm_device *device)
+{
+	struct nvkm_mc *mc = device->mc;
+	if (likely(mc))
+		mc->func->intr_unarm(mc);
 }
 
 void
-nvkm_mc_intr_rearm(struct nvkm_mc *mc)
+nvkm_mc_intr_rearm(struct nvkm_device *device)
 {
-	return mc->func->intr_rearm(mc);
+	struct nvkm_mc *mc = device->mc;
+	if (likely(mc))
+		mc->func->intr_rearm(mc);
 }
 
 static u32
-nvkm_mc_intr_mask(struct nvkm_mc *mc)
+nvkm_mc_intr_stat(struct nvkm_mc *mc)
 {
-	u32 intr = mc->func->intr_mask(mc);
+	u32 intr = mc->func->intr_stat(mc);
 	if (WARN_ON_ONCE(intr == 0xffffffff))
 		intr = 0; /* likely fallen off the bus */
 	return intr;
 }
 
 void
-nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
+nvkm_mc_intr(struct nvkm_device *device, bool *handled)
 {
-	struct nvkm_device *device = mc->subdev.device;
+	struct nvkm_mc *mc = device->mc;
 	struct nvkm_subdev *subdev;
-	const struct nvkm_mc_map *map = mc->func->intr;
-	u32 stat, intr = nvkm_mc_intr_mask(mc);
+	const struct nvkm_mc_map *map;
+	u32 stat, intr;
 	u64 subdevs;
 
-	stat = nvkm_top_intr(device->top, intr, &subdevs);
+	if (unlikely(!mc))
+		return;
+
+	intr = nvkm_mc_intr_stat(mc);
+	stat = nvkm_top_intr(device, intr, &subdevs);
 	while (subdevs) {
 		enum nvkm_devidx subidx = __ffs64(subdevs);
 		subdev = nvkm_device_subdev(device, subidx);
@@ -72,14 +96,13 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
 		subdevs &= ~BIT_ULL(subidx);
 	}
 
-	while (map->stat) {
+	for (map = mc->func->intr; map->stat; map++) {
 		if (intr & map->stat) {
 			subdev = nvkm_device_subdev(device, map->unit);
 			if (subdev)
 				nvkm_subdev_intr(subdev);
 			stat &= ~map->stat;
 		}
-		map++;
 	}
 
 	if (stat)
@@ -87,22 +110,32 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
 	*handled = intr != 0;
 }
 
-static void
-nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+static u32
+nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto,
+		   enum nvkm_devidx devidx)
 {
-	struct nvkm_device *device = mc->subdev.device;
+	struct nvkm_mc *mc = device->mc;
 	const struct nvkm_mc_map *map;
-	u64 pmc_enable;
-
-	if (!(pmc_enable = nvkm_top_reset(device->top, devidx))) {
-		for (map = mc->func->reset; map && map->stat; map++) {
-			if (map->unit == devidx) {
-				pmc_enable = map->stat;
-				break;
+	u64 pmc_enable = 0;
+	if (likely(mc)) {
+		if (!(pmc_enable = nvkm_top_reset(device, devidx))) {
+			for (map = mc->func->reset; map && map->stat; map++) {
+				if (!isauto || !map->noauto) {
+					if (map->unit == devidx) {
+						pmc_enable = map->stat;
+						break;
+					}
+				}
 			}
 		}
 	}
+	return pmc_enable;
+}
 
+void
+nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+	u64 pmc_enable = nvkm_mc_reset_mask(device, true, devidx);
 	if (pmc_enable) {
 		nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
 		nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
@@ -111,17 +144,27 @@ nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx)
 }
 
 void
-nvkm_mc_reset(struct nvkm_mc *mc, enum nvkm_devidx devidx)
+nvkm_mc_disable(struct nvkm_device *device, enum nvkm_devidx devidx)
 {
-	if (likely(mc))
-		nvkm_mc_reset_(mc, devidx);
+	u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+	if (pmc_enable)
+		nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
+}
+
+void
+nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
+{
+	u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
+	if (pmc_enable) {
+		nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
+		nvkm_rd32(device, 0x000200);
+	}
 }
 
 static int
 nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
 {
-	struct nvkm_mc *mc = nvkm_mc(subdev);
-	nvkm_mc_intr_unarm(mc);
+	nvkm_mc_intr_unarm(subdev->device);
 	return 0;
 }
 
@@ -131,7 +174,7 @@ nvkm_mc_init(struct nvkm_subdev *subdev)
 	struct nvkm_mc *mc = nvkm_mc(subdev);
 	if (mc->func->init)
 		mc->func->init(mc);
-	nvkm_mc_intr_rearm(mc);
+	nvkm_mc_intr_rearm(subdev->device);
 	return 0;
 }
 
@@ -148,16 +191,21 @@ nvkm_mc = {
 	.fini = nvkm_mc_fini,
 };
 
+void
+nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device,
+	     int index, struct nvkm_mc *mc)
+{
+	nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
+	mc->func = func;
+}
+
 int
 nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
 	     int index, struct nvkm_mc **pmc)
 {
 	struct nvkm_mc *mc;
-
 	if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
 		return -ENOMEM;
-
-	nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
-	mc->func = func;
+	nvkm_mc_ctor(func, device, index, *pmc);
 	return 0;
 }
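
For reference, a minimal caller-side sketch of the reworked interface (not part of the patch): the public MC entry points now take struct nvkm_device rather than struct nvkm_mc and check device->mc internally, so a top-level interrupt handler can call them without first verifying the MC subdev exists. The handler name below is purely illustrative.

#include <linux/interrupt.h>	/* irqreturn_t, IRQ_HANDLED, IRQ_NONE */
/* Assumes the usual nvkm headers providing struct nvkm_device and the
 * nvkm_mc_* prototypes shown in the diff above are in scope. */

static irqreturn_t
example_nvkm_isr(int irq, void *arg)	/* hypothetical handler, for illustration */
{
	struct nvkm_device *device = arg;
	bool handled = false;

	nvkm_mc_intr_unarm(device);	/* previously: nvkm_mc_intr_unarm(device->mc) */
	nvkm_mc_intr(device, &handled);	/* previously: nvkm_mc_intr(device->mc, &handled) */
	nvkm_mc_intr_rearm(device);	/* previously: nvkm_mc_intr_rearm(device->mc) */

	return handled ? IRQ_HANDLED : IRQ_NONE;
}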