Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h | 8
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h | 8
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx.xml.h | 8
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx.xml.h | 8
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 20
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 12
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx.xml.h | 1413
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 144
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 21
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h | 12
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 38
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 3
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 161
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 8
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 737
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 115
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 257
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 23
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 48
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 125
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 109
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 56
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c | 323
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h | 139
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 16
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 29
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c | 29
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h | 7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 39
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c | 18
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h | 17
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 16
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 45
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 501
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c | 15
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h | 14
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 285
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 44
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 116
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 10
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 49
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 6
62 files changed, 1910 insertions(+), 3248 deletions(-)
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 261fa79d456d..19ab521d4c3a 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -58,7 +58,6 @@ msm-y := \
disp/dpu1/dpu_formats.o \
disp/dpu1/dpu_hw_blk.o \
disp/dpu1/dpu_hw_catalog.o \
- disp/dpu1/dpu_hw_cdm.o \
disp/dpu1/dpu_hw_ctl.o \
disp/dpu1/dpu_hw_interrupts.o \
disp/dpu1/dpu_hw_intf.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 4bff0a740c7d..12b0ba270b5e 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 645a19aef399..a89f7bb8b5cc 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 19565e87aa7b..858690f52854 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index 182d37ff3794..b4944cc0e62f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index 059ec7d394d0..d2127b1c4ece 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -132,14 +132,14 @@ reset_set(void *data, u64 val)
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
- drm_gem_object_unreference(a5xx_gpu->pm4_bo);
+ drm_gem_object_put(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
if (a5xx_gpu->pfp_iova)
msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
- drm_gem_object_unreference(a5xx_gpu->pfp_bo);
+ drm_gem_object_put(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index ab1d9308c311..48b5304f460c 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1234,7 +1234,7 @@ static void a5xx_crashdumper_free(struct msm_gpu *gpu,
msm_gem_put_iova(dumper->bo, gpu->aspace);
msm_gem_put_vaddr(dumper->bo);
- drm_gem_object_unreference(dumper->bo);
+ drm_gem_object_put(dumper->bo);
}
static int a5xx_crashdumper_run(struct msm_gpu *gpu,
@@ -1436,12 +1436,22 @@ static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
return a5xx_gpu->cur_ring;
}
-static int a5xx_gpu_busy(struct msm_gpu *gpu, uint64_t *value)
+static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
{
- *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
- REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
+ u64 busy_cycles, busy_time;
- return 0;
+ busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
+ REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
+
+ busy_time = busy_cycles - gpu->devfreq.busy_cycles;
+ do_div(busy_time, clk_get_rate(gpu->core_clk) / 1000000);
+
+ gpu->devfreq.busy_cycles = busy_cycles;
+
+ if (WARN_ON(busy_time > ~0LU))
+ return ~0LU;
+
+ return (unsigned long)busy_time;
}
static const struct adreno_gpu_funcs funcs = {
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index e9c0e56dbec0..7a41e1c147e4 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -323,7 +323,7 @@ err:
if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
if (a5xx_gpu->gpmu_bo)
- drm_gem_object_unreference(a5xx_gpu->gpmu_bo);
+ drm_gem_object_put(a5xx_gpu->gpmu_bo);
a5xx_gpu->gpmu_bo = NULL;
a5xx_gpu->gpmu_iova = 0;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 970c7963ae29..4c357ead1be6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -208,6 +208,13 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
+ /* Always come up on rb 0 */
+ a5xx_gpu->cur_ring = gpu->rb[0];
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings == 1)
+ return;
+
for (i = 0; i < gpu->nr_rings; i++) {
a5xx_gpu->preempt[i]->wptr = 0;
a5xx_gpu->preempt[i]->rptr = 0;
@@ -220,9 +227,6 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
/* Reset the preemption state */
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
-
- /* Always come up on rb 0 */
- a5xx_gpu->cur_ring = gpu->rb[0];
}
static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
@@ -272,7 +276,7 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
if (a5xx_gpu->preempt_iova[i])
msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
- drm_gem_object_unreference(a5xx_gpu->preempt_bo[i]);
+ drm_gem_object_put(a5xx_gpu->preempt_bo[i]);
a5xx_gpu->preempt_bo[i] = NULL;
}
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index 87eab51f7000..a6f7c40454a6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
@@ -268,8 +268,687 @@ enum a6xx_depth_format {
DEPTH6_32 = 4,
};
+enum a6xx_shader_id {
+ A6XX_TP0_TMO_DATA = 9,
+ A6XX_TP0_SMO_DATA = 10,
+ A6XX_TP0_MIPMAP_BASE_DATA = 11,
+ A6XX_TP1_TMO_DATA = 25,
+ A6XX_TP1_SMO_DATA = 26,
+ A6XX_TP1_MIPMAP_BASE_DATA = 27,
+ A6XX_SP_INST_DATA = 41,
+ A6XX_SP_LB_0_DATA = 42,
+ A6XX_SP_LB_1_DATA = 43,
+ A6XX_SP_LB_2_DATA = 44,
+ A6XX_SP_LB_3_DATA = 45,
+ A6XX_SP_LB_4_DATA = 46,
+ A6XX_SP_LB_5_DATA = 47,
+ A6XX_SP_CB_BINDLESS_DATA = 48,
+ A6XX_SP_CB_LEGACY_DATA = 49,
+ A6XX_SP_UAV_DATA = 50,
+ A6XX_SP_INST_TAG = 51,
+ A6XX_SP_CB_BINDLESS_TAG = 52,
+ A6XX_SP_TMO_UMO_TAG = 53,
+ A6XX_SP_SMO_TAG = 54,
+ A6XX_SP_STATE_DATA = 55,
+ A6XX_HLSQ_CHUNK_CVS_RAM = 73,
+ A6XX_HLSQ_CHUNK_CPS_RAM = 74,
+ A6XX_HLSQ_CHUNK_CVS_RAM_TAG = 75,
+ A6XX_HLSQ_CHUNK_CPS_RAM_TAG = 76,
+ A6XX_HLSQ_ICB_CVS_CB_BASE_TAG = 77,
+ A6XX_HLSQ_ICB_CPS_CB_BASE_TAG = 78,
+ A6XX_HLSQ_CVS_MISC_RAM = 80,
+ A6XX_HLSQ_CPS_MISC_RAM = 81,
+ A6XX_HLSQ_INST_RAM = 82,
+ A6XX_HLSQ_GFX_CVS_CONST_RAM = 83,
+ A6XX_HLSQ_GFX_CPS_CONST_RAM = 84,
+ A6XX_HLSQ_CVS_MISC_RAM_TAG = 85,
+ A6XX_HLSQ_CPS_MISC_RAM_TAG = 86,
+ A6XX_HLSQ_INST_RAM_TAG = 87,
+ A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG = 88,
+ A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG = 89,
+ A6XX_HLSQ_PWR_REST_RAM = 90,
+ A6XX_HLSQ_PWR_REST_TAG = 91,
+ A6XX_HLSQ_DATAPATH_META = 96,
+ A6XX_HLSQ_FRONTEND_META = 97,
+ A6XX_HLSQ_INDIRECT_META = 98,
+ A6XX_HLSQ_BACKEND_META = 99,
+};
+
+enum a6xx_debugbus_id {
+ A6XX_DBGBUS_CP = 1,
+ A6XX_DBGBUS_RBBM = 2,
+ A6XX_DBGBUS_VBIF = 3,
+ A6XX_DBGBUS_HLSQ = 4,
+ A6XX_DBGBUS_UCHE = 5,
+ A6XX_DBGBUS_DPM = 6,
+ A6XX_DBGBUS_TESS = 7,
+ A6XX_DBGBUS_PC = 8,
+ A6XX_DBGBUS_VFDP = 9,
+ A6XX_DBGBUS_VPC = 10,
+ A6XX_DBGBUS_TSE = 11,
+ A6XX_DBGBUS_RAS = 12,
+ A6XX_DBGBUS_VSC = 13,
+ A6XX_DBGBUS_COM = 14,
+ A6XX_DBGBUS_LRZ = 16,
+ A6XX_DBGBUS_A2D = 17,
+ A6XX_DBGBUS_CCUFCHE = 18,
+ A6XX_DBGBUS_GMU_CX = 19,
+ A6XX_DBGBUS_RBP = 20,
+ A6XX_DBGBUS_DCS = 21,
+ A6XX_DBGBUS_DBGC = 22,
+ A6XX_DBGBUS_CX = 23,
+ A6XX_DBGBUS_GMU_GX = 24,
+ A6XX_DBGBUS_TPFCHE = 25,
+ A6XX_DBGBUS_GBIF_GX = 26,
+ A6XX_DBGBUS_GPC = 29,
+ A6XX_DBGBUS_LARC = 30,
+ A6XX_DBGBUS_HLSQ_SPTP = 31,
+ A6XX_DBGBUS_RB_0 = 32,
+ A6XX_DBGBUS_RB_1 = 33,
+ A6XX_DBGBUS_UCHE_WRAPPER = 36,
+ A6XX_DBGBUS_CCU_0 = 40,
+ A6XX_DBGBUS_CCU_1 = 41,
+ A6XX_DBGBUS_VFD_0 = 56,
+ A6XX_DBGBUS_VFD_1 = 57,
+ A6XX_DBGBUS_VFD_2 = 58,
+ A6XX_DBGBUS_VFD_3 = 59,
+ A6XX_DBGBUS_SP_0 = 64,
+ A6XX_DBGBUS_SP_1 = 65,
+ A6XX_DBGBUS_TPL1_0 = 72,
+ A6XX_DBGBUS_TPL1_1 = 73,
+ A6XX_DBGBUS_TPL1_2 = 74,
+ A6XX_DBGBUS_TPL1_3 = 75,
+};
+
enum a6xx_cp_perfcounter_select {
PERF_CP_ALWAYS_COUNT = 0,
+ PERF_CP_BUSY_GFX_CORE_IDLE = 1,
+ PERF_CP_BUSY_CYCLES = 2,
+ PERF_CP_NUM_PREEMPTIONS = 3,
+ PERF_CP_PREEMPTION_REACTION_DELAY = 4,
+ PERF_CP_PREEMPTION_SWITCH_OUT_TIME = 5,
+ PERF_CP_PREEMPTION_SWITCH_IN_TIME = 6,
+ PERF_CP_DEAD_DRAWS_IN_BIN_RENDER = 7,
+ PERF_CP_PREDICATED_DRAWS_KILLED = 8,
+ PERF_CP_MODE_SWITCH = 9,
+ PERF_CP_ZPASS_DONE = 10,
+ PERF_CP_CONTEXT_DONE = 11,
+ PERF_CP_CACHE_FLUSH = 12,
+ PERF_CP_LONG_PREEMPTIONS = 13,
+ PERF_CP_SQE_I_CACHE_STARVE = 14,
+ PERF_CP_SQE_IDLE = 15,
+ PERF_CP_SQE_PM4_STARVE_RB_IB = 16,
+ PERF_CP_SQE_PM4_STARVE_SDS = 17,
+ PERF_CP_SQE_MRB_STARVE = 18,
+ PERF_CP_SQE_RRB_STARVE = 19,
+ PERF_CP_SQE_VSD_STARVE = 20,
+ PERF_CP_VSD_DECODE_STARVE = 21,
+ PERF_CP_SQE_PIPE_OUT_STALL = 22,
+ PERF_CP_SQE_SYNC_STALL = 23,
+ PERF_CP_SQE_PM4_WFI_STALL = 24,
+ PERF_CP_SQE_SYS_WFI_STALL = 25,
+ PERF_CP_SQE_T4_EXEC = 26,
+ PERF_CP_SQE_LOAD_STATE_EXEC = 27,
+ PERF_CP_SQE_SAVE_SDS_STATE = 28,
+ PERF_CP_SQE_DRAW_EXEC = 29,
+ PERF_CP_SQE_CTXT_REG_BUNCH_EXEC = 30,
+ PERF_CP_SQE_EXEC_PROFILED = 31,
+ PERF_CP_MEMORY_POOL_EMPTY = 32,
+ PERF_CP_MEMORY_POOL_SYNC_STALL = 33,
+ PERF_CP_MEMORY_POOL_ABOVE_THRESH = 34,
+ PERF_CP_AHB_WR_STALL_PRE_DRAWS = 35,
+ PERF_CP_AHB_STALL_SQE_GMU = 36,
+ PERF_CP_AHB_STALL_SQE_WR_OTHER = 37,
+ PERF_CP_AHB_STALL_SQE_RD_OTHER = 38,
+ PERF_CP_CLUSTER0_EMPTY = 39,
+ PERF_CP_CLUSTER1_EMPTY = 40,
+ PERF_CP_CLUSTER2_EMPTY = 41,
+ PERF_CP_CLUSTER3_EMPTY = 42,
+ PERF_CP_CLUSTER4_EMPTY = 43,
+ PERF_CP_CLUSTER5_EMPTY = 44,
+ PERF_CP_PM4_DATA = 45,
+ PERF_CP_PM4_HEADERS = 46,
+ PERF_CP_VBIF_READ_BEATS = 47,
+ PERF_CP_VBIF_WRITE_BEATS = 48,
+ PERF_CP_SQE_INSTR_COUNTER = 49,
+};
+
+enum a6xx_rbbm_perfcounter_select {
+ PERF_RBBM_ALWAYS_COUNT = 0,
+ PERF_RBBM_ALWAYS_ON = 1,
+ PERF_RBBM_TSE_BUSY = 2,
+ PERF_RBBM_RAS_BUSY = 3,
+ PERF_RBBM_PC_DCALL_BUSY = 4,
+ PERF_RBBM_PC_VSD_BUSY = 5,
+ PERF_RBBM_STATUS_MASKED = 6,
+ PERF_RBBM_COM_BUSY = 7,
+ PERF_RBBM_DCOM_BUSY = 8,
+ PERF_RBBM_VBIF_BUSY = 9,
+ PERF_RBBM_VSC_BUSY = 10,
+ PERF_RBBM_TESS_BUSY = 11,
+ PERF_RBBM_UCHE_BUSY = 12,
+ PERF_RBBM_HLSQ_BUSY = 13,
+};
+
+enum a6xx_pc_perfcounter_select {
+ PERF_PC_BUSY_CYCLES = 0,
+ PERF_PC_WORKING_CYCLES = 1,
+ PERF_PC_STALL_CYCLES_VFD = 2,
+ PERF_PC_STALL_CYCLES_TSE = 3,
+ PERF_PC_STALL_CYCLES_VPC = 4,
+ PERF_PC_STALL_CYCLES_UCHE = 5,
+ PERF_PC_STALL_CYCLES_TESS = 6,
+ PERF_PC_STALL_CYCLES_TSE_ONLY = 7,
+ PERF_PC_STALL_CYCLES_VPC_ONLY = 8,
+ PERF_PC_PASS1_TF_STALL_CYCLES = 9,
+ PERF_PC_STARVE_CYCLES_FOR_INDEX = 10,
+ PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR = 11,
+ PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM = 12,
+ PERF_PC_STARVE_CYCLES_FOR_POSITION = 13,
+ PERF_PC_STARVE_CYCLES_DI = 14,
+ PERF_PC_VIS_STREAMS_LOADED = 15,
+ PERF_PC_INSTANCES = 16,
+ PERF_PC_VPC_PRIMITIVES = 17,
+ PERF_PC_DEAD_PRIM = 18,
+ PERF_PC_LIVE_PRIM = 19,
+ PERF_PC_VERTEX_HITS = 20,
+ PERF_PC_IA_VERTICES = 21,
+ PERF_PC_IA_PRIMITIVES = 22,
+ PERF_PC_GS_PRIMITIVES = 23,
+ PERF_PC_HS_INVOCATIONS = 24,
+ PERF_PC_DS_INVOCATIONS = 25,
+ PERF_PC_VS_INVOCATIONS = 26,
+ PERF_PC_GS_INVOCATIONS = 27,
+ PERF_PC_DS_PRIMITIVES = 28,
+ PERF_PC_VPC_POS_DATA_TRANSACTION = 29,
+ PERF_PC_3D_DRAWCALLS = 30,
+ PERF_PC_2D_DRAWCALLS = 31,
+ PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS = 32,
+ PERF_TESS_BUSY_CYCLES = 33,
+ PERF_TESS_WORKING_CYCLES = 34,
+ PERF_TESS_STALL_CYCLES_PC = 35,
+ PERF_TESS_STARVE_CYCLES_PC = 36,
+ PERF_PC_TSE_TRANSACTION = 37,
+ PERF_PC_TSE_VERTEX = 38,
+ PERF_PC_TESS_PC_UV_TRANS = 39,
+ PERF_PC_TESS_PC_UV_PATCHES = 40,
+ PERF_PC_TESS_FACTOR_TRANS = 41,
+};
+
+enum a6xx_vfd_perfcounter_select {
+ PERF_VFD_BUSY_CYCLES = 0,
+ PERF_VFD_STALL_CYCLES_UCHE = 1,
+ PERF_VFD_STALL_CYCLES_VPC_ALLOC = 2,
+ PERF_VFD_STALL_CYCLES_SP_INFO = 3,
+ PERF_VFD_STALL_CYCLES_SP_ATTR = 4,
+ PERF_VFD_STARVE_CYCLES_UCHE = 5,
+ PERF_VFD_RBUFFER_FULL = 6,
+ PERF_VFD_ATTR_INFO_FIFO_FULL = 7,
+ PERF_VFD_DECODED_ATTRIBUTE_BYTES = 8,
+ PERF_VFD_NUM_ATTRIBUTES = 9,
+ PERF_VFD_UPPER_SHADER_FIBERS = 10,
+ PERF_VFD_LOWER_SHADER_FIBERS = 11,
+ PERF_VFD_MODE_0_FIBERS = 12,
+ PERF_VFD_MODE_1_FIBERS = 13,
+ PERF_VFD_MODE_2_FIBERS = 14,
+ PERF_VFD_MODE_3_FIBERS = 15,
+ PERF_VFD_MODE_4_FIBERS = 16,
+ PERF_VFD_TOTAL_VERTICES = 17,
+ PERF_VFDP_STALL_CYCLES_VFD = 18,
+ PERF_VFDP_STALL_CYCLES_VFD_INDEX = 19,
+ PERF_VFDP_STALL_CYCLES_VFD_PROG = 20,
+ PERF_VFDP_STARVE_CYCLES_PC = 21,
+ PERF_VFDP_VS_STAGE_WAVES = 22,
+};
+
+enum a6xx_hslq_perfcounter_select {
+ PERF_HLSQ_BUSY_CYCLES = 0,
+ PERF_HLSQ_STALL_CYCLES_UCHE = 1,
+ PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
+ PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE = 3,
+ PERF_HLSQ_UCHE_LATENCY_CYCLES = 4,
+ PERF_HLSQ_UCHE_LATENCY_COUNT = 5,
+ PERF_HLSQ_FS_STAGE_1X_WAVES = 6,
+ PERF_HLSQ_FS_STAGE_2X_WAVES = 7,
+ PERF_HLSQ_QUADS = 8,
+ PERF_HLSQ_CS_INVOCATIONS = 9,
+ PERF_HLSQ_COMPUTE_DRAWCALLS = 10,
+ PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING = 11,
+ PERF_HLSQ_DUAL_FS_PROG_ACTIVE = 12,
+ PERF_HLSQ_DUAL_VS_PROG_ACTIVE = 13,
+ PERF_HLSQ_FS_BATCH_COUNT_ZERO = 14,
+ PERF_HLSQ_VS_BATCH_COUNT_ZERO = 15,
+ PERF_HLSQ_WAVE_PENDING_NO_QUAD = 16,
+ PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE = 17,
+ PERF_HLSQ_STALL_CYCLES_VPC = 18,
+ PERF_HLSQ_PIXELS = 19,
+ PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC = 20,
+};
+
+enum a6xx_vpc_perfcounter_select {
+ PERF_VPC_BUSY_CYCLES = 0,
+ PERF_VPC_WORKING_CYCLES = 1,
+ PERF_VPC_STALL_CYCLES_UCHE = 2,
+ PERF_VPC_STALL_CYCLES_VFD_WACK = 3,
+ PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC = 4,
+ PERF_VPC_STALL_CYCLES_PC = 5,
+ PERF_VPC_STALL_CYCLES_SP_LM = 6,
+ PERF_VPC_STARVE_CYCLES_SP = 7,
+ PERF_VPC_STARVE_CYCLES_LRZ = 8,
+ PERF_VPC_PC_PRIMITIVES = 9,
+ PERF_VPC_SP_COMPONENTS = 10,
+ PERF_VPC_STALL_CYCLES_VPCRAM_POS = 11,
+ PERF_VPC_LRZ_ASSIGN_PRIMITIVES = 12,
+ PERF_VPC_RB_VISIBLE_PRIMITIVES = 13,
+ PERF_VPC_LM_TRANSACTION = 14,
+ PERF_VPC_STREAMOUT_TRANSACTION = 15,
+ PERF_VPC_VS_BUSY_CYCLES = 16,
+ PERF_VPC_PS_BUSY_CYCLES = 17,
+ PERF_VPC_VS_WORKING_CYCLES = 18,
+ PERF_VPC_PS_WORKING_CYCLES = 19,
+ PERF_VPC_STARVE_CYCLES_RB = 20,
+ PERF_VPC_NUM_VPCRAM_READ_POS = 21,
+ PERF_VPC_WIT_FULL_CYCLES = 22,
+ PERF_VPC_VPCRAM_FULL_CYCLES = 23,
+ PERF_VPC_LM_FULL_WAIT_FOR_INTP_END = 24,
+ PERF_VPC_NUM_VPCRAM_WRITE = 25,
+ PERF_VPC_NUM_VPCRAM_READ_SO = 26,
+ PERF_VPC_NUM_ATTR_REQ_LM = 27,
+};
+
+enum a6xx_tse_perfcounter_select {
+ PERF_TSE_BUSY_CYCLES = 0,
+ PERF_TSE_CLIPPING_CYCLES = 1,
+ PERF_TSE_STALL_CYCLES_RAS = 2,
+ PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE = 3,
+ PERF_TSE_STALL_CYCLES_LRZ_ZPLANE = 4,
+ PERF_TSE_STARVE_CYCLES_PC = 5,
+ PERF_TSE_INPUT_PRIM = 6,
+ PERF_TSE_INPUT_NULL_PRIM = 7,
+ PERF_TSE_TRIVAL_REJ_PRIM = 8,
+ PERF_TSE_CLIPPED_PRIM = 9,
+ PERF_TSE_ZERO_AREA_PRIM = 10,
+ PERF_TSE_FACENESS_CULLED_PRIM = 11,
+ PERF_TSE_ZERO_PIXEL_PRIM = 12,
+ PERF_TSE_OUTPUT_NULL_PRIM = 13,
+ PERF_TSE_OUTPUT_VISIBLE_PRIM = 14,
+ PERF_TSE_CINVOCATION = 15,
+ PERF_TSE_CPRIMITIVES = 16,
+ PERF_TSE_2D_INPUT_PRIM = 17,
+ PERF_TSE_2D_ALIVE_CYCLES = 18,
+ PERF_TSE_CLIP_PLANES = 19,
+};
+
+enum a6xx_ras_perfcounter_select {
+ PERF_RAS_BUSY_CYCLES = 0,
+ PERF_RAS_SUPERTILE_ACTIVE_CYCLES = 1,
+ PERF_RAS_STALL_CYCLES_LRZ = 2,
+ PERF_RAS_STARVE_CYCLES_TSE = 3,
+ PERF_RAS_SUPER_TILES = 4,
+ PERF_RAS_8X4_TILES = 5,
+ PERF_RAS_MASKGEN_ACTIVE = 6,
+ PERF_RAS_FULLY_COVERED_SUPER_TILES = 7,
+ PERF_RAS_FULLY_COVERED_8X4_TILES = 8,
+ PERF_RAS_PRIM_KILLED_INVISILBE = 9,
+ PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES = 10,
+ PERF_RAS_LRZ_INTF_WORKING_CYCLES = 11,
+ PERF_RAS_BLOCKS = 12,
+};
+
+enum a6xx_uche_perfcounter_select {
+ PERF_UCHE_BUSY_CYCLES = 0,
+ PERF_UCHE_STALL_CYCLES_ARBITER = 1,
+ PERF_UCHE_VBIF_LATENCY_CYCLES = 2,
+ PERF_UCHE_VBIF_LATENCY_SAMPLES = 3,
+ PERF_UCHE_VBIF_READ_BEATS_TP = 4,
+ PERF_UCHE_VBIF_READ_BEATS_VFD = 5,
+ PERF_UCHE_VBIF_READ_BEATS_HLSQ = 6,
+ PERF_UCHE_VBIF_READ_BEATS_LRZ = 7,
+ PERF_UCHE_VBIF_READ_BEATS_SP = 8,
+ PERF_UCHE_READ_REQUESTS_TP = 9,
+ PERF_UCHE_READ_REQUESTS_VFD = 10,
+ PERF_UCHE_READ_REQUESTS_HLSQ = 11,
+ PERF_UCHE_READ_REQUESTS_LRZ = 12,
+ PERF_UCHE_READ_REQUESTS_SP = 13,
+ PERF_UCHE_WRITE_REQUESTS_LRZ = 14,
+ PERF_UCHE_WRITE_REQUESTS_SP = 15,
+ PERF_UCHE_WRITE_REQUESTS_VPC = 16,
+ PERF_UCHE_WRITE_REQUESTS_VSC = 17,
+ PERF_UCHE_EVICTS = 18,
+ PERF_UCHE_BANK_REQ0 = 19,
+ PERF_UCHE_BANK_REQ1 = 20,
+ PERF_UCHE_BANK_REQ2 = 21,
+ PERF_UCHE_BANK_REQ3 = 22,
+ PERF_UCHE_BANK_REQ4 = 23,
+ PERF_UCHE_BANK_REQ5 = 24,
+ PERF_UCHE_BANK_REQ6 = 25,
+ PERF_UCHE_BANK_REQ7 = 26,
+ PERF_UCHE_VBIF_READ_BEATS_CH0 = 27,
+ PERF_UCHE_VBIF_READ_BEATS_CH1 = 28,
+ PERF_UCHE_GMEM_READ_BEATS = 29,
+ PERF_UCHE_TPH_REF_FULL = 30,
+ PERF_UCHE_TPH_VICTIM_FULL = 31,
+ PERF_UCHE_TPH_EXT_FULL = 32,
+ PERF_UCHE_VBIF_STALL_WRITE_DATA = 33,
+ PERF_UCHE_DCMP_LATENCY_SAMPLES = 34,
+ PERF_UCHE_DCMP_LATENCY_CYCLES = 35,
+ PERF_UCHE_VBIF_READ_BEATS_PC = 36,
+ PERF_UCHE_READ_REQUESTS_PC = 37,
+ PERF_UCHE_RAM_READ_REQ = 38,
+ PERF_UCHE_RAM_WRITE_REQ = 39,
+};
+
+enum a6xx_tp_perfcounter_select {
+ PERF_TP_BUSY_CYCLES = 0,
+ PERF_TP_STALL_CYCLES_UCHE = 1,
+ PERF_TP_LATENCY_CYCLES = 2,
+ PERF_TP_LATENCY_TRANS = 3,
+ PERF_TP_FLAG_CACHE_REQUEST_SAMPLES = 4,
+ PERF_TP_FLAG_CACHE_REQUEST_LATENCY = 5,
+ PERF_TP_L1_CACHELINE_REQUESTS = 6,
+ PERF_TP_L1_CACHELINE_MISSES = 7,
+ PERF_TP_SP_TP_TRANS = 8,
+ PERF_TP_TP_SP_TRANS = 9,
+ PERF_TP_OUTPUT_PIXELS = 10,
+ PERF_TP_FILTER_WORKLOAD_16BIT = 11,
+ PERF_TP_FILTER_WORKLOAD_32BIT = 12,
+ PERF_TP_QUADS_RECEIVED = 13,
+ PERF_TP_QUADS_OFFSET = 14,
+ PERF_TP_QUADS_SHADOW = 15,
+ PERF_TP_QUADS_ARRAY = 16,
+ PERF_TP_QUADS_GRADIENT = 17,
+ PERF_TP_QUADS_1D = 18,
+ PERF_TP_QUADS_2D = 19,
+ PERF_TP_QUADS_BUFFER = 20,
+ PERF_TP_QUADS_3D = 21,
+ PERF_TP_QUADS_CUBE = 22,
+ PERF_TP_DIVERGENT_QUADS_RECEIVED = 23,
+ PERF_TP_PRT_NON_RESIDENT_EVENTS = 24,
+ PERF_TP_OUTPUT_PIXELS_POINT = 25,
+ PERF_TP_OUTPUT_PIXELS_BILINEAR = 26,
+ PERF_TP_OUTPUT_PIXELS_MIP = 27,
+ PERF_TP_OUTPUT_PIXELS_ANISO = 28,
+ PERF_TP_OUTPUT_PIXELS_ZERO_LOD = 29,
+ PERF_TP_FLAG_CACHE_REQUESTS = 30,
+ PERF_TP_FLAG_CACHE_MISSES = 31,
+ PERF_TP_L1_5_L2_REQUESTS = 32,
+ PERF_TP_2D_OUTPUT_PIXELS = 33,
+ PERF_TP_2D_OUTPUT_PIXELS_POINT = 34,
+ PERF_TP_2D_OUTPUT_PIXELS_BILINEAR = 35,
+ PERF_TP_2D_FILTER_WORKLOAD_16BIT = 36,
+ PERF_TP_2D_FILTER_WORKLOAD_32BIT = 37,
+ PERF_TP_TPA2TPC_TRANS = 38,
+ PERF_TP_L1_MISSES_ASTC_1TILE = 39,
+ PERF_TP_L1_MISSES_ASTC_2TILE = 40,
+ PERF_TP_L1_MISSES_ASTC_4TILE = 41,
+ PERF_TP_L1_5_L2_COMPRESS_REQS = 42,
+ PERF_TP_L1_5_L2_COMPRESS_MISS = 43,
+ PERF_TP_L1_BANK_CONFLICT = 44,
+ PERF_TP_L1_5_MISS_LATENCY_CYCLES = 45,
+ PERF_TP_L1_5_MISS_LATENCY_TRANS = 46,
+ PERF_TP_QUADS_CONSTANT_MULTIPLIED = 47,
+ PERF_TP_FRONTEND_WORKING_CYCLES = 48,
+ PERF_TP_L1_TAG_WORKING_CYCLES = 49,
+ PERF_TP_L1_DATA_WRITE_WORKING_CYCLES = 50,
+ PERF_TP_PRE_L1_DECOM_WORKING_CYCLES = 51,
+ PERF_TP_BACKEND_WORKING_CYCLES = 52,
+ PERF_TP_FLAG_CACHE_WORKING_CYCLES = 53,
+ PERF_TP_L1_5_CACHE_WORKING_CYCLES = 54,
+ PERF_TP_STARVE_CYCLES_SP = 55,
+ PERF_TP_STARVE_CYCLES_UCHE = 56,
+};
+
+enum a6xx_sp_perfcounter_select {
+ PERF_SP_BUSY_CYCLES = 0,
+ PERF_SP_ALU_WORKING_CYCLES = 1,
+ PERF_SP_EFU_WORKING_CYCLES = 2,
+ PERF_SP_STALL_CYCLES_VPC = 3,
+ PERF_SP_STALL_CYCLES_TP = 4,
+ PERF_SP_STALL_CYCLES_UCHE = 5,
+ PERF_SP_STALL_CYCLES_RB = 6,
+ PERF_SP_NON_EXECUTION_CYCLES = 7,
+ PERF_SP_WAVE_CONTEXTS = 8,
+ PERF_SP_WAVE_CONTEXT_CYCLES = 9,
+ PERF_SP_FS_STAGE_WAVE_CYCLES = 10,
+ PERF_SP_FS_STAGE_WAVE_SAMPLES = 11,
+ PERF_SP_VS_STAGE_WAVE_CYCLES = 12,
+ PERF_SP_VS_STAGE_WAVE_SAMPLES = 13,
+ PERF_SP_FS_STAGE_DURATION_CYCLES = 14,
+ PERF_SP_VS_STAGE_DURATION_CYCLES = 15,
+ PERF_SP_WAVE_CTRL_CYCLES = 16,
+ PERF_SP_WAVE_LOAD_CYCLES = 17,
+ PERF_SP_WAVE_EMIT_CYCLES = 18,
+ PERF_SP_WAVE_NOP_CYCLES = 19,
+ PERF_SP_WAVE_WAIT_CYCLES = 20,
+ PERF_SP_WAVE_FETCH_CYCLES = 21,
+ PERF_SP_WAVE_IDLE_CYCLES = 22,
+ PERF_SP_WAVE_END_CYCLES = 23,
+ PERF_SP_WAVE_LONG_SYNC_CYCLES = 24,
+ PERF_SP_WAVE_SHORT_SYNC_CYCLES = 25,
+ PERF_SP_WAVE_JOIN_CYCLES = 26,
+ PERF_SP_LM_LOAD_INSTRUCTIONS = 27,
+ PERF_SP_LM_STORE_INSTRUCTIONS = 28,
+ PERF_SP_LM_ATOMICS = 29,
+ PERF_SP_GM_LOAD_INSTRUCTIONS = 30,
+ PERF_SP_GM_STORE_INSTRUCTIONS = 31,
+ PERF_SP_GM_ATOMICS = 32,
+ PERF_SP_VS_STAGE_TEX_INSTRUCTIONS = 33,
+ PERF_SP_VS_STAGE_EFU_INSTRUCTIONS = 34,
+ PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 35,
+ PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 36,
+ PERF_SP_FS_STAGE_TEX_INSTRUCTIONS = 37,
+ PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS = 38,
+ PERF_SP_FS_STAGE_EFU_INSTRUCTIONS = 39,
+ PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 40,
+ PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 41,
+ PERF_SP_FS_STAGE_BARY_INSTRUCTIONS = 42,
+ PERF_SP_VS_INSTRUCTIONS = 43,
+ PERF_SP_FS_INSTRUCTIONS = 44,
+ PERF_SP_ADDR_LOCK_COUNT = 45,
+ PERF_SP_UCHE_READ_TRANS = 46,
+ PERF_SP_UCHE_WRITE_TRANS = 47,
+ PERF_SP_EXPORT_VPC_TRANS = 48,
+ PERF_SP_EXPORT_RB_TRANS = 49,
+ PERF_SP_PIXELS_KILLED = 50,
+ PERF_SP_ICL1_REQUESTS = 51,
+ PERF_SP_ICL1_MISSES = 52,
+ PERF_SP_HS_INSTRUCTIONS = 53,
+ PERF_SP_DS_INSTRUCTIONS = 54,
+ PERF_SP_GS_INSTRUCTIONS = 55,
+ PERF_SP_CS_INSTRUCTIONS = 56,
+ PERF_SP_GPR_READ = 57,
+ PERF_SP_GPR_WRITE = 58,
+ PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS = 59,
+ PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS = 60,
+ PERF_SP_LM_BANK_CONFLICTS = 61,
+ PERF_SP_TEX_CONTROL_WORKING_CYCLES = 62,
+ PERF_SP_LOAD_CONTROL_WORKING_CYCLES = 63,
+ PERF_SP_FLOW_CONTROL_WORKING_CYCLES = 64,
+ PERF_SP_LM_WORKING_CYCLES = 65,
+ PERF_SP_DISPATCHER_WORKING_CYCLES = 66,
+ PERF_SP_SEQUENCER_WORKING_CYCLES = 67,
+ PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP = 68,
+ PERF_SP_STARVE_CYCLES_HLSQ = 69,
+ PERF_SP_NON_EXECUTION_LS_CYCLES = 70,
+ PERF_SP_WORKING_EU = 71,
+ PERF_SP_ANY_EU_WORKING = 72,
+ PERF_SP_WORKING_EU_FS_STAGE = 73,
+ PERF_SP_ANY_EU_WORKING_FS_STAGE = 74,
+ PERF_SP_WORKING_EU_VS_STAGE = 75,
+ PERF_SP_ANY_EU_WORKING_VS_STAGE = 76,
+ PERF_SP_WORKING_EU_CS_STAGE = 77,
+ PERF_SP_ANY_EU_WORKING_CS_STAGE = 78,
+ PERF_SP_GPR_READ_PREFETCH = 79,
+ PERF_SP_GPR_READ_CONFLICT = 80,
+ PERF_SP_GPR_WRITE_CONFLICT = 81,
+ PERF_SP_GM_LOAD_LATENCY_CYCLES = 82,
+ PERF_SP_GM_LOAD_LATENCY_SAMPLES = 83,
+ PERF_SP_EXECUTABLE_WAVES = 84,
+};
+
+enum a6xx_rb_perfcounter_select {
+ PERF_RB_BUSY_CYCLES = 0,
+ PERF_RB_STALL_CYCLES_HLSQ = 1,
+ PERF_RB_STALL_CYCLES_FIFO0_FULL = 2,
+ PERF_RB_STALL_CYCLES_FIFO1_FULL = 3,
+ PERF_RB_STALL_CYCLES_FIFO2_FULL = 4,
+ PERF_RB_STARVE_CYCLES_SP = 5,
+ PERF_RB_STARVE_CYCLES_LRZ_TILE = 6,
+ PERF_RB_STARVE_CYCLES_CCU = 7,
+ PERF_RB_STARVE_CYCLES_Z_PLANE = 8,
+ PERF_RB_STARVE_CYCLES_BARY_PLANE = 9,
+ PERF_RB_Z_WORKLOAD = 10,
+ PERF_RB_HLSQ_ACTIVE = 11,
+ PERF_RB_Z_READ = 12,
+ PERF_RB_Z_WRITE = 13,
+ PERF_RB_C_READ = 14,
+ PERF_RB_C_WRITE = 15,
+ PERF_RB_TOTAL_PASS = 16,
+ PERF_RB_Z_PASS = 17,
+ PERF_RB_Z_FAIL = 18,
+ PERF_RB_S_FAIL = 19,
+ PERF_RB_BLENDED_FXP_COMPONENTS = 20,
+ PERF_RB_BLENDED_FP16_COMPONENTS = 21,
+ PERF_RB_PS_INVOCATIONS = 22,
+ PERF_RB_2D_ALIVE_CYCLES = 23,
+ PERF_RB_2D_STALL_CYCLES_A2D = 24,
+ PERF_RB_2D_STARVE_CYCLES_SRC = 25,
+ PERF_RB_2D_STARVE_CYCLES_SP = 26,
+ PERF_RB_2D_STARVE_CYCLES_DST = 27,
+ PERF_RB_2D_VALID_PIXELS = 28,
+ PERF_RB_3D_PIXELS = 29,
+ PERF_RB_BLENDER_WORKING_CYCLES = 30,
+ PERF_RB_ZPROC_WORKING_CYCLES = 31,
+ PERF_RB_CPROC_WORKING_CYCLES = 32,
+ PERF_RB_SAMPLER_WORKING_CYCLES = 33,
+ PERF_RB_STALL_CYCLES_CCU_COLOR_READ = 34,
+ PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE = 35,
+ PERF_RB_STALL_CYCLES_CCU_DEPTH_READ = 36,
+ PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE = 37,
+ PERF_RB_STALL_CYCLES_VPC = 38,
+ PERF_RB_2D_INPUT_TRANS = 39,
+ PERF_RB_2D_OUTPUT_RB_DST_TRANS = 40,
+ PERF_RB_2D_OUTPUT_RB_SRC_TRANS = 41,
+ PERF_RB_BLENDED_FP32_COMPONENTS = 42,
+ PERF_RB_COLOR_PIX_TILES = 43,
+ PERF_RB_STALL_CYCLES_CCU = 44,
+ PERF_RB_EARLY_Z_ARB3_GRANT = 45,
+ PERF_RB_LATE_Z_ARB3_GRANT = 46,
+ PERF_RB_EARLY_Z_SKIP_GRANT = 47,
+};
+
+enum a6xx_vsc_perfcounter_select {
+ PERF_VSC_BUSY_CYCLES = 0,
+ PERF_VSC_WORKING_CYCLES = 1,
+ PERF_VSC_STALL_CYCLES_UCHE = 2,
+ PERF_VSC_EOT_NUM = 3,
+ PERF_VSC_INPUT_TILES = 4,
+};
+
+enum a6xx_ccu_perfcounter_select {
+ PERF_CCU_BUSY_CYCLES = 0,
+ PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN = 1,
+ PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN = 2,
+ PERF_CCU_STARVE_CYCLES_FLAG_RETURN = 3,
+ PERF_CCU_DEPTH_BLOCKS = 4,
+ PERF_CCU_COLOR_BLOCKS = 5,
+ PERF_CCU_DEPTH_BLOCK_HIT = 6,
+ PERF_CCU_COLOR_BLOCK_HIT = 7,
+ PERF_CCU_PARTIAL_BLOCK_READ = 8,
+ PERF_CCU_GMEM_READ = 9,
+ PERF_CCU_GMEM_WRITE = 10,
+ PERF_CCU_DEPTH_READ_FLAG0_COUNT = 11,
+ PERF_CCU_DEPTH_READ_FLAG1_COUNT = 12,
+ PERF_CCU_DEPTH_READ_FLAG2_COUNT = 13,
+ PERF_CCU_DEPTH_READ_FLAG3_COUNT = 14,
+ PERF_CCU_DEPTH_READ_FLAG4_COUNT = 15,
+ PERF_CCU_DEPTH_READ_FLAG5_COUNT = 16,
+ PERF_CCU_DEPTH_READ_FLAG6_COUNT = 17,
+ PERF_CCU_DEPTH_READ_FLAG8_COUNT = 18,
+ PERF_CCU_COLOR_READ_FLAG0_COUNT = 19,
+ PERF_CCU_COLOR_READ_FLAG1_COUNT = 20,
+ PERF_CCU_COLOR_READ_FLAG2_COUNT = 21,
+ PERF_CCU_COLOR_READ_FLAG3_COUNT = 22,
+ PERF_CCU_COLOR_READ_FLAG4_COUNT = 23,
+ PERF_CCU_COLOR_READ_FLAG5_COUNT = 24,
+ PERF_CCU_COLOR_READ_FLAG6_COUNT = 25,
+ PERF_CCU_COLOR_READ_FLAG8_COUNT = 26,
+ PERF_CCU_2D_RD_REQ = 27,
+ PERF_CCU_2D_WR_REQ = 28,
+};
+
+enum a6xx_lrz_perfcounter_select {
+ PERF_LRZ_BUSY_CYCLES = 0,
+ PERF_LRZ_STARVE_CYCLES_RAS = 1,
+ PERF_LRZ_STALL_CYCLES_RB = 2,
+ PERF_LRZ_STALL_CYCLES_VSC = 3,
+ PERF_LRZ_STALL_CYCLES_VPC = 4,
+ PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH = 5,
+ PERF_LRZ_STALL_CYCLES_UCHE = 6,
+ PERF_LRZ_LRZ_READ = 7,
+ PERF_LRZ_LRZ_WRITE = 8,
+ PERF_LRZ_READ_LATENCY = 9,
+ PERF_LRZ_MERGE_CACHE_UPDATING = 10,
+ PERF_LRZ_PRIM_KILLED_BY_MASKGEN = 11,
+ PERF_LRZ_PRIM_KILLED_BY_LRZ = 12,
+ PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ = 13,
+ PERF_LRZ_FULL_8X8_TILES = 14,
+ PERF_LRZ_PARTIAL_8X8_TILES = 15,
+ PERF_LRZ_TILE_KILLED = 16,
+ PERF_LRZ_TOTAL_PIXEL = 17,
+ PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ = 18,
+ PERF_LRZ_FULLY_COVERED_TILES = 19,
+ PERF_LRZ_PARTIAL_COVERED_TILES = 20,
+ PERF_LRZ_FEEDBACK_ACCEPT = 21,
+ PERF_LRZ_FEEDBACK_DISCARD = 22,
+ PERF_LRZ_FEEDBACK_STALL = 23,
+ PERF_LRZ_STALL_CYCLES_RB_ZPLANE = 24,
+ PERF_LRZ_STALL_CYCLES_RB_BPLANE = 25,
+ PERF_LRZ_STALL_CYCLES_VC = 26,
+ PERF_LRZ_RAS_MASK_TRANS = 27,
+};
+
+enum a6xx_cmp_perfcounter_select {
+ PERF_CMPDECMP_STALL_CYCLES_ARB = 0,
+ PERF_CMPDECMP_VBIF_LATENCY_CYCLES = 1,
+ PERF_CMPDECMP_VBIF_LATENCY_SAMPLES = 2,
+ PERF_CMPDECMP_VBIF_READ_DATA_CCU = 3,
+ PERF_CMPDECMP_VBIF_WRITE_DATA_CCU = 4,
+ PERF_CMPDECMP_VBIF_READ_REQUEST = 5,
+ PERF_CMPDECMP_VBIF_WRITE_REQUEST = 6,
+ PERF_CMPDECMP_VBIF_READ_DATA = 7,
+ PERF_CMPDECMP_VBIF_WRITE_DATA = 8,
+ PERF_CMPDECMP_FLAG_FETCH_CYCLES = 9,
+ PERF_CMPDECMP_FLAG_FETCH_SAMPLES = 10,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT = 11,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT = 12,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT = 13,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT = 14,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT = 15,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT = 16,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT = 17,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT = 18,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT = 19,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT = 20,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT = 21,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT = 22,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT = 23,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT = 24,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_REQ = 25,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_WR = 26,
+ PERF_CMPDECMP_2D_STALL_CYCLES_VBIF_RETURN = 27,
+ PERF_CMPDECMP_2D_RD_DATA = 28,
+ PERF_CMPDECMP_2D_WR_DATA = 29,
+ PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0 = 30,
+ PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1 = 31,
+ PERF_CMPDECMP_2D_OUTPUT_TRANS = 32,
+ PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE = 33,
+ PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT = 34,
+ PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT = 35,
+ PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT = 36,
+ PERF_CMPDECMP_2D_BUSY_CYCLES = 37,
+ PERF_CMPDECMP_2D_REORDER_STARVE_CYCLES = 38,
+ PERF_CMPDECMP_2D_PIXELS = 39,
};
enum a6xx_tex_filter {
@@ -1765,12 +2444,39 @@ static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
#define REG_A6XX_VBIF_VERSION 0x00003000
+#define REG_A6XX_VBIF_CLKON 0x00003001
+#define A6XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000002
+
#define REG_A6XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
#define REG_A6XX_VBIF_XIN_HALT_CTRL0 0x00003080
#define REG_A6XX_VBIF_XIN_HALT_CTRL1 0x00003081
+#define REG_A6XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+
+#define REG_A6XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A6XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK 0x0000000f
+#define A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT 0
+static inline uint32_t A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL(uint32_t val)
+{
+ return ((val) << A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL__MASK;
+}
+
+#define REG_A6XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A6XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK 0x000001ff
+#define A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT 0
+static inline uint32_t A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL(uint32_t val)
+{
+ return ((val) << A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__SHIFT) & A6XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL__MASK;
+}
+
+#define REG_A6XX_VBIF_TEST_BUS_OUT 0x0000308c
+
#define REG_A6XX_VBIF_PERF_CNT_SEL0 0x000030d0
#define REG_A6XX_VBIF_PERF_CNT_SEL1 0x000030d1
@@ -1813,313 +2519,79 @@ static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00018400
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00018401
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00018402
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00018403
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
-#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00018404
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00018405
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00018408
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00018409
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0001840a
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0001840b
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0001840c
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0001840d
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0001840e
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0001840f
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00018410
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00018411
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
-}
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
-#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
-static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
-{
- return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
-}
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0001842f
-
-#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00018430
-
-#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00021140
-
-#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00021148
-
-#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00021540
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00021541
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00021542
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00021543
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00021544
-
-#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00021545
-
-#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00021572
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00021573
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00021574
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00021575
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00021576
-
-#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00021577
-
-#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000215a4
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000215a5
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000215a6
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000215a7
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000215a8
-
-#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000215a9
-
-#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000215d6
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000215d7
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000215d8
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000215d9
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000215da
-
-#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000215db
-
-#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x000a0000
-
-#define REG_A6XX_X1_WINDOW_OFFSET 0x000088d4
-#define A6XX_X1_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_X1_WINDOW_OFFSET_X__MASK 0x00007fff
-#define A6XX_X1_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A6XX_X1_WINDOW_OFFSET_X(uint32_t val)
-{
- return ((val) << A6XX_X1_WINDOW_OFFSET_X__SHIFT) & A6XX_X1_WINDOW_OFFSET_X__MASK;
-}
-#define A6XX_X1_WINDOW_OFFSET_Y__MASK 0x7fff0000
-#define A6XX_X1_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A6XX_X1_WINDOW_OFFSET_Y(uint32_t val)
-{
- return ((val) << A6XX_X1_WINDOW_OFFSET_Y__SHIFT) & A6XX_X1_WINDOW_OFFSET_Y__MASK;
-}
-
-#define REG_A6XX_X2_WINDOW_OFFSET 0x0000b4d1
-#define A6XX_X2_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_X2_WINDOW_OFFSET_X__MASK 0x00007fff
-#define A6XX_X2_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A6XX_X2_WINDOW_OFFSET_X(uint32_t val)
+#define REG_A6XX_RB_WINDOW_OFFSET2 0x000088d4
+#define A6XX_RB_WINDOW_OFFSET2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_WINDOW_OFFSET2_X__MASK 0x00007fff
+#define A6XX_RB_WINDOW_OFFSET2_X__SHIFT 0
+static inline uint32_t A6XX_RB_WINDOW_OFFSET2_X(uint32_t val)
{
- return ((val) << A6XX_X2_WINDOW_OFFSET_X__SHIFT) & A6XX_X2_WINDOW_OFFSET_X__MASK;
+ return ((val) << A6XX_RB_WINDOW_OFFSET2_X__SHIFT) & A6XX_RB_WINDOW_OFFSET2_X__MASK;
}
-#define A6XX_X2_WINDOW_OFFSET_Y__MASK 0x7fff0000
-#define A6XX_X2_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A6XX_X2_WINDOW_OFFSET_Y(uint32_t val)
+#define A6XX_RB_WINDOW_OFFSET2_Y__MASK 0x7fff0000
+#define A6XX_RB_WINDOW_OFFSET2_Y__SHIFT 16
+static inline uint32_t A6XX_RB_WINDOW_OFFSET2_Y(uint32_t val)
{
- return ((val) << A6XX_X2_WINDOW_OFFSET_Y__SHIFT) & A6XX_X2_WINDOW_OFFSET_Y__MASK;
+ return ((val) << A6XX_RB_WINDOW_OFFSET2_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET2_Y__MASK;
}
-#define REG_A6XX_X3_WINDOW_OFFSET 0x0000b307
-#define A6XX_X3_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
-#define A6XX_X3_WINDOW_OFFSET_X__MASK 0x00007fff
-#define A6XX_X3_WINDOW_OFFSET_X__SHIFT 0
-static inline uint32_t A6XX_X3_WINDOW_OFFSET_X(uint32_t val)
+#define REG_A6XX_SP_WINDOW_OFFSET 0x0000b4d1
+#define A6XX_SP_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_SP_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_SP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_SP_WINDOW_OFFSET_X(uint32_t val)
{
- return ((val) << A6XX_X3_WINDOW_OFFSET_X__SHIFT) & A6XX_X3_WINDOW_OFFSET_X__MASK;
+ return ((val) << A6XX_SP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_WINDOW_OFFSET_X__MASK;
}
-#define A6XX_X3_WINDOW_OFFSET_Y__MASK 0x7fff0000
-#define A6XX_X3_WINDOW_OFFSET_Y__SHIFT 16
-static inline uint32_t A6XX_X3_WINDOW_OFFSET_Y(uint32_t val)
+#define A6XX_SP_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_SP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_SP_WINDOW_OFFSET_Y(uint32_t val)
{
- return ((val) << A6XX_X3_WINDOW_OFFSET_Y__SHIFT) & A6XX_X3_WINDOW_OFFSET_Y__MASK;
+ return ((val) << A6XX_SP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_WINDOW_OFFSET_Y__MASK;
}
-#define REG_A6XX_X1_BIN_SIZE 0x000080a1
-#define A6XX_X1_BIN_SIZE_WIDTH__MASK 0x000000ff
-#define A6XX_X1_BIN_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A6XX_X1_BIN_SIZE_WIDTH(uint32_t val)
+#define REG_A6XX_SP_TP_WINDOW_OFFSET 0x0000b307
+#define A6XX_SP_TP_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_SP_TP_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_X(uint32_t val)
{
- return ((val >> 5) << A6XX_X1_BIN_SIZE_WIDTH__SHIFT) & A6XX_X1_BIN_SIZE_WIDTH__MASK;
+ return ((val) << A6XX_SP_TP_WINDOW_OFFSET_X__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_X__MASK;
}
-#define A6XX_X1_BIN_SIZE_HEIGHT__MASK 0x0001ff00
-#define A6XX_X1_BIN_SIZE_HEIGHT__SHIFT 8
-static inline uint32_t A6XX_X1_BIN_SIZE_HEIGHT(uint32_t val)
+#define A6XX_SP_TP_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_SP_TP_WINDOW_OFFSET_Y(uint32_t val)
{
- return ((val >> 4) << A6XX_X1_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X1_BIN_SIZE_HEIGHT__MASK;
+ return ((val) << A6XX_SP_TP_WINDOW_OFFSET_Y__SHIFT) & A6XX_SP_TP_WINDOW_OFFSET_Y__MASK;
}
-#define REG_A6XX_X2_BIN_SIZE 0x00008800
-#define A6XX_X2_BIN_SIZE_WIDTH__MASK 0x000000ff
-#define A6XX_X2_BIN_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A6XX_X2_BIN_SIZE_WIDTH(uint32_t val)
+#define REG_A6XX_GRAS_BIN_CONTROL 0x000080a1
+#define A6XX_GRAS_BIN_CONTROL_BINW__MASK 0x000000ff
+#define A6XX_GRAS_BIN_CONTROL_BINW__SHIFT 0
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINW(uint32_t val)
{
- return ((val >> 5) << A6XX_X2_BIN_SIZE_WIDTH__SHIFT) & A6XX_X2_BIN_SIZE_WIDTH__MASK;
+ return ((val >> 5) << A6XX_GRAS_BIN_CONTROL_BINW__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINW__MASK;
}
-#define A6XX_X2_BIN_SIZE_HEIGHT__MASK 0x0001ff00
-#define A6XX_X2_BIN_SIZE_HEIGHT__SHIFT 8
-static inline uint32_t A6XX_X2_BIN_SIZE_HEIGHT(uint32_t val)
+#define A6XX_GRAS_BIN_CONTROL_BINH__MASK 0x0001ff00
+#define A6XX_GRAS_BIN_CONTROL_BINH__SHIFT 8
+static inline uint32_t A6XX_GRAS_BIN_CONTROL_BINH(uint32_t val)
{
- return ((val >> 4) << A6XX_X2_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X2_BIN_SIZE_HEIGHT__MASK;
+ return ((val >> 4) << A6XX_GRAS_BIN_CONTROL_BINH__SHIFT) & A6XX_GRAS_BIN_CONTROL_BINH__MASK;
}
+#define A6XX_GRAS_BIN_CONTROL_BINNING_PASS 0x00040000
+#define A6XX_GRAS_BIN_CONTROL_USE_VIZ 0x00200000
-#define REG_A6XX_X3_BIN_SIZE 0x000088d3
-#define A6XX_X3_BIN_SIZE_WIDTH__MASK 0x000000ff
-#define A6XX_X3_BIN_SIZE_WIDTH__SHIFT 0
-static inline uint32_t A6XX_X3_BIN_SIZE_WIDTH(uint32_t val)
+#define REG_A6XX_RB_BIN_CONTROL2 0x000088d3
+#define A6XX_RB_BIN_CONTROL2_BINW__MASK 0x000000ff
+#define A6XX_RB_BIN_CONTROL2_BINW__SHIFT 0
+static inline uint32_t A6XX_RB_BIN_CONTROL2_BINW(uint32_t val)
{
- return ((val >> 5) << A6XX_X3_BIN_SIZE_WIDTH__SHIFT) & A6XX_X3_BIN_SIZE_WIDTH__MASK;
+ return ((val >> 5) << A6XX_RB_BIN_CONTROL2_BINW__SHIFT) & A6XX_RB_BIN_CONTROL2_BINW__MASK;
}
-#define A6XX_X3_BIN_SIZE_HEIGHT__MASK 0x0001ff00
-#define A6XX_X3_BIN_SIZE_HEIGHT__SHIFT 8
-static inline uint32_t A6XX_X3_BIN_SIZE_HEIGHT(uint32_t val)
+#define A6XX_RB_BIN_CONTROL2_BINH__MASK 0x0001ff00
+#define A6XX_RB_BIN_CONTROL2_BINH__SHIFT 8
+static inline uint32_t A6XX_RB_BIN_CONTROL2_BINH(uint32_t val)
{
- return ((val >> 4) << A6XX_X3_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X3_BIN_SIZE_HEIGHT__MASK;
+ return ((val >> 4) << A6XX_RB_BIN_CONTROL2_BINH__SHIFT) & A6XX_RB_BIN_CONTROL2_BINH__MASK;
}
#define REG_A6XX_VSC_BIN_SIZE 0x00000c02
@@ -2182,11 +2654,19 @@ static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK;
}
-#define REG_A6XX_VSC_XXX_ADDRESS_LO 0x00000c30
+#define REG_A6XX_VSC_PIPE_DATA2_ADDRESS_LO 0x00000c30
-#define REG_A6XX_VSC_XXX_ADDRESS_HI 0x00000c31
+#define REG_A6XX_VSC_PIPE_DATA2_ADDRESS_HI 0x00000c31
-#define REG_A6XX_VSC_XXX_PITCH 0x00000c32
+#define REG_A6XX_VSC_PIPE_DATA2_PITCH 0x00000c32
+
+#define REG_A6XX_VSC_PIPE_DATA2_ARRAY_PITCH 0x00000c33
+#define A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_VSC_PIPE_DATA2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__SHIFT) & A6XX_VSC_PIPE_DATA2_ARRAY_PITCH__MASK;
+}
#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO 0x00000c34
@@ -2194,18 +2674,29 @@ static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
#define REG_A6XX_VSC_PIPE_DATA_PITCH 0x00000c36
+#define REG_A6XX_VSC_PIPE_DATA_ARRAY_PITCH 0x00000c37
+#define A6XX_VSC_PIPE_DATA_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_VSC_PIPE_DATA_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_VSC_PIPE_DATA_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_VSC_PIPE_DATA_ARRAY_PITCH__SHIFT) & A6XX_VSC_PIPE_DATA_ARRAY_PITCH__MASK;
+}
+
static inline uint32_t REG_A6XX_VSC_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
static inline uint32_t REG_A6XX_VSC_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
#define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12
+#define REG_A6XX_GRAS_UNKNOWN_8000 0x00008000
+
#define REG_A6XX_GRAS_UNKNOWN_8001 0x00008001
#define REG_A6XX_GRAS_UNKNOWN_8004 0x00008004
#define REG_A6XX_GRAS_CNTL 0x00008005
#define A6XX_GRAS_CNTL_VARYING 0x00000001
+#define A6XX_GRAS_CNTL_UNK3 0x00000008
#define A6XX_GRAS_CNTL_XCOORD 0x00000040
#define A6XX_GRAS_CNTL_YCOORD 0x00000080
#define A6XX_GRAS_CNTL_ZCOORD 0x00000100
@@ -2308,6 +2799,9 @@ static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val)
return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_SIZE__SHIFT) & A6XX_GRAS_SU_POINT_SIZE__MASK;
}
+#define REG_A6XX_GRAS_SU_DEPTH_PLANE_CNTL 0x00008094
+#define A6XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+
#define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095
#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
@@ -2344,6 +2838,8 @@ static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_dep
#define REG_A6XX_GRAS_UNKNOWN_809B 0x0000809b
+#define REG_A6XX_GRAS_UNKNOWN_80A0 0x000080a0
+
#define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2
#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
@@ -2464,6 +2960,8 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+#define REG_A6XX_GRAS_UNKNOWN_8101 0x00008101
+
#define REG_A6XX_GRAS_2D_BLIT_INFO 0x00008102
#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK 0x000000ff
#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT 0
@@ -2494,6 +2992,10 @@ static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x00008107
+#define REG_A6XX_GRAS_UNKNOWN_8109 0x00008109
+
+#define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110
+
#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
@@ -2590,6 +3092,33 @@ static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_Y(uint32_t val)
#define REG_A6XX_GRAS_UNKNOWN_8600 0x00008600
+#define REG_A6XX_RB_BIN_CONTROL 0x00008800
+#define A6XX_RB_BIN_CONTROL_BINW__MASK 0x000000ff
+#define A6XX_RB_BIN_CONTROL_BINW__SHIFT 0
+static inline uint32_t A6XX_RB_BIN_CONTROL_BINW(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_BIN_CONTROL_BINW__SHIFT) & A6XX_RB_BIN_CONTROL_BINW__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_BINH__MASK 0x0001ff00
+#define A6XX_RB_BIN_CONTROL_BINH__SHIFT 8
+static inline uint32_t A6XX_RB_BIN_CONTROL_BINH(uint32_t val)
+{
+ return ((val >> 4) << A6XX_RB_BIN_CONTROL_BINH__SHIFT) & A6XX_RB_BIN_CONTROL_BINH__MASK;
+}
+#define A6XX_RB_BIN_CONTROL_BINNING_PASS 0x00040000
+#define A6XX_RB_BIN_CONTROL_USE_VIZ 0x00200000
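+/*
+ * Usage sketch (hypothetical values, not part of the generated header): the
+ * BINW/BINH helpers above take a size in pixels and drop the low bits (width
+ * in 32-pixel steps, height in 16-pixel steps), so a 256x128 bin could be
+ * packed as:
+ *
+ *   A6XX_RB_BIN_CONTROL_BINW(256) |   // 256 >> 5 == 8 in bits [7:0]
+ *   A6XX_RB_BIN_CONTROL_BINH(128) |   // 128 >> 4 == 8 in bits [16:8]
+ *   A6XX_RB_BIN_CONTROL_USE_VIZ
+ *
+ * GRAS_BIN_CONTROL and RB_BIN_CONTROL2 use the same field layout.
+ */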
+
+#define REG_A6XX_RB_RENDER_CNTL 0x00008801
+#define A6XX_RB_RENDER_CNTL_UNK4 0x00000010
+#define A6XX_RB_RENDER_CNTL_BINNING 0x00000080
+#define A6XX_RB_RENDER_CNTL_FLAG_DEPTH 0x00004000
+#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK 0x00ff0000
+#define A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT 16
+static inline uint32_t A6XX_RB_RENDER_CNTL_FLAG_MRTS(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_CNTL_FLAG_MRTS__SHIFT) & A6XX_RB_RENDER_CNTL_FLAG_MRTS__MASK;
+}
+
#define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802
#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
@@ -2615,6 +3144,7 @@ static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val
#define REG_A6XX_RB_RENDER_CONTROL0 0x00008809
#define A6XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A6XX_RB_RENDER_CONTROL0_UNK3 0x00000008
#define A6XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
#define A6XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
#define A6XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
@@ -2747,6 +3277,10 @@ static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dithe
#define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040
#define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080
+#define REG_A6XX_RB_UNKNOWN_8810 0x00008810
+
+#define REG_A6XX_RB_UNKNOWN_8811 0x00008811
+
#define REG_A6XX_RB_UNKNOWN_8818 0x00008818
#define REG_A6XX_RB_UNKNOWN_8819 0x00008819
@@ -2837,7 +3371,6 @@ static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
}
-#define A6XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; }
#define A6XX_RB_MRT_PITCH__MASK 0xffffffff
@@ -2923,6 +3456,9 @@ static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
return ((val) << A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
}
+#define REG_A6XX_RB_DEPTH_PLANE_CNTL 0x00008870
+#define A6XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+
#define REG_A6XX_RB_DEPTH_CNTL 0x00008871
#define A6XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
#define A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
@@ -3053,6 +3589,12 @@ static inline uint32_t A6XX_RB_STENCILREF_REF(uint32_t val)
{
return ((val) << A6XX_RB_STENCILREF_REF__SHIFT) & A6XX_RB_STENCILREF_REF__MASK;
}
+#define A6XX_RB_STENCILREF_BFREF__MASK 0x0000ff00
+#define A6XX_RB_STENCILREF_BFREF__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILREF_BFREF(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILREF_BFREF__SHIFT) & A6XX_RB_STENCILREF_BFREF__MASK;
+}
#define REG_A6XX_RB_STENCILMASK 0x00008888
#define A6XX_RB_STENCILMASK_MASK__MASK 0x000000ff
@@ -3061,6 +3603,12 @@ static inline uint32_t A6XX_RB_STENCILMASK_MASK(uint32_t val)
{
return ((val) << A6XX_RB_STENCILMASK_MASK__SHIFT) & A6XX_RB_STENCILMASK_MASK__MASK;
}
+#define A6XX_RB_STENCILMASK_BFMASK__MASK 0x0000ff00
+#define A6XX_RB_STENCILMASK_BFMASK__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILMASK_BFMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILMASK_BFMASK__SHIFT) & A6XX_RB_STENCILMASK_BFMASK__MASK;
+}
#define REG_A6XX_RB_STENCILWRMASK 0x00008889
#define A6XX_RB_STENCILWRMASK_WRMASK__MASK 0x000000ff
@@ -3069,6 +3617,12 @@ static inline uint32_t A6XX_RB_STENCILWRMASK_WRMASK(uint32_t val)
{
return ((val) << A6XX_RB_STENCILWRMASK_WRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_WRMASK__MASK;
}
+#define A6XX_RB_STENCILWRMASK_BFWRMASK__MASK 0x0000ff00
+#define A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT 8
+static inline uint32_t A6XX_RB_STENCILWRMASK_BFWRMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILWRMASK_BFWRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_BFWRMASK__MASK;
+}
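+/*
+ * The BF-prefixed fields above appear to carry the back-face stencil state in
+ * bits [15:8] next to the front-face value in bits [7:0]; a hypothetical
+ * two-sided stencil setup could combine them as:
+ *
+ *   A6XX_RB_STENCILREF_REF(front_ref) | A6XX_RB_STENCILREF_BFREF(back_ref)
+ *
+ * (front_ref/back_ref are placeholder variables, not values defined here.)
+ */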
#define REG_A6XX_RB_WINDOW_OFFSET 0x00008890
#define A6XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
@@ -3177,14 +3731,14 @@ static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
#define REG_A6XX_RB_BLIT_INFO 0x000088e3
#define A6XX_RB_BLIT_INFO_UNK0 0x00000001
-#define A6XX_RB_BLIT_INFO_FAST_CLEAR 0x00000002
+#define A6XX_RB_BLIT_INFO_GMEM 0x00000002
#define A6XX_RB_BLIT_INFO_INTEGER 0x00000004
-#define A6XX_RB_BLIT_INFO_UNK3 0x00000008
-#define A6XX_RB_BLIT_INFO_MASK__MASK 0x000000f0
-#define A6XX_RB_BLIT_INFO_MASK__SHIFT 4
-static inline uint32_t A6XX_RB_BLIT_INFO_MASK(uint32_t val)
+#define A6XX_RB_BLIT_INFO_DEPTH 0x00000008
+#define A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK 0x000000f0
+#define A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT 4
+static inline uint32_t A6XX_RB_BLIT_INFO_CLEAR_MASK(uint32_t val)
{
- return ((val) << A6XX_RB_BLIT_INFO_MASK__SHIFT) & A6XX_RB_BLIT_INFO_MASK__MASK;
+ return ((val) << A6XX_RB_BLIT_INFO_CLEAR_MASK__SHIFT) & A6XX_RB_BLIT_INFO_CLEAR_MASK__MASK;
}
#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0
@@ -3274,12 +3828,16 @@ static inline uint32_t A6XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
#define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01
+#define REG_A6XX_RB_UNKNOWN_8E04 0x00008e04
+
#define REG_A6XX_RB_CCU_CNTL 0x00008e07
#define REG_A6XX_VPC_UNKNOWN_9101 0x00009101
#define REG_A6XX_VPC_GS_SIV_CNTL 0x00009104
+#define REG_A6XX_VPC_UNKNOWN_9107 0x00009107
+
#define REG_A6XX_VPC_UNKNOWN_9108 0x00009108
static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; }
@@ -3385,6 +3943,9 @@ static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
#define A6XX_VPC_SO_BUF_CNTL_BUF3 0x00000200
#define A6XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000
+#define REG_A6XX_VPC_SO_OVERRIDE 0x00009306
+#define A6XX_VPC_SO_OVERRIDE_SO_DISABLE 0x00000001
+
#define REG_A6XX_VPC_UNKNOWN_9600 0x00009600
#define REG_A6XX_VPC_UNKNOWN_9602 0x00009602
@@ -3397,8 +3958,14 @@ static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
#define REG_A6XX_PC_UNKNOWN_9805 0x00009805
+#define REG_A6XX_PC_UNKNOWN_9806 0x00009806
+
+#define REG_A6XX_PC_UNKNOWN_9980 0x00009980
+
#define REG_A6XX_PC_UNKNOWN_9981 0x00009981
+#define REG_A6XX_PC_UNKNOWN_9990 0x00009990
+
#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00
#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
#define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002
@@ -3410,6 +3977,7 @@ static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(uint32_t val)
{
return ((val) << A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK;
}
+#define A6XX_PC_PRIMITIVE_CNTL_1_PSIZE 0x00000100
#define REG_A6XX_PC_UNKNOWN_9B06 0x00009b06
@@ -3488,6 +4056,8 @@ static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
#define REG_A6XX_VFD_UNKNOWN_A008 0x0000a008
+#define REG_A6XX_VFD_UNKNOWN_A009 0x0000a009
+
#define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e
#define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f
@@ -3640,6 +4210,8 @@ static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
#define A6XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x04000000
#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x80000000
+#define REG_A6XX_SP_UNKNOWN_A81B 0x0000a81b
+
#define REG_A6XX_SP_VS_OBJ_START_LO 0x0000a81c
#define REG_A6XX_SP_VS_OBJ_START_HI 0x0000a81d
@@ -3884,6 +4456,8 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
+#define REG_A6XX_SP_UNKNOWN_A982 0x0000a982
+
#define REG_A6XX_SP_FS_OBJ_START_LO 0x0000a983
#define REG_A6XX_SP_FS_OBJ_START_HI 0x0000a984
@@ -3979,7 +4553,8 @@ static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_color_fmt val)
}
#define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
#define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
-#define A6XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
+
+#define REG_A6XX_SP_UNKNOWN_A99E 0x0000a99e
#define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7
@@ -4066,14 +4641,20 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
#define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05
+#define REG_A6XX_SP_UNKNOWN_AB20 0x0000ab20
+
#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
+#define REG_A6XX_SP_UNKNOWN_AE03 0x0000ae03
+
#define REG_A6XX_SP_UNKNOWN_AE04 0x0000ae04
#define REG_A6XX_SP_UNKNOWN_AE0F 0x0000ae0f
#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182
+#define REG_A6XX_SP_UNKNOWN_B183 0x0000b183
+
#define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300
#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
@@ -4097,6 +4678,8 @@ static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples
#define REG_A6XX_SP_TP_UNKNOWN_B304 0x0000b304
+#define REG_A6XX_SP_TP_UNKNOWN_B309 0x0000b309
+
#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0
#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
@@ -4162,6 +4745,8 @@ static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
}
+#define REG_A6XX_HLSQ_UNKNOWN_B980 0x0000b980
+
#define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982
#define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983
@@ -4537,11 +5122,11 @@ static inline uint32_t A6XX_TEX_CONST_7_FLAG_LO(uint32_t val)
}
#define REG_A6XX_TEX_CONST_8 0x00000008
-#define A6XX_TEX_CONST_8_BASE_HI__MASK 0x0001ffff
-#define A6XX_TEX_CONST_8_BASE_HI__SHIFT 0
-static inline uint32_t A6XX_TEX_CONST_8_BASE_HI(uint32_t val)
+#define A6XX_TEX_CONST_8_FLAG_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_8_FLAG_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_8_FLAG_HI(uint32_t val)
{
- return ((val) << A6XX_TEX_CONST_8_BASE_HI__SHIFT) & A6XX_TEX_CONST_8_BASE_HI__MASK;
+ return ((val) << A6XX_TEX_CONST_8_FLAG_HI__SHIFT) & A6XX_TEX_CONST_8_FLAG_HI__MASK;
}
#define REG_A6XX_TEX_CONST_9 0x00000009
@@ -4558,5 +5143,227 @@ static inline uint32_t A6XX_TEX_CONST_8_BASE_HI(uint32_t val)
#define REG_A6XX_TEX_CONST_15 0x0000000f
+#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00001140
+
+#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00001148
+
+#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00001540
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00001541
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00001542
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00001543
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00001544
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00001545
+
+#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00001572
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00001573
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00001574
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00001575
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00001576
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00001577
+
+#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000015a4
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000015a5
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000015a6
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000015a7
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000015a8
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000015a9
+
+#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000015d6
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000015d7
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000015d8
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000015d9
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000015da
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000015db
+
+#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x00000000
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK 0x000000ff
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00000001
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00000002
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00000003
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00000004
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00000005
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00000008
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00000009
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0000000a
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0000000b
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0000000c
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0000000d
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0000000e
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0000000f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000010
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000011
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000002f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000030
+
#endif /* A6XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index bbb8126ec5c5..d4e98e5876bc 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
-#include <linux/iopoll.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
@@ -42,9 +41,6 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data)
status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
- if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ)
- tasklet_schedule(&gmu->hfi_tasklet);
-
if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
@@ -65,12 +61,14 @@ static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
-static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
+static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
{
+ int ret;
+
gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
- ((index << 24) & 0xff) | (3 & 0xf));
+ ((3 & 0xf) << 28) | index);
/*
* Send an invalid index as a vote for the bus bandwidth and let the
@@ -82,7 +80,37 @@ static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
- return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
+ ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
+ if (ret)
+ dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
+
+ gmu->freq = gmu->gpu_freqs[index];
+}
+
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 perf_index = 0;
+
+ if (freq == gmu->freq)
+ return;
+
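+	/* Look up the perf index for this rate; an unmatched rate leaves the loop at the last (highest) index */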
+ for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
+ if (freq == gmu->gpu_freqs[perf_index])
+ break;
+
+ __a6xx_gmu_set_freq(gmu, perf_index);
+}
+
+unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+ return gmu->freq;
}
static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
@@ -135,9 +163,6 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
u32 val;
int ret;
- gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
- A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0);
-
gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
@@ -348,8 +373,23 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
+static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
+{
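+	/* offset is a dword register index; shift it into a byte offset for the MMIO write */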
+ return msm_writel(value, ptr + (offset << 2));
+}
+
+static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
+ const char *name);
+
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
+ struct platform_device *pdev = to_platform_device(gmu->dev);
+ void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
+ void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
+
+ if (!pdcptr || !seqptr)
+ goto err;
+
/* Disable SDE clock gating */
gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
@@ -374,44 +414,48 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
/* Load PDC sequencer uCode for power up and power down sequence */
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
+ pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
/* Set TCS commands used by PDC sequence for low power modes */
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
- pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
/* Setup GPU PDC */
- pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
- pdc_write(gmu, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
/* ensure no writes happen before the uCode is fully written */
wmb();
+
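+	/* Note: the success path falls through here too, since the PDC mappings are only needed during init */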
+err:
+ devm_iounmap(gmu->dev, pdcptr);
+ devm_iounmap(gmu->dev, seqptr);
}
/*
@@ -547,8 +591,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
}
#define A6XX_HFI_IRQ_MASK \
- (A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ | \
- A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
+ (A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
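+/* MSGQ is no longer part of the HFI IRQ mask: acks are now polled in a6xx_hfi_wait_for_ack() */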
#define A6XX_GMU_IRQ_MASK \
(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
@@ -626,7 +669,7 @@ int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);
/* Set the GPU back to the highest power frequency */
- a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+ __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
out:
if (ret)
@@ -665,7 +708,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
ret = a6xx_hfi_start(gmu, status);
/* Set the GPU to the highest power frequency */
- a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+ __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
out:
/* Make sure to turn off the boot OOB request on error */
@@ -1140,7 +1183,7 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
- of_dma_configure(gmu->dev, node, false);
+ of_dma_configure(gmu->dev, node, true);
	/* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1170,11 +1213,7 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Map the GMU registers */
gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
-
- /* Map the GPU power domain controller registers */
- gmu->pdc_mmio = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
-
- if (IS_ERR(gmu->mmio) || IS_ERR(gmu->pdc_mmio))
+ if (IS_ERR(gmu->mmio))
goto err;
/* Get the HFI and GMU interrupts */
@@ -1184,9 +1223,6 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
goto err;
- /* Set up a tasklet to handle GMU HFI responses */
- tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu);
-
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index d9a386c18799..35f765afae45 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -4,6 +4,7 @@
#ifndef _A6XX_GMU_H_
#define _A6XX_GMU_H_
+#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include "msm_drv.h"
#include "a6xx_hfi.h"
@@ -47,7 +48,6 @@ struct a6xx_gmu {
struct device *dev;
void * __iomem mmio;
- void * __iomem pdc_mmio;
int hfi_irq;
int gmu_irq;
@@ -74,6 +74,8 @@ struct a6xx_gmu {
unsigned long gmu_freqs[4];
u32 cx_arc_votes[4];
+ unsigned long freq;
+
struct a6xx_hfi_queue queues[2];
struct tasklet_struct hfi_tasklet;
@@ -89,11 +91,6 @@ static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
return msm_writel(value, gmu->mmio + (offset << 2));
}
-static inline void pdc_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
-{
- return msm_writel(value, gmu->pdc_mmio + (offset << 2));
-}
-
static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
{
u32 val = gmu_read(gmu, reg);
@@ -103,6 +100,16 @@ static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
gmu_write(gmu, reg, val | or);
}
+static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
+{
+ u64 val;
+
+ val = (u64) msm_readl(gmu->mmio + (lo << 2));
+ val |= ((u64) msm_readl(gmu->mmio + (hi << 2)) << 32);
+
+ return val;
+}
+
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
interval, timeout)
@@ -157,6 +164,4 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
-void a6xx_hfi_task(unsigned long data);
-
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index ef68098d2adc..db56f263ed77 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
@@ -167,8 +167,8 @@ static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH(uint32_
#define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002
-#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000004
-#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000008
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000004
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000008
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020
#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index c629f742a1d1..631257c297fd 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -7,6 +7,8 @@
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
+#include <linux/devfreq.h>
+
static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -438,10 +440,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
- A6XX_PROTECT_RDONLY(0x8d0, 0x23));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(25),
A6XX_PROTECT_RDONLY(0x980, 0x4));
- gpu_write(gpu, REG_A6XX_CP_PROTECT(26), A6XX_PROTECT_RW(0xa630, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
/* Enable interrupts */
gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
@@ -682,6 +682,8 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
gpu->needs_hw_init = true;
+ msm_gpu_resume_devfreq(gpu);
+
return ret;
}
@@ -690,6 +692,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ devfreq_suspend_device(gpu->devfreq.devfreq);
+
/*
* Make sure the GMU is idle before continuing (because some transitions
* may use VBIF
@@ -744,7 +748,7 @@ static void a6xx_destroy(struct msm_gpu *gpu)
if (a6xx_gpu->sqe_bo) {
if (a6xx_gpu->sqe_iova)
msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(a6xx_gpu->sqe_bo);
+ drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
}
a6xx_gmu_remove(a6xx_gpu);
@@ -753,6 +757,27 @@ static void a6xx_destroy(struct msm_gpu *gpu)
kfree(a6xx_gpu);
}
+static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u64 busy_cycles, busy_time;
+
+ busy_cycles = gmu_read64(&a6xx_gpu->gmu,
+ REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
+ REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
+
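+	/* The XOCLK counter ticks at the 19.2 MHz XO, so cycles * 10 / 192 yields microseconds */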
+ busy_time = (busy_cycles - gpu->devfreq.busy_cycles) * 10;
+ do_div(busy_time, 192);
+
+ gpu->devfreq.busy_cycles = busy_cycles;
+
+ if (WARN_ON(busy_time > ~0LU))
+ return ~0LU;
+
+ return (unsigned long)busy_time;
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
@@ -768,6 +793,9 @@ static const struct adreno_gpu_funcs funcs = {
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
.show = a6xx_show,
#endif
+ .gpu_busy = a6xx_gpu_busy,
+ .gpu_get_freq = a6xx_gmu_get_freq,
+ .gpu_set_freq = a6xx_gmu_set_freq,
},
.get_timestamp = a6xx_get_timestamp,
};
@@ -799,7 +827,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
}
/* Check if there is a GMU phandle and set it up */
- node = of_parse_phandle(pdev->dev.of_node, "gmu", 0);
+ node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
/* FIXME: How do we gracefully handle this? */
BUG_ON(!node);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index dd69e5b0e692..4127dcebc202 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -56,5 +56,6 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
-
+void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
+unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index f19ef4cb6ea4..6ff9baec2658 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -79,83 +79,72 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
return 0;
}
-struct a6xx_hfi_response {
- u32 id;
- u32 seqnum;
- struct list_head node;
- struct completion complete;
-
- u32 error;
- u32 payload[16];
-};
+static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
+ u32 *payload, u32 payload_size)
+{
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
+ u32 val;
+ int ret;
-/*
- * Incoming HFI ack messages can come in out of order so we need to store all
- * the pending messages on a list until they are handled.
- */
-static spinlock_t hfi_ack_lock = __SPIN_LOCK_UNLOCKED(message_lock);
-static LIST_HEAD(hfi_ack_list);
+ /* Wait for a response */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
-static void a6xx_hfi_handle_ack(struct a6xx_gmu *gmu,
- struct a6xx_hfi_msg_response *msg)
-{
- struct a6xx_hfi_response *resp;
- u32 id, seqnum;
-
- /* msg->ret_header contains the header of the message being acked */
- id = HFI_HEADER_ID(msg->ret_header);
- seqnum = HFI_HEADER_SEQNUM(msg->ret_header);
-
- spin_lock(&hfi_ack_lock);
- list_for_each_entry(resp, &hfi_ack_list, node) {
- if (resp->id == id && resp->seqnum == seqnum) {
- resp->error = msg->error;
- memcpy(resp->payload, msg->payload,
- sizeof(resp->payload));
-
- complete(&resp->complete);
- spin_unlock(&hfi_ack_lock);
- return;
- }
+ if (ret) {
+ dev_err(gmu->dev,
+ "Message %s id %d timed out waiting for response\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ return -ETIMEDOUT;
}
- spin_unlock(&hfi_ack_lock);
- dev_err(gmu->dev, "Nobody was waiting for HFI message %d\n", seqnum);
-}
+ /* Clear the interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
+ A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);
-static void a6xx_hfi_handle_error(struct a6xx_gmu *gmu,
- struct a6xx_hfi_msg_response *msg)
-{
- struct a6xx_hfi_msg_error *error = (struct a6xx_hfi_msg_error *) msg;
+ for (;;) {
+ struct a6xx_hfi_msg_response resp;
- dev_err(gmu->dev, "GMU firmware error %d\n", error->code);
-}
+ /* Get the next packet */
+ ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
+ sizeof(resp) >> 2);
-void a6xx_hfi_task(unsigned long data)
-{
- struct a6xx_gmu *gmu = (struct a6xx_gmu *) data;
- struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
- struct a6xx_hfi_msg_response resp;
+ /* If the queue is empty our response never made it */
+ if (!ret) {
+ dev_err(gmu->dev,
+ "The HFI response queue is unexpectedly empty\n");
- for (;;) {
- u32 id;
- int ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
- sizeof(resp) >> 2);
+ return -ENOENT;
+ }
+
+ if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
+ struct a6xx_hfi_msg_error *error =
+ (struct a6xx_hfi_msg_error *) &resp;
- /* Returns the number of bytes copied or negative on error */
- if (ret <= 0) {
- if (ret < 0)
- dev_err(gmu->dev,
- "Unable to read the HFI message queue\n");
- break;
+ dev_err(gmu->dev, "GMU firmware error %d\n",
+ error->code);
+ continue;
+ }
+
+ if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
+ dev_err(gmu->dev,
+ "Unexpected message id %d on the response queue\n",
+ HFI_HEADER_SEQNUM(resp.ret_header));
+ continue;
+ }
+
+ if (resp.error) {
+ dev_err(gmu->dev,
+ "Message %s id %d returned error %d\n",
+ a6xx_hfi_msg_id[id], seqnum, resp.error);
+ return -EINVAL;
}
- id = HFI_HEADER_ID(resp.header);
+ /* All is well, copy over the buffer */
+ if (payload && payload_size)
+ memcpy(payload, resp.payload,
+ min_t(u32, payload_size, sizeof(resp.payload)));
- if (id == HFI_F2H_MSG_ACK)
- a6xx_hfi_handle_ack(gmu, &resp);
- else if (id == HFI_F2H_MSG_ERROR)
- a6xx_hfi_handle_error(gmu, &resp);
+ return 0;
}
}
@@ -163,7 +152,6 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
void *data, u32 size, u32 *payload, u32 payload_size)
{
struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
- struct a6xx_hfi_response resp = { 0 };
int ret, dwords = size >> 2;
u32 seqnum;
@@ -173,53 +161,14 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
(dwords << 8) | id;
- init_completion(&resp.complete);
- resp.id = id;
- resp.seqnum = seqnum;
-
- spin_lock_bh(&hfi_ack_lock);
- list_add_tail(&resp.node, &hfi_ack_list);
- spin_unlock_bh(&hfi_ack_lock);
-
ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
if (ret) {
dev_err(gmu->dev, "Unable to send message %s id %d\n",
a6xx_hfi_msg_id[id], seqnum);
- goto out;
- }
-
- /* Wait up to 5 seconds for the response */
- ret = wait_for_completion_timeout(&resp.complete,
- msecs_to_jiffies(5000));
- if (!ret) {
- dev_err(gmu->dev,
- "Message %s id %d timed out waiting for response\n",
- a6xx_hfi_msg_id[id], seqnum);
- ret = -ETIMEDOUT;
- } else
- ret = 0;
-
-out:
- spin_lock_bh(&hfi_ack_lock);
- list_del(&resp.node);
- spin_unlock_bh(&hfi_ack_lock);
-
- if (ret)
return ret;
-
- if (resp.error) {
- dev_err(gmu->dev, "Message %s id %d returned error %d\n",
- a6xx_hfi_msg_id[id], seqnum, resp.error);
- return -EINVAL;
}
- if (payload && payload_size) {
- int copy = min_t(u32, payload_size, sizeof(resp.payload));
-
- memcpy(payload, resp.payload, copy);
- }
-
- return 0;
+ return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}
static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 5dace1350810..1318959d504d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 7d3e9a129ac7..86abdb2b3a9c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -120,6 +120,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 03a91e10b310..15eb03bed984 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
@@ -237,7 +237,7 @@ enum adreno_pm4_type3_packets {
CP_UNK_A6XX_14 = 20,
CP_UNK_A6XX_36 = 54,
CP_UNK_A6XX_55 = 85,
- UNK_A6XX_6D = 109,
+ CP_REG_WRITE = 109,
};
enum adreno_state_block {
@@ -968,19 +968,19 @@ static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val)
}
#define REG_CP_SET_BIN_DATA5_5 0x00000005
-#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO(uint32_t val)
+#define CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO(uint32_t val)
{
- return ((val) << CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK;
+ return ((val) << CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__SHIFT) & CP_SET_BIN_DATA5_5_BIN_DATA_ADDR2_LO__MASK;
}
#define REG_CP_SET_BIN_DATA5_6 0x00000006
-#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK 0xffffffff
-#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT 0
-static inline uint32_t CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI(uint32_t val)
+#define CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO(uint32_t val)
{
- return ((val) << CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK;
+ return ((val) << CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__SHIFT) & CP_SET_BIN_DATA5_6_BIN_DATA_ADDR2_LO__MASK;
}
#define REG_CP_REG_TO_MEM_0 0x00000000
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 80cbf75bc2ff..d4530d60767b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -47,237 +47,17 @@
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
-#define MISR_BUFF_SIZE 256
-
-static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
-{
- struct msm_drm_private *priv;
-
- if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid crtc\n");
- return NULL;
- }
- priv = crtc->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid kms\n");
- return NULL;
- }
-
- return to_dpu_kms(priv->kms);
-}
-
-static inline int _dpu_crtc_power_enable(struct dpu_crtc *dpu_crtc, bool enable)
-{
- struct drm_crtc *crtc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!dpu_crtc) {
- DPU_ERROR("invalid dpu crtc\n");
- return -EINVAL;
- }
-
- crtc = &dpu_crtc->base;
- if (!crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid drm device\n");
- return -EINVAL;
- }
-
- priv = crtc->dev->dev_private;
- if (!priv->kms) {
- DPU_ERROR("invalid kms\n");
- return -EINVAL;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
-
- if (enable)
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
- else
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
- return 0;
-}
-
-/**
- * _dpu_crtc_rp_to_crtc - get crtc from resource pool object
- * @rp: Pointer to resource pool
- * return: Pointer to drm crtc if success; null otherwise
- */
-static struct drm_crtc *_dpu_crtc_rp_to_crtc(struct dpu_crtc_respool *rp)
-{
- if (!rp)
- return NULL;
-
- return container_of(rp, struct dpu_crtc_state, rp)->base.crtc;
-}
-
-/**
- * _dpu_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
- * @rp: Pointer to resource pool
- * @force: True to reclaim all resources; otherwise, reclaim only unused ones
- * return: None
- */
-static void _dpu_crtc_rp_reclaim(struct dpu_crtc_respool *rp, bool force)
+static inline int _dpu_crtc_get_mixer_width(struct dpu_crtc_state *cstate,
+ struct drm_display_mode *mode)
{
- struct dpu_crtc_res *res, *next;
- struct drm_crtc *crtc;
-
- crtc = _dpu_crtc_rp_to_crtc(rp);
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
- DPU_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
- force ? "destroy" : "free_unused");
-
- list_for_each_entry_safe(res, next, &rp->res_list, list) {
- if (!force && !(res->flags & DPU_CRTC_RES_FLAG_FREE))
- continue;
- DPU_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
- crtc->base.id, rp->sequence_id,
- res->type, res->tag, res->val,
- atomic_read(&res->refcount));
- list_del(&res->list);
- if (res->ops.put)
- res->ops.put(res->val);
- kfree(res);
- }
+ return mode->hdisplay / cstate->num_mixers;
}
-/**
- * _dpu_crtc_rp_free_unused - free unused resource in pool
- * @rp: Pointer to resource pool
- * return: none
- */
-static void _dpu_crtc_rp_free_unused(struct dpu_crtc_respool *rp)
-{
- mutex_lock(rp->rp_lock);
- _dpu_crtc_rp_reclaim(rp, false);
- mutex_unlock(rp->rp_lock);
-}
-
-/**
- * _dpu_crtc_rp_destroy - destroy resource pool
- * @rp: Pointer to resource pool
- * return: None
- */
-static void _dpu_crtc_rp_destroy(struct dpu_crtc_respool *rp)
-{
- mutex_lock(rp->rp_lock);
- list_del_init(&rp->rp_list);
- _dpu_crtc_rp_reclaim(rp, true);
- mutex_unlock(rp->rp_lock);
-}
-
-/**
- * _dpu_crtc_hw_blk_get - get callback for hardware block
- * @val: Resource handle
- * @type: Resource type
- * @tag: Search tag for given resource
- * return: Resource handle
- */
-static void *_dpu_crtc_hw_blk_get(void *val, u32 type, u64 tag)
-{
- DPU_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
- return dpu_hw_blk_get(val, type, tag);
-}
-
-/**
- * _dpu_crtc_hw_blk_put - put callback for hardware block
- * @val: Resource handle
- * return: None
- */
-static void _dpu_crtc_hw_blk_put(void *val)
-{
- DPU_DEBUG("res://%pK\n", val);
- dpu_hw_blk_put(val);
-}
-
-/**
- * _dpu_crtc_rp_duplicate - duplicate resource pool and reset reference count
- * @rp: Pointer to original resource pool
- * @dup_rp: Pointer to duplicated resource pool
- * return: None
- */
-static void _dpu_crtc_rp_duplicate(struct dpu_crtc_respool *rp,
- struct dpu_crtc_respool *dup_rp)
-{
- struct dpu_crtc_res *res, *dup_res;
- struct drm_crtc *crtc;
-
- if (!rp || !dup_rp || !rp->rp_head) {
- DPU_ERROR("invalid resource pool\n");
- return;
- }
-
- crtc = _dpu_crtc_rp_to_crtc(rp);
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
- DPU_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);
-
- mutex_lock(rp->rp_lock);
- dup_rp->sequence_id = rp->sequence_id + 1;
- INIT_LIST_HEAD(&dup_rp->res_list);
- dup_rp->ops = rp->ops;
- list_for_each_entry(res, &rp->res_list, list) {
- dup_res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
- if (!dup_res) {
- mutex_unlock(rp->rp_lock);
- return;
- }
- INIT_LIST_HEAD(&dup_res->list);
- atomic_set(&dup_res->refcount, 0);
- dup_res->type = res->type;
- dup_res->tag = res->tag;
- dup_res->val = res->val;
- dup_res->ops = res->ops;
- dup_res->flags = DPU_CRTC_RES_FLAG_FREE;
- DPU_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
- crtc->base.id, dup_rp->sequence_id,
- dup_res->type, dup_res->tag, dup_res->val,
- atomic_read(&dup_res->refcount));
- list_add_tail(&dup_res->list, &dup_rp->res_list);
- if (dup_res->ops.get)
- dup_res->ops.get(dup_res->val, 0, -1);
- }
-
- dup_rp->rp_lock = rp->rp_lock;
- dup_rp->rp_head = rp->rp_head;
- INIT_LIST_HEAD(&dup_rp->rp_list);
- list_add_tail(&dup_rp->rp_list, rp->rp_head);
- mutex_unlock(rp->rp_lock);
-}
-
-/**
- * _dpu_crtc_rp_reset - reset resource pool after allocation
- * @rp: Pointer to original resource pool
- * @rp_lock: Pointer to serialization resource pool lock
- * @rp_head: Pointer to crtc resource pool head
- * return: None
- */
-static void _dpu_crtc_rp_reset(struct dpu_crtc_respool *rp,
- struct mutex *rp_lock, struct list_head *rp_head)
+static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
- if (!rp || !rp_lock || !rp_head) {
- DPU_ERROR("invalid resource pool\n");
- return;
- }
+ struct msm_drm_private *priv = crtc->dev->dev_private;
- mutex_lock(rp_lock);
- rp->rp_lock = rp_lock;
- rp->rp_head = rp_head;
- INIT_LIST_HEAD(&rp->rp_list);
- rp->sequence_id = 0;
- INIT_LIST_HEAD(&rp->res_list);
- rp->ops.get = _dpu_crtc_hw_blk_get;
- rp->ops.put = _dpu_crtc_hw_blk_put;
- list_add_tail(&rp->rp_list, rp->rp_head);
- mutex_unlock(rp_lock);
+ return to_dpu_kms(priv->kms);
}
static void dpu_crtc_destroy(struct drm_crtc *crtc)
@@ -297,14 +77,29 @@ static void dpu_crtc_destroy(struct drm_crtc *crtc)
}
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
- struct dpu_plane_state *pstate)
+ struct dpu_plane_state *pstate, struct dpu_format *format)
{
struct dpu_hw_mixer *lm = mixer->hw_lm;
+ uint32_t blend_op;
+ struct drm_format_name_buf format_name;
/* default to opaque blending */
- lm->ops.setup_blend_config(lm, pstate->stage, 0XFF, 0,
- DPU_BLEND_FG_ALPHA_FG_CONST |
- DPU_BLEND_BG_ALPHA_BG_CONST);
+ blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_BG_CONST;
+
+ if (format->alpha_enable) {
+ /* coverage blending */
+ blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
+ DPU_BLEND_BG_ALPHA_FG_PIXEL |
+ DPU_BLEND_BG_INV_ALPHA;
+ }
+
+ lm->ops.setup_blend_config(lm, pstate->stage,
+ 0xFF, 0, blend_op);
+
+ DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
+ drm_get_format_name(format->base.pixel_format, &format_name),
+ format->alpha_enable, blend_op);
}
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
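Editor's note: the hunk above makes the per-stage blend rule depend on the framebuffer format — constant (opaque) alpha by default, coverage blending with inverted background alpha once the format carries an alpha channel. A minimal stand-alone sketch of that selection follows; the flag values are illustrative placeholders, not the real DPU register encodings from dpu_hw_mdss.h.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the DPU blend flags; the real values are
 * hardware register encodings defined in dpu_hw_mdss.h. */
#define BLEND_FG_ALPHA_FG_CONST  (1u << 0)
#define BLEND_BG_ALPHA_BG_CONST  (1u << 1)
#define BLEND_FG_ALPHA_FG_PIXEL  (1u << 2)
#define BLEND_BG_ALPHA_FG_PIXEL  (1u << 3)
#define BLEND_BG_INV_ALPHA       (1u << 4)

/* Mirrors the new _dpu_crtc_setup_blend_cfg() decision: opaque blending by
 * default, coverage blending when the pixel format has an alpha channel. */
static unsigned int pick_blend_op(bool alpha_enable)
{
	unsigned int blend_op = BLEND_FG_ALPHA_FG_CONST |
				BLEND_BG_ALPHA_BG_CONST;

	if (alpha_enable)
		blend_op = BLEND_FG_ALPHA_FG_PIXEL |
			   BLEND_BG_ALPHA_FG_PIXEL |
			   BLEND_BG_INV_ALPHA;

	return blend_op;
}

int main(void)
{
	printf("XRGB8888 -> blend_op 0x%x\n", pick_blend_op(false));
	printf("ARGB8888 -> blend_op 0x%x\n", pick_blend_op(true));
	return 0;
}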
@@ -317,9 +112,9 @@ static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
crtc_state = to_dpu_crtc_state(crtc->state);
lm_horiz_position = 0;
- for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+ for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
- struct dpu_hw_mixer *hw_lm = dpu_crtc->mixers[lm_idx].hw_lm;
+ struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
struct dpu_hw_mixer_cfg cfg;
if (!lm_roi || !drm_rect_visible(lm_roi))
@@ -339,28 +134,17 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct drm_plane *plane;
struct drm_framebuffer *fb;
struct drm_plane_state *state;
- struct dpu_crtc_state *cstate;
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct dpu_plane_state *pstate = NULL;
struct dpu_format *format;
- struct dpu_hw_ctl *ctl;
- struct dpu_hw_mixer *lm;
- struct dpu_hw_stage_cfg *stage_cfg;
+ struct dpu_hw_ctl *ctl = mixer->lm_ctl;
+ struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;
u32 flush_mask;
uint32_t stage_idx, lm_idx;
int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
bool bg_alpha_enable = false;
- if (!dpu_crtc || !mixer) {
- DPU_ERROR("invalid dpu_crtc or mixer\n");
- return;
- }
-
- ctl = mixer->hw_ctl;
- lm = mixer->hw_lm;
- stage_cfg = &dpu_crtc->stage_cfg;
- cstate = to_dpu_crtc_state(crtc->state);
-
drm_atomic_crtc_for_each_plane(plane, crtc) {
state = plane->state;
if (!state)
@@ -379,10 +163,6 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
state->fb ? state->fb->base.id : -1);
format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
- if (!format) {
- DPU_ERROR("invalid format\n");
- return;
- }
if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
bg_alpha_enable = true;
@@ -400,8 +180,9 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
fb ? fb->modifier : 0);
/* blend config update */
- for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
- _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate);
+ for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
+ _dpu_crtc_setup_blend_cfg(mixer + lm_idx,
+ pstate, format);
mixer[lm_idx].flush_mask |= flush_mask;
@@ -422,38 +203,25 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
*/
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_state *dpu_crtc_state;
- struct dpu_crtc_mixer *mixer;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct dpu_crtc_mixer *mixer = cstate->mixers;
struct dpu_hw_ctl *ctl;
struct dpu_hw_mixer *lm;
-
int i;
- if (!crtc)
- return;
-
- dpu_crtc = to_dpu_crtc(crtc);
- dpu_crtc_state = to_dpu_crtc_state(crtc->state);
- mixer = dpu_crtc->mixers;
-
DPU_DEBUG("%s\n", dpu_crtc->name);
- if (dpu_crtc->num_mixers > CRTC_DUAL_MIXERS) {
- DPU_ERROR("invalid number mixers: %d\n", dpu_crtc->num_mixers);
- return;
- }
-
- for (i = 0; i < dpu_crtc->num_mixers; i++) {
- if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
+ for (i = 0; i < cstate->num_mixers; i++) {
+ if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
DPU_ERROR("invalid lm or ctl assigned to mixer\n");
return;
}
mixer[i].mixer_op_mode = 0;
mixer[i].flush_mask = 0;
- if (mixer[i].hw_ctl->ops.clear_all_blendstages)
- mixer[i].hw_ctl->ops.clear_all_blendstages(
- mixer[i].hw_ctl);
+ if (mixer[i].lm_ctl->ops.clear_all_blendstages)
+ mixer[i].lm_ctl->ops.clear_all_blendstages(
+ mixer[i].lm_ctl);
}
/* initialize stage cfg */
@@ -461,8 +229,8 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
- for (i = 0; i < dpu_crtc->num_mixers; i++) {
- ctl = mixer[i].hw_ctl;
+ for (i = 0; i < cstate->num_mixers; i++) {
+ ctl = mixer[i].lm_ctl;
lm = mixer[i].hw_lm;
lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
@@ -543,34 +311,13 @@ static void dpu_crtc_vblank_cb(void *data)
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
- struct msm_drm_private *priv;
- struct dpu_crtc_frame_event *fevent;
- struct drm_crtc *crtc;
- struct dpu_crtc *dpu_crtc;
- struct dpu_kms *dpu_kms;
+ struct dpu_crtc_frame_event *fevent = container_of(work,
+ struct dpu_crtc_frame_event, work);
+ struct drm_crtc *crtc = fevent->crtc;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
unsigned long flags;
bool frame_done = false;
- if (!work) {
- DPU_ERROR("invalid work handle\n");
- return;
- }
-
- fevent = container_of(work, struct dpu_crtc_frame_event, work);
- if (!fevent->crtc || !fevent->crtc->state) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
- crtc = fevent->crtc;
- dpu_crtc = to_dpu_crtc(crtc);
-
- dpu_kms = _dpu_crtc_get_kms(crtc);
- if (!dpu_kms) {
- DPU_ERROR("invalid kms handle\n");
- return;
- }
- priv = dpu_kms->dev->dev_private;
DPU_ATRACE_BEGIN("crtc_frame_event");
DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
@@ -636,11 +383,6 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
unsigned long flags;
u32 crtc_id;
- if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid parameters\n");
- return;
- }
-
/* Nothing to do on idle event */
if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
return;
@@ -683,7 +425,7 @@ static void _dpu_crtc_setup_mixer_for_encoder(
struct drm_crtc *crtc,
struct drm_encoder *enc)
{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
struct dpu_rm *rm = &dpu_kms->rm;
struct dpu_crtc_mixer *mixer;
@@ -695,8 +437,8 @@ static void _dpu_crtc_setup_mixer_for_encoder(
dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
/* Set up all the mixers and ctls reserved by this encoder */
- for (i = dpu_crtc->num_mixers; i < ARRAY_SIZE(dpu_crtc->mixers); i++) {
- mixer = &dpu_crtc->mixers[i];
+ for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
+ mixer = &cstate->mixers[i];
if (!dpu_rm_get_hw(rm, &lm_iter))
break;
@@ -706,14 +448,14 @@ static void _dpu_crtc_setup_mixer_for_encoder(
if (!dpu_rm_get_hw(rm, &ctl_iter)) {
DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
mixer->hw_lm->idx - LM_0);
- mixer->hw_ctl = last_valid_ctl;
+ mixer->lm_ctl = last_valid_ctl;
} else {
- mixer->hw_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
- last_valid_ctl = mixer->hw_ctl;
+ mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
+ last_valid_ctl = mixer->lm_ctl;
}
/* Shouldn't happen, mixers are always >= ctls */
- if (!mixer->hw_ctl) {
+ if (!mixer->lm_ctl) {
DPU_ERROR("no valid ctls found for lm %d\n",
mixer->hw_lm->idx - LM_0);
return;
@@ -721,11 +463,11 @@ static void _dpu_crtc_setup_mixer_for_encoder(
mixer->encoder = enc;
- dpu_crtc->num_mixers++;
+ cstate->num_mixers++;
DPU_DEBUG("setup mixer %d: lm %d\n",
i, mixer->hw_lm->idx - LM_0);
DPU_DEBUG("setup mixer %d: ctl %d\n",
- i, mixer->hw_ctl->idx - CTL_0);
+ i, mixer->lm_ctl->idx - CTL_0);
}
}
@@ -734,10 +476,6 @@ static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *enc;
- dpu_crtc->num_mixers = 0;
- dpu_crtc->mixers_swapped = false;
- memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
-
mutex_lock(&dpu_crtc->crtc_lock);
/* Check for mixers on all encoders attached to this crtc */
list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
@@ -753,24 +491,13 @@ static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_state *cstate;
- struct drm_display_mode *adj_mode;
- u32 crtc_split_width;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+ struct drm_display_mode *adj_mode = &state->adjusted_mode;
+ u32 crtc_split_width = _dpu_crtc_get_mixer_width(cstate, adj_mode);
int i;
- if (!crtc || !state) {
- DPU_ERROR("invalid args\n");
- return;
- }
-
- dpu_crtc = to_dpu_crtc(crtc);
- cstate = to_dpu_crtc_state(state);
-
- adj_mode = &state->adjusted_mode;
- crtc_split_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, adj_mode);
-
- for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ for (i = 0; i < cstate->num_mixers; i++) {
struct drm_rect *r = &cstate->lm_bounds[i];
r->x1 = crtc_split_width * i;
r->y1 = 0;
@@ -787,6 +514,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
struct drm_encoder *encoder;
struct drm_device *dev;
unsigned long flags;
@@ -806,10 +534,11 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
DPU_DEBUG("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
dev = crtc->dev;
smmu_state = &dpu_crtc->smmu_state;
- if (!dpu_crtc->num_mixers) {
+ if (!cstate->num_mixers) {
_dpu_crtc_setup_mixers(crtc);
_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
}
@@ -836,7 +565,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
* it means we are trying to flush a CRTC whose state is disabled:
* nothing else needs to be done.
*/
- if (unlikely(!dpu_crtc->num_mixers))
+ if (unlikely(!cstate->num_mixers))
return;
_dpu_crtc_blend_setup(crtc);
@@ -861,11 +590,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
unsigned long flags;
struct dpu_crtc_state *cstate;
- if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
if (!crtc->state->enable) {
DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
crtc->base.id, crtc->state->enable);
@@ -900,7 +624,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
* it means we are trying to flush a CRTC whose state is disabled:
* nothing else needs to be done.
*/
- if (unlikely(!dpu_crtc->num_mixers))
+ if (unlikely(!cstate->num_mixers))
return;
/*
@@ -951,8 +675,6 @@ static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
DPU_DEBUG("crtc%d\n", crtc->base.id);
- _dpu_crtc_rp_destroy(&cstate->rp);
-
__drm_atomic_helper_crtc_destroy_state(state);
kfree(cstate);
@@ -960,15 +682,9 @@ static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
int ret, rc = 0;
- if (!crtc) {
- DPU_ERROR("invalid argument\n");
- return -EINVAL;
- }
- dpu_crtc = to_dpu_crtc(crtc);
-
if (!atomic_read(&dpu_crtc->frame_pending)) {
DPU_DEBUG("no frames pending\n");
return 0;
@@ -989,35 +705,18 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
- struct drm_device *dev;
- struct dpu_crtc *dpu_crtc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
- struct dpu_crtc_state *cstate;
+ struct drm_device *dev = crtc->dev;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
int ret;
- if (!crtc) {
- DPU_ERROR("invalid argument\n");
- return;
- }
- dev = crtc->dev;
- dpu_crtc = to_dpu_crtc(crtc);
- dpu_kms = _dpu_crtc_get_kms(crtc);
-
- if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
- DPU_ERROR("invalid argument\n");
- return;
- }
-
- priv = dpu_kms->dev->dev_private;
- cstate = to_dpu_crtc_state(crtc->state);
-
/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
* it means we are trying to start a CRTC whose state is disabled:
* nothing else needs to be done.
*/
- if (unlikely(!dpu_crtc->num_mixers))
+ if (unlikely(!cstate->num_mixers))
return;
DPU_ATRACE_BEGIN("crtc_commit");
@@ -1072,33 +771,19 @@ end:
* _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
* @dpu_crtc: Pointer to dpu crtc structure
* @enable: Whether to enable/disable vblanks
- *
- * @Return: error code
*/
-static int _dpu_crtc_vblank_enable_no_lock(
+static void _dpu_crtc_vblank_enable_no_lock(
struct dpu_crtc *dpu_crtc, bool enable)
{
- struct drm_device *dev;
- struct drm_crtc *crtc;
+ struct drm_crtc *crtc = &dpu_crtc->base;
+ struct drm_device *dev = crtc->dev;
struct drm_encoder *enc;
- if (!dpu_crtc) {
- DPU_ERROR("invalid crtc\n");
- return -EINVAL;
- }
-
- crtc = &dpu_crtc->base;
- dev = crtc->dev;
-
if (enable) {
- int ret;
-
/* drop lock since power crtc cb may try to re-acquire lock */
mutex_unlock(&dpu_crtc->crtc_lock);
- ret = _dpu_crtc_power_enable(dpu_crtc, true);
+ pm_runtime_get_sync(dev->dev);
mutex_lock(&dpu_crtc->crtc_lock);
- if (ret)
- return ret;
list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
if (enc->crtc != crtc)
@@ -1125,11 +810,9 @@ static int _dpu_crtc_vblank_enable_no_lock(
/* drop lock since power crtc cb may try to re-acquire lock */
mutex_unlock(&dpu_crtc->crtc_lock);
- _dpu_crtc_power_enable(dpu_crtc, false);
+ pm_runtime_put_sync(dev->dev);
mutex_lock(&dpu_crtc->crtc_lock);
}
-
- return 0;
}
/**
@@ -1139,23 +822,7 @@ static int _dpu_crtc_vblank_enable_no_lock(
*/
static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
{
- struct dpu_crtc *dpu_crtc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
- int ret = 0;
-
- if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
- dpu_crtc = to_dpu_crtc(crtc);
- priv = crtc->dev->dev_private;
-
- if (!priv->kms) {
- DPU_ERROR("invalid crtc kms\n");
- return;
- }
- dpu_kms = to_dpu_kms(priv->kms);
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
@@ -1170,10 +837,7 @@ static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
crtc->base.id, enable);
else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
- ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
- if (ret)
- DPU_ERROR("%s vblank enable failed: %d\n",
- dpu_crtc->name, ret);
+ _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
}
dpu_crtc->suspend = enable;
@@ -1206,8 +870,6 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
/* duplicate base helper */
__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
- _dpu_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
-
return &cstate->base;
}
@@ -1244,9 +906,6 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
return;
}
- _dpu_crtc_rp_reset(&cstate->rp, &dpu_crtc->rp_lock,
- &dpu_crtc->rp_head);
-
cstate->base.crtc = crtc;
crtc->state = &cstate->base;
}
@@ -1254,62 +913,19 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
{
struct drm_crtc *crtc = arg;
- struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *encoder;
- struct dpu_crtc_mixer *m;
- u32 i, misr_status;
-
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
- dpu_crtc = to_dpu_crtc(crtc);
mutex_lock(&dpu_crtc->crtc_lock);
trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
- switch (event_type) {
- case DPU_POWER_EVENT_POST_ENABLE:
- /* restore encoder; crtc will be programmed during commit */
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
-
- dpu_encoder_virt_restore(encoder);
- }
-
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- m = &dpu_crtc->mixers[i];
- if (!m->hw_lm || !m->hw_lm->ops.setup_misr ||
- !dpu_crtc->misr_enable)
- continue;
-
- m->hw_lm->ops.setup_misr(m->hw_lm, true,
- dpu_crtc->misr_frame_count);
- }
- break;
- case DPU_POWER_EVENT_PRE_DISABLE:
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- m = &dpu_crtc->mixers[i];
- if (!m->hw_lm || !m->hw_lm->ops.collect_misr ||
- !dpu_crtc->misr_enable)
- continue;
+ /* restore encoder; crtc will be programmed during commit */
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
- misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
- dpu_crtc->misr_data[i] = misr_status ? misr_status :
- dpu_crtc->misr_data[i];
- }
- break;
- case DPU_POWER_EVENT_POST_DISABLE:
- /**
- * Nothing to do. All the planes on the CRTC will be
- * programmed for every frame
- */
- break;
- default:
- DPU_DEBUG("event:%d not handled\n", event_type);
- break;
+ dpu_encoder_virt_restore(encoder);
}
mutex_unlock(&dpu_crtc->crtc_lock);
@@ -1322,7 +938,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
struct drm_display_mode *mode;
struct drm_encoder *encoder;
struct msm_drm_private *priv;
- int ret;
unsigned long flags;
if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
@@ -1353,10 +968,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
if (dpu_crtc->enabled && !dpu_crtc->suspend &&
dpu_crtc->vblank_requested) {
- ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
- if (ret)
- DPU_ERROR("%s vblank enable failed: %d\n",
- dpu_crtc->name, ret);
+ _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
}
dpu_crtc->enabled = false;
@@ -1379,9 +991,8 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
dpu_power_handle_unregister_event(dpu_crtc->phandle,
dpu_crtc->power_event);
- memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
- dpu_crtc->num_mixers = 0;
- dpu_crtc->mixers_swapped = false;
+ memset(cstate->mixers, 0, sizeof(cstate->mixers));
+ cstate->num_mixers = 0;
/* disable clk & bw control until clk & bw properties are set */
cstate->bw_control = false;
@@ -1403,7 +1014,6 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
struct dpu_crtc *dpu_crtc;
struct drm_encoder *encoder;
struct msm_drm_private *priv;
- int ret;
if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
DPU_ERROR("invalid crtc\n");
@@ -1425,10 +1035,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
dpu_crtc->vblank_requested) {
- ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
- if (ret)
- DPU_ERROR("%s vblank enable failed: %d\n",
- dpu_crtc->name, ret);
+ _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
}
dpu_crtc->enabled = true;
@@ -1438,9 +1045,7 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
drm_crtc_vblank_on(crtc);
dpu_crtc->power_event = dpu_power_handle_register_event(
- dpu_crtc->phandle,
- DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_POST_DISABLE |
- DPU_POWER_EVENT_PRE_DISABLE,
+ dpu_crtc->phandle, DPU_POWER_EVENT_ENABLE,
dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
}
@@ -1496,7 +1101,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
memset(pipe_staged, 0, sizeof(pipe_staged));
- mixer_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+ mixer_width = _dpu_crtc_get_mixer_width(cstate, mode);
_dpu_crtc_setup_lm_bounds(crtc, state);
@@ -1535,8 +1140,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
cnt++;
dst = drm_plane_state_dest(pstate);
- if (!drm_rect_intersect(&clip, &dst) ||
- !drm_rect_equals(&clip, &dst)) {
+ if (!drm_rect_intersect(&clip, &dst)) {
DPU_ERROR("invalid vertical/horizontal destination\n");
DPU_ERROR("display: " DRM_RECT_FMT " plane: "
DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
@@ -1679,7 +1283,6 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
}
end:
- _dpu_crtc_rp_free_unused(&cstate->rp);
kfree(pstates);
return rc;
}
@@ -1687,7 +1290,6 @@ end:
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
struct dpu_crtc *dpu_crtc;
- int ret;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
@@ -1698,10 +1300,7 @@ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
mutex_lock(&dpu_crtc->crtc_lock);
trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
if (dpu_crtc->enabled && !dpu_crtc->suspend) {
- ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
- if (ret)
- DPU_ERROR("%s vblank enable failed: %d\n",
- dpu_crtc->name, ret);
+ _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
}
dpu_crtc->vblank_requested = en;
mutex_unlock(&dpu_crtc->crtc_lock);
@@ -1730,26 +1329,28 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
dpu_crtc = s->private;
crtc = &dpu_crtc->base;
+
+ drm_modeset_lock_all(crtc->dev);
cstate = to_dpu_crtc_state(crtc->state);
mutex_lock(&dpu_crtc->crtc_lock);
mode = &crtc->state->adjusted_mode;
- out_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+ out_width = _dpu_crtc_get_mixer_width(cstate, mode);
seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
mode->hdisplay, mode->vdisplay);
seq_puts(s, "\n");
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- m = &dpu_crtc->mixers[i];
+ for (i = 0; i < cstate->num_mixers; ++i) {
+ m = &cstate->mixers[i];
if (!m->hw_lm)
seq_printf(s, "\tmixer[%d] has no lm\n", i);
- else if (!m->hw_ctl)
+ else if (!m->lm_ctl)
seq_printf(s, "\tmixer[%d] has no ctl\n", i);
else
seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
- m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
+ m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
out_width, mode->vdisplay);
}
@@ -1822,6 +1423,7 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
mutex_unlock(&dpu_crtc->crtc_lock);
+ drm_modeset_unlock_all(crtc->dev);
return 0;
}
@@ -1831,113 +1433,6 @@ static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}
-static ssize_t _dpu_crtc_misr_setup(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_mixer *m;
- int i = 0, rc;
- char buf[MISR_BUFF_SIZE + 1];
- u32 frame_count, enable;
- size_t buff_copy;
-
- if (!file || !file->private_data)
- return -EINVAL;
-
- dpu_crtc = file->private_data;
- buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
- if (copy_from_user(buf, user_buf, buff_copy)) {
- DPU_ERROR("buffer copy failed\n");
- return -EINVAL;
- }
-
- buf[buff_copy] = 0; /* end of string */
-
- if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
- return -EINVAL;
-
- rc = _dpu_crtc_power_enable(dpu_crtc, true);
- if (rc)
- return rc;
-
- mutex_lock(&dpu_crtc->crtc_lock);
- dpu_crtc->misr_enable = enable;
- dpu_crtc->misr_frame_count = frame_count;
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- dpu_crtc->misr_data[i] = 0;
- m = &dpu_crtc->mixers[i];
- if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
- continue;
-
- m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
- }
- mutex_unlock(&dpu_crtc->crtc_lock);
- _dpu_crtc_power_enable(dpu_crtc, false);
-
- return count;
-}
-
-static ssize_t _dpu_crtc_misr_read(struct file *file,
- char __user *user_buff, size_t count, loff_t *ppos)
-{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_mixer *m;
- int i = 0, rc;
- u32 misr_status;
- ssize_t len = 0;
- char buf[MISR_BUFF_SIZE + 1] = {'\0'};
-
- if (*ppos)
- return 0;
-
- if (!file || !file->private_data)
- return -EINVAL;
-
- dpu_crtc = file->private_data;
- rc = _dpu_crtc_power_enable(dpu_crtc, true);
- if (rc)
- return rc;
-
- mutex_lock(&dpu_crtc->crtc_lock);
- if (!dpu_crtc->misr_enable) {
- len += snprintf(buf + len, MISR_BUFF_SIZE - len,
- "disabled\n");
- goto buff_check;
- }
-
- for (i = 0; i < dpu_crtc->num_mixers; ++i) {
- m = &dpu_crtc->mixers[i];
- if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
- continue;
-
- misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
- dpu_crtc->misr_data[i] = misr_status ? misr_status :
- dpu_crtc->misr_data[i];
- len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n",
- m->hw_lm->idx - LM_0);
- len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
- dpu_crtc->misr_data[i]);
- }
-
-buff_check:
- if (count <= len) {
- len = 0;
- goto end;
- }
-
- if (copy_to_user(user_buff, buf, len)) {
- len = -EFAULT;
- goto end;
- }
-
- *ppos += len; /* increase offset */
-
-end:
- mutex_unlock(&dpu_crtc->crtc_lock);
- _dpu_crtc_power_enable(dpu_crtc, false);
- return len;
-}
-
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
@@ -1955,8 +1450,6 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
struct drm_crtc *crtc = (struct drm_crtc *) s->private;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
- struct dpu_crtc_res *res;
- struct dpu_crtc_respool *rp;
int i;
seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
@@ -1973,17 +1466,6 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
dpu_crtc->cur_perf.max_per_pipe_ib[i]);
}
- mutex_lock(&dpu_crtc->rp_lock);
- list_for_each_entry(rp, &dpu_crtc->rp_head, rp_list) {
- seq_printf(s, "rp.%d: ", rp->sequence_id);
- list_for_each_entry(res, &rp->res_list, list)
- seq_printf(s, "0x%x/0x%llx/%pK/%d ",
- res->type, res->tag, res->val,
- atomic_read(&res->refcount));
- seq_puts(s, "\n");
- }
- mutex_unlock(&dpu_crtc->rp_lock);
-
return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
@@ -1999,19 +1481,12 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
.llseek = seq_lseek,
.release = single_release,
};
- static const struct file_operations debugfs_misr_fops = {
- .open = simple_open,
- .read = _dpu_crtc_misr_read,
- .write = _dpu_crtc_misr_setup,
- };
if (!crtc)
return -EINVAL;
dpu_crtc = to_dpu_crtc(crtc);
dpu_kms = _dpu_crtc_get_kms(crtc);
- if (!dpu_kms)
- return -EINVAL;
dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
crtc->dev->primary->debugfs_root);
@@ -2026,8 +1501,6 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
dpu_crtc->debugfs_root,
&dpu_crtc->base,
&dpu_crtc_debugfs_state_fops);
- debugfs_create_file("misr_data", 0600, dpu_crtc->debugfs_root,
- dpu_crtc, &debugfs_misr_fops);
return 0;
}
@@ -2082,7 +1555,8 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
};
/* initialize crtc */
-struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
+ struct drm_plane *cursor)
{
struct drm_crtc *crtc = NULL;
struct dpu_crtc *dpu_crtc = NULL;
@@ -2104,9 +1578,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
spin_lock_init(&dpu_crtc->spin_lock);
atomic_set(&dpu_crtc->frame_pending, 0);
- mutex_init(&dpu_crtc->rp_lock);
- INIT_LIST_HEAD(&dpu_crtc->rp_head);
-
init_completion(&dpu_crtc->frame_done_comp);
INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
@@ -2119,7 +1590,7 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
dpu_crtc_frame_event_work);
}
- drm_crtc_init_with_planes(dev, crtc, plane, NULL, &dpu_crtc_funcs,
+ drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
NULL);
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
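Editor's note: every dpu_crtc.c change above follows one pattern — mixer bookkeeping (num_mixers, mixers[]) moves out of the long-lived struct dpu_crtc and into the atomic struct dpu_crtc_state, so helpers derive it from crtc->state. A minimal user-space sketch of the width calculation that replaces dpu_crtc_get_mixer_width(); the struct layout below is simplified for illustration and is not the kernel definition.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures touched by this patch. */
struct display_mode { int hdisplay; };
struct crtc_state   { int num_mixers; struct display_mode adjusted_mode; };

/* Mirrors _dpu_crtc_get_mixer_width(): the panel width is split evenly
 * across however many layer mixers the atomic state has reserved. */
static int mixer_width(const struct crtc_state *cstate)
{
	return cstate->adjusted_mode.hdisplay / cstate->num_mixers;
}

int main(void)
{
	struct crtc_state single = { .num_mixers = 1,
				     .adjusted_mode = { .hdisplay = 1080 } };
	struct crtc_state dual   = { .num_mixers = 2,
				     .adjusted_mode = { .hdisplay = 2160 } };

	printf("single: %d px per LM\n", mixer_width(&single)); /* 1080 */
	printf("dual:   %d px per LM\n", mixer_width(&dual));   /* 1080 */
	return 0;
}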
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index e87109e608e9..3723b4830335 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -83,14 +83,14 @@ struct dpu_crtc_smmu_state_data {
/**
* struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context
- * @hw_ctl: CTL Path HW driver context
+ * @lm_ctl: CTL Path HW driver context
* @encoder: Encoder attached to this lm & ctl
* @mixer_op_mode: mixer blending operation mode
* @flush_mask: mixer flush mask for ctl, mixer and pipe
*/
struct dpu_crtc_mixer {
struct dpu_hw_mixer *hw_lm;
- struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_ctl *lm_ctl;
struct drm_encoder *encoder;
u32 mixer_op_mode;
u32 flush_mask;
@@ -121,11 +121,6 @@ struct dpu_crtc_frame_event {
* struct dpu_crtc - virtualized CRTC data structure
* @base : Base drm crtc structure
* @name : ASCII description of this crtc
- * @num_ctls : Number of ctl paths in use
- * @num_mixers : Number of mixers in use
- * @mixers_swapped: Whether the mixers have been swapped for left/right update
- * especially in the case of DSC Merge.
- * @mixers : List of active mixers
* @event : Pointer to last received drm vblank event. If there is a
* pending vblank event, this will be non-null.
* @vsync_count : Running count of received vsync events
@@ -156,27 +151,14 @@ struct dpu_crtc_frame_event {
* @event_thread : Pointer to event handler thread
* @event_worker : Event worker queue
* @event_lock : Spinlock around event handling code
- * @misr_enable : boolean entry indicates misr enable/disable status.
- * @misr_frame_count : misr frame count provided by client
- * @misr_data : store misr data before turning off the clocks.
* @phandle: Pointer to power handler
* @power_event : registered power event handle
* @cur_perf : current performance committed to clock/bandwidth driver
- * @rp_lock : serialization lock for resource pool
- * @rp_head : list of active resource pool
- * @scl3_cfg_lut : qseed3 lut config
*/
struct dpu_crtc {
struct drm_crtc base;
char name[DPU_CRTC_NAME_SIZE];
- /* HW Resources reserved for the crtc */
- u32 num_ctls;
- u32 num_mixers;
- bool mixers_swapped;
- struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
- struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg;
-
struct drm_pending_vblank_event *event;
u32 vsync_count;
@@ -206,77 +188,20 @@ struct dpu_crtc {
/* for handling internal event thread */
spinlock_t event_lock;
- bool misr_enable;
- u32 misr_frame_count;
- u32 misr_data[CRTC_DUAL_MIXERS];
struct dpu_power_handle *phandle;
struct dpu_power_event *power_event;
struct dpu_core_perf_params cur_perf;
- struct mutex rp_lock;
- struct list_head rp_head;
-
struct dpu_crtc_smmu_state_data smmu_state;
};
#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
/**
- * struct dpu_crtc_res_ops - common operations for crtc resources
- * @get: get given resource
- * @put: put given resource
- */
-struct dpu_crtc_res_ops {
- void *(*get)(void *val, u32 type, u64 tag);
- void (*put)(void *val);
-};
-
-#define DPU_CRTC_RES_FLAG_FREE BIT(0)
-
-/**
- * struct dpu_crtc_res - definition of crtc resources
- * @list: list of crtc resource
- * @type: crtc resource type
- * @tag: unique identifier per type
- * @refcount: reference/usage count
- * @ops: callback operations
- * @val: resource handle associated with type/tag
- * @flags: customization flags
- */
-struct dpu_crtc_res {
- struct list_head list;
- u32 type;
- u64 tag;
- atomic_t refcount;
- struct dpu_crtc_res_ops ops;
- void *val;
- u32 flags;
-};
-
-/**
- * dpu_crtc_respool - crtc resource pool
- * @rp_lock: pointer to serialization lock
- * @rp_head: pointer to head of active resource pools of this crtc
- * @rp_list: list of crtc resource pool
- * @sequence_id: sequence identifier, incremented per state duplication
- * @res_list: list of resource managed by this resource pool
- * @ops: resource operations for parent resource pool
- */
-struct dpu_crtc_respool {
- struct mutex *rp_lock;
- struct list_head *rp_head;
- struct list_head rp_list;
- u32 sequence_id;
- struct list_head res_list;
- struct dpu_crtc_res_ops ops;
-};
-
-/**
* struct dpu_crtc_state - dpu container for atomic crtc state
* @base: Base drm crtc state structure
- * @is_ppsplit : Whether current topology requires PPSplit special handling
* @bw_control : true if bw/clk controlled by core bw/clk properties
* @bw_split_vote : true if bw controlled by llcc/dram bw properties
* @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
@@ -285,41 +210,41 @@ struct dpu_crtc_respool {
* @property_values: Current crtc property values
* @input_fence_timeout_ns : Cached input fence timeout, in ns
* @new_perf: new performance state being requested
+ * @num_mixers : Number of mixers in use
+ * @mixers : List of active mixers
+ * @num_ctls : Number of ctl paths in use
+ * @hw_ctls : List of active ctl paths
*/
struct dpu_crtc_state {
struct drm_crtc_state base;
bool bw_control;
bool bw_split_vote;
-
- bool is_ppsplit;
struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
uint64_t input_fence_timeout_ns;
struct dpu_core_perf_params new_perf;
- struct dpu_crtc_respool rp;
+
+ /* HW Resources reserved for the crtc */
+ u32 num_mixers;
+ struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+
+ u32 num_ctls;
+ struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
};
#define to_dpu_crtc_state(x) \
container_of(x, struct dpu_crtc_state, base)
/**
- * dpu_crtc_get_mixer_width - get the mixer width
- * Mixer width will be same as panel width(/2 for split)
+ * dpu_crtc_state_is_stereo - Is crtc virtualized with two mixers?
+ * @cstate: Pointer to dpu crtc state
+ * @Return: true - has two mixers, false - has one mixer
*/
-static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc,
- struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+static inline bool dpu_crtc_state_is_stereo(struct dpu_crtc_state *cstate)
{
- u32 mixer_width;
-
- if (!dpu_crtc || !cstate || !mode)
- return 0;
-
- mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ?
- mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
-
- return mixer_width;
+ return cstate->num_mixers == CRTC_DUAL_MIXERS;
}
/**
@@ -375,9 +300,11 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc,
* dpu_crtc_init - create a new crtc object
* @dev: dpu device
* @plane: base plane
+ * @cursor: cursor plane
* @Return: new crtc object or error
*/
-struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
+ struct drm_plane *cursor);
/**
* dpu_crtc_register_custom_event - api for enabling/disabling crtc event
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 1b4de3486ef9..96cdf06e7da2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -65,8 +65,6 @@
#define MAX_CHANNELS_PER_ENC 2
-#define MISR_BUFF_SIZE 256
-
#define IDLE_SHORT_TIMEOUT 1
#define MAX_VDISPLAY_SPLIT 1080
@@ -161,8 +159,6 @@ enum dpu_enc_rc_states {
* @frame_done_timer: watchdog timer for frame done event
* @vsync_event_timer: vsync timer
* @disp_info: local copy of msm_display_info struct
- * @misr_enable: misr enable/disable status
- * @misr_frame_count: misr frame count before start capturing the data
 * @idle_pc_supported:	indicate if idle power collapse is supported
* @rc_lock: resource control mutex lock to protect
* virt encoder over various state changes
@@ -179,11 +175,10 @@ struct dpu_encoder_virt {
spinlock_t enc_spinlock;
uint32_t bus_scaling_client;
- uint32_t display_num_of_h_tiles;
-
unsigned int num_phys_encs;
struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
struct dpu_encoder_phys *cur_master;
+ struct dpu_encoder_phys *cur_slave;
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
bool intfs_swapped;
@@ -202,8 +197,6 @@ struct dpu_encoder_virt {
struct timer_list vsync_event_timer;
struct msm_display_info disp_info;
- bool misr_enable;
- u32 misr_frame_count;
bool idle_pc_supported;
struct mutex rc_lock;
@@ -443,30 +436,22 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
}
void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state)
+ struct dpu_encoder_hw_resources *hw_res)
{
struct dpu_encoder_virt *dpu_enc = NULL;
int i = 0;
- if (!hw_res || !drm_enc || !conn_state) {
- DPU_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
- drm_enc != 0, hw_res != 0, conn_state != 0);
- return;
- }
-
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
/* Query resources used by phys encs, expected to be without overlap */
memset(hw_res, 0, sizeof(*hw_res));
- hw_res->display_num_of_h_tiles = dpu_enc->display_num_of_h_tiles;
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys && phys->ops.get_hw_resources)
- phys->ops.get_hw_resources(phys, hw_res, conn_state);
+ phys->ops.get_hw_resources(phys, hw_res);
}
}
@@ -525,7 +510,7 @@ void dpu_encoder_helper_split_config(
hw_mdptop = phys_enc->hw_mdptop;
disp_info = &dpu_enc->disp_info;
- if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI)
+ if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
return;
/**
@@ -660,7 +645,7 @@ static int dpu_encoder_virt_atomic_check(
if (drm_atomic_crtc_needs_modeset(crtc_state)
&& dpu_enc->mode_set_complete) {
ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
- conn_state, topology, true);
+ topology, true);
dpu_enc->mode_set_complete = false;
}
}
@@ -1016,9 +1001,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct dpu_kms *dpu_kms;
struct list_head *connector_list;
struct drm_connector *conn = NULL, *conn_iter;
- struct dpu_rm_hw_iter pp_iter;
+ struct dpu_rm_hw_iter pp_iter, ctl_iter;
struct msm_display_topology topology;
- enum dpu_rm_topology_name topology_name;
+ struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
int i = 0, ret;
if (!drm_enc) {
@@ -1051,7 +1036,7 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
- conn->state, topology, false);
+ topology, false);
if (ret) {
DPU_ERROR_ENC(dpu_enc,
"failed to reserve hw resources, %d\n", ret);
@@ -1066,19 +1051,33 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
}
- topology_name = dpu_rm_get_topology_name(topology);
+ dpu_rm_init_hw_iter(&ctl_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &ctl_iter))
+ break;
+ hw_ctl[i] = (struct dpu_hw_ctl *)ctl_iter.hw;
+ }
+
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys) {
if (!dpu_enc->hw_pp[i]) {
- DPU_ERROR_ENC(dpu_enc,
- "invalid pingpong block for the encoder\n");
+				DPU_ERROR_ENC(dpu_enc,
+					"no pp block assigned at idx: %d\n", i);
return;
}
+
+ if (!hw_ctl[i]) {
+				DPU_ERROR_ENC(dpu_enc,
+					"no ctl block assigned at idx: %d\n", i);
+ return;
+ }
+
phys->hw_pp = dpu_enc->hw_pp[i];
+ phys->hw_ctl = hw_ctl[i];
+
phys->connector = conn->state->connector;
- phys->topology_name = topology_name;
if (phys->ops.mode_set)
phys->ops.mode_set(phys, mode, adj_mode);
}
@@ -1111,12 +1110,6 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
return;
}
- if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
- dpu_enc->cur_master->hw_mdptop &&
- dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
- dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
- dpu_enc->cur_master->hw_mdptop);
-
if (dpu_enc->cur_master->hw_mdptop &&
dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
@@ -1153,7 +1146,7 @@ void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
- int i, ret = 0;
+ int ret = 0;
struct drm_display_mode *cur_mode = NULL;
if (!drm_enc) {
@@ -1166,21 +1159,12 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
cur_mode->vdisplay);
- dpu_enc->cur_master = NULL;
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+ /* always enable slave encoder before master */
+ if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
+ dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
- if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
- DPU_DEBUG_ENC(dpu_enc, "master is now idx %d\n", i);
- dpu_enc->cur_master = phys;
- break;
- }
- }
-
- if (!dpu_enc->cur_master) {
- DPU_ERROR("virt encoder has no master! num_phys %d\n", i);
- return;
- }
+ if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
+ dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
if (ret) {
@@ -1189,26 +1173,6 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
return;
}
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (!phys)
- continue;
-
- if (phys != dpu_enc->cur_master) {
- if (phys->ops.enable)
- phys->ops.enable(phys);
- }
-
- if (dpu_enc->misr_enable && (dpu_enc->disp_info.capabilities &
- MSM_DISPLAY_CAP_VID_MODE) && phys->ops.setup_misr)
- phys->ops.setup_misr(phys, true,
- dpu_enc->misr_frame_count);
- }
-
- if (dpu_enc->cur_master->ops.enable)
- dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
-
_dpu_encoder_virt_enable_helper(drm_enc);
}
@@ -1266,8 +1230,6 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
dpu_enc->phys_encs[i]->connector = NULL;
}
- dpu_enc->cur_master = NULL;
-
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
dpu_rm_release(&dpu_kms->rm, drm_enc);
@@ -1397,9 +1359,9 @@ static void dpu_encoder_frame_done_callback(
/* One of the physical encoders has become idle */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
if (dpu_enc->phys_encs[i] == ready_phys) {
- clear_bit(i, dpu_enc->frame_busy_mask);
trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
dpu_enc->frame_busy_mask[0]);
+ clear_bit(i, dpu_enc->frame_busy_mask);
}
}
@@ -1480,7 +1442,8 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
ret = ctl->ops.get_pending_flush(ctl);
trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
- pending_kickoff_cnt, ctl->idx, ret);
+ pending_kickoff_cnt, ctl->idx,
+ extra_flush_bits, ret);
}
/**
@@ -1879,7 +1842,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
phys->ops.handle_post_kickoff(phys);
}
- if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
!_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
trace_dpu_enc_early_kickoff(DRMID(drm_enc),
ktime_to_ms(wakeup_time));
@@ -1955,113 +1918,6 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode,
return single_open(file, _dpu_encoder_status_show, inode->i_private);
}
-static ssize_t _dpu_encoder_misr_setup(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct dpu_encoder_virt *dpu_enc;
- int i = 0, rc;
- char buf[MISR_BUFF_SIZE + 1];
- size_t buff_copy;
- u32 frame_count, enable;
-
- if (!file || !file->private_data)
- return -EINVAL;
-
- dpu_enc = file->private_data;
-
- buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
- if (copy_from_user(buf, user_buf, buff_copy))
- return -EINVAL;
-
- buf[buff_copy] = 0; /* end of string */
-
- if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
- return -EINVAL;
-
- rc = _dpu_encoder_power_enable(dpu_enc, true);
- if (rc)
- return rc;
-
- mutex_lock(&dpu_enc->enc_lock);
- dpu_enc->misr_enable = enable;
- dpu_enc->misr_frame_count = frame_count;
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (!phys || !phys->ops.setup_misr)
- continue;
-
- phys->ops.setup_misr(phys, enable, frame_count);
- }
- mutex_unlock(&dpu_enc->enc_lock);
- _dpu_encoder_power_enable(dpu_enc, false);
-
- return count;
-}
-
-static ssize_t _dpu_encoder_misr_read(struct file *file,
- char __user *user_buff, size_t count, loff_t *ppos)
-{
- struct dpu_encoder_virt *dpu_enc;
- int i = 0, len = 0;
- char buf[MISR_BUFF_SIZE + 1] = {'\0'};
- int rc;
-
- if (*ppos)
- return 0;
-
- if (!file || !file->private_data)
- return -EINVAL;
-
- dpu_enc = file->private_data;
-
- rc = _dpu_encoder_power_enable(dpu_enc, true);
- if (rc)
- return rc;
-
- mutex_lock(&dpu_enc->enc_lock);
- if (!dpu_enc->misr_enable) {
- len += snprintf(buf + len, MISR_BUFF_SIZE - len,
- "disabled\n");
- goto buff_check;
- } else if (dpu_enc->disp_info.capabilities &
- ~MSM_DISPLAY_CAP_VID_MODE) {
- len += snprintf(buf + len, MISR_BUFF_SIZE - len,
- "unsupported\n");
- goto buff_check;
- }
-
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
-
- if (!phys || !phys->ops.collect_misr)
- continue;
-
- len += snprintf(buf + len, MISR_BUFF_SIZE - len,
- "Intf idx:%d\n", phys->intf_idx - INTF_0);
- len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
- phys->ops.collect_misr(phys));
- }
-
-buff_check:
- if (count <= len) {
- len = 0;
- goto end;
- }
-
- if (copy_to_user(user_buff, buf, len)) {
- len = -EFAULT;
- goto end;
- }
-
- *ppos += len; /* increase offset */
-
-end:
- mutex_unlock(&dpu_enc->enc_lock);
- _dpu_encoder_power_enable(dpu_enc, false);
- return len;
-}
-
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -2076,12 +1932,6 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
.release = single_release,
};
- static const struct file_operations debugfs_misr_fops = {
- .open = simple_open,
- .read = _dpu_encoder_misr_read,
- .write = _dpu_encoder_misr_setup,
- };
-
char name[DPU_NAME_SIZE];
if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
@@ -2105,9 +1955,6 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
debugfs_create_file("status", 0600,
dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
- debugfs_create_file("misr_data", 0600,
- dpu_enc->debugfs_root, dpu_enc, &debugfs_misr_fops);
-
for (i = 0; i < dpu_enc->num_phys_encs; i++)
if (dpu_enc->phys_encs[i] &&
dpu_enc->phys_encs[i]->ops.late_register)
@@ -2195,6 +2042,11 @@ static int dpu_encoder_virt_add_phys_encs(
++dpu_enc->num_phys_encs;
}
+ if (params->split_role == ENC_ROLE_SLAVE)
+ dpu_enc->cur_slave = enc;
+ else
+ dpu_enc->cur_master = enc;
+
return 0;
}
@@ -2206,8 +2058,7 @@ static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
- struct msm_display_info *disp_info,
- int *drm_enc_mode)
+ struct msm_display_info *disp_info)
{
int ret = 0;
int i = 0;
@@ -2220,6 +2071,8 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
return -EINVAL;
}
+ dpu_enc->cur_master = NULL;
+
memset(&phys_params, 0, sizeof(phys_params));
phys_params.dpu_kms = dpu_kms;
phys_params.parent = &dpu_enc->base;
@@ -2228,24 +2081,17 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
DPU_DEBUG("\n");
- if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
- *drm_enc_mode = DRM_MODE_ENCODER_DSI;
+ switch (disp_info->intf_type) {
+ case DRM_MODE_ENCODER_DSI:
intf_type = INTF_DSI;
- } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
- *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
- intf_type = INTF_HDMI;
- } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
- *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
- intf_type = INTF_DP;
- } else {
+ break;
+ default:
DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
return -EINVAL;
}
WARN_ON(disp_info->num_of_h_tiles < 1);
- dpu_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
-
DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
@@ -2358,25 +2204,22 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_encoder *drm_enc = NULL;
struct dpu_encoder_virt *dpu_enc = NULL;
- int drm_enc_mode = DRM_MODE_ENCODER_NONE;
int ret = 0;
dpu_enc = to_dpu_encoder_virt(enc);
mutex_init(&dpu_enc->enc_lock);
- ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info,
- &drm_enc_mode);
+ ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
if (ret)
goto fail;
- dpu_enc->cur_master = NULL;
spin_lock_init(&dpu_enc->enc_spinlock);
atomic_set(&dpu_enc->frame_done_timeout, 0);
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
- if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI)
+ if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
timer_setup(&dpu_enc->vsync_event_timer,
dpu_encoder_vsync_event_handler,
0);
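Editor's note: in dpu_encoder.c above, the master/slave physical encoders are now recorded once in dpu_encoder_virt_add_phys_encs() and dpu_encoder_virt_enable() brings the slave up before the master instead of rediscovering the master on every enable. A simplified model of that ordering; the struct names and INTF labels here are illustrative, not the kernel types.

#include <stdio.h>

struct phys_enc {
	const char *name;
	void (*enable)(struct phys_enc *phys);
};

static void phys_enable(struct phys_enc *phys)
{
	printf("enable %s\n", phys->name);
}

struct virt_enc {
	struct phys_enc *cur_master;
	struct phys_enc *cur_slave;
};

/* Mirrors the new dpu_encoder_virt_enable() flow: always enable the
 * slave encoder before the master. */
static void virt_enable(struct virt_enc *enc)
{
	if (enc->cur_slave && enc->cur_slave->enable)
		enc->cur_slave->enable(enc->cur_slave);

	if (enc->cur_master && enc->cur_master->enable)
		enc->cur_master->enable(enc->cur_master);
}

int main(void)
{
	struct phys_enc master = { "master (e.g. INTF_1)", phys_enable };
	struct phys_enc slave  = { "slave (e.g. INTF_2)",  phys_enable };
	struct virt_enc enc    = { &master, &slave };

	virt_enable(&enc);	/* prints the slave first, then the master */
	return 0;
}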
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 60f809fc7c13..9dbf38f446d9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -32,15 +32,9 @@
/**
* Encoder functions and data types
* @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused
- * @needs_cdm: Encoder requests a CDM based on pixel format conversion needs
- * @display_num_of_h_tiles: Number of horizontal tiles in case of split
- * interface
- * @topology: Topology of the display
*/
struct dpu_encoder_hw_resources {
enum dpu_intf_mode intfs[INTF_MAX];
- bool needs_cdm;
- u32 display_num_of_h_tiles;
};
/**
@@ -56,11 +50,9 @@ struct dpu_encoder_kickoff_params {
* dpu_encoder_get_hw_resources - Populate table of required hardware resources
* @encoder: encoder pointer
* @hw_res: resource table to populate with encoder required resources
- * @conn_state: report hw reqs based on this proposed connector state
*/
void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state);
+ struct dpu_encoder_hw_resources *hw_res);
/**
* dpu_encoder_register_vblank_callback - provide callback to encoder that
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index c7df8aad6613..964efcc757a4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -22,8 +22,8 @@
#include "dpu_hw_pingpong.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_top.h"
-#include "dpu_hw_cdm.h"
#include "dpu_encoder.h"
+#include "dpu_crtc.h"
#define DPU_ENCODER_NAME_MAX 16
@@ -114,8 +114,6 @@ struct dpu_encoder_virt_ops {
* @handle_post_kickoff: Do any work necessary post-kickoff work
* @trigger_start: Process start event on physical encoder
* @needs_single_flush: Whether encoder slaves need to be flushed
- * @setup_misr: Sets up MISR, enable and disables based on sysfs
- * @collect_misr: Collects MISR data on frame update
* @hw_reset: Issue HW recovery such as CTL reset and clear
* DPU_ENC_ERR_NEEDS_HW_RESET state
* @irq_control: Handler to enable/disable all the encoder IRQs
@@ -143,8 +141,7 @@ struct dpu_encoder_phys_ops {
struct drm_connector_state *conn_state);
void (*destroy)(struct dpu_encoder_phys *encoder);
void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state);
+ struct dpu_encoder_hw_resources *hw_res);
int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
@@ -154,10 +151,6 @@ struct dpu_encoder_phys_ops {
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
-
- void (*setup_misr)(struct dpu_encoder_phys *phys_encs,
- bool enable, u32 frame_count);
- u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc);
void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
@@ -210,8 +203,6 @@ struct dpu_encoder_irq {
* @parent_ops: Callbacks exposed by the parent to the phys_enc
* @hw_mdptop: Hardware interface to the top registers
* @hw_ctl: Hardware interface to the ctl registers
- * @hw_cdm: Hardware interface to the cdm registers
- * @cdm_cfg: Chroma-down hardware configuration
* @hw_pp: Hardware interface to the ping pong registers
* @dpu_kms: Pointer to the dpu_kms top level
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
@@ -219,7 +210,6 @@ struct dpu_encoder_irq {
* @split_role: Role to play in a split-panel configuration
* @intf_mode: Interface mode
* @intf_idx: Interface index on dpu hardware
- * @topology_name: topology selected for the display
* @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @enable_state: Enable state tracking
* @vblank_refcount: Reference count of vblank request
@@ -241,15 +231,12 @@ struct dpu_encoder_phys {
const struct dpu_encoder_virt_ops *parent_ops;
struct dpu_hw_mdp *hw_mdptop;
struct dpu_hw_ctl *hw_ctl;
- struct dpu_hw_cdm *hw_cdm;
- struct dpu_hw_cdm_cfg cdm_cfg;
struct dpu_hw_pingpong *hw_pp;
struct dpu_kms *dpu_kms;
struct drm_display_mode cached_mode;
enum dpu_enc_split_role split_role;
enum dpu_intf_mode intf_mode;
enum dpu_intf intf_idx;
- enum dpu_rm_topology_name topology_name;
spinlock_t *enc_spinlock;
enum dpu_enc_enable_state enable_state;
atomic_t vblank_refcount;
@@ -367,11 +354,15 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
struct dpu_encoder_phys *phys_enc)
{
+ struct dpu_crtc_state *dpu_cstate;
+
if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
return BLEND_3D_NONE;
+ dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
+
if (phys_enc->split_role == ENC_ROLE_SOLO &&
- phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
+ dpu_crtc_state_is_stereo(dpu_cstate))
return BLEND_3D_H_ROW_INT;
return BLEND_3D_NONE;
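Editor's note: dpu_encoder_helper_get_3d_blend_mode() above now derives the 3D merge decision from the atomic CRTC state rather than a cached topology name: row-interleaved merge is chosen only when a solo physical encoder drives a dual-mixer (stereo) state. A simplified model of that predicate; the enum values and struct layout are stand-ins for the kernel definitions.

#include <stdbool.h>
#include <stdio.h>

#define CRTC_DUAL_MIXERS 2

/* Simplified stand-ins for the kernel enums used by the helper. */
enum split_role { ENC_ROLE_SOLO, ENC_ROLE_MASTER, ENC_ROLE_SLAVE };
enum blend_3d   { BLEND_3D_NONE, BLEND_3D_H_ROW_INT };

struct crtc_state { unsigned int num_mixers; };

/* Mirrors dpu_crtc_state_is_stereo(): two mixers means a virtualized CRTC. */
static bool state_is_stereo(const struct crtc_state *cstate)
{
	return cstate->num_mixers == CRTC_DUAL_MIXERS;
}

/* One encoder feeding two mixers must interleave rows; every other
 * combination leaves 3D merge disabled. */
static enum blend_3d get_3d_blend_mode(enum split_role role,
				       const struct crtc_state *cstate)
{
	if (role == ENC_ROLE_SOLO && state_is_stereo(cstate))
		return BLEND_3D_H_ROW_INT;

	return BLEND_3D_NONE;
}

int main(void)
{
	struct crtc_state dual   = { .num_mixers = 2 };
	struct crtc_state single = { .num_mixers = 1 };

	printf("solo + dual mixers  -> %d\n",
	       get_3d_blend_mode(ENC_ROLE_SOLO, &dual));
	printf("solo + single mixer -> %d\n",
	       get_3d_blend_mode(ENC_ROLE_SOLO, &single));
	printf("master + dual       -> %d\n",
	       get_3d_blend_mode(ENC_ROLE_MASTER, &dual));
	return 0;
}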
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index 3084675ed425..b2d7f0ded24c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -196,9 +196,6 @@ static void dpu_encoder_phys_cmd_mode_set(
{
struct dpu_encoder_phys_cmd *cmd_enc =
to_dpu_encoder_phys_cmd(phys_enc);
- struct dpu_rm *rm = &phys_enc->dpu_kms->rm;
- struct dpu_rm_hw_iter iter;
- int i, instance;
if (!phys_enc || !mode || !adj_mode) {
DPU_ERROR("invalid args\n");
@@ -208,22 +205,6 @@ static void dpu_encoder_phys_cmd_mode_set(
DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
drm_mode_debug_printmodeline(adj_mode);
- instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
-
- /* Retrieve previously allocated HW Resources. Shouldn't fail */
- dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
- for (i = 0; i <= instance; i++) {
- if (dpu_rm_get_hw(rm, &iter))
- phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
- }
-
- if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
- DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
- PTR_ERR(phys_enc->hw_ctl));
- phys_enc->hw_ctl = NULL;
- return;
- }
-
_dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
}
@@ -618,23 +599,8 @@ static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
static void dpu_encoder_phys_cmd_get_hw_resources(
struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state)
+ struct dpu_encoder_hw_resources *hw_res)
{
- struct dpu_encoder_phys_cmd *cmd_enc =
- to_dpu_encoder_phys_cmd(phys_enc);
-
- if (!phys_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
-
- if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
- DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
- return;
- }
-
- DPU_DEBUG_CMDENC(cmd_enc, "\n");
hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
}
@@ -823,7 +789,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_cmd *cmd_enc = NULL;
- struct dpu_hw_mdp *hw_mdp;
struct dpu_encoder_irq *irq;
int i, ret = 0;
@@ -836,14 +801,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
goto fail;
}
phys_enc = &cmd_enc->base;
-
- hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
- if (IS_ERR_OR_NULL(hw_mdp)) {
- ret = PTR_ERR(hw_mdp);
- DPU_ERROR("failed to get mdptop\n");
- goto fail_mdp_init;
- }
- phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->intf_idx = p->intf_idx;
dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
@@ -898,8 +856,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
return phys_enc;
-fail_mdp_init:
- kfree(cmd_enc);
fail:
return ERR_PTR(ret);
}
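
Note: the command-mode init path is reduced to borrowing the MDP top pointer that dpu_kms already owns, so the resource-manager lookup and its error label disappear. A minimal sketch of the resulting shape, assuming the usual dpu_enc_phys_init_params parameter block and with the IRQ table setup and error handling trimmed:

    struct dpu_encoder_phys *cmd_phys_init_sketch(struct dpu_enc_phys_init_params *p)
    {
    	struct dpu_encoder_phys_cmd *cmd_enc;
    	struct dpu_encoder_phys *phys_enc;

    	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
    	if (!cmd_enc)
    		return ERR_PTR(-ENOMEM);

    	phys_enc = &cmd_enc->base;
    	phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;	/* no dpu_rm_get_mdp() round trip */
    	phys_enc->intf_idx = p->intf_idx;

    	return phys_enc;
    }
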
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 14fc7c2a6bb7..84de385a9f62 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -355,13 +355,14 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
{
+ struct dpu_crtc_state *dpu_cstate;
+
if (!phys_enc)
return false;
- if (phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE)
- return true;
+ dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
- return false;
+ return dpu_cstate->num_ctls > 1;
}
static bool dpu_encoder_phys_vid_needs_single_flush(
@@ -395,9 +396,6 @@ static void dpu_encoder_phys_vid_mode_set(
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- struct dpu_rm *rm;
- struct dpu_rm_hw_iter iter;
- int i, instance;
struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc || !phys_enc->dpu_kms) {
@@ -405,7 +403,6 @@ static void dpu_encoder_phys_vid_mode_set(
return;
}
- rm = &phys_enc->dpu_kms->rm;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
if (adj_mode) {
@@ -414,21 +411,6 @@ static void dpu_encoder_phys_vid_mode_set(
DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
}
- instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
-
- /* Retrieve previously allocated HW Resources. Shouldn't fail */
- dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
- for (i = 0; i <= instance; i++) {
- if (dpu_rm_get_hw(rm, &iter))
- phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
- }
- if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
- DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
- PTR_ERR(phys_enc->hw_ctl));
- phys_enc->hw_ctl = NULL;
- return;
- }
-
_dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
}
@@ -481,7 +463,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
struct msm_drm_private *priv;
struct dpu_encoder_phys_vid *vid_enc;
- struct dpu_hw_intf *intf;
+ struct dpu_rm_hw_iter iter;
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
@@ -493,11 +475,20 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
priv = phys_enc->parent->dev->dev_private;
vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- intf = vid_enc->hw_intf;
ctl = phys_enc->hw_ctl;
- if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
- DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
- vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+
+ dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_INTF);
+ while (dpu_rm_get_hw(&phys_enc->dpu_kms->rm, &iter)) {
+ struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
+
+ if (hw_intf->idx == phys_enc->intf_idx) {
+ vid_enc->hw_intf = hw_intf;
+ break;
+ }
+ }
+
+ if (!vid_enc->hw_intf) {
+ DPU_ERROR("hw_intf not assigned\n");
return;
}
@@ -519,7 +510,7 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
!dpu_encoder_phys_vid_is_master(phys_enc))
goto skip_flush;
- ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, vid_enc->hw_intf->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
skip_flush:
@@ -547,25 +538,9 @@ static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
static void dpu_encoder_phys_vid_get_hw_resources(
struct dpu_encoder_phys *phys_enc,
- struct dpu_encoder_hw_resources *hw_res,
- struct drm_connector_state *conn_state)
+ struct dpu_encoder_hw_resources *hw_res)
{
- struct dpu_encoder_phys_vid *vid_enc;
-
- if (!phys_enc || !hw_res) {
- DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
- phys_enc != 0, hw_res != 0, conn_state != 0);
- return;
- }
-
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf) {
- DPU_ERROR("invalid arg(s), hw_intf\n");
- return;
- }
-
- DPU_DEBUG_VIDENC(vid_enc, "\n");
- hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+ hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_VIDEO;
}
static int _dpu_encoder_phys_vid_wait_for_vblank(
@@ -756,32 +731,6 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
}
}
-static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc,
- bool enable, u32 frame_count)
-{
- struct dpu_encoder_phys_vid *vid_enc;
-
- if (!phys_enc)
- return;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
-
- if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
- vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
- enable, frame_count);
-}
-
-static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc)
-{
- struct dpu_encoder_phys_vid *vid_enc;
-
- if (!phys_enc)
- return 0;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
-
- return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
- vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
-}
-
static int dpu_encoder_phys_vid_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
@@ -817,8 +766,6 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
- ops->setup_misr = dpu_encoder_phys_vid_setup_misr;
- ops->collect_misr = dpu_encoder_phys_vid_collect_misr;
ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
}
@@ -828,8 +775,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
{
struct dpu_encoder_phys *phys_enc = NULL;
struct dpu_encoder_phys_vid *vid_enc = NULL;
- struct dpu_rm_hw_iter iter;
- struct dpu_hw_mdp *hw_mdp;
struct dpu_encoder_irq *irq;
int i, ret = 0;
@@ -846,35 +791,9 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
phys_enc = &vid_enc->base;
- hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
- if (IS_ERR_OR_NULL(hw_mdp)) {
- ret = PTR_ERR(hw_mdp);
- DPU_ERROR("failed to get mdptop\n");
- goto fail;
- }
- phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->intf_idx = p->intf_idx;
- /**
- * hw_intf resource permanently assigned to this encoder
- * Other resources allocated at atomic commit time by use case
- */
- dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF);
- while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) {
- struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
-
- if (hw_intf->idx == p->intf_idx) {
- vid_enc->hw_intf = hw_intf;
- break;
- }
- }
-
- if (!vid_enc->hw_intf) {
- ret = -EINVAL;
- DPU_ERROR("failed to get hw_intf\n");
- goto fail;
- }
-
DPU_DEBUG_VIDENC(vid_enc, "\n");
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
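
Note: the INTF block is no longer pinned at init time; the enable path above walks the resource-manager iterator to find the interface reserved for this encoder. The lookup on its own, using the iterator API exactly as in the hunk:

    static struct dpu_hw_intf *find_assigned_intf(struct dpu_encoder_phys *phys_enc)
    {
    	struct dpu_rm_hw_iter iter;

    	dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_INTF);
    	while (dpu_rm_get_hw(&phys_enc->dpu_kms->rm, &iter)) {
    		struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;

    		/* only accept the INTF index this encoder was created for */
    		if (hw_intf->idx == phys_enc->intf_idx)
    			return hw_intf;
    	}

    	return NULL;	/* nothing reserved for this encoder yet */
    }
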
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 44ee06398b1d..512ac0834d2b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -29,6 +29,9 @@
BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+#define DMA_CURSOR_SDM845_MASK \
+ (DMA_SDM845_MASK | BIT(DPU_SSPP_CURSOR))
+
#define MIXER_SDM845_MASK \
(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
@@ -71,7 +74,6 @@ static struct dpu_mdp_cfg sdm845_mdp[] = {
.base = 0x0, .len = 0x45C,
.features = 0,
.highest_bank_bit = 0x2,
- .has_dest_scaler = true,
.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
.reg_off = 0x2AC, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_VIG1] = {
@@ -174,45 +176,35 @@ static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
-#define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
- { \
- .name = _name, .id = _id, \
- .base = _base, .len = 0x1c8, \
- .features = VIG_SDM845_MASK, \
- .sblk = &_sblk, \
- .xin_id = _xinid, \
- .type = SSPP_TYPE_VIG, \
- .clk_ctrl = _clkctrl \
- }
-
-#define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+#define SSPP_BLK(_name, _id, _base, _features, \
+ _sblk, _xinid, _type, _clkctrl) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x1c8, \
- .features = DMA_SDM845_MASK, \
+ .features = _features, \
.sblk = &_sblk, \
.xin_id = _xinid, \
- .type = SSPP_TYPE_DMA, \
+ .type = _type, \
.clk_ctrl = _clkctrl \
}
static struct dpu_sspp_cfg sdm845_sspp[] = {
- SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000,
- sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0),
- SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000,
- sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1),
- SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000,
- sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2),
- SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000,
- sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3),
- SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000,
- sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0),
- SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000,
- sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1),
- SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000,
- sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0),
- SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000,
- sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1),
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+ SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+ SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SDM845_MASK,
+ sdm845_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
};
/*************************************************************
@@ -227,48 +219,23 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
},
};
-#define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \
+#define LM_BLK(_name, _id, _base, _pp, _lmpair) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x320, \
.features = MIXER_SDM845_MASK, \
.sblk = &sdm845_lm_sblk, \
- .ds = _ds, \
.pingpong = _pp, \
.lm_pair_mask = (1 << _lmpair) \
}
static struct dpu_lm_cfg sdm845_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1),
- LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0),
- LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5),
- LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0),
- LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0),
- LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2),
-};
-
-/*************************************************************
- * DS sub blocks config
- *************************************************************/
-static const struct dpu_ds_top_cfg sdm845_ds_top = {
- .name = "ds_top_0", .id = DS_TOP,
- .base = 0x60000, .len = 0xc,
- .maxinputwidth = DEFAULT_DPU_LINE_WIDTH,
- .maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
- .maxupscale = MAX_UPSCALE_RATIO,
-};
-
-#define DS_BLK(_name, _id, _base) \
- {\
- .name = _name, .id = _id, \
- .base = _base, .len = 0x800, \
- .features = DPU_SSPP_SCALER_QSEED3, \
- .top = &sdm845_ds_top \
- }
-
-static struct dpu_ds_cfg sdm845_ds[] = {
- DS_BLK("ds_0", DS_0, 0x800),
- DS_BLK("ds_1", DS_1, 0x1000),
+ LM_BLK("lm_0", LM_0, 0x44000, PINGPONG_0, LM_1),
+ LM_BLK("lm_1", LM_1, 0x45000, PINGPONG_1, LM_0),
+ LM_BLK("lm_2", LM_2, 0x46000, PINGPONG_2, LM_5),
+ LM_BLK("lm_3", LM_3, 0x0, PINGPONG_MAX, 0),
+ LM_BLK("lm_4", LM_4, 0x0, PINGPONG_MAX, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, PINGPONG_3, LM_2),
};
/*************************************************************
@@ -328,18 +295,6 @@ static struct dpu_intf_cfg sdm845_intf[] = {
};
/*************************************************************
- * CDM sub blocks config
- *************************************************************/
-static struct dpu_cdm_cfg sdm845_cdm[] = {
- {
- .name = "cdm_0", .id = CDM_0,
- .base = 0x79200, .len = 0x224,
- .features = 0,
- .intf_connect = BIT(INTF_3),
- },
-};
-
-/*************************************************************
* VBIF sub blocks config
*************************************************************/
/* VBIF QOS remap */
@@ -461,12 +416,8 @@ static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.sspp = sdm845_sspp,
.mixer_count = ARRAY_SIZE(sdm845_lm),
.mixer = sdm845_lm,
- .ds_count = ARRAY_SIZE(sdm845_ds),
- .ds = sdm845_ds,
.pingpong_count = ARRAY_SIZE(sdm845_pp),
.pingpong = sdm845_pp,
- .cdm_count = ARRAY_SIZE(sdm845_cdm),
- .cdm = sdm845_cdm,
.intf_count = ARRAY_SIZE(sdm845_intf),
.intf = sdm845_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
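
Note: with the VIG/DMA macros folded into one SSPP_BLK(), the per-pipe differences live entirely in the arguments. For reference, the sspp_10 entry above expands to the following initializer (derived from the macro body and arguments in this hunk):

    {
    	.name = "sspp_10", .id = SSPP_DMA2,
    	.base = 0x28000, .len = 0x1c8,
    	.features = DMA_CURSOR_SDM845_MASK,	/* DMA pipe that may back a cursor plane */
    	.sblk = &sdm845_dma_sblk_2,
    	.xin_id = 9,
    	.type = SSPP_TYPE_DMA,
    	.clk_ctrl = DPU_CLK_CTRL_CURSOR0,
    },
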
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index f0cb0d4fc80e..dc060e7358e4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -428,7 +428,6 @@ struct dpu_clk_ctrl_reg {
* @highest_bank_bit: UBWC parameter
* @ubwc_static: ubwc static configuration
* @ubwc_swizzle: ubwc default swizzle setting
- * @has_dest_scaler: indicates support of destination scaler
* @clk_ctrls clock control register definition
*/
struct dpu_mdp_cfg {
@@ -436,7 +435,6 @@ struct dpu_mdp_cfg {
u32 highest_bank_bit;
u32 ubwc_static;
u32 ubwc_swizzle;
- bool has_dest_scaler;
struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
};
@@ -474,50 +472,16 @@ struct dpu_sspp_cfg {
* @features bit mask identifying sub-blocks/features
* @sblk: LM Sub-blocks information
* @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
- * @ds: ID of connected DS, DS_MAX if unsupported
* @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
*/
struct dpu_lm_cfg {
DPU_HW_BLK_INFO;
const struct dpu_lm_sub_blks *sblk;
u32 pingpong;
- u32 ds;
unsigned long lm_pair_mask;
};
/**
- * struct dpu_ds_top_cfg - information of dest scaler top
- * @id enum identifying this block
- * @base register offset of this block
- * @features bit mask identifying features
- * @version hw version of dest scaler
- * @maxinputwidth maximum input line width
- * @maxoutputwidth maximum output line width
- * @maxupscale maximum upscale ratio
- */
-struct dpu_ds_top_cfg {
- DPU_HW_BLK_INFO;
- u32 version;
- u32 maxinputwidth;
- u32 maxoutputwidth;
- u32 maxupscale;
-};
-
-/**
- * struct dpu_ds_cfg - information of dest scaler blocks
- * @id enum identifying this block
- * @base register offset wrt DS top offset
- * @features bit mask identifying features
- * @version hw version of the qseed block
- * @top DS top information
- */
-struct dpu_ds_cfg {
- DPU_HW_BLK_INFO;
- u32 version;
- const struct dpu_ds_top_cfg *top;
-};
-
-/**
* struct dpu_pingpong_cfg - information of PING-PONG blocks
* @id enum identifying this block
* @base register offset of this block
@@ -530,18 +494,6 @@ struct dpu_pingpong_cfg {
};
/**
- * struct dpu_cdm_cfg - information of chroma down blocks
- * @id enum identifying this block
- * @base register offset of this block
- * @features bit mask identifying sub-blocks/features
- * @intf_connect Bitmask of INTF IDs this CDM can connect to
- */
-struct dpu_cdm_cfg {
- DPU_HW_BLK_INFO;
- unsigned long intf_connect;
-};
-
-/**
* struct dpu_intf_cfg - information of timing engine blocks
* @id enum identifying this block
* @base register offset of this block
@@ -728,15 +680,9 @@ struct dpu_mdss_cfg {
u32 mixer_count;
struct dpu_lm_cfg *mixer;
- u32 ds_count;
- struct dpu_ds_cfg *ds;
-
u32 pingpong_count;
struct dpu_pingpong_cfg *pingpong;
- u32 cdm_count;
- struct dpu_cdm_cfg *cdm;
-
u32 intf_count;
struct dpu_intf_cfg *intf;
@@ -771,9 +717,7 @@ struct dpu_mdss_hw_cfg_handler {
#define BLK_DMA(s) ((s)->dma)
#define BLK_CURSOR(s) ((s)->cursor)
#define BLK_MIXER(s) ((s)->mixer)
-#define BLK_DS(s) ((s)->ds)
#define BLK_PINGPONG(s) ((s)->pingpong)
-#define BLK_CDM(s) ((s)->cdm)
#define BLK_INTF(s) ((s)->intf)
#define BLK_AD(s) ((s)->ad)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
deleted file mode 100644
index 554874ba0c3b..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include "dpu_hw_mdss.h"
-#include "dpu_hwio.h"
-#include "dpu_hw_catalog.h"
-#include "dpu_hw_cdm.h"
-#include "dpu_dbg.h"
-#include "dpu_kms.h"
-
-#define CDM_CSC_10_OPMODE 0x000
-#define CDM_CSC_10_BASE 0x004
-
-#define CDM_CDWN2_OP_MODE 0x100
-#define CDM_CDWN2_CLAMP_OUT 0x104
-#define CDM_CDWN2_PARAMS_3D_0 0x108
-#define CDM_CDWN2_PARAMS_3D_1 0x10C
-#define CDM_CDWN2_COEFF_COSITE_H_0 0x110
-#define CDM_CDWN2_COEFF_COSITE_H_1 0x114
-#define CDM_CDWN2_COEFF_COSITE_H_2 0x118
-#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
-#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
-#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
-#define CDM_CDWN2_COEFF_COSITE_V 0x128
-#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C
-#define CDM_CDWN2_OUT_SIZE 0x130
-
-#define CDM_HDMI_PACK_OP_MODE 0x200
-#define CDM_CSC_10_MATRIX_COEFF_0 0x004
-
-/**
- * Horizontal coefficients for cosite chroma downscale
- * s13 representation of coefficients
- */
-static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
-
-/**
- * Horizontal coefficients for offsite chroma downscale
- */
-static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
-
-/**
- * Vertical coefficients for cosite chroma downscale
- */
-static u32 cosite_v_coeff[] = {0x00080004};
-/**
- * Vertical coefficients for offsite chroma downscale
- */
-static u32 offsite_v_coeff[] = {0x00060002};
-
-/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
-static struct dpu_csc_cfg rgb2yuv_cfg = {
- {
- 0x0083, 0x0102, 0x0032,
- 0x1fb5, 0x1f6c, 0x00e1,
- 0x00e1, 0x1f45, 0x1fdc
- },
- { 0x00, 0x00, 0x00 },
- { 0x0040, 0x0200, 0x0200 },
- { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
- { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
-};
-
-static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm,
- struct dpu_mdss_cfg *m,
- void __iomem *addr,
- struct dpu_hw_blk_reg_map *b)
-{
- int i;
-
- for (i = 0; i < m->cdm_count; i++) {
- if (cdm == m->cdm[i].id) {
- b->base_off = addr;
- b->blk_off = m->cdm[i].base;
- b->length = m->cdm[i].len;
- b->hwversion = m->hwversion;
- b->log_mask = DPU_DBG_MASK_CDM;
- return &m->cdm[i];
- }
- }
-
- return ERR_PTR(-EINVAL);
-}
-
-static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx,
- struct dpu_csc_cfg *data)
-{
- dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
-
- return 0;
-}
-
-static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx,
- struct dpu_hw_cdm_cfg *cfg)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
- u32 opmode = 0;
- u32 out_size = 0;
-
- if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
- opmode &= ~BIT(7);
- else
- opmode |= BIT(7);
-
- /* ENABLE DWNS_H bit */
- opmode |= BIT(1);
-
- switch (cfg->h_cdwn_type) {
- case CDM_CDWN_DISABLE:
- /* CLEAR METHOD_H field */
- opmode &= ~(0x18);
- /* CLEAR DWNS_H bit */
- opmode &= ~BIT(1);
- break;
- case CDM_CDWN_PIXEL_DROP:
- /* Clear METHOD_H field (pixel drop is 0) */
- opmode &= ~(0x18);
- break;
- case CDM_CDWN_AVG:
- /* Clear METHOD_H field (Average is 0x1) */
- opmode &= ~(0x18);
- opmode |= (0x1 << 0x3);
- break;
- case CDM_CDWN_COSITE:
- /* Clear METHOD_H field (Average is 0x2) */
- opmode &= ~(0x18);
- opmode |= (0x2 << 0x3);
- /* Co-site horizontal coefficients */
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
- cosite_h_coeff[0]);
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
- cosite_h_coeff[1]);
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
- cosite_h_coeff[2]);
- break;
- case CDM_CDWN_OFFSITE:
- /* Clear METHOD_H field (Average is 0x3) */
- opmode &= ~(0x18);
- opmode |= (0x3 << 0x3);
-
- /* Off-site horizontal coefficients */
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
- offsite_h_coeff[0]);
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
- offsite_h_coeff[1]);
- DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
- offsite_h_coeff[2]);
- break;
- default:
- pr_err("%s invalid horz down sampling type\n", __func__);
- return -EINVAL;
- }
-
- /* ENABLE DWNS_V bit */
- opmode |= BIT(2);
-
- switch (cfg->v_cdwn_type) {
- case CDM_CDWN_DISABLE:
- /* CLEAR METHOD_V field */
- opmode &= ~(0x60);
- /* CLEAR DWNS_V bit */
- opmode &= ~BIT(2);
- break;
- case CDM_CDWN_PIXEL_DROP:
- /* Clear METHOD_V field (pixel drop is 0) */
- opmode &= ~(0x60);
- break;
- case CDM_CDWN_AVG:
- /* Clear METHOD_V field (Average is 0x1) */
- opmode &= ~(0x60);
- opmode |= (0x1 << 0x5);
- break;
- case CDM_CDWN_COSITE:
- /* Clear METHOD_V field (Average is 0x2) */
- opmode &= ~(0x60);
- opmode |= (0x2 << 0x5);
- /* Co-site vertical coefficients */
- DPU_REG_WRITE(c,
- CDM_CDWN2_COEFF_COSITE_V,
- cosite_v_coeff[0]);
- break;
- case CDM_CDWN_OFFSITE:
- /* Clear METHOD_V field (Average is 0x3) */
- opmode &= ~(0x60);
- opmode |= (0x3 << 0x5);
-
- /* Off-site vertical coefficients */
- DPU_REG_WRITE(c,
- CDM_CDWN2_COEFF_OFFSITE_V,
- offsite_v_coeff[0]);
- break;
- default:
- return -EINVAL;
- }
-
- if (cfg->v_cdwn_type || cfg->h_cdwn_type)
- opmode |= BIT(0); /* EN CDWN module */
- else
- opmode &= ~BIT(0);
-
- out_size = (cfg->output_width & 0xFFFF) |
- ((cfg->output_height & 0xFFFF) << 16);
- DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
- DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
- DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
- ((0x3FF << 16) | 0x0));
-
- return 0;
-}
-
-static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx,
- struct dpu_hw_cdm_cfg *cdm)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
- const struct dpu_format *fmt = cdm->output_fmt;
- struct cdm_output_cfg cdm_cfg = { 0 };
- u32 opmode = 0;
- u32 csc = 0;
-
- if (!DPU_FORMAT_IS_YUV(fmt))
- return -EINVAL;
-
- if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
- if (fmt->chroma_sample != DPU_CHROMA_H1V2)
- return -EINVAL; /*unsupported format */
- opmode = BIT(0);
- opmode |= (fmt->chroma_sample << 1);
- cdm_cfg.intf_en = true;
- }
-
- csc |= BIT(2);
- csc &= ~BIT(1);
- csc |= BIT(0);
-
- if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
- ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
-
- DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
- DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
- return 0;
-}
-
-static void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx)
-{
- struct cdm_output_cfg cdm_cfg = { 0 };
-
- if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
- ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
-}
-
-static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops,
- unsigned long features)
-{
- ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit;
- ops->setup_cdwn = dpu_hw_cdm_setup_cdwn;
- ops->enable = dpu_hw_cdm_enable;
- ops->disable = dpu_hw_cdm_disable;
-}
-
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
-
-struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
- void __iomem *addr,
- struct dpu_mdss_cfg *m,
- struct dpu_hw_mdp *hw_mdp)
-{
- struct dpu_hw_cdm *c;
- struct dpu_cdm_cfg *cfg;
- int rc;
-
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c)
- return ERR_PTR(-ENOMEM);
-
- cfg = _cdm_offset(idx, m, addr, &c->hw);
- if (IS_ERR_OR_NULL(cfg)) {
- kfree(c);
- return ERR_PTR(-EINVAL);
- }
-
- c->idx = idx;
- c->caps = cfg;
- _setup_cdm_ops(&c->ops, c->caps->features);
- c->hw_mdp = hw_mdp;
-
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
-
- /*
- * Perform any default initialization for the chroma down module
- * @setup default csc coefficients
- */
- dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
-
- return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
-}
-
-void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm)
-{
- if (cdm)
- dpu_hw_blk_destroy(&cdm->base);
- kfree(cdm);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
deleted file mode 100644
index 5cceb1ecb8e0..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _DPU_HW_CDM_H
-#define _DPU_HW_CDM_H
-
-#include "dpu_hw_mdss.h"
-#include "dpu_hw_top.h"
-#include "dpu_hw_blk.h"
-
-struct dpu_hw_cdm;
-
-struct dpu_hw_cdm_cfg {
- u32 output_width;
- u32 output_height;
- u32 output_bit_depth;
- u32 h_cdwn_type;
- u32 v_cdwn_type;
- const struct dpu_format *output_fmt;
- u32 output_type;
- int flags;
-};
-
-enum dpu_hw_cdwn_type {
- CDM_CDWN_DISABLE,
- CDM_CDWN_PIXEL_DROP,
- CDM_CDWN_AVG,
- CDM_CDWN_COSITE,
- CDM_CDWN_OFFSITE,
-};
-
-enum dpu_hw_cdwn_output_type {
- CDM_CDWN_OUTPUT_HDMI,
- CDM_CDWN_OUTPUT_WB,
-};
-
-enum dpu_hw_cdwn_output_bit_depth {
- CDM_CDWN_OUTPUT_8BIT,
- CDM_CDWN_OUTPUT_10BIT,
-};
-
-/**
- * struct dpu_hw_cdm_ops : Interface to the chroma down Hw driver functions
- * Assumption is these functions will be called after
- * clocks are enabled
- * @setup_csc: Programs the csc matrix
- * @setup_cdwn: Sets up the chroma down sub module
- * @enable: Enables the output to interface and programs the
- * output packer
- * @disable: Puts the cdm in bypass mode
- */
-struct dpu_hw_cdm_ops {
- /**
- * Programs the CSC matrix for conversion from RGB space to YUV space,
- * it is optional to call this function as this matrix is automatically
- * set during initialization, user should call this if it wants
- * to program a different matrix than default matrix.
- * @cdm: Pointer to the chroma down context structure
- * @data Pointer to CSC configuration data
- * return: 0 if success; error code otherwise
- */
- int (*setup_csc_data)(struct dpu_hw_cdm *cdm,
- struct dpu_csc_cfg *data);
-
- /**
- * Programs the Chroma downsample part.
- * @cdm Pointer to chroma down context
- */
- int (*setup_cdwn)(struct dpu_hw_cdm *cdm,
- struct dpu_hw_cdm_cfg *cfg);
-
- /**
- * Enable the CDM module
- * @cdm Pointer to chroma down context
- */
- int (*enable)(struct dpu_hw_cdm *cdm,
- struct dpu_hw_cdm_cfg *cfg);
-
- /**
- * Disable the CDM module
- * @cdm Pointer to chroma down context
- */
- void (*disable)(struct dpu_hw_cdm *cdm);
-};
-
-struct dpu_hw_cdm {
- struct dpu_hw_blk base;
- struct dpu_hw_blk_reg_map hw;
-
- /* chroma down */
- const struct dpu_cdm_cfg *caps;
- enum dpu_cdm idx;
-
- /* mdp top hw driver */
- struct dpu_hw_mdp *hw_mdp;
-
- /* ops */
- struct dpu_hw_cdm_ops ops;
-};
-
-/**
- * dpu_hw_cdm - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_cdm, base);
-}
-
-/**
- * dpu_hw_cdm_init - initializes the cdm hw driver object.
- * should be called once before accessing every cdm.
- * @idx: cdm index for which driver object is required
- * @addr: mapped register io address of MDP
- * @m : pointer to mdss catalog data
- * @hw_mdp: pointer to mdp top hw driver object
- */
-struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
- void __iomem *addr,
- struct dpu_mdss_cfg *m,
- struct dpu_hw_mdp *hw_mdp);
-
-/**
- * dpu_hw_cdm_destroy - destroys CDM driver context
- * @cdm: pointer to CDM driver context
- */
-void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm);
-
-#endif /*_DPU_HW_CDM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 06be7cf7ce50..eec1051f2afc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -224,19 +224,6 @@ static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
return 0;
}
-static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
- u32 *flushbits, enum dpu_cdm cdm)
-{
- switch (cdm) {
- case CDM_0:
- *flushbits |= BIT(26);
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
@@ -310,7 +297,7 @@ static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
int i, j;
- u8 stages;
+ int stages;
int pipes_per_stage;
stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
@@ -485,7 +472,6 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
- ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
};
static struct dpu_hw_blk_ops dpu_hw_ops = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index c66a71f8b839..6f313faca43e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -142,10 +142,6 @@ struct dpu_hw_ctl_ops {
u32 *flushbits,
enum dpu_intf blk);
- int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx,
- u32 *flushbits,
- enum dpu_cdm blk);
-
/**
* Set all blend stages to disabled
* @ctx : ctl path ctx pointer
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index d280df5613c9..9c6bba0ac7c3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -65,9 +65,6 @@
#define INTF_FRAME_COUNT 0x0AC
#define INTF_LINE_COUNT 0x0B0
-#define INTF_MISR_CTRL 0x180
-#define INTF_MISR_SIGNATURE 0x184
-
static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
struct dpu_mdss_cfg *m,
void __iomem *addr,
@@ -246,30 +243,6 @@ static void dpu_hw_intf_get_status(
}
}
-static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf,
- bool enable, u32 frame_count)
-{
- struct dpu_hw_blk_reg_map *c = &intf->hw;
- u32 config = 0;
-
- DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
- /* clear misr data */
- wmb();
-
- if (enable)
- config = (frame_count & MISR_FRAME_COUNT_MASK) |
- MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
-
- DPU_REG_WRITE(c, INTF_MISR_CTRL, config);
-}
-
-static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf)
-{
- struct dpu_hw_blk_reg_map *c = &intf->hw;
-
- return DPU_REG_READ(c, INTF_MISR_SIGNATURE);
-}
-
static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
{
struct dpu_hw_blk_reg_map *c;
@@ -289,8 +262,6 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
ops->get_status = dpu_hw_intf_get_status;
ops->enable_timing = dpu_hw_intf_enable_timing_engine;
- ops->setup_misr = dpu_hw_intf_setup_misr;
- ops->collect_misr = dpu_hw_intf_collect_misr;
ops->get_line_count = dpu_hw_intf_get_line_count;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index a79d735da68d..3b77df460dea 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -59,8 +59,6 @@ struct intf_status {
* @ setup_prog_fetch : enables/disables the programmable fetch logic
* @ enable_timing: enable/disable timing engine
* @ get_status: returns if timing engine is enabled or not
- * @ setup_misr: enables/disables MISR in HW register
- * @ collect_misr: reads and stores MISR data from HW register
* @ get_line_count: reads current vertical line counter
*/
struct dpu_hw_intf_ops {
@@ -77,11 +75,6 @@ struct dpu_hw_intf_ops {
void (*get_status)(struct dpu_hw_intf *intf,
struct intf_status *status);
- void (*setup_misr)(struct dpu_hw_intf *intf,
- bool enable, u32 frame_count);
-
- u32 (*collect_misr)(struct dpu_hw_intf *intf);
-
u32 (*get_line_count)(struct dpu_hw_intf *intf);
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index 4ab72b0f07a5..acb8dc8acaa5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -34,9 +34,6 @@
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
-#define LM_MISR_CTRL 0x310
-#define LM_MISR_SIGNATURE 0x314
-
static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
struct dpu_mdss_cfg *m,
void __iomem *addr,
@@ -171,30 +168,6 @@ static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
{
}
-static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx,
- bool enable, u32 frame_count)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
- u32 config = 0;
-
- DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
- /* clear misr data */
- wmb();
-
- if (enable)
- config = (frame_count & MISR_FRAME_COUNT_MASK) |
- MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
-
- DPU_REG_WRITE(c, LM_MISR_CTRL, config);
-}
-
-static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
-
- return DPU_REG_READ(c, LM_MISR_SIGNATURE);
-}
-
static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
struct dpu_hw_lm_ops *ops,
unsigned long features)
@@ -207,8 +180,6 @@ static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
ops->setup_alpha_out = dpu_hw_lm_setup_color3;
ops->setup_border_color = dpu_hw_lm_setup_border_color;
ops->setup_gc = dpu_hw_lm_gc;
- ops->setup_misr = dpu_hw_lm_setup_misr;
- ops->collect_misr = dpu_hw_lm_collect_misr;
};
static struct dpu_hw_blk_ops dpu_hw_ops = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index e29e5dab31bf..5b036aca8340 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -66,13 +66,6 @@ struct dpu_hw_lm_ops {
*/
void (*setup_gc)(struct dpu_hw_mixer *mixer,
void *cfg);
-
- /* setup_misr: enables/disables MISR in HW register */
- void (*setup_misr)(struct dpu_hw_mixer *ctx,
- bool enable, u32 frame_count);
-
- /* collect_misr: reads and stores MISR data from HW register */
- u32 (*collect_misr)(struct dpu_hw_mixer *ctx);
};
struct dpu_hw_mixer {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 35e6bf930924..68c54d2c9677 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -100,7 +100,6 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_SSPP,
DPU_HW_BLK_LM,
DPU_HW_BLK_CTL,
- DPU_HW_BLK_CDM,
DPU_HW_BLK_PINGPONG,
DPU_HW_BLK_INTF,
DPU_HW_BLK_WB,
@@ -173,13 +172,6 @@ enum dpu_dspp {
DSPP_MAX
};
-enum dpu_ds {
- DS_TOP,
- DS_0,
- DS_1,
- DS_MAX
-};
-
enum dpu_ctl {
CTL_0 = 1,
CTL_1,
@@ -189,12 +181,6 @@ enum dpu_ctl {
CTL_MAX
};
-enum dpu_cdm {
- CDM_0 = 1,
- CDM_1,
- CDM_MAX
-};
-
enum dpu_pingpong {
PINGPONG_0 = 1,
PINGPONG_1,
@@ -246,12 +232,6 @@ enum dpu_wb {
WB_MAX
};
-enum dpu_ad {
- AD_0 = 0x1,
- AD_1,
- AD_MAX
-};
-
enum dpu_cwb {
CWB_0 = 0x1,
CWB_1,
@@ -451,15 +431,14 @@ struct dpu_mdss_color {
* Define bit masks for h/w logging.
*/
#define DPU_DBG_MASK_NONE (1 << 0)
-#define DPU_DBG_MASK_CDM (1 << 1)
-#define DPU_DBG_MASK_INTF (1 << 2)
-#define DPU_DBG_MASK_LM (1 << 3)
-#define DPU_DBG_MASK_CTL (1 << 4)
-#define DPU_DBG_MASK_PINGPONG (1 << 5)
-#define DPU_DBG_MASK_SSPP (1 << 6)
-#define DPU_DBG_MASK_WB (1 << 7)
-#define DPU_DBG_MASK_TOP (1 << 8)
-#define DPU_DBG_MASK_VBIF (1 << 9)
-#define DPU_DBG_MASK_ROT (1 << 10)
+#define DPU_DBG_MASK_INTF (1 << 1)
+#define DPU_DBG_MASK_LM (1 << 2)
+#define DPU_DBG_MASK_CTL (1 << 3)
+#define DPU_DBG_MASK_PINGPONG (1 << 4)
+#define DPU_DBG_MASK_SSPP (1 << 5)
+#define DPU_DBG_MASK_WB (1 << 6)
+#define DPU_DBG_MASK_TOP (1 << 7)
+#define DPU_DBG_MASK_VBIF (1 << 8)
+#define DPU_DBG_MASK_ROT (1 << 9)
#endif /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index db2798e862fc..b8781256e21b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -98,23 +98,6 @@ static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
}
-static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp,
- struct cdm_output_cfg *cfg)
-{
- struct dpu_hw_blk_reg_map *c;
- u32 out_ctl = 0;
-
- if (!mdp || !cfg)
- return;
-
- c = &mdp->hw;
-
- if (cfg->intf_en)
- out_ctl |= BIT(19);
-
- DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
-}
-
static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
enum dpu_clk_ctrl_type clk_ctrl, bool enable)
{
@@ -307,7 +290,6 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
unsigned long cap)
{
ops->setup_split_pipe = dpu_hw_setup_split_pipe;
- ops->setup_cdm_output = dpu_hw_setup_cdm_output;
ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
ops->get_danger_status = dpu_hw_get_danger_status;
ops->setup_vsync_source = dpu_hw_setup_vsync_source;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 899925aaa6d7..192e338f20bb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -52,14 +52,6 @@ struct split_pipe_cfg {
};
/**
- * struct cdm_output_cfg: output configuration for cdm
- * @intf_en : enable/disable interface output
- */
-struct cdm_output_cfg {
- bool intf_en;
-};
-
-/**
* struct dpu_danger_safe_status: danger and safe status signals
* @mdp: top level status
* @sspp: source pipe status
@@ -89,7 +81,6 @@ struct dpu_vsync_source_cfg {
* Assumption is these functions will be called after clocks are enabled.
* @setup_split_pipe : Programs the pipe control registers
* @setup_pp_split : Programs the pp split control registers
- * @setup_cdm_output : programs cdm control
* @setup_traffic_shaper : programs traffic shaper control
*/
struct dpu_hw_mdp_ops {
@@ -102,14 +93,6 @@ struct dpu_hw_mdp_ops {
struct split_pipe_cfg *p);
/**
- * setup_cdm_output() : Setup selection control of the cdm data path
- * @mdp : mdp top context driver
- * @cfg : cdm output configuration
- */
- void (*setup_cdm_output)(struct dpu_hw_mdp *mdp,
- struct cdm_output_cfg *cfg);
-
- /**
* setup_traffic_shaper() : Setup traffic shaper control
* @mdp : mdp top context driver
* @cfg : traffic shaper configuration
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index 4cabae480a7b..cb5c0170374b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -50,9 +50,6 @@ static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
#define QSEED3_CLK_CTRL0 0x54
#define QSEED3_CLK_CTRL1 0x58
#define QSEED3_CLK_STATUS 0x5C
-#define QSEED3_MISR_CTRL 0x70
-#define QSEED3_MISR_SIGNATURE_0 0x74
-#define QSEED3_MISR_SIGNATURE_1 0x78
#define QSEED3_PHASE_INIT_Y_H 0x90
#define QSEED3_PHASE_INIT_Y_V 0x94
#define QSEED3_PHASE_INIT_UV_H 0x98
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 1240f505ca53..321fc64ddd0e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -148,16 +148,6 @@ struct dpu_hw_scaler3_cfg {
struct dpu_hw_scaler3_de_cfg de;
};
-struct dpu_hw_scaler3_lut_cfg {
- bool is_configured;
- u32 *dir_lut;
- size_t dir_len;
- u32 *cir_lut;
- size_t cir_len;
- u32 *sep_lut;
- size_t sep_len;
-};
-
/**
* struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure
* @num_ext_pxls_lr: Number of total horizontal pixels
@@ -325,12 +315,6 @@ int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off);
#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off)
#define DPU_REG_READ(c, off) dpu_reg_read(c, off)
-#define MISR_FRAME_COUNT_MASK 0xFF
-#define MISR_CTRL_ENABLE BIT(8)
-#define MISR_CTRL_STATUS BIT(9)
-#define MISR_CTRL_STATUS_CLEAR BIT(10)
-#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)
-
void *dpu_hw_util_get_dir(void);
void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 7dd6bd2d6d37..0a683e65a9f3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -450,7 +450,7 @@ static void _dpu_kms_initialize_dsi(struct drm_device *dev,
int i, rc;
/*TODO: Support two independent DSI connectors */
- encoder = dpu_encoder_init(dev, DRM_MODE_CONNECTOR_DSI);
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
if (IS_ERR_OR_NULL(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
return;
@@ -531,12 +531,13 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
{
struct drm_device *dev;
struct drm_plane *primary_planes[MAX_PLANES], *plane;
+ struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
struct drm_crtc *crtc;
struct msm_drm_private *priv;
struct dpu_mdss_cfg *catalog;
- int primary_planes_idx = 0, i, ret;
+ int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
int max_crtc_count;
if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
@@ -556,16 +557,24 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
- /* Create the planes */
+ /* Create the planes, keeping track of one primary/cursor per crtc */
for (i = 0; i < catalog->sspp_count; i++) {
- bool primary = true;
-
- if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)
- || primary_planes_idx >= max_crtc_count)
- primary = false;
-
- plane = dpu_plane_init(dev, catalog->sspp[i].id, primary,
- (1UL << max_crtc_count) - 1, 0);
+ enum drm_plane_type type;
+
+ if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
+ && cursor_planes_idx < max_crtc_count)
+ type = DRM_PLANE_TYPE_CURSOR;
+ else if (primary_planes_idx < max_crtc_count)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+
+ DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
+ type, catalog->sspp[i].features,
+ catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
+
+ plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
+ (1UL << max_crtc_count) - 1, 0);
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
@@ -573,7 +582,9 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
}
priv->planes[priv->num_planes++] = plane;
- if (primary)
+ if (type == DRM_PLANE_TYPE_CURSOR)
+ cursor_planes[cursor_planes_idx++] = plane;
+ else if (type == DRM_PLANE_TYPE_PRIMARY)
primary_planes[primary_planes_idx++] = plane;
}
@@ -581,7 +592,7 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
/* Create one CRTC per encoder */
for (i = 0; i < max_crtc_count; i++) {
- crtc = dpu_crtc_init(dev, primary_planes[i]);
+ crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
goto fail;
@@ -956,8 +967,7 @@ static void dpu_kms_handle_power_event(u32 event_type, void *usr)
if (!dpu_kms)
return;
- if (event_type == DPU_POWER_EVENT_POST_ENABLE)
- dpu_vbif_init_memtypes(dpu_kms);
+ dpu_vbif_init_memtypes(dpu_kms);
}
static int dpu_kms_hw_init(struct msm_kms *kms)
@@ -1144,10 +1154,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
/*
* Handle (re)initializations during power enable
*/
- dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms);
+ dpu_kms_handle_power_event(DPU_POWER_EVENT_ENABLE, dpu_kms);
dpu_kms->power_event = dpu_power_handle_register_event(
- &dpu_kms->phandle,
- DPU_POWER_EVENT_POST_ENABLE,
+ &dpu_kms->phandle, DPU_POWER_EVENT_ENABLE,
dpu_kms_handle_power_event, dpu_kms, "kms");
pm_runtime_put_sync(&dpu_kms->pdev->dev);
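
Note: the plane-creation loop above now hands out at most one cursor and one primary plane per CRTC and makes everything else an overlay. The decision it applies per catalog entry, pulled out as a sketch:

    static enum drm_plane_type pick_plane_type(unsigned long sspp_features,
    					       int cursor_planes_idx,
    					       int primary_planes_idx,
    					       int max_crtc_count)
    {
    	/* cursor-capable pipes become cursor planes until each CRTC has one */
    	if ((sspp_features & BIT(DPU_SSPP_CURSOR)) &&
    	    cursor_planes_idx < max_crtc_count)
    		return DRM_PLANE_TYPE_CURSOR;

    	/* then one primary per CRTC, the remainder are overlays */
    	if (primary_planes_idx < max_crtc_count)
    		return DRM_PLANE_TYPE_PRIMARY;

    	return DRM_PLANE_TYPE_OVERLAY;
    }
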
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 9e533b86682c..2235ef8129f4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -158,6 +158,8 @@ static void dpu_mdss_destroy(struct drm_device *dev)
_dpu_mdss_irq_domain_fini(dpu_mdss);
+ free_irq(platform_get_irq(pdev, 0), dpu_mdss);
+
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
@@ -215,7 +217,7 @@ int dpu_mdss_init(struct drm_device *dev)
if (ret)
goto irq_domain_error;
- ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+ ret = request_irq(platform_get_irq(pdev, 0),
dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
if (ret) {
DPU_ERROR("failed to init irq: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index b640e39ebaca..f549daf30fe6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -21,6 +21,8 @@
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
+#include <drm/drm_atomic_uapi.h>
+
#include "msm_drv.h"
#include "dpu_kms.h"
#include "dpu_formats.h"
@@ -123,26 +125,11 @@ struct dpu_plane {
static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
{
- struct msm_drm_private *priv;
+ struct msm_drm_private *priv = plane->dev->dev_private;
- if (!plane || !plane->dev)
- return NULL;
- priv = plane->dev->dev_private;
- if (!priv)
- return NULL;
return to_dpu_kms(priv->kms);
}
-static bool dpu_plane_enabled(struct drm_plane_state *state)
-{
- return state && state->fb && state->crtc;
-}
-
-static bool dpu_plane_sspp_enabled(struct drm_plane_state *state)
-{
- return state && state->crtc;
-}
-
/**
* _dpu_plane_calc_fill_level - calculate fill level of the given source format
* @plane: Pointer to drm plane
@@ -158,7 +145,7 @@ static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
u32 fixed_buff_size;
u32 total_fl;
- if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
+ if (!fmt || !plane->state || !src_width || !fmt->bpp) {
DPU_ERROR("invalid arguments\n");
return 0;
}
@@ -168,7 +155,7 @@ static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
- if (!dpu_plane_enabled(tmp->base.state))
+ if (!tmp->base.state->visible)
continue;
DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
pdpu->base.base.id, tmp->base.base.id,
@@ -239,26 +226,11 @@ static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
- struct dpu_plane *pdpu;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
const struct dpu_format *fmt = NULL;
u64 qos_lut;
u32 total_fl = 0, lut_usage;
- if (!plane || !fb) {
- DPU_ERROR("invalid arguments plane %d fb %d\n",
- plane != 0, fb != 0);
- return;
- }
-
- pdpu = to_dpu_plane(plane);
-
- if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
- DPU_ERROR("invalid arguments\n");
- return;
- } else if (!pdpu->pipe_hw->ops.setup_creq_lut) {
- return;
- }
-
if (!pdpu->is_rt_pipe) {
lut_usage = DPU_QOS_LUT_USAGE_NRT;
} else {
@@ -300,24 +272,10 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
struct drm_framebuffer *fb)
{
- struct dpu_plane *pdpu;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
const struct dpu_format *fmt = NULL;
u32 danger_lut, safe_lut;
- if (!plane || !fb) {
- DPU_ERROR("invalid arguments\n");
- return;
- }
-
- pdpu = to_dpu_plane(plane);
-
- if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
- DPU_ERROR("invalid arguments\n");
- return;
- } else if (!pdpu->pipe_hw->ops.setup_danger_safe_lut) {
- return;
- }
-
if (!pdpu->is_rt_pipe) {
danger_lut = pdpu->catalog->perf.danger_lut_tbl
[DPU_QOS_LUT_USAGE_NRT];
@@ -371,21 +329,7 @@ static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
bool enable, u32 flags)
{
- struct dpu_plane *pdpu;
-
- if (!plane) {
- DPU_ERROR("invalid arguments\n");
- return;
- }
-
- pdpu = to_dpu_plane(plane);
-
- if (!pdpu->pipe_hw || !pdpu->pipe_sblk) {
- DPU_ERROR("invalid arguments\n");
- return;
- } else if (!pdpu->pipe_hw->ops.setup_qos_ctrl) {
- return;
- }
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
@@ -421,35 +365,17 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
&pdpu->pipe_qos_cfg);
}
-int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+static void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
{
- struct dpu_plane *pdpu;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!plane || !plane->dev) {
- DPU_ERROR("invalid arguments\n");
- return -EINVAL;
- }
-
- priv = plane->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return -EINVAL;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
- pdpu = to_dpu_plane(plane);
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
if (!pdpu->is_rt_pipe)
- goto end;
+ return;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
_dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
-end:
- return 0;
}
/**
@@ -460,29 +386,9 @@ end:
static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
struct drm_crtc *crtc)
{
- struct dpu_plane *pdpu;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_ot_params ot_params;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!plane || !plane->dev || !crtc) {
- DPU_ERROR("invalid arguments plane %d crtc %d\n",
- plane != 0, crtc != 0);
- return;
- }
-
- priv = plane->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
- pdpu = to_dpu_plane(plane);
- if (!pdpu->pipe_hw) {
- DPU_ERROR("invalid pipe reference\n");
- return;
- }
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
memset(&ot_params, 0, sizeof(ot_params));
ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
@@ -504,28 +410,9 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
*/
static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
{
- struct dpu_plane *pdpu;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_qos_params qos_params;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!plane || !plane->dev) {
- DPU_ERROR("invalid arguments\n");
- return;
- }
-
- priv = plane->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
- pdpu = to_dpu_plane(plane);
- if (!pdpu->pipe_hw) {
- DPU_ERROR("invalid pipe reference\n");
- return;
- }
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
memset(&qos_params, 0, sizeof(qos_params));
qos_params.vbif_idx = VBIF_RT;
@@ -546,27 +433,12 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
/**
* _dpu_plane_get_aspace: gets the address space
*/
-static int _dpu_plane_get_aspace(
- struct dpu_plane *pdpu,
- struct dpu_plane_state *pstate,
- struct msm_gem_address_space **aspace)
+static inline struct msm_gem_address_space *_dpu_plane_get_aspace(
+ struct dpu_plane *pdpu)
{
- struct dpu_kms *kms;
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
- if (!pdpu || !pstate || !aspace) {
- DPU_ERROR("invalid parameters\n");
- return -EINVAL;
- }
-
- kms = _dpu_plane_get_kms(&pdpu->base);
- if (!kms) {
- DPU_ERROR("invalid kms\n");
- return -EINVAL;
- }
-
- *aspace = kms->base.aspace;
-
- return 0;
+ return kms->base.aspace;
}
static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
@@ -574,29 +446,10 @@ static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
struct dpu_hw_pipe_cfg *pipe_cfg,
struct drm_framebuffer *fb)
{
- struct dpu_plane *pdpu;
- struct msm_gem_address_space *aspace = NULL;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
int ret;
- if (!plane || !pstate || !pipe_cfg || !fb) {
- DPU_ERROR(
- "invalid arg(s), plane %d state %d cfg %d fb %d\n",
- plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
- return;
- }
-
- pdpu = to_dpu_plane(plane);
- if (!pdpu->pipe_hw) {
- DPU_ERROR_PLANE(pdpu, "invalid pipe_hw\n");
- return;
- }
-
- ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
- if (ret) {
- DPU_ERROR_PLANE(pdpu, "Failed to get aspace %d\n", ret);
- return;
- }
-
ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
if (ret == -EAGAIN)
DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n");
@@ -620,15 +473,6 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
{
uint32_t i;
- if (!pdpu || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h ||
- !chroma_subsmpl_v) {
- DPU_ERROR(
- "pdpu %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n",
- !!pdpu, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h,
- chroma_subsmpl_v);
- return;
- }
-
memset(scale_cfg, 0, sizeof(*scale_cfg));
memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
@@ -732,17 +576,8 @@ static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
struct dpu_plane_state *pstate,
const struct dpu_format *fmt, bool color_fill)
{
- struct dpu_hw_pixel_ext *pe;
uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
- if (!pdpu || !fmt || !pstate) {
- DPU_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
- pdpu != 0, fmt != 0, pstate != 0);
- return;
- }
-
- pe = &pstate->pixel_ext;
-
/* don't chroma subsample if decimating */
chroma_subsmpl_h =
drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
@@ -770,21 +605,8 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
uint32_t color, uint32_t alpha)
{
const struct dpu_format *fmt;
- const struct drm_plane *plane;
- struct dpu_plane_state *pstate;
-
- if (!pdpu || !pdpu->base.state) {
- DPU_ERROR("invalid plane\n");
- return -EINVAL;
- }
-
- if (!pdpu->pipe_hw) {
- DPU_ERROR_PLANE(pdpu, "invalid plane h/w pointer\n");
- return -EINVAL;
- }
-
- plane = &pdpu->base;
- pstate = to_dpu_plane_state(plane->state);
+ const struct drm_plane *plane = &pdpu->base;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
DPU_DEBUG_PLANE(pdpu, "\n");
@@ -835,12 +657,7 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
{
- struct dpu_plane_state *pstate;
-
- if (!drm_state)
- return;
-
- pstate = to_dpu_plane_state(drm_state);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(drm_state);
pstate->multirect_index = DPU_SSPP_RECT_SOLO;
pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
@@ -971,15 +788,6 @@ done:
void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
u32 *flush_sspp)
{
- struct dpu_plane_state *pstate;
-
- if (!plane || !flush_sspp) {
- DPU_ERROR("invalid parameters\n");
- return;
- }
-
- pstate = to_dpu_plane_state(plane->state);
-
*flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
}
@@ -993,7 +801,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_gem_object *obj;
struct msm_gem_object *msm_obj;
struct dma_fence *fence;
- struct msm_gem_address_space *aspace;
+ struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
int ret;
if (!new_state->fb)
@@ -1001,12 +809,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
- ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
- if (ret) {
- DPU_ERROR_PLANE(pdpu, "Failed to get aspace\n");
- return ret;
- }
-
/* cache aspace */
pstate->aspace = aspace;
@@ -1076,33 +878,30 @@ static bool dpu_plane_validate_src(struct drm_rect *src,
drm_rect_equals(fb_rect, src);
}
-static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
- int ret = 0;
- struct dpu_plane *pdpu;
- struct dpu_plane_state *pstate;
+ int ret = 0, min_scale;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ const struct drm_crtc_state *crtc_state = NULL;
const struct dpu_format *fmt;
struct drm_rect src, dst, fb_rect = { 0 };
- uint32_t max_upscale = 1, max_downscale = 1;
uint32_t min_src_size, max_linewidth;
- int hscale = 1, vscale = 1;
- if (!plane || !state) {
- DPU_ERROR("invalid arg(s), plane %d state %d\n",
- plane != 0, state != 0);
- ret = -EINVAL;
- goto exit;
- }
-
- pdpu = to_dpu_plane(plane);
- pstate = to_dpu_plane_state(state);
+ if (state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state->state,
+ state->crtc);
- if (!pdpu->pipe_sblk) {
- DPU_ERROR_PLANE(pdpu, "invalid catalog\n");
- ret = -EINVAL;
- goto exit;
+ min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxdwnscale);
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale,
+ pdpu->pipe_sblk->maxupscale << 16,
+ true, true);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
+ return ret;
}
+ if (!state->visible)
+ return 0;
src.x1 = state->src_x >> 16;
src.y1 = state->src_y >> 16;
@@ -1116,25 +915,6 @@ static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
- if (pdpu->features & DPU_SSPP_SCALER) {
- max_downscale = pdpu->pipe_sblk->maxdwnscale;
- max_upscale = pdpu->pipe_sblk->maxupscale;
- }
- if (drm_rect_width(&src) < drm_rect_width(&dst))
- hscale = drm_rect_calc_hscale(&src, &dst, 1, max_upscale);
- else
- hscale = drm_rect_calc_hscale(&dst, &src, 1, max_downscale);
- if (drm_rect_height(&src) < drm_rect_height(&dst))
- vscale = drm_rect_calc_vscale(&src, &dst, 1, max_upscale);
- else
- vscale = drm_rect_calc_vscale(&dst, &src, 1, max_downscale);
-
- DPU_DEBUG_PLANE(pdpu, "check %d -> %d\n",
- dpu_plane_enabled(plane->state), dpu_plane_enabled(state));
-
- if (!dpu_plane_enabled(state))
- goto exit;
-
fmt = to_dpu_format(msm_framebuffer_format(state->fb));
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
@@ -1145,13 +925,13 @@ static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
| BIT(DPU_SSPP_CSC_10BIT))))) {
DPU_ERROR_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
- ret = -EINVAL;
+ return -EINVAL;
/* check src bounds */
} else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
- ret = -E2BIG;
+ return -E2BIG;
/* valid yuv image */
} else if (DPU_FORMAT_IS_YUV(fmt) &&
@@ -1160,41 +940,22 @@ static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
drm_rect_height(&src) & 0x1)) {
DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&src));
- ret = -EINVAL;
+ return -EINVAL;
/* min dst support */
} else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
DRM_RECT_ARG(&dst));
- ret = -EINVAL;
+ return -EINVAL;
/* check decimated source width */
} else if (drm_rect_width(&src) > max_linewidth) {
DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
DRM_RECT_ARG(&src), max_linewidth);
- ret = -E2BIG;
-
- /* check scaler capability */
- } else if (hscale < 0 || vscale < 0) {
- DPU_ERROR_PLANE(pdpu, "invalid scaling requested src="
- DRM_RECT_FMT " dst=" DRM_RECT_FMT "\n",
- DRM_RECT_ARG(&src), DRM_RECT_ARG(&dst));
- ret = -E2BIG;
+ return -E2BIG;
}
-exit:
- return ret;
-}
-
-static int dpu_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- if (!state->fb)
- return 0;
-
- DPU_DEBUG_PLANE(to_dpu_plane(plane), "\n");
-
- return dpu_plane_sspp_atomic_check(plane, state);
+ return 0;
}
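
The scale limits handed to drm_atomic_helper_check_plane_state() above are plain 16.16 fixed-point numbers, the same format the plane source rectangle uses. A self-contained demo of that arithmetic follows; the maxdwnscale/maxupscale values are illustrative, not taken from any catalog entry:

#include <stdio.h>
#include <stdint.h>

/* same helper that msm_drv.h now exports for both dpu and mdp5 */
#define FRAC_16_16(mult, div)	(((mult) << 16) / (div))

int main(void)
{
	/* illustrative sspp limits; real values come from the catalog */
	uint32_t maxdwnscale = 4, maxupscale = 20;

	uint32_t min_scale = FRAC_16_16(1, maxdwnscale);	/* 0x4000   = 0.25x */
	uint32_t max_scale = maxupscale << 16;			/* 0x140000 = 20.0x */

	printf("min_scale=0x%x max_scale=0x%x\n", min_scale, max_scale);

	/* state->src is 16.16 fixed point; the update path below shifts it
	 * back down to whole pixels for pipe_cfg.src_rect */
	uint32_t src_x_fp = 128u << 16;
	printf("src_x=%u px\n", src_x_fp >> 16);
	return 0;
}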
void dpu_plane_flush(struct drm_plane *plane)
@@ -1243,46 +1004,16 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error)
pdpu->is_error = error;
}
-static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
{
- uint32_t nplanes, src_flags;
- struct dpu_plane *pdpu;
- struct drm_plane_state *state;
- struct dpu_plane_state *pstate;
- struct dpu_plane_state *old_pstate;
- const struct dpu_format *fmt;
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
- struct drm_rect src, dst;
-
- if (!plane) {
- DPU_ERROR("invalid plane\n");
- return -EINVAL;
- } else if (!plane->state) {
- DPU_ERROR("invalid plane state\n");
- return -EINVAL;
- } else if (!old_state) {
- DPU_ERROR("invalid old state\n");
- return -EINVAL;
- }
-
- pdpu = to_dpu_plane(plane);
- state = plane->state;
-
- pstate = to_dpu_plane_state(state);
-
- old_pstate = to_dpu_plane_state(old_state);
-
- crtc = state->crtc;
- fb = state->fb;
- if (!crtc || !fb) {
- DPU_ERROR_PLANE(pdpu, "invalid crtc %d or fb %d\n",
- crtc != 0, fb != 0);
- return -EINVAL;
- }
- fmt = to_dpu_format(msm_framebuffer_format(fb));
- nplanes = fmt->num_planes;
+ uint32_t src_flags;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_framebuffer *fb = state->fb;
+ const struct dpu_format *fmt =
+ to_dpu_format(msm_framebuffer_format(fb));
memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
@@ -1293,28 +1024,27 @@ static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
_dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
- src.x1 = state->src_x >> 16;
- src.y1 = state->src_y >> 16;
- src.x2 = src.x1 + (state->src_w >> 16);
- src.y2 = src.y1 + (state->src_h >> 16);
+ DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
+ ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_FP_ARG(&state->src),
+ crtc->base.id, DRM_RECT_ARG(&state->dst),
+ (char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt));
- dst = drm_plane_state_dest(state);
+ pdpu->pipe_cfg.src_rect = state->src;
- DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FMT "->crtc%u " DRM_RECT_FMT
- ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_ARG(&src),
- crtc->base.id, DRM_RECT_ARG(&dst),
- (char *)&fmt->base.pixel_format,
- DPU_FORMAT_IS_UBWC(fmt));
+ /* state->src is 16.16, src_rect is not */
+ pdpu->pipe_cfg.src_rect.x1 >>= 16;
+ pdpu->pipe_cfg.src_rect.x2 >>= 16;
+ pdpu->pipe_cfg.src_rect.y1 >>= 16;
+ pdpu->pipe_cfg.src_rect.y2 >>= 16;
- pdpu->pipe_cfg.src_rect = src;
- pdpu->pipe_cfg.dst_rect = dst;
+ pdpu->pipe_cfg.dst_rect = state->dst;
_dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
/* override for color fill */
if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
/* skip remaining processing on color fill */
- return 0;
+ return;
}
if (pdpu->pipe_hw->ops.setup_rects) {
@@ -1385,30 +1115,13 @@ static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
}
_dpu_plane_set_qos_remap(plane);
- return 0;
}
-static void _dpu_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void _dpu_plane_atomic_disable(struct drm_plane *plane)
{
- struct dpu_plane *pdpu;
- struct drm_plane_state *state;
- struct dpu_plane_state *pstate;
-
- if (!plane) {
- DPU_ERROR("invalid plane\n");
- return;
- } else if (!plane->state) {
- DPU_ERROR("invalid plane state\n");
- return;
- } else if (!old_state) {
- DPU_ERROR("invalid old state\n");
- return;
- }
-
- pdpu = to_dpu_plane(plane);
- state = plane->state;
- pstate = to_dpu_plane_state(state);
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ struct dpu_plane_state *pstate = to_dpu_plane_state(state);
trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
pstate->multirect_mode);
@@ -1424,31 +1137,17 @@ static void _dpu_plane_atomic_disable(struct drm_plane *plane,
static void dpu_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct dpu_plane *pdpu;
- struct drm_plane_state *state;
-
- if (!plane) {
- DPU_ERROR("invalid plane\n");
- return;
- } else if (!plane->state) {
- DPU_ERROR("invalid plane state\n");
- return;
- }
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct drm_plane_state *state = plane->state;
- pdpu = to_dpu_plane(plane);
pdpu->is_error = false;
- state = plane->state;
DPU_DEBUG_PLANE(pdpu, "\n");
- if (!dpu_plane_sspp_enabled(state)) {
- _dpu_plane_atomic_disable(plane, old_state);
+ if (!state->visible) {
+ _dpu_plane_atomic_disable(plane);
} else {
- int ret;
-
- ret = dpu_plane_sspp_atomic_update(plane, old_state);
- /* atomic_check should have ensured that this doesn't fail */
- WARN_ON(ret < 0);
+ dpu_plane_sspp_atomic_update(plane);
}
}
@@ -1485,8 +1184,7 @@ static void dpu_plane_destroy(struct drm_plane *plane)
/* this will destroy the states as well */
drm_plane_cleanup(plane);
- if (pdpu->pipe_hw)
- dpu_hw_sspp_destroy(pdpu->pipe_hw);
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
kfree(pdpu);
}
@@ -1505,9 +1203,7 @@ static void dpu_plane_destroy_state(struct drm_plane *plane,
pstate = to_dpu_plane_state(state);
- /* remove ref count for frame buffers */
- if (state->fb)
- drm_framebuffer_put(state->fb);
+ __drm_atomic_helper_plane_destroy_state(state);
kfree(pstate);
}
@@ -1827,40 +1523,17 @@ bool is_dpu_plane_virtual(struct drm_plane *plane)
/* initialize plane */
struct drm_plane *dpu_plane_init(struct drm_device *dev,
- uint32_t pipe, bool primary_plane,
+ uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs, u32 master_plane_id)
{
struct drm_plane *plane = NULL, *master_plane = NULL;
const struct dpu_format_extended *format_list;
struct dpu_plane *pdpu;
- struct msm_drm_private *priv;
- struct dpu_kms *kms;
- enum drm_plane_type type;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *kms = to_dpu_kms(priv->kms);
int zpos_max = DPU_ZPOS_MAX;
int ret = -EINVAL;
- if (!dev) {
- DPU_ERROR("[%u]device is NULL\n", pipe);
- goto exit;
- }
-
- priv = dev->dev_private;
- if (!priv) {
- DPU_ERROR("[%u]private data is NULL\n", pipe);
- goto exit;
- }
-
- if (!priv->kms) {
- DPU_ERROR("[%u]invalid KMS reference\n", pipe);
- goto exit;
- }
- kms = to_dpu_kms(priv->kms);
-
- if (!kms->catalog) {
- DPU_ERROR("[%u]invalid catalog reference\n", pipe);
- goto exit;
- }
-
/* create and zero local structure */
pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
if (!pdpu) {
@@ -1916,12 +1589,6 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
goto clean_sspp;
}
- if (pdpu->features & BIT(DPU_SSPP_CURSOR))
- type = DRM_PLANE_TYPE_CURSOR;
- else if (primary_plane)
- type = DRM_PLANE_TYPE_PRIMARY;
- else
- type = DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
pdpu->formats, pdpu->nformats,
NULL, type, NULL);
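
dpu_plane_init() now takes the DRM plane type from its caller instead of deriving it from the SSPP feature bits. A hedged sketch of what a call site could look like; pipe_id, is_primary and the error handling are illustrative, not lifted from dpu_kms.c:

/* illustrative call site: the KMS layer decides which pipes back
 * primary planes and passes the DRM plane type straight through */
enum drm_plane_type type = is_primary ? DRM_PLANE_TYPE_PRIMARY
				       : DRM_PLANE_TYPE_OVERLAY;
struct drm_plane *plane;

plane = dpu_plane_init(dev, pipe_id, type, possible_crtcs, 0);
/* error checking as done by the real caller, elided here */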
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index f6fe6ddc7a3a..7fed0b627708 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -122,7 +122,7 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error);
* dpu_plane_init - create new dpu plane for the given pipe
* @dev: Pointer to DRM device
* @pipe: dpu hardware pipe identifier
- * @primary_plane: true if this pipe is primary plane for crtc
+ * @type: Plane type - PRIMARY/OVERLAY/CURSOR
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
* @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
* a regular plane initialization. A non-zero primary plane
@@ -130,7 +130,7 @@ void dpu_plane_set_error(struct drm_plane *plane, bool error);
*
*/
struct drm_plane *dpu_plane_init(struct drm_device *dev,
- uint32_t pipe, bool primary_plane,
+ uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs, u32 master_plane_id);
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
index a75eebca2f37..fc14116789f2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
@@ -145,6 +145,7 @@ int dpu_power_resource_enable(struct dpu_power_handle *phandle,
bool changed = false;
u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
struct dpu_power_client *client;
+ u32 event_type;
if (!phandle || !pclient) {
pr_err("invalid input argument\n");
@@ -181,19 +182,9 @@ int dpu_power_resource_enable(struct dpu_power_handle *phandle,
if (!changed)
goto end;
- if (enable) {
- dpu_power_event_trigger_locked(phandle,
- DPU_POWER_EVENT_PRE_ENABLE);
- dpu_power_event_trigger_locked(phandle,
- DPU_POWER_EVENT_POST_ENABLE);
-
- } else {
- dpu_power_event_trigger_locked(phandle,
- DPU_POWER_EVENT_PRE_DISABLE);
- dpu_power_event_trigger_locked(phandle,
- DPU_POWER_EVENT_POST_DISABLE);
- }
+ event_type = enable ? DPU_POWER_EVENT_ENABLE : DPU_POWER_EVENT_DISABLE;
+ dpu_power_event_trigger_locked(phandle, event_type);
end:
mutex_unlock(&phandle->phandle_lock);
return 0;
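
With the PRE/POST split dropped, the handler fires a single event per power transition and listeners filter on a bitmask. A small self-contained demo of that selection; the listener callback here is hypothetical:

#include <stdio.h>

#define BIT(n)			(1UL << (n))
#define DPU_POWER_EVENT_DISABLE	BIT(0)
#define DPU_POWER_EVENT_ENABLE	BIT(1)

/* hypothetical listener: reacts only to the events it registered for */
static void event_trigger(unsigned long registered, unsigned long event_type)
{
	if (registered & event_type)
		printf("handling event 0x%lx\n", event_type);
}

int main(void)
{
	int enable = 1;
	unsigned long event_type = enable ? DPU_POWER_EVENT_ENABLE
					  : DPU_POWER_EVENT_DISABLE;

	/* a listener registered for both edges */
	event_trigger(DPU_POWER_EVENT_ENABLE | DPU_POWER_EVENT_DISABLE,
		      event_type);
	return 0;
}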
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
index 344f74464eca..a65b7a297f21 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
@@ -23,17 +23,9 @@
#include "dpu_io_util.h"
-/* event will be triggered before power handler disable */
-#define DPU_POWER_EVENT_PRE_DISABLE 0x1
-
-/* event will be triggered after power handler disable */
-#define DPU_POWER_EVENT_POST_DISABLE 0x2
-
-/* event will be triggered before power handler enable */
-#define DPU_POWER_EVENT_PRE_ENABLE 0x4
-
-/* event will be triggered after power handler enable */
-#define DPU_POWER_EVENT_POST_ENABLE 0x8
+/* events will be triggered on power handler enable/disable */
+#define DPU_POWER_EVENT_DISABLE BIT(0)
+#define DPU_POWER_EVENT_ENABLE BIT(1)
/**
* mdss_bus_vote_type: register bus vote type
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 13c0a36d4ef9..bdb117709674 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -16,7 +16,6 @@
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
-#include "dpu_hw_cdm.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
#include "dpu_encoder.h"
@@ -25,38 +24,13 @@
#define RESERVED_BY_OTHER(h, r) \
((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
-#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
-#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
-#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS))
-#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
- (t).num_comp_enc == (r).num_enc && \
- (t).num_intf == (r).num_intf)
-
-struct dpu_rm_topology_def {
- enum dpu_rm_topology_name top_name;
- int num_lm;
- int num_comp_enc;
- int num_intf;
- int num_ctl;
- int needs_split_display;
-};
-
-static const struct dpu_rm_topology_def g_top_table[] = {
- { DPU_RM_TOPOLOGY_NONE, 0, 0, 0, 0, false },
- { DPU_RM_TOPOLOGY_SINGLEPIPE, 1, 0, 1, 1, false },
- { DPU_RM_TOPOLOGY_DUALPIPE, 2, 0, 2, 2, true },
- { DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, 2, 0, 1, 1, false },
-};
-
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
- * @top_ctrl: topology control preference from kernel client
- * @top: selected topology for the display
+ * @topology: selected topology for the display
* @hw_res: Hardware resources required as reported by the encoders
*/
struct dpu_rm_requirements {
- uint64_t top_ctrl;
- const struct dpu_rm_topology_def *topology;
+ struct msm_display_topology topology;
struct dpu_encoder_hw_resources hw_res;
};
@@ -72,13 +46,11 @@ struct dpu_rm_requirements {
* @enc_id: Reservations are tracked by Encoder DRM object ID.
* CRTCs may be connected to multiple Encoders.
* An encoder or connector id identifies the display path.
- * @topology DRM<->HW topology use case
*/
struct dpu_rm_rsvp {
struct list_head list;
uint32_t seq;
uint32_t enc_id;
- enum dpu_rm_topology_name topology;
};
/**
@@ -122,8 +94,8 @@ static void _dpu_rm_print_rsvps(
DPU_DEBUG("%d\n", stage);
list_for_each_entry(rsvp, &rm->rsvps, list) {
- DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
- rsvp->enc_id, rsvp->topology);
+ DRM_DEBUG_KMS("%d rsvp[s%ue%u]\n", stage, rsvp->seq,
+ rsvp->enc_id);
}
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
@@ -146,18 +118,6 @@ struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
return rm->hw_mdp;
}
-enum dpu_rm_topology_name
-dpu_rm_get_topology_name(struct msm_display_topology topology)
-{
- int i;
-
- for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
- if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
- return g_top_table[i].top_name;
-
- return DPU_RM_TOPOLOGY_NONE;
-}
-
void dpu_rm_init_hw_iter(
struct dpu_rm_hw_iter *iter,
uint32_t enc_id,
@@ -229,9 +189,6 @@ static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
case DPU_HW_BLK_CTL:
dpu_hw_ctl_destroy(hw);
break;
- case DPU_HW_BLK_CDM:
- dpu_hw_cdm_destroy(hw);
- break;
case DPU_HW_BLK_PINGPONG:
dpu_hw_pingpong_destroy(hw);
break;
@@ -305,9 +262,6 @@ static int _dpu_rm_hw_blk_create(
case DPU_HW_BLK_CTL:
hw = dpu_hw_ctl_init(id, mmio, cat);
break;
- case DPU_HW_BLK_CDM:
- hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp);
- break;
case DPU_HW_BLK_PINGPONG:
hw = dpu_hw_pingpong_init(id, mmio, cat);
break;
@@ -438,15 +392,6 @@ int dpu_rm_init(struct dpu_rm *rm,
}
}
- for (i = 0; i < cat->cdm_count; i++) {
- rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM,
- cat->cdm[i].id, &cat->cdm[i]);
- if (rc) {
- DPU_ERROR("failed: cdm hw not available\n");
- goto fail;
- }
- }
-
return 0;
fail:
@@ -455,6 +400,11 @@ fail:
return rc;
}
+static bool _dpu_rm_needs_split_display(const struct msm_display_topology *top)
+{
+ return top->num_intf > 1;
+}
+
/**
* _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
* proposed use case requirements, incl. hardwired dependent blocks like
@@ -538,14 +488,14 @@ static int _dpu_rm_reserve_lms(
int lm_count = 0;
int i, rc = 0;
- if (!reqs->topology->num_lm) {
- DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
+ if (!reqs->topology.num_lm) {
+ DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
return -EINVAL;
}
/* Find a primary mixer */
dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
- while (lm_count != reqs->topology->num_lm &&
+ while (lm_count != reqs->topology.num_lm &&
_dpu_rm_get_hw_locked(rm, &iter_i)) {
memset(&lm, 0, sizeof(lm));
memset(&pp, 0, sizeof(pp));
@@ -563,7 +513,7 @@ static int _dpu_rm_reserve_lms(
/* Valid primary mixer found, find matching peers */
dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
- while (lm_count != reqs->topology->num_lm &&
+ while (lm_count != reqs->topology.num_lm &&
_dpu_rm_get_hw_locked(rm, &iter_j)) {
if (iter_i.blk == iter_j.blk)
continue;
@@ -578,7 +528,7 @@ static int _dpu_rm_reserve_lms(
}
}
- if (lm_count != reqs->topology->num_lm) {
+ if (lm_count != reqs->topology.num_lm) {
DPU_DEBUG("unable to find appropriate mixers\n");
return -ENAVAIL;
}
@@ -600,14 +550,20 @@ static int _dpu_rm_reserve_lms(
static int _dpu_rm_reserve_ctls(
struct dpu_rm *rm,
struct dpu_rm_rsvp *rsvp,
- const struct dpu_rm_topology_def *top)
+ const struct msm_display_topology *top)
{
struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
struct dpu_rm_hw_iter iter;
- int i = 0;
+ int i = 0, num_ctls = 0;
+ bool needs_split_display = false;
memset(&ctls, 0, sizeof(ctls));
+	/* each hw_intf needs its own hw_ctl to program its control path */
+ num_ctls = top->num_intf;
+
+ needs_split_display = _dpu_rm_needs_split_display(top);
+
dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
while (_dpu_rm_get_hw_locked(rm, &iter)) {
const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
@@ -621,20 +577,20 @@ static int _dpu_rm_reserve_ctls(
DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
- if (top->needs_split_display != has_split_display)
+ if (needs_split_display != has_split_display)
continue;
ctls[i] = iter.blk;
DPU_DEBUG("ctl %d match\n", iter.blk->id);
- if (++i == top->num_ctl)
+ if (++i == num_ctls)
break;
}
- if (i != top->num_ctl)
+ if (i != num_ctls)
return -ENAVAIL;
- for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
+ for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) {
ctls[i]->rsvp_nxt = rsvp;
trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
rsvp->enc_id);
@@ -643,55 +599,11 @@ static int _dpu_rm_reserve_ctls(
return 0;
}
-static int _dpu_rm_reserve_cdm(
- struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
- uint32_t id,
- enum dpu_hw_blk_type type)
-{
- struct dpu_rm_hw_iter iter;
-
- DRM_DEBUG_KMS("type %d id %d\n", type, id);
-
- dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM);
- while (_dpu_rm_get_hw_locked(rm, &iter)) {
- const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw);
- const struct dpu_cdm_cfg *caps = cdm->caps;
- bool match = false;
-
- if (RESERVED_BY_OTHER(iter.blk, rsvp))
- continue;
-
- if (type == DPU_HW_BLK_INTF && id != INTF_MAX)
- match = test_bit(id, &caps->intf_connect);
-
- DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n",
- iter.blk->type, iter.blk->id, rsvp->enc_id,
- caps->intf_connect, match);
-
- if (!match)
- continue;
-
- trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type,
- rsvp->enc_id);
- iter.blk->rsvp_nxt = rsvp;
- break;
- }
-
- if (!iter.hw) {
- DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
- return -ENAVAIL;
- }
-
- return 0;
-}
-
static int _dpu_rm_reserve_intf(
struct dpu_rm *rm,
struct dpu_rm_rsvp *rsvp,
uint32_t id,
- enum dpu_hw_blk_type type,
- bool needs_cdm)
+ enum dpu_hw_blk_type type)
{
struct dpu_rm_hw_iter iter;
int ret = 0;
@@ -719,9 +631,6 @@ static int _dpu_rm_reserve_intf(
return -EINVAL;
}
- if (needs_cdm)
- ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type);
-
return ret;
}
@@ -738,7 +647,7 @@ static int _dpu_rm_reserve_intf_related_hw(
continue;
id = i + INTF_0;
ret = _dpu_rm_reserve_intf(rm, rsvp, id,
- DPU_HW_BLK_INTF, hw_res->needs_cdm);
+ DPU_HW_BLK_INTF);
if (ret)
return ret;
}
@@ -750,17 +659,14 @@ static int _dpu_rm_make_next_rsvp(
struct dpu_rm *rm,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
struct dpu_rm_rsvp *rsvp,
struct dpu_rm_requirements *reqs)
{
int ret;
- struct dpu_rm_topology_def topology;
/* Create reservation info, tag reserved blocks with it as we go */
rsvp->seq = ++rm->rsvp_next_seq;
rsvp->enc_id = enc->base.id;
- rsvp->topology = reqs->topology->top_name;
list_add_tail(&rsvp->list, &rm->rsvps);
ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
@@ -769,23 +675,12 @@ static int _dpu_rm_make_next_rsvp(
return ret;
}
- /*
- * Do assignment preferring to give away low-resource CTLs first:
- * - Check mixers without Split Display
- * - Only then allow to grab from CTLs with split display capability
- */
- _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
- if (ret && !reqs->topology->needs_split_display) {
- memcpy(&topology, reqs->topology, sizeof(topology));
- topology.needs_split_display = true;
- _dpu_rm_reserve_ctls(rm, rsvp, &topology);
- }
+ ret = _dpu_rm_reserve_ctls(rm, rsvp, &reqs->topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
- /* Assign INTFs and blks whose usage is tied to them: CTL & CDM */
ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
if (ret)
return ret;
@@ -797,44 +692,16 @@ static int _dpu_rm_populate_requirements(
struct dpu_rm *rm,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
struct dpu_rm_requirements *reqs,
struct msm_display_topology req_topology)
{
- int i;
+ dpu_encoder_get_hw_resources(enc, &reqs->hw_res);
- memset(reqs, 0, sizeof(*reqs));
+ reqs->topology = req_topology;
- dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
-
- for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
- if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
- req_topology)) {
- reqs->topology = &g_top_table[i];
- break;
- }
- }
-
- if (!reqs->topology) {
- DPU_ERROR("invalid topology for the display\n");
- return -EINVAL;
- }
-
- /**
- * Set the requirement based on caps if not set from user space
- * This will ensure to select LM tied with DS blocks
- * Currently, DS blocks are tied with LM 0 and LM 1 (primary display)
- */
- if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
- conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
- reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS);
-
- DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
- reqs->hw_res.display_num_of_h_tiles);
- DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
- reqs->topology->num_lm, reqs->topology->num_ctl,
- reqs->topology->top_name,
- reqs->topology->needs_split_display);
+ DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
+ reqs->topology.num_lm, reqs->topology.num_enc,
+ reqs->topology.num_intf);
return 0;
}
@@ -860,29 +727,12 @@ static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
return NULL;
}
-static struct drm_connector *_dpu_rm_get_connector(
- struct drm_encoder *enc)
-{
- struct drm_connector *conn = NULL;
- struct list_head *connector_list =
- &enc->dev->mode_config.connector_list;
-
- list_for_each_entry(conn, connector_list, head)
- if (conn->encoder == enc)
- return conn;
-
- return NULL;
-}
-
/**
* _dpu_rm_release_rsvp - release resources and release a reservation
* @rm: KMS handle
* @rsvp: RSVP pointer to release and release resources for
*/
-static void _dpu_rm_release_rsvp(
- struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
- struct drm_connector *conn)
+static void _dpu_rm_release_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
{
struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
struct dpu_rm_hw_blk *blk;
@@ -923,7 +773,6 @@ static void _dpu_rm_release_rsvp(
void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
{
struct dpu_rm_rsvp *rsvp;
- struct drm_connector *conn;
if (!rm || !enc) {
DPU_ERROR("invalid params\n");
@@ -938,25 +787,15 @@ void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
goto end;
}
- conn = _dpu_rm_get_connector(enc);
- if (!conn) {
- DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
- goto end;
- }
-
- _dpu_rm_release_rsvp(rm, rsvp, conn);
+ _dpu_rm_release_rsvp(rm, rsvp);
end:
mutex_unlock(&rm->rm_lock);
}
-static int _dpu_rm_commit_rsvp(
- struct dpu_rm *rm,
- struct dpu_rm_rsvp *rsvp,
- struct drm_connector_state *conn_state)
+static void _dpu_rm_commit_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp)
{
struct dpu_rm_hw_blk *blk;
enum dpu_hw_blk_type type;
- int ret = 0;
/* Swap next rsvp to be the active */
for (type = 0; type < DPU_HW_BLK_MAX; type++) {
@@ -967,19 +806,12 @@ static int _dpu_rm_commit_rsvp(
}
}
}
-
- if (!ret)
- DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
- rsvp->topology);
-
- return ret;
}
int dpu_rm_reserve(
struct dpu_rm *rm,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
struct msm_display_topology topology,
bool test_only)
{
@@ -987,25 +819,19 @@ int dpu_rm_reserve(
struct dpu_rm_requirements reqs;
int ret;
- if (!rm || !enc || !crtc_state || !conn_state) {
- DPU_ERROR("invalid arguments\n");
- return -EINVAL;
- }
-
/* Check if this is just a page-flip */
if (!drm_atomic_crtc_needs_modeset(crtc_state))
return 0;
- DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
- conn_state->connector->base.id, enc->base.id,
- crtc_state->crtc->base.id, test_only);
+ DRM_DEBUG_KMS("reserving hw for enc %d crtc %d test_only %d\n",
+ enc->base.id, crtc_state->crtc->base.id, test_only);
mutex_lock(&rm->rm_lock);
_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
- ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
- conn_state, &reqs, topology);
+ ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs,
+ topology);
if (ret) {
DPU_ERROR("failed to populate hw requirements\n");
goto end;
@@ -1030,28 +856,15 @@ int dpu_rm_reserve(
rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
- /*
- * User can request that we clear out any reservation during the
- * atomic_check phase by using this CLEAR bit
- */
- if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
- DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
- rsvp_cur->seq, rsvp_cur->enc_id);
- _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
- rsvp_cur = NULL;
- _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
- }
-
/* Check the proposed reservation, store it in hw's "next" field */
- ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
- rsvp_nxt, &reqs);
+ ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, rsvp_nxt, &reqs);
_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
if (ret) {
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
- _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
- } else if (test_only && !RM_RQ_LOCK(&reqs)) {
+ _dpu_rm_release_rsvp(rm, rsvp_nxt);
+ } else if (test_only) {
/*
* Normally, if test_only, test the reservation and then undo
* However, if the user requests LOCK, then keep the reservation
@@ -1059,15 +872,11 @@ int dpu_rm_reserve(
*/
DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
rsvp_nxt->seq, rsvp_nxt->enc_id);
- _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ _dpu_rm_release_rsvp(rm, rsvp_nxt);
} else {
- if (test_only && RM_RQ_LOCK(&reqs))
- DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
- rsvp_nxt->seq, rsvp_nxt->enc_id);
-
- _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+ _dpu_rm_release_rsvp(rm, rsvp_cur);
- ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+ _dpu_rm_commit_rsvp(rm, rsvp_nxt);
}
_dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
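
The reservation path now consumes struct msm_display_topology directly and derives what the removed lookup table used to encode. A minimal userspace sketch of that derivation, assuming only the three fields the debug print above reports (num_lm, num_enc, num_intf); the struct here is a reduced stand-in:

#include <stdbool.h>
#include <stdio.h>

/* reduced stand-in for msm_display_topology, fields as used above */
struct msm_display_topology {
	unsigned int num_lm;
	unsigned int num_enc;
	unsigned int num_intf;
};

/* split display whenever more than one interface is driven */
static bool needs_split_display(const struct msm_display_topology *top)
{
	return top->num_intf > 1;
}

int main(void)
{
	struct msm_display_topology dual = { .num_lm = 2, .num_intf = 2 };

	/* each hw_intf gets its own CTL, so the CTL count follows num_intf */
	printf("num_ctls=%u split=%d\n", dual.num_intf,
	       needs_split_display(&dual));
	return 0;
}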
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index ffd1841a6067..b8273bd23801 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -21,39 +21,6 @@
#include "dpu_hw_top.h"
/**
- * enum dpu_rm_topology_name - HW resource use case in use by connector
- * @DPU_RM_TOPOLOGY_NONE: No topology in use currently
- * @DPU_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
- * @DPU_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
- * @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
- */
-enum dpu_rm_topology_name {
- DPU_RM_TOPOLOGY_NONE = 0,
- DPU_RM_TOPOLOGY_SINGLEPIPE,
- DPU_RM_TOPOLOGY_DUALPIPE,
- DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
- DPU_RM_TOPOLOGY_MAX,
-};
-
-/**
- * enum dpu_rm_topology_control - HW resource use case in use by connector
- * @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
- * test, reserve the resources for this display.
- * Normal behavior would not impact the reservation
- * list during the AtomicTest phase.
- * @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
- * release any reservation held by this display.
- * Normal behavior would not impact the
- * reservation list during the AtomicTest phase.
- * @DPU_RM_TOPCTL_DS : Require layer mixers with DS capabilities
- */
-enum dpu_rm_topology_control {
- DPU_RM_TOPCTL_RESERVE_LOCK,
- DPU_RM_TOPCTL_RESERVE_CLEAR,
- DPU_RM_TOPCTL_DS,
-};
-
-/**
* struct dpu_rm - DPU dynamic hardware resource manager
* @dev: device handle for event logging purposes
* @rsvps: list of hardware reservations by each crtc->encoder->connector
@@ -125,7 +92,6 @@ int dpu_rm_destroy(struct dpu_rm *rm);
* @rm: DPU Resource Manager handle
* @drm_enc: DRM Encoder handle
* @crtc_state: Proposed Atomic DRM CRTC State handle
- * @conn_state: Proposed Atomic DRM Connector State handle
* @topology: Pointer to topology info for the display
* @test_only: Atomic-Test phase, discard results (unless property overrides)
* @Return: 0 on Success otherwise -ERROR
@@ -133,7 +99,6 @@ int dpu_rm_destroy(struct dpu_rm *rm);
int dpu_rm_reserve(struct dpu_rm *rm,
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
struct msm_display_topology topology,
bool test_only);
@@ -187,13 +152,4 @@ bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
*/
int dpu_rm_check_property_topctl(uint64_t val);
-/**
- * dpu_rm_get_topology_name - returns the name of the the given topology
- * definition
- * @topology: topology definition
- * @Return: name of the topology
- */
-enum dpu_rm_topology_name
-dpu_rm_get_topology_name(struct msm_display_topology topology);
-
#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index ae0ca5076238..e12c4cefb742 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -468,14 +468,16 @@ TRACE_EVENT(dpu_enc_frame_done_cb,
TRACE_EVENT(dpu_enc_trigger_flush,
TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
- int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret),
+ int pending_kickoff_cnt, int ctl_idx, u32 extra_flush_bits,
+ u32 pending_flush_ret),
TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
- pending_flush_ret),
+ extra_flush_bits, pending_flush_ret),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( enum dpu_intf, intf_idx )
__field( int, pending_kickoff_cnt )
__field( int, ctl_idx )
+ __field( u32, extra_flush_bits )
__field( u32, pending_flush_ret )
),
TP_fast_assign(
@@ -483,12 +485,14 @@ TRACE_EVENT(dpu_enc_trigger_flush,
__entry->intf_idx = intf_idx;
__entry->pending_kickoff_cnt = pending_kickoff_cnt;
__entry->ctl_idx = ctl_idx;
+ __entry->extra_flush_bits = extra_flush_bits;
__entry->pending_flush_ret = pending_flush_ret;
),
TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
- "pending_flush_ret=%u", __entry->drm_id,
- __entry->intf_idx, __entry->pending_kickoff_cnt,
- __entry->ctl_idx, __entry->pending_flush_ret)
+ "extra_flush_bits=0x%x pending_flush_ret=0x%x",
+ __entry->drm_id, __entry->intf_idx,
+ __entry->pending_kickoff_cnt, __entry->ctl_idx,
+ __entry->extra_flush_bits, __entry->pending_flush_ret)
);
DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
@@ -682,37 +686,41 @@ TRACE_EVENT(dpu_crtc_setup_mixer,
TP_STRUCT__entry(
__field( uint32_t, crtc_id )
__field( uint32_t, plane_id )
- __field( struct drm_plane_state*,state )
- __field( struct dpu_plane_state*,pstate )
+ __field( uint32_t, fb_id )
+ __field_struct( struct drm_rect, src_rect )
+ __field_struct( struct drm_rect, dst_rect )
__field( uint32_t, stage_idx )
+ __field( enum dpu_stage, stage )
__field( enum dpu_sspp, sspp )
+ __field( uint32_t, multirect_idx )
+ __field( uint32_t, multirect_mode )
__field( uint32_t, pixel_format )
__field( uint64_t, modifier )
),
TP_fast_assign(
__entry->crtc_id = crtc_id;
__entry->plane_id = plane_id;
- __entry->state = state;
- __entry->pstate = pstate;
+ __entry->fb_id = state ? state->fb->base.id : 0;
+ __entry->src_rect = drm_plane_state_src(state);
+ __entry->dst_rect = drm_plane_state_dest(state);
__entry->stage_idx = stage_idx;
+ __entry->stage = pstate->stage;
__entry->sspp = sspp;
+ __entry->multirect_idx = pstate->multirect_index;
+ __entry->multirect_mode = pstate->multirect_mode;
__entry->pixel_format = pixel_format;
__entry->modifier = modifier;
),
- TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:{%ux%u+%ux%u} "
- "dst:{%ux%u+%ux%u} stage_idx:%u stage:%d, sspp:%d "
+ TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:" DRM_RECT_FP_FMT
+ " dst:" DRM_RECT_FMT " stage_idx:%u stage:%d, sspp:%d "
"multirect_index:%d multirect_mode:%u pix_format:%u "
"modifier:%llu",
- __entry->crtc_id, __entry->plane_id,
- __entry->state->fb ? __entry->state->fb->base.id : -1,
- __entry->state->src_w >> 16, __entry->state->src_h >> 16,
- __entry->state->src_x >> 16, __entry->state->src_y >> 16,
- __entry->state->crtc_w, __entry->state->crtc_h,
- __entry->state->crtc_x, __entry->state->crtc_y,
- __entry->stage_idx, __entry->pstate->stage, __entry->sspp,
- __entry->pstate->multirect_index,
- __entry->pstate->multirect_mode, __entry->pixel_format,
- __entry->modifier)
+ __entry->crtc_id, __entry->plane_id, __entry->fb_id,
+ DRM_RECT_FP_ARG(&__entry->src_rect),
+ DRM_RECT_ARG(&__entry->dst_rect),
+ __entry->stage_idx, __entry->stage, __entry->sspp,
+ __entry->multirect_idx, __entry->multirect_mode,
+ __entry->pixel_format, __entry->modifier)
);
TRACE_EVENT(dpu_crtc_setup_lm_bounds,
@@ -721,15 +729,15 @@ TRACE_EVENT(dpu_crtc_setup_lm_bounds,
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( int, mixer )
- __field( struct drm_rect *, bounds )
+ __field_struct( struct drm_rect, bounds )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->mixer = mixer;
- __entry->bounds = bounds;
+ __entry->bounds = *bounds;
),
TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
- __entry->mixer, DRM_RECT_ARG(__entry->bounds))
+ __entry->mixer, DRM_RECT_ARG(&__entry->bounds))
);
TRACE_EVENT(dpu_crtc_vblank_enable,
@@ -740,21 +748,25 @@ TRACE_EVENT(dpu_crtc_vblank_enable,
__field( uint32_t, drm_id )
__field( uint32_t, enc_id )
__field( bool, enable )
- __field( struct dpu_crtc *, crtc )
+ __field( bool, enabled )
+ __field( bool, suspend )
+ __field( bool, vblank_requested )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enc_id = enc_id;
__entry->enable = enable;
- __entry->crtc = crtc;
+ __entry->enabled = crtc->enabled;
+ __entry->suspend = crtc->suspend;
+ __entry->vblank_requested = crtc->vblank_requested;
),
TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
"vblank_req:%s}",
__entry->drm_id, __entry->enc_id,
__entry->enable ? "true" : "false",
- __entry->crtc->enabled ? "true" : "false",
- __entry->crtc->suspend ? "true" : "false",
- __entry->crtc->vblank_requested ? "true" : "false")
+ __entry->enabled ? "true" : "false",
+ __entry->suspend ? "true" : "false",
+ __entry->vblank_requested ? "true" : "false")
);
DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
@@ -763,18 +775,22 @@ DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( bool, enable )
- __field( struct dpu_crtc *, crtc )
+ __field( bool, enabled )
+ __field( bool, suspend )
+ __field( bool, vblank_requested )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enable = enable;
- __entry->crtc = crtc;
+ __entry->enabled = crtc->enabled;
+ __entry->suspend = crtc->suspend;
+ __entry->vblank_requested = crtc->vblank_requested;
),
TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
__entry->drm_id, __entry->enable ? "true" : "false",
- __entry->crtc->enabled ? "true" : "false",
- __entry->crtc->suspend ? "true" : "false",
- __entry->crtc->vblank_requested ? "true" : "false")
+ __entry->enabled ? "true" : "false",
+ __entry->suspend ? "true" : "false",
+ __entry->vblank_requested ? "true" : "false")
);
DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
@@ -814,24 +830,24 @@ TRACE_EVENT(dpu_plane_set_scanout,
TP_ARGS(index, layout, multirect_index),
TP_STRUCT__entry(
__field( enum dpu_sspp, index )
- __field( struct dpu_hw_fmt_layout*, layout )
+ __field_struct( struct dpu_hw_fmt_layout, layout )
__field( enum dpu_sspp_multirect_index, multirect_index)
),
TP_fast_assign(
__entry->index = index;
- __entry->layout = layout;
+ __entry->layout = *layout;
__entry->multirect_index = multirect_index;
),
TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
- "multirect_index:%d", __entry->index, __entry->layout->width,
- __entry->layout->height, __entry->layout->plane_addr[0],
- __entry->layout->plane_size[0],
- __entry->layout->plane_addr[1],
- __entry->layout->plane_size[1],
- __entry->layout->plane_addr[2],
- __entry->layout->plane_size[2],
- __entry->layout->plane_addr[3],
- __entry->layout->plane_size[3], __entry->multirect_index)
+ "multirect_index:%d", __entry->index, __entry->layout.width,
+ __entry->layout.height, __entry->layout.plane_addr[0],
+ __entry->layout.plane_size[0],
+ __entry->layout.plane_addr[1],
+ __entry->layout.plane_size[1],
+ __entry->layout.plane_addr[2],
+ __entry->layout.plane_size[2],
+ __entry->layout.plane_addr[3],
+ __entry->layout.plane_size[3], __entry->multirect_index)
);
TRACE_EVENT(dpu_plane_disable,
@@ -868,10 +884,6 @@ DECLARE_EVENT_CLASS(dpu_rm_iter_template,
TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
__entry->enc_id)
);
-DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_cdm,
- TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
- TP_ARGS(id, type, enc_id)
-);
DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
TP_ARGS(id, type, enc_id)
@@ -979,16 +991,16 @@ TRACE_EVENT(dpu_core_perf_update_clk,
TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
TP_ARGS(dev, stop_req, clk_rate),
TP_STRUCT__entry(
- __field( struct drm_device *, dev )
+ __string( dev_name, dev->unique )
__field( bool, stop_req )
__field( u64, clk_rate )
),
TP_fast_assign(
- __entry->dev = dev;
+ __assign_str(dev_name, dev->unique);
__entry->stop_req = stop_req;
__entry->clk_rate = clk_rate;
),
- TP_printk("dev:%s stop_req:%s clk_rate:%llu", __entry->dev->unique,
+ TP_printk("dev:%s stop_req:%s clk_rate:%llu", __get_str(dev_name),
__entry->stop_req ? "true" : "false", __entry->clk_rate)
);
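
The tracepoint fixes above all follow the same rule: record values, not pointers, because TP_printk() runs when the trace buffer is read, long after the pointed-to object may be gone. A hedged sketch of the pattern with the standard TRACE_EVENT() helpers; the event name and arguments are made up for illustration:

TRACE_EVENT(example_rect,
	TP_PROTO(uint32_t id, const struct drm_rect *r, const char *name),
	TP_ARGS(id, r, name),
	TP_STRUCT__entry(
		__field(uint32_t, id)
		__field_struct(struct drm_rect, r)	/* copied, not a pointer */
		__string(name, name)			/* string copied too */
	),
	TP_fast_assign(
		__entry->id = id;
		__entry->r = *r;
		__assign_str(name, name);
	),
	TP_printk("id:%u " DRM_RECT_FMT " name:%s", __entry->id,
		  DRM_RECT_ARG(&__entry->r), __get_str(name))
);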
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 7d306c5acd09..7f42c3e68a53 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -185,7 +185,7 @@ static void mdp5_plane_reset(struct drm_plane *plane)
struct mdp5_plane_state *mdp5_state;
if (plane->state && plane->state->fb)
- drm_framebuffer_unreference(plane->state->fb);
+ drm_framebuffer_put(plane->state->fb);
kfree(to_mdp5_plane_state(plane->state));
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
@@ -228,7 +228,7 @@ static void mdp5_plane_destroy_state(struct drm_plane *plane,
struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
if (state->fb)
- drm_framebuffer_unreference(state->fb);
+ drm_framebuffer_put(state->fb);
kfree(pstate);
}
@@ -259,7 +259,6 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
msm_framebuffer_cleanup(fb, kms->aspace);
}
-#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
struct drm_plane_state *state)
{
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index ff8164cc6738..a9768f823290 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -83,6 +83,7 @@ static struct msm_dsi *dsi_init(struct platform_device *pdev)
return ERR_PTR(-ENOMEM);
DBG("dsi probed=%p", msm_dsi);
+ msm_dsi->id = -1;
msm_dsi->pdev = pdev;
platform_set_drvdata(pdev, msm_dsi);
@@ -117,8 +118,13 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
DBG("");
msm_dsi = dsi_init(pdev);
- if (IS_ERR(msm_dsi))
- return PTR_ERR(msm_dsi);
+ if (IS_ERR(msm_dsi)) {
+ /* Don't fail the bind if the dsi port is not connected */
+ if (PTR_ERR(msm_dsi) == -ENODEV)
+ return 0;
+ else
+ return PTR_ERR(msm_dsi);
+ }
priv->dsi[msm_dsi->id] = msm_dsi;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 96fb5f635314..9c6c523eacdc 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1750,6 +1750,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
if (ret) {
dev_err(dev, "%s: invalid lane configuration %d\n",
__func__, ret);
+ ret = -EINVAL;
goto err;
}
@@ -1757,6 +1758,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
device_node = of_graph_get_remote_node(np, 1, 0);
if (!device_node) {
dev_dbg(dev, "%s: no valid device\n", __func__);
+ ret = -ENODEV;
goto err;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 5224010d90e4..80aa6344185e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -839,6 +839,8 @@ void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi)
if (msm_dsi->host)
msm_dsi_host_unregister(msm_dsi->host);
- msm_dsim->dsi[msm_dsi->id] = NULL;
+
+ if (msm_dsi->id >= 0)
+ msm_dsim->dsi[msm_dsi->id] = NULL;
}
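
Initializing msm_dsi->id to -1 gives unregister a sentinel to test, so a DSI instance that never completed bind cannot clear (or index past) a manager slot it never claimed. A small self-contained demo of the same shape; the struct and function names are illustrative only:

#include <stdio.h>

#define MAX_DSI 2

struct msm_dsi_demo {
	int id;				/* -1 until bind assigns a real slot */
};

static struct msm_dsi_demo *slots[MAX_DSI];

/* only clear a slot this instance actually claimed */
static void unregister_demo(struct msm_dsi_demo *d)
{
	if (d->id >= 0)
		slots[d->id] = NULL;
}

int main(void)
{
	struct msm_dsi_demo never_bound = { .id = -1 };

	unregister_demo(&never_bound);	/* safe: no out-of-bounds write */
	printf("ok\n");
	return 0;
}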
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index c1f1779c980f..4bcdeca7479d 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -15,6 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_atomic_uapi.h>
+
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_kms.h"
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c1abad8a8612..4904d0d41409 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -337,7 +337,7 @@ static int msm_drm_uninit(struct device *dev)
mdss->funcs->destroy(ddev);
ddev->dev_private = NULL;
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
kfree(priv);
@@ -452,7 +452,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
- goto err_unref_drm_dev;
+ goto err_put_drm_dev;
}
ddev->dev_private = priv;
@@ -653,8 +653,8 @@ err_destroy_mdss:
mdss->funcs->destroy(ddev);
err_free_priv:
kfree(priv);
-err_unref_drm_dev:
- drm_dev_unref(ddev);
+err_put_drm_dev:
+ drm_dev_put(ddev);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 8e510d5c758a..9d11f321f5a9 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -62,6 +62,8 @@ struct msm_gem_vma;
#define MAX_BRIDGES 8
#define MAX_CONNECTORS 8
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+
struct msm_file_private {
rwlock_t queuelock;
struct list_head submitqueues;
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 349c12f670eb..77263cf97b20 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -119,11 +119,6 @@ static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
return f->fctx->name;
}
-static bool msm_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static bool msm_fence_signaled(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
@@ -133,10 +128,7 @@ static bool msm_fence_signaled(struct dma_fence *fence)
static const struct dma_fence_ops msm_fence_ops = {
.get_driver_name = msm_fence_get_driver_name,
.get_timeline_name = msm_fence_get_timeline_name,
- .enable_signaling = msm_fence_enable_signaling,
.signaled = msm_fence_signaled,
- .wait = dma_fence_default_wait,
- .release = dma_fence_free,
};
struct dma_fence *
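
Dropping the boilerplate callbacks leans on the dma-fence core defaults: .enable_signaling, .wait and .release are optional and fall back to the generic behaviour. A hedged minimal ops table; the example_fence_* callbacks are stand-ins, not functions from this driver:

/* minimal sketch: name callbacks plus a .signaled check, the rest
 * fall back to the dma-fence core defaults */
static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name = example_fence_get_driver_name,
	.get_timeline_name = example_fence_get_timeline_name,
	.signaled = example_fence_signaled,
	/* .enable_signaling, .wait, .release: core defaults */
};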
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 7bd83e0afa97..7a7923e6220d 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -144,7 +144,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
goto out_unlock;
}
- drm_gem_object_reference(obj);
+ drm_gem_object_get(obj);
submit->bos[i].obj = msm_obj;
@@ -396,7 +396,7 @@ static void submit_cleanup(struct msm_gem_submit *submit)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit_unlock_unpin_bo(submit, i, false);
list_del_init(&msm_obj->submit_entry);
- drm_gem_object_unreference(&msm_obj->base);
+ drm_gem_object_put(&msm_obj->base);
}
ww_acquire_fini(&submit->ticket);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 5e808cfec345..11aac8337066 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -41,7 +41,11 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
if (IS_ERR(opp))
return PTR_ERR(opp);
- clk_set_rate(gpu->core_clk, *freq);
+ if (gpu->funcs->gpu_set_freq)
+ gpu->funcs->gpu_set_freq(gpu, (u64)*freq);
+ else
+ clk_set_rate(gpu->core_clk, *freq);
+
dev_pm_opp_put(opp);
return 0;
@@ -51,16 +55,14 @@ static int msm_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
- u64 cycles;
- u32 freq = ((u32) status->current_frequency) / 1000000;
ktime_t time;
- status->current_frequency = (unsigned long) clk_get_rate(gpu->core_clk);
- gpu->funcs->gpu_busy(gpu, &cycles);
-
- status->busy_time = ((u32) (cycles - gpu->devfreq.busy_cycles)) / freq;
+ if (gpu->funcs->gpu_get_freq)
+ status->current_frequency = gpu->funcs->gpu_get_freq(gpu);
+ else
+ status->current_frequency = clk_get_rate(gpu->core_clk);
- gpu->devfreq.busy_cycles = cycles;
+ status->busy_time = gpu->funcs->gpu_busy(gpu);
time = ktime_get();
status->total_time = ktime_us_delta(time, gpu->devfreq.time);
@@ -73,7 +75,10 @@ static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
- *freq = (unsigned long) clk_get_rate(gpu->core_clk);
+ if (gpu->funcs->gpu_get_freq)
+ *freq = gpu->funcs->gpu_get_freq(gpu);
+ else
+ *freq = clk_get_rate(gpu->core_clk);
return 0;
}
@@ -88,7 +93,7 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
static void msm_devfreq_init(struct msm_gpu *gpu)
{
/* We need target support to do devfreq */
- if (!gpu->funcs->gpu_busy || !gpu->core_clk)
+ if (!gpu->funcs->gpu_busy)
return;
msm_devfreq_profile.initial_freq = gpu->fast_rate;
@@ -105,6 +110,8 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
gpu->devfreq.devfreq = NULL;
}
+
+ devfreq_suspend_device(gpu->devfreq.devfreq);
}
static int enable_pwrrail(struct msm_gpu *gpu)
@@ -184,6 +191,14 @@ static int disable_axi(struct msm_gpu *gpu)
return 0;
}
+void msm_gpu_resume_devfreq(struct msm_gpu *gpu)
+{
+ gpu->devfreq.busy_cycles = 0;
+ gpu->devfreq.time = ktime_get();
+
+ devfreq_resume_device(gpu->devfreq.devfreq);
+}
+
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
int ret;
@@ -202,12 +217,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
- if (gpu->devfreq.devfreq) {
- gpu->devfreq.busy_cycles = 0;
- gpu->devfreq.time = ktime_get();
-
- devfreq_resume_device(gpu->devfreq.devfreq);
- }
+ msm_gpu_resume_devfreq(gpu);
gpu->needs_hw_init = true;
@@ -220,8 +230,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
DBG("%s", gpu->name);
- if (gpu->devfreq.devfreq)
- devfreq_suspend_device(gpu->devfreq.devfreq);
+ devfreq_suspend_device(gpu->devfreq.devfreq);
ret = disable_axi(gpu);
if (ret)
@@ -367,8 +376,8 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
-static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, char *comm,
- char *cmd)
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif
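
get_dev_status now hands devfreq a ready-made busy_time from gpu_busy() instead of deriving it from raw cycle counts, and the reported frequency comes from gpu_get_freq() when the GPU manages its own clock (the GMU-driven a6xx case elsewhere in this series). A small self-contained demo of how an ondemand-style governor would read such a status; the struct is a stand-in for devfreq_dev_status and the thresholds are illustrative:

#include <stdio.h>

struct dev_status_demo {
	unsigned long busy_time;	/* us the GPU was busy */
	unsigned long total_time;	/* us in the sampling window */
	unsigned long current_frequency;
};

int main(void)
{
	struct dev_status_demo st = {
		.busy_time = 7500,
		.total_time = 10000,
		.current_frequency = 342000000,
	};
	unsigned int load = st.busy_time * 100 / st.total_time;

	/* illustrative thresholds, not the governor's real tunables */
	if (load > 90)
		printf("load %u%%: ask for a higher OPP\n", load);
	else if (load < 30)
		printf("load %u%%: ask for a lower OPP\n", load);
	else
		printf("load %u%%: stay at %lu Hz\n", load,
		       st.current_frequency);
	return 0;
}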
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 9122ee6e55e4..f82bac086666 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -70,9 +70,11 @@ struct msm_gpu_funcs {
/* for generation specific debugfs: */
int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
- int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
+ unsigned long (*gpu_busy)(struct msm_gpu *gpu);
struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
int (*gpu_state_put)(struct msm_gpu_state *state);
+ unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
+ void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq);
};
struct msm_gpu {
@@ -264,6 +266,7 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
+void msm_gpu_resume_devfreq(struct msm_gpu *gpu);
int msm_gpu_hw_init(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 3aa8a8576abe..cca933458439 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -366,7 +366,7 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
va_list args;
va_start(args, fmt);
- n = vsnprintf(msg, sizeof(msg), fmt, args);
+ n = vscnprintf(msg, sizeof(msg), fmt, args);
va_end(args);
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
@@ -375,11 +375,11 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
rcu_read_lock();
task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
- n = snprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
+ n = scnprintf(msg, sizeof(msg), "%.*s/%d: fence=%u",
TASK_COMM_LEN, task->comm,
pid_nr(submit->pid), submit->seqno);
} else {
- n = snprintf(msg, sizeof(msg), "???/%d: fence=%u",
+ n = scnprintf(msg, sizeof(msg), "???/%d: fence=%u",
pid_nr(submit->pid), submit->seqno);
}
rcu_read_unlock();
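
snprintf()/vsnprintf() return the length the full string would have needed, so sizing the rd_write_section() payload by that value could advance past what was actually stored; the scnprintf() variants return the number of characters really written (excluding the NUL). A userspace demonstration of the difference — scnprintf is kernel-only, so its return convention is emulated here:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char msg[8];

	int n = snprintf(msg, sizeof(msg), "fence=%u", 123456u);

	/* snprintf reports the untruncated length... */
	printf("snprintf returned %d, stored %zu bytes\n", n, strlen(msg));

	/* ...so writing 'n' bytes from msg would overrun; scnprintf()
	 * instead returns min(n, size - 1) on truncation */
	int stored = n < (int)sizeof(msg) ? n : (int)sizeof(msg) - 1;
	printf("scnprintf would have returned %d\n", stored);
	return 0;
}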