Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_pm.c | 589
1 file changed, 310 insertions(+), 279 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 897a791662c5..a26b4eddda25 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3951,68 +3951,68 @@ static void skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
 				  const enum pipe pipe,
 				  const enum plane_id plane_id,
-				  struct skl_ddb_allocation *ddb /* out */)
+				  struct skl_ddb_entry *ddb_y,
+				  struct skl_ddb_entry *ddb_uv)
 {
-	u32 val, val2 = 0;
-	int fourcc, pixel_format;
+	u32 val, val2;
+	u32 fourcc = 0;
 
 	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
 	if (plane_id == PLANE_CURSOR) {
 		val = I915_READ(CUR_BUF_CFG(pipe));
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->plane[pipe][plane_id], val);
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
 		return;
 	}
 
 	val = I915_READ(PLANE_CTL(pipe, plane_id));
 
 	/* No DDB allocated for disabled planes */
-	if (!(val & PLANE_CTL_ENABLE))
-		return;
-
-	pixel_format = val & PLANE_CTL_FORMAT_MASK;
-	fourcc = skl_format_to_fourcc(pixel_format,
-				      val & PLANE_CTL_ORDER_RGBX,
-				      val & PLANE_CTL_ALPHA_MASK);
+	if (val & PLANE_CTL_ENABLE)
+		fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
+					      val & PLANE_CTL_ORDER_RGBX,
+					      val & PLANE_CTL_ALPHA_MASK);
 
-	val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
-	if (fourcc == DRM_FORMAT_NV12 && INTEL_GEN(dev_priv) < 11) {
+	if (INTEL_GEN(dev_priv) >= 11) {
+		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+	} else {
+		val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
 		val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->plane[pipe][plane_id], val2);
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->uv_plane[pipe][plane_id], val);
-	} else {
-		skl_ddb_entry_init_from_hw(dev_priv,
-					   &ddb->plane[pipe][plane_id], val);
+		if (fourcc == DRM_FORMAT_NV12)
+			swap(val, val2);
+
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+		skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
 	}
 }
 
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
-			  struct skl_ddb_allocation *ddb /* out */)
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+			       struct skl_ddb_entry *ddb_y,
+			       struct skl_ddb_entry *ddb_uv)
 {
-	struct intel_crtc *crtc;
-
-	memset(ddb, 0, sizeof(*ddb));
-
-	ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum intel_display_power_domain power_domain;
+	enum pipe pipe = crtc->pipe;
+	enum plane_id plane_id;
 
-	for_each_intel_crtc(&dev_priv->drm, crtc) {
-		enum intel_display_power_domain power_domain;
-		enum plane_id plane_id;
-		enum pipe pipe = crtc->pipe;
+	power_domain = POWER_DOMAIN_PIPE(pipe);
+	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+		return;
 
-		power_domain = POWER_DOMAIN_PIPE(pipe);
-		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
-			continue;
+	for_each_plane_id_on_crtc(crtc, plane_id)
+		skl_ddb_get_hw_plane_state(dev_priv, pipe,
+					   plane_id,
+					   &ddb_y[plane_id],
+					   &ddb_uv[plane_id]);
 
-		for_each_plane_id_on_crtc(crtc, plane_id)
-			skl_ddb_get_hw_plane_state(dev_priv, pipe,
-						   plane_id, ddb);
+	intel_display_power_put(dev_priv, power_domain);
+}
 
-		intel_display_power_put(dev_priv, power_domain);
-	}
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+			  struct skl_ddb_allocation *ddb /* out */)
+{
+	ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
 }
 
 /*
@@ -4410,7 +4410,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	struct drm_crtc *crtc = cstate->base.crtc;
 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
 	struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
 	uint16_t alloc_size, start;
 	uint16_t minimum[I915_MAX_PLANES] = {};
@@ -4423,8 +4422,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	uint16_t total_min_blocks = 0;
 
 	/* Clear the partitioning for disabled planes. */
-	memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
-	memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe]));
+	memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
+	memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
 
 	if (WARN_ON(!state))
 		return 0;
@@ -4471,8 +4470,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 	}
 
 	alloc_size -= total_min_blocks;
-	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
-	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+	cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+	cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
 
 	/*
 	 * 2. Distribute the remaining space in proportion to the amount of
@@ -4503,8 +4502,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 
 		/* Leave disabled planes at (0,0) */
 		if (data_rate) {
-			ddb->plane[pipe][plane_id].start = start;
-			ddb->plane[pipe][plane_id].end = start + plane_blocks;
+			cstate->wm.skl.plane_ddb_y[plane_id].start = start;
+			cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
 		}
 
 		start += plane_blocks;
@@ -4519,8 +4518,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
 
 		if (uv_data_rate) {
-			ddb->uv_plane[pipe][plane_id].start = start;
-			ddb->uv_plane[pipe][plane_id].end =
+			cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
+			cstate->wm.skl.plane_ddb_uv[plane_id].end =
 				start + uv_plane_blocks;
 		}
 
@@ -4617,12 +4616,12 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
 }
 
 static int
-skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
-			    const struct intel_crtc_state *cstate,
+skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
 			    const struct intel_plane_state *intel_pstate,
-			    struct skl_wm_params *wp, int plane_id)
+			    struct skl_wm_params *wp, int color_plane)
 {
 	struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct drm_plane_state *pstate = &intel_pstate->base;
 	const struct drm_framebuffer *fb = pstate->fb;
 	uint32_t interm_pbpl;
@@ -4630,11 +4629,8 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 		to_intel_atomic_state(cstate->base.state);
 	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
 
-	if (!intel_wm_plane_visible(cstate, intel_pstate))
-		return 0;
-
 	/* only NV12 format has two planes */
-	if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) {
+	if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) {
 		DRM_DEBUG_KMS("Non NV12 format have single plane\n");
 		return -EINVAL;
 	}
@@ -4659,10 +4655,10 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 		wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
 	}
 
-	if (plane_id == 1 && wp->is_planar)
+	if (color_plane == 1 && wp->is_planar)
 		wp->width /= 2;
 
-	wp->cpp = fb->format->cpp[plane_id];
+	wp->cpp = fb->format->cpp[color_plane];
 	wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
 							     intel_pstate);
 
@@ -4724,8 +4720,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
 	return 0;
 }
 
-static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
-				const struct intel_crtc_state *cstate,
+static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
 				const struct intel_plane_state *intel_pstate,
 				uint16_t ddb_allocation,
 				int level,
@@ -4733,6 +4728,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 				const struct skl_wm_level *result_prev,
 				struct skl_wm_level *result /* out */)
 {
+	struct drm_i915_private *dev_priv =
+		to_i915(intel_pstate->base.plane->dev);
 	const struct drm_plane_state *pstate = &intel_pstate->base;
 	uint32_t latency = dev_priv->wm.skl_latency[level];
 	uint_fixed_16_16_t method1, method2;
@@ -4743,11 +4740,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
 	uint32_t min_disp_buf_needed;
 
-	if (latency == 0 ||
-	    !intel_wm_plane_visible(cstate, intel_pstate)) {
-		result->plane_en = false;
-		return 0;
-	}
+	if (latency == 0)
+		return level == 0 ? -EINVAL : 0;
 
 	/* Display WA #1141: kbl,cfl */
 	if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
@@ -4844,8 +4838,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 	if ((level > 0 && res_lines > 31) ||
 	    res_blocks >= ddb_allocation ||
 	    min_disp_buf_needed >= ddb_allocation) {
-		result->plane_en = false;
-
 		/*
 		 * If there are no valid level 0 watermarks, then we can't
 		 * support this display configuration.
@@ -4872,27 +4864,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 }
 
 static int
-skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
-		      struct skl_ddb_allocation *ddb,
-		      const struct intel_crtc_state *cstate,
+skl_compute_wm_levels(const struct intel_crtc_state *cstate,
 		      const struct intel_plane_state *intel_pstate,
 		      uint16_t ddb_blocks,
 		      const struct skl_wm_params *wm_params,
-		      struct skl_plane_wm *wm,
 		      struct skl_wm_level *levels)
 {
+	struct drm_i915_private *dev_priv =
+		to_i915(intel_pstate->base.plane->dev);
 	int level, max_level = ilk_wm_max_level(dev_priv);
 	struct skl_wm_level *result_prev = &levels[0];
 	int ret;
 
-	if (WARN_ON(!intel_pstate->base.fb))
-		return -EINVAL;
-
 	for (level = 0; level <= max_level; level++) {
 		struct skl_wm_level *result = &levels[level];
 
-		ret = skl_compute_plane_wm(dev_priv,
-					   cstate,
+		ret = skl_compute_plane_wm(cstate,
 					   intel_pstate,
 					   ddb_blocks,
 					   level,
@@ -4905,9 +4892,6 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
 		result_prev = result;
 	}
 
-	if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
-		wm->is_planar = true;
-
 	return 0;
 }
 
@@ -4935,10 +4919,9 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
 }
 
 static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
-				      struct skl_wm_params *wp,
-				      struct skl_wm_level *wm_l0,
-				      uint16_t ddb_allocation,
-				      struct skl_wm_level *trans_wm /* out */)
+				      const struct skl_wm_params *wp,
+				      struct skl_plane_wm *wm,
+				      uint16_t ddb_allocation)
 {
 	struct drm_device *dev = cstate->base.crtc->dev;
 	const struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4946,16 +4929,13 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
 	const uint16_t trans_amount = 10; /* This is configurable amount */
 	uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
 
-	if (!cstate->base.active)
-		goto exit;
-
 	/* Transition WM are not recommended by HW team for GEN9 */
 	if (INTEL_GEN(dev_priv) <= 9)
-		goto exit;
+		return;
 
 	/* Transition WM don't make any sense if ipc is disabled */
 	if (!dev_priv->ipc_enabled)
-		goto exit;
+		return;
 
 	trans_min = 14;
 	if (INTEL_GEN(dev_priv) >= 11)
@@ -4973,7 +4953,7 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
 	 * Result Blocks is Result Blocks minus 1 and it should work for the
 	 * current platforms.
	 */
-	wm0_sel_res_b = wm_l0->plane_res_b - 1;
+	wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
 
 	if (wp->y_tiled) {
 		trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
@@ -4992,107 +4972,129 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
 		res_blocks += 1;
 
 	if (res_blocks < ddb_allocation) {
-		trans_wm->plane_res_b = res_blocks;
-		trans_wm->plane_en = true;
-		return;
+		wm->trans_wm.plane_res_b = res_blocks;
+		wm->trans_wm.plane_en = true;
 	}
-
-exit:
-	trans_wm->plane_en = false;
 }
 
-static int __skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
-				       struct skl_pipe_wm *pipe_wm,
-				       enum plane_id plane_id,
-				       const struct intel_crtc_state *cstate,
-				       const struct intel_plane_state *pstate,
-				       int color_plane)
+static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
				     const struct intel_plane_state *plane_state,
+				     enum plane_id plane_id, int color_plane)
 {
-	struct drm_i915_private *dev_priv = to_i915(pstate->base.plane->dev);
-	struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
-	enum pipe pipe = to_intel_plane(pstate->base.plane)->pipe;
+	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+	u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
 	struct skl_wm_params wm_params;
-	uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
 	int ret;
 
-	ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate,
+	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
 					  &wm_params, color_plane);
 	if (ret)
 		return ret;
 
-	ret = skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
-				    ddb_blocks, &wm_params, wm, wm->wm);
-
+	ret = skl_compute_wm_levels(crtc_state, plane_state,
+				    ddb_blocks, &wm_params, wm->wm);
 	if (ret)
 		return ret;
 
-	skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
-				  ddb_blocks, &wm->trans_wm);
+	skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks);
 
 	return 0;
 }
 
-static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
-				     struct skl_pipe_wm *pipe_wm,
-				     const struct intel_crtc_state *cstate,
-				     const struct intel_plane_state *pstate)
-{
-	enum plane_id plane_id = to_intel_plane(pstate->base.plane)->id;
-
-	return __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
-}
-
-static int skl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
-				     struct skl_pipe_wm *pipe_wm,
-				     const struct intel_crtc_state *cstate,
-				     const struct intel_plane_state *pstate)
+static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
+				 const struct intel_plane_state *plane_state,
+				 enum plane_id plane_id)
 {
-	struct intel_plane *plane = to_intel_plane(pstate->base.plane);
-	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	enum plane_id plane_id = plane->id;
-	struct skl_plane_wm *wm = &pipe_wm->planes[plane_id];
+	struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+	u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
 	struct skl_wm_params wm_params;
-	enum pipe pipe = plane->pipe;
-	uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
 	int ret;
 
-	ret = __skl_build_plane_wm_single(ddb, pipe_wm, plane_id, cstate, pstate, 0);
-	if (ret)
-		return ret;
+	wm->is_planar = true;
 
 	/* uv plane watermarks must also be validated for NV12/Planar */
-	ddb_blocks = skl_ddb_entry_size(&ddb->uv_plane[pipe][plane_id]);
+	ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+					  &wm_params, 1);
+	if (ret)
+		return ret;
 
-	ret = skl_compute_plane_wm_params(dev_priv, cstate, pstate, &wm_params, 1);
+	ret = skl_compute_wm_levels(crtc_state, plane_state,
+				    ddb_blocks, &wm_params, wm->uv_wm);
 	if (ret)
 		return ret;
 
-	return skl_compute_wm_levels(dev_priv, ddb, cstate, pstate,
-				     ddb_blocks, &wm_params, wm, wm->uv_wm);
+	return 0;
 }
 
-static int icl_build_plane_wm_planar(struct skl_ddb_allocation *ddb,
-				     struct skl_pipe_wm *pipe_wm,
-				     const struct intel_crtc_state *cstate,
-				     const struct intel_plane_state *pstate)
+static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+			      struct intel_crtc_state *crtc_state,
+			      const struct intel_plane_state *plane_state)
 {
+	struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+	const struct drm_framebuffer *fb = plane_state->base.fb;
+	enum plane_id plane_id = plane->id;
 	int ret;
-	enum plane_id y_plane_id = pstate->linked_plane->id;
-	enum plane_id uv_plane_id = to_intel_plane(pstate->base.plane)->id;
 
-	ret = __skl_build_plane_wm_single(ddb, pipe_wm, y_plane_id,
-					  cstate, pstate, 0);
+	if (!intel_wm_plane_visible(crtc_state, plane_state))
+		return 0;
+
+	ret = skl_build_plane_wm_single(crtc_state, plane_state,
+					plane_id, 0);
 	if (ret)
 		return ret;
 
-	return __skl_build_plane_wm_single(ddb, pipe_wm, uv_plane_id,
-					   cstate, pstate, 1);
+	if (fb->format->is_yuv && fb->format->num_planes > 1) {
+		ret = skl_build_plane_wm_uv(crtc_state, plane_state,
+					    plane_id);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+			      struct intel_crtc_state *crtc_state,
+			      const struct intel_plane_state *plane_state)
+{
+	enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
+	int ret;
+
+	/* Watermarks calculated in master */
+	if (plane_state->slave)
+		return 0;
+
+	if (plane_state->linked_plane) {
+		const struct drm_framebuffer *fb = plane_state->base.fb;
+		enum plane_id y_plane_id = plane_state->linked_plane->id;
+
+		WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
+		WARN_ON(!fb->format->is_yuv ||
+			fb->format->num_planes == 1);
+
+		ret = skl_build_plane_wm_single(crtc_state, plane_state,
+						y_plane_id, 0);
+		if (ret)
+			return ret;
+
+		ret = skl_build_plane_wm_single(crtc_state, plane_state,
+						plane_id, 1);
+		if (ret)
+			return ret;
+	} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
+		ret = skl_build_plane_wm_single(crtc_state, plane_state,
+						plane_id, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
-			     struct skl_ddb_allocation *ddb,
 			     struct skl_pipe_wm *pipe_wm)
 {
+	struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
 	struct drm_crtc_state *crtc_state = &cstate->base;
 	struct drm_plane *plane;
 	const struct drm_plane_state *pstate;
@@ -5108,18 +5110,12 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
 		const struct intel_plane_state *intel_pstate =
 						to_intel_plane_state(pstate);
 
-		/* Watermarks calculated in master */
-		if (intel_pstate->slave)
-			continue;
-
-		if (intel_pstate->linked_plane)
-			ret = icl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
-		else if (intel_pstate->base.fb &&
-			 intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
-			ret = skl_build_plane_wm_planar(ddb, pipe_wm, cstate, intel_pstate);
+		if (INTEL_GEN(dev_priv) >= 11)
+			ret = icl_build_plane_wm(pipe_wm,
+						 cstate, intel_pstate);
 		else
-			ret = skl_build_plane_wm_single(ddb, pipe_wm, cstate, intel_pstate);
-
+			ret = skl_build_plane_wm(pipe_wm,
+						 cstate, intel_pstate);
 		if (ret)
 			return ret;
 	}
@@ -5134,9 +5130,9 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
 				const struct skl_ddb_entry *entry)
 {
 	if (entry->end)
-		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+		I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
 	else
-		I915_WRITE(reg, 0);
+		I915_WRITE_FW(reg, 0);
 }
 
 static void skl_write_wm_level(struct drm_i915_private *dev_priv,
@@ -5151,19 +5147,22 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
 		val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
 	}
 
-	I915_WRITE(reg, val);
+	I915_WRITE_FW(reg, val);
 }
 
-static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
-			       const struct skl_plane_wm *wm,
-			       const struct skl_ddb_allocation *ddb,
-			       enum plane_id plane_id)
+void skl_write_plane_wm(struct intel_plane *plane,
+			const struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = &intel_crtc->base;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	int level, max_level = ilk_wm_max_level(dev_priv);
-	enum pipe pipe = intel_crtc->pipe;
+	enum plane_id plane_id = plane->id;
+	enum pipe pipe = plane->pipe;
+	const struct skl_plane_wm *wm =
+		&crtc_state->wm.skl.optimal.planes[plane_id];
+	const struct skl_ddb_entry *ddb_y =
+		&crtc_state->wm.skl.plane_ddb_y[plane_id];
+	const struct skl_ddb_entry *ddb_uv =
+		&crtc_state->wm.skl.plane_ddb_uv[plane_id];
 
 	for (level = 0; level <= max_level; level++) {
 		skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
@@ -5172,29 +5171,32 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
 	skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
 			   &wm->trans_wm);
 
-	if (wm->is_planar && INTEL_GEN(dev_priv) < 11) {
-		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
-				    &ddb->uv_plane[pipe][plane_id]);
+	if (INTEL_GEN(dev_priv) >= 11) {
 		skl_ddb_entry_write(dev_priv,
-				    PLANE_NV12_BUF_CFG(pipe, plane_id),
-				    &ddb->plane[pipe][plane_id]);
-	} else {
-		skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
-				    &ddb->plane[pipe][plane_id]);
-		if (INTEL_GEN(dev_priv) < 11)
-			I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+				    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+		return;
 	}
+
+	if (wm->is_planar)
+		swap(ddb_y, ddb_uv);
+
+	skl_ddb_entry_write(dev_priv,
+			    PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+	skl_ddb_entry_write(dev_priv,
+			    PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
 }
 
-static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
-				const struct skl_plane_wm *wm,
-				const struct skl_ddb_allocation *ddb)
+void skl_write_cursor_wm(struct intel_plane *plane,
+			 const struct intel_crtc_state *crtc_state)
 {
-	struct drm_crtc *crtc = &intel_crtc->base;
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	int level, max_level = ilk_wm_max_level(dev_priv);
-	enum pipe pipe = intel_crtc->pipe;
+	enum plane_id plane_id = plane->id;
+	enum pipe pipe = plane->pipe;
+	const struct skl_plane_wm *wm =
+		&crtc_state->wm.skl.optimal.planes[plane_id];
+	const struct skl_ddb_entry *ddb =
+		&crtc_state->wm.skl.plane_ddb_y[plane_id];
 
 	for (level = 0; level <= max_level; level++) {
 		skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
@@ -5202,22 +5204,30 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
 	}
 	skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
 
-	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
-			    &ddb->plane[pipe][PLANE_CURSOR]);
+	skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
 }
 
 bool skl_wm_level_equals(const struct skl_wm_level *l1,
			 const struct skl_wm_level *l2)
 {
-	if (l1->plane_en != l2->plane_en)
-		return false;
+	return l1->plane_en == l2->plane_en &&
+	       l1->plane_res_l == l2->plane_res_l &&
+	       l1->plane_res_b == l2->plane_res_b;
+}
 
-	/* If both planes aren't enabled, the rest shouldn't matter */
-	if (!l1->plane_en)
-		return true;
+static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
+				const struct skl_plane_wm *wm1,
+				const struct skl_plane_wm *wm2)
+{
+	int level, max_level = ilk_wm_max_level(dev_priv);
 
-	return (l1->plane_res_l == l2->plane_res_l &&
-		l1->plane_res_b == l2->plane_res_b);
+	for (level = 0; level <= max_level; level++) {
+		if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
+		    !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+			return false;
+	}
+
+	return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
 }
 
 static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -5244,13 +5254,12 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
 static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
			      const struct skl_pipe_wm *old_pipe_wm,
			      struct skl_pipe_wm *pipe_wm, /* out */
-			      struct skl_ddb_allocation *ddb, /* out */
			      bool *changed /* out */)
 {
 	struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
 	int ret;
 
-	ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+	ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
 	if (ret)
 		return ret;
 
@@ -5276,42 +5285,29 @@ pipes_modified(struct drm_atomic_state *state)
 }
 
 static int
-skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
+			    struct intel_crtc_state *new_crtc_state)
 {
-	struct drm_atomic_state *state = cstate->base.state;
-	struct drm_device *dev = state->dev;
-	struct drm_crtc *crtc = cstate->base.crtc;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
-	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
-	struct drm_plane *plane;
-	enum pipe pipe = intel_crtc->pipe;
+	struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	struct intel_plane *plane;
 
-	drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
-		struct drm_plane_state *plane_state;
-		struct intel_plane *linked;
-		enum plane_id plane_id = to_intel_plane(plane)->id;
+	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+		struct intel_plane_state *plane_state;
+		enum plane_id plane_id = plane->id;
 
-		if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
-					&new_ddb->plane[pipe][plane_id]) &&
-		    skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id],
-					&new_ddb->uv_plane[pipe][plane_id]))
+		if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
					&new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
+		    skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
+					&new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
 			continue;
 
-		plane_state = drm_atomic_get_plane_state(state, plane);
+		plane_state = intel_atomic_get_plane_state(state, plane);
 		if (IS_ERR(plane_state))
 			return PTR_ERR(plane_state);
 
-		/* Make sure linked plane is updated too */
-		linked = to_intel_plane_state(plane_state)->linked_plane;
-		if (!linked)
-			continue;
-
-		plane_state = drm_atomic_get_plane_state(state, &linked->base);
-		if (IS_ERR(plane_state))
-			return PTR_ERR(plane_state);
+		new_crtc_state->update_planes |= BIT(plane_id);
 	}
 
 	return 0;
@@ -5323,18 +5319,21 @@ skl_compute_ddb(struct drm_atomic_state *state)
 	const struct drm_i915_private *dev_priv = to_i915(state->dev);
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+	struct intel_crtc_state *old_crtc_state;
+	struct intel_crtc_state *new_crtc_state;
 	struct intel_crtc *crtc;
-	struct intel_crtc_state *cstate;
 	int ret, i;
 
 	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
 
-	for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) {
-		ret = skl_allocate_pipe_ddb(cstate, ddb);
+	for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
 		if (ret)
 			return ret;
 
-		ret = skl_ddb_add_affected_planes(cstate);
+		ret = skl_ddb_add_affected_planes(old_crtc_state,
+						  new_crtc_state);
 		if (ret)
 			return ret;
 	}
@@ -5343,36 +5342,29 @@ skl_compute_ddb(struct drm_atomic_state *state)
 }
 
 static void
-skl_print_wm_changes(const struct drm_atomic_state *state)
+skl_print_wm_changes(struct intel_atomic_state *state)
 {
-	const struct drm_device *dev = state->dev;
-	const struct drm_i915_private *dev_priv = to_i915(dev);
-	const struct intel_atomic_state *intel_state =
-		to_intel_atomic_state(state);
-	const struct drm_crtc *crtc;
-	const struct drm_crtc_state *cstate;
-	const struct intel_plane *intel_plane;
-	const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
-	const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	const struct intel_crtc_state *old_crtc_state;
+	const struct intel_crtc_state *new_crtc_state;
+	struct intel_plane *plane;
+	struct intel_crtc *crtc;
 	int i;
 
-	for_each_new_crtc_in_state(state, crtc, cstate, i) {
-		const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		enum pipe pipe = intel_crtc->pipe;
-
-		for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
-			enum plane_id plane_id = intel_plane->id;
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
+		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+			enum plane_id plane_id = plane->id;
 			const struct skl_ddb_entry *old, *new;
 
-			old = &old_ddb->plane[pipe][plane_id];
-			new = &new_ddb->plane[pipe][plane_id];
+			old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
+			new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
 
 			if (skl_ddb_entry_equal(old, new))
 				continue;
 
 			DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
-				      intel_plane->base.base.id,
-				      intel_plane->base.name,
+				      plane->base.base.id, plane->base.name,
 				      old->start, old->end,
 				      new->start, new->end);
 		}
@@ -5469,6 +5461,66 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
 	return 0;
 }
 
+/*
+ * To make sure the cursor watermark registers are always consistent
+ * with our computed state the following scenario needs special
+ * treatment:
+ *
+ * 1. enable cursor
+ * 2. move cursor entirely offscreen
+ * 3. disable cursor
+ *
+ * Step 2. does call .disable_plane() but does not zero the watermarks
+ * (since we consider an offscreen cursor still active for the purposes
+ * of watermarks). Step 3. would not normally call .disable_plane()
+ * because the actual plane visibility isn't changing, and we don't
+ * deallocate the cursor ddb until the pipe gets disabled. So we must
+ * force step 3. to call .disable_plane() to update the watermark
+ * registers properly.
+ *
+ * Other planes do not suffer from this issues as their watermarks are
+ * calculated based on the actual plane visibility. The only time this
+ * can trigger for the other planes is during the initial readout as the
+ * default value of the watermarks registers is not zero.
+ */
+static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
+				      struct intel_crtc *crtc)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(state, crtc);
+	struct intel_crtc_state *new_crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct intel_plane *plane;
+
+	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+		struct intel_plane_state *plane_state;
+		enum plane_id plane_id = plane->id;
+
+		/*
+		 * Force a full wm update for every plane on modeset.
+		 * Required because the reset value of the wm registers
+		 * is non-zero, whereas we want all disabled planes to
+		 * have zero watermarks. So if we turn off the relevant
+		 * power well the hardware state will go out of sync
+		 * with the software state.
+		 */
+		if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
+		    skl_plane_wm_equals(dev_priv,
+					&old_crtc_state->wm.skl.optimal.planes[plane_id],
+					&new_crtc_state->wm.skl.optimal.planes[plane_id]))
+			continue;
+
+		plane_state = intel_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state))
+			return PTR_ERR(plane_state);
+
+		new_crtc_state->update_planes |= BIT(plane_id);
+	}
+
+	return 0;
+}
+
 static int
 skl_compute_wm(struct drm_atomic_state *state)
 {
@@ -5508,8 +5560,12 @@ skl_compute_wm(struct drm_atomic_state *state)
 			&to_intel_crtc_state(crtc->state)->wm.skl.optimal;
 
 		pipe_wm = &intel_cstate->wm.skl.optimal;
-		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
-					 &results->ddb, &changed);
+		ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
+		if (ret)
+			return ret;
+
+		ret = skl_wm_add_affected_planes(intel_state,
+						 to_intel_crtc(crtc));
 		if (ret)
 			return ret;
 
@@ -5523,7 +5579,7 @@ skl_compute_wm(struct drm_atomic_state *state)
 			intel_cstate->update_wm_pre = true;
 	}
 
-	skl_print_wm_changes(state);
+	skl_print_wm_changes(intel_state);
 
 	return 0;
 }
@@ -5534,23 +5590,12 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
 	struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
-	const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
 	enum pipe pipe = crtc->pipe;
-	enum plane_id plane_id;
 
 	if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
 		return;
 
 	I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
-
-	for_each_plane_id_on_crtc(crtc, plane_id) {
-		if (plane_id != PLANE_CURSOR)
-			skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
-					   ddb, plane_id);
-		else
-			skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
-					    ddb);
-	}
 }
 
 static void skl_initial_wm(struct intel_atomic_state *state,
@@ -5560,8 +5605,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
 	struct drm_device *dev = intel_crtc->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_ddb_values *results = &state->wm_results;
-	struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw;
-	enum pipe pipe = intel_crtc->pipe;
 
 	if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
 		return;
@@ -5571,11 +5614,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
 	if (cstate->base.active_changed)
 		skl_atomic_update_crtc_wm(state, cstate);
 
-	memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe],
-	       sizeof(hw_vals->ddb.uv_plane[pipe]));
-	memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe],
-	       sizeof(hw_vals->ddb.plane[pipe]));
-
 	mutex_unlock(&dev_priv->wm.wm_mutex);
 }
 
@@ -5726,13 +5764,6 @@ void skl_wm_get_hw_state(struct drm_device *dev)
 
 	if (dev_priv->active_crtcs) {
 		/* Fully recompute DDB on first atomic commit */
 		dev_priv->wm.distrust_bios_wm = true;
-	} else {
-		/*
-		 * Easy/common case; just sanitize DDB now if everything off
-		 * Keep dbuf slice info intact
-		 */
-		memset(ddb->plane, 0, sizeof(ddb->plane));
-		memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
 	}
 }