124 files changed, 8129 insertions, 7899 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 918470a04591..8e46f57e4569 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -239,6 +239,7 @@ i915-y += \ display/intel_display_power.o \ display/intel_display_power_map.o \ display/intel_display_power_well.o \ + display/intel_display_rps.o \ display/intel_dmc.o \ display/intel_dpio_phy.o \ display/intel_dpll.o \ @@ -269,7 +270,9 @@ i915-y += \ display/intel_tc.o \ display/intel_vblank.o \ display/intel_vga.o \ + display/intel_wm.o \ display/i9xx_plane.o \ + display/i9xx_wm.o \ display/skl_scaler.o \ display/skl_universal_plane.o \ display/skl_watermark.o diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index fa754038d669..920d570f7594 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -17,6 +17,7 @@ #include "intel_display_power.h" #include "intel_display_types.h" #include "intel_dp.h" +#include "intel_dp_aux.h" #include "intel_dp_link_training.h" #include "intel_dpio_phy.h" #include "intel_fifo_underrun.h" @@ -136,16 +137,12 @@ static void intel_dp_prepare(struct intel_encoder *encoder, intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe); } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) { - u32 trans_dp; - intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; - trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe)); - if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) - trans_dp |= TRANS_DP_ENH_FRAMING; - else - trans_dp &= ~TRANS_DP_ENH_FRAMING; - intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp); + intel_de_rmw(dev_priv, TRANS_DP_CTL(crtc->pipe), + TRANS_DP_ENH_FRAMING, + drm_dp_enhanced_frame_cap(intel_dp->dpcd) ? + TRANS_DP_ENH_FRAMING : 0); } else { if (IS_G4X(dev_priv) && pipe_config->limited_color_range) intel_dp->DP |= DP_COLOR_RANGE_16_235; @@ -1200,29 +1197,6 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder) return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; } -static bool gm45_digital_port_connected(struct intel_encoder *encoder) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 bit; - - switch (encoder->hpd_pin) { - case HPD_PORT_B: - bit = PORTB_HOTPLUG_LIVE_STATUS_GM45; - break; - case HPD_PORT_C: - bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; - break; - case HPD_PORT_D: - bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; - break; - default: - MISSING_CASE(encoder->hpd_pin); - return false; - } - - return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit; -} - static bool ilk_digital_port_connected(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -1279,11 +1253,19 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { bool g4x_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, enum port port) { + const struct intel_bios_encoder_data *devdata; struct intel_digital_port *dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; struct intel_connector *intel_connector; + devdata = intel_bios_encoder_data_lookup(dev_priv, port); + + /* FIXME bail? 
*/ + if (!devdata) + drm_dbg_kms(&dev_priv->drm, "No VBT child device for DP-%c\n", + port_name(port)); + dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); if (!dig_port) return false; @@ -1295,6 +1277,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, intel_encoder = &dig_port->base; encoder = &intel_encoder->base; + intel_encoder->devdata = devdata; + mutex_init(&dig_port->hdcp_mutex); if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, @@ -1377,10 +1361,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, dig_port->hpd_pulse = intel_dp_hpd_pulse; if (HAS_GMCH(dev_priv)) { - if (IS_GM45(dev_priv)) - dig_port->connected = gm45_digital_port_connected; - else - dig_port->connected = g4x_digital_port_connected; + dig_port->connected = g4x_digital_port_connected; } else { if (port == PORT_A) dig_port->connected = ilk_digital_port_connected; @@ -1391,7 +1372,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv, if (port != PORT_A) intel_infoframe_init(dig_port); - dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); + dig_port->aux_ch = intel_dp_aux_ch(intel_encoder); if (!intel_dp_init_connector(dig_port, intel_connector)) goto err_init_connector; diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index 64c3b3990702..448ea26786e0 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -13,6 +13,7 @@ #include "intel_de.h" #include "intel_display_power.h" #include "intel_display_types.h" +#include "intel_dp_aux.h" #include "intel_dpio_phy.h" #include "intel_fifo_underrun.h" #include "intel_hdmi.h" @@ -273,8 +274,8 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state, */ if (pipe_config->pipe_bpp > 24) { - intel_de_write(dev_priv, TRANS_CHICKEN1(pipe), - intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); + intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe), + 0, TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); temp &= ~SDVO_COLOR_FORMAT_MASK; temp |= SDVO_COLOR_FORMAT_8bpc; @@ -290,8 +291,8 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state, intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp); intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg); - intel_de_write(dev_priv, TRANS_CHICKEN1(pipe), - intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE); + intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe), + TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0); } drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio && @@ -548,10 +549,18 @@ intel_hdmi_hotplug(struct intel_encoder *encoder, void g4x_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port) { + const struct intel_bios_encoder_data *devdata; struct intel_digital_port *dig_port; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; + devdata = intel_bios_encoder_data_lookup(dev_priv, port); + + /* FIXME bail? 
*/ + if (!devdata) + drm_dbg_kms(&dev_priv->drm, "No VBT child device for HDMI-%c\n", + port_name(port)); + dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); if (!dig_port) return; @@ -564,6 +573,8 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv, intel_encoder = &dig_port->base; + intel_encoder->devdata = devdata; + mutex_init(&dig_port->hdcp_mutex); drm_encoder_init(&dev_priv->drm, &intel_encoder->base, @@ -629,6 +640,6 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv, intel_infoframe_init(dig_port); - dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); + dig_port->aux_ch = intel_dp_aux_ch(intel_encoder); intel_hdmi_init_connector(dig_port, intel_connector); } diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c new file mode 100644 index 000000000000..caef72d38798 --- /dev/null +++ b/drivers/gpu/drm/i915/display/i9xx_wm.c @@ -0,0 +1,4047 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "i915_drv.h" +#include "i9xx_wm.h" +#include "intel_atomic.h" +#include "intel_display.h" +#include "intel_display_trace.h" +#include "intel_mchbar_regs.h" +#include "intel_wm.h" +#include "skl_watermark.h" +#include "vlv_sideband.h" + +/* used in computing the new watermarks state */ +struct intel_wm_config { + unsigned int num_pipes_active; + bool sprites_enabled; + bool sprites_scaled; +}; + +struct cxsr_latency { + bool is_desktop : 1; + bool is_ddr3 : 1; + u16 fsb_freq; + u16 mem_freq; + u16 display_sr; + u16 display_hpll_disable; + u16 cursor_sr; + u16 cursor_hpll_disable; +}; + +static const struct cxsr_latency cxsr_latency_table[] = { + {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ + {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ + {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ + {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ + {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ + + {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ + {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ + {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ + {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ + {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ + + {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ + {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ + {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ + {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ + {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ + + {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ + {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ + {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ + {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ + {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ + + {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ + {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ + {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ + {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ + {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ + + {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ + {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ + {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ + {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ + {0, 1, 400, 800, 6042, 
36042, 6584, 36584}, /* DDR3-800 SC */ +}; + +static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop, + bool is_ddr3, + int fsb, + int mem) +{ + const struct cxsr_latency *latency; + int i; + + if (fsb == 0 || mem == 0) + return NULL; + + for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { + latency = &cxsr_latency_table[i]; + if (is_desktop == latency->is_desktop && + is_ddr3 == latency->is_ddr3 && + fsb == latency->fsb_freq && mem == latency->mem_freq) + return latency; + } + + DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); + + return NULL; +} + +static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) +{ + u32 val; + + vlv_punit_get(dev_priv); + + val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); + if (enable) + val &= ~FORCE_DDR_HIGH_FREQ; + else + val |= FORCE_DDR_HIGH_FREQ; + val &= ~FORCE_DDR_LOW_FREQ; + val |= FORCE_DDR_FREQ_REQ_ACK; + vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); + + if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & + FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) + drm_err(&dev_priv->drm, + "timed out waiting for Punit DDR DVFS request\n"); + + vlv_punit_put(dev_priv); +} + +static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) +{ + u32 val; + + vlv_punit_get(dev_priv); + + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); + if (enable) + val |= DSP_MAXFIFO_PM5_ENABLE; + else + val &= ~DSP_MAXFIFO_PM5_ENABLE; + vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); + + vlv_punit_put(dev_priv); +} + +#define FW_WM(value, plane) \ + (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) + +static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) +{ + bool was_enabled; + u32 val; + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV); + } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) { + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); + } else if (IS_PINEVIEW(dev_priv)) { + val = intel_uncore_read(&dev_priv->uncore, DSPFW3); + was_enabled = val & PINEVIEW_SELF_REFRESH_EN; + if (enable) + val |= PINEVIEW_SELF_REFRESH_EN; + else + val &= ~PINEVIEW_SELF_REFRESH_EN; + intel_uncore_write(&dev_priv->uncore, DSPFW3, val); + intel_uncore_posting_read(&dev_priv->uncore, DSPFW3); + } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) { + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; + val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : + _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val); + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); + } else if (IS_I915GM(dev_priv)) { + /* + * FIXME can't find a bit like this for 915G, and + * yet it does have the related watermark in + * FW_BLC_SELF. What's going on? + */ + was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN; + val = enable ? 
_MASKED_BIT_ENABLE(INSTPM_SELF_EN) : + _MASKED_BIT_DISABLE(INSTPM_SELF_EN); + intel_uncore_write(&dev_priv->uncore, INSTPM, val); + intel_uncore_posting_read(&dev_priv->uncore, INSTPM); + } else { + return false; + } + + trace_intel_memory_cxsr(dev_priv, was_enabled, enable); + + drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n", + str_enabled_disabled(enable), + str_enabled_disabled(was_enabled)); + + return was_enabled; +} + +/** + * intel_set_memory_cxsr - Configure CxSR state + * @dev_priv: i915 device + * @enable: Allow vs. disallow CxSR + * + * Allow or disallow the system to enter a special CxSR + * (C-state self refresh) state. What typically happens in CxSR mode + * is that several display FIFOs may get combined into a single larger + * FIFO for a particular plane (so called max FIFO mode) to allow the + * system to defer memory fetches longer, and the memory will enter + * self refresh. + * + * Note that enabling CxSR does not guarantee that the system enter + * this special mode, nor does it guarantee that the system stays + * in that mode once entered. So this just allows/disallows the system + * to autonomously utilize the CxSR mode. Other factors such as core + * C-states will affect when/if the system actually enters/exits the + * CxSR mode. + * + * Note that on VLV/CHV this actually only controls the max FIFO mode, + * and the system is free to enter/exit memory self refresh at any time + * even when the use of CxSR has been disallowed. + * + * While the system is actually in the CxSR/max FIFO mode, some plane + * control registers will not get latched on vblank. Thus in order to + * guarantee the system will respond to changes in the plane registers + * we must always disallow CxSR prior to making changes to those registers. + * Unfortunately the system will re-evaluate the CxSR conditions at + * frame start which happens after vblank start (which is when the plane + * registers would get latched), so we can't proceed with the plane update + * during the same frame where we disallowed CxSR. + * + * Certain platforms also have a deeper HPLL SR mode. Fortunately the + * HPLL SR mode depends on CxSR itself, so we don't have to hand hold + * the hardware w.r.t. HPLL SR when writing to plane registers. + * Disallowing just CxSR is sufficient. + */ +bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) +{ + bool ret; + + mutex_lock(&dev_priv->display.wm.wm_mutex); + ret = _intel_set_memory_cxsr(dev_priv, enable); + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + dev_priv->display.wm.vlv.cxsr = enable; + else if (IS_G4X(dev_priv)) + dev_priv->display.wm.g4x.cxsr = enable; + mutex_unlock(&dev_priv->display.wm.wm_mutex); + + return ret; +} + +/* + * Latency for FIFO fetches is dependent on several factors: + * - memory configuration (speed, channels) + * - chipset + * - current MCH state + * It can be fairly high in some situations, so here we assume a fairly + * pessimal value. It's a tradeoff between extra memory fetches (if we + * set this value too high, the FIFO will fetch frequently to stay full) + * and power consumption (set it too low to save power and we might see + * FIFO underruns and display "flicker"). + * + * A value of 5us seems to be a good balance; safe for very low end + * platforms but not overly aggressive on lower latency configs. 
+ */ +static const int pessimal_latency_ns = 5000; + +#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \ + ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8)) + +static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; + enum pipe pipe = crtc->pipe; + int sprite0_start, sprite1_start; + u32 dsparb, dsparb2, dsparb3; + + switch (pipe) { + case PIPE_A: + dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); + sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0); + sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4); + break; + case PIPE_B: + dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); + sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8); + sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12); + break; + case PIPE_C: + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); + dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3); + sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16); + sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20); + break; + default: + MISSING_CASE(pipe); + return; + } + + fifo_state->plane[PLANE_PRIMARY] = sprite0_start; + fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start; + fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start; + fifo_state->plane[PLANE_CURSOR] = 63; +} + +static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, + enum i9xx_plane_id i9xx_plane) +{ + u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); + int size; + + size = dsparb & 0x7f; + if (i9xx_plane == PLANE_B) + size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; + + drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", + dsparb, plane_name(i9xx_plane), size); + + return size; +} + +static int i830_get_fifo_size(struct drm_i915_private *dev_priv, + enum i9xx_plane_id i9xx_plane) +{ + u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); + int size; + + size = dsparb & 0x1ff; + if (i9xx_plane == PLANE_B) + size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; + size >>= 1; /* Convert to cachelines */ + + drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", + dsparb, plane_name(i9xx_plane), size); + + return size; +} + +static int i845_get_fifo_size(struct drm_i915_private *dev_priv, + enum i9xx_plane_id i9xx_plane) +{ + u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); + int size; + + size = dsparb & 0x7f; + size >>= 2; /* Convert to cachelines */ + + drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", + dsparb, plane_name(i9xx_plane), size); + + return size; +} + +/* Pineview has different values for various configs */ +static const struct intel_watermark_params pnv_display_wm = { + .fifo_size = PINEVIEW_DISPLAY_FIFO, + .max_wm = PINEVIEW_MAX_WM, + .default_wm = PINEVIEW_DFT_WM, + .guard_size = PINEVIEW_GUARD_WM, + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, +}; + +static const struct intel_watermark_params pnv_display_hplloff_wm = { + .fifo_size = PINEVIEW_DISPLAY_FIFO, + .max_wm = PINEVIEW_MAX_WM, + .default_wm = PINEVIEW_DFT_HPLLOFF_WM, + .guard_size = PINEVIEW_GUARD_WM, + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, +}; + +static const struct intel_watermark_params pnv_cursor_wm = { + .fifo_size = PINEVIEW_CURSOR_FIFO, + 
.max_wm = PINEVIEW_CURSOR_MAX_WM, + .default_wm = PINEVIEW_CURSOR_DFT_WM, + .guard_size = PINEVIEW_CURSOR_GUARD_WM, + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, +}; + +static const struct intel_watermark_params pnv_cursor_hplloff_wm = { + .fifo_size = PINEVIEW_CURSOR_FIFO, + .max_wm = PINEVIEW_CURSOR_MAX_WM, + .default_wm = PINEVIEW_CURSOR_DFT_WM, + .guard_size = PINEVIEW_CURSOR_GUARD_WM, + .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, +}; + +static const struct intel_watermark_params i965_cursor_wm_info = { + .fifo_size = I965_CURSOR_FIFO, + .max_wm = I965_CURSOR_MAX_WM, + .default_wm = I965_CURSOR_DFT_WM, + .guard_size = 2, + .cacheline_size = I915_FIFO_LINE_SIZE, +}; + +static const struct intel_watermark_params i945_wm_info = { + .fifo_size = I945_FIFO_SIZE, + .max_wm = I915_MAX_WM, + .default_wm = 1, + .guard_size = 2, + .cacheline_size = I915_FIFO_LINE_SIZE, +}; + +static const struct intel_watermark_params i915_wm_info = { + .fifo_size = I915_FIFO_SIZE, + .max_wm = I915_MAX_WM, + .default_wm = 1, + .guard_size = 2, + .cacheline_size = I915_FIFO_LINE_SIZE, +}; + +static const struct intel_watermark_params i830_a_wm_info = { + .fifo_size = I855GM_FIFO_SIZE, + .max_wm = I915_MAX_WM, + .default_wm = 1, + .guard_size = 2, + .cacheline_size = I830_FIFO_LINE_SIZE, +}; + +static const struct intel_watermark_params i830_bc_wm_info = { + .fifo_size = I855GM_FIFO_SIZE, + .max_wm = I915_MAX_WM / 2, + .default_wm = 1, + .guard_size = 2, + .cacheline_size = I830_FIFO_LINE_SIZE, +}; + +static const struct intel_watermark_params i845_wm_info = { + .fifo_size = I830_FIFO_SIZE, + .max_wm = I915_MAX_WM, + .default_wm = 1, + .guard_size = 2, + .cacheline_size = I830_FIFO_LINE_SIZE, +}; + +/** + * intel_wm_method1 - Method 1 / "small buffer" watermark formula + * @pixel_rate: Pipe pixel rate in kHz + * @cpp: Plane bytes per pixel + * @latency: Memory wakeup latency in 0.1us units + * + * Compute the watermark using the method 1 or "small buffer" + * formula. The caller may additonally add extra cachelines + * to account for TLB misses and clock crossings. + * + * This method is concerned with the short term drain rate + * of the FIFO, ie. it does not account for blanking periods + * which would effectively reduce the average drain rate across + * a longer period. The name "small" refers to the fact the + * FIFO is relatively small compared to the amount of data + * fetched. + * + * The FIFO level vs. time graph might look something like: + * + * |\ |\ + * | \ | \ + * __---__---__ (- plane active, _ blanking) + * -> time + * + * or perhaps like this: + * + * |\|\ |\|\ + * __----__----__ (- plane active, _ blanking) + * -> time + * + * Returns: + * The watermark in bytes + */ +static unsigned int intel_wm_method1(unsigned int pixel_rate, + unsigned int cpp, + unsigned int latency) +{ + u64 ret; + + ret = mul_u32_u32(pixel_rate, cpp * latency); + ret = DIV_ROUND_UP_ULL(ret, 10000); + + return ret; +} + +/** + * intel_wm_method2 - Method 2 / "large buffer" watermark formula + * @pixel_rate: Pipe pixel rate in kHz + * @htotal: Pipe horizontal total + * @width: Plane width in pixels + * @cpp: Plane bytes per pixel + * @latency: Memory wakeup latency in 0.1us units + * + * Compute the watermark using the method 2 or "large buffer" + * formula. The caller may additonally add extra cachelines + * to account for TLB misses and clock crossings. + * + * This method is concerned with the long term drain rate + * of the FIFO, ie. 
it does account for blanking periods + * which effectively reduce the average drain rate across + * a longer period. The name "large" refers to the fact the + * FIFO is relatively large compared to the amount of data + * fetched. + * + * The FIFO level vs. time graph might look something like: + * + * |\___ |\___ + * | \___ | \___ + * | \ | \ + * __ --__--__--__--__--__--__ (- plane active, _ blanking) + * -> time + * + * Returns: + * The watermark in bytes + */ +static unsigned int intel_wm_method2(unsigned int pixel_rate, + unsigned int htotal, + unsigned int width, + unsigned int cpp, + unsigned int latency) +{ + unsigned int ret; + + /* + * FIXME remove once all users are computing + * watermarks in the correct place. + */ + if (WARN_ON_ONCE(htotal == 0)) + htotal = 1; + + ret = (latency * pixel_rate) / (htotal * 10000); + ret = (ret + 1) * width * cpp; + + return ret; +} + +/** + * intel_calculate_wm - calculate watermark level + * @pixel_rate: pixel clock + * @wm: chip FIFO params + * @fifo_size: size of the FIFO buffer + * @cpp: bytes per pixel + * @latency_ns: memory latency for the platform + * + * Calculate the watermark level (the level at which the display plane will + * start fetching from memory again). Each chip has a different display + * FIFO size and allocation, so the caller needs to figure that out and pass + * in the correct intel_watermark_params structure. + * + * As the pixel clock runs, the FIFO will be drained at a rate that depends + * on the pixel size. When it reaches the watermark level, it'll start + * fetching FIFO line sized based chunks from memory until the FIFO fills + * past the watermark point. If the FIFO drains completely, a FIFO underrun + * will occur, and a display engine hang could result. + */ +static unsigned int intel_calculate_wm(int pixel_rate, + const struct intel_watermark_params *wm, + int fifo_size, int cpp, + unsigned int latency_ns) +{ + int entries, wm_size; + + /* + * Note: we need to make sure we don't overflow for various clock & + * latency values. + * clocks go from a few thousand to several hundred thousand. + * latency is usually a few thousand + */ + entries = intel_wm_method1(pixel_rate, cpp, + latency_ns / 100); + entries = DIV_ROUND_UP(entries, wm->cacheline_size) + + wm->guard_size; + DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries); + + wm_size = fifo_size - entries; + DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); + + /* Don't promote wm_size to unsigned... */ + if (wm_size > wm->max_wm) + wm_size = wm->max_wm; + if (wm_size <= 0) + wm_size = wm->default_wm; + + /* + * Bspec seems to indicate that the value shouldn't be lower than + * 'burst size + 1'. Certainly 830 is quite unhappy with low values. + * Lets go for 8 which is the burst size since certain platforms + * already use a hardcoded 8 (which is what the spec says should be + * done). + */ + if (wm_size <= 8) + wm_size = 8; + + return wm_size; +} + +static bool is_disabling(int old, int new, int threshold) +{ + return old >= threshold && new < threshold; +} + +static bool is_enabling(int old, int new, int threshold) +{ + return old < threshold && new >= threshold; +} + +static bool intel_crtc_active(struct intel_crtc *crtc) +{ + /* Be paranoid as we can arrive here with only partial + * state retrieved from the hardware during setup. + * + * We can ditch the adjusted_mode.crtc_clock check as soon + * as Haswell has gained clock readout/fastboot support. 
+ * + * We can ditch the crtc->primary->state->fb check as soon as we can + * properly reconstruct framebuffers. + * + * FIXME: The intel_crtc->active here should be switched to + * crtc->state->active once we have proper CRTC states wired up + * for atomic. + */ + return crtc && crtc->active && crtc->base.primary->state->fb && + crtc->config->hw.adjusted_mode.crtc_clock; +} + +static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc, *enabled = NULL; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + if (intel_crtc_active(crtc)) { + if (enabled) + return NULL; + enabled = crtc; + } + } + + return enabled; +} + +static void pnv_update_wm(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; + const struct cxsr_latency *latency; + u32 reg; + unsigned int wm; + + latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv), + dev_priv->is_ddr3, + dev_priv->fsb_freq, + dev_priv->mem_freq); + if (!latency) { + drm_dbg_kms(&dev_priv->drm, + "Unknown FSB/MEM found, disable CxSR\n"); + intel_set_memory_cxsr(dev_priv, false); + return; + } + + crtc = single_enabled_crtc(dev_priv); + if (crtc) { + const struct drm_framebuffer *fb = + crtc->base.primary->state->fb; + int pixel_rate = crtc->config->pixel_rate; + int cpp = fb->format->cpp[0]; + + /* Display SR */ + wm = intel_calculate_wm(pixel_rate, &pnv_display_wm, + pnv_display_wm.fifo_size, + cpp, latency->display_sr); + reg = intel_uncore_read(&dev_priv->uncore, DSPFW1); + reg &= ~DSPFW_SR_MASK; + reg |= FW_WM(wm, SR); + intel_uncore_write(&dev_priv->uncore, DSPFW1, reg); + drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg); + + /* cursor SR */ + wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm, + pnv_display_wm.fifo_size, + 4, latency->cursor_sr); + intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK, + FW_WM(wm, CURSOR_SR)); + + /* Display HPLL off SR */ + wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm, + pnv_display_hplloff_wm.fifo_size, + cpp, latency->display_hpll_disable); + intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR)); + + /* cursor HPLL off SR */ + wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm, + pnv_display_hplloff_wm.fifo_size, + 4, latency->cursor_hpll_disable); + reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); + reg &= ~DSPFW_HPLL_CURSOR_MASK; + reg |= FW_WM(wm, HPLL_CURSOR); + intel_uncore_write(&dev_priv->uncore, DSPFW3, reg); + drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg); + + intel_set_memory_cxsr(dev_priv, true); + } else { + intel_set_memory_cxsr(dev_priv, false); + } +} + +/* + * Documentation says: + * "If the line size is small, the TLB fetches can get in the way of the + * data fetches, causing some lag in the pixel data return which is not + * accounted for in the above formulas. The following adjustment only + * needs to be applied if eight whole lines fit in the buffer at once. + * The WM is adjusted upwards by the difference between the FIFO size + * and the size of 8 whole lines. This adjustment is always performed + * in the actual pixel depth regardless of whether FBC is enabled or not." 
+ */ +static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp) +{ + int tlb_miss = fifo_size * 64 - width * cpp * 8; + + return max(0, tlb_miss); +} + +static void g4x_write_wm_values(struct drm_i915_private *dev_priv, + const struct g4x_wm_values *wm) +{ + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) + trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); + + intel_uncore_write(&dev_priv->uncore, DSPFW1, + FW_WM(wm->sr.plane, SR) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); + intel_uncore_write(&dev_priv->uncore, DSPFW2, + (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) | + FW_WM(wm->sr.fbc, FBC_SR) | + FW_WM(wm->hpll.fbc, FBC_HPLL_SR) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); + intel_uncore_write(&dev_priv->uncore, DSPFW3, + (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) | + FW_WM(wm->sr.cursor, CURSOR_SR) | + FW_WM(wm->hpll.cursor, HPLL_CURSOR) | + FW_WM(wm->hpll.plane, HPLL_SR)); + + intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); +} + +#define FW_WM_VLV(value, plane) \ + (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) + +static void vlv_write_wm_values(struct drm_i915_private *dev_priv, + const struct vlv_wm_values *wm) +{ + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); + + intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe), + (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | + (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | + (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | + (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT)); + } + + /* + * Zero the (unused) WM1 watermarks, and also clear all the + * high order bits so that there are no out of bounds values + * present in the registers during the reprogramming. 
+ */ + intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0); + intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0); + intel_uncore_write(&dev_priv->uncore, DSPFW4, 0); + intel_uncore_write(&dev_priv->uncore, DSPFW5, 0); + intel_uncore_write(&dev_priv->uncore, DSPFW6, 0); + + intel_uncore_write(&dev_priv->uncore, DSPFW1, + FW_WM(wm->sr.plane, SR) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | + FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); + intel_uncore_write(&dev_priv->uncore, DSPFW2, + FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | + FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); + intel_uncore_write(&dev_priv->uncore, DSPFW3, + FW_WM(wm->sr.cursor, CURSOR_SR)); + + if (IS_CHERRYVIEW(dev_priv)) { + intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV, + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); + intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV, + FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) | + FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE)); + intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV, + FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC)); + intel_uncore_write(&dev_priv->uncore, DSPHOWM, + FW_WM(wm->sr.plane >> 9, SR_HI) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) | + FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); + } else { + intel_uncore_write(&dev_priv->uncore, DSPFW7, + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | + FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); + intel_uncore_write(&dev_priv->uncore, DSPHOWM, + FW_WM(wm->sr.plane >> 9, SR_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | + FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | + FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); + } + + intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); +} + +#undef FW_WM_VLV + +static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv) +{ + /* all latencies in usec */ + dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5; + dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12; + dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35; + + dev_priv->display.wm.num_levels = G4X_WM_LEVEL_HPLL + 1; +} + +static int g4x_plane_fifo_size(enum plane_id plane_id, int level) +{ + /* + * DSPCNTR[13] supposedly controls whether the + * primary plane can use the FIFO space otherwise + * reserved for the sprite plane. It's not 100% clear + * what the actual FIFO size is, but it looks like we + * can happily set both primary and sprite watermarks + * up to 127 cachelines. 
So that would seem to mean + * that either DSPCNTR[13] doesn't do anything, or that + * the total FIFO is >= 256 cachelines in size. Either + * way, we don't seem to have to worry about this + * repartitioning as the maximum watermark value the + * register can hold for each plane is lower than the + * minimum FIFO size. + */ + switch (plane_id) { + case PLANE_CURSOR: + return 63; + case PLANE_PRIMARY: + return level == G4X_WM_LEVEL_NORMAL ? 127 : 511; + case PLANE_SPRITE0: + return level == G4X_WM_LEVEL_NORMAL ? 127 : 0; + default: + MISSING_CASE(plane_id); + return 0; + } +} + +static int g4x_fbc_fifo_size(int level) +{ + switch (level) { + case G4X_WM_LEVEL_SR: + return 7; + case G4X_WM_LEVEL_HPLL: + return 15; + default: + MISSING_CASE(level); + return 0; + } +} + +static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int level) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_display_mode *pipe_mode = + &crtc_state->hw.pipe_mode; + unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10; + unsigned int pixel_rate, htotal, cpp, width, wm; + + if (latency == 0) + return USHRT_MAX; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + cpp = plane_state->hw.fb->format->cpp[0]; + + /* + * WaUse32BppForSRWM:ctg,elk + * + * The spec fails to list this restriction for the + * HPLL watermark, which seems a little strange. + * Let's use 32bpp for the HPLL watermark as well. + */ + if (plane->id == PLANE_PRIMARY && + level != G4X_WM_LEVEL_NORMAL) + cpp = max(cpp, 4u); + + pixel_rate = crtc_state->pixel_rate; + htotal = pipe_mode->crtc_htotal; + width = drm_rect_width(&plane_state->uapi.src) >> 16; + + if (plane->id == PLANE_CURSOR) { + wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); + } else if (plane->id == PLANE_PRIMARY && + level == G4X_WM_LEVEL_NORMAL) { + wm = intel_wm_method1(pixel_rate, cpp, latency); + } else { + unsigned int small, large; + + small = intel_wm_method1(pixel_rate, cpp, latency); + large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); + + wm = min(small, large); + } + + wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level), + width, cpp); + + wm = DIV_ROUND_UP(wm, 64) + 2; + + return min_t(unsigned int, wm, USHRT_MAX); +} + +static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state, + int level, enum plane_id plane_id, u16 value) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + bool dirty = false; + + for (; level < dev_priv->display.wm.num_levels; level++) { + struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; + + dirty |= raw->plane[plane_id] != value; + raw->plane[plane_id] = value; + } + + return dirty; +} + +static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state, + int level, u16 value) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + bool dirty = false; + + /* NORMAL level doesn't have an FBC watermark */ + level = max(level, G4X_WM_LEVEL_SR); + + for (; level < dev_priv->display.wm.num_levels; level++) { + struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; + + dirty |= raw->fbc != value; + raw->fbc = value; + } + + return dirty; +} + +static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + u32 pri_val); + +static bool g4x_raw_plane_wm_compute(struct intel_crtc_state 
*crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + enum plane_id plane_id = plane->id; + bool dirty = false; + int level; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) { + dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0); + if (plane_id == PLANE_PRIMARY) + dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0); + goto out; + } + + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; + int wm, max_wm; + + wm = g4x_compute_wm(crtc_state, plane_state, level); + max_wm = g4x_plane_fifo_size(plane_id, level); + + if (wm > max_wm) + break; + + dirty |= raw->plane[plane_id] != wm; + raw->plane[plane_id] = wm; + + if (plane_id != PLANE_PRIMARY || + level == G4X_WM_LEVEL_NORMAL) + continue; + + wm = ilk_compute_fbc_wm(crtc_state, plane_state, + raw->plane[plane_id]); + max_wm = g4x_fbc_fifo_size(level); + + /* + * FBC wm is not mandatory as we + * can always just disable its use. + */ + if (wm > max_wm) + wm = USHRT_MAX; + + dirty |= raw->fbc != wm; + raw->fbc = wm; + } + + /* mark watermarks as invalid */ + dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); + + if (plane_id == PLANE_PRIMARY) + dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); + + out: + if (dirty) { + drm_dbg_kms(&dev_priv->drm, + "%s watermarks: normal=%d, SR=%d, HPLL=%d\n", + plane->base.name, + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id], + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id], + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]); + + if (plane_id == PLANE_PRIMARY) + drm_dbg_kms(&dev_priv->drm, + "FBC watermarks: SR=%d, HPLL=%d\n", + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc, + crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc); + } + + return dirty; +} + +static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, + enum plane_id plane_id, int level) +{ + const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; + + return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level); +} + +static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, + int level) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + + if (level >= dev_priv->display.wm.num_levels) + return false; + + return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && + g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && + g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); +} + +/* mark all levels starting from 'level' as invalid */ +static void g4x_invalidate_wms(struct intel_crtc *crtc, + struct g4x_wm_state *wm_state, int level) +{ + if (level <= G4X_WM_LEVEL_NORMAL) { + enum plane_id plane_id; + + for_each_plane_id_on_crtc(crtc, plane_id) + wm_state->wm.plane[plane_id] = USHRT_MAX; + } + + if (level <= G4X_WM_LEVEL_SR) { + wm_state->cxsr = false; + wm_state->sr.cursor = USHRT_MAX; + wm_state->sr.plane = USHRT_MAX; + wm_state->sr.fbc = USHRT_MAX; + } + + if (level <= G4X_WM_LEVEL_HPLL) { + wm_state->hpll_en = false; + wm_state->hpll.cursor = USHRT_MAX; + wm_state->hpll.plane = USHRT_MAX; + wm_state->hpll.fbc = USHRT_MAX; + } +} + +static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state, + int level) +{ + if (level < G4X_WM_LEVEL_SR) + return false; + + if (level >= G4X_WM_LEVEL_SR && + wm_state->sr.fbc > 
g4x_fbc_fifo_size(G4X_WM_LEVEL_SR)) + return false; + + if (level >= G4X_WM_LEVEL_HPLL && + wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL)) + return false; + + return true; +} + +static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); + const struct g4x_pipe_wm *raw; + enum plane_id plane_id; + int level; + + level = G4X_WM_LEVEL_NORMAL; + if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) + goto out; + + raw = &crtc_state->wm.g4x.raw[level]; + for_each_plane_id_on_crtc(crtc, plane_id) + wm_state->wm.plane[plane_id] = raw->plane[plane_id]; + + level = G4X_WM_LEVEL_SR; + if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) + goto out; + + raw = &crtc_state->wm.g4x.raw[level]; + wm_state->sr.plane = raw->plane[PLANE_PRIMARY]; + wm_state->sr.cursor = raw->plane[PLANE_CURSOR]; + wm_state->sr.fbc = raw->fbc; + + wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY); + + level = G4X_WM_LEVEL_HPLL; + if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) + goto out; + + raw = &crtc_state->wm.g4x.raw[level]; + wm_state->hpll.plane = raw->plane[PLANE_PRIMARY]; + wm_state->hpll.cursor = raw->plane[PLANE_CURSOR]; + wm_state->hpll.fbc = raw->fbc; + + wm_state->hpll_en = wm_state->cxsr; + + level++; + + out: + if (level == G4X_WM_LEVEL_NORMAL) + return -EINVAL; + + /* invalidate the higher levels */ + g4x_invalidate_wms(crtc, wm_state, level); + + /* + * Determine if the FBC watermark(s) can be used. IF + * this isn't the case we prefer to disable the FBC + * watermark(s) rather than disable the SR/HPLL + * level(s) entirely. 'level-1' is the highest valid + * level here. 
+ */ + wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1); + + return 0; +} + +static int g4x_compute_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *old_plane_state; + const struct intel_plane_state *new_plane_state; + struct intel_plane *plane; + unsigned int dirty = 0; + int i; + + for_each_oldnew_intel_plane_in_state(state, plane, + old_plane_state, + new_plane_state, i) { + if (new_plane_state->hw.crtc != &crtc->base && + old_plane_state->hw.crtc != &crtc->base) + continue; + + if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state)) + dirty |= BIT(plane->id); + } + + if (!dirty) + return 0; + + return _g4x_compute_pipe_wm(crtc_state); +} + +static int g4x_compute_intermediate_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate; + const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal; + const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal; + enum plane_id plane_id; + + if (!new_crtc_state->hw.active || + intel_crtc_needs_modeset(new_crtc_state)) { + *intermediate = *optimal; + + intermediate->cxsr = false; + intermediate->hpll_en = false; + goto out; + } + + intermediate->cxsr = optimal->cxsr && active->cxsr && + !new_crtc_state->disable_cxsr; + intermediate->hpll_en = optimal->hpll_en && active->hpll_en && + !new_crtc_state->disable_cxsr; + intermediate->fbc_en = optimal->fbc_en && active->fbc_en; + + for_each_plane_id_on_crtc(crtc, plane_id) { + intermediate->wm.plane[plane_id] = + max(optimal->wm.plane[plane_id], + active->wm.plane[plane_id]); + + drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] > + g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL)); + } + + intermediate->sr.plane = max(optimal->sr.plane, + active->sr.plane); + intermediate->sr.cursor = max(optimal->sr.cursor, + active->sr.cursor); + intermediate->sr.fbc = max(optimal->sr.fbc, + active->sr.fbc); + + intermediate->hpll.plane = max(optimal->hpll.plane, + active->hpll.plane); + intermediate->hpll.cursor = max(optimal->hpll.cursor, + active->hpll.cursor); + intermediate->hpll.fbc = max(optimal->hpll.fbc, + active->hpll.fbc); + + drm_WARN_ON(&dev_priv->drm, + (intermediate->sr.plane > + g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) || + intermediate->sr.cursor > + g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && + intermediate->cxsr); + drm_WARN_ON(&dev_priv->drm, + (intermediate->sr.plane > + g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || + intermediate->sr.cursor > + g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && + intermediate->hpll_en); + + drm_WARN_ON(&dev_priv->drm, + intermediate->sr.fbc > g4x_fbc_fifo_size(1) && + intermediate->fbc_en && intermediate->cxsr); + drm_WARN_ON(&dev_priv->drm, + intermediate->hpll.fbc > g4x_fbc_fifo_size(2) && + intermediate->fbc_en && intermediate->hpll_en); + +out: + /* + * If our intermediate WM are identical to the final WM, then we can + * omit the post-vblank programming; only update if it's different. 
+ */ + if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) + new_crtc_state->wm.need_postvbl_update = true; + + return 0; +} + +static void g4x_merge_wm(struct drm_i915_private *dev_priv, + struct g4x_wm_values *wm) +{ + struct intel_crtc *crtc; + int num_active_pipes = 0; + + wm->cxsr = true; + wm->hpll_en = true; + wm->fbc_en = true; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; + + if (!crtc->active) + continue; + + if (!wm_state->cxsr) + wm->cxsr = false; + if (!wm_state->hpll_en) + wm->hpll_en = false; + if (!wm_state->fbc_en) + wm->fbc_en = false; + + num_active_pipes++; + } + + if (num_active_pipes != 1) { + wm->cxsr = false; + wm->hpll_en = false; + wm->fbc_en = false; + } + + for_each_intel_crtc(&dev_priv->drm, crtc) { + const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; + enum pipe pipe = crtc->pipe; + + wm->pipe[pipe] = wm_state->wm; + if (crtc->active && wm->cxsr) + wm->sr = wm_state->sr; + if (crtc->active && wm->hpll_en) + wm->hpll = wm_state->hpll; + } +} + +static void g4x_program_watermarks(struct drm_i915_private *dev_priv) +{ + struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x; + struct g4x_wm_values new_wm = {}; + + g4x_merge_wm(dev_priv, &new_wm); + + if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) + return; + + if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) + _intel_set_memory_cxsr(dev_priv, false); + + g4x_write_wm_values(dev_priv, &new_wm); + + if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) + _intel_set_memory_cxsr(dev_priv, true); + + *old_wm = new_wm; +} + +static void g4x_initial_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + mutex_lock(&dev_priv->display.wm.wm_mutex); + crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate; + g4x_program_watermarks(dev_priv); + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +static void g4x_optimize_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (!crtc_state->wm.need_postvbl_update) + return; + + mutex_lock(&dev_priv->display.wm.wm_mutex); + crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; + g4x_program_watermarks(dev_priv); + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +/* latency must be in 0.1us units. 
*/ +static unsigned int vlv_wm_method2(unsigned int pixel_rate, + unsigned int htotal, + unsigned int width, + unsigned int cpp, + unsigned int latency) +{ + unsigned int ret; + + ret = intel_wm_method2(pixel_rate, htotal, + width, cpp, latency); + ret = DIV_ROUND_UP(ret, 64); + + return ret; +} + +static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) +{ + /* all latencies in usec */ + dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; + + dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM2 + 1; + + if (IS_CHERRYVIEW(dev_priv)) { + dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; + dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; + + dev_priv->display.wm.num_levels = VLV_WM_LEVEL_DDR_DVFS + 1; + } +} + +static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int level) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_display_mode *pipe_mode = + &crtc_state->hw.pipe_mode; + unsigned int pixel_rate, htotal, cpp, width, wm; + + if (dev_priv->display.wm.pri_latency[level] == 0) + return USHRT_MAX; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + cpp = plane_state->hw.fb->format->cpp[0]; + pixel_rate = crtc_state->pixel_rate; + htotal = pipe_mode->crtc_htotal; + width = drm_rect_width(&plane_state->uapi.src) >> 16; + + if (plane->id == PLANE_CURSOR) { + /* + * FIXME the formula gives values that are + * too big for the cursor FIFO, and hence we + * would never be able to use cursors. For + * now just hardcode the watermark. + */ + wm = 63; + } else { + wm = vlv_wm_method2(pixel_rate, htotal, width, cpp, + dev_priv->display.wm.pri_latency[level] * 10); + } + + return min_t(unsigned int, wm, USHRT_MAX); +} + +static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes) +{ + return (active_planes & (BIT(PLANE_SPRITE0) | + BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1); +} + +static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct g4x_pipe_wm *raw = + &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2]; + struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); + int num_active_planes = hweight8(active_planes); + const int fifo_size = 511; + int fifo_extra, fifo_left = fifo_size; + int sprite0_fifo_extra = 0; + unsigned int total_rate; + enum plane_id plane_id; + + /* + * When enabling sprite0 after sprite1 has already been enabled + * we tend to get an underrun unless sprite0 already has some + * FIFO space allcoated. Hence we always allocate at least one + * cacheline for sprite0 whenever sprite1 is enabled. + * + * All other plane enable sequences appear immune to this problem. 
+ */ + if (vlv_need_sprite0_fifo_workaround(active_planes)) + sprite0_fifo_extra = 1; + + total_rate = raw->plane[PLANE_PRIMARY] + + raw->plane[PLANE_SPRITE0] + + raw->plane[PLANE_SPRITE1] + + sprite0_fifo_extra; + + if (total_rate > fifo_size) + return -EINVAL; + + if (total_rate == 0) + total_rate = 1; + + for_each_plane_id_on_crtc(crtc, plane_id) { + unsigned int rate; + + if ((active_planes & BIT(plane_id)) == 0) { + fifo_state->plane[plane_id] = 0; + continue; + } + + rate = raw->plane[plane_id]; + fifo_state->plane[plane_id] = fifo_size * rate / total_rate; + fifo_left -= fifo_state->plane[plane_id]; + } + + fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra; + fifo_left -= sprite0_fifo_extra; + + fifo_state->plane[PLANE_CURSOR] = 63; + + fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1); + + /* spread the remainder evenly */ + for_each_plane_id_on_crtc(crtc, plane_id) { + int plane_extra; + + if (fifo_left == 0) + break; + + if ((active_planes & BIT(plane_id)) == 0) + continue; + + plane_extra = min(fifo_extra, fifo_left); + fifo_state->plane[plane_id] += plane_extra; + fifo_left -= plane_extra; + } + + drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0); + + /* give it all to the first plane if none are active */ + if (active_planes == 0) { + drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size); + fifo_state->plane[PLANE_PRIMARY] = fifo_left; + } + + return 0; +} + +/* mark all levels starting from 'level' as invalid */ +static void vlv_invalidate_wms(struct intel_crtc *crtc, + struct vlv_wm_state *wm_state, int level) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + for (; level < dev_priv->display.wm.num_levels; level++) { + enum plane_id plane_id; + + for_each_plane_id_on_crtc(crtc, plane_id) + wm_state->wm[level].plane[plane_id] = USHRT_MAX; + + wm_state->sr[level].cursor = USHRT_MAX; + wm_state->sr[level].plane = USHRT_MAX; + } +} + +static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size) +{ + if (wm > fifo_size) + return USHRT_MAX; + else + return fifo_size - wm; +} + +/* + * Starting from 'level' set all higher + * levels to 'value' in the "raw" watermarks. + */ +static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state, + int level, enum plane_id plane_id, u16 value) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + bool dirty = false; + + for (; level < dev_priv->display.wm.num_levels; level++) { + struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; + + dirty |= raw->plane[plane_id] != value; + raw->plane[plane_id] = value; + } + + return dirty; +} + +static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + enum plane_id plane_id = plane->id; + int level; + bool dirty = false; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) { + dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0); + goto out; + } + + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; + int wm = vlv_compute_wm_level(crtc_state, plane_state, level); + int max_wm = plane_id == PLANE_CURSOR ? 
63 : 511; + + if (wm > max_wm) + break; + + dirty |= raw->plane[plane_id] != wm; + raw->plane[plane_id] = wm; + } + + /* mark all higher levels as invalid */ + dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); + +out: + if (dirty) + drm_dbg_kms(&dev_priv->drm, + "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n", + plane->base.name, + crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id], + crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id], + crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]); + + return dirty; +} + +static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, + enum plane_id plane_id, int level) +{ + const struct g4x_pipe_wm *raw = + &crtc_state->wm.vlv.raw[level]; + const struct vlv_fifo_state *fifo_state = + &crtc_state->wm.vlv.fifo_state; + + return raw->plane[plane_id] <= fifo_state->plane[plane_id]; +} + +static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level) +{ + return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && + vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && + vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) && + vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); +} + +static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; + const struct vlv_fifo_state *fifo_state = + &crtc_state->wm.vlv.fifo_state; + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); + int num_active_planes = hweight8(active_planes); + enum plane_id plane_id; + int level; + + /* initially allow all levels */ + wm_state->num_levels = dev_priv->display.wm.num_levels; + /* + * Note that enabling cxsr with no primary/sprite planes + * enabled can wedge the pipe. Hence we only allow cxsr + * with exactly one enabled primary/sprite plane. 
+ */ + wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1; + + for (level = 0; level < wm_state->num_levels; level++) { + const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; + const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1; + + if (!vlv_raw_crtc_wm_is_valid(crtc_state, level)) + break; + + for_each_plane_id_on_crtc(crtc, plane_id) { + wm_state->wm[level].plane[plane_id] = + vlv_invert_wm_value(raw->plane[plane_id], + fifo_state->plane[plane_id]); + } + + wm_state->sr[level].plane = + vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY], + raw->plane[PLANE_SPRITE0], + raw->plane[PLANE_SPRITE1]), + sr_fifo_size); + + wm_state->sr[level].cursor = + vlv_invert_wm_value(raw->plane[PLANE_CURSOR], + 63); + } + + if (level == 0) + return -EINVAL; + + /* limit to only levels we can actually handle */ + wm_state->num_levels = level; + + /* invalidate the higher levels */ + vlv_invalidate_wms(crtc, wm_state, level); + + return 0; +} + +static int vlv_compute_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_plane_state *old_plane_state; + const struct intel_plane_state *new_plane_state; + struct intel_plane *plane; + unsigned int dirty = 0; + int i; + + for_each_oldnew_intel_plane_in_state(state, plane, + old_plane_state, + new_plane_state, i) { + if (new_plane_state->hw.crtc != &crtc->base && + old_plane_state->hw.crtc != &crtc->base) + continue; + + if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state)) + dirty |= BIT(plane->id); + } + + /* + * DSPARB registers may have been reset due to the + * power well being turned off. Make sure we restore + * them to a consistent state even if no primary/sprite + * planes are initially active. We also force a FIFO + * recomputation so that we are sure to sanitize the + * FIFO setting we took over from the BIOS even if there + * are no active planes on the crtc. 
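For reference, a sketch of the split vlv_compute_fifo() above produces, using made-up level-0 rates (primary 80, sprite0 40, sprite1 off) against the 511-entry budget that the drm_WARN_ON() in vlv_atomic_update_fifo() below expects:

    primary: 511 * 80 / 120 = 340      sprite0: 511 * 40 / 120 = 170

One entry is left over, which the "spread the remainder evenly" loop hands to the primary plane, giving 341 + 170 + 0 = 511; the cursor keeps its fixed 63 entries on top.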
+ */ + if (intel_crtc_needs_modeset(crtc_state)) + dirty = ~0; + + if (!dirty) + return 0; + + /* cursor changes don't warrant a FIFO recompute */ + if (dirty & ~BIT(PLANE_CURSOR)) { + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct vlv_fifo_state *old_fifo_state = + &old_crtc_state->wm.vlv.fifo_state; + const struct vlv_fifo_state *new_fifo_state = + &crtc_state->wm.vlv.fifo_state; + int ret; + + ret = vlv_compute_fifo(crtc_state); + if (ret) + return ret; + + if (intel_crtc_needs_modeset(crtc_state) || + memcmp(old_fifo_state, new_fifo_state, + sizeof(*new_fifo_state)) != 0) + crtc_state->fifo_changed = true; + } + + return _vlv_compute_pipe_wm(crtc_state); +} + +#define VLV_FIFO(plane, value) \ + (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV) + +static void vlv_atomic_update_fifo(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_uncore *uncore = &dev_priv->uncore; + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct vlv_fifo_state *fifo_state = + &crtc_state->wm.vlv.fifo_state; + int sprite0_start, sprite1_start, fifo_size; + u32 dsparb, dsparb2, dsparb3; + + if (!crtc_state->fifo_changed) + return; + + sprite0_start = fifo_state->plane[PLANE_PRIMARY]; + sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start; + fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start; + + drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63); + drm_WARN_ON(&dev_priv->drm, fifo_size != 511); + + trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size); + + /* + * uncore.lock serves a double purpose here. It allows us to + * use the less expensive I915_{READ,WRITE}_FW() functions, and + * it protects the DSPARB registers from getting clobbered by + * parallel updates from multiple pipes. + * + * intel_pipe_update_start() has already disabled interrupts + * for us, so a plain spin_lock() is sufficient here. 
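As a sketch of the packing done below (continuing the made-up 341-entry primary allocation): each FIFO boundary is a 9-bit value, so its low eight bits land in DSPARB and the ninth bit in DSPARB2, e.g. for pipe A

    sprite0_start = 341;                        /* 0x155 */
    dsparb  |= VLV_FIFO(SPRITEA, 341);          /* low bits, 0x55 */
    dsparb2 |= VLV_FIFO(SPRITEA_HI, 341 >> 8);  /* ninth bit, 0x1 */

which is why both registers have to be rewritten together under the same lock.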
+ */ + spin_lock(&uncore->lock); + + switch (crtc->pipe) { + case PIPE_A: + dsparb = intel_uncore_read_fw(uncore, DSPARB); + dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); + + dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) | + VLV_FIFO(SPRITEB, 0xff)); + dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) | + VLV_FIFO(SPRITEB, sprite1_start)); + + dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) | + VLV_FIFO(SPRITEB_HI, 0x1)); + dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) | + VLV_FIFO(SPRITEB_HI, sprite1_start >> 8)); + + intel_uncore_write_fw(uncore, DSPARB, dsparb); + intel_uncore_write_fw(uncore, DSPARB2, dsparb2); + break; + case PIPE_B: + dsparb = intel_uncore_read_fw(uncore, DSPARB); + dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); + + dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) | + VLV_FIFO(SPRITED, 0xff)); + dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) | + VLV_FIFO(SPRITED, sprite1_start)); + + dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) | + VLV_FIFO(SPRITED_HI, 0xff)); + dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) | + VLV_FIFO(SPRITED_HI, sprite1_start >> 8)); + + intel_uncore_write_fw(uncore, DSPARB, dsparb); + intel_uncore_write_fw(uncore, DSPARB2, dsparb2); + break; + case PIPE_C: + dsparb3 = intel_uncore_read_fw(uncore, DSPARB3); + dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); + + dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) | + VLV_FIFO(SPRITEF, 0xff)); + dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) | + VLV_FIFO(SPRITEF, sprite1_start)); + + dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) | + VLV_FIFO(SPRITEF_HI, 0xff)); + dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) | + VLV_FIFO(SPRITEF_HI, sprite1_start >> 8)); + + intel_uncore_write_fw(uncore, DSPARB3, dsparb3); + intel_uncore_write_fw(uncore, DSPARB2, dsparb2); + break; + default: + break; + } + + intel_uncore_posting_read_fw(uncore, DSPARB); + + spin_unlock(&uncore->lock); +} + +#undef VLV_FIFO + +static int vlv_compute_intermediate_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate; + const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal; + const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal; + int level; + + if (!new_crtc_state->hw.active || + intel_crtc_needs_modeset(new_crtc_state)) { + *intermediate = *optimal; + + intermediate->cxsr = false; + goto out; + } + + intermediate->num_levels = min(optimal->num_levels, active->num_levels); + intermediate->cxsr = optimal->cxsr && active->cxsr && + !new_crtc_state->disable_cxsr; + + for (level = 0; level < intermediate->num_levels; level++) { + enum plane_id plane_id; + + for_each_plane_id_on_crtc(crtc, plane_id) { + intermediate->wm[level].plane[plane_id] = + min(optimal->wm[level].plane[plane_id], + active->wm[level].plane[plane_id]); + } + + intermediate->sr[level].plane = min(optimal->sr[level].plane, + active->sr[level].plane); + intermediate->sr[level].cursor = min(optimal->sr[level].cursor, + active->sr[level].cursor); + } + + vlv_invalidate_wms(crtc, intermediate, level); + +out: + /* + * If our intermediate WM are identical to the final WM, then we can + * omit the post-vblank programming; only update if it's different. 
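(vlv_program_watermarks() further down takes similar care with ordering: cxsr, PM5 and DDR DVFS are switched off before the new watermark values are written and only switched back on afterwards, presumably so the hardware never sits in a deep power state with watermarks that do not support it.)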
+ */ + if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) + new_crtc_state->wm.need_postvbl_update = true; + + return 0; +} + +static void vlv_merge_wm(struct drm_i915_private *dev_priv, + struct vlv_wm_values *wm) +{ + struct intel_crtc *crtc; + int num_active_pipes = 0; + + wm->level = dev_priv->display.wm.num_levels - 1; + wm->cxsr = true; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; + + if (!crtc->active) + continue; + + if (!wm_state->cxsr) + wm->cxsr = false; + + num_active_pipes++; + wm->level = min_t(int, wm->level, wm_state->num_levels - 1); + } + + if (num_active_pipes != 1) + wm->cxsr = false; + + if (num_active_pipes > 1) + wm->level = VLV_WM_LEVEL_PM2; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; + enum pipe pipe = crtc->pipe; + + wm->pipe[pipe] = wm_state->wm[wm->level]; + if (crtc->active && wm->cxsr) + wm->sr = wm_state->sr[wm->level]; + + wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2; + wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2; + wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2; + wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2; + } +} + +static void vlv_program_watermarks(struct drm_i915_private *dev_priv) +{ + struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv; + struct vlv_wm_values new_wm = {}; + + vlv_merge_wm(dev_priv, &new_wm); + + if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) + return; + + if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) + chv_set_memory_dvfs(dev_priv, false); + + if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) + chv_set_memory_pm5(dev_priv, false); + + if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) + _intel_set_memory_cxsr(dev_priv, false); + + vlv_write_wm_values(dev_priv, &new_wm); + + if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) + _intel_set_memory_cxsr(dev_priv, true); + + if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) + chv_set_memory_pm5(dev_priv, true); + + if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) + chv_set_memory_dvfs(dev_priv, true); + + *old_wm = new_wm; +} + +static void vlv_initial_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + mutex_lock(&dev_priv->display.wm.wm_mutex); + crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate; + vlv_program_watermarks(dev_priv); + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +static void vlv_optimize_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (!crtc_state->wm.need_postvbl_update) + return; + + mutex_lock(&dev_priv->display.wm.wm_mutex); + crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; + vlv_program_watermarks(dev_priv); + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +static void i965_update_wm(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; + int srwm = 1; + int cursor_sr = 16; + bool cxsr_enabled; + + /* Calc sr entries for one plane configs */ + crtc = single_enabled_crtc(dev_priv); + if (crtc) { + /* self-refresh has much higher latency */ + static const int sr_latency_ns = 12000; + const struct 
drm_display_mode *pipe_mode = + &crtc->config->hw.pipe_mode; + const struct drm_framebuffer *fb = + crtc->base.primary->state->fb; + int pixel_rate = crtc->config->pixel_rate; + int htotal = pipe_mode->crtc_htotal; + int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; + int cpp = fb->format->cpp[0]; + int entries; + + entries = intel_wm_method2(pixel_rate, htotal, + width, cpp, sr_latency_ns / 100); + entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); + srwm = I965_FIFO_SIZE - entries; + if (srwm < 0) + srwm = 1; + srwm &= 0x1ff; + drm_dbg_kms(&dev_priv->drm, + "self-refresh entries: %d, wm: %d\n", + entries, srwm); + + entries = intel_wm_method2(pixel_rate, htotal, + crtc->base.cursor->state->crtc_w, 4, + sr_latency_ns / 100); + entries = DIV_ROUND_UP(entries, + i965_cursor_wm_info.cacheline_size) + + i965_cursor_wm_info.guard_size; + + cursor_sr = i965_cursor_wm_info.fifo_size - entries; + if (cursor_sr > i965_cursor_wm_info.max_wm) + cursor_sr = i965_cursor_wm_info.max_wm; + + drm_dbg_kms(&dev_priv->drm, + "self-refresh watermark: display plane %d " + "cursor %d\n", srwm, cursor_sr); + + cxsr_enabled = true; + } else { + cxsr_enabled = false; + /* Turn off self refresh if both pipes are enabled */ + intel_set_memory_cxsr(dev_priv, false); + } + + drm_dbg_kms(&dev_priv->drm, + "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", + srwm); + + /* 965 has limitations... */ + intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) | + FW_WM(8, CURSORB) | + FW_WM(8, PLANEB) | + FW_WM(8, PLANEA)); + intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) | + FW_WM(8, PLANEC_OLD)); + /* update cursor SR watermark */ + intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); + + if (cxsr_enabled) + intel_set_memory_cxsr(dev_priv, true); +} + +#undef FW_WM + +static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915, + enum i9xx_plane_id i9xx_plane) +{ + struct intel_plane *plane; + + for_each_intel_plane(&i915->drm, plane) { + if (plane->id == PLANE_PRIMARY && + plane->i9xx_plane == i9xx_plane) + return intel_crtc_for_pipe(i915, plane->pipe); + } + + return NULL; +} + +static void i9xx_update_wm(struct drm_i915_private *dev_priv) +{ + const struct intel_watermark_params *wm_info; + u32 fwater_lo; + u32 fwater_hi; + int cwm, srwm = 1; + int fifo_size; + int planea_wm, planeb_wm; + struct intel_crtc *crtc; + + if (IS_I945GM(dev_priv)) + wm_info = &i945_wm_info; + else if (DISPLAY_VER(dev_priv) != 2) + wm_info = &i915_wm_info; + else + wm_info = &i830_a_wm_info; + + if (DISPLAY_VER(dev_priv) == 2) + fifo_size = i830_get_fifo_size(dev_priv, PLANE_A); + else + fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A); + crtc = intel_crtc_for_plane(dev_priv, PLANE_A); + if (intel_crtc_active(crtc)) { + const struct drm_framebuffer *fb = + crtc->base.primary->state->fb; + int cpp; + + if (DISPLAY_VER(dev_priv) == 2) + cpp = 4; + else + cpp = fb->format->cpp[0]; + + planea_wm = intel_calculate_wm(crtc->config->pixel_rate, + wm_info, fifo_size, cpp, + pessimal_latency_ns); + } else { + planea_wm = fifo_size - wm_info->guard_size; + if (planea_wm > (long)wm_info->max_wm) + planea_wm = wm_info->max_wm; + } + + if (DISPLAY_VER(dev_priv) == 2) + wm_info = &i830_bc_wm_info; + + if (DISPLAY_VER(dev_priv) == 2) + fifo_size = i830_get_fifo_size(dev_priv, PLANE_B); + else + fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B); + crtc = intel_crtc_for_plane(dev_priv, PLANE_B); + if (intel_crtc_active(crtc)) { + const struct drm_framebuffer *fb = + 
crtc->base.primary->state->fb; + int cpp; + + if (DISPLAY_VER(dev_priv) == 2) + cpp = 4; + else + cpp = fb->format->cpp[0]; + + planeb_wm = intel_calculate_wm(crtc->config->pixel_rate, + wm_info, fifo_size, cpp, + pessimal_latency_ns); + } else { + planeb_wm = fifo_size - wm_info->guard_size; + if (planeb_wm > (long)wm_info->max_wm) + planeb_wm = wm_info->max_wm; + } + + drm_dbg_kms(&dev_priv->drm, + "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); + + crtc = single_enabled_crtc(dev_priv); + if (IS_I915GM(dev_priv) && crtc) { + struct drm_i915_gem_object *obj; + + obj = intel_fb_obj(crtc->base.primary->state->fb); + + /* self-refresh seems busted with untiled */ + if (!i915_gem_object_is_tiled(obj)) + crtc = NULL; + } + + /* + * Overlay gets an aggressive default since video jitter is bad. + */ + cwm = 2; + + /* Play safe and disable self-refresh before adjusting watermarks. */ + intel_set_memory_cxsr(dev_priv, false); + + /* Calc sr entries for one plane configs */ + if (HAS_FW_BLC(dev_priv) && crtc) { + /* self-refresh has much higher latency */ + static const int sr_latency_ns = 6000; + const struct drm_display_mode *pipe_mode = + &crtc->config->hw.pipe_mode; + const struct drm_framebuffer *fb = + crtc->base.primary->state->fb; + int pixel_rate = crtc->config->pixel_rate; + int htotal = pipe_mode->crtc_htotal; + int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; + int cpp; + int entries; + + if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) + cpp = 4; + else + cpp = fb->format->cpp[0]; + + entries = intel_wm_method2(pixel_rate, htotal, width, cpp, + sr_latency_ns / 100); + entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); + drm_dbg_kms(&dev_priv->drm, + "self-refresh entries: %d\n", entries); + srwm = wm_info->fifo_size - entries; + if (srwm < 0) + srwm = 1; + + if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, + FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); + else + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f); + } + + drm_dbg_kms(&dev_priv->drm, + "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", + planea_wm, planeb_wm, cwm, srwm); + + fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); + fwater_hi = (cwm & 0x1f); + + /* Set request length to 8 cachelines per fetch */ + fwater_lo = fwater_lo | (1 << 24) | (1 << 8); + fwater_hi = fwater_hi | (1 << 8); + + intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); + intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi); + + if (crtc) + intel_set_memory_cxsr(dev_priv, true); +} + +static void i845_update_wm(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; + u32 fwater_lo; + int planea_wm; + + crtc = single_enabled_crtc(dev_priv); + if (crtc == NULL) + return; + + planea_wm = intel_calculate_wm(crtc->config->pixel_rate, + &i845_wm_info, + i845_get_fifo_size(dev_priv, PLANE_A), + 4, pessimal_latency_ns); + fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff; + fwater_lo |= (3<<8) | planea_wm; + + drm_dbg_kms(&dev_priv->drm, + "Setting FIFO watermarks - A: %d\n", planea_wm); + + intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); +} + +/* latency must be in 0.1us units. */ +static unsigned int ilk_wm_method1(unsigned int pixel_rate, + unsigned int cpp, + unsigned int latency) +{ + unsigned int ret; + + ret = intel_wm_method1(pixel_rate, cpp, latency); + ret = DIV_ROUND_UP(ret, 64) + 2; + + return ret; +} + +/* latency must be in 0.1us units. 
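All of the ILK-family latencies are carried in 0.1 us units: the hard-coded wm[0] = 7 in ilk_read_wm_latency() below is the 700 ns LP0 latency, and because the WM1+ fields are stored in 0.5 us units, ilk_compute_wm_level() multiplies them by five, e.g.

    /* an SSKPD WM1 field of 4 => 4 * 0.5us = 2.0us => 20 in 0.1us units */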
*/ +static unsigned int ilk_wm_method2(unsigned int pixel_rate, + unsigned int htotal, + unsigned int width, + unsigned int cpp, + unsigned int latency) +{ + unsigned int ret; + + ret = intel_wm_method2(pixel_rate, htotal, + width, cpp, latency); + ret = DIV_ROUND_UP(ret, 64) + 2; + + return ret; +} + +static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp) +{ + /* + * Neither of these should be possible since this function shouldn't be + * called if the CRTC is off or the plane is invisible. But let's be + * extra paranoid to avoid a potential divide-by-zero if we screw up + * elsewhere in the driver. + */ + if (WARN_ON(!cpp)) + return 0; + if (WARN_ON(!horiz_pixels)) + return 0; + + return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2; +} + +struct ilk_wm_maximums { + u16 pri; + u16 spr; + u16 cur; + u16 fbc; +}; + +/* + * For both WM_PIPE and WM_LP. + * mem_value must be in 0.1us units. + */ +static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + u32 mem_value, bool is_lp) +{ + u32 method1, method2; + int cpp; + + if (mem_value == 0) + return U32_MAX; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + cpp = plane_state->hw.fb->format->cpp[0]; + + method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); + + if (!is_lp) + return method1; + + method2 = ilk_wm_method2(crtc_state->pixel_rate, + crtc_state->hw.pipe_mode.crtc_htotal, + drm_rect_width(&plane_state->uapi.src) >> 16, + cpp, mem_value); + + return min(method1, method2); +} + +/* + * For both WM_PIPE and WM_LP. + * mem_value must be in 0.1us units. + */ +static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + u32 mem_value) +{ + u32 method1, method2; + int cpp; + + if (mem_value == 0) + return U32_MAX; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + cpp = plane_state->hw.fb->format->cpp[0]; + + method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); + method2 = ilk_wm_method2(crtc_state->pixel_rate, + crtc_state->hw.pipe_mode.crtc_htotal, + drm_rect_width(&plane_state->uapi.src) >> 16, + cpp, mem_value); + return min(method1, method2); +} + +/* + * For both WM_PIPE and WM_LP. + * mem_value must be in 0.1us units. + */ +static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + u32 mem_value) +{ + int cpp; + + if (mem_value == 0) + return U32_MAX; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + cpp = plane_state->hw.fb->format->cpp[0]; + + return ilk_wm_method2(crtc_state->pixel_rate, + crtc_state->hw.pipe_mode.crtc_htotal, + drm_rect_width(&plane_state->uapi.src) >> 16, + cpp, mem_value); +} + +/* Only for WM_LP. 
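A worked example of the ilk_wm_fbc() formula above, with illustrative numbers (a 1920-pixel-wide, 4 bytes-per-pixel plane and a primary watermark of 48 FIFO lines):

    DIV_ROUND_UP(48 * 64, 1920 * 4) + 2 == 1 + 2 == 3

i.e. the FBC watermark roughly re-expresses the primary watermark in display lines, plus a two-line guard.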
*/ +static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + u32 pri_val) +{ + int cpp; + + if (!intel_wm_plane_visible(crtc_state, plane_state)) + return 0; + + cpp = plane_state->hw.fb->format->cpp[0]; + + return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16, + cpp); +} + +static unsigned int +ilk_display_fifo_size(const struct drm_i915_private *dev_priv) +{ + if (DISPLAY_VER(dev_priv) >= 8) + return 3072; + else if (DISPLAY_VER(dev_priv) >= 7) + return 768; + else + return 512; +} + +static unsigned int +ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv, + int level, bool is_sprite) +{ + if (DISPLAY_VER(dev_priv) >= 8) + /* BDW primary/sprite plane watermarks */ + return level == 0 ? 255 : 2047; + else if (DISPLAY_VER(dev_priv) >= 7) + /* IVB/HSW primary/sprite plane watermarks */ + return level == 0 ? 127 : 1023; + else if (!is_sprite) + /* ILK/SNB primary plane watermarks */ + return level == 0 ? 127 : 511; + else + /* ILK/SNB sprite plane watermarks */ + return level == 0 ? 63 : 255; +} + +static unsigned int +ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level) +{ + if (DISPLAY_VER(dev_priv) >= 7) + return level == 0 ? 63 : 255; + else + return level == 0 ? 31 : 63; +} + +static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv) +{ + if (DISPLAY_VER(dev_priv) >= 8) + return 31; + else + return 15; +} + +/* Calculate the maximum primary/sprite plane watermark */ +static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, + int level, + const struct intel_wm_config *config, + enum intel_ddb_partitioning ddb_partitioning, + bool is_sprite) +{ + unsigned int fifo_size = ilk_display_fifo_size(dev_priv); + + /* if sprites aren't enabled, sprites get nothing */ + if (is_sprite && !config->sprites_enabled) + return 0; + + /* HSW allows LP1+ watermarks even with multiple pipes */ + if (level == 0 || config->num_pipes_active > 1) { + fifo_size /= INTEL_NUM_PIPES(dev_priv); + + /* + * For some reason the non self refresh + * FIFO size is only half of the self + * refresh FIFO size on ILK/SNB. 
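Illustrative numbers for the level 0 case, assuming an SNB part (512-entry FIFO, two pipes, sprites enabled):

    512 / 2 pipes = 256, halved again per the note above = 128,
    then the 1:1 level 0 split with the sprite leaves 64

which is comfortably below the 127 that the level 0 primary register field can hold.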
+ */ + if (DISPLAY_VER(dev_priv) <= 6) + fifo_size /= 2; + } + + if (config->sprites_enabled) { + /* level 0 is always calculated with 1:1 split */ + if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { + if (is_sprite) + fifo_size *= 5; + fifo_size /= 6; + } else { + fifo_size /= 2; + } + } + + /* clamp to max that the registers can hold */ + return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite)); +} + +/* Calculate the maximum cursor plane watermark */ +static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv, + int level, + const struct intel_wm_config *config) +{ + /* HSW LP1+ watermarks w/ multiple pipes */ + if (level > 0 && config->num_pipes_active > 1) + return 64; + + /* otherwise just report max that registers can hold */ + return ilk_cursor_wm_reg_max(dev_priv, level); +} + +static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv, + int level, + const struct intel_wm_config *config, + enum intel_ddb_partitioning ddb_partitioning, + struct ilk_wm_maximums *max) +{ + max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false); + max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true); + max->cur = ilk_cursor_wm_max(dev_priv, level, config); + max->fbc = ilk_fbc_wm_reg_max(dev_priv); +} + +static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, + int level, + struct ilk_wm_maximums *max) +{ + max->pri = ilk_plane_wm_reg_max(dev_priv, level, false); + max->spr = ilk_plane_wm_reg_max(dev_priv, level, true); + max->cur = ilk_cursor_wm_reg_max(dev_priv, level); + max->fbc = ilk_fbc_wm_reg_max(dev_priv); +} + +static bool ilk_validate_wm_level(int level, + const struct ilk_wm_maximums *max, + struct intel_wm_level *result) +{ + bool ret; + + /* already determined to be invalid? */ + if (!result->enable) + return false; + + result->enable = result->pri_val <= max->pri && + result->spr_val <= max->spr && + result->cur_val <= max->cur; + + ret = result->enable; + + /* + * HACK until we can pre-compute everything, + * and thus fail gracefully if LP0 watermarks + * are exceeded... 
+ */ + if (level == 0 && !result->enable) { + if (result->pri_val > max->pri) + DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n", + level, result->pri_val, max->pri); + if (result->spr_val > max->spr) + DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n", + level, result->spr_val, max->spr); + if (result->cur_val > max->cur) + DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", + level, result->cur_val, max->cur); + + result->pri_val = min_t(u32, result->pri_val, max->pri); + result->spr_val = min_t(u32, result->spr_val, max->spr); + result->cur_val = min_t(u32, result->cur_val, max->cur); + result->enable = true; + } + + return ret; +} + +static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, + const struct intel_crtc *crtc, + int level, + struct intel_crtc_state *crtc_state, + const struct intel_plane_state *pristate, + const struct intel_plane_state *sprstate, + const struct intel_plane_state *curstate, + struct intel_wm_level *result) +{ + u16 pri_latency = dev_priv->display.wm.pri_latency[level]; + u16 spr_latency = dev_priv->display.wm.spr_latency[level]; + u16 cur_latency = dev_priv->display.wm.cur_latency[level]; + + /* WM1+ latency values stored in 0.5us units */ + if (level > 0) { + pri_latency *= 5; + spr_latency *= 5; + cur_latency *= 5; + } + + if (pristate) { + result->pri_val = ilk_compute_pri_wm(crtc_state, pristate, + pri_latency, level); + result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val); + } + + if (sprstate) + result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency); + + if (curstate) + result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency); + + result->enable = true; +} + +static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +{ + u64 sskpd; + + i915->display.wm.num_levels = 5; + + sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD); + + wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd); + if (wm[0] == 0) + wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd); + wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd); + wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd); + wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd); + wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd); +} + +static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +{ + u32 sskpd; + + i915->display.wm.num_levels = 4; + + sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD); + + wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd); + wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd); + wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd); + wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd); +} + +static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) +{ + u32 mltr; + + i915->display.wm.num_levels = 3; + + mltr = intel_uncore_read(&i915->uncore, MLTR_ILK); + + /* ILK primary LP0 latency is 700 ns */ + wm[0] = 7; + wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr); + wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr); +} + +static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, + u16 wm[5]) +{ + /* ILK sprite LP0 latency is 1300 ns */ + if (DISPLAY_VER(dev_priv) == 5) + wm[0] = 13; +} + +static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv, + u16 wm[5]) +{ + /* ILK cursor LP0 latency is 1300 ns */ + if (DISPLAY_VER(dev_priv) == 5) + wm[0] = 13; +} + +static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, + u16 wm[5], u16 min) +{ + int level; + + if (wm[0] >= min) + return false; + + wm[0] = max(wm[0], min); + for (level = 1; level < 
dev_priv->display.wm.num_levels; level++) + wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5)); + + return true; +} + +static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) +{ + bool changed; + + /* + * The BIOS provided WM memory latency values are often + * inadequate for high resolution displays. Adjust them. + */ + changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12); + + if (!changed) + return; + + drm_dbg_kms(&dev_priv->drm, + "WM latency values increased to avoid potential underruns\n"); + intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); +} + +static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) +{ + /* + * On some SNB machines (Thinkpad X220 Tablet at least) + * LP3 usage can cause vblank interrupts to be lost. + * The DEIIR bit will go high but it looks like the CPU + * never gets interrupted. + * + * It's not clear whether other interrupt source could + * be affected or if this is somehow limited to vblank + * interrupts only. To play it safe we disable LP3 + * watermarks entirely. + */ + if (dev_priv->display.wm.pri_latency[3] == 0 && + dev_priv->display.wm.spr_latency[3] == 0 && + dev_priv->display.wm.cur_latency[3] == 0) + return; + + dev_priv->display.wm.pri_latency[3] = 0; + dev_priv->display.wm.spr_latency[3] = 0; + dev_priv->display.wm.cur_latency[3] = 0; + + drm_dbg_kms(&dev_priv->drm, + "LP3 watermarks disabled due to potential for lost interrupts\n"); + intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); +} + +static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) +{ + if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); + else if (DISPLAY_VER(dev_priv) >= 6) + snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); + else + ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); + + memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency, + sizeof(dev_priv->display.wm.pri_latency)); + memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency, + sizeof(dev_priv->display.wm.pri_latency)); + + intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency); + intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency); + + intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); + intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); + intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); + + if (DISPLAY_VER(dev_priv) == 6) { + snb_wm_latency_quirk(dev_priv); + snb_wm_lp3_irq_quirk(dev_priv); + } +} + +static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, + struct intel_pipe_wm *pipe_wm) +{ + /* LP0 watermark maximums depend on this pipe alone */ + const struct intel_wm_config config = { + .num_pipes_active = 1, + .sprites_enabled = pipe_wm->sprites_enabled, + .sprites_scaled = pipe_wm->sprites_scaled, + }; + struct ilk_wm_maximums max; 
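Note that LP0 is handled more strictly than LP1+: if the LP0 watermark does not fit here, ilk_compute_pipe_wm() below fails the whole pipe configuration with -EINVAL, whereas an over-budget LP1+ level is simply zeroed and the remaining levels disabled.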
+ + /* LP0 watermarks always use 1/2 DDB partitioning */ + ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max); + + /* At least LP0 must be valid */ + if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { + drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n"); + return false; + } + + return true; +} + +/* Compute new watermarks for the pipe */ +static int ilk_compute_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_pipe_wm *pipe_wm; + struct intel_plane *plane; + const struct intel_plane_state *plane_state; + const struct intel_plane_state *pristate = NULL; + const struct intel_plane_state *sprstate = NULL; + const struct intel_plane_state *curstate = NULL; + struct ilk_wm_maximums max; + int level, usable_level; + + pipe_wm = &crtc_state->wm.ilk.optimal; + + intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { + if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) + pristate = plane_state; + else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY) + sprstate = plane_state; + else if (plane->base.type == DRM_PLANE_TYPE_CURSOR) + curstate = plane_state; + } + + pipe_wm->pipe_enabled = crtc_state->hw.active; + pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0); + pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0); + + usable_level = dev_priv->display.wm.num_levels - 1; + + /* ILK/SNB: LP2+ watermarks only w/o sprites */ + if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled) + usable_level = 1; + + /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ + if (pipe_wm->sprites_scaled) + usable_level = 0; + + memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); + ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state, + pristate, sprstate, curstate, &pipe_wm->wm[0]); + + if (!ilk_validate_pipe_wm(dev_priv, pipe_wm)) + return -EINVAL; + + ilk_compute_wm_reg_maximums(dev_priv, 1, &max); + + for (level = 1; level <= usable_level; level++) { + struct intel_wm_level *wm = &pipe_wm->wm[level]; + + ilk_compute_wm_level(dev_priv, crtc, level, crtc_state, + pristate, sprstate, curstate, wm); + + /* + * Disable any watermark level that exceeds the + * register maximums since such watermarks are + * always invalid. + */ + if (!ilk_validate_wm_level(level, &max, wm)) { + memset(wm, 0, sizeof(*wm)); + break; + } + } + + return 0; +} + +/* + * Build a set of 'intermediate' watermark values that satisfy both the old + * state and the new state. These can be programmed to the hardware + * immediately. + */ +static int ilk_compute_intermediate_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate; + const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal; + int level; + + /* + * Start with the final, target watermarks, then combine with the + * currently active watermarks to get values that are safe both before + * and after the vblank. 
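Illustrative values: if the old optimal primary watermark for some level was 40 and the new optimal value is 25, the intermediate value is

    max(25, 40) == 40

so the larger, safe value stays programmed across the vblank, and ilk_optimize_watermarks() only drops it to 25 afterwards (via need_postvbl_update).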
+ */ + *a = new_crtc_state->wm.ilk.optimal; + if (!new_crtc_state->hw.active || + intel_crtc_needs_modeset(new_crtc_state) || + state->skip_intermediate_wm) + return 0; + + a->pipe_enabled |= b->pipe_enabled; + a->sprites_enabled |= b->sprites_enabled; + a->sprites_scaled |= b->sprites_scaled; + + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + struct intel_wm_level *a_wm = &a->wm[level]; + const struct intel_wm_level *b_wm = &b->wm[level]; + + a_wm->enable &= b_wm->enable; + a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val); + a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val); + a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val); + a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val); + } + + /* + * We need to make sure that these merged watermark values are + * actually a valid configuration themselves. If they're not, + * there's no safe way to transition from the old state to + * the new state, so we need to fail the atomic transaction. + */ + if (!ilk_validate_pipe_wm(dev_priv, a)) + return -EINVAL; + + /* + * If our intermediate WM are identical to the final WM, then we can + * omit the post-vblank programming; only update if it's different. + */ + if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0) + new_crtc_state->wm.need_postvbl_update = true; + + return 0; +} + +/* + * Merge the watermarks from all active pipes for a specific level. + */ +static void ilk_merge_wm_level(struct drm_i915_private *dev_priv, + int level, + struct intel_wm_level *ret_wm) +{ + const struct intel_crtc *crtc; + + ret_wm->enable = true; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + const struct intel_pipe_wm *active = &crtc->wm.active.ilk; + const struct intel_wm_level *wm = &active->wm[level]; + + if (!active->pipe_enabled) + continue; + + /* + * The watermark values may have been used in the past, + * so we must maintain them in the registers for some + * time even if the level is now disabled. + */ + if (!wm->enable) + ret_wm->enable = false; + + ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); + ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); + ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); + ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); + } +} + +/* + * Merge all low power watermarks for all active pipes. + */ +static void ilk_wm_merge(struct drm_i915_private *dev_priv, + const struct intel_wm_config *config, + const struct ilk_wm_maximums *max, + struct intel_pipe_wm *merged) +{ + int level, num_levels = dev_priv->display.wm.num_levels; + int last_enabled_level = num_levels - 1; + + /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ + if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) && + config->num_pipes_active > 1) + last_enabled_level = 0; + + /* ILK: FBC WM must be disabled always */ + merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6; + + /* merge each WM1+ level */ + for (level = 1; level < num_levels; level++) { + struct intel_wm_level *wm = &merged->wm[level]; + + ilk_merge_wm_level(dev_priv, level, wm); + + if (level > last_enabled_level) + wm->enable = false; + else if (!ilk_validate_wm_level(level, max, wm)) + /* make sure all following levels get disabled */ + last_enabled_level = level - 1; + + /* + * The spec says it is preferred to disable + * FBC WMs instead of disabling a WM level. 
+ */ + if (wm->fbc_val > max->fbc) { + if (wm->enable) + merged->fbc_wm_enabled = false; + wm->fbc_val = 0; + } + } + + /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ + if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) && + dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) { + for (level = 2; level < num_levels; level++) { + struct intel_wm_level *wm = &merged->wm[level]; + + wm->enable = false; + } + } +} + +static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) +{ + /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ + return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); +} + +/* The value we need to program into the WM_LPx latency field */ +static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv, + int level) +{ + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + return 2 * level; + else + return dev_priv->display.wm.pri_latency[level]; +} + +static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, + const struct intel_pipe_wm *merged, + enum intel_ddb_partitioning partitioning, + struct ilk_wm_values *results) +{ + struct intel_crtc *crtc; + int level, wm_lp; + + results->enable_fbc_wm = merged->fbc_wm_enabled; + results->partitioning = partitioning; + + /* LP1+ register values */ + for (wm_lp = 1; wm_lp <= 3; wm_lp++) { + const struct intel_wm_level *r; + + level = ilk_wm_lp_to_level(wm_lp, merged); + + r = &merged->wm[level]; + + /* + * Maintain the watermark values even if the level is + * disabled. Doing otherwise could cause underruns. + */ + results->wm_lp[wm_lp - 1] = + WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) | + WM_LP_PRIMARY(r->pri_val) | + WM_LP_CURSOR(r->cur_val); + + if (r->enable) + results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE; + + if (DISPLAY_VER(dev_priv) >= 8) + results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val); + else + results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val); + + results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val); + + /* + * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the + * level is disabled. Doing otherwise could cause underruns. + */ + if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) { + drm_WARN_ON(&dev_priv->drm, wm_lp != 1); + results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE; + } + } + + /* LP0 register values */ + for_each_intel_crtc(&dev_priv->drm, crtc) { + enum pipe pipe = crtc->pipe; + const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk; + const struct intel_wm_level *r = &pipe_wm->wm[0]; + + if (drm_WARN_ON(&dev_priv->drm, !r->enable)) + continue; + + results->wm_pipe[pipe] = + WM0_PIPE_PRIMARY(r->pri_val) | + WM0_PIPE_SPRITE(r->spr_val) | + WM0_PIPE_CURSOR(r->cur_val); + } +} + +/* + * Find the result with the highest level enabled. Check for enable_fbc_wm in + * case both are at the same level. Prefer r1 in case they're the same. 
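For reference, the register/level mapping implemented by ilk_wm_lp_to_level() above: WM1/WM2/WM3_LP normally correspond to levels 1/2/3, but when a fifth level exists and wm[4] is usable they shift to 1/3/4, which is what the "+ (wm_lp >= 2 && wm[4].enable)" term encodes.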
+ */ +static struct intel_pipe_wm * +ilk_find_best_result(struct drm_i915_private *dev_priv, + struct intel_pipe_wm *r1, + struct intel_pipe_wm *r2) +{ + int level, level1 = 0, level2 = 0; + + for (level = 1; level < dev_priv->display.wm.num_levels; level++) { + if (r1->wm[level].enable) + level1 = level; + if (r2->wm[level].enable) + level2 = level; + } + + if (level1 == level2) { + if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) + return r2; + else + return r1; + } else if (level1 > level2) { + return r1; + } else { + return r2; + } +} + +/* dirty bits used to track which watermarks need changes */ +#define WM_DIRTY_PIPE(pipe) (1 << (pipe)) +#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) +#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) +#define WM_DIRTY_FBC (1 << 24) +#define WM_DIRTY_DDB (1 << 25) + +static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv, + const struct ilk_wm_values *old, + const struct ilk_wm_values *new) +{ + unsigned int dirty = 0; + enum pipe pipe; + int wm_lp; + + for_each_pipe(dev_priv, pipe) { + if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { + dirty |= WM_DIRTY_PIPE(pipe); + /* Must disable LP1+ watermarks too */ + dirty |= WM_DIRTY_LP_ALL; + } + } + + if (old->enable_fbc_wm != new->enable_fbc_wm) { + dirty |= WM_DIRTY_FBC; + /* Must disable LP1+ watermarks too */ + dirty |= WM_DIRTY_LP_ALL; + } + + if (old->partitioning != new->partitioning) { + dirty |= WM_DIRTY_DDB; + /* Must disable LP1+ watermarks too */ + dirty |= WM_DIRTY_LP_ALL; + } + + /* LP1+ watermarks already deemed dirty, no need to continue */ + if (dirty & WM_DIRTY_LP_ALL) + return dirty; + + /* Find the lowest numbered LP1+ watermark in need of an update... */ + for (wm_lp = 1; wm_lp <= 3; wm_lp++) { + if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || + old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) + break; + } + + /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ + for (; wm_lp <= 3; wm_lp++) + dirty |= WM_DIRTY_LP(wm_lp); + + return dirty; +} + +static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, + unsigned int dirty) +{ + struct ilk_wm_values *previous = &dev_priv->display.wm.hw; + bool changed = false; + + if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) { + previous->wm_lp[2] &= ~WM_LP_ENABLE; + intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]); + changed = true; + } + if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) { + previous->wm_lp[1] &= ~WM_LP_ENABLE; + intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]); + changed = true; + } + if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) { + previous->wm_lp[0] &= ~WM_LP_ENABLE; + intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]); + changed = true; + } + + /* + * Don't touch WM_LP_SPRITE_ENABLE here. + * Doing so could cause underruns. + */ + + return changed; +} + +/* + * The spec says we shouldn't write when we don't need, because every write + * causes WMs to be re-evaluated, expending some power. 
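ilk_write_wm_values() below therefore only writes the registers whose dirty bit is set, and _ilk_disable_lp_wm() clears the LP1+ enable bits up front while the LP1+ registers themselves are rewritten last, presumably so that the pipe, DDB and FBC updates in between never run with a stale LP1+ watermark still armed.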
+ */ +static void ilk_write_wm_values(struct drm_i915_private *dev_priv, + struct ilk_wm_values *results) +{ + struct ilk_wm_values *previous = &dev_priv->display.wm.hw; + unsigned int dirty; + + dirty = ilk_compute_wm_dirty(dev_priv, previous, results); + if (!dirty) + return; + + _ilk_disable_lp_wm(dev_priv, dirty); + + if (dirty & WM_DIRTY_PIPE(PIPE_A)) + intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]); + if (dirty & WM_DIRTY_PIPE(PIPE_B)) + intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]); + if (dirty & WM_DIRTY_PIPE(PIPE_C)) + intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]); + + if (dirty & WM_DIRTY_DDB) { + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6, + results->partitioning == INTEL_DDB_PART_1_2 ? 0 : + WM_MISC_DATA_PARTITION_5_6); + else + intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6, + results->partitioning == INTEL_DDB_PART_1_2 ? 0 : + DISP_DATA_PARTITION_5_6); + } + + if (dirty & WM_DIRTY_FBC) + intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS, + results->enable_fbc_wm ? 0 : DISP_FBC_WM_DIS); + + if (dirty & WM_DIRTY_LP(1) && + previous->wm_lp_spr[0] != results->wm_lp_spr[0]) + intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]); + + if (DISPLAY_VER(dev_priv) >= 7) { + if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) + intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]); + if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) + intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]); + } + + if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) + intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]); + if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) + intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]); + if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) + intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]); + + dev_priv->display.wm.hw = *results; +} + +bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) +{ + return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); +} + +static void ilk_compute_wm_config(struct drm_i915_private *dev_priv, + struct intel_wm_config *config) +{ + struct intel_crtc *crtc; + + /* Compute the currently _active_ config */ + for_each_intel_crtc(&dev_priv->drm, crtc) { + const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; + + if (!wm->pipe_enabled) + continue; + + config->sprites_enabled |= wm->sprites_enabled; + config->sprites_scaled |= wm->sprites_scaled; + config->num_pipes_active++; + } +} + +static void ilk_program_watermarks(struct drm_i915_private *dev_priv) +{ + struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; + struct ilk_wm_maximums max; + struct intel_wm_config config = {}; + struct ilk_wm_values results = {}; + enum intel_ddb_partitioning partitioning; + + ilk_compute_wm_config(dev_priv, &config); + + ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max); + ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2); + + /* 5/6 split only in single pipe config on IVB+ */ + if (DISPLAY_VER(dev_priv) >= 7 && + config.num_pipes_active == 1 && config.sprites_enabled) { + ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max); + ilk_wm_merge(dev_priv, 
&config, &max, &lp_wm_5_6); + + best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6); + } else { + best_lp_wm = &lp_wm_1_2; + } + + partitioning = (best_lp_wm == &lp_wm_1_2) ? + INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; + + ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results); + + ilk_write_wm_values(dev_priv, &results); +} + +static void ilk_initial_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + mutex_lock(&dev_priv->display.wm.wm_mutex); + crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate; + ilk_program_watermarks(dev_priv); + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +static void ilk_optimize_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + if (!crtc_state->wm.need_postvbl_update) + return; + + mutex_lock(&dev_priv->display.wm.wm_mutex); + crtc->wm.active.ilk = crtc_state->wm.ilk.optimal; + ilk_program_watermarks(dev_priv); + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct ilk_wm_values *hw = &dev_priv->display.wm.hw; + struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); + struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal; + enum pipe pipe = crtc->pipe; + + hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe)); + + memset(active, 0, sizeof(*active)); + + active->pipe_enabled = crtc->active; + + if (active->pipe_enabled) { + u32 tmp = hw->wm_pipe[pipe]; + + /* + * For active pipes LP0 watermark is marked as + * enabled, and LP1+ watermaks as disabled since + * we can't really reverse compute them in case + * multiple pipes are active. + */ + active->wm[0].enable = true; + active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp); + active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp); + active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp); + } else { + int level; + + /* + * For inactive pipes, all watermark levels + * should be marked as enabled but zeroed, + * which is what we'd compute them to. + */ + for (level = 0; level < dev_priv->display.wm.num_levels; level++) + active->wm[level].enable = true; + } + + crtc->wm.active.ilk = *active; +} + +static int ilk_sanitize_watermarks_add_affected(struct drm_atomic_state *state) +{ + struct drm_plane *plane; + struct intel_crtc *crtc; + + for_each_intel_crtc(state->dev, crtc) { + struct intel_crtc_state *crtc_state; + + crtc_state = intel_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (crtc_state->hw.active) { + /* + * Preserve the inherited flag to avoid + * taking the full modeset path. 
+ */ + crtc_state->inherited = true; + } + } + + drm_for_each_plane(plane, state->dev) { + struct drm_plane_state *plane_state; + + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + } + + return 0; +} + +/* + * Calculate what we think the watermarks should be for the state we've read + * out of the hardware and then immediately program those watermarks so that + * we ensure the hardware settings match our internal state. + * + * We can calculate what we think WM's should be by creating a duplicate of the + * current state (which was constructed during hardware readout) and running it + * through the atomic check code to calculate new watermark values in the + * state object. + */ +void ilk_wm_sanitize(struct drm_i915_private *dev_priv) +{ + struct drm_atomic_state *state; + struct intel_atomic_state *intel_state; + struct intel_crtc *crtc; + struct intel_crtc_state *crtc_state; + struct drm_modeset_acquire_ctx ctx; + int ret; + int i; + + /* Only supported on platforms that use atomic watermark design */ + if (!dev_priv->display.funcs.wm->optimize_watermarks) + return; + + if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) >= 9)) + return; + + state = drm_atomic_state_alloc(&dev_priv->drm); + if (drm_WARN_ON(&dev_priv->drm, !state)) + return; + + intel_state = to_intel_atomic_state(state); + + drm_modeset_acquire_init(&ctx, 0); + +retry: + state->acquire_ctx = &ctx; + + /* + * Hardware readout is the only time we don't want to calculate + * intermediate watermarks (since we don't trust the current + * watermarks). + */ + if (!HAS_GMCH(dev_priv)) + intel_state->skip_intermediate_wm = true; + + ret = ilk_sanitize_watermarks_add_affected(state); + if (ret) + goto fail; + + ret = intel_atomic_check(&dev_priv->drm, state); + if (ret) + goto fail; + + /* Write calculated watermark values back */ + for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { + crtc_state->wm.need_postvbl_update = true; + intel_optimize_watermarks(intel_state, crtc); + + to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; + } + +fail: + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + drm_modeset_backoff(&ctx); + goto retry; + } + + /* + * If we fail here, it means that the hardware appears to be + * programmed in a way that shouldn't be possible, given our + * understanding of watermark requirements. This might mean a + * mistake in the hardware readout code or a mistake in the + * watermark calculations for a given platform. Raise a WARN + * so that this is noticeable. + * + * If this actually happens, we'll have to just leave the + * BIOS-programmed watermarks untouched and hope for the best. 
+ */ + drm_WARN(&dev_priv->drm, ret, + "Could not determine valid watermarks for inherited state\n"); + + drm_atomic_state_put(state); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); +} + +#define _FW_WM(value, plane) \ + (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) +#define _FW_WM_VLV(value, plane) \ + (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) + +static void g4x_read_wm_values(struct drm_i915_private *dev_priv, + struct g4x_wm_values *wm) +{ + u32 tmp; + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); + wm->sr.plane = _FW_WM(tmp, SR); + wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); + wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB); + wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); + wm->fbc_en = tmp & DSPFW_FBC_SR_EN; + wm->sr.fbc = _FW_WM(tmp, FBC_SR); + wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR); + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB); + wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); + wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); + wm->hpll_en = tmp & DSPFW_HPLL_SR_EN; + wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); + wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR); + wm->hpll.plane = _FW_WM(tmp, HPLL_SR); +} + +static void vlv_read_wm_values(struct drm_i915_private *dev_priv, + struct vlv_wm_values *wm) +{ + enum pipe pipe; + u32 tmp; + + for_each_pipe(dev_priv, pipe) { + tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe)); + + wm->ddl[pipe].plane[PLANE_PRIMARY] = + (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); + wm->ddl[pipe].plane[PLANE_CURSOR] = + (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); + wm->ddl[pipe].plane[PLANE_SPRITE0] = + (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); + wm->ddl[pipe].plane[PLANE_SPRITE1] = + (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); + } + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); + wm->sr.plane = _FW_WM(tmp, SR); + wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); + wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB); + wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); + wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB); + wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); + wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); + wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); + + if (IS_CHERRYVIEW(dev_priv)) { + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV); + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV); + wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF); + wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV); + wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC); + wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); + wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; + wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8; + wm->pipe[PIPE_C].plane[PLANE_SPRITE0] 
|= _FW_WM(tmp, SPRITEE_HI) << 8; + wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; + } else { + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7); + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); + + tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); + wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; + wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; + wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; + wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; + } +} + +#undef _FW_WM +#undef _FW_WM_VLV + +static void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) +{ + struct g4x_wm_values *wm = &dev_priv->display.wm.g4x; + struct intel_crtc *crtc; + + g4x_read_wm_values(dev_priv, wm); + + wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct g4x_wm_state *active = &crtc->wm.active.g4x; + struct g4x_pipe_wm *raw; + enum pipe pipe = crtc->pipe; + enum plane_id plane_id; + int level, max_level; + + active->cxsr = wm->cxsr; + active->hpll_en = wm->hpll_en; + active->fbc_en = wm->fbc_en; + + active->sr = wm->sr; + active->hpll = wm->hpll; + + for_each_plane_id_on_crtc(crtc, plane_id) { + active->wm.plane[plane_id] = + wm->pipe[pipe].plane[plane_id]; + } + + if (wm->cxsr && wm->hpll_en) + max_level = G4X_WM_LEVEL_HPLL; + else if (wm->cxsr) + max_level = G4X_WM_LEVEL_SR; + else + max_level = G4X_WM_LEVEL_NORMAL; + + level = G4X_WM_LEVEL_NORMAL; + raw = &crtc_state->wm.g4x.raw[level]; + for_each_plane_id_on_crtc(crtc, plane_id) + raw->plane[plane_id] = active->wm.plane[plane_id]; + + level = G4X_WM_LEVEL_SR; + if (level > max_level) + goto out; + + raw = &crtc_state->wm.g4x.raw[level]; + raw->plane[PLANE_PRIMARY] = active->sr.plane; + raw->plane[PLANE_CURSOR] = active->sr.cursor; + raw->plane[PLANE_SPRITE0] = 0; + raw->fbc = active->sr.fbc; + + level = G4X_WM_LEVEL_HPLL; + if (level > max_level) + goto out; + + raw = &crtc_state->wm.g4x.raw[level]; + raw->plane[PLANE_PRIMARY] = active->hpll.plane; + raw->plane[PLANE_CURSOR] = active->hpll.cursor; + raw->plane[PLANE_SPRITE0] = 0; + raw->fbc = active->hpll.fbc; + + level++; + out: + for_each_plane_id_on_crtc(crtc, plane_id) + g4x_raw_plane_wm_set(crtc_state, level, + plane_id, USHRT_MAX); + g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); + + g4x_invalidate_wms(crtc, active, level); + + crtc_state->wm.g4x.optimal = *active; + crtc_state->wm.g4x.intermediate = *active; + + drm_dbg_kms(&dev_priv->drm, + "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n", + pipe_name(pipe), + wm->pipe[pipe].plane[PLANE_PRIMARY], + wm->pipe[pipe].plane[PLANE_CURSOR], + 
wm->pipe[pipe].plane[PLANE_SPRITE0]); + } + + drm_dbg_kms(&dev_priv->drm, + "Initial SR watermarks: plane=%d, cursor=%d fbc=%d\n", + wm->sr.plane, wm->sr.cursor, wm->sr.fbc); + drm_dbg_kms(&dev_priv->drm, + "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n", + wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc); + drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n", + str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en), + str_yes_no(wm->fbc_en)); +} + +static void g4x_wm_sanitize(struct drm_i915_private *dev_priv) +{ + struct intel_plane *plane; + struct intel_crtc *crtc; + + mutex_lock(&dev_priv->display.wm.wm_mutex); + + for_each_intel_plane(&dev_priv->drm, plane) { + struct intel_crtc *crtc = + intel_crtc_for_pipe(dev_priv, plane->pipe); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + enum plane_id plane_id = plane->id; + int level; + + if (plane_state->uapi.visible) + continue; + + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + struct g4x_pipe_wm *raw = + &crtc_state->wm.g4x.raw[level]; + + raw->plane[plane_id] = 0; + + if (plane_id == PLANE_PRIMARY) + raw->fbc = 0; + } + } + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + int ret; + + ret = _g4x_compute_pipe_wm(crtc_state); + drm_WARN_ON(&dev_priv->drm, ret); + + crtc_state->wm.g4x.intermediate = + crtc_state->wm.g4x.optimal; + crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; + } + + g4x_program_watermarks(dev_priv); + + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +static void g4x_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915) +{ + g4x_wm_get_hw_state(i915); + g4x_wm_sanitize(i915); +} + +static void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) +{ + struct vlv_wm_values *wm = &dev_priv->display.wm.vlv; + struct intel_crtc *crtc; + u32 val; + + vlv_read_wm_values(dev_priv, wm); + + wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; + wm->level = VLV_WM_LEVEL_PM2; + + if (IS_CHERRYVIEW(dev_priv)) { + vlv_punit_get(dev_priv); + + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); + if (val & DSP_MAXFIFO_PM5_ENABLE) + wm->level = VLV_WM_LEVEL_PM5; + + /* + * If DDR DVFS is disabled in the BIOS, Punit + * will never ack the request. So if that happens + * assume we don't have to enable/disable DDR DVFS + * dynamically. To test that just set the REQ_ACK + * bit to poke the Punit, but don't change the + * HIGH/LOW bits so that we don't actually change + * the current state. 
+ */ + val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); + val |= FORCE_DDR_FREQ_REQ_ACK; + vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); + + if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & + FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { + drm_dbg_kms(&dev_priv->drm, + "Punit not acking DDR DVFS request, " + "assuming DDR DVFS is disabled\n"); + dev_priv->display.wm.num_levels = VLV_WM_LEVEL_PM5 + 1; + } else { + val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); + if ((val & FORCE_DDR_HIGH_FREQ) == 0) + wm->level = VLV_WM_LEVEL_DDR_DVFS; + } + + vlv_punit_put(dev_priv); + } + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct vlv_wm_state *active = &crtc->wm.active.vlv; + const struct vlv_fifo_state *fifo_state = + &crtc_state->wm.vlv.fifo_state; + enum pipe pipe = crtc->pipe; + enum plane_id plane_id; + int level; + + vlv_get_fifo_size(crtc_state); + + active->num_levels = wm->level + 1; + active->cxsr = wm->cxsr; + + for (level = 0; level < active->num_levels; level++) { + struct g4x_pipe_wm *raw = + &crtc_state->wm.vlv.raw[level]; + + active->sr[level].plane = wm->sr.plane; + active->sr[level].cursor = wm->sr.cursor; + + for_each_plane_id_on_crtc(crtc, plane_id) { + active->wm[level].plane[plane_id] = + wm->pipe[pipe].plane[plane_id]; + + raw->plane[plane_id] = + vlv_invert_wm_value(active->wm[level].plane[plane_id], + fifo_state->plane[plane_id]); + } + } + + for_each_plane_id_on_crtc(crtc, plane_id) + vlv_raw_plane_wm_set(crtc_state, level, + plane_id, USHRT_MAX); + vlv_invalidate_wms(crtc, active, level); + + crtc_state->wm.vlv.optimal = *active; + crtc_state->wm.vlv.intermediate = *active; + + drm_dbg_kms(&dev_priv->drm, + "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", + pipe_name(pipe), + wm->pipe[pipe].plane[PLANE_PRIMARY], + wm->pipe[pipe].plane[PLANE_CURSOR], + wm->pipe[pipe].plane[PLANE_SPRITE0], + wm->pipe[pipe].plane[PLANE_SPRITE1]); + } + + drm_dbg_kms(&dev_priv->drm, + "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", + wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); +} + +static void vlv_wm_sanitize(struct drm_i915_private *dev_priv) +{ + struct intel_plane *plane; + struct intel_crtc *crtc; + + mutex_lock(&dev_priv->display.wm.wm_mutex); + + for_each_intel_plane(&dev_priv->drm, plane) { + struct intel_crtc *crtc = + intel_crtc_for_pipe(dev_priv, plane->pipe); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane_state *plane_state = + to_intel_plane_state(plane->base.state); + enum plane_id plane_id = plane->id; + int level; + + if (plane_state->uapi.visible) + continue; + + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + struct g4x_pipe_wm *raw = + &crtc_state->wm.vlv.raw[level]; + + raw->plane[plane_id] = 0; + } + } + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + int ret; + + ret = _vlv_compute_pipe_wm(crtc_state); + drm_WARN_ON(&dev_priv->drm, ret); + + crtc_state->wm.vlv.intermediate = + crtc_state->wm.vlv.optimal; + crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; + } + + vlv_program_watermarks(dev_priv); + + mutex_unlock(&dev_priv->display.wm.wm_mutex); +} + +static void vlv_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915) +{ + vlv_wm_get_hw_state(i915); + vlv_wm_sanitize(i915); +} + +/* + * FIXME should probably kill this and improve + * the real 
watermark readout/sanitation instead + */ +static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) +{ + intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0); + intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0); + intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0); + + /* + * Don't touch WM_LP_SPRITE_ENABLE here. + * Doing so could cause underruns. + */ +} + +static void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) +{ + struct ilk_wm_values *hw = &dev_priv->display.wm.hw; + struct intel_crtc *crtc; + + ilk_init_lp_watermarks(dev_priv); + + for_each_intel_crtc(&dev_priv->drm, crtc) + ilk_pipe_wm_get_hw_state(crtc); + + hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK); + hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK); + hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK); + + hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK); + if (DISPLAY_VER(dev_priv) >= 7) { + hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB); + hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB); + } + + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & + WM_MISC_DATA_PARTITION_5_6) ? + INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; + else if (IS_IVYBRIDGE(dev_priv)) + hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & + DISP_DATA_PARTITION_5_6) ? + INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; + + hw->enable_fbc_wm = + !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS); +} + +static const struct intel_wm_funcs ilk_wm_funcs = { + .compute_pipe_wm = ilk_compute_pipe_wm, + .compute_intermediate_wm = ilk_compute_intermediate_wm, + .initial_watermarks = ilk_initial_watermarks, + .optimize_watermarks = ilk_optimize_watermarks, + .get_hw_state = ilk_wm_get_hw_state, +}; + +static const struct intel_wm_funcs vlv_wm_funcs = { + .compute_pipe_wm = vlv_compute_pipe_wm, + .compute_intermediate_wm = vlv_compute_intermediate_wm, + .initial_watermarks = vlv_initial_watermarks, + .optimize_watermarks = vlv_optimize_watermarks, + .atomic_update_watermarks = vlv_atomic_update_fifo, + .get_hw_state = vlv_wm_get_hw_state_and_sanitize, +}; + +static const struct intel_wm_funcs g4x_wm_funcs = { + .compute_pipe_wm = g4x_compute_pipe_wm, + .compute_intermediate_wm = g4x_compute_intermediate_wm, + .initial_watermarks = g4x_initial_watermarks, + .optimize_watermarks = g4x_optimize_watermarks, + .get_hw_state = g4x_wm_get_hw_state_and_sanitize, +}; + +static const struct intel_wm_funcs pnv_wm_funcs = { + .update_wm = pnv_update_wm, +}; + +static const struct intel_wm_funcs i965_wm_funcs = { + .update_wm = i965_update_wm, +}; + +static const struct intel_wm_funcs i9xx_wm_funcs = { + .update_wm = i9xx_update_wm, +}; + +static const struct intel_wm_funcs i845_wm_funcs = { + .update_wm = i845_update_wm, +}; + +static const struct intel_wm_funcs nop_funcs = { +}; + +void i9xx_wm_init(struct drm_i915_private *dev_priv) +{ + /* For FIFO watermark updates */ + if (HAS_PCH_SPLIT(dev_priv)) { + ilk_setup_wm_latency(dev_priv); + dev_priv->display.funcs.wm = &ilk_wm_funcs; + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + vlv_setup_wm_latency(dev_priv); + dev_priv->display.funcs.wm = &vlv_wm_funcs; + } else if (IS_G4X(dev_priv)) { + g4x_setup_wm_latency(dev_priv); + dev_priv->display.funcs.wm = &g4x_wm_funcs; + } else if (IS_PINEVIEW(dev_priv)) { + if 
(!intel_get_cxsr_latency(!IS_MOBILE(dev_priv), + dev_priv->is_ddr3, + dev_priv->fsb_freq, + dev_priv->mem_freq)) { + drm_info(&dev_priv->drm, + "failed to find known CxSR latency " + "(found ddr%s fsb freq %d, mem freq %d), " + "disabling CxSR\n", + (dev_priv->is_ddr3 == 1) ? "3" : "2", + dev_priv->fsb_freq, dev_priv->mem_freq); + /* Disable CxSR and never update its watermark again */ + intel_set_memory_cxsr(dev_priv, false); + dev_priv->display.funcs.wm = &nop_funcs; + } else { + dev_priv->display.funcs.wm = &pnv_wm_funcs; + } + } else if (DISPLAY_VER(dev_priv) == 4) { + dev_priv->display.funcs.wm = &i965_wm_funcs; + } else if (DISPLAY_VER(dev_priv) == 3) { + dev_priv->display.funcs.wm = &i9xx_wm_funcs; + } else if (DISPLAY_VER(dev_priv) == 2) { + if (INTEL_NUM_PIPES(dev_priv) == 1) + dev_priv->display.funcs.wm = &i845_wm_funcs; + else + dev_priv->display.funcs.wm = &i9xx_wm_funcs; + } else { + drm_err(&dev_priv->drm, + "unexpected fall-through in %s\n", __func__); + dev_priv->display.funcs.wm = &nop_funcs; + } +} diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.h b/drivers/gpu/drm/i915/display/i9xx_wm.h new file mode 100644 index 000000000000..a7875cbcd05a --- /dev/null +++ b/drivers/gpu/drm/i915/display/i9xx_wm.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __I9XX_WM_H__ +#define __I9XX_WM_H__ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_crtc_state; +struct intel_plane_state; + +int ilk_wm_max_level(const struct drm_i915_private *i915); +bool ilk_disable_lp_wm(struct drm_i915_private *i915); +void ilk_wm_sanitize(struct drm_i915_private *i915); +bool intel_set_memory_cxsr(struct drm_i915_private *i915, bool enable); +void i9xx_wm_init(struct drm_i915_private *i915); + +#endif /* __I9XX_WM_H__ */ diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 468a792e6a40..50dcaa895854 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -45,6 +45,7 @@ #include "intel_dsi_vbt.h" #include "intel_panel.h" #include "intel_vdsc.h" +#include "intel_vdsc_regs.h" #include "skl_scaler.h" #include "skl_universal_plane.h" @@ -207,7 +208,7 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 tmp, mode_flags; + u32 mode_flags; enum port port; mode_flags = crtc_state->mode_flags; @@ -224,9 +225,7 @@ void icl_dsi_frame_update(struct intel_crtc_state *crtc_state) else return; - tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port)); - tmp |= DSI_FRAME_UPDATE_REQUEST; - intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp); + intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), 0, DSI_FRAME_UPDATE_REQUEST); } static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) @@ -234,7 +233,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum phy phy; - u32 tmp; + u32 tmp, mask, val; int lane; for_each_dsi_phy(phy, intel_dsi->phys) { @@ -242,56 +241,35 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder) * Program voltage swing and pre-emphasis level values as per * table in BSPEC under DDI buffer programing */ + mask = SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK; + val = SCALING_MODE_SEL(0x2) | TAP2_DISABLE | 
TAP3_DISABLE | + RTERM_SELECT(0x6); tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); - tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); - tmp |= SCALING_MODE_SEL(0x2); - tmp |= TAP2_DISABLE | TAP3_DISABLE; - tmp |= RTERM_SELECT(0x6); + tmp &= ~mask; + tmp |= val; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); + intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), mask, val); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); - tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK); - tmp |= SCALING_MODE_SEL(0x2); - tmp |= TAP2_DISABLE | TAP3_DISABLE; - tmp |= RTERM_SELECT(0x6); - intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); - + mask = SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | + RCOMP_SCALAR_MASK; + val = SWING_SEL_UPPER(0x2) | SWING_SEL_LOWER(0x2) | + RCOMP_SCALAR(0x98); tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); - tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | - RCOMP_SCALAR_MASK); - tmp |= SWING_SEL_UPPER(0x2); - tmp |= SWING_SEL_LOWER(0x2); - tmp |= RCOMP_SCALAR(0x98); + tmp &= ~mask; + tmp |= val; intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp); + intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), mask, val); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy)); - tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | - RCOMP_SCALAR_MASK); - tmp |= SWING_SEL_UPPER(0x2); - tmp |= SWING_SEL_LOWER(0x2); - tmp |= RCOMP_SCALAR(0x98); - intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp); - - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy)); - tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | - CURSOR_COEFF_MASK); - tmp |= POST_CURSOR_1(0x0); - tmp |= POST_CURSOR_2(0x0); - tmp |= CURSOR_COEFF(0x3f); - intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp); - - for (lane = 0; lane <= 3; lane++) { - /* Bspec: must not use GRP register for write */ - tmp = intel_de_read(dev_priv, - ICL_PORT_TX_DW4_LN(lane, phy)); - tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | - CURSOR_COEFF_MASK); - tmp |= POST_CURSOR_1(0x0); - tmp |= POST_CURSOR_2(0x0); - tmp |= CURSOR_COEFF(0x3f); - intel_de_write(dev_priv, - ICL_PORT_TX_DW4_LN(lane, phy), tmp); - } + mask = POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | + CURSOR_COEFF_MASK; + val = POST_CURSOR_1(0x0) | POST_CURSOR_2(0x0) | + CURSOR_COEFF(0x3f); + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), mask, val); + + /* Bspec: must not use GRP register for write */ + for (lane = 0; lane <= 3; lane++) + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy), + mask, val); } } @@ -300,9 +278,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); + i915_reg_t dss_ctl1_reg, dss_ctl2_reg; u32 dss_ctl1; - dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1); + /* FIXME: Move all DSS handling to intel_vdsc.c */ + if (DISPLAY_VER(dev_priv) >= 12) { + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); + + dss_ctl1_reg = ICL_PIPE_DSS_CTL1(crtc->pipe); + dss_ctl2_reg = ICL_PIPE_DSS_CTL2(crtc->pipe); + } else { + dss_ctl1_reg = DSS_CTL1; + dss_ctl2_reg = DSS_CTL2; + } + + dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg); dss_ctl1 |= SPLITTER_ENABLE; dss_ctl1 &= ~OVERLAP_PIXELS_MASK; dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap); @@ -310,7 +300,6 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { const struct drm_display_mode *adjusted_mode = 
&pipe_config->hw.adjusted_mode; - u32 dss_ctl2; u16 hactive = adjusted_mode->crtc_hdisplay; u16 dl_buffer_depth; @@ -323,16 +312,14 @@ static void configure_dual_link_mode(struct intel_encoder *encoder, dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK; dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); - dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2); - dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK; - dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth); - intel_de_write(dev_priv, DSS_CTL2, dss_ctl2); + intel_de_rmw(dev_priv, dss_ctl2_reg, RIGHT_DL_BUF_TARGET_DEPTH_MASK, + RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth)); } else { /* Interleave */ dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE; } - intel_de_write(dev_priv, DSS_CTL1, dss_ctl1); + intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1); } /* aka DSI 8X clock */ @@ -412,13 +399,10 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 tmp; - for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port)); - tmp |= COMBO_PHY_MODE_DSI; - intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port), + 0, COMBO_PHY_MODE_DSI); get_dsi_io_power_domains(dev_priv, intel_dsi); } @@ -444,26 +428,16 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) /* Step 4b(i) set loadgen select for transmit and aux lanes */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy)); - tmp &= ~LOADGEN_SELECT; - intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp); - for (lane = 0; lane <= 3; lane++) { - tmp = intel_de_read(dev_priv, - ICL_PORT_TX_DW4_LN(lane, phy)); - tmp &= ~LOADGEN_SELECT; - if (lane != 2) - tmp |= LOADGEN_SELECT; - intel_de_write(dev_priv, - ICL_PORT_TX_DW4_LN(lane, phy), tmp); - } + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_AUX(phy), LOADGEN_SELECT, 0); + for (lane = 0; lane <= 3; lane++) + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(lane, phy), + LOADGEN_SELECT, lane != 2 ? 
LOADGEN_SELECT : 0); } /* Step 4b(ii) set latency optimization for transmit and aux lanes */ for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy)); - tmp &= ~FRC_LATENCY_OPTIM_MASK; - tmp |= FRC_LATENCY_OPTIM_VAL(0x5); - intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp); + intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_AUX(phy), + FRC_LATENCY_OPTIM_MASK, FRC_LATENCY_OPTIM_VAL(0x5)); tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); tmp &= ~FRC_LATENCY_OPTIM_MASK; tmp |= FRC_LATENCY_OPTIM_VAL(0x5); @@ -471,12 +445,8 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder) /* For EHL, TGL, set latency optimization for PCS_DW1 lanes */ if (IS_JSL_EHL(dev_priv) || (DISPLAY_VER(dev_priv) >= 12)) { - tmp = intel_de_read(dev_priv, - ICL_PORT_PCS_DW1_AUX(phy)); - tmp &= ~LATENCY_OPTIM_MASK; - tmp |= LATENCY_OPTIM_VAL(0); - intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), - tmp); + intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), + LATENCY_OPTIM_MASK, LATENCY_OPTIM_VAL(0)); tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); @@ -501,9 +471,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); tmp &= ~COMMON_KEEPER_EN; intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp); - tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy)); - tmp &= ~COMMON_KEEPER_EN; - intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), tmp); + intel_de_rmw(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), COMMON_KEEPER_EN, 0); } /* @@ -511,20 +479,15 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) * Note: loadgen select program is done * as part of lane phy sequence configuration */ - for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy)); - tmp |= SUS_CLOCK_CONFIG; - intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), tmp); - } + for_each_dsi_phy(phy, intel_dsi->phys) + intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), 0, SUS_CLOCK_CONFIG); /* Clear training enable to change swing values */ for_each_dsi_phy(phy, intel_dsi->phys) { tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); tmp &= ~TX_TRAINING_EN; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); - tmp &= ~TX_TRAINING_EN; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); + intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), TX_TRAINING_EN, 0); } /* Program swing and de-emphasis */ @@ -535,9 +498,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder) tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); tmp |= TX_TRAINING_EN; intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp); - tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy)); - tmp |= TX_TRAINING_EN; - intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp); + intel_de_rmw(dev_priv, ICL_PORT_TX_DW5_AUX(phy), 0, TX_TRAINING_EN); } } @@ -545,13 +506,10 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - u32 tmp; enum port port; for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port)); - tmp |= DDI_BUF_CTL_ENABLE; - intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp); + intel_de_rmw(dev_priv, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE); if (wait_for_us(!(intel_de_read(dev_priv, 
DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), @@ -567,17 +525,13 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - u32 tmp; enum port port; enum phy phy; /* Program T-INIT master registers */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port)); - tmp &= ~DSI_T_INIT_MASTER_MASK; - tmp |= intel_dsi->init_count; - intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, ICL_DSI_T_INIT_MASTER(port), + DSI_T_INIT_MASTER_MASK, intel_dsi->init_count); /* Program DPHY clock lanes timings */ for_each_dsi_port(port, intel_dsi->ports) { @@ -608,31 +562,22 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder, if (DISPLAY_VER(dev_priv) == 11) { if (afe_clk(encoder, crtc_state) <= 800000) { for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, - DPHY_TA_TIMING_PARAM(port)); - tmp &= ~TA_SURE_MASK; - tmp |= TA_SURE_OVERRIDE | TA_SURE(0); - intel_de_write(dev_priv, - DPHY_TA_TIMING_PARAM(port), - tmp); + intel_de_rmw(dev_priv, DPHY_TA_TIMING_PARAM(port), + TA_SURE_MASK, + TA_SURE_OVERRIDE | TA_SURE(0)); /* shadow register inside display core */ - tmp = intel_de_read(dev_priv, - DSI_TA_TIMING_PARAM(port)); - tmp &= ~TA_SURE_MASK; - tmp |= TA_SURE_OVERRIDE | TA_SURE(0); - intel_de_write(dev_priv, - DSI_TA_TIMING_PARAM(port), tmp); + intel_de_rmw(dev_priv, DSI_TA_TIMING_PARAM(port), + TA_SURE_MASK, + TA_SURE_OVERRIDE | TA_SURE(0)); } } } if (IS_JSL_EHL(dev_priv)) { - for_each_dsi_phy(phy, intel_dsi->phys) { - tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy)); - tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP; - intel_de_write(dev_priv, ICL_DPHY_CHKN(phy), tmp); - } + for_each_dsi_phy(phy, intel_dsi->phys) + intel_de_rmw(dev_priv, ICL_DPHY_CHKN(phy), + 0, ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP); } } @@ -824,11 +769,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, if (intel_dsi->dual_link) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL2(dsi_trans)); - tmp |= PORT_SYNC_MODE_ENABLE; - intel_de_write(dev_priv, - TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans), + 0, PORT_SYNC_MODE_ENABLE); } /* configure stream splitting */ @@ -958,8 +900,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, /* program TRANS_HTOTAL register */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, HTOTAL(dsi_trans), - (hactive - 1) | ((htotal - 1) << 16)); + intel_de_write(dev_priv, TRANS_HTOTAL(dsi_trans), + HACTIVE(hactive - 1) | HTOTAL(htotal - 1)); } /* TRANS_HSYNC register to be programmed only for video mode */ @@ -981,8 +923,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, HSYNC(dsi_trans), - (hsync_start - 1) | ((hsync_end - 1) << 16)); + intel_de_write(dev_priv, TRANS_HSYNC(dsi_trans), + HSYNC_START(hsync_start - 1) | HSYNC_END(hsync_end - 1)); } } @@ -995,8 +937,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, * struct drm_display_mode. 
* For interlace mode: program required pixel minus 2 */ - intel_de_write(dev_priv, VTOTAL(dsi_trans), - (vactive - 1) | ((vtotal - 1) << 16)); + intel_de_write(dev_priv, TRANS_VTOTAL(dsi_trans), + VACTIVE(vactive - 1) | VTOTAL(vtotal - 1)); } if (vsync_end < vsync_start || vsync_end > vtotal) @@ -1009,8 +951,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, VSYNC(dsi_trans), - (vsync_start - 1) | ((vsync_end - 1) << 16)); + intel_de_write(dev_priv, TRANS_VSYNC(dsi_trans), + VSYNC_START(vsync_start - 1) | VSYNC_END(vsync_end - 1)); } } @@ -1023,17 +965,22 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder, if (is_vid_mode(intel_dsi)) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), + intel_de_write(dev_priv, TRANS_VSYNCSHIFT(dsi_trans), vsync_shift); } } - /* program TRANS_VBLANK register, should be same as vtotal programmed */ + /* + * program TRANS_VBLANK register, should be same as vtotal programmed + * + * FIXME get rid of these local hacks and do it right, + * this will not handle eg. delayed vblank correctly. + */ if (DISPLAY_VER(dev_priv) >= 12) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - intel_de_write(dev_priv, VBLANK(dsi_trans), - (vactive - 1) | ((vtotal - 1) << 16)); + intel_de_write(dev_priv, TRANS_VBLANK(dsi_trans), + VBLANK_START(vactive - 1) | VBLANK_END(vtotal - 1)); } } } @@ -1044,17 +991,14 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; - u32 tmp; for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans)); - tmp |= PIPECONF_ENABLE; - intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp); + intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), 0, TRANSCONF_ENABLE); /* wait for transcoder to be enabled */ - if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans), - PIPECONF_STATE_ENABLE, 10)) + if (intel_de_wait_for_set(dev_priv, TRANSCONF(dsi_trans), + TRANSCONF_STATE_ENABLE, 10)) drm_err(&dev_priv->drm, "DSI transcoder not enabled\n"); } @@ -1067,7 +1011,7 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; - u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul; + u32 hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul; /* * escape clock count calculation: @@ -1087,26 +1031,23 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder, dsi_trans = dsi_port_to_transcoder(port); /* program hst_tx_timeout */ - tmp = intel_de_read(dev_priv, DSI_HSTX_TO(dsi_trans)); - tmp &= ~HSTX_TIMEOUT_VALUE_MASK; - tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout); - intel_de_write(dev_priv, DSI_HSTX_TO(dsi_trans), tmp); + intel_de_rmw(dev_priv, DSI_HSTX_TO(dsi_trans), + HSTX_TIMEOUT_VALUE_MASK, + HSTX_TIMEOUT_VALUE(hs_tx_timeout)); /* FIXME: DSI_CALIB_TO */ /* program lp_rx_host timeout */ - tmp = intel_de_read(dev_priv, DSI_LPRX_HOST_TO(dsi_trans)); - tmp &= ~LPRX_TIMEOUT_VALUE_MASK; - tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout); - intel_de_write(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), tmp); + intel_de_rmw(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), + 
LPRX_TIMEOUT_VALUE_MASK, + LPRX_TIMEOUT_VALUE(lp_rx_timeout)); /* FIXME: DSI_PWAIT_TO */ /* program turn around timeout */ - tmp = intel_de_read(dev_priv, DSI_TA_TO(dsi_trans)); - tmp &= ~TA_TIMEOUT_VALUE_MASK; - tmp |= TA_TIMEOUT_VALUE(ta_timeout); - intel_de_write(dev_priv, DSI_TA_TO(dsi_trans), tmp); + intel_de_rmw(dev_priv, DSI_TA_TO(dsi_trans), + TA_TIMEOUT_VALUE_MASK, + TA_TIMEOUT_VALUE(ta_timeout)); } } @@ -1310,19 +1251,16 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; enum transcoder dsi_trans; - u32 tmp; for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); /* disable transcoder */ - tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans)); - tmp &= ~PIPECONF_ENABLE; - intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp); + intel_de_rmw(dev_priv, TRANSCONF(dsi_trans), TRANSCONF_ENABLE, 0); /* wait for transcoder to be disabled */ - if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans), - PIPECONF_STATE_ENABLE, 50)) + if (intel_de_wait_for_clear(dev_priv, TRANSCONF(dsi_trans), + TRANSCONF_STATE_ENABLE, 50)) drm_err(&dev_priv->drm, "DSI trancoder not disabled\n"); } @@ -1350,11 +1288,9 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) /* disable periodic update mode */ if (is_cmd_mode(intel_dsi)) { - for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, DSI_CMD_FRMCTL(port)); - tmp &= ~DSI_PERIODIC_FRAME_UPDATE_ENABLE; - intel_de_write(dev_priv, DSI_CMD_FRMCTL(port), tmp); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, DSI_CMD_FRMCTL(port), + DSI_PERIODIC_FRAME_UPDATE_ENABLE, 0); } /* put dsi link in ULPS */ @@ -1374,20 +1310,16 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) /* disable ddi function */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans)); - tmp &= ~TRANS_DDI_FUNC_ENABLE; - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp); + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), + TRANS_DDI_FUNC_ENABLE, 0); } /* disable port sync mode if dual link */ if (intel_dsi->dual_link) { for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL2(dsi_trans)); - tmp &= ~PORT_SYNC_MODE_ENABLE; - intel_de_write(dev_priv, - TRANS_DDI_FUNC_CTL2(dsi_trans), tmp); + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL2(dsi_trans), + PORT_SYNC_MODE_ENABLE, 0); } } } @@ -1396,14 +1328,11 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); - u32 tmp; enum port port; gen11_dsi_ungate_clocks(encoder); for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port)); - tmp &= ~DDI_BUF_CTL_ENABLE; - intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp); + intel_de_rmw(dev_priv, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0); if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) & DDI_BUF_IS_IDLE), @@ -1420,7 +1349,6 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 tmp; for_each_dsi_port(port, intel_dsi->ports) { intel_wakeref_t wakeref; @@ -1434,11 +1362,9 @@ 
static void gen11_dsi_disable_io_power(struct intel_encoder *encoder) } /* set mode to DDI */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port)); - tmp &= ~COMBO_PHY_MODE_DSI; - intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, ICL_DSI_IO_MODECTL(port), + COMBO_PHY_MODE_DSI, 0); } static void gen11_dsi_disable(struct intel_atomic_state *state, @@ -1754,8 +1680,8 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, goto out; } - tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans)); - ret = tmp & PIPECONF_ENABLE; + tmp = intel_de_read(dev_priv, TRANSCONF(dsi_trans)); + ret = tmp & TRANSCONF_ENABLE; } out: intel_display_power_put(dev_priv, encoder->power_domain, wakeref); diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 1409bcfb6fd3..719a60e278f3 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -34,11 +34,10 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> -#include "gt/intel_rps.h" - #include "i915_config.h" #include "intel_atomic_plane.h" #include "intel_cdclk.h" +#include "intel_display_rps.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_fb.h" @@ -363,6 +362,7 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, crtc_state->scaled_planes &= ~BIT(plane->id); crtc_state->nv12_planes &= ~BIT(plane->id); crtc_state->c8_planes &= ~BIT(plane->id); + crtc_state->async_flip_planes &= ~BIT(plane->id); crtc_state->data_rate[plane->id] = 0; crtc_state->data_rate_y[plane->id] = 0; crtc_state->rel_data_rate[plane->id] = 0; @@ -582,8 +582,10 @@ static int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_cr intel_plane_is_scaled(new_plane_state)))) new_crtc_state->disable_lp_wm = true; - if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) + if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) { new_crtc_state->do_async_flip = true; + new_crtc_state->async_flip_planes |= BIT(plane->id); + } return 0; } @@ -938,64 +940,6 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, return 0; } -struct wait_rps_boost { - struct wait_queue_entry wait; - - struct drm_crtc *crtc; - struct i915_request *request; -}; - -static int do_rps_boost(struct wait_queue_entry *_wait, - unsigned mode, int sync, void *key) -{ - struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); - struct i915_request *rq = wait->request; - - /* - * If we missed the vblank, but the request is already running it - * is reasonable to assume that it will complete before the next - * vblank without our intervention, so leave RPS alone. 
- */ - if (!i915_request_started(rq)) - intel_rps_boost(rq); - i915_request_put(rq); - - drm_crtc_vblank_put(wait->crtc); - - list_del(&wait->wait.entry); - kfree(wait); - return 1; -} - -static void add_rps_boost_after_vblank(struct drm_crtc *crtc, - struct dma_fence *fence) -{ - struct wait_rps_boost *wait; - - if (!dma_fence_is_i915(fence)) - return; - - if (DISPLAY_VER(to_i915(crtc->dev)) < 6) - return; - - if (drm_crtc_vblank_get(crtc)) - return; - - wait = kmalloc(sizeof(*wait), GFP_KERNEL); - if (!wait) { - drm_crtc_vblank_put(crtc); - return; - } - - wait->request = to_request(dma_fence_get(fence)); - wait->crtc = crtc; - - wait->wait.func = do_rps_boost; - wait->wait.flags = 0; - - add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); -} - /** * intel_prepare_plane_fb - Prepare fb for usage on plane * @_plane: drm plane to prepare for @@ -1086,13 +1030,13 @@ intel_prepare_plane_fb(struct drm_plane *_plane, dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_WRITE); dma_resv_for_each_fence_unlocked(&cursor, fence) { - add_rps_boost_after_vblank(new_plane_state->hw.crtc, - fence); + intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc, + fence); } dma_resv_iter_end(&cursor); } else { - add_rps_boost_after_vblank(new_plane_state->hw.crtc, - new_plane_state->uapi.fence); + intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc, + new_plane_state->uapi.fence); } /* @@ -1103,10 +1047,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane, * that are not quite steady state without resorting to forcing * maximum clocks following a vblank miss (see do_rps_boost()). */ - if (!state->rps_interactive) { - intel_rps_mark_interactive(&to_gt(dev_priv)->rps, true); - state->rps_interactive = true; - } + intel_display_rps_mark_interactive(dev_priv, state, true); return 0; @@ -1137,10 +1078,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane, if (!obj) return; - if (state->rps_interactive) { - intel_rps_mark_interactive(&to_gt(dev_priv)->rps, false); - state->rps_interactive = false; - } + intel_display_rps_mark_interactive(dev_priv, state, false); /* Should only be called after a successful intel_prepare_plane_fb()! 
*/ intel_plane_unpin_fb(old_plane_state); diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index a9335c856644..65151f5dcb15 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -581,8 +581,7 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum pipe pipe = crtc->pipe; + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; unsigned int hblank_early_prog, samples_room; unsigned int val; @@ -592,32 +591,32 @@ static void enable_audio_dsc_wa(struct intel_encoder *encoder, val = intel_de_read(i915, AUD_CONFIG_BE); if (DISPLAY_VER(i915) == 11) - val |= HBLANK_EARLY_ENABLE_ICL(pipe); + val |= HBLANK_EARLY_ENABLE_ICL(cpu_transcoder); else if (DISPLAY_VER(i915) >= 12) - val |= HBLANK_EARLY_ENABLE_TGL(pipe); + val |= HBLANK_EARLY_ENABLE_TGL(cpu_transcoder); if (crtc_state->dsc.compression_enable && crtc_state->hw.adjusted_mode.hdisplay >= 3840 && crtc_state->hw.adjusted_mode.vdisplay >= 2160) { /* Get hblank early enable value required */ - val &= ~HBLANK_START_COUNT_MASK(pipe); + val &= ~HBLANK_START_COUNT_MASK(cpu_transcoder); hblank_early_prog = calc_hblank_early_prog(encoder, crtc_state); if (hblank_early_prog < 32) - val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_32); + val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_32); else if (hblank_early_prog < 64) - val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_64); + val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_64); else if (hblank_early_prog < 96) - val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_96); + val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_96); else - val |= HBLANK_START_COUNT(pipe, HBLANK_START_COUNT_128); + val |= HBLANK_START_COUNT(cpu_transcoder, HBLANK_START_COUNT_128); /* Get samples room value required */ - val &= ~NUMBER_SAMPLES_PER_LINE_MASK(pipe); + val &= ~NUMBER_SAMPLES_PER_LINE_MASK(cpu_transcoder); samples_room = calc_samples_room(crtc_state); if (samples_room < 3) - val |= NUMBER_SAMPLES_PER_LINE(pipe, samples_room); + val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, samples_room); else /* Program 0 i.e "All Samples available in buffer" */ - val |= NUMBER_SAMPLES_PER_LINE(pipe, 0x0); + val |= NUMBER_SAMPLES_PER_LINE(cpu_transcoder, 0x0); } intel_de_write(i915, AUD_CONFIG_BE, val); @@ -812,9 +811,9 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, struct i915_audio_component *acomp = i915->display.audio.component; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_connector *connector = to_intel_connector(conn_state->connector); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; struct intel_audio_state *audio_state; enum port port = encoder->port; - enum pipe pipe = crtc->pipe; if (!crtc_state->has_audio) return; @@ -832,7 +831,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, mutex_lock(&i915->display.audio.mutex); - audio_state = &i915->display.audio.state[pipe]; + audio_state = &i915->display.audio.state[cpu_transcoder]; audio_state->encoder = encoder; BUILD_BUG_ON(sizeof(audio_state->eld) != sizeof(crtc_state->eld)); @@ -842,14 +841,14 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { - /* audio drivers expect pipe 
= -1 to indicate Non-MST cases */ + /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) - pipe = -1; + cpu_transcoder = -1; acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr, - (int)port, (int)pipe); + (int)port, (int)cpu_transcoder); } - intel_lpe_audio_notify(i915, pipe, port, crtc_state->eld, + intel_lpe_audio_notify(i915, cpu_transcoder, port, crtc_state->eld, crtc_state->port_clock, intel_crtc_has_dp_encoder(crtc_state)); } @@ -871,9 +870,9 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, struct i915_audio_component *acomp = i915->display.audio.component; struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; struct intel_audio_state *audio_state; enum port port = encoder->port; - enum pipe pipe = crtc->pipe; if (!old_crtc_state->has_audio) return; @@ -890,7 +889,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, mutex_lock(&i915->display.audio.mutex); - audio_state = &i915->display.audio.state[pipe]; + audio_state = &i915->display.audio.state[cpu_transcoder]; audio_state->encoder = NULL; memset(audio_state->eld, 0, sizeof(audio_state->eld)); @@ -899,27 +898,26 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { - /* audio drivers expect pipe = -1 to indicate Non-MST cases */ + /* audio drivers expect cpu_transcoder = -1 to indicate Non-MST cases */ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) - pipe = -1; + cpu_transcoder = -1; acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr, - (int)port, (int)pipe); + (int)port, (int)cpu_transcoder); } - intel_lpe_audio_notify(i915, pipe, port, NULL, 0, false); + intel_lpe_audio_notify(i915, cpu_transcoder, port, NULL, 0, false); } static void intel_acomp_get_config(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; struct intel_audio_state *audio_state; - enum pipe pipe = crtc->pipe; mutex_lock(&i915->display.audio.mutex); - audio_state = &i915->display.audio.state[pipe]; + audio_state = &i915->display.audio.state[cpu_transcoder]; if (audio_state->encoder) memcpy(crtc_state->eld, audio_state->eld, sizeof(audio_state->eld)); @@ -1147,27 +1145,27 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev) } /* - * get the intel audio state according to the parameter port and pipe - * MST & (pipe >= 0): return the audio.state[pipe].encoder], + * get the intel audio state according to the parameter port and cpu_transcoder + * MST & (cpu_transcoder >= 0): return the audio.state[cpu_transcoder].encoder], * when port is matched - * MST & (pipe < 0): this is invalid - * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry) + * MST & (cpu_transcoder < 0): this is invalid + * Non-MST & (cpu_transcoder >= 0): only cpu_transcoder = 0 (the first device entry) * will get the right intel_encoder with port matched - * Non-MST & (pipe < 0): get the right intel_encoder with port matched + * Non-MST & (cpu_transcoder < 0): get the right intel_encoder with port matched */ static struct intel_audio_state 
*find_audio_state(struct drm_i915_private *i915, - int port, int pipe) + int port, int cpu_transcoder) { /* MST */ - if (pipe >= 0) { + if (cpu_transcoder >= 0) { struct intel_audio_state *audio_state; struct intel_encoder *encoder; if (drm_WARN_ON(&i915->drm, - pipe >= ARRAY_SIZE(i915->display.audio.state))) + cpu_transcoder >= ARRAY_SIZE(i915->display.audio.state))) return NULL; - audio_state = &i915->display.audio.state[pipe]; + audio_state = &i915->display.audio.state[cpu_transcoder]; encoder = audio_state->encoder; if (encoder && encoder->port == port && @@ -1176,14 +1174,14 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915, } /* Non-MST */ - if (pipe > 0) + if (cpu_transcoder > 0) return NULL; - for_each_pipe(i915, pipe) { + for_each_cpu_transcoder(i915, cpu_transcoder) { struct intel_audio_state *audio_state; struct intel_encoder *encoder; - audio_state = &i915->display.audio.state[pipe]; + audio_state = &i915->display.audio.state[cpu_transcoder]; encoder = audio_state->encoder; if (encoder && encoder->port == port && @@ -1195,7 +1193,7 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915, } static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, - int pipe, int rate) + int cpu_transcoder, int rate) { struct drm_i915_private *i915 = kdev_to_i915(kdev); struct i915_audio_component *acomp = i915->display.audio.component; @@ -1211,7 +1209,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, cookie = i915_audio_component_get_power(kdev); mutex_lock(&i915->display.audio.mutex); - audio_state = find_audio_state(i915, port, pipe); + audio_state = find_audio_state(i915, port, cpu_transcoder); if (!audio_state) { drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port)); err = -ENODEV; @@ -1223,7 +1221,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, /* FIXME stop using the legacy crtc pointer */ crtc = to_intel_crtc(encoder->base.crtc); - /* port must be valid now, otherwise the pipe will be invalid */ + /* port must be valid now, otherwise the cpu_transcoder will be invalid */ acomp->aud_sample_rate[port] = rate; /* FIXME get rid of the crtc->config stuff */ @@ -1236,7 +1234,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, } static int i915_audio_component_get_eld(struct device *kdev, int port, - int pipe, bool *enabled, + int cpu_transcoder, bool *enabled, unsigned char *buf, int max_bytes) { struct drm_i915_private *i915 = kdev_to_i915(kdev); @@ -1245,7 +1243,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, mutex_lock(&i915->display.audio.mutex); - audio_state = find_audio_state(i915, port, pipe); + audio_state = find_audio_state(i915, port, cpu_transcoder); if (!audio_state) { drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port)); mutex_unlock(&i915->display.audio.mutex); diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index a4e4b7f79e4d..2e8f17c04522 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -105,7 +105,8 @@ void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state, struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight PWM = 
%d\n", + connector->base.base.id, connector->base.name, val); panel->backlight.pwm_funcs->set(conn_state, val); } @@ -283,7 +284,8 @@ intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] set backlight level = %d\n", + connector->base.base.id, connector->base.name, level); panel->backlight.funcs->set(conn_state, level); } @@ -345,27 +347,24 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta */ tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2); if (tmp & BLM_PWM_ENABLE) { - drm_dbg_kms(&i915->drm, "cpu backlight was enabled, disabling\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight was enabled, disabling\n", + connector->base.base.id, connector->base.name); intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); } - tmp = intel_de_read(i915, BLC_PWM_PCH_CTL1); - intel_de_write(i915, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); + intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0); } static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); - u32 tmp; intel_backlight_set_pwm_level(old_conn_state, val); - tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2); - intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); + intel_de_rmw(i915, BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE, 0); - tmp = intel_de_read(i915, BLC_PWM_PCH_CTL1); - intel_de_write(i915, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); + intel_de_rmw(i915, BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE, 0); } static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) @@ -376,12 +375,10 @@ static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_st static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { struct drm_i915_private *i915 = to_i915(old_conn_state->connector->dev); - u32 tmp; intel_backlight_set_pwm_level(old_conn_state, val); - tmp = intel_de_read(i915, BLC_PWM_CTL2); - intel_de_write(i915, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE); + intel_de_rmw(i915, BLC_PWM_CTL2, BLM_PWM_ENABLE, 0); } static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) @@ -389,12 +386,10 @@ static void vlv_disable_backlight(const struct drm_connector_state *old_conn_sta struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe; - u32 tmp; intel_backlight_set_pwm_level(old_conn_state, val); - tmp = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe)); - intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE); + intel_de_rmw(i915, VLV_BLC_PWM_CTL2(pipe), BLM_PWM_ENABLE, 0); } static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) @@ -402,19 +397,14 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - u32 tmp; intel_backlight_set_pwm_level(old_conn_state, val); - 
tmp = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); - intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), - tmp & ~BXT_BLC_PWM_ENABLE); + intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), + BXT_BLC_PWM_ENABLE, 0); - if (panel->backlight.controller == 1) { - val = intel_de_read(i915, UTIL_PIN_CTL); - val &= ~UTIL_PIN_ENABLE; - intel_de_write(i915, UTIL_PIN_CTL, val); - } + if (panel->backlight.controller == 1) + intel_de_rmw(i915, UTIL_PIN_CTL, UTIL_PIN_ENABLE, 0); } static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) @@ -422,13 +412,11 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - u32 tmp; intel_backlight_set_pwm_level(old_conn_state, val); - tmp = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); - intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), - tmp & ~BXT_BLC_PWM_ENABLE); + intel_de_rmw(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), + BXT_BLC_PWM_ENABLE, 0); } static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) @@ -458,7 +446,8 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state) * another client is not activated. */ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) { - drm_dbg_kms(&i915->drm, "Skipping backlight disable on vga switch\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Skipping backlight disable on vga switch\n", + connector->base.base.id, connector->base.name); return; } @@ -478,30 +467,24 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state, struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - u32 pch_ctl1, pch_ctl2, schicken; + u32 pch_ctl1, pch_ctl2; pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1); if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { - drm_dbg_kms(&i915->drm, "pch backlight already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n", + connector->base.base.id, connector->base.name); pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1); } - if (HAS_PCH_LPT(i915)) { - schicken = intel_de_read(i915, SOUTH_CHICKEN2); - if (panel->backlight.alternate_pwm_increment) - schicken |= LPT_PWM_GRANULARITY; - else - schicken &= ~LPT_PWM_GRANULARITY; - intel_de_write(i915, SOUTH_CHICKEN2, schicken); - } else { - schicken = intel_de_read(i915, SOUTH_CHICKEN1); - if (panel->backlight.alternate_pwm_increment) - schicken |= SPT_PWM_GRANULARITY; - else - schicken &= ~SPT_PWM_GRANULARITY; - intel_de_write(i915, SOUTH_CHICKEN1, schicken); - } + if (HAS_PCH_LPT(i915)) + intel_de_rmw(i915, SOUTH_CHICKEN2, LPT_PWM_GRANULARITY, + panel->backlight.alternate_pwm_increment ? + LPT_PWM_GRANULARITY : 0); + else + intel_de_rmw(i915, SOUTH_CHICKEN1, SPT_PWM_GRANULARITY, + panel->backlight.alternate_pwm_increment ? 
+ SPT_PWM_GRANULARITY : 0); pch_ctl2 = panel->backlight.pwm_level_max << 16; intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2); @@ -533,14 +516,16 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state, cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2); if (cpu_ctl2 & BLM_PWM_ENABLE) { - drm_dbg_kms(&i915->drm, "cpu backlight already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] CPU backlight already enabled\n", + connector->base.base.id, connector->base.name); cpu_ctl2 &= ~BLM_PWM_ENABLE; intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2); } pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1); if (pch_ctl1 & BLM_PCH_PWM_ENABLE) { - drm_dbg_kms(&i915->drm, "pch backlight already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PCH backlight already enabled\n", + connector->base.base.id, connector->base.name); pch_ctl1 &= ~BLM_PCH_PWM_ENABLE; intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1); } @@ -578,7 +563,8 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state, ctl = intel_de_read(i915, BLC_PWM_CTL); if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) { - drm_dbg_kms(&i915->drm, "backlight already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", + connector->base.base.id, connector->base.name); intel_de_write(i915, BLC_PWM_CTL, 0); } @@ -618,7 +604,8 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state, ctl2 = intel_de_read(i915, BLC_PWM_CTL2); if (ctl2 & BLM_PWM_ENABLE) { - drm_dbg_kms(&i915->drm, "backlight already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", + connector->base.base.id, connector->base.name); ctl2 &= ~BLM_PWM_ENABLE; intel_de_write(i915, BLC_PWM_CTL2, ctl2); } @@ -653,7 +640,8 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state, ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe)); if (ctl2 & BLM_PWM_ENABLE) { - drm_dbg_kms(&i915->drm, "backlight already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", + connector->base.base.id, connector->base.name); ctl2 &= ~BLM_PWM_ENABLE; intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2); } @@ -685,7 +673,8 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, if (panel->backlight.controller == 1) { val = intel_de_read(i915, UTIL_PIN_CTL); if (val & UTIL_PIN_ENABLE) { - drm_dbg_kms(&i915->drm, "util pin already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] utility pin already enabled\n", + connector->base.base.id, connector->base.name); val &= ~UTIL_PIN_ENABLE; intel_de_write(i915, UTIL_PIN_CTL, val); } @@ -699,7 +688,8 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller)); if (pwm_ctl & BXT_BLC_PWM_ENABLE) { - drm_dbg_kms(&i915->drm, "backlight already enabled\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] backlight already enabled\n", + connector->base.base.id, connector->base.name); pwm_ctl &= ~BXT_BLC_PWM_ENABLE; intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl); @@ -1270,6 +1260,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus cpu_ctl2 & ~BLM_PWM_ENABLE); } + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n", + connector->base.base.id, connector->base.name); + return 0; } @@ -1297,6 +1291,10 @@ static int pch_setup_backlight(struct intel_connector *connector, enum 
pipe unus panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) && (pch_ctl1 & BLM_PCH_PWM_ENABLE); + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control\n", + connector->base.base.id, connector->base.name); + return 0; } @@ -1335,6 +1333,10 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu panel->backlight.pwm_enabled = val != 0; + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using native PWM for backlight control\n", + connector->base.base.id, connector->base.name); + return 0; } @@ -1364,6 +1366,10 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE; + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using native PWM for backlight control\n", + connector->base.base.id, connector->base.name); + return 0; } @@ -1392,6 +1398,10 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE; + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using native PWM for backlight control (on pipe %c)\n", + connector->base.base.id, connector->base.name, pipe_name(pipe)); + return 0; } @@ -1428,6 +1438,11 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused) panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE; + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using native PWM for backlight control (controller=%d)\n", + connector->base.base.id, connector->base.name, + panel->backlight.controller); + return 0; } @@ -1468,7 +1483,8 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused) */ panel->backlight.controller = connector->panel.vbt.backlight.controller; if (!cnp_backlight_controller_is_valid(i915, panel->backlight.controller)) { - drm_dbg_kms(&i915->drm, "Invalid backlight controller %d, assuming 0\n", + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Invalid backlight controller %d, assuming 0\n", + connector->base.base.id, connector->base.name, panel->backlight.controller); panel->backlight.controller = 0; } @@ -1490,6 +1506,11 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused) panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE; + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using native PCH PWM for backlight control (controller=%d)\n", + connector->base.base.id, connector->base.name, + panel->backlight.controller); + return 0; } @@ -1511,8 +1532,8 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector, } if (IS_ERR(panel->backlight.pwm)) { - drm_err(&i915->drm, "Failed to get the %s PWM chip\n", - desc); + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to get the %s PWM chip\n", + connector->base.base.id, connector->base.name, desc); panel->backlight.pwm = NULL; return -ENODEV; } @@ -1529,7 +1550,8 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector, level = intel_backlight_invert_pwm_level(connector, level); panel->backlight.pwm_enabled = true; - drm_dbg_kms(&i915->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n", + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] PWM already enabled at freq %ld, VBT freq %d, level %d\n", + connector->base.base.id, connector->base.name, NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period, get_vbt_pwm_freq(connector), level); } else { @@ -1538,8 +1560,10 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector, NSEC_PER_SEC / get_vbt_pwm_freq(connector); } - drm_info(&i915->drm, "Using %s PWM 
for LCD backlight control\n", - desc); + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using %s PWM for backlight control\n", + connector->base.base.id, connector->base.name, desc); + return 0; } @@ -1582,8 +1606,9 @@ static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_s static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe) { struct intel_panel *panel = &connector->panel; - int ret = panel->backlight.pwm_funcs->setup(connector, pipe); + int ret; + ret = panel->backlight.pwm_funcs->setup(connector, pipe); if (ret < 0) return ret; @@ -1623,10 +1648,12 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) if (!connector->panel.vbt.backlight.present) { if (intel_has_quirk(i915, QUIRK_BACKLIGHT_PRESENT)) { drm_dbg_kms(&i915->drm, - "no backlight present per VBT, but present per quirk\n"); + "[CONNECTOR:%d:%s] no backlight present per VBT, but present per quirk\n", + connector->base.base.id, connector->base.name); } else { drm_dbg_kms(&i915->drm, - "no backlight present per VBT\n"); + "[CONNECTOR:%d:%s] no backlight present per VBT\n", + connector->base.base.id, connector->base.name); return 0; } } @@ -1642,16 +1669,16 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe) if (ret) { drm_dbg_kms(&i915->drm, - "failed to setup backlight for connector %s\n", - connector->base.name); + "[CONNECTOR:%d:%s] failed to setup backlight\n", + connector->base.base.id, connector->base.name); return ret; } panel->backlight.present = true; drm_dbg_kms(&i915->drm, - "Connector %s backlight initialized, %s, brightness %u/%u\n", - connector->base.name, + "[CONNECTOR:%d:%s] backlight initialized, %s, brightness %u/%u\n", + connector->base.base.id, connector->base.name, str_enabled_disabled(panel->backlight.enabled), panel->backlight.level, panel->backlight.max); diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 04b846440de6..e54febd34ca9 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1084,6 +1084,12 @@ parse_lfp_backlight(struct drm_i915_private *i915, panel->vbt.backlight.min_brightness = entry->min_brightness; } + if (i915->display.vbt.version >= 239) + panel->vbt.backlight.hdr_dpcd_refresh_timeout = + DIV_ROUND_UP(backlight_data->hdr_dpcd_refresh_timeout[panel_type], 100); + else + panel->vbt.backlight.hdr_dpcd_refresh_timeout = 30; + drm_dbg_kms(&i915->drm, "VBT backlight PWM modulation frequency %u Hz, " "active %s, min brightness %u, level %u, controller %u\n", @@ -1202,9 +1208,7 @@ child_device_ptr(const struct bdb_general_definitions *defs, int i) static void parse_sdvo_device_mapping(struct drm_i915_private *i915) { - struct sdvo_device_mapping *mapping; const struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; int count = 0; /* @@ -1217,7 +1221,8 @@ parse_sdvo_device_mapping(struct drm_i915_private *i915) } list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { - child = &devdata->child; + const struct child_device_config *child = &devdata->child; + struct sdvo_device_mapping *mapping; if (child->slave_addr != SLAVE_ADDR1 && child->slave_addr != SLAVE_ADDR2) { @@ -2075,7 +2080,6 @@ parse_compression_parameters(struct drm_i915_private *i915) { const struct bdb_compression_parameters *params; struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; u16 block_size; int index; @@ -2100,7 +2104,7 @@ 
parse_compression_parameters(struct drm_i915_private *i915) } list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { - child = &devdata->child; + const struct child_device_config *child = &devdata->child; if (!child->compression_enable) continue; @@ -2226,14 +2230,14 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin) static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin) { - const struct intel_bios_encoder_data *devdata; enum port port; if (!ddc_pin) return PORT_NONE; for_each_port(port) { - devdata = i915->display.vbt.ports[port]; + const struct intel_bios_encoder_data *devdata = + i915->display.vbt.ports[port]; if (devdata && ddc_pin == devdata->child.ddc_pin) return port; @@ -2292,14 +2296,14 @@ static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata, static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch) { - const struct intel_bios_encoder_data *devdata; enum port port; if (!aux_ch) return PORT_NONE; for_each_port(port) { - devdata = i915->display.vbt.ports[port]; + const struct intel_bios_encoder_data *devdata = + i915->display.vbt.ports[port]; if (devdata && aux_ch == devdata->child.aux_channel) return port; @@ -2522,7 +2526,7 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate) } } -static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata) +int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 216) return 0; @@ -2533,7 +2537,7 @@ static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *de return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate); } -static int _intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata) +int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 244) return 0; @@ -2587,7 +2591,7 @@ intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata) return devdata->child.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; } -static bool +bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata) { return intel_bios_encoder_supports_dp(devdata) && @@ -2600,7 +2604,14 @@ intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata) return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT; } -static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata) +bool +intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata) +{ + return devdata && HAS_LSPCON(devdata->i915) && devdata->child.lspcon; +} + +/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */ +int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 158) return -1; @@ -2608,7 +2619,7 @@ static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *de return devdata->child.hdmi_level_shifter_value; } -static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata) +int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 204) return 0; @@ -2666,37 +2677,37 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata, drm_dbg_kms(&i915->drm, "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d 
DSC:%d\n", port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi, - HAS_LSPCON(i915) && child->lspcon, + intel_bios_encoder_is_lspcon(devdata), supports_typec_usb, supports_tbt, devdata->dsc != NULL); - hdmi_level_shift = _intel_bios_hdmi_level_shift(devdata); + hdmi_level_shift = intel_bios_hdmi_level_shift(devdata); if (hdmi_level_shift >= 0) { drm_dbg_kms(&i915->drm, "Port %c VBT HDMI level shift: %d\n", port_name(port), hdmi_level_shift); } - max_tmds_clock = _intel_bios_max_tmds_clock(devdata); + max_tmds_clock = intel_bios_hdmi_max_tmds_clock(devdata); if (max_tmds_clock) drm_dbg_kms(&i915->drm, "Port %c VBT HDMI max TMDS clock: %d kHz\n", port_name(port), max_tmds_clock); /* I_boost config for SKL and above */ - dp_boost_level = intel_bios_encoder_dp_boost_level(devdata); + dp_boost_level = intel_bios_dp_boost_level(devdata); if (dp_boost_level) drm_dbg_kms(&i915->drm, "Port %c VBT (e)DP boost level: %d\n", port_name(port), dp_boost_level); - hdmi_boost_level = intel_bios_encoder_hdmi_boost_level(devdata); + hdmi_boost_level = intel_bios_hdmi_boost_level(devdata); if (hdmi_boost_level) drm_dbg_kms(&i915->drm, "Port %c VBT HDMI boost level: %d\n", port_name(port), hdmi_boost_level); - dp_max_link_rate = _intel_bios_dp_max_link_rate(devdata); + dp_max_link_rate = intel_bios_dp_max_link_rate(devdata); if (dp_max_link_rate) drm_dbg_kms(&i915->drm, "Port %c VBT DP max link rate: %d\n", @@ -2811,7 +2822,7 @@ parse_general_definitions(struct drm_i915_private *i915) expected_size = 37; } else if (i915->display.vbt.version <= 215) { expected_size = 38; - } else if (i915->display.vbt.version <= 237) { + } else if (i915->display.vbt.version <= 250) { expected_size = 39; } else { expected_size = sizeof(*child); @@ -3306,7 +3317,6 @@ void intel_bios_fini_panel(struct intel_panel *panel) bool intel_bios_is_tv_present(struct drm_i915_private *i915) { const struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; if (!i915->display.vbt.int_tv_support) return false; @@ -3315,7 +3325,7 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915) return true; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { - child = &devdata->child; + const struct child_device_config *child = &devdata->child; /* * If the device type is not TV, continue. @@ -3349,13 +3359,12 @@ bool intel_bios_is_tv_present(struct drm_i915_private *i915) bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) { const struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; if (list_empty(&i915->display.vbt.display_devices)) return true; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { - child = &devdata->child; + const struct child_device_config *child = &devdata->child; /* If the device type is not LFP, continue. * We have to check both the new identifiers as well as the @@ -3397,25 +3406,22 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) */ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port) { + const struct intel_bios_encoder_data *devdata; + if (WARN_ON(!has_ddi_port_info(i915))) return true; - return i915->display.vbt.ports[port]; -} + if (!is_port_valid(i915, port)) + return false; -/** - * intel_bios_is_port_edp - is the device in given port eDP - * @i915: i915 device instance - * @port: port to check - * - * Return true if the device in %port is eDP. 
- */ -bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port) -{ - const struct intel_bios_encoder_data *devdata = - intel_bios_encoder_data_lookup(i915, port); + list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { + const struct child_device_config *child = &devdata->child; + + if (dvo_port_to_port(i915, child->dvo_port) == port) + return true; + } - return devdata && intel_bios_encoder_supports_edp(devdata); + return false; } static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata) @@ -3457,17 +3463,14 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915, enum port *port) { const struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; - u8 dvo_port; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { - child = &devdata->child; + const struct child_device_config *child = &devdata->child; + u8 dvo_port = child->dvo_port; if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) continue; - dvo_port = child->dvo_port; - if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) { drm_dbg_kms(&i915->drm, "VBT has unsupported DSI port %c\n", @@ -3554,10 +3557,9 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder, { struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) { - child = &devdata->child; + const struct child_device_config *child = &devdata->child; if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) continue; @@ -3576,73 +3578,10 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder, return false; } -/** - * intel_bios_is_port_hpd_inverted - is HPD inverted for %port - * @i915: i915 device instance - * @port: port to check - * - * Return true if HPD should be inverted for %port. 
- */ -bool -intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, - enum port port) +static enum aux_ch map_aux_ch(struct drm_i915_private *i915, u8 aux_channel) { - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; - - if (drm_WARN_ON_ONCE(&i915->drm, - !IS_GEMINILAKE(i915) && !IS_BROXTON(i915))) - return false; - - return devdata && devdata->child.hpd_invert; -} - -/** - * intel_bios_is_lspcon_present - if LSPCON is attached on %port - * @i915: i915 device instance - * @port: port to check - * - * Return true if LSPCON is present on this port - */ -bool -intel_bios_is_lspcon_present(const struct drm_i915_private *i915, - enum port port) -{ - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; - - return HAS_LSPCON(i915) && devdata && devdata->child.lspcon; -} - -/** - * intel_bios_is_lane_reversal_needed - if lane reversal needed on port - * @i915: i915 device instance - * @port: port to check - * - * Return true if port requires lane reversal - */ -bool -intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915, - enum port port) -{ - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; - - return devdata && devdata->child.lane_reversal; -} - -enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, - enum port port) -{ - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[port]; enum aux_ch aux_ch; - if (!devdata || !devdata->child.aux_channel) { - aux_ch = (enum aux_ch)port; - - drm_dbg_kms(&i915->drm, - "using AUX %c for port %c (platform default)\n", - aux_ch_name(aux_ch), port_name(port)); - return aux_ch; - } - /* * RKL/DG1 VBT uses PHY based mapping. Combo PHYs A,B,C,D * map to DDI A,B,TC1,TC2 respectively. @@ -3650,7 +3589,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, * ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E * map to DDI A,TC1,TC2,TC3,TC4 respectively. 
*/ - switch (devdata->child.aux_channel) { + switch (aux_channel) { case DP_AUX_A: aux_ch = AUX_CH_A; break; @@ -3711,35 +3650,23 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915, aux_ch = AUX_CH_I; break; default: - MISSING_CASE(devdata->child.aux_channel); + MISSING_CASE(aux_channel); aux_ch = AUX_CH_A; break; } - drm_dbg_kms(&i915->drm, "using AUX %c for port %c (VBT)\n", - aux_ch_name(aux_ch), port_name(port)); - return aux_ch; } -int intel_bios_max_tmds_clock(struct intel_encoder *encoder) +enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; + if (!devdata || !devdata->child.aux_channel) + return AUX_CH_NONE; - return _intel_bios_max_tmds_clock(devdata); + return map_aux_ch(devdata->i915, devdata->child.aux_channel); } -/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */ -int intel_bios_hdmi_level_shift(struct intel_encoder *encoder) -{ - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; - - return _intel_bios_hdmi_level_shift(devdata); -} - -int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata) +int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) return 0; @@ -3747,7 +3674,7 @@ int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devd return translate_iboost(devdata->child.dp_iboost_level); } -int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata) +int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost) return 0; @@ -3755,31 +3682,12 @@ int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *de return translate_iboost(devdata->child.hdmi_iboost_level); } -int intel_bios_dp_max_link_rate(struct intel_encoder *encoder) -{ - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; - - return _intel_bios_dp_max_link_rate(devdata); -} - -int intel_bios_dp_max_lane_count(struct intel_encoder *encoder) -{ - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; - - return _intel_bios_dp_max_lane_count(devdata); -} - -int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder) +int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); - const struct intel_bios_encoder_data *devdata = i915->display.vbt.ports[encoder->port]; - if (!devdata || !devdata->child.ddc_pin) return 0; - return map_ddc_pin(i915, devdata->child.ddc_pin); + return map_ddc_pin(devdata->i915, devdata->child.ddc_pin); } bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata) @@ -3792,6 +3700,16 @@ bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devda return devdata->i915->display.vbt.version >= 209 && devdata->child.tbt; } +bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata) +{ + return devdata && 
devdata->child.lane_reversal; +} + +bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata) +{ + return devdata && devdata->child.hpd_invert; +} + const struct intel_bios_encoder_data * intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port) { diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index d221f784aa88..8a0730c9b48c 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -38,6 +38,7 @@ struct intel_bios_encoder_data; struct intel_crtc_state; struct intel_encoder; struct intel_panel; +enum aux_ch; enum port; enum intel_backlight_type { @@ -248,21 +249,9 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); -bool intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915, - enum port port); -bool intel_bios_is_lspcon_present(const struct drm_i915_private *i915, - enum port port); -bool intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915, - enum port port); -enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port); bool intel_bios_get_dsc_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, int dsc_max_bpc); -int intel_bios_max_tmds_clock(struct intel_encoder *encoder); -int intel_bios_hdmi_level_shift(struct intel_encoder *encoder); -int intel_bios_dp_max_link_rate(struct intel_encoder *encoder); -int intel_bios_dp_max_lane_count(struct intel_encoder *encoder); -int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder); bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port); bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port); @@ -272,9 +261,19 @@ intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port); bool intel_bios_encoder_supports_dvi(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_supports_hdmi(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_supports_dp(const struct intel_bios_encoder_data *devdata); +bool intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devdata); -int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata); -int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *devdata); +bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata); +bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata); +bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata); +enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata); +int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata); +int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata); +int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata); +int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata); +int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data 
*devdata); +int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata); +int intel_bios_hdmi_max_tmds_clock(const struct intel_bios_encoder_data *devdata); #endif /* _INTEL_BIOS_H_ */ diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 1c236f02b380..202321ffbe2a 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -119,6 +119,32 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv, return 0; } +static u16 icl_qgv_points_mask(struct drm_i915_private *i915) +{ + unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; + unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; + u16 qgv_points = 0, psf_points = 0; + + /* + * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects + * it with failure if we try masking any unadvertised points. + * So need to operate only with those returned from PCode. + */ + if (num_qgv_points > 0) + qgv_points = GENMASK(num_qgv_points - 1, 0); + + if (num_psf_gv_points > 0) + psf_points = GENMASK(num_psf_gv_points - 1, 0); + + return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points); +} + +static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask) +{ + return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) & + ICL_PCODE_REQ_QGV_PT_MASK); +} + int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, u32 points_mask) { @@ -136,6 +162,9 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, return ret; } + dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ? + I915_SAGV_ENABLED : I915_SAGV_DISABLED; + return 0; } @@ -965,26 +994,6 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, return 0; } -static u16 icl_qgv_points_mask(struct drm_i915_private *i915) -{ - unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; - unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; - u16 qgv_points = 0, psf_points = 0; - - /* - * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects - * it with failure if we try masking any unadvertised points. - * So need to operate only with those returned from PCode. 
- */ - if (num_qgv_points > 0) - qgv_points = GENMASK(num_qgv_points - 1, 0); - - if (num_psf_gv_points > 0) - psf_points = GENMASK(num_psf_gv_points - 1, 0); - - return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points); -} - static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed) { struct drm_i915_private *i915 = to_i915(state->base.dev); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 7e16b655c833..084a483f9776 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1329,6 +1329,30 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = { {} }; +static const struct intel_cdclk_vals rplu_cdclk_table[] = { + { .refclk = 19200, .cdclk = 172800, .divider = 3, .ratio = 27 }, + { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 }, + { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 }, + { .refclk = 19200, .cdclk = 480000, .divider = 2, .ratio = 50 }, + { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 }, + { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 }, + + { .refclk = 24000, .cdclk = 176000, .divider = 3, .ratio = 22 }, + { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 }, + { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 }, + { .refclk = 24000, .cdclk = 480000, .divider = 2, .ratio = 40 }, + { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 }, + { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 }, + + { .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 }, + { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 }, + { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 }, + { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25 }, + { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 }, + { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 }, + {} +}; + static const struct intel_cdclk_vals dg2_cdclk_table[] = { { .refclk = 38400, .cdclk = 163200, .divider = 2, .ratio = 34, .waveform = 0x8888 }, { .refclk = 38400, .cdclk = 204000, .divider = 2, .ratio = 34, .waveform = 0x9248 }, @@ -1801,6 +1825,13 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91 return true; } +static bool pll_enable_wa_needed(struct drm_i915_private *dev_priv) +{ + return ((IS_DG2(dev_priv) || IS_METEORLAKE(dev_priv)) && + dev_priv->display.cdclk.hw.vco > 0 && + HAS_CDCLK_SQUASH(dev_priv)); +} + static void _bxt_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) @@ -1815,9 +1846,13 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv, !cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) { if (dev_priv->display.cdclk.hw.vco != vco) adlp_cdclk_pll_crawl(dev_priv, vco); - } else if (DISPLAY_VER(dev_priv) >= 11) + } else if (DISPLAY_VER(dev_priv) >= 11) { + /* wa_15010685871: dg2, mtl */ + if (pll_enable_wa_needed(dev_priv)) + dg2_cdclk_squash_program(dev_priv, 0); + icl_cdclk_pll_update(dev_priv, vco); - else + } else bxt_cdclk_pll_update(dev_priv, vco); waveform = cdclk_squash_waveform(dev_priv, cdclk); @@ -3353,6 +3388,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) /* Wa_22011320316:adl-p[a0] */ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) dev_priv->display.cdclk.table = adlp_a_step_cdclk_table; + else if (IS_ADLP_RPLU(dev_priv)) + dev_priv->display.cdclk.table = 
rplu_cdclk_table; else dev_priv->display.cdclk.table = adlp_cdclk_table; } else if (IS_ROCKETLAKE(dev_priv)) { diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 8d97c299e657..a6dd08598233 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -257,7 +257,7 @@ static bool ilk_limited_range(const struct intel_crtc_state *crtc_state) if (DISPLAY_VER(i915) >= 11) return false; - /* pre-hsw have PIPECONF_COLOR_RANGE_SELECT */ + /* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */ if (DISPLAY_VER(i915) < 7 || IS_IVYBRIDGE(i915)) return false; @@ -624,7 +624,7 @@ static void ilk_color_commit_noarm(const struct intel_crtc_state *crtc_state) static void i9xx_color_commit_arm(const struct intel_crtc_state *crtc_state) { - /* update PIPECONF GAMMA_MODE */ + /* update TRANSCONF GAMMA_MODE */ i9xx_set_pipeconf(crtc_state); } @@ -633,7 +633,7 @@ static void ilk_color_commit_arm(const struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); - /* update PIPECONF GAMMA_MODE */ + /* update TRANSCONF GAMMA_MODE */ ilk_set_pipeconf(crtc_state); intel_de_write_fw(i915, PIPE_CSC_MODE(crtc->pipe), @@ -1256,8 +1256,11 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state) break; } - if (crtc_state->dsb) - intel_dsb_commit(crtc_state->dsb); + if (crtc_state->dsb) { + intel_dsb_finish(crtc_state->dsb); + intel_dsb_commit(crtc_state->dsb, false); + intel_dsb_wait(crtc_state->dsb); + } } static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color) @@ -1380,6 +1383,9 @@ void intel_color_prepare_commit(struct intel_crtc_state *crtc_state) /* FIXME DSB has issues loading LUTs, disable it for now */ return; + if (!crtc_state->pre_csc_lut && !crtc_state->post_csc_lut) + return; + crtc_state->dsb = intel_dsb_prepare(crtc, 1024); } @@ -1500,6 +1506,8 @@ intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state) return PTR_ERR(plane_state); new_crtc_state->update_planes |= BIT(plane->id); + new_crtc_state->async_flip_planes = 0; + new_crtc_state->do_async_flip = false; /* plane control register changes blocked by CxSR */ if (HAS_GMCH(i915)) diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 8b870b2dd4f9..922a6d87b553 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -78,14 +78,11 @@ static void icl_set_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy) { const struct icl_procmon *procmon; - u32 val; procmon = icl_get_procmon_ref_values(dev_priv, phy); - val = intel_de_read(dev_priv, ICL_PORT_COMP_DW1(phy)); - val &= ~((0xff << 16) | 0xff); - val |= procmon->dw1; - intel_de_write(dev_priv, ICL_PORT_COMP_DW1(phy), val); + intel_de_rmw(dev_priv, ICL_PORT_COMP_DW1(phy), + (0xff << 16) | 0xff, procmon->dw1); intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9); intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10); @@ -236,8 +233,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2); ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy), - DCC_MODE_SELECT_MASK, - DCC_MODE_SELECT_CONTINUOSLY); + DCC_MODE_SELECT_MASK, RUN_DCC_ONCE); } ret &= icl_verify_procmon_ref_values(dev_priv, phy); @@ -267,7 +263,6 @@ void intel_combo_phy_power_up_lanes(struct 
drm_i915_private *dev_priv, int lane_count, bool lane_reversal) { u8 lane_mask; - u32 val; if (is_dsi) { drm_WARN_ON(&dev_priv->drm, lane_reversal); @@ -308,10 +303,8 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, } } - val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy)); - val &= ~PWR_DOWN_LN_MASK; - val |= lane_mask; - intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val); + intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), + PWR_DOWN_LN_MASK, lane_mask); } static void icl_combo_phys_init(struct drm_i915_private *dev_priv) @@ -360,25 +353,19 @@ skip_phy_misc: val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy)); val &= ~DCC_MODE_SELECT_MASK; - val |= DCC_MODE_SELECT_CONTINUOSLY; + val |= RUN_DCC_ONCE; intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val); } icl_set_procmon_ref_values(dev_priv, phy); - if (phy_is_master(dev_priv, phy)) { - val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy)); - val |= IREFGEN; - intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val); - } - - val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)); - val |= COMP_INIT; - intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val); + if (phy_is_master(dev_priv, phy)) + intel_de_rmw(dev_priv, ICL_PORT_COMP_DW8(phy), + 0, IREFGEN); - val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy)); - val |= CL_POWER_DOWN_ENABLE; - intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val); + intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), 0, COMP_INIT); + intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), + 0, CL_POWER_DOWN_ENABLE); } } @@ -387,8 +374,6 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv) enum phy phy; for_each_combo_phy_reverse(dev_priv, phy) { - u32 val; - if (phy == PHY_A && !icl_combo_phy_verify_state(dev_priv, phy)) { if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) { @@ -410,14 +395,11 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv) if (!has_phy_misc(dev_priv, phy)) goto skip_phy_misc; - val = intel_de_read(dev_priv, ICL_PHY_MISC(phy)); - val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; - intel_de_write(dev_priv, ICL_PHY_MISC(phy), val); + intel_de_rmw(dev_priv, ICL_PHY_MISC(phy), 0, + ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN); skip_phy_misc: - val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)); - val &= ~COMP_INIT; - intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val); + intel_de_rmw(dev_priv, ICL_PORT_COMP_DW0(phy), COMP_INIT, 0); } } diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h index 2ed65193ca19..b0983edccf3f 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h +++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h @@ -90,8 +90,8 @@ #define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy)) #define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy)) #define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy)) -#define DCC_MODE_SELECT_MASK (0x3 << 20) -#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20) +#define DCC_MODE_SELECT_MASK REG_GENMASK(21, 20) +#define RUN_DCC_ONCE REG_FIELD_PREP(DCC_MODE_SELECT_MASK, 0) #define COMMON_KEEPER_EN (1 << 26) #define LATENCY_OPTIM_MASK (0x3 << 2) #define LATENCY_OPTIM_VAL(x) ((x) << 2) diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 7267ffc7f539..8f2ebead0826 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -260,7 +260,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state, 
ilk_pfit_disable(old_crtc_state); - intel_ddi_disable_pipe_clock(old_crtc_state); + intel_ddi_disable_transcoder_clock(old_crtc_state); pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state); @@ -300,7 +300,7 @@ static void hsw_pre_enable_crt(struct intel_atomic_state *state, hsw_fdi_link_train(encoder, crtc_state); - intel_ddi_enable_pipe_clock(encoder, crtc_state); + intel_ddi_enable_transcoder_clock(encoder, crtc_state); } static void hsw_enable_crt(struct intel_atomic_state *state, @@ -678,10 +678,11 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) } static enum drm_connector_status -intel_crt_load_detect(struct intel_crt *crt, u32 pipe) +intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe) { struct drm_device *dev = crt->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + enum transcoder cpu_transcoder = (enum transcoder)pipe; u32 save_bclrpat; u32 save_vtotal; u32 vtotal, vactive; @@ -693,25 +694,25 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n"); - save_bclrpat = intel_de_read(dev_priv, BCLRPAT(pipe)); - save_vtotal = intel_de_read(dev_priv, VTOTAL(pipe)); - vblank = intel_de_read(dev_priv, VBLANK(pipe)); + save_bclrpat = intel_de_read(dev_priv, BCLRPAT(cpu_transcoder)); + save_vtotal = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder)); + vblank = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder)); - vtotal = ((save_vtotal >> 16) & 0xfff) + 1; - vactive = (save_vtotal & 0x7ff) + 1; + vtotal = REG_FIELD_GET(VTOTAL_MASK, save_vtotal) + 1; + vactive = REG_FIELD_GET(VACTIVE_MASK, save_vtotal) + 1; - vblank_start = (vblank & 0xfff) + 1; - vblank_end = ((vblank >> 16) & 0xfff) + 1; + vblank_start = REG_FIELD_GET(VBLANK_START_MASK, vblank) + 1; + vblank_end = REG_FIELD_GET(VBLANK_END_MASK, vblank) + 1; /* Set the border color to purple. 
*/ - intel_de_write(dev_priv, BCLRPAT(pipe), 0x500050); + intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), 0x500050); if (DISPLAY_VER(dev_priv) != 2) { - u32 pipeconf = intel_de_read(dev_priv, PIPECONF(pipe)); + u32 transconf = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); - intel_de_write(dev_priv, PIPECONF(pipe), - pipeconf | PIPECONF_FORCE_BORDER); - intel_de_posting_read(dev_priv, PIPECONF(pipe)); + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), + transconf | TRANSCONF_FORCE_BORDER); + intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); /* Wait for next Vblank to substitue * border color for Color info */ intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); @@ -720,7 +721,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) connector_status_connected : connector_status_disconnected; - intel_de_write(dev_priv, PIPECONF(pipe), pipeconf); + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), transconf); } else { bool restore_vblank = false; int count, detect; @@ -730,12 +731,13 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) * Yes, this will flicker */ if (vblank_start <= vactive && vblank_end >= vtotal) { - u32 vsync = intel_de_read(dev_priv, VSYNC(pipe)); - u32 vsync_start = (vsync & 0xffff) + 1; + u32 vsync = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder)); + u32 vsync_start = REG_FIELD_GET(VSYNC_START_MASK, vsync) + 1; vblank_start = vsync_start; - intel_de_write(dev_priv, VBLANK(pipe), - (vblank_start - 1) | ((vblank_end - 1) << 16)); + intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), + VBLANK_START(vblank_start - 1) | + VBLANK_END(vblank_end - 1)); restore_vblank = true; } /* sample in the vertical border, selecting the larger one */ @@ -766,7 +768,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) /* restore vblank if necessary */ if (restore_vblank) - intel_de_write(dev_priv, VBLANK(pipe), vblank); + intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), vblank); /* * If more than 3/4 of the scanline detected a monitor, * then it is assumed to be present. 
This works even on i830, @@ -779,7 +781,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe) } /* Restore previous settings */ - intel_de_write(dev_priv, BCLRPAT(pipe), save_bclrpat); + intel_de_write(dev_priv, BCLRPAT(cpu_transcoder), save_bclrpat); return status; } diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index 82be0fbe9934..b79a8834559f 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -25,6 +25,7 @@ #include "intel_display_types.h" #include "intel_drrs.h" #include "intel_dsi.h" +#include "intel_fifo_underrun.h" #include "intel_pipe_crc.h" #include "intel_psr.h" #include "intel_sprite.h" @@ -314,6 +315,8 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) } crtc->plane_ids_mask |= BIT(primary->id); + intel_init_fifo_underrun_reporting(dev_priv, crtc, false); + for_each_sprite(dev_priv, pipe, sprite) { struct intel_plane *plane; diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c index 2422d6ef5777..766633566fd6 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c @@ -14,14 +14,16 @@ static void intel_dump_crtc_timings(struct drm_i915_private *i915, const struct drm_display_mode *mode) { - drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, " - "type: 0x%x flags: 0x%x\n", + drm_dbg_kms(&i915->drm, "crtc timings: clock=%d, " + "hd=%d hb=%d-%d hs=%d-%d ht=%d, " + "vd=%d vb=%d-%d vs=%d-%d vt=%d, " + "flags=0x%x\n", mode->crtc_clock, - mode->crtc_hdisplay, mode->crtc_hsync_start, - mode->crtc_hsync_end, mode->crtc_htotal, - mode->crtc_vdisplay, mode->crtc_vsync_start, - mode->crtc_vsync_end, mode->crtc_vtotal, - mode->type, mode->flags); + mode->crtc_hdisplay, mode->crtc_hblank_start, mode->crtc_hblank_end, + mode->crtc_hsync_start, mode->crtc_hsync_end, mode->crtc_htotal, + mode->crtc_vdisplay, mode->crtc_vblank_start, mode->crtc_vblank_end, + mode->crtc_vsync_start, mode->crtc_vsync_end, mode->crtc_vtotal, + mode->flags); } static void diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index d190fa0d393b..c3173c0c2068 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -532,9 +532,10 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane, skl_write_cursor_wm(plane, crtc_state); if (plane_state) - intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0); + intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, + plane_state); else - intel_psr2_disable_plane_sel_fetch(plane, crtc_state); + intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state); if (plane->cursor.base != base || plane->cursor.size != fbc_ctl || diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 254559abedfb..0950bcfea4c0 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -47,6 +47,7 @@ #include "intel_dkl_phy.h" #include "intel_dkl_phy_regs.h" #include "intel_dp.h" +#include "intel_dp_aux.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" #include "intel_dpio_phy.h" @@ -67,6 +68,7 @@ #include "intel_sprite.h" #include "intel_tc.h" #include "intel_vdsc.h" +#include "intel_vdsc_regs.h" #include "intel_vrr.h" #include "skl_scaler.h" #include "skl_universal_plane.h" @@ -89,7 +91,7 @@ 
static int intel_ddi_hdmi_level(struct intel_encoder *encoder, { int level; - level = intel_bios_hdmi_level_shift(encoder); + level = intel_bios_hdmi_level_shift(encoder->devdata); if (level < 0) level = trans->hdmi_default_entry; @@ -126,7 +128,7 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder, /* If we're boosting the current, set bit 31 of trans1 */ if (has_iboost(dev_priv) && - intel_bios_encoder_dp_boost_level(encoder->devdata)) + intel_bios_dp_boost_level(encoder->devdata)) iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; for (i = 0; i < n_entries; i++) { @@ -158,7 +160,7 @@ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, /* If we're boosting the current, set bit 31 of trans1 */ if (has_iboost(dev_priv) && - intel_bios_encoder_hdmi_boost_level(encoder->devdata)) + intel_bios_hdmi_boost_level(encoder->devdata)) iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE; /* Entry 9 is for HDMI: */ @@ -644,19 +646,14 @@ int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder, struct drm_i915_private *dev_priv = to_i915(dev); intel_wakeref_t wakeref; int ret = 0; - u32 tmp; wakeref = intel_display_power_get_if_enabled(dev_priv, intel_encoder->power_domain); if (drm_WARN_ON(dev, !wakeref)) return -ENXIO; - tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); - if (enable) - tmp |= hdcp_mask; - else - tmp &= ~hdcp_mask; - intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), tmp); + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), + hdcp_mask, enable ? hdcp_mask : 0); intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref); return ret; } @@ -948,8 +945,8 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, main_link_aux_power_domain_get(dig_port, crtc_state); } -void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -957,33 +954,34 @@ void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder, enum phy phy = intel_port_to_phy(dev_priv, encoder->port); u32 val; - if (cpu_transcoder != TRANSCODER_EDP) { - if (DISPLAY_VER(dev_priv) >= 13) - val = TGL_TRANS_CLK_SEL_PORT(phy); - else if (DISPLAY_VER(dev_priv) >= 12) - val = TGL_TRANS_CLK_SEL_PORT(encoder->port); - else - val = TRANS_CLK_SEL_PORT(encoder->port); + if (cpu_transcoder == TRANSCODER_EDP) + return; - intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val); - } + if (DISPLAY_VER(dev_priv) >= 13) + val = TGL_TRANS_CLK_SEL_PORT(phy); + else if (DISPLAY_VER(dev_priv) >= 12) + val = TGL_TRANS_CLK_SEL_PORT(encoder->port); + else + val = TRANS_CLK_SEL_PORT(encoder->port); + + intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val); } -void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state) +void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 val; - if (cpu_transcoder != TRANSCODER_EDP) { - if (DISPLAY_VER(dev_priv) >= 12) - intel_de_write(dev_priv, - TRANS_CLK_SEL(cpu_transcoder), - TGL_TRANS_CLK_SEL_DISABLED); - else - intel_de_write(dev_priv, - TRANS_CLK_SEL(cpu_transcoder), - TRANS_CLK_SEL_DISABLED); - } + if (cpu_transcoder == TRANSCODER_EDP) + 
return; + + if (DISPLAY_VER(dev_priv) >= 12) + val = TGL_TRANS_CLK_SEL_DISABLED; + else + val = TRANS_CLK_SEL_DISABLED; + + intel_de_write(dev_priv, TRANS_CLK_SEL(cpu_transcoder), val); } static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, @@ -1009,9 +1007,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, u8 iboost; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - iboost = intel_bios_encoder_hdmi_boost_level(encoder->devdata); + iboost = intel_bios_hdmi_boost_level(encoder->devdata); else - iboost = intel_bios_encoder_dp_boost_level(encoder->devdata); + iboost = intel_bios_dp_boost_level(encoder->devdata); if (iboost == 0) { const struct intel_ddi_buf_trans *trans; @@ -2200,15 +2198,13 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp; - u32 val; if (!crtc_state->fec_enable) return; intel_dp = enc_to_intel_dp(encoder); - val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); - val |= DP_TP_CTL_FEC_ENABLE; - intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); + intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + 0, DP_TP_CTL_FEC_ENABLE); } static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, @@ -2216,15 +2212,13 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp; - u32 val; if (!crtc_state->fec_enable) return; intel_dp = enc_to_intel_dp(encoder); - val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); - val &= ~DP_TP_CTL_FEC_ENABLE; - intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); + intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + DP_TP_CTL_FEC_ENABLE, 0); intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); } @@ -2387,7 +2381,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, * 7.a Configure Transcoder Clock Select to direct the Port clock to the * Transcoder. 
*/ - intel_ddi_enable_pipe_clock(encoder, crtc_state); + intel_ddi_enable_transcoder_clock(encoder, crtc_state); if (HAS_DP20(dev_priv)) intel_ddi_config_transcoder_dp2(encoder, crtc_state); @@ -2514,7 +2508,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_enable_fec(encoder, crtc_state); if (!is_mst) - intel_ddi_enable_pipe_clock(encoder, crtc_state); + intel_ddi_enable_transcoder_clock(encoder, crtc_state); intel_dsc_dp_pps_write(encoder, crtc_state); } @@ -2556,7 +2550,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, icl_program_mg_dp_mode(dig_port, crtc_state); - intel_ddi_enable_pipe_clock(encoder, crtc_state); + intel_ddi_enable_transcoder_clock(encoder, crtc_state); dig_port->set_infoframes(encoder, crtc_state->has_infoframe, @@ -2622,12 +2616,10 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder, wait = true; } - if (intel_crtc_has_dp_encoder(crtc_state)) { - val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); - val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); - val |= DP_TP_CTL_LINK_TRAIN_PAT1; - intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); - } + if (intel_crtc_has_dp_encoder(crtc_state)) + intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK, + DP_TP_CTL_LINK_TRAIN_PAT1); /* Disable FEC in DP Sink */ intel_ddi_disable_fec_state(encoder, crtc_state); @@ -2660,19 +2652,14 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, if (DISPLAY_VER(dev_priv) >= 12) { if (is_mst) { enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; - u32 val; - val = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(cpu_transcoder)); - val &= ~(TGL_TRANS_DDI_PORT_MASK | - TRANS_DDI_MODE_SELECT_MASK); - intel_de_write(dev_priv, - TRANS_DDI_FUNC_CTL(cpu_transcoder), - val); + intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), + TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK, + 0); } } else { if (!is_mst) - intel_ddi_disable_pipe_clock(old_crtc_state); + intel_ddi_disable_transcoder_clock(old_crtc_state); } intel_disable_ddi_buf(encoder, old_crtc_state); @@ -2683,7 +2670,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, * transcoder" */ if (DISPLAY_VER(dev_priv) >= 12) - intel_ddi_disable_pipe_clock(old_crtc_state); + intel_ddi_disable_transcoder_clock(old_crtc_state); intel_pps_vdd_on(intel_dp); intel_pps_off(intel_dp); @@ -2709,12 +2696,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state, old_crtc_state, old_conn_state); if (DISPLAY_VER(dev_priv) < 12) - intel_ddi_disable_pipe_clock(old_crtc_state); + intel_ddi_disable_transcoder_clock(old_crtc_state); intel_disable_ddi_buf(encoder, old_crtc_state); if (DISPLAY_VER(dev_priv) >= 12) - intel_ddi_disable_pipe_clock(old_crtc_state); + intel_ddi_disable_transcoder_clock(old_crtc_state); intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain, @@ -3222,12 +3209,9 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp, struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = encoder->port; - u32 val; - val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); - val &= ~DP_TP_CTL_LINK_TRAIN_MASK; - val |= DP_TP_CTL_LINK_TRAIN_IDLE; - intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); + intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), + 
DP_TP_CTL_LINK_TRAIN_MASK, DP_TP_CTL_LINK_TRAIN_IDLE); /* * Until TGL on PORT_A we can have only eDP in SST mode. There the only @@ -4305,7 +4289,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_bios_encoder_supports_hdmi(devdata); init_dp = intel_bios_encoder_supports_dp(devdata); - if (intel_bios_is_lspcon_present(dev_priv, port)) { + if (intel_bios_encoder_is_lspcon(devdata)) { /* * Lspcon device needs to be driven with DP connector * with special detection sequence. So make sure DP @@ -4500,12 +4484,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_de_read(dev_priv, DDI_BUF_CTL(port)) & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); - if (intel_bios_is_lane_reversal_needed(dev_priv, port)) + if (intel_bios_encoder_lane_reversal(devdata)) dig_port->saved_port_bits |= DDI_BUF_PORT_REVERSAL; dig_port->dp.output_reg = INVALID_MMIO_REG; dig_port->max_lanes = intel_ddi_max_lanes(dig_port); - dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); + dig_port->aux_ch = intel_dp_aux_ch(encoder); if (intel_phy_is_tc(dev_priv, phy)) { bool is_legacy = @@ -4521,35 +4505,21 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) drm_WARN_ON(&dev_priv->drm, port > PORT_I); dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port); - if (init_dp) { - if (!intel_ddi_init_dp_connector(dig_port)) - goto err; - - dig_port->hpd_pulse = intel_dp_hpd_pulse; - - if (dig_port->dp.mso_link_count) - encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv); - } - - /* In theory we don't need the encoder->type check, but leave it just in - * case we have some really bad VBTs... */ - if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { - if (!intel_ddi_init_hdmi_connector(dig_port)) - goto err; - } - if (DISPLAY_VER(dev_priv) >= 11) { if (intel_phy_is_tc(dev_priv, phy)) dig_port->connected = intel_tc_port_connected; else dig_port->connected = lpt_digital_port_connected; - } else if (DISPLAY_VER(dev_priv) >= 8) { - if (port == PORT_A || IS_GEMINILAKE(dev_priv) || - IS_BROXTON(dev_priv)) + } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + dig_port->connected = bdw_digital_port_connected; + } else if (DISPLAY_VER(dev_priv) == 9) { + dig_port->connected = lpt_digital_port_connected; + } else if (IS_BROADWELL(dev_priv)) { + if (port == PORT_A) dig_port->connected = bdw_digital_port_connected; else dig_port->connected = lpt_digital_port_connected; - } else { + } else if (IS_HASWELL(dev_priv)) { if (port == PORT_A) dig_port->connected = hsw_digital_port_connected; else @@ -4558,6 +4528,25 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_infoframe_init(dig_port); + if (init_dp) { + if (!intel_ddi_init_dp_connector(dig_port)) + goto err; + + dig_port->hpd_pulse = intel_dp_hpd_pulse; + + if (dig_port->dp.mso_link_count) + encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv); + } + + /* + * In theory we don't need the encoder->type check, + * but leave it just in case we have some really bad VBTs... 
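The reworked hotplug hunk above replaces the combined DISPLAY_VER() >= 8 test with explicit per-platform branches for picking dig_port->connected. A rough paraphrase of the new selection as a pure function, with the DISPLAY_VER >= 11 TypeC case simplified to the non-TypeC path and all names local to the sketch:

#include <stdbool.h>
#include <stdio.h>

enum detect_hook { HOOK_LPT, HOOK_BDW, HOOK_HSW }; /* stand-ins for the callbacks */

static enum detect_hook pick_connected_hook(int display_ver, bool is_glk_or_bxt,
					    bool is_bdw, bool is_hsw, bool port_is_a)
{
	if (display_ver >= 11)
		return HOOK_LPT;		/* TypeC ports handled separately */
	if (is_glk_or_bxt)
		return HOOK_BDW;
	if (display_ver == 9)
		return HOOK_LPT;
	if (is_bdw)
		return port_is_a ? HOOK_BDW : HOOK_LPT;
	if (is_hsw)
		return port_is_a ? HOOK_HSW : HOOK_LPT;
	return HOOK_LPT;			/* placeholder fallback for the sketch */
}

int main(void)
{
	/* e.g. gen9 port A now takes the LPT live-status path in the new layout */
	printf("%d\n", pick_connected_hook(9, false, false, false, true) == HOOK_LPT);
	return 0;
}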
+ */ + if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { + if (!intel_ddi_init_hdmi_connector(dig_port)) + goto err; + } + return; err: diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index d39076facdce..361f6874dde5 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -52,9 +52,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); void intel_ddi_enable_transcoder_func(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state); -void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state); -void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); +void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state); void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index d3994e2a7d63..edbcb1273ca2 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -55,6 +55,7 @@ #include "i915_reg.h" #include "i915_utils.h" #include "i9xx_plane.h" +#include "i9xx_wm.h" #include "icl_dsi.h" #include "intel_acpi.h" #include "intel_atomic.h" @@ -94,6 +95,7 @@ #include "intel_hotplug.h" #include "intel_hti.h" #include "intel_lvds.h" +#include "intel_lvds_regs.h" #include "intel_modeset_setup.h" #include "intel_modeset_verify.h" #include "intel_overlay.h" @@ -114,8 +116,10 @@ #include "intel_tv.h" #include "intel_vblank.h" #include "intel_vdsc.h" +#include "intel_vdsc_regs.h" #include "intel_vga.h" #include "intel_vrr.h" +#include "intel_wm.h" #include "skl_scaler.h" #include "skl_universal_plane.h" #include "skl_watermark.h" @@ -130,101 +134,6 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state); static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state); -/** - * intel_update_watermarks - update FIFO watermark values based on current modes - * @dev_priv: i915 device - * - * Calculate watermark values for the various WM regs based on current mode - * and plane configuration. - * - * There are several cases to deal with here: - * - normal (i.e. non-self-refresh) - * - self-refresh (SR) mode - * - lines are large relative to FIFO size (buffer can hold up to 2) - * - lines are small relative to FIFO size (buffer can hold more than 2 - * lines), so need to account for TLB latency - * - * The normal calculation is: - * watermark = dotclock * bytes per pixel * latency - * where latency is platform & configuration dependent (we assume pessimal - * values here). - * - * The SR calculation is: - * watermark = (trunc(latency/line time)+1) * surface width * - * bytes per pixel - * where - * line time = htotal / dotclock - * surface width = hdisplay for normal plane and 64 for cursor - * and latency is assumed to be high, as above. - * - * The final value programmed to the register should always be rounded up, - * and include an extra 2 entries to account for clock crossings. 
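The watermark doc comment being removed here spells out two formulas: normal watermark = dotclock * bytes per pixel * latency, and self-refresh watermark = (trunc(latency / line time) + 1) * surface width * bytes per pixel, with line time = htotal / dotclock. A purely illustrative numeric sketch of those formulas, assuming dotclock in kHz and latency in microseconds (this is not the driver's fixed-point code):

#include <math.h>
#include <stdio.h>

/* bytes drained from the FIFO during the memory latency */
static double normal_wm_bytes(double dotclock_khz, int cpp, double latency_us)
{
	return dotclock_khz * latency_us / 1000.0 * cpp;
}

/* self-refresh: (trunc(latency / line_time) + 1) * width * cpp */
static double sr_wm_bytes(double dotclock_khz, int htotal, int width, int cpp,
			  double latency_us)
{
	double line_time_us = htotal * 1000.0 / dotclock_khz;

	return (trunc(latency_us / line_time_us) + 1.0) * width * cpp;
}

int main(void)
{
	/* 1920x1080-ish mode: 148500 kHz dotclock, htotal 2200, 4 bytes/px, 12 us */
	printf("normal: %.0f bytes\n", normal_wm_bytes(148500, 4, 12));	   /* 7128 */
	printf("sr:     %.0f bytes\n", sr_wm_bytes(148500, 2200, 1920, 4, 12)); /* 7680 */
	return 0;
}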
- * - * We don't use the sprite, so we can ignore that. And on Crestline we have - * to set the non-SR watermarks to 8. - */ -void intel_update_watermarks(struct drm_i915_private *dev_priv) -{ - if (dev_priv->display.funcs.wm->update_wm) - dev_priv->display.funcs.wm->update_wm(dev_priv); -} - -static int intel_compute_pipe_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->display.funcs.wm->compute_pipe_wm) - return dev_priv->display.funcs.wm->compute_pipe_wm(state, crtc); - return 0; -} - -static int intel_compute_intermediate_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (!dev_priv->display.funcs.wm->compute_intermediate_wm) - return 0; - if (drm_WARN_ON(&dev_priv->drm, - !dev_priv->display.funcs.wm->compute_pipe_wm)) - return 0; - return dev_priv->display.funcs.wm->compute_intermediate_wm(state, crtc); -} - -static bool intel_initial_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->display.funcs.wm->initial_watermarks) { - dev_priv->display.funcs.wm->initial_watermarks(state, crtc); - return true; - } - return false; -} - -static void intel_atomic_update_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->display.funcs.wm->atomic_update_watermarks) - dev_priv->display.funcs.wm->atomic_update_watermarks(state, crtc); -} - -static void intel_optimize_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->display.funcs.wm->optimize_watermarks) - dev_priv->display.funcs.wm->optimize_watermarks(state, crtc); -} - -static int intel_compute_global_watermarks(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - if (dev_priv->display.funcs.wm->compute_global_watermarks) - return dev_priv->display.funcs.wm->compute_global_watermarks(state); - return 0; -} - /* returns HPLL frequency in kHz */ int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) { @@ -293,11 +202,11 @@ static void skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { if (enable) - intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), - intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS); + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), + 0, DUPS1_GATING_DIS | DUPS2_GATING_DIS); else - intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), - intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS)); + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), + DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0); } /* Wa_2006604312:icl,ehl */ @@ -306,11 +215,9 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { if (enable) - intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), - intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS); + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS); else - intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), - intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS); + intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0); } /* Wa_1604331009:icl,jsl,ehl */ @@ -395,8 +302,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state 
*old_crtc_state) enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; /* Wait for the Pipe State to go off */ - if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder), - PIPECONF_STATE_ENABLE, 100)) + if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder), + TRANSCONF_STATE_ENABLE, 100)) drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n"); } else { intel_wait_for_pipe_scanline_stopped(crtc); @@ -417,8 +324,8 @@ void assert_transcoder(struct drm_i915_private *dev_priv, power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); if (wakeref) { - u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); - cur_state = !!(val & PIPECONF_ENABLE); + u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); + cur_state = !!(val & TRANSCONF_ENABLE); intel_display_power_put(dev_priv, power_domain, wakeref); } else { @@ -530,15 +437,15 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state) intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe), 0, PIPE_ARB_USE_PROG_SLOTS); - reg = PIPECONF(cpu_transcoder); + reg = TRANSCONF(cpu_transcoder); val = intel_de_read(dev_priv, reg); - if (val & PIPECONF_ENABLE) { + if (val & TRANSCONF_ENABLE) { /* we keep both pipes enabled on 830 */ drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv)); return; } - intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE); + intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE); intel_de_posting_read(dev_priv, reg); /* @@ -569,9 +476,9 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) */ assert_planes_disabled(crtc); - reg = PIPECONF(cpu_transcoder); + reg = TRANSCONF(cpu_transcoder); val = intel_de_read(dev_priv, reg); - if ((val & PIPECONF_ENABLE) == 0) + if ((val & TRANSCONF_ENABLE) == 0) return; /* @@ -579,11 +486,11 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) * so best keep it disabled when not needed. */ if (old_crtc_state->double_wide) - val &= ~PIPECONF_DOUBLE_WIDE; + val &= ~TRANSCONF_DOUBLE_WIDE; /* Don't disable pipe or pipe PLLs if needed */ if (!IS_I830(dev_priv)) - val &= ~PIPECONF_ENABLE; + val &= ~TRANSCONF_ENABLE; if (DISPLAY_VER(dev_priv) >= 14) intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), @@ -593,7 +500,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) FECSTALL_DIS_DPTSTREAM_DPTTG, 0); intel_de_write(dev_priv, reg, val); - if ((val & PIPECONF_ENABLE) == 0) + if ((val & TRANSCONF_ENABLE) == 0) intel_wait_for_pipe_off(old_crtc_state); } @@ -1252,7 +1159,8 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - u8 update_planes = new_crtc_state->update_planes; + u8 disable_async_flip_planes = old_crtc_state->async_flip_planes & + ~new_crtc_state->async_flip_planes; const struct intel_plane_state *old_plane_state; struct intel_plane *plane; bool need_vbl_wait = false; @@ -1261,7 +1169,7 @@ static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { if (plane->need_async_flip_disable_wa && plane->pipe == crtc->pipe && - update_planes & BIT(plane->id)) { + disable_async_flip_planes & BIT(plane->id)) { /* * Apart from the async flip bit we want to * preserve the old state for the plane. 
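The async-flip workaround hunk above now derives the affected planes from per-plane masks: only planes that had async flip enabled in the old state and drop it in the new state get the disable WA, instead of keying off the crtc-level uapi.async_flip flag. A bitmask sketch of that selection (plane bits are illustrative):

#include <assert.h>
#include <stdint.h>

#define PLANE_PRIMARY  (1u << 0)
#define PLANE_SPRITE0  (1u << 1)
#define PLANE_SPRITE1  (1u << 2)

/* old & ~new: planes turning async flip off in this commit */
static uint8_t async_flip_disable_planes(uint8_t old_mask, uint8_t new_mask)
{
	return old_mask & ~new_mask;
}

int main(void)
{
	uint8_t old_mask = PLANE_PRIMARY | PLANE_SPRITE0;
	uint8_t new_mask = PLANE_SPRITE0 | PLANE_SPRITE1;

	/* only the primary plane stops async flipping, so only it needs the WA */
	assert(async_flip_disable_planes(old_mask, new_mask) == PLANE_PRIMARY);
	return 0;
}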
@@ -1378,7 +1286,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, * WA for platforms where async address update enable bit * is double buffered and only latched at start of vblank. */ - if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip) + if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes) intel_crtc_async_flip_disable_wa(state, crtc); } @@ -1801,12 +1709,10 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) enum transcoder transcoder = crtc_state->cpu_transcoder; i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) : CHICKEN_TRANS(transcoder); - u32 val; - val = intel_de_read(dev_priv, reg); - val &= ~HSW_FRAME_START_DELAY_MASK; - val |= HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1); - intel_de_write(dev_priv, reg, val); + intel_de_rmw(dev_priv, reg, + HSW_FRAME_START_DELAY_MASK, + HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1)); } static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, @@ -1846,7 +1752,7 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta intel_set_transcoder_timings(crtc_state); if (cpu_transcoder != TRANSCODER_EDP) - intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), + intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder), crtc_state->pixel_multiplier - 1); hsw_set_frame_start_delay(crtc_state); @@ -2819,12 +2725,14 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - u32 crtc_vtotal, crtc_vblank_end; + u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end; int vsyncshift = 0; /* We need to be careful not to changed the adjusted mode, for otherwise * the hw state checker will get angry at the mismatch. */ + crtc_vdisplay = adjusted_mode->crtc_vdisplay; crtc_vtotal = adjusted_mode->crtc_vtotal; + crtc_vblank_start = adjusted_mode->crtc_vblank_start; crtc_vblank_end = adjusted_mode->crtc_vblank_end; if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { @@ -2841,23 +2749,44 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta vsyncshift += adjusted_mode->crtc_htotal; } + /* + * VBLANK_START no longer works on ADL+, instead we must use + * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start. + */ + if (DISPLAY_VER(dev_priv) >= 13) { + intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder), + crtc_vblank_start - crtc_vdisplay); + + /* + * VBLANK_START not used by hw, just clear it + * to make it stand out in register dumps. 
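On DISPLAY_VER >= 13 the timing hunk below programs vblank start as an offset from vdisplay via TRANS_SET_CONTEXT_LATENCY, and the readout path later in this file adds that offset back onto crtc_vdisplay. A minimal sketch of the encode/decode round trip implied by those two hunks:

#include <assert.h>

/* value written to TRANS_SET_CONTEXT_LATENCY on ADL+ */
static int encode_vblank_start(int vdisplay, int vblank_start)
{
	return vblank_start - vdisplay;
}

/* what the transcoder timing readout reconstructs */
static int decode_vblank_start(int vdisplay, int context_latency)
{
	return vdisplay + context_latency;
}

int main(void)
{
	int vdisplay = 2160, vblank_start = 2164;
	int reg = encode_vblank_start(vdisplay, vblank_start);

	assert(reg == 4);
	assert(decode_vblank_start(vdisplay, reg) == vblank_start);
	return 0;
}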
+ */ + crtc_vblank_start = 1; + } + if (DISPLAY_VER(dev_priv) > 3) - intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder), - vsyncshift); - - intel_de_write(dev_priv, HTOTAL(cpu_transcoder), - (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16)); - intel_de_write(dev_priv, HBLANK(cpu_transcoder), - (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16)); - intel_de_write(dev_priv, HSYNC(cpu_transcoder), - (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16)); - - intel_de_write(dev_priv, VTOTAL(cpu_transcoder), - (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16)); - intel_de_write(dev_priv, VBLANK(cpu_transcoder), - (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16)); - intel_de_write(dev_priv, VSYNC(cpu_transcoder), - (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16)); + intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder), + vsyncshift); + + intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder), + HACTIVE(adjusted_mode->crtc_hdisplay - 1) | + HTOTAL(adjusted_mode->crtc_htotal - 1)); + intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder), + HBLANK_START(adjusted_mode->crtc_hblank_start - 1) | + HBLANK_END(adjusted_mode->crtc_hblank_end - 1)); + intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder), + HSYNC_START(adjusted_mode->crtc_hsync_start - 1) | + HSYNC_END(adjusted_mode->crtc_hsync_end - 1)); + + intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder), + VACTIVE(crtc_vdisplay - 1) | + VTOTAL(crtc_vtotal - 1)); + intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), + VBLANK_START(crtc_vblank_start - 1) | + VBLANK_END(crtc_vblank_end - 1)); + intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder), + VSYNC_START(adjusted_mode->crtc_vsync_start - 1) | + VSYNC_END(adjusted_mode->crtc_vsync_end - 1)); /* Workaround: when the EDP input selection is B, the VTOTAL_B must be * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is @@ -2865,9 +2794,9 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta * bits. 
*/ if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && (pipe == PIPE_B || pipe == PIPE_C)) - intel_de_write(dev_priv, VTOTAL(pipe), - intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); - + intel_de_write(dev_priv, TRANS_VTOTAL(pipe), + VACTIVE(crtc_vdisplay - 1) | + VTOTAL(crtc_vtotal - 1)); } static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) @@ -2895,9 +2824,9 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) - return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW; + return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW; else - return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; + return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK; } static void intel_get_transcoder_timings(struct intel_crtc *crtc, @@ -2906,43 +2835,47 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; + struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; u32 tmp; - tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder)); - pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; - pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; + tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder)); + adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1; + adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1; if (!transcoder_is_dsi(cpu_transcoder)) { - tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder)); - pipe_config->hw.adjusted_mode.crtc_hblank_start = - (tmp & 0xffff) + 1; - pipe_config->hw.adjusted_mode.crtc_hblank_end = - ((tmp >> 16) & 0xffff) + 1; + tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder)); + adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1; + adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1; } - tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder)); - pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; - pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; - tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder)); - pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; - pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; + tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder)); + adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1; + adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1; + + tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder)); + adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1; + adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1; + /* FIXME TGL+ DSI transcoders have this! 
*/ if (!transcoder_is_dsi(cpu_transcoder)) { - tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder)); - pipe_config->hw.adjusted_mode.crtc_vblank_start = - (tmp & 0xffff) + 1; - pipe_config->hw.adjusted_mode.crtc_vblank_end = - ((tmp >> 16) & 0xffff) + 1; + tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder)); + adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1; + adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1; } - tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder)); - pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; - pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; + tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder)); + adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1; + adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1; if (intel_pipe_is_interlaced(pipe_config)) { - pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; - pipe_config->hw.adjusted_mode.crtc_vtotal += 1; - pipe_config->hw.adjusted_mode.crtc_vblank_end += 1; + adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE; + adjusted_mode->crtc_vtotal += 1; + adjusted_mode->crtc_vblank_end += 1; } + + if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder)) + adjusted_mode->crtc_vblank_start = + adjusted_mode->crtc_vdisplay + + intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder)); } static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state) @@ -2982,7 +2915,8 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 pipeconf = 0; + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 val = 0; /* * - We keep both pipes enabled on 830 @@ -2990,18 +2924,18 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) * - During fastset the pipe is already enabled and must remain so */ if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state)) - pipeconf |= PIPECONF_ENABLE; + val |= TRANSCONF_ENABLE; if (crtc_state->double_wide) - pipeconf |= PIPECONF_DOUBLE_WIDE; + val |= TRANSCONF_DOUBLE_WIDE; /* only g4x and later have fancy bpc/dither controls */ if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { /* Bspec claims that we can't use dithering for 30bpp pipes. 
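Both the programming and readout hunks above keep the long-standing convention that the timing registers hold "value - 1" in two 16-bit halves (active/start low, total/end high); the change is only that open-coded shifts and 0xffff masks become HACTIVE()/HTOTAL()-style macros and REG_FIELD_GET(). A standalone pack/unpack sketch of that convention (the masks here are local stand-ins):

#include <assert.h>
#include <stdint.h>

#define LO_MASK 0x0000ffffu
#define HI_MASK 0xffff0000u

static uint32_t pack_timings(int active, int total)
{
	return (uint32_t)(active - 1) | ((uint32_t)(total - 1) << 16);
}

static void unpack_timings(uint32_t reg, int *active, int *total)
{
	*active = (int)(reg & LO_MASK) + 1;
	*total = (int)((reg & HI_MASK) >> 16) + 1;
}

int main(void)
{
	int active, total;
	uint32_t htotal_reg = pack_timings(1920, 2200);

	unpack_timings(htotal_reg, &active, &total);
	assert(active == 1920 && total == 2200);
	return 0;
}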
*/ if (crtc_state->dither && crtc_state->pipe_bpp != 30) - pipeconf |= PIPECONF_DITHER_EN | - PIPECONF_DITHER_TYPE_SP; + val |= TRANSCONF_DITHER_EN | + TRANSCONF_DITHER_TYPE_SP; switch (crtc_state->pipe_bpp) { default: @@ -3009,13 +2943,13 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) MISSING_CASE(crtc_state->pipe_bpp); fallthrough; case 18: - pipeconf |= PIPECONF_BPC_6; + val |= TRANSCONF_BPC_6; break; case 24: - pipeconf |= PIPECONF_BPC_8; + val |= TRANSCONF_BPC_8; break; case 30: - pipeconf |= PIPECONF_BPC_10; + val |= TRANSCONF_BPC_10; break; } } @@ -3023,23 +2957,23 @@ void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { if (DISPLAY_VER(dev_priv) < 4 || intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) - pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; + val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION; else - pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; + val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT; } else { - pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE; + val |= TRANSCONF_INTERLACE_PROGRESSIVE; } if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && crtc_state->limited_color_range) - pipeconf |= PIPECONF_COLOR_RANGE_SELECT; + val |= TRANSCONF_COLOR_RANGE_SELECT; - pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); + val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); - pipeconf |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); + val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); - intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf); - intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe)); + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); + intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); } static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) @@ -3198,20 +3132,20 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, ret = false; - tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); - if (!(tmp & PIPECONF_ENABLE)) + tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); + if (!(tmp & TRANSCONF_ENABLE)) goto out; if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - switch (tmp & PIPECONF_BPC_MASK) { - case PIPECONF_BPC_6: + switch (tmp & TRANSCONF_BPC_MASK) { + case TRANSCONF_BPC_6: pipe_config->pipe_bpp = 18; break; - case PIPECONF_BPC_8: + case TRANSCONF_BPC_8: pipe_config->pipe_bpp = 24; break; - case PIPECONF_BPC_10: + case TRANSCONF_BPC_10: pipe_config->pipe_bpp = 30; break; default: @@ -3221,12 +3155,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, } if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - (tmp & PIPECONF_COLOR_RANGE_SELECT)) + (tmp & TRANSCONF_COLOR_RANGE_SELECT)) pipe_config->limited_color_range = true; - pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp); + pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp); - pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1; + pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; if (IS_CHERRYVIEW(dev_priv)) pipe_config->cgm_mode = intel_de_read(dev_priv, @@ -3236,7 +3170,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, intel_color_get_config(pipe_config); if (DISPLAY_VER(dev_priv) < 4) - pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; + pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE; intel_get_transcoder_timings(crtc, 
pipe_config); intel_get_pipe_src_size(crtc, pipe_config); @@ -3306,7 +3240,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 val = 0; /* @@ -3314,7 +3248,7 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) * - During fastset the pipe is already enabled and must remain so */ if (!intel_crtc_needs_modeset(crtc_state)) - val |= PIPECONF_ENABLE; + val |= TRANSCONF_ENABLE; switch (crtc_state->pipe_bpp) { default: @@ -3322,26 +3256,26 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) MISSING_CASE(crtc_state->pipe_bpp); fallthrough; case 18: - val |= PIPECONF_BPC_6; + val |= TRANSCONF_BPC_6; break; case 24: - val |= PIPECONF_BPC_8; + val |= TRANSCONF_BPC_8; break; case 30: - val |= PIPECONF_BPC_10; + val |= TRANSCONF_BPC_10; break; case 36: - val |= PIPECONF_BPC_12; + val |= TRANSCONF_BPC_12; break; } if (crtc_state->dither) - val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; + val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) - val |= PIPECONF_INTERLACE_IF_ID_ILK; + val |= TRANSCONF_INTERLACE_IF_ID_ILK; else - val |= PIPECONF_INTERLACE_PF_PD_ILK; + val |= TRANSCONF_INTERLACE_PF_PD_ILK; /* * This would end up with an odd purple hue over @@ -3352,18 +3286,18 @@ void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) if (crtc_state->limited_color_range && !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) - val |= PIPECONF_COLOR_RANGE_SELECT; + val |= TRANSCONF_COLOR_RANGE_SELECT; if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) - val |= PIPECONF_OUTPUT_COLORSPACE_YUV709; + val |= TRANSCONF_OUTPUT_COLORSPACE_YUV709; - val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); + val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode); - val |= PIPECONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); - val |= PIPECONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay); + val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1); + val |= TRANSCONF_MSA_TIMING_DELAY(crtc_state->msa_timing_delay); - intel_de_write(dev_priv, PIPECONF(pipe), val); - intel_de_posting_read(dev_priv, PIPECONF(pipe)); + intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); + intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); } static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) @@ -3378,22 +3312,22 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) * - During fastset the pipe is already enabled and must remain so */ if (!intel_crtc_needs_modeset(crtc_state)) - val |= PIPECONF_ENABLE; + val |= TRANSCONF_ENABLE; if (IS_HASWELL(dev_priv) && crtc_state->dither) - val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; + val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP; if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) - val |= PIPECONF_INTERLACE_IF_ID_ILK; + val |= TRANSCONF_INTERLACE_IF_ID_ILK; else - val |= PIPECONF_INTERLACE_PF_PD_ILK; + val |= TRANSCONF_INTERLACE_PF_PD_ILK; if (IS_HASWELL(dev_priv) && crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) - val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW; + val |= TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW; - intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val); - intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder)); + 
intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val); + intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder)); } static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) @@ -3618,33 +3552,33 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, pipe_config->shared_dpll = NULL; ret = false; - tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe)); - if (!(tmp & PIPECONF_ENABLE)) + tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); + if (!(tmp & TRANSCONF_ENABLE)) goto out; - switch (tmp & PIPECONF_BPC_MASK) { - case PIPECONF_BPC_6: + switch (tmp & TRANSCONF_BPC_MASK) { + case TRANSCONF_BPC_6: pipe_config->pipe_bpp = 18; break; - case PIPECONF_BPC_8: + case TRANSCONF_BPC_8: pipe_config->pipe_bpp = 24; break; - case PIPECONF_BPC_10: + case TRANSCONF_BPC_10: pipe_config->pipe_bpp = 30; break; - case PIPECONF_BPC_12: + case TRANSCONF_BPC_12: pipe_config->pipe_bpp = 36; break; default: break; } - if (tmp & PIPECONF_COLOR_RANGE_SELECT) + if (tmp & TRANSCONF_COLOR_RANGE_SELECT) pipe_config->limited_color_range = true; - switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) { - case PIPECONF_OUTPUT_COLORSPACE_YUV601: - case PIPECONF_OUTPUT_COLORSPACE_YUV709: + switch (tmp & TRANSCONF_OUTPUT_COLORSPACE_MASK) { + case TRANSCONF_OUTPUT_COLORSPACE_YUV601: + case TRANSCONF_OUTPUT_COLORSPACE_YUV709: pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; break; default: @@ -3652,11 +3586,11 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, break; } - pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp); + pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_ILK, tmp); - pipe_config->framestart_delay = REG_FIELD_GET(PIPECONF_FRAME_START_DELAY_MASK, tmp) + 1; + pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; - pipe_config->msa_timing_delay = REG_FIELD_GET(PIPECONF_MSA_TIMING_DELAY_MASK, tmp); + pipe_config->msa_timing_delay = REG_FIELD_GET(TRANSCONF_MSA_TIMING_DELAY_MASK, tmp); pipe_config->csc_mode = intel_de_read(dev_priv, PIPE_CSC_MODE(crtc->pipe)); @@ -3933,9 +3867,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, pipe_config->pch_pfit.force_thru = true; } - tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder)); + tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); - return tmp & PIPECONF_ENABLE; + return tmp & TRANSCONF_ENABLE; } static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, @@ -4039,9 +3973,9 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, if (IS_HASWELL(dev_priv)) { u32 tmp = intel_de_read(dev_priv, - PIPECONF(pipe_config->cpu_transcoder)); + TRANSCONF(pipe_config->cpu_transcoder)); - if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW) + if (tmp & TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW) pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; else pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; @@ -4090,7 +4024,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, !transcoder_is_dsi(pipe_config->cpu_transcoder)) { pipe_config->pixel_multiplier = intel_de_read(dev_priv, - PIPE_MULT(pipe_config->cpu_transcoder)) + 1; + TRANS_MULT(pipe_config->cpu_transcoder)) + 1; } else { pipe_config->pixel_multiplier = 1; } @@ -5439,6 +5373,20 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv, } } +/* Returns the length up to and including the last differing byte */ +static size_t +memcmp_diff_len(const u8 *a, const u8 *b, size_t len) +{ + int i; + + for (i = len - 1; i >= 0; i--) 
{ + if (a[i] != b[i]) + return i + 1; + } + + return 0; +} + static void pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv, bool fastset, const char *name, @@ -5448,6 +5396,9 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv, if (!drm_debug_enabled(DRM_UT_KMS)) return; + /* only dump up to the last difference */ + len = memcmp_diff_len(a, b, len); + drm_dbg_kms(&dev_priv->drm, "fastset mismatch in %s buffer\n", name); print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE, @@ -5455,6 +5406,9 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv, print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE, 16, 0, b, len, false); } else { + /* only dump up to the last difference */ + len = memcmp_diff_len(a, b, len); + drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name); print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE, 16, 0, a, len, false); @@ -5943,6 +5897,8 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state, return ret; crtc_state->update_planes |= crtc_state->active_planes; + crtc_state->async_flip_planes = 0; + crtc_state->do_async_flip = false; } return 0; @@ -6695,8 +6651,8 @@ static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state) * @dev: drm device * @_state: state to validate */ -static int intel_atomic_check(struct drm_device *dev, - struct drm_atomic_state *_state) +int intel_atomic_check(struct drm_device *dev, + struct drm_atomic_state *_state) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_atomic_state *state = to_intel_atomic_state(_state); @@ -8356,124 +8312,6 @@ void intel_modeset_init_hw(struct drm_i915_private *i915) cdclk_state->logical = cdclk_state->actual = i915->display.cdclk.hw; } -static int sanitize_watermarks_add_affected(struct drm_atomic_state *state) -{ - struct drm_plane *plane; - struct intel_crtc *crtc; - - for_each_intel_crtc(state->dev, crtc) { - struct intel_crtc_state *crtc_state; - - crtc_state = intel_atomic_get_crtc_state(state, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - - if (crtc_state->hw.active) { - /* - * Preserve the inherited flag to avoid - * taking the full modeset path. - */ - crtc_state->inherited = true; - } - } - - drm_for_each_plane(plane, state->dev) { - struct drm_plane_state *plane_state; - - plane_state = drm_atomic_get_plane_state(state, plane); - if (IS_ERR(plane_state)) - return PTR_ERR(plane_state); - } - - return 0; -} - -/* - * Calculate what we think the watermarks should be for the state we've read - * out of the hardware and then immediately program those watermarks so that - * we ensure the hardware settings match our internal state. - * - * We can calculate what we think WM's should be by creating a duplicate of the - * current state (which was constructed during hardware readout) and running it - * through the atomic check code to calculate new watermark values in the - * state object. 
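The new memcmp_diff_len() helper above trims the hex dumps of mismatching buffers to the last byte that actually differs (returning 0 when they match). The same logic in a self-contained form, with a small usage example:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* length up to and including the last differing byte, 0 if equal */
static size_t memcmp_diff_len(const uint8_t *a, const uint8_t *b, size_t len)
{
	size_t i;

	for (i = len; i > 0; i--) {
		if (a[i - 1] != b[i - 1])
			return i;
	}
	return 0;
}

int main(void)
{
	const uint8_t expected[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	const uint8_t found[8]    = { 1, 2, 9, 4, 5, 6, 7, 8 };

	/* only the first 3 bytes would be dumped: byte index 2 is the last difference */
	assert(memcmp_diff_len(expected, found, 8) == 3);
	assert(memcmp_diff_len(expected, expected, 8) == 0);
	return 0;
}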
- */ -static void sanitize_watermarks(struct drm_i915_private *dev_priv) -{ - struct drm_atomic_state *state; - struct intel_atomic_state *intel_state; - struct intel_crtc *crtc; - struct intel_crtc_state *crtc_state; - struct drm_modeset_acquire_ctx ctx; - int ret; - int i; - - /* Only supported on platforms that use atomic watermark design */ - if (!dev_priv->display.funcs.wm->optimize_watermarks) - return; - - state = drm_atomic_state_alloc(&dev_priv->drm); - if (drm_WARN_ON(&dev_priv->drm, !state)) - return; - - intel_state = to_intel_atomic_state(state); - - drm_modeset_acquire_init(&ctx, 0); - -retry: - state->acquire_ctx = &ctx; - - /* - * Hardware readout is the only time we don't want to calculate - * intermediate watermarks (since we don't trust the current - * watermarks). - */ - if (!HAS_GMCH(dev_priv)) - intel_state->skip_intermediate_wm = true; - - ret = sanitize_watermarks_add_affected(state); - if (ret) - goto fail; - - ret = intel_atomic_check(&dev_priv->drm, state); - if (ret) - goto fail; - - /* Write calculated watermark values back */ - for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { - crtc_state->wm.need_postvbl_update = true; - intel_optimize_watermarks(intel_state, crtc); - - to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm; - } - -fail: - if (ret == -EDEADLK) { - drm_atomic_state_clear(state); - drm_modeset_backoff(&ctx); - goto retry; - } - - /* - * If we fail here, it means that the hardware appears to be - * programmed in a way that shouldn't be possible, given our - * understanding of watermark requirements. This might mean a - * mistake in the hardware readout code or a mistake in the - * watermark calculations for a given platform. Raise a WARN - * so that this is noticeable. - * - * If this actually happens, we'll have to just leave the - * BIOS-programmed watermarks untouched and hope for the best. - */ - drm_WARN(&dev_priv->drm, ret, - "Could not determine valid watermarks for inherited state\n"); - - drm_atomic_state_put(state); - - drm_modeset_drop_locks(&ctx); - drm_modeset_acquire_fini(&ctx); -} - static int intel_initial_commit(struct drm_device *dev) { struct drm_atomic_state *state = NULL; @@ -8634,12 +8472,16 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) goto cleanup_bios; /* FIXME: completely on the wrong abstraction layer */ + ret = intel_power_domains_init(i915); + if (ret < 0) + goto cleanup_vga; + intel_power_domains_init_hw(i915, false); if (!HAS_DISPLAY(i915)) return 0; - intel_dmc_ucode_init(i915); + intel_dmc_init(i915); i915->display.wq.modeset = alloc_ordered_workqueue("i915_modeset", 0); i915->display.wq.flip = alloc_workqueue("i915_flip", WQ_HIGHPRI | @@ -8674,8 +8516,9 @@ int intel_modeset_init_noirq(struct drm_i915_private *i915) return 0; cleanup_vga_client_pw_domain_dmc: - intel_dmc_ucode_fini(i915); + intel_dmc_fini(i915); intel_power_domains_driver_remove(i915); +cleanup_vga: intel_vga_unregister(i915); cleanup_bios: intel_bios_driver_remove(i915); @@ -8694,7 +8537,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915) if (!HAS_DISPLAY(i915)) return 0; - intel_init_pm(i915); + intel_wm_init(i915); intel_panel_sanitize_ssc(i915); @@ -8750,7 +8593,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915) * since the watermark calculation done here will use pstate->fb. 
*/ if (!HAS_GMCH(i915)) - sanitize_watermarks(i915); + ilk_wm_sanitize(i915); return 0; } @@ -8791,6 +8634,7 @@ int intel_modeset_init(struct drm_i915_private *i915) void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); + enum transcoder cpu_transcoder = (enum transcoder)pipe; /* 640x480@60Hz, ~25175 kHz */ struct dpll clock = { .m1 = 18, @@ -8817,13 +8661,20 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) PLL_REF_INPUT_DREFCLK | DPLL_VCO_ENABLE; - intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); - intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); - intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); - intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); - intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); - intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); - intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); + intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder), + HACTIVE(640 - 1) | HTOTAL(800 - 1)); + intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder), + HBLANK_START(640 - 1) | HBLANK_END(800 - 1)); + intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder), + HSYNC_START(656 - 1) | HSYNC_END(752 - 1)); + intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder), + VACTIVE(480 - 1) | VTOTAL(525 - 1)); + intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder), + VBLANK_START(480 - 1) | VBLANK_END(525 - 1)); + intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder), + VSYNC_START(490 - 1) | VSYNC_END(492 - 1)); + intel_de_write(dev_priv, PIPESRC(pipe), + PIPESRC_WIDTH(640 - 1) | PIPESRC_HEIGHT(480 - 1)); intel_de_write(dev_priv, FP0(pipe), fp); intel_de_write(dev_priv, FP1(pipe), fp); @@ -8854,8 +8705,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) udelay(150); /* wait for warmup */ } - intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE); - intel_de_posting_read(dev_priv, PIPECONF(pipe)); + intel_de_write(dev_priv, TRANSCONF(pipe), TRANSCONF_ENABLE); + intel_de_posting_read(dev_priv, TRANSCONF(pipe)); intel_wait_for_pipe_scanline_moving(crtc); } @@ -8878,8 +8729,8 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) drm_WARN_ON(&dev_priv->drm, intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK); - intel_de_write(dev_priv, PIPECONF(pipe), 0); - intel_de_posting_read(dev_priv, PIPECONF(pipe)); + intel_de_write(dev_priv, TRANSCONF(pipe), 0); + intel_de_posting_read(dev_priv, TRANSCONF(pipe)); intel_wait_for_pipe_scanline_stopped(crtc); @@ -9000,7 +8851,7 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) /* part #3: call after gem init */ void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915) { - intel_dmc_ucode_fini(i915); + intel_dmc_fini(i915); intel_power_domains_driver_remove(i915); @@ -9051,7 +8902,7 @@ void intel_display_driver_register(struct drm_i915_private *i915) * enabled. We do it last so that the async config cannot run * before the connectors are registered. 
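The i830_enable_pipe() hunk above hard-codes the classic 640x480 mode (htotal 800, vtotal 525, ~25175 kHz dotclock). A quick arithmetic check of those numbers using refresh = dotclock / (htotal * vtotal):

#include <stdio.h>

int main(void)
{
	double dotclock_khz = 25175.0;
	int htotal = 800, vtotal = 525;

	/* 25175000 / (800 * 525) ~= 59.94 Hz, the standard VESA 640x480 timing */
	double refresh_hz = dotclock_khz * 1000.0 / (htotal * vtotal);

	printf("%.2f Hz\n", refresh_hz);
	return 0;
}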
*/ - intel_fbdev_initial_config_async(&i915->drm); + intel_fbdev_initial_config_async(i915); /* * We need to coordinate the hotplugs with the asynchronous diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index cb6f520cc575..50285fb4fcf5 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -32,6 +32,7 @@ enum drm_scaling_filter; struct dpll; +struct drm_atomic_state; struct drm_connector; struct drm_device; struct drm_display_mode; @@ -171,6 +172,8 @@ enum tc_port_mode { }; enum aux_ch { + AUX_CH_NONE = -1, + AUX_CH_A, AUX_CH_B, AUX_CH_C, @@ -394,6 +397,7 @@ enum phy_fia { ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \ (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1)) +int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); int intel_atomic_add_affected_planes(struct intel_atomic_state *state, struct intel_crtc *crtc); u8 intel_calc_active_pipes(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index fb8670aa2932..fdab7bb93a7d 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -19,13 +19,12 @@ #include "intel_cdclk.h" #include "intel_display_limits.h" #include "intel_display_power.h" -#include "intel_dmc.h" #include "intel_dpll_mgr.h" #include "intel_fbc.h" #include "intel_global_state.h" #include "intel_gmbus.h" #include "intel_opregion.h" -#include "intel_pm_types.h" +#include "intel_wm_types.h" struct drm_i915_private; struct drm_property; @@ -40,6 +39,7 @@ struct intel_cdclk_vals; struct intel_color_funcs; struct intel_crtc; struct intel_crtc_state; +struct intel_dmc; struct intel_dpll_funcs; struct intel_dpll_mgr; struct intel_fbdev; @@ -85,6 +85,7 @@ struct intel_wm_funcs { void (*optimize_watermarks)(struct intel_atomic_state *state, struct intel_crtc *crtc); int (*compute_global_watermarks)(struct intel_atomic_state *state); + void (*get_hw_state)(struct drm_i915_private *i915); }; struct intel_audio_state { @@ -102,7 +103,7 @@ struct intel_audio { u32 freq_cntrl; /* current audio state for the audio component hooks */ - struct intel_audio_state state[I915_MAX_PIPES]; + struct intel_audio_state state[I915_MAX_TRANSCODERS]; /* necessary resource sharing with HDMI LPE audio driver. */ struct { @@ -243,7 +244,7 @@ struct intel_wm { struct g4x_wm_values g4x; }; - u8 max_level; + u8 num_levels; /* * Should be held around atomic WM register writing; also @@ -340,6 +341,11 @@ struct intel_display { } dkl; struct { + struct intel_dmc *dmc; + intel_wakeref_t wakeref; + } dmc; + + struct { /* VLV/CHV/BXT/GLK DSI MMIO register base address */ u32 mmio_base; } dsi; @@ -466,7 +472,6 @@ struct intel_display { /* Grouping using named structs. Keep sorted. 
*/ struct intel_audio audio; - struct intel_dmc dmc; struct intel_dpll dpll; struct intel_fbc *fbc[I915_MAX_FBCS]; struct intel_frontbuffer_tracking fb_tracking; diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 7bcd90384a46..1e654ddd0815 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -26,10 +26,9 @@ #include "intel_hdmi.h" #include "intel_hotplug.h" #include "intel_panel.h" -#include "intel_pm.h" #include "intel_psr.h" #include "intel_sprite.h" -#include "skl_watermark.h" +#include "intel_wm.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) { @@ -1282,237 +1281,6 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data) } DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type); -static void wm_latency_show(struct seq_file *m, const u16 wm[8]) -{ - struct drm_i915_private *dev_priv = m->private; - int level; - int num_levels; - - if (IS_CHERRYVIEW(dev_priv)) - num_levels = 3; - else if (IS_VALLEYVIEW(dev_priv)) - num_levels = 1; - else if (IS_G4X(dev_priv)) - num_levels = 3; - else - num_levels = ilk_wm_max_level(dev_priv) + 1; - - drm_modeset_lock_all(&dev_priv->drm); - - for (level = 0; level < num_levels; level++) { - unsigned int latency = wm[level]; - - /* - * - WM1+ latency values in 0.5us units - * - latencies are in us on gen9/vlv/chv - */ - if (DISPLAY_VER(dev_priv) >= 9 || - IS_VALLEYVIEW(dev_priv) || - IS_CHERRYVIEW(dev_priv) || - IS_G4X(dev_priv)) - latency *= 10; - else if (level > 0) - latency *= 5; - - seq_printf(m, "WM%d %u (%u.%u usec)\n", - level, wm[level], latency / 10, latency % 10); - } - - drm_modeset_unlock_all(&dev_priv->drm); -} - -static int pri_wm_latency_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - const u16 *latencies; - - if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->display.wm.skl_latency; - else - latencies = dev_priv->display.wm.pri_latency; - - wm_latency_show(m, latencies); - - return 0; -} - -static int spr_wm_latency_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - const u16 *latencies; - - if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->display.wm.skl_latency; - else - latencies = dev_priv->display.wm.spr_latency; - - wm_latency_show(m, latencies); - - return 0; -} - -static int cur_wm_latency_show(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = m->private; - const u16 *latencies; - - if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->display.wm.skl_latency; - else - latencies = dev_priv->display.wm.cur_latency; - - wm_latency_show(m, latencies); - - return 0; -} - -static int pri_wm_latency_open(struct inode *inode, struct file *file) -{ - struct drm_i915_private *dev_priv = inode->i_private; - - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - return -ENODEV; - - return single_open(file, pri_wm_latency_show, dev_priv); -} - -static int spr_wm_latency_open(struct inode *inode, struct file *file) -{ - struct drm_i915_private *dev_priv = inode->i_private; - - if (HAS_GMCH(dev_priv)) - return -ENODEV; - - return single_open(file, spr_wm_latency_show, dev_priv); -} - -static int cur_wm_latency_open(struct inode *inode, struct file *file) -{ - struct drm_i915_private *dev_priv = inode->i_private; - - if (HAS_GMCH(dev_priv)) - return -ENODEV; - - return single_open(file, cur_wm_latency_show, dev_priv); -} - -static 
ssize_t wm_latency_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp, u16 wm[8]) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - u16 new[8] = { 0 }; - int num_levels; - int level; - int ret; - char tmp[32]; - - if (IS_CHERRYVIEW(dev_priv)) - num_levels = 3; - else if (IS_VALLEYVIEW(dev_priv)) - num_levels = 1; - else if (IS_G4X(dev_priv)) - num_levels = 3; - else - num_levels = ilk_wm_max_level(dev_priv) + 1; - - if (len >= sizeof(tmp)) - return -EINVAL; - - if (copy_from_user(tmp, ubuf, len)) - return -EFAULT; - - tmp[len] = '\0'; - - ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", - &new[0], &new[1], &new[2], &new[3], - &new[4], &new[5], &new[6], &new[7]); - if (ret != num_levels) - return -EINVAL; - - drm_modeset_lock_all(&dev_priv->drm); - - for (level = 0; level < num_levels; level++) - wm[level] = new[level]; - - drm_modeset_unlock_all(&dev_priv->drm); - - return len; -} - - -static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - u16 *latencies; - - if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->display.wm.skl_latency; - else - latencies = dev_priv->display.wm.pri_latency; - - return wm_latency_write(file, ubuf, len, offp, latencies); -} - -static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - u16 *latencies; - - if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->display.wm.skl_latency; - else - latencies = dev_priv->display.wm.spr_latency; - - return wm_latency_write(file, ubuf, len, offp, latencies); -} - -static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, - size_t len, loff_t *offp) -{ - struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - u16 *latencies; - - if (DISPLAY_VER(dev_priv) >= 9) - latencies = dev_priv->display.wm.skl_latency; - else - latencies = dev_priv->display.wm.cur_latency; - - return wm_latency_write(file, ubuf, len, offp, latencies); -} - -static const struct file_operations i915_pri_wm_latency_fops = { - .owner = THIS_MODULE, - .open = pri_wm_latency_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = pri_wm_latency_write -}; - -static const struct file_operations i915_spr_wm_latency_fops = { - .owner = THIS_MODULE, - .open = spr_wm_latency_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = spr_wm_latency_write -}; - -static const struct file_operations i915_cur_wm_latency_fops = { - .owner = THIS_MODULE, - .open = cur_wm_latency_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = cur_wm_latency_write -}; - static ssize_t i915_fifo_underrun_reset_write(struct file *filp, const char __user *ubuf, @@ -1593,9 +1361,6 @@ static const struct { const struct file_operations *fops; } intel_display_debugfs_files[] = { {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops}, - {"i915_pri_wm_latency", &i915_pri_wm_latency_fops}, - {"i915_spr_wm_latency", &i915_spr_wm_latency_fops}, - {"i915_cur_wm_latency", &i915_cur_wm_latency_fops}, {"i915_dp_test_data", &i915_displayport_test_data_fops}, {"i915_dp_test_type", &i915_displayport_test_type_fops}, {"i915_dp_test_active", 
&i915_displayport_test_active_fops}, @@ -1622,7 +1387,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915) intel_dmc_debugfs_register(i915); intel_fbc_debugfs_register(i915); intel_hpd_debugfs_register(i915); - skl_watermark_ipc_debugfs_register(i915); + intel_wm_debugfs_register(i915); } static int i915_panel_show(struct seq_file *m, void *data) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 1a23ecd4623a..f085ae971150 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -264,9 +264,10 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, } static u32 -sanitize_target_dc_state(struct drm_i915_private *dev_priv, +sanitize_target_dc_state(struct drm_i915_private *i915, u32 target_dc_state) { + struct i915_power_domains *power_domains = &i915->display.power.domains; static const u32 states[] = { DC_STATE_EN_UPTO_DC6, DC_STATE_EN_UPTO_DC5, @@ -279,7 +280,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv, if (target_dc_state != states[i]) continue; - if (dev_priv->display.dmc.allowed_dc_mask & target_dc_state) + if (power_domains->allowed_dc_mask & target_dc_state) break; target_dc_state = states[i + 1]; @@ -312,7 +313,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, state = sanitize_target_dc_state(dev_priv, state); - if (state == dev_priv->display.dmc.target_dc_state) + if (state == power_domains->target_dc_state) goto unlock; dc_off_enabled = intel_power_well_is_enabled(dev_priv, power_well); @@ -323,7 +324,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, if (!dc_off_enabled) intel_power_well_enable(dev_priv, power_well); - dev_priv->display.dmc.target_dc_state = state; + power_domains->target_dc_state = state; if (!dc_off_enabled) intel_power_well_disable(dev_priv, power_well); @@ -992,10 +993,10 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) dev_priv->params.disable_power_well = sanitize_disable_power_well_option(dev_priv, dev_priv->params.disable_power_well); - dev_priv->display.dmc.allowed_dc_mask = + power_domains->allowed_dc_mask = get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); - dev_priv->display.dmc.target_dc_state = + power_domains->target_dc_state = sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); mutex_init(&power_domains->lock); @@ -1260,9 +1261,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); if (allow_power_down) { - val = intel_de_read(dev_priv, LCPLL_CTL); - val |= LCPLL_POWER_DOWN_ALLOW; - intel_de_write(dev_priv, LCPLL_CTL, val); + intel_de_rmw(dev_priv, LCPLL_CTL, 0, LCPLL_POWER_DOWN_ALLOW); intel_de_posting_read(dev_priv, LCPLL_CTL); } } @@ -1306,9 +1305,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); if (val & LCPLL_CD_SOURCE_FCLK) { - val = intel_de_read(dev_priv, LCPLL_CTL); - val &= ~LCPLL_CD_SOURCE_FCLK; - intel_de_write(dev_priv, LCPLL_CTL, val); + intel_de_rmw(dev_priv, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0); if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) @@ -1347,15 +1344,11 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) */ static void hsw_enable_pc8(struct drm_i915_private *dev_priv) { - u32 val; - drm_dbg_kms(&dev_priv->drm, "Enabling package 
C8+\n"); - if (HAS_PCH_LPT_LP(dev_priv)) { - val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); - val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; - intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); - } + if (HAS_PCH_LPT_LP(dev_priv)) + intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, + PCH_LP_PARTITION_LEVEL_DISABLE, 0); lpt_disable_clkout_dp(dev_priv); hsw_disable_lcpll(dev_priv, true, true); @@ -1363,25 +1356,21 @@ static void hsw_enable_pc8(struct drm_i915_private *dev_priv) static void hsw_disable_pc8(struct drm_i915_private *dev_priv) { - u32 val; - drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n"); hsw_restore_lcpll(dev_priv); intel_init_pch_refclk(dev_priv); - if (HAS_PCH_LPT_LP(dev_priv)) { - val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); - val |= PCH_LP_PARTITION_LEVEL_DISABLE; - intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); - } + if (HAS_PCH_LPT_LP(dev_priv)) + intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, + 0, PCH_LP_PARTITION_LEVEL_DISABLE); } static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, bool enable) { i915_reg_t reg; - u32 reset_bits, val; + u32 reset_bits; if (IS_IVYBRIDGE(dev_priv)) { reg = GEN7_MSG_CTL; @@ -1394,14 +1383,7 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, if (DISPLAY_VER(dev_priv) >= 14) reset_bits |= MTL_RESET_PICA_HANDSHAKE_EN; - val = intel_de_read(dev_priv, reg); - - if (enable) - val |= reset_bits; - else - val &= ~reset_bits; - - intel_de_write(dev_priv, reg, val); + intel_de_rmw(dev_priv, reg, reset_bits, enable ? reset_bits : 0); } static void skl_display_core_init(struct drm_i915_private *dev_priv, @@ -1580,10 +1562,8 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv) return; if (IS_ALDERLAKE_S(dev_priv) || - IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || - IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || - IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) - /* Wa_1409767108:tgl,dg1,adl-s */ + IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) + /* Wa_1409767108 */ table = wa_1409767108_buddy_page_masks; else table = tgl_buddy_page_masks; @@ -1618,7 +1598,6 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, { struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct i915_power_well *well; - u32 val; gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); @@ -1670,11 +1649,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv, intel_dmc_load_program(dev_priv); /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */ - if (DISPLAY_VER(dev_priv) >= 12) { - val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | - DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR; - intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, val); - } + if (DISPLAY_VER(dev_priv) >= 12) + intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, + DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | + DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR); /* Wa_14011503030:xelpd */ if (DISPLAY_VER(dev_priv) >= 13) @@ -2055,7 +2033,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, * resources as required and also enable deeper system power states * that would be blocked if the firmware was inactive. 
*/ - if (!(i915->display.dmc.allowed_dc_mask & DC_STATE_EN_DC9) && + if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC9) && suspend_mode == I915_DRM_SUSPEND_IDLE && intel_dmc_has_payload(i915)) { intel_display_power_flush_work(i915); @@ -2244,22 +2222,22 @@ void intel_display_power_suspend(struct drm_i915_private *i915) void intel_display_power_resume(struct drm_i915_private *i915) { + struct i915_power_domains *power_domains = &i915->display.power.domains; + if (DISPLAY_VER(i915) >= 11) { bxt_disable_dc9(i915); icl_display_core_init(i915, true); if (intel_dmc_has_payload(i915)) { - if (i915->display.dmc.allowed_dc_mask & - DC_STATE_EN_UPTO_DC6) + if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC6) skl_enable_dc6(i915); - else if (i915->display.dmc.allowed_dc_mask & - DC_STATE_EN_UPTO_DC5) + else if (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5) gen9_enable_dc5(i915); } } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) { bxt_disable_dc9(i915); bxt_display_core_init(i915, true); if (intel_dmc_has_payload(i915) && - (i915->display.dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) + (power_domains->allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) gen9_enable_dc5(i915); } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) { hsw_disable_pc8(i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 2154d900b1aa..8e96be8e6330 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -137,6 +137,10 @@ struct i915_power_domains { bool display_core_suspended; int power_well_count; + u32 dc_state; + u32 target_dc_state; + u32 allowed_dc_mask; + intel_wakeref_t init_wakeref; intel_wakeref_t disable_wakeref; diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 8710dd41ffd4..1676df1dc066 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -333,7 +333,6 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; - u32 val; if (power_well->desc->has_fuses) { enum skl_power_gate pg; @@ -356,9 +355,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); } - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val | HSW_PWR_WELL_CTL_REQ(pw_idx)); + intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx)); hsw_wait_for_power_well_enable(dev_priv, power_well, false); @@ -380,17 +377,27 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv, { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; - u32 val; hsw_power_well_pre_disable(dev_priv, power_well->desc->irq_pipe_mask); - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); + intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0); hsw_wait_for_power_well_disable(dev_priv, power_well); } +static bool intel_port_is_edp(struct drm_i915_private *i915, enum port port) +{ + struct intel_encoder *encoder; + + for_each_intel_encoder(&i915->drm, encoder) { + if (encoder->type == INTEL_OUTPUT_EDP && + encoder->port == port) + 
return true; + } + + return false; +} + static void icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) @@ -398,29 +405,22 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); - u32 val; drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val | HSW_PWR_WELL_CTL_REQ(pw_idx)); + intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx)); - if (DISPLAY_VER(dev_priv) < 12) { - val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); - intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), - val | ICL_LANE_ENABLE_AUX); - } + if (DISPLAY_VER(dev_priv) < 12) + intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), + 0, ICL_LANE_ENABLE_AUX); hsw_wait_for_power_well_enable(dev_priv, power_well, false); /* Display WA #1178: icl */ if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && - !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { - val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx)); - val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; - intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val); - } + !intel_port_is_edp(dev_priv, (enum port)phy)) + intel_de_rmw(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), + 0, ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS); } static void @@ -430,17 +430,12 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well); - u32 val; drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); - val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); - intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), - val & ~ICL_LANE_ENABLE_AUX); + intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0); - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); + intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0); hsw_wait_for_power_well_disable(dev_priv, power_well); } @@ -502,19 +497,15 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, const struct i915_power_well_regs *regs = power_well->desc->ops->regs; bool is_tbt = power_well->desc->is_tc_tbt; bool timeout_expected; - u32 val; icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); - val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch)); - val &= ~DP_AUX_CH_CTL_TBT_IO; - if (is_tbt) - val |= DP_AUX_CH_CTL_TBT_IO; - intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val); + intel_de_rmw(dev_priv, DP_AUX_CH_CTL(aux_ch), + DP_AUX_CH_CTL_TBT_IO, is_tbt ? 
DP_AUX_CH_CTL_TBT_IO : 0); - val = intel_de_read(dev_priv, regs->driver); - intel_de_write(dev_priv, regs->driver, - val | HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx)); + intel_de_rmw(dev_priv, regs->driver, + 0, + HSW_PWR_WELL_CTL_REQ(i915_power_well_instance(power_well)->hsw.idx)); /* * An AUX timeout is expected if the TBT DP tunnel is down, @@ -700,19 +691,20 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) return mask; } -void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) +void gen9_sanitize_dc_state(struct drm_i915_private *i915) { + struct i915_power_domains *power_domains = &i915->display.power.domains; u32 val; - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(i915)) return; - val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv); + val = intel_de_read(i915, DC_STATE_EN) & gen9_dc_mask(i915); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "Resetting DC state tracking from %02x to %02x\n", - dev_priv->display.dmc.dc_state, val); - dev_priv->display.dmc.dc_state = val; + power_domains->dc_state, val); + power_domains->dc_state = val; } /** @@ -740,6 +732,7 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) */ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) { + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; u32 val; u32 mask; @@ -747,8 +740,8 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) return; if (drm_WARN_ON_ONCE(&dev_priv->drm, - state & ~dev_priv->display.dmc.allowed_dc_mask)) - state &= dev_priv->display.dmc.allowed_dc_mask; + state & ~power_domains->allowed_dc_mask)) + state &= power_domains->allowed_dc_mask; val = intel_de_read(dev_priv, DC_STATE_EN); mask = gen9_dc_mask(dev_priv); @@ -756,16 +749,16 @@ void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) val & mask, state); /* Check if DMC is ignoring our DC state requests */ - if ((val & mask) != dev_priv->display.dmc.dc_state) + if ((val & mask) != power_domains->dc_state) drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", - dev_priv->display.dmc.dc_state, val & mask); + power_domains->dc_state, val & mask); val &= ~mask; val |= state; gen9_write_dc_state(dev_priv, val); - dev_priv->display.dmc.dc_state = val & mask; + power_domains->dc_state = val & mask; } static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) @@ -776,12 +769,8 @@ static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) { - u32 val; - drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n"); - val = intel_de_read(dev_priv, DC_STATE_EN); - val &= ~DC_STATE_DC3CO_STATUS; - intel_de_write(dev_priv, DC_STATE_EN, val); + intel_de_rmw(dev_priv, DC_STATE_EN, DC_STATE_DC3CO_STATUS, 0); gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); /* * Delay of 200us DC3CO Exit time B.Spec 49196 @@ -820,8 +809,8 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv) /* Wa Display #1183: skl,kbl,cfl */ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1, - intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, + 0, SKL_SELECT_ALTERNATE_DC_EXIT); gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); } @@ -847,8 +836,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv) /* Wa Display #1183: skl,kbl,cfl */ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) - intel_de_write(dev_priv, 
GEN8_CHICKEN_DCPR_1, - intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT); + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, + 0, SKL_SELECT_ALTERNATE_DC_EXIT); gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); } @@ -957,9 +946,10 @@ static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) void gen9_disable_dc_states(struct drm_i915_private *dev_priv) { + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; struct intel_cdclk_config cdclk_config = {}; - if (dev_priv->display.dmc.target_dc_state == DC_STATE_EN_DC3CO) { + if (power_domains->target_dc_state == DC_STATE_EN_DC3CO) { tgl_disable_dc3co(dev_priv); return; } @@ -998,10 +988,12 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; + if (!intel_dmc_has_payload(dev_priv)) return; - switch (dev_priv->display.dmc.target_dc_state) { + switch (power_domains->target_dc_state) { case DC_STATE_EN_DC3CO: tgl_enable_dc3co(dev_priv); break; @@ -1033,9 +1025,9 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv, static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0) + if ((intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE) == 0) i830_enable_pipe(dev_priv, PIPE_A); - if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0) + if ((intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE) == 0) i830_enable_pipe(dev_priv, PIPE_B); } @@ -1049,8 +1041,8 @@ static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv, static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE && - intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE; + return intel_de_read(dev_priv, TRANSCONF(PIPE_A)) & TRANSCONF_ENABLE && + intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE; } static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv, @@ -1149,18 +1141,14 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) { - u32 val; - /* * On driver load, a pipe may be active and driving a DSI display. * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck * (and never recovering) in this case. intel_dsi_post_disable() will * clear it when we turn off the display. */ - val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); - val &= DPOUNIT_CLOCK_GATE_DISABLE; - val |= VRHUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); + intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), + ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE); /* * Disable trickle feed and enable pnd deadline calculation @@ -1276,8 +1264,7 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv, * both PLLs disabled, or we risk losing DPIO and PLL * synchronization. 
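Most of the mechanical churn in the power-domain and power-well hunks above is one transform: an open-coded read, OR / AND-NOT, write sequence becomes a single intel_de_rmw(i915, reg, clear, set) call. The sketch below models those clear/set semantics with a plain array standing in for MMIO space; the helper name de_rmw() and the bit positions are stand-ins for illustration only.

        /* Standalone model of the read-modify-write conversion used throughout
         * this series: bits in 'clear' are cleared, then bits in 'set' are set,
         * and the result is written back.  An array stands in for MMIO space.
         */
        #include <stdio.h>
        #include <stdint.h>

        static uint32_t fake_mmio[16];

        static uint32_t de_read(unsigned int reg)
        {
                return fake_mmio[reg];
        }

        static void de_write(unsigned int reg, uint32_t val)
        {
                fake_mmio[reg] = val;
        }

        static void de_rmw(unsigned int reg, uint32_t clear, uint32_t set)
        {
                de_write(reg, (de_read(reg) & ~clear) | set);
        }

        #define LCPLL_POWER_DOWN_ALLOW  (1u << 22)   /* placeholder bit */
        #define LCPLL_CD_SOURCE_FCLK    (1u << 21)   /* placeholder bit */

        int main(void)
        {
                fake_mmio[0] = LCPLL_CD_SOURCE_FCLK;

                /* "val |= LCPLL_POWER_DOWN_ALLOW" becomes rmw(reg, 0, bit) */
                de_rmw(0, 0, LCPLL_POWER_DOWN_ALLOW);

                /* "val &= ~LCPLL_CD_SOURCE_FCLK" becomes rmw(reg, bit, 0) */
                de_rmw(0, LCPLL_CD_SOURCE_FCLK, 0);

                printf("0x%08x\n", de_read(0));   /* only POWER_DOWN_ALLOW set */
                return 0;
        }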
*/ - intel_de_write(dev_priv, DPIO_CTL, - intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST); + intel_de_rmw(dev_priv, DPIO_CTL, 0, DPIO_CMNRST); } static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, @@ -1289,8 +1276,7 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, assert_pll_disabled(dev_priv, pipe); /* Assert common reset */ - intel_de_write(dev_priv, DPIO_CTL, - intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST); + intel_de_rmw(dev_priv, DPIO_CTL, DPIO_CMNRST, 0); vlv_set_power_well(dev_priv, power_well, false); } diff --git a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h index 02605418ff08..755c1ea8225c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_reg_defs.h +++ b/drivers/gpu/drm/i915/display/intel_display_reg_defs.h @@ -13,7 +13,7 @@ #define VLV_DISPLAY_BASE 0x180000 /* - * Named helper wrappers around _PICK_EVEN() and _PICK(). + * Named helper wrappers around _PICK_EVEN() and _PICK_EVEN_2RANGES(). */ #define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b) #define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b) @@ -29,12 +29,8 @@ #define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b)) #define _MMIO_PHY(phy, a, b) _MMIO(_PHY(phy, a, b)) -#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__) - -#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) -#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c)) -#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c)) -#define _MMIO_PLL3(pll, ...) _MMIO(_PICK(pll, __VA_ARGS__)) +#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c)) +#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK_EVEN_2RANGES(pipe, 1, a, a, b, c)) /* * Device info offset array based helpers for groups of registers with unevenly diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c new file mode 100644 index 000000000000..918d0327169a --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_rps.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <drm/drm_crtc.h> +#include <drm/drm_vblank.h> + +#include "gt/intel_rps.h" +#include "i915_drv.h" +#include "intel_display_rps.h" +#include "intel_display_types.h" + +struct wait_rps_boost { + struct wait_queue_entry wait; + + struct drm_crtc *crtc; + struct i915_request *request; +}; + +static int do_rps_boost(struct wait_queue_entry *_wait, + unsigned mode, int sync, void *key) +{ + struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); + struct i915_request *rq = wait->request; + + /* + * If we missed the vblank, but the request is already running it + * is reasonable to assume that it will complete before the next + * vblank without our intervention, so leave RPS alone. 
+ */ + if (!i915_request_started(rq)) + intel_rps_boost(rq); + i915_request_put(rq); + + drm_crtc_vblank_put(wait->crtc); + + list_del(&wait->wait.entry); + kfree(wait); + return 1; +} + +void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc, + struct dma_fence *fence) +{ + struct wait_rps_boost *wait; + + if (!dma_fence_is_i915(fence)) + return; + + if (DISPLAY_VER(to_i915(crtc->dev)) < 6) + return; + + if (drm_crtc_vblank_get(crtc)) + return; + + wait = kmalloc(sizeof(*wait), GFP_KERNEL); + if (!wait) { + drm_crtc_vblank_put(crtc); + return; + } + + wait->request = to_request(dma_fence_get(fence)); + wait->crtc = crtc; + + wait->wait.func = do_rps_boost; + wait->wait.flags = 0; + + add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); +} + +void intel_display_rps_mark_interactive(struct drm_i915_private *i915, + struct intel_atomic_state *state, + bool interactive) +{ + if (state->rps_interactive == interactive) + return; + + intel_rps_mark_interactive(&to_gt(i915)->rps, interactive); + state->rps_interactive = interactive; +} diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.h b/drivers/gpu/drm/i915/display/intel_display_rps.h new file mode 100644 index 000000000000..e19009c2371a --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_rps.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_DISPLAY_RPS_H__ +#define __INTEL_DISPLAY_RPS_H__ + +#include <linux/types.h> + +struct dma_fence; +struct drm_crtc; +struct drm_i915_private; +struct intel_atomic_state; + +void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc, + struct dma_fence *fence); +void intel_display_rps_mark_interactive(struct drm_i915_private *i915, + struct intel_atomic_state *state, + bool interactive); + +#endif /* __INTEL_DISPLAY_RPS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 54c517ca9632..c32bfba06ca1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -53,7 +53,7 @@ #include "intel_display_limits.h" #include "intel_display_power.h" #include "intel_dpll_mgr.h" -#include "intel_pm_types.h" +#include "intel_wm_types.h" struct drm_printer; struct __intel_global_objs_state; @@ -326,6 +326,7 @@ struct intel_vbt_panel_data { struct { u16 pwm_freq_hz; u16 brightness_precision_bits; + u16 hdr_dpcd_refresh_timeout; bool present; bool active_low_pwm; u8 min_brightness; /* min_brightness/255 of max */ @@ -1249,6 +1250,9 @@ struct intel_crtc_state { /* bitmask of planes that will be updated during the commit */ u8 update_planes; + /* bitmask of planes with async flip active */ + u8 async_flip_planes; + u8 framestart_delay; /* 1-4 */ u8 msa_timing_delay; /* 0-3 */ @@ -1502,17 +1506,6 @@ struct intel_watermark_params { u8 cacheline_size; }; -struct cxsr_latency { - bool is_desktop : 1; - bool is_ddr3 : 1; - u16 fsb_freq; - u16 mem_freq; - u16 display_sr; - u16 display_hpll_disable; - u16 cursor_sr; - u16 cursor_hpll_disable; -}; - #define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base) #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) #define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, uapi) @@ -1631,6 +1624,8 @@ struct intel_psr { bool psr2_sel_fetch_cff_enabled; bool req_psr2_sdp_prior_scanline; u8 sink_sync_latency; + u8 io_wake_lines; + u8 fast_wake_lines; ktime_t last_entry_attempt; ktime_t 
last_exit; bool sink_not_reliable; diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 257aa2b7cf20..6b162f77340e 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -38,6 +38,39 @@ * low-power state and comes back to normal. */ +enum intel_dmc_id { + DMC_FW_MAIN = 0, + DMC_FW_PIPEA, + DMC_FW_PIPEB, + DMC_FW_PIPEC, + DMC_FW_PIPED, + DMC_FW_MAX +}; + +struct intel_dmc { + struct drm_i915_private *i915; + struct work_struct work; + const char *fw_path; + u32 max_fw_size; /* bytes */ + u32 version; + struct dmc_fw_info { + u32 mmio_count; + i915_reg_t mmioaddr[20]; + u32 mmiodata[20]; + u32 dmc_offset; + u32 start_mmioaddr; + u32 dmc_fw_size; /*dwords */ + u32 *payload; + bool present; + } dmc_info[DMC_FW_MAX]; +}; + +/* Note: This may be NULL. */ +static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915) +{ + return i915->display.dmc.dmc; +} + #define DMC_VERSION(major, minor) ((major) << 16 | (minor)) #define DMC_VERSION_MAJOR(version) ((version) >> 16) #define DMC_VERSION_MINOR(version) ((version) & 0xffff) @@ -249,9 +282,19 @@ struct stepping_info { char substepping; }; -static bool has_dmc_id_fw(struct drm_i915_private *i915, int dmc_id) +#define for_each_dmc_id(__dmc_id) \ + for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++) + +static bool is_valid_dmc_id(enum intel_dmc_id dmc_id) { - return i915->display.dmc.dmc_info[dmc_id].payload; + return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX; +} + +static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id) +{ + struct intel_dmc *dmc = i915_to_dmc(i915); + + return dmc && dmc->dmc_info[dmc_id].payload; } bool intel_dmc_has_payload(struct drm_i915_private *i915) @@ -270,12 +313,12 @@ intel_get_stepping_info(struct drm_i915_private *i915, return si; } -static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) +static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915) { /* The below bit doesn't need to be cleared ever afterwards */ - intel_de_rmw(dev_priv, DC_STATE_DEBUG, 0, + intel_de_rmw(i915, DC_STATE_DEBUG, 0, DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP); - intel_de_posting_read(dev_priv, DC_STATE_DEBUG); + intel_de_posting_read(i915, DC_STATE_DEBUG); } static void disable_event_handler(struct drm_i915_private *i915, @@ -315,26 +358,23 @@ disable_flip_queue_event(struct drm_i915_private *i915, } static bool -get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id, +get_flip_queue_event_regs(struct drm_i915_private *i915, enum intel_dmc_id dmc_id, i915_reg_t *ctl_reg, i915_reg_t *htp_reg) { - switch (dmc_id) { - case DMC_FW_MAIN: + if (dmc_id == DMC_FW_MAIN) { if (DISPLAY_VER(i915) == 12) { *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3); *htp_reg = DMC_EVT_HTP(i915, dmc_id, 3); return true; } - break; - case DMC_FW_PIPEA ... DMC_FW_PIPED: + } else if (dmc_id >= DMC_FW_PIPEA && dmc_id <= DMC_FW_PIPED) { if (IS_DG2(i915)) { *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2); *htp_reg = DMC_EVT_HTP(i915, dmc_id, 2); return true; } - break; } return false; @@ -343,13 +383,13 @@ get_flip_queue_event_regs(struct drm_i915_private *i915, int dmc_id, static void disable_all_flip_queue_events(struct drm_i915_private *i915) { - int dmc_id; + enum intel_dmc_id dmc_id; /* TODO: check if the following applies to all D13+ platforms. 
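Stepping back to the new intel_display_rps.c above: its do_rps_boost() vblank callback only boosts GPU frequency when the flip's request has not started running by the time the vblank fires, on the assumption that an already-running request will finish on its own. A standalone model of just that decision follows; the fake_request type and boost counter are stand-ins, not the i915 request API.

        /* Standalone model of the do_rps_boost() decision: by the time the
         * vblank callback runs, a request that has already started is assumed
         * to finish before the next vblank, so only a not-yet-started request
         * gets a frequency boost.
         */
        #include <stdbool.h>
        #include <stdio.h>

        struct fake_request {
                bool started;
        };

        static int boost_count;

        static void rps_boost(struct fake_request *rq)
        {
                (void)rq;
                boost_count++;
        }

        /* Called when the vblank we were waiting for has fired. */
        static void on_vblank(struct fake_request *rq)
        {
                if (!rq->started)
                        rps_boost(rq);
                /* else: already running, leave RPS alone */
        }

        int main(void)
        {
                struct fake_request idle = { .started = false };
                struct fake_request running = { .started = true };

                on_vblank(&idle);     /* missed the vblank: boost */
                on_vblank(&running);  /* already running: no boost */

                printf("boosts issued: %d\n", boost_count);   /* prints 1 */
                return 0;
        }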
*/ if (!IS_DG2(i915) && !IS_TIGERLAKE(i915)) return; - for (dmc_id = 0; dmc_id < DMC_FW_MAX; dmc_id++) { + for_each_dmc_id(dmc_id) { i915_reg_t ctl_reg; i915_reg_t htp_reg; @@ -365,22 +405,22 @@ disable_all_flip_queue_events(struct drm_i915_private *i915) static void disable_all_event_handlers(struct drm_i915_private *i915) { - int id; + enum intel_dmc_id dmc_id; /* TODO: disable the event handlers on pre-GEN12 platforms as well */ if (DISPLAY_VER(i915) < 12) return; - for (id = DMC_FW_MAIN; id < DMC_FW_MAX; id++) { + for_each_dmc_id(dmc_id) { int handler; - if (!has_dmc_id_fw(i915, id)) + if (!has_dmc_id_fw(i915, dmc_id)) continue; for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++) disable_event_handler(i915, - DMC_EVT_CTL(i915, id, handler), - DMC_EVT_HTP(i915, id, handler)); + DMC_EVT_CTL(i915, dmc_id, handler), + DMC_EVT_HTP(i915, dmc_id, handler)); } } @@ -410,7 +450,9 @@ static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable) void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe) { - if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe))) + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); + + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id)) return; if (DISPLAY_VER(i915) >= 14) @@ -421,7 +463,9 @@ void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe) void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe) { - if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe))) + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); + + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id)) return; if (DISPLAY_VER(i915) >= 14) @@ -432,57 +476,59 @@ void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe) /** * intel_dmc_load_program() - write the firmware from memory to register. - * @dev_priv: i915 drm device. + * @i915: i915 drm device. * * DMC firmware is read from a .bin file and kept in internal memory one time. * Everytime display comes back from low power state this function is called to * copy the firmware from internal memory to registers. 
*/ -void intel_dmc_load_program(struct drm_i915_private *dev_priv) +void intel_dmc_load_program(struct drm_i915_private *i915) { - struct intel_dmc *dmc = &dev_priv->display.dmc; - u32 id, i; + struct i915_power_domains *power_domains = &i915->display.power.domains; + struct intel_dmc *dmc = i915_to_dmc(i915); + enum intel_dmc_id dmc_id; + u32 i; - if (!intel_dmc_has_payload(dev_priv)) + if (!intel_dmc_has_payload(i915)) return; - pipedmc_clock_gating_wa(dev_priv, true); + pipedmc_clock_gating_wa(i915, true); - disable_all_event_handlers(dev_priv); + disable_all_event_handlers(i915); - assert_rpm_wakelock_held(&dev_priv->runtime_pm); + assert_rpm_wakelock_held(&i915->runtime_pm); preempt_disable(); - for (id = 0; id < DMC_FW_MAX; id++) { - for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) { - intel_de_write_fw(dev_priv, - DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i), - dmc->dmc_info[id].payload[i]); + for_each_dmc_id(dmc_id) { + for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) { + intel_de_write_fw(i915, + DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i), + dmc->dmc_info[dmc_id].payload[i]); } } preempt_enable(); - for (id = 0; id < DMC_FW_MAX; id++) { - for (i = 0; i < dmc->dmc_info[id].mmio_count; i++) { - intel_de_write(dev_priv, dmc->dmc_info[id].mmioaddr[i], - dmc->dmc_info[id].mmiodata[i]); + for_each_dmc_id(dmc_id) { + for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { + intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i], + dmc->dmc_info[dmc_id].mmiodata[i]); } } - dev_priv->display.dmc.dc_state = 0; + power_domains->dc_state = 0; - gen9_set_dc_state_debugmask(dev_priv); + gen9_set_dc_state_debugmask(i915); /* * Flip queue events need to be disabled before enabling DC5/6. * i915 doesn't use the flip queue feature, so disable it already * here. */ - disable_all_flip_queue_events(dev_priv); + disable_all_flip_queue_events(i915); - pipedmc_clock_gating_wa(dev_priv, false); + pipedmc_clock_gating_wa(i915, false); } /** @@ -504,8 +550,11 @@ void intel_dmc_disable_program(struct drm_i915_private *i915) void assert_dmc_loaded(struct drm_i915_private *i915) { - drm_WARN_ONCE(&i915->drm, - !intel_de_read(i915, DMC_PROGRAM(i915->display.dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), + struct intel_dmc *dmc = i915_to_dmc(i915); + + drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n"); + drm_WARN_ONCE(&i915->drm, dmc && + !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), "DMC program storage start is NULL\n"); drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE), "DMC SSP Base Not fine\n"); @@ -540,15 +589,15 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc, const struct stepping_info *si, u8 package_ver) { - unsigned int i, id; - - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); + struct drm_i915_private *i915 = dmc->i915; + enum intel_dmc_id dmc_id; + unsigned int i; for (i = 0; i < num_entries; i++) { - id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id; + dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id; - if (id >= DMC_FW_MAX) { - drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", id); + if (!is_valid_dmc_id(dmc_id)) { + drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id); continue; } @@ -556,29 +605,24 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc, * check for the stepping since we already found a previous FW * for this id. 
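The DMC rework above routes every firmware id through is_valid_dmc_id() and the for_each_dmc_id() iterator instead of open-coded range checks and loops. The sketch below reuses that enum layout and the two helpers as shown in the hunks; the payload_present[] table and main() are illustrative stand-ins for dmc->dmc_info[].

        /* Standalone sketch of the enum-range helpers used by the DMC rework:
         * a first/last sentinel layout, a validity check, and an iterator
         * macro.  payload_present[] stands in for dmc->dmc_info[].
         */
        #include <stdbool.h>
        #include <stdio.h>

        enum intel_dmc_id {
                DMC_FW_MAIN = 0,
                DMC_FW_PIPEA,
                DMC_FW_PIPEB,
                DMC_FW_PIPEC,
                DMC_FW_PIPED,
                DMC_FW_MAX
        };

        #define for_each_dmc_id(__dmc_id) \
                for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++)

        static bool is_valid_dmc_id(enum intel_dmc_id dmc_id)
        {
                return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX;
        }

        static bool payload_present[DMC_FW_MAX] = {
                [DMC_FW_MAIN] = true,
                [DMC_FW_PIPEA] = true,
        };

        int main(void)
        {
                enum intel_dmc_id dmc_id;

                /* Reject an out-of-range id up front, as intel_dmc_enable_pipe()
                 * now does with the result of PIPE_TO_DMC_ID(). */
                printf("id 7 valid: %d\n", is_valid_dmc_id((enum intel_dmc_id)7));

                for_each_dmc_id(dmc_id)
                        printf("dmc id %d: payload %s\n", dmc_id,
                               payload_present[dmc_id] ? "present" : "absent");

                return 0;
        }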
*/ - if (dmc->dmc_info[id].present) + if (dmc->dmc_info[dmc_id].present) continue; if (fw_info_matches_stepping(&fw_info[i], si)) { - dmc->dmc_info[id].present = true; - dmc->dmc_info[id].dmc_offset = fw_info[i].offset; + dmc->dmc_info[dmc_id].present = true; + dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset; } } } static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc, const u32 *mmioaddr, u32 mmio_count, - int header_ver, u8 dmc_id) + int header_ver, enum intel_dmc_id dmc_id) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); + struct drm_i915_private *i915 = dmc->i915; u32 start_range, end_range; int i; - if (dmc_id >= DMC_FW_MAX) { - drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id); - return false; - } - if (header_ver == 1) { start_range = DMC_MMIO_START_RANGE; end_range = DMC_MMIO_END_RANGE; @@ -606,9 +650,9 @@ static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc, static u32 parse_dmc_fw_header(struct intel_dmc *dmc, const struct intel_dmc_header_base *dmc_header, - size_t rem_size, u8 dmc_id) + size_t rem_size, enum intel_dmc_id dmc_id) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); + struct drm_i915_private *i915 = dmc->i915; struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id]; unsigned int header_len_bytes, dmc_header_size, payload_size, i; const u32 *mmioaddr, *mmiodata; @@ -719,7 +763,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc, const struct stepping_info *si, size_t rem_size) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); + struct drm_i915_private *i915 = dmc->i915; u32 package_size = sizeof(struct intel_package_header); u32 num_entries, max_entries; const struct intel_fw_info *fw_info; @@ -773,7 +817,7 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc, struct intel_css_header *css_header, size_t rem_size) { - struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), display.dmc); + struct drm_i915_private *i915 = dmc->i915; if (rem_size < sizeof(struct intel_css_header)) { drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n"); @@ -793,18 +837,17 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc, return sizeof(struct intel_css_header); } -static void parse_dmc_fw(struct drm_i915_private *dev_priv, - const struct firmware *fw) +static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw) { + struct drm_i915_private *i915 = dmc->i915; struct intel_css_header *css_header; struct intel_package_header *package_header; struct intel_dmc_header_base *dmc_header; - struct intel_dmc *dmc = &dev_priv->display.dmc; struct stepping_info display_info = { '*', '*'}; - const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info); + const struct stepping_info *si = intel_get_stepping_info(i915, &display_info); + enum intel_dmc_id dmc_id; u32 readcount = 0; u32 r, offset; - int id; if (!fw) return; @@ -825,34 +868,33 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv, readcount += r; - for (id = 0; id < DMC_FW_MAX; id++) { - if (!dev_priv->display.dmc.dmc_info[id].present) + for_each_dmc_id(dmc_id) { + if (!dmc->dmc_info[dmc_id].present) continue; - offset = readcount + dmc->dmc_info[id].dmc_offset * 4; + offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4; if (offset > fw->size) { - drm_err(&dev_priv->drm, "Reading beyond the fw_size\n"); + drm_err(&i915->drm, "Reading beyond the fw_size\n"); continue; } dmc_header = (struct intel_dmc_header_base *)&fw->data[offset]; - 
parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, id); + parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id); } } -static void intel_dmc_runtime_pm_get(struct drm_i915_private *dev_priv) +static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915) { - drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref); - dev_priv->display.dmc.wakeref = - intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref); + i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); } -static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv) +static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915) { intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&dev_priv->display.dmc.wakeref); + fetch_and_zero(&i915->display.dmc.wakeref); - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); + intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); } static const char *dmc_fallback_path(struct drm_i915_private *i915) @@ -865,46 +907,40 @@ static const char *dmc_fallback_path(struct drm_i915_private *i915) static void dmc_load_work_fn(struct work_struct *work) { - struct drm_i915_private *dev_priv; - struct intel_dmc *dmc; + struct intel_dmc *dmc = container_of(work, typeof(*dmc), work); + struct drm_i915_private *i915 = dmc->i915; const struct firmware *fw = NULL; const char *fallback_path; int err; - dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work); - dmc = &dev_priv->display.dmc; - - err = request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev); + err = request_firmware(&fw, dmc->fw_path, i915->drm.dev); - if (err == -ENOENT && !dev_priv->params.dmc_firmware_path) { - fallback_path = dmc_fallback_path(dev_priv); + if (err == -ENOENT && !i915->params.dmc_firmware_path) { + fallback_path = dmc_fallback_path(i915); if (fallback_path) { - drm_dbg_kms(&dev_priv->drm, - "%s not found, falling back to %s\n", - dmc->fw_path, - fallback_path); - err = request_firmware(&fw, fallback_path, dev_priv->drm.dev); + drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n", + dmc->fw_path, fallback_path); + err = request_firmware(&fw, fallback_path, i915->drm.dev); if (err == 0) - dev_priv->display.dmc.fw_path = fallback_path; + dmc->fw_path = fallback_path; } } - parse_dmc_fw(dev_priv, fw); + parse_dmc_fw(dmc, fw); - if (intel_dmc_has_payload(dev_priv)) { - intel_dmc_load_program(dev_priv); - intel_dmc_runtime_pm_put(dev_priv); + if (intel_dmc_has_payload(i915)) { + intel_dmc_load_program(i915); + intel_dmc_runtime_pm_put(i915); - drm_info(&dev_priv->drm, - "Finished loading DMC firmware %s (v%u.%u)\n", - dev_priv->display.dmc.fw_path, DMC_VERSION_MAJOR(dmc->version), + drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n", + dmc->fw_path, DMC_VERSION_MAJOR(dmc->version), DMC_VERSION_MINOR(dmc->version)); } else { - drm_notice(&dev_priv->drm, + drm_notice(&i915->drm, "Failed to load DMC firmware %s." " Disabling runtime power management.\n", dmc->fw_path); - drm_notice(&dev_priv->drm, "DMC firmware homepage: %s", + drm_notice(&i915->drm, "DMC firmware homepage: %s", INTEL_UC_FIRMWARE_URL); } @@ -912,19 +948,17 @@ static void dmc_load_work_fn(struct work_struct *work) } /** - * intel_dmc_ucode_init() - initialize the firmware loading. - * @dev_priv: i915 drm device. + * intel_dmc_init() - initialize the firmware loading. + * @i915: i915 drm device. 
* * This function is called at the time of loading the display driver to read * firmware from a .bin file and copied into a internal memory. */ -void intel_dmc_ucode_init(struct drm_i915_private *dev_priv) +void intel_dmc_init(struct drm_i915_private *i915) { - struct intel_dmc *dmc = &dev_priv->display.dmc; - - INIT_WORK(&dev_priv->display.dmc.work, dmc_load_work_fn); + struct intel_dmc *dmc; - if (!HAS_DMC(dev_priv)) + if (!HAS_DMC(i915)) return; /* @@ -935,168 +969,192 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv) * suspend as runtime suspend *requires* a working DMC for whatever * reason. */ - intel_dmc_runtime_pm_get(dev_priv); + intel_dmc_runtime_pm_get(i915); + + dmc = kzalloc(sizeof(*dmc), GFP_KERNEL); + if (!dmc) + return; - if (IS_DG2(dev_priv)) { + dmc->i915 = i915; + + INIT_WORK(&dmc->work, dmc_load_work_fn); + + if (IS_DG2(i915)) { dmc->fw_path = DG2_DMC_PATH; dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; - } else if (IS_ALDERLAKE_P(dev_priv)) { + } else if (IS_ALDERLAKE_P(i915)) { dmc->fw_path = ADLP_DMC_PATH; dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; - } else if (IS_ALDERLAKE_S(dev_priv)) { + } else if (IS_ALDERLAKE_S(i915)) { dmc->fw_path = ADLS_DMC_PATH; dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; - } else if (IS_DG1(dev_priv)) { + } else if (IS_DG1(i915)) { dmc->fw_path = DG1_DMC_PATH; dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; - } else if (IS_ROCKETLAKE(dev_priv)) { + } else if (IS_ROCKETLAKE(i915)) { dmc->fw_path = RKL_DMC_PATH; dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; - } else if (IS_TIGERLAKE(dev_priv)) { + } else if (IS_TIGERLAKE(i915)) { dmc->fw_path = TGL_DMC_PATH; dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; - } else if (DISPLAY_VER(dev_priv) == 11) { + } else if (DISPLAY_VER(i915) == 11) { dmc->fw_path = ICL_DMC_PATH; dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE; - } else if (IS_GEMINILAKE(dev_priv)) { + } else if (IS_GEMINILAKE(i915)) { dmc->fw_path = GLK_DMC_PATH; dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE; - } else if (IS_KABYLAKE(dev_priv) || - IS_COFFEELAKE(dev_priv) || - IS_COMETLAKE(dev_priv)) { + } else if (IS_KABYLAKE(i915) || + IS_COFFEELAKE(i915) || + IS_COMETLAKE(i915)) { dmc->fw_path = KBL_DMC_PATH; dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE; - } else if (IS_SKYLAKE(dev_priv)) { + } else if (IS_SKYLAKE(i915)) { dmc->fw_path = SKL_DMC_PATH; dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE; - } else if (IS_BROXTON(dev_priv)) { + } else if (IS_BROXTON(i915)) { dmc->fw_path = BXT_DMC_PATH; dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE; } - if (dev_priv->params.dmc_firmware_path) { - if (strlen(dev_priv->params.dmc_firmware_path) == 0) { - dmc->fw_path = NULL; - drm_info(&dev_priv->drm, + if (i915->params.dmc_firmware_path) { + if (strlen(i915->params.dmc_firmware_path) == 0) { + drm_info(&i915->drm, "Disabling DMC firmware and runtime PM\n"); - return; + goto out; } - dmc->fw_path = dev_priv->params.dmc_firmware_path; + dmc->fw_path = i915->params.dmc_firmware_path; } if (!dmc->fw_path) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "No known DMC firmware for platform, disabling runtime PM\n"); - return; + goto out; } - drm_dbg_kms(&dev_priv->drm, "Loading %s\n", dmc->fw_path); - schedule_work(&dev_priv->display.dmc.work); + i915->display.dmc.dmc = dmc; + + drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path); + schedule_work(&dmc->work); + + return; + +out: + kfree(dmc); } /** - * intel_dmc_ucode_suspend() - prepare DMC firmware before system suspend - * @dev_priv: i915 drm device + * intel_dmc_suspend() - prepare 
DMC firmware before system suspend + * @i915: i915 drm device * * Prepare the DMC firmware before entering system suspend. This includes * flushing pending work items and releasing any resources acquired during * init. */ -void intel_dmc_ucode_suspend(struct drm_i915_private *dev_priv) +void intel_dmc_suspend(struct drm_i915_private *i915) { - if (!HAS_DMC(dev_priv)) + struct intel_dmc *dmc = i915_to_dmc(i915); + + if (!HAS_DMC(i915)) return; - flush_work(&dev_priv->display.dmc.work); + if (dmc) + flush_work(&dmc->work); /* Drop the reference held in case DMC isn't loaded. */ - if (!intel_dmc_has_payload(dev_priv)) - intel_dmc_runtime_pm_put(dev_priv); + if (!intel_dmc_has_payload(i915)) + intel_dmc_runtime_pm_put(i915); } /** - * intel_dmc_ucode_resume() - init DMC firmware during system resume - * @dev_priv: i915 drm device + * intel_dmc_resume() - init DMC firmware during system resume + * @i915: i915 drm device * * Reinitialize the DMC firmware during system resume, reacquiring any - * resources released in intel_dmc_ucode_suspend(). + * resources released in intel_dmc_suspend(). */ -void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv) +void intel_dmc_resume(struct drm_i915_private *i915) { - if (!HAS_DMC(dev_priv)) + if (!HAS_DMC(i915)) return; /* * Reacquire the reference to keep RPM disabled in case DMC isn't * loaded. */ - if (!intel_dmc_has_payload(dev_priv)) - intel_dmc_runtime_pm_get(dev_priv); + if (!intel_dmc_has_payload(i915)) + intel_dmc_runtime_pm_get(i915); } /** - * intel_dmc_ucode_fini() - unload the DMC firmware. - * @dev_priv: i915 drm device. + * intel_dmc_fini() - unload the DMC firmware. + * @i915: i915 drm device. * * Firmmware unloading includes freeing the internal memory and reset the * firmware loading status. 
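With struct intel_dmc now kzalloc'ed in intel_dmc_init() and reached only through i915_to_dmc(), every later path has to cope with a NULL pointer, whether firmware was disabled or the allocation failed. A reduced standalone sketch of that lifecycle follows; the fake_dmc/fake_display types are stand-ins for the driver structures.

        /* Standalone sketch of the "optional, heap-allocated state plus
         * NULL-tolerant accessor" lifecycle the DMC rework switches to:
         * init may leave the pointer NULL, and teardown skips the work then.
         */
        #include <stdio.h>
        #include <stdlib.h>

        struct fake_dmc {
                const char *fw_path;
        };

        struct fake_display {
                struct fake_dmc *dmc;   /* NULL when no firmware is in use */
        };

        static struct fake_dmc *to_dmc(struct fake_display *display)
        {
                return display->dmc;    /* note: may be NULL */
        }

        static void dmc_init(struct fake_display *display, const char *fw_path)
        {
                struct fake_dmc *dmc;

                if (!fw_path)
                        return;         /* leave display->dmc NULL */

                dmc = calloc(1, sizeof(*dmc));
                if (!dmc)
                        return;

                dmc->fw_path = fw_path;
                display->dmc = dmc;
        }

        static void dmc_fini(struct fake_display *display)
        {
                struct fake_dmc *dmc = to_dmc(display);

                if (!dmc)
                        return;         /* nothing was ever allocated */

                free(dmc);
                display->dmc = NULL;
        }

        int main(void)
        {
                struct fake_display display = { .dmc = NULL };

                dmc_init(&display, "i915/fake_dmc.bin");   /* hypothetical path */
                printf("path: %s\n",
                       to_dmc(&display) ? to_dmc(&display)->fw_path : "N/A");
                dmc_fini(&display);
                printf("path: %s\n",
                       to_dmc(&display) ? to_dmc(&display)->fw_path : "N/A");
                return 0;
        }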
*/ -void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv) +void intel_dmc_fini(struct drm_i915_private *i915) { - int id; + struct intel_dmc *dmc = i915_to_dmc(i915); + enum intel_dmc_id dmc_id; - if (!HAS_DMC(dev_priv)) + if (!HAS_DMC(i915)) return; - intel_dmc_ucode_suspend(dev_priv); - drm_WARN_ON(&dev_priv->drm, dev_priv->display.dmc.wakeref); + intel_dmc_suspend(i915); + drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref); - for (id = 0; id < DMC_FW_MAX; id++) - kfree(dev_priv->display.dmc.dmc_info[id].payload); + if (dmc) { + for_each_dmc_id(dmc_id) + kfree(dmc->dmc_info[dmc_id].payload); + + kfree(dmc); + i915->display.dmc.dmc = NULL; + } } void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m, struct drm_i915_private *i915) { - struct intel_dmc *dmc = &i915->display.dmc; + struct intel_dmc *dmc = i915_to_dmc(i915); if (!HAS_DMC(i915)) return; + i915_error_printf(m, "DMC initialized: %s\n", str_yes_no(dmc)); i915_error_printf(m, "DMC loaded: %s\n", str_yes_no(intel_dmc_has_payload(i915))); - i915_error_printf(m, "DMC fw version: %d.%d\n", - DMC_VERSION_MAJOR(dmc->version), - DMC_VERSION_MINOR(dmc->version)); + if (dmc) + i915_error_printf(m, "DMC fw version: %d.%d\n", + DMC_VERSION_MAJOR(dmc->version), + DMC_VERSION_MINOR(dmc->version)); } static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = m->private; + struct intel_dmc *dmc = i915_to_dmc(i915); intel_wakeref_t wakeref; - struct intel_dmc *dmc; i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG; if (!HAS_DMC(i915)) return -ENODEV; - dmc = &i915->display.dmc; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc)); seq_printf(m, "fw loaded: %s\n", str_yes_no(intel_dmc_has_payload(i915))); - seq_printf(m, "path: %s\n", dmc->fw_path); + seq_printf(m, "path: %s\n", dmc ? 
dmc->fw_path : "N/A"); seq_printf(m, "Pipe A fw needed: %s\n", str_yes_no(GRAPHICS_VER(i915) >= 12)); seq_printf(m, "Pipe A fw loaded: %s\n", - str_yes_no(dmc->dmc_info[DMC_FW_PIPEA].payload)); + str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA))); seq_printf(m, "Pipe B fw needed: %s\n", str_yes_no(IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)); seq_printf(m, "Pipe B fw loaded: %s\n", - str_yes_no(dmc->dmc_info[DMC_FW_PIPEB].payload)); + str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB))); if (!intel_dmc_has_payload(i915)) goto out; @@ -1130,9 +1188,10 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) seq_printf(m, "DC5 -> DC6 count: %d\n", intel_de_read(i915, dc6_reg)); -out: seq_printf(m, "program base: 0x%08x\n", intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0))); + +out: seq_printf(m, "ssp base: 0x%08x\n", intel_de_read(i915, DMC_SSP_BASE)); seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL)); diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h index fd1725de4289..fd607afff2ef 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.h +++ b/drivers/gpu/drm/i915/display/intel_dmc.h @@ -6,54 +6,20 @@ #ifndef __INTEL_DMC_H__ #define __INTEL_DMC_H__ -#include "i915_reg_defs.h" -#include "intel_wakeref.h" -#include <linux/workqueue.h> +#include <linux/types.h> struct drm_i915_error_state_buf; struct drm_i915_private; - enum pipe; -enum { - DMC_FW_MAIN = 0, - DMC_FW_PIPEA, - DMC_FW_PIPEB, - DMC_FW_PIPEC, - DMC_FW_PIPED, - DMC_FW_MAX -}; - -struct intel_dmc { - struct work_struct work; - const char *fw_path; - u32 max_fw_size; /* bytes */ - u32 version; - struct dmc_fw_info { - u32 mmio_count; - i915_reg_t mmioaddr[20]; - u32 mmiodata[20]; - u32 dmc_offset; - u32 start_mmioaddr; - u32 dmc_fw_size; /*dwords */ - u32 *payload; - bool present; - } dmc_info[DMC_FW_MAX]; - - u32 dc_state; - u32 target_dc_state; - u32 allowed_dc_mask; - intel_wakeref_t wakeref; -}; - -void intel_dmc_ucode_init(struct drm_i915_private *i915); +void intel_dmc_init(struct drm_i915_private *i915); void intel_dmc_load_program(struct drm_i915_private *i915); void intel_dmc_disable_program(struct drm_i915_private *i915); void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe); void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe); -void intel_dmc_ucode_fini(struct drm_i915_private *i915); -void intel_dmc_ucode_suspend(struct drm_i915_private *i915); -void intel_dmc_ucode_resume(struct drm_i915_private *i915); +void intel_dmc_fini(struct drm_i915_private *i915); +void intel_dmc_suspend(struct drm_i915_private *i915); +void intel_dmc_resume(struct drm_i915_private *i915); bool intel_dmc_has_payload(struct drm_i915_private *i915); void intel_dmc_debugfs_register(struct drm_i915_private *i915); void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m, diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 62cbab7402e9..aee93b0d810e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -288,7 +288,7 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp) static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port) { - int vbt_max_lanes = intel_bios_dp_max_lane_count(&dig_port->base); + int vbt_max_lanes = intel_bios_dp_max_lane_count(dig_port->base.devdata); int max_lanes = dig_port->max_lanes; if (vbt_max_lanes) @@ -425,7 +425,7 @@ static int 
vbt_max_link_rate(struct intel_dp *intel_dp) struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; int max_rate; - max_rate = intel_bios_dp_max_link_rate(encoder); + max_rate = intel_bios_dp_max_link_rate(encoder->devdata); if (intel_dp_is_edp(intel_dp)) { struct intel_connector *connector = intel_dp->attached_connector; @@ -1415,6 +1415,28 @@ static int intel_dp_sink_dsc_version_minor(struct intel_dp *intel_dp) DP_DSC_MINOR_SHIFT; } +static int intel_dp_get_slice_height(int vactive) +{ + int slice_height; + + /* + * VDSC 1.2a spec in Section 3.8 Options for Slices implies that 108 + * lines is an optimal slice height, but any size can be used as long as + * vertical active integer multiple and maximum vertical slice count + * requirements are met. + */ + for (slice_height = 108; slice_height <= vactive; slice_height += 2) + if (vactive % slice_height == 0) + return slice_height; + + /* + * Highly unlikely we reach here as most of the resolutions will end up + * finding appropriate slice_height in above loop but returning + * slice_height as 2 here as it should work with all resolutions. + */ + return 2; +} + static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state) { @@ -1433,17 +1455,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay; - /* - * Slice Height of 8 works for all currently available panels. So start - * with that if pic_height is an integral multiple of 8. Eventually add - * logic to try multiple slice heights. - */ - if (vdsc_cfg->pic_height % 8 == 0) - vdsc_cfg->slice_height = 8; - else if (vdsc_cfg->pic_height % 4 == 0) - vdsc_cfg->slice_height = 4; - else - vdsc_cfg->slice_height = 2; + vdsc_cfg->slice_height = intel_dp_get_slice_height(vdsc_cfg->pic_height); ret = intel_dsc_compute_params(crtc_state); if (ret) @@ -1727,7 +1739,7 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, * Our YCbCr output is always limited range. * crtc_state->limited_color_range only applies to RGB, * and it must never be set for YCbCr or we risk setting - * some conflicting bits in PIPECONF which will mess up + * some conflicting bits in TRANSCONF which will mess up * the colors on the monitor. 
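intel_dp_get_slice_height() above replaces the fixed 8/4/2 slice heights with a search that starts at the 108-line optimum suggested by the VDSC 1.2a spec and walks up in steps of two until it finds a divisor of the vertical active size, falling back to 2. The function is copied below into a standalone form so the result can be checked for a few common panel heights; main() is illustrative.

        /* Standalone copy of the slice-height search added to intel_dp.c:
         * walk up from 108 in steps of two looking for a divisor of the
         * vertical active size, falling back to 2 if nothing matches.
         */
        #include <stdio.h>

        static int intel_dp_get_slice_height(int vactive)
        {
                int slice_height;

                for (slice_height = 108; slice_height <= vactive; slice_height += 2)
                        if (vactive % slice_height == 0)
                                return slice_height;

                return 2;
        }

        int main(void)
        {
                const int heights[] = { 1080, 1440, 2160, 2400 };
                unsigned int i;

                for (i = 0; i < sizeof(heights) / sizeof(heights[0]); i++)
                        printf("vactive %4d -> slice height %d\n",
                               heights[i], intel_dp_get_slice_height(heights[i]));

                return 0;
        }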
*/ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) @@ -1991,7 +2003,6 @@ intel_dp_drrs_compute_config(struct intel_connector *connector, } static bool intel_dp_has_audio(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); @@ -2057,7 +2068,7 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder, struct drm_connector *connector = conn_state->connector; pipe_config->sdp_split_enable = - intel_dp_has_audio(encoder, pipe_config, conn_state) && + intel_dp_has_audio(encoder, conn_state) && intel_dp_is_uhbr(pipe_config); drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDP split enable: %s\n", @@ -2081,7 +2092,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, pipe_config->has_pch_encoder = true; pipe_config->has_audio = - intel_dp_has_audio(encoder, pipe_config, conn_state) && + intel_dp_has_audio(encoder, conn_state) && intel_audio_compute_config(encoder, pipe_config, conn_state); fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode); @@ -2281,10 +2292,15 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) void intel_dp_wait_source_oui(struct intel_dp *intel_dp) { + struct intel_connector *connector = intel_dp->attached_connector; struct drm_i915_private *i915 = dp_to_i915(intel_dp); - drm_dbg_kms(&i915->drm, "Performing OUI wait\n"); - wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Performing OUI wait (%u ms)\n", + connector->base.base.id, connector->base.name, + connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout); + + wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, + connector->panel.vbt.backlight.hdr_dpcd_refresh_timeout); } /* If the device supports it, try to set the power state appropriately */ @@ -4851,7 +4867,7 @@ intel_dp_connector_register(struct drm_connector *connector) if (!ret) drm_dp_cec_register_connector(&intel_dp->aux, connector); - if (!intel_bios_is_lspcon_present(i915, dig_port->base.port)) + if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata)) return ret; /* @@ -5129,8 +5145,9 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) return IRQ_HANDLED; } -/* check the VBT to see whether the eDP is on another port */ -bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) +static bool _intel_dp_is_port_edp(struct drm_i915_private *dev_priv, + const struct intel_bios_encoder_data *devdata, + enum port port) { /* * eDP not supported on g4x. 
so bail out early just @@ -5142,13 +5159,24 @@ bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A) return true; - return intel_bios_is_port_edp(dev_priv, port); + return devdata && intel_bios_encoder_supports_edp(devdata); +} + +bool intel_dp_is_port_edp(struct drm_i915_private *i915, enum port port) +{ + const struct intel_bios_encoder_data *devdata = + intel_bios_encoder_data_lookup(i915, port); + + return _intel_dp_is_port_edp(i915, devdata, port); } static bool -has_gamut_metadata_dip(struct drm_i915_private *i915, enum port port) +has_gamut_metadata_dip(struct intel_encoder *encoder) { - if (intel_bios_is_lspcon_present(i915, port)) + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum port port = encoder->port; + + if (intel_bios_encoder_is_lspcon(encoder->devdata)) return false; if (DISPLAY_VER(i915) >= 11) @@ -5183,14 +5211,14 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect drm_connector_attach_max_bpc_property(connector, 6, 12); /* Register HDMI colorspace for case of lspcon */ - if (intel_bios_is_lspcon_present(dev_priv, port)) { + if (intel_bios_encoder_is_lspcon(dp_to_dig_port(intel_dp)->base.devdata)) { drm_connector_attach_content_type_property(connector); intel_attach_hdmi_colorspace_property(connector); } else { intel_attach_dp_colorspace_property(connector); } - if (has_gamut_metadata_dip(dev_priv, port)) + if (has_gamut_metadata_dip(&dp_to_dig_port(intel_dp)->base)) drm_connector_attach_hdr_output_metadata_property(connector); if (HAS_VRR(dev_priv)) @@ -5232,11 +5260,6 @@ static void intel_edp_backlight_setup(struct intel_dp *intel_dp, if (pipe != PIPE_A && pipe != PIPE_B) pipe = PIPE_A; - - drm_dbg_kms(&i915->drm, - "[CONNECTOR:%d:%s] using pipe %c for initial backlight setup\n", - connector->base.base.id, connector->base.name, - pipe_name(pipe)); } intel_backlight_setup(connector, pipe); @@ -5412,7 +5435,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); intel_dp->attached_connector = intel_connector; - if (intel_dp_is_port_edp(dev_priv, port)) { + if (_intel_dp_is_port_edp(dev_priv, intel_encoder->devdata, port)) { /* * Currently we don't support eDP on TypeC ports, although in * theory it could work on TypeC legacy ports. 
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 5a176bfb10a2..96967e21c94c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "i915_reg.h" #include "i915_trace.h" +#include "intel_bios.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp_aux.h" @@ -737,3 +738,37 @@ void intel_dp_aux_init(struct intel_dp *intel_dp) intel_dp->aux.transfer = intel_dp_aux_transfer; cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE); } + +static enum aux_ch default_aux_ch(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + /* SKL has DDI E but no AUX E */ + if (DISPLAY_VER(i915) == 9 && encoder->port == PORT_E) + return AUX_CH_A; + + return (enum aux_ch)encoder->port; +} + +enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum aux_ch aux_ch; + + aux_ch = intel_bios_dp_aux_ch(encoder->devdata); + if (aux_ch != AUX_CH_NONE) { + drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] using AUX %c (VBT)\n", + encoder->base.base.id, encoder->base.name, + aux_ch_name(aux_ch)); + return aux_ch; + } + + aux_ch = default_aux_ch(encoder); + + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s] using AUX %c (platform default)\n", + encoder->base.base.id, encoder->base.name, + aux_ch_name(aux_ch)); + + return aux_ch; +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h index 738577537bc7..138e340f94ee 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.h +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h @@ -6,9 +6,13 @@ #ifndef __INTEL_DP_AUX_H__ #define __INTEL_DP_AUX_H__ +enum aux_ch; struct intel_dp; +struct intel_encoder; void intel_dp_aux_fini(struct intel_dp *intel_dp); void intel_dp_aux_init(struct intel_dp *intel_dp); +enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder); + #endif /* __INTEL_DP_AUX_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 83af95bce98d..95cc5251843e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -105,6 +105,11 @@ enum intel_dp_aux_backlight_modparam { INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3, }; +static bool is_intel_tcon_cap(const u8 tcon_cap[4]) +{ + return tcon_cap[0] >= 1; +} + /* Intel EDP backlight callbacks */ static bool intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) @@ -125,14 +130,12 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) if (!(tcon_cap[1] & INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP)) return false; - if (tcon_cap[0] >= 1) { - drm_dbg_kms(&i915->drm, "Detected Intel HDR backlight interface version %d\n", - tcon_cap[0]); - } else { - drm_dbg_kms(&i915->drm, "Detected unsupported HDR backlight interface version %d\n", - tcon_cap[0]); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Detected %s HDR backlight interface version %d\n", + connector->base.base.id, connector->base.name, + is_intel_tcon_cap(tcon_cap) ? 
"Intel" : "unsupported", tcon_cap[0]); + + if (!is_intel_tcon_cap(tcon_cap)) return false; - } /* * If we don't have HDR static metadata there is no way to @@ -147,7 +150,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type & BIT(HDMI_STATIC_METADATA_TYPE1))) { drm_info(&i915->drm, - "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n", + "[CONNECTOR:%d:%s] Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n", + connector->base.base.id, connector->base.name, INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL); return false; } @@ -168,7 +172,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe u8 buf[2] = { 0 }; if (drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &tmp) != 1) { - drm_err(&i915->drm, "Failed to read current backlight mode from DPCD\n"); + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight mode from DPCD\n", + connector->base.base.id, connector->base.name); return 0; } @@ -185,7 +190,8 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe if (drm_dp_dpcd_read(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, sizeof(buf)) != sizeof(buf)) { - drm_err(&i915->drm, "Failed to read brightness from DPCD\n"); + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read brightness from DPCD\n", + connector->base.base.id, connector->base.name); return 0; } @@ -205,7 +211,8 @@ intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state, if (drm_dp_dpcd_write(&intel_dp->aux, INTEL_EDP_BRIGHTNESS_NITS_LSB, buf, sizeof(buf)) != sizeof(buf)) - drm_err(dev, "Failed to write brightness level to DPCD\n"); + drm_err(dev, "[CONNECTOR:%d:%s] Failed to write brightness level to DPCD\n", + connector->base.base.id, connector->base.name); } static void @@ -238,7 +245,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl); if (ret != 1) { - drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret); + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to read current backlight control mode: %d\n", + connector->base.base.id, connector->base.name, ret); return; } @@ -254,9 +262,10 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, ctrl &= ~INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE; } - if (ctrl != old_ctrl) - if (drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1) - drm_err(&i915->drm, "Failed to configure DPCD brightness controls\n"); + if (ctrl != old_ctrl && + drm_dp_dpcd_writeb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, ctrl) != 1) + drm_err(&i915->drm, "[CONNECTOR:%d:%s] Failed to configure DPCD brightness controls\n", + connector->base.base.id, connector->base.name); } static void @@ -273,6 +282,11 @@ intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state, panel->backlight.pwm_funcs->disable(conn_state, intel_backlight_invert_pwm_level(connector, 0)); } 
+static const char *dpcd_vs_pwm_str(bool aux) +{ + return aux ? "DPCD" : "PWM"; +} + static int intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pipe) { @@ -282,15 +296,16 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi &connector->base.display_info.luminance_range; int ret; - if (panel->backlight.edp.intel.sdr_uses_aux) { - drm_dbg_kms(&i915->drm, "SDR backlight is controlled through DPCD\n"); - } else { - drm_dbg_kms(&i915->drm, "SDR backlight is controlled through PWM\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDR backlight is controlled through %s\n", + connector->base.base.id, connector->base.name, + dpcd_vs_pwm_str(panel->backlight.edp.intel.sdr_uses_aux)); + if (!panel->backlight.edp.intel.sdr_uses_aux) { ret = panel->backlight.pwm_funcs->setup(connector, pipe); if (ret < 0) { drm_err(&i915->drm, - "Failed to setup SDR backlight controls through PWM: %d\n", ret); + "[CONNECTOR:%d:%s] Failed to setup SDR backlight controls through PWM: %d\n", + connector->base.base.id, connector->base.name, ret); return ret; } } @@ -303,8 +318,10 @@ intel_dp_aux_hdr_setup_backlight(struct intel_connector *connector, enum pipe pi panel->backlight.min = 0; } - drm_dbg_kms(&i915->drm, "Using backlight range %d..%d\n", panel->backlight.min, - panel->backlight.max); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX HDR interface for backlight control (range %d..%d)\n", + connector->base.base.id, connector->base.name, + panel->backlight.min, panel->backlight.max); + panel->backlight.level = intel_dp_aux_hdr_get_backlight(connector, pipe); panel->backlight.enabled = panel->backlight.level != 0; @@ -386,12 +403,19 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, if (ret < 0) return ret; + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n", + connector->base.base.id, connector->base.name, + dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable)); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n", + connector->base.base.id, connector->base.name, + dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set)); + if (!panel->backlight.edp.vesa.info.aux_set || !panel->backlight.edp.vesa.info.aux_enable) { ret = panel->backlight.pwm_funcs->setup(connector, pipe); if (ret < 0) { drm_err(&i915->drm, - "Failed to setup PWM backlight controls for eDP backlight: %d\n", - ret); + "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n", + connector->base.base.id, connector->base.name, ret); return ret; } } @@ -418,6 +442,9 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, } } + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Using AUX VESA interface for backlight control\n", + connector->base.base.id, connector->base.name); + return 0; } @@ -428,7 +455,8 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector) struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) { - drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n"); + drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] AUX Backlight Control Supported!\n", + connector->base.base.id, connector->base.name); return true; } return false; @@ -504,13 +532,15 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector) * interfaces is to probe for Intel's first, and VESA's second. 
*/ if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector)) { - drm_dbg_kms(dev, "Using Intel proprietary eDP backlight controls\n"); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using Intel proprietary eDP backlight controls\n", + connector->base.base.id, connector->base.name); panel->backlight.funcs = &intel_dp_hdr_bl_funcs; return 0; } if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) { - drm_dbg_kms(dev, "Using VESA eDP backlight controls\n"); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using VESA eDP backlight controls\n", + connector->base.base.id, connector->base.name); panel->backlight.funcs = &intel_dp_vesa_bl_funcs; return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 054a009e800d..a860cbc5dbea 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -265,6 +265,19 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder, return 0; } +static bool intel_dp_mst_has_audio(const struct drm_connector_state *conn_state) +{ + const struct intel_digital_connector_state *intel_conn_state = + to_intel_digital_connector_state(conn_state); + struct intel_connector *connector = + to_intel_connector(conn_state->connector); + + if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) + return connector->port->has_audio; + else + return intel_conn_state->force_audio == HDMI_AUDIO_ON; +} + static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -272,10 +285,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; - struct intel_connector *connector = - to_intel_connector(conn_state->connector); - struct intel_digital_connector_state *intel_conn_state = - to_intel_digital_connector_state(conn_state); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct link_config_limits limits; @@ -287,11 +296,9 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_pch_encoder = false; - if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) - pipe_config->has_audio = connector->port->has_audio; - else - pipe_config->has_audio = - intel_conn_state->force_audio == HDMI_AUDIO_ON; + pipe_config->has_audio = + intel_dp_mst_has_audio(conn_state) && + intel_audio_compute_config(encoder, pipe_config, conn_state); /* * for MST we always configure max link bw - the spec doesn't @@ -604,7 +611,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, * no clock to the transcoder" */ if (DISPLAY_VER(dev_priv) < 12 || !last_mst_stream) - intel_ddi_disable_pipe_clock(old_crtc_state); + intel_ddi_disable_transcoder_clock(old_crtc_state); intel_mst->connector = NULL; @@ -684,7 +691,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, * here for the following ones. 
*/ if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream) - intel_ddi_enable_pipe_clock(encoder, pipe_config); + intel_ddi_enable_transcoder_clock(encoder, pipe_config); intel_ddi_set_dp_msa(pipe_config, conn_state); } diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index 565c06de2432..62b93d097e44 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -389,9 +389,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, "force reprogramming it\n", phy); } - val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); - val |= phy_info->pwron_mask; - intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val); + intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, phy_info->pwron_mask); /* * The PHY registers start out inaccessible and respond to reads with @@ -410,27 +408,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, phy); /* Program PLL Rcomp code offset */ - val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW9(phy)); - val &= ~IREF0RC_OFFSET_MASK; - val |= 0xE4 << IREF0RC_OFFSET_SHIFT; - intel_de_write(dev_priv, BXT_PORT_CL1CM_DW9(phy), val); + intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW9(phy), IREF0RC_OFFSET_MASK, + 0xE4 << IREF0RC_OFFSET_SHIFT); - val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW10(phy)); - val &= ~IREF1RC_OFFSET_MASK; - val |= 0xE4 << IREF1RC_OFFSET_SHIFT; - intel_de_write(dev_priv, BXT_PORT_CL1CM_DW10(phy), val); + intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW10(phy), IREF1RC_OFFSET_MASK, + 0xE4 << IREF1RC_OFFSET_SHIFT); /* Program power gating */ - val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW28(phy)); - val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | - SUS_CLK_CONFIG; - intel_de_write(dev_priv, BXT_PORT_CL1CM_DW28(phy), val); - - if (phy_info->dual_channel) { - val = intel_de_read(dev_priv, BXT_PORT_CL2CM_DW6(phy)); - val |= DW6_OLDO_DYN_PWR_DOWN_EN; - intel_de_write(dev_priv, BXT_PORT_CL2CM_DW6(phy), val); - } + intel_de_rmw(dev_priv, BXT_PORT_CL1CM_DW28(phy), 0, + OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG); + + if (phy_info->dual_channel) + intel_de_rmw(dev_priv, BXT_PORT_CL2CM_DW6(phy), 0, + DW6_OLDO_DYN_PWR_DOWN_EN); if (phy_info->rcomp_phy != -1) { u32 grc_code; @@ -449,34 +439,25 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv, val << GRC_CODE_SLOW_SHIFT | val; intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code); - - val = intel_de_read(dev_priv, BXT_PORT_REF_DW8(phy)); - val |= GRC_DIS | GRC_RDY_OVRD; - intel_de_write(dev_priv, BXT_PORT_REF_DW8(phy), val); + intel_de_rmw(dev_priv, BXT_PORT_REF_DW8(phy), + 0, GRC_DIS | GRC_RDY_OVRD); } if (phy_info->reset_delay) udelay(phy_info->reset_delay); - val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)); - val |= COMMON_RESET_DIS; - intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val); + intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), 0, COMMON_RESET_DIS); } void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy) { const struct bxt_ddi_phy_info *phy_info; - u32 val; phy_info = bxt_get_phy_info(dev_priv, phy); - val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)); - val &= ~COMMON_RESET_DIS; - intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val); + intel_de_rmw(dev_priv, BXT_PHY_CTL_FAMILY(phy), COMMON_RESET_DIS, 0); - val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); - val &= ~phy_info->pwron_mask; - intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val); + intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 
phy_info->pwron_mask, 0); } void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 380368eff31a..22fc908b7e5d 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -608,10 +608,8 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - u32 val; - val = intel_de_read(dev_priv, WRPLL_CTL(id)); - intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE); + intel_de_rmw(dev_priv, WRPLL_CTL(id), WRPLL_PLL_ENABLE, 0); intel_de_posting_read(dev_priv, WRPLL_CTL(id)); /* @@ -626,10 +624,8 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { enum intel_dpll_id id = pll->info->id; - u32 val; - val = intel_de_read(dev_priv, SPLL_CTL); - intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE); + intel_de_rmw(dev_priv, SPLL_CTL, SPLL_PLL_ENABLE, 0); intel_de_posting_read(dev_priv, SPLL_CTL); /* @@ -1238,16 +1234,10 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { const enum intel_dpll_id id = pll->info->id; - u32 val; - val = intel_de_read(dev_priv, DPLL_CTRL1); - - val &= ~(DPLL_CTRL1_HDMI_MODE(id) | - DPLL_CTRL1_SSC(id) | - DPLL_CTRL1_LINK_RATE_MASK(id)); - val |= pll->state.hw_state.ctrl1 << (id * 6); - - intel_de_write(dev_priv, DPLL_CTRL1, val); + intel_de_rmw(dev_priv, DPLL_CTRL1, + DPLL_CTRL1_HDMI_MODE(id) | DPLL_CTRL1_SSC(id) | DPLL_CTRL1_LINK_RATE_MASK(id), + pll->state.hw_state.ctrl1 << (id * 6)); intel_de_posting_read(dev_priv, DPLL_CTRL1); } @@ -1265,8 +1255,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, intel_de_posting_read(dev_priv, regs[id].cfgcr2); /* the enable bit is always bit 31 */ - intel_de_write(dev_priv, regs[id].ctl, - intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE); + intel_de_rmw(dev_priv, regs[id].ctl, 0, LCPLL_PLL_ENABLE); if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5)) drm_err(&dev_priv->drm, "DPLL %d not locked\n", id); @@ -1285,8 +1274,7 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv, const enum intel_dpll_id id = pll->info->id; /* the enable bit is always bit 31 */ - intel_de_write(dev_priv, regs[id].ctl, - intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE); + intel_de_rmw(dev_priv, regs[id].ctl, LCPLL_PLL_ENABLE, 0); intel_de_posting_read(dev_priv, regs[id].ctl); } @@ -1902,14 +1890,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, bxt_port_to_phy_channel(dev_priv, port, &phy, &ch); /* Non-SSC reference */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); - temp |= PORT_PLL_REF_SEL; - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_REF_SEL); if (IS_GEMINILAKE(dev_priv)) { - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); - temp |= PORT_PLL_POWER_ENABLE; - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), + 0, PORT_PLL_POWER_ENABLE); if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_POWER_STATE), 200)) @@ -1918,39 +1903,28 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, } /* Disable 10 bit clock */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch)); - 
temp &= ~PORT_PLL_10BIT_CLK_ENABLE; - intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), + PORT_PLL_10BIT_CLK_ENABLE, 0); /* Write P1 & P2 */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch)); - temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK); - temp |= pll->state.hw_state.ebb0; - intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), + PORT_PLL_P1_MASK | PORT_PLL_P2_MASK, pll->state.hw_state.ebb0); /* Write M2 integer */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0)); - temp &= ~PORT_PLL_M2_INT_MASK; - temp |= pll->state.hw_state.pll0; - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 0), + PORT_PLL_M2_INT_MASK, pll->state.hw_state.pll0); /* Write N */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1)); - temp &= ~PORT_PLL_N_MASK; - temp |= pll->state.hw_state.pll1; - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 1), + PORT_PLL_N_MASK, pll->state.hw_state.pll1); /* Write M2 fraction */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2)); - temp &= ~PORT_PLL_M2_FRAC_MASK; - temp |= pll->state.hw_state.pll2; - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 2), + PORT_PLL_M2_FRAC_MASK, pll->state.hw_state.pll2); /* Write M2 fraction enable */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3)); - temp &= ~PORT_PLL_M2_FRAC_ENABLE; - temp |= pll->state.hw_state.pll3; - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 3), + PORT_PLL_M2_FRAC_ENABLE, pll->state.hw_state.pll3); /* Write coeff */ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6)); @@ -1961,15 +1935,11 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp); /* Write calibration val */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8)); - temp &= ~PORT_PLL_TARGET_CNT_MASK; - temp |= pll->state.hw_state.pll8; - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 8), + PORT_PLL_TARGET_CNT_MASK, pll->state.hw_state.pll8); - temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9)); - temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK; - temp |= pll->state.hw_state.pll9; - intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL(phy, ch, 9), + PORT_PLL_LOCK_THRESHOLD_MASK, pll->state.hw_state.pll9); temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10)); temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H; @@ -1986,9 +1956,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp); /* Enable PLL */ - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); - temp |= PORT_PLL_ENABLE; - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE); intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK), @@ -2016,17 +1984,13 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ - u32 temp; - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); - temp 
&= ~PORT_PLL_ENABLE; - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0); intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); if (IS_GEMINILAKE(dev_priv)) { - temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)); - temp &= ~PORT_PLL_POWER_ENABLE; - intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp); + intel_de_rmw(dev_priv, BXT_PORT_PLL_ENABLE(port), + PORT_PLL_POWER_ENABLE, 0); if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_POWER_STATE), 200)) @@ -3641,8 +3605,8 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv, !i915_mmio_reg_valid(div0_reg)); if (dev_priv->display.vbt.override_afc_startup && i915_mmio_reg_valid(div0_reg)) - intel_de_rmw(dev_priv, div0_reg, TGL_DPLL0_DIV0_AFC_STARTUP_MASK, - hw_state->div0); + intel_de_rmw(dev_priv, div0_reg, + TGL_DPLL0_DIV0_AFC_STARTUP_MASK, hw_state->div0); intel_de_posting_read(dev_priv, cfgcr1_reg); } @@ -3651,7 +3615,6 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, { struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id); - u32 val; /* * Some of the following registers have reserved fields, so program @@ -3659,23 +3622,19 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, * during the calc/readout phase if the mask depends on some other HW * state like refclk, see icl_calc_mg_pll_state(). */ - val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port)); - val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; - val |= hw_state->mg_refclkin_ctl; - intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val); + intel_de_rmw(dev_priv, MG_REFCLKIN_CTL(tc_port), + MG_REFCLKIN_CTL_OD_2_MUX_MASK, hw_state->mg_refclkin_ctl); - val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port)); - val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; - val |= hw_state->mg_clktop2_coreclkctl1; - intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val); + intel_de_rmw(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), + MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK, + hw_state->mg_clktop2_coreclkctl1); - val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port)); - val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | - MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | - MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | - MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); - val |= hw_state->mg_clktop2_hsclkctl; - intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val); + intel_de_rmw(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), + MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | + MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK, + hw_state->mg_clktop2_hsclkctl); intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0); intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1); @@ -3684,15 +3643,12 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, hw_state->mg_pll_frac_lock); intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc); - val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port)); - val &= ~hw_state->mg_pll_bias_mask; - val |= hw_state->mg_pll_bias; - intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val); + intel_de_rmw(dev_priv, MG_PLL_BIAS(tc_port), + hw_state->mg_pll_bias_mask, hw_state->mg_pll_bias); - val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); - val &= ~hw_state->mg_pll_tdc_coldst_bias_mask; - val |= hw_state->mg_pll_tdc_coldst_bias; - 
intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val); + intel_de_rmw(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), + hw_state->mg_pll_tdc_coldst_bias_mask, + hw_state->mg_pll_tdc_coldst_bias); intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port)); } @@ -3766,11 +3722,7 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, i915_reg_t enable_reg) { - u32 val; - - val = intel_de_read(dev_priv, enable_reg); - val |= PLL_POWER_ENABLE; - intel_de_write(dev_priv, enable_reg, val); + intel_de_rmw(dev_priv, enable_reg, 0, PLL_POWER_ENABLE); /* * The spec says we need to "wait" but it also says it should be @@ -3785,11 +3737,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, i915_reg_t enable_reg) { - u32 val; - - val = intel_de_read(dev_priv, enable_reg); - val |= PLL_ENABLE; - intel_de_write(dev_priv, enable_reg, val); + intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE); /* Timeout is actually 600us. */ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1)) @@ -3815,8 +3763,7 @@ static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct inte * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled. */ val = intel_de_read(i915, TRANS_CMTG_CHICKEN); - val = intel_de_read(i915, TRANS_CMTG_CHICKEN); - intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING); + val = intel_de_rmw(i915, TRANS_CMTG_CHICKEN, ~0, DISABLE_DPT_CLK_GATING); if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING)) drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val); } @@ -3900,8 +3847,6 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, i915_reg_t enable_reg) { - u32 val; - /* The first steps are done by intel_ddi_post_disable(). */ /* @@ -3910,9 +3855,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, * nothing here. */ - val = intel_de_read(dev_priv, enable_reg); - val &= ~PLL_ENABLE; - intel_de_write(dev_priv, enable_reg, val); + intel_de_rmw(dev_priv, enable_reg, PLL_ENABLE, 0); /* Timeout is actually 1us. */ if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1)) @@ -3920,9 +3863,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv, /* DVFS post sequence would be here. See the comment above. 
*/ - val = intel_de_read(dev_priv, enable_reg); - val &= ~PLL_POWER_ENABLE; - intel_de_write(dev_priv, enable_reg, val); + intel_de_rmw(dev_priv, enable_reg, PLL_POWER_ENABLE, 0); /* * The spec says we need to "wait" but it also says it should be diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c index 29c6421cd666..760e63cdc0c8 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -68,21 +68,15 @@ intel_drrs_set_refresh_rate_pipeconf(struct intel_crtc *crtc, { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc->drrs.cpu_transcoder; - u32 val, bit; + u32 bit; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - bit = PIPECONF_REFRESH_RATE_ALT_VLV; + bit = TRANSCONF_REFRESH_RATE_ALT_VLV; else - bit = PIPECONF_REFRESH_RATE_ALT_ILK; + bit = TRANSCONF_REFRESH_RATE_ALT_ILK; - val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); - - if (refresh_rate == DRRS_REFRESH_RATE_LOW) - val |= bit; - else - val &= ~bit; - - intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val); + intel_de_rmw(dev_priv, TRANSCONF(cpu_transcoder), + bit, refresh_rate == DRRS_REFRESH_RATE_LOW ? bit : 0); } static void diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 96bc117fd6a0..19e422da57dc 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -88,7 +88,8 @@ static bool assert_dsb_has_room(struct intel_dsb *dsb) /* each instruction is 2 dwords */ return !drm_WARN(&i915->drm, dsb->free_pos > dsb->size - 2, - "DSB buffer overflow\n"); + "[CRTC:%d:%s] DSB %d buffer overflow\n", + crtc->base.base.id, crtc->base.name, dsb->id); } static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe, @@ -198,7 +199,7 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, } } -static u32 intel_dsb_align_tail(struct intel_dsb *dsb) +static void intel_dsb_align_tail(struct intel_dsb *dsb) { u32 aligned_tail, tail; @@ -210,49 +211,58 @@ static u32 intel_dsb_align_tail(struct intel_dsb *dsb) aligned_tail - tail); dsb->free_pos = aligned_tail / 4; +} - return aligned_tail; +void intel_dsb_finish(struct intel_dsb *dsb) +{ + intel_dsb_align_tail(dsb); } /** * intel_dsb_commit() - Trigger workload execution of DSB. * @dsb: DSB context + * @wait_for_vblank: wait for vblank before executing * * This function is used to do actual write to hardware using DSB. */ -void intel_dsb_commit(struct intel_dsb *dsb) +void intel_dsb_commit(struct intel_dsb *dsb, bool wait_for_vblank) { struct intel_crtc *crtc = dsb->crtc; struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; u32 tail; - tail = intel_dsb_align_tail(dsb); - if (tail == 0) + tail = dsb->free_pos * 4; + if (drm_WARN_ON(&dev_priv->drm, !IS_ALIGNED(tail, CACHELINE_BYTES))) return; if (is_dsb_busy(dev_priv, pipe, dsb->id)) { - drm_err(&dev_priv->drm, "DSB engine is busy.\n"); - goto reset; + drm_err(&dev_priv->drm, "[CRTC:%d:%s] DSB %d is busy\n", + crtc->base.base.id, crtc->base.name, dsb->id); + return; } intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), + (wait_for_vblank ? 
DSB_WAIT_FOR_VBLANK : 0) | DSB_ENABLE); intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma)); intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail); +} - drm_dbg_kms(&dev_priv->drm, - "DSB execution started - head 0x%x, tail 0x%x\n", - i915_ggtt_offset(dsb->vma), - i915_ggtt_offset(dsb->vma) + tail); +void intel_dsb_wait(struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = dsb->crtc; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) drm_err(&dev_priv->drm, - "Timed out waiting for DSB workload completion.\n"); + "[CRTC:%d:%s] DSB %d timed out waiting for idle\n", + crtc->base.base.id, crtc->base.name, dsb->id); -reset: + /* Attempt to reset it */ dsb->free_pos = 0; dsb->ins_start_offset = 0; intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), 0); @@ -325,7 +335,8 @@ out_put_rpm: kfree(dsb); out: drm_info_once(&i915->drm, - "DSB queue setup failed, will fallback to MMIO for display HW programming\n"); + "[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n", + crtc->base.base.id, crtc->base.name, DSB1); return NULL; } diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h index 05c221b6d0a4..b8148b47022d 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.h +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -15,9 +15,12 @@ struct intel_dsb; struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc, unsigned int max_cmds); +void intel_dsb_finish(struct intel_dsb *dsb); void intel_dsb_cleanup(struct intel_dsb *dsb); void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val); -void intel_dsb_commit(struct intel_dsb *dsb); +void intel_dsb_commit(struct intel_dsb *dsb, + bool wait_for_vblank); +void intel_dsb_wait(struct intel_dsb *dsb); #endif diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c index 20e466d843ce..049443245310 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c @@ -162,6 +162,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, static int dcs_setup_backlight(struct intel_connector *connector, enum pipe unused) { + struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; if (panel->vbt.backlight.brightness_precision_bits > 8) @@ -171,6 +172,10 @@ static int dcs_setup_backlight(struct intel_connector *connector, panel->backlight.level = panel->backlight.max; + drm_dbg_kms(&i915->drm, + "[CONNECTOR:%d:%s] Using DCS for backlight control\n", + connector->base.base.id, connector->base.name); + return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 0be8105cb18a..eb2dcd866cc8 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -444,11 +444,8 @@ static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv, * the clock enabled before we attempt to initialize * the device. 
*/ - for_each_pipe(dev_priv, pipe) { - dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe)); - intel_de_write(dev_priv, DPLL(pipe), - dpll[pipe] | DPLL_DVO_2X_MODE); - } + for_each_pipe(dev_priv, pipe) + dpll[pipe] = intel_de_rmw(dev_priv, DPLL(pipe), 0, DPLL_DVO_2X_MODE); ret = dvo->dev_ops->init(&intel_dvo->dev, i2c); diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index 93d0e46e5481..799bdc81a6a9 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -2007,6 +2007,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, vm = intel_dpt_create(intel_fb); if (IS_ERR(vm)) { + drm_dbg_kms(&dev_priv->drm, "failed to create DPT\n"); ret = PTR_ERR(vm); goto err; } @@ -2017,11 +2018,14 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs); if (ret) { drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret); - goto err; + goto err_free_dpt; } return 0; +err_free_dpt: + if (intel_fb_uses_dpt(fb)) + intel_dpt_destroy(intel_fb->dpt_vm); err: intel_frontbuffer_put(intel_fb->frontbuffer); return ret; @@ -2046,6 +2050,7 @@ intel_user_framebuffer_create(struct drm_device *dev, if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) { /* object is "remote", not in local memory */ i915_gem_object_put(obj); + drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n"); return ERR_PTR(-EREMOTE); } diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index f76b06293eb9..3659350061a7 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -561,9 +561,9 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) intel_fbdev_unregister(to_i915(ifbdev->helper.dev)); } -void intel_fbdev_initial_config_async(struct drm_device *dev) +void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv) { - struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev; + struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; if (!ifbdev) return; @@ -706,9 +706,9 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) drm_fb_helper_hotplug_event(&ifbdev->helper); } -void intel_fbdev_restore_mode(struct drm_device *dev) +void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv) { - struct intel_fbdev *ifbdev = to_i915(dev)->display.fbdev.fbdev; + struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; if (!ifbdev) return; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h index 0e95e9472fa3..04fd523a5023 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.h +++ b/drivers/gpu/drm/i915/display/intel_fbdev.h @@ -15,12 +15,12 @@ struct intel_framebuffer; #ifdef CONFIG_DRM_FBDEV_EMULATION int intel_fbdev_init(struct drm_device *dev); -void intel_fbdev_initial_config_async(struct drm_device *dev); +void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv); void intel_fbdev_unregister(struct drm_i915_private *dev_priv); void intel_fbdev_fini(struct drm_i915_private *dev_priv); void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous); void intel_fbdev_output_poll_changed(struct drm_device *dev); -void intel_fbdev_restore_mode(struct drm_device *dev); +void intel_fbdev_restore_mode(struct drm_i915_private *dev_priv); struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev 
*fbdev); #else static inline int intel_fbdev_init(struct drm_device *dev) @@ -28,7 +28,7 @@ static inline int intel_fbdev_init(struct drm_device *dev) return 0; } -static inline void intel_fbdev_initial_config_async(struct drm_device *dev) +static inline void intel_fbdev_initial_config_async(struct drm_i915_private *dev_priv) { } @@ -48,7 +48,7 @@ static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) { } -static inline void intel_fbdev_restore_mode(struct drm_device *dev) +static inline void intel_fbdev_restore_mode(struct drm_i915_private *i915) { } static inline struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev) diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index 063f1da4f229..f55b4893c00f 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -366,8 +366,7 @@ void intel_fdi_normal_train(struct intel_crtc *crtc) /* IVB wants error correction enabled */ if (IS_IVYBRIDGE(dev_priv)) - intel_de_write(dev_priv, reg, - intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE); + intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE); } /* The FDI link training functions for ILK/Ibexpeak. */ @@ -439,19 +438,11 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc, drm_err(&dev_priv->drm, "FDI train 1 fail!\n"); /* Train 2 */ - reg = FDI_TX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_2; - intel_de_write(dev_priv, reg, temp); - - reg = FDI_RX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_2; - intel_de_write(dev_priv, reg, temp); - - intel_de_posting_read(dev_priv, reg); + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), + FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2); + intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), + FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2); + intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); udelay(150); reg = FDI_RX_IIR(pipe); @@ -538,13 +529,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, udelay(150); for (i = 0; i < 4; i++) { - reg = FDI_TX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; - temp |= snb_b_fdi_train_param[i]; - intel_de_write(dev_priv, reg, temp); - - intel_de_posting_read(dev_priv, reg); + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), + FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]); + intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); udelay(500); for (retry = 0; retry < 5; retry++) { @@ -593,13 +580,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, udelay(150); for (i = 0; i < 4; i++) { - reg = FDI_TX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; - temp |= snb_b_fdi_train_param[i]; - intel_de_write(dev_priv, reg, temp); - - intel_de_posting_read(dev_priv, reg); + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), + FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]); + intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); udelay(500); for (retry = 0; retry < 5; retry++) { @@ -719,19 +702,13 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, } /* Train 2 */ - reg = FDI_TX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~FDI_LINK_TRAIN_NONE_IVB; - temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; - intel_de_write(dev_priv, reg, temp); - - reg = FDI_RX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= 
~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; - intel_de_write(dev_priv, reg, temp); - - intel_de_posting_read(dev_priv, reg); + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), + FDI_LINK_TRAIN_NONE_IVB, + FDI_LINK_TRAIN_PATTERN_2_IVB); + intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), + FDI_LINK_TRAIN_PATTERN_MASK_CPT, + FDI_LINK_TRAIN_PATTERN_2_CPT); + intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); udelay(2); /* should be 1.5us */ for (i = 0; i < 4; i++) { @@ -837,9 +814,8 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, udelay(30); /* Unset FDI_RX_MISC pwrdn lanes */ - temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); - temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp); + intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), + FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0); intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); /* Wait for FDI auto training time */ @@ -865,25 +841,21 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val); intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A)); - temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E)); - temp &= ~DDI_BUF_CTL_ENABLE; - intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp); + intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0); intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E)); /* Disable DP_TP_CTL and FDI_RX_CTL and retry */ - temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E)); - temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); - temp |= DP_TP_CTL_LINK_TRAIN_PAT1; - intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp); + intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), + DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK, + DP_TP_CTL_LINK_TRAIN_PAT1); intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E)); intel_wait_ddi_buf_idle(dev_priv, PORT_E); /* Reset FDI_RX_MISC pwrdn lanes */ - temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); - temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp); + intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), + FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, + FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2)); intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A)); } @@ -898,7 +870,6 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, void hsw_fdi_disable(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val; /* * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) @@ -906,30 +877,15 @@ void hsw_fdi_disable(struct intel_encoder *encoder) * step 13 is the correct place for it. Step 18 is where it was * originally before the BUN. 
*/ - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_RX_ENABLE; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); - - val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E)); - val &= ~DDI_BUF_CTL_ENABLE; - intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val); - + intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0); + intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0); intel_wait_ddi_buf_idle(dev_priv, PORT_E); - intel_ddi_disable_clock(encoder); - - val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); - val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val); - - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_PCDCLK; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); - - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_RX_PLL_ENABLE; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); + intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A), + FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, + FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2)); + intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0); + intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0); } void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) @@ -945,16 +901,14 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) temp = intel_de_read(dev_priv, reg); temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); - temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; + temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE); intel_de_posting_read(dev_priv, reg); udelay(200); /* Switch from Rawclk to PCDclk */ - temp = intel_de_read(dev_priv, reg); - intel_de_write(dev_priv, reg, temp | FDI_PCDCLK); - + intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK); intel_de_posting_read(dev_priv, reg); udelay(200); @@ -974,28 +928,18 @@ void ilk_fdi_pll_disable(struct intel_crtc *crtc) struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; - i915_reg_t reg; - u32 temp; /* Switch from PCDclk to Rawclk */ - reg = FDI_RX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK); + intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0); /* Disable CPU FDI TX PLL */ - reg = FDI_TX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE); - - intel_de_posting_read(dev_priv, reg); + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0); + intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); udelay(100); - reg = FDI_RX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE); - /* Wait for the clocks to turn off. 
*/ - intel_de_posting_read(dev_priv, reg); + intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0); + intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe)); udelay(100); } @@ -1007,15 +951,13 @@ void ilk_fdi_disable(struct intel_crtc *crtc) u32 temp; /* disable CPU FDI tx and PCH FDI rx */ - reg = FDI_TX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE); - intel_de_posting_read(dev_priv, reg); + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0); + intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe)); reg = FDI_RX_CTL(pipe); temp = intel_de_read(dev_priv, reg); temp &= ~(0x7 << 16); - temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; + temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE); intel_de_posting_read(dev_priv, reg); @@ -1027,11 +969,8 @@ void ilk_fdi_disable(struct intel_crtc *crtc) FDI_RX_PHASE_SYNC_POINTER_OVR); /* still set train pattern 1 */ - reg = FDI_TX_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_PATTERN_1; - intel_de_write(dev_priv, reg, temp); + intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), + FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1); reg = FDI_RX_CTL(pipe); temp = intel_de_read(dev_priv, reg); @@ -1042,9 +981,9 @@ void ilk_fdi_disable(struct intel_crtc *crtc) temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_1; } - /* BPC in FDI rx is consistent with that in PIPECONF */ + /* BPC in FDI rx is consistent with that in TRANSCONF */ temp &= ~(0x07 << 16); - temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; + temp |= (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) << 11; intel_de_write(dev_priv, reg, temp); intel_de_posting_read(dev_priv, reg); diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index d636d21fa9ce..b708a62e509a 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -31,6 +31,7 @@ #include "intel_display_types.h" #include "intel_fbc.h" #include "intel_fifo_underrun.h" +#include "intel_pch_display.h" /** * DOC: fifo underrun handling @@ -509,3 +510,22 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } + +void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915, + struct intel_crtc *crtc, + bool enable) +{ + crtc->cpu_fifo_underrun_disabled = !enable; + + /* + * We track the PCH trancoder underrun reporting state + * within the crtc. With crtc for pipe A housing the underrun + * reporting state for PCH transcoder A, crtc for pipe B housing + * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A, + * and marking underrun reporting as disabled for the non-existing + * PCH transcoders B and C would prevent enabling the south + * error interrupt (see cpt_can_enable_serr_int()). 
+ */ + if (intel_has_pch_trancoder(i915, crtc->pipe)) + crtc->pch_fifo_underrun_disabled = !enable; +} diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h index 2e47d7d3c101..b00d8abebcf9 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.h +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.h @@ -9,8 +9,11 @@ #include <linux/types.h> struct drm_i915_private; +struct intel_crtc; enum pipe; +void intel_init_fifo_underrun_reporting(struct drm_i915_private *i915, + struct intel_crtc *crtc, bool enable); bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable); bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 0bc4f6b48e80..3ddfc8080ee8 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -215,41 +215,23 @@ intel_gmbus_reset(struct drm_i915_private *i915) static void pnv_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { - u32 val; - /* When using bit bashing for I2C, this bit needs to be set to 1 */ - val = intel_de_read(i915, DSPCLK_GATE_D(i915)); - if (!enable) - val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE; - else - val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE; - intel_de_write(i915, DSPCLK_GATE_D(i915), val); + intel_de_rmw(i915, DSPCLK_GATE_D(i915), PNV_GMBUSUNIT_CLOCK_GATE_DISABLE, + !enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0); } static void pch_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { - u32 val; - - val = intel_de_read(i915, SOUTH_DSPCLK_GATE_D); - if (!enable) - val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE; - else - val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE; - intel_de_write(i915, SOUTH_DSPCLK_GATE_D, val); + intel_de_rmw(i915, SOUTH_DSPCLK_GATE_D, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE, + !enable ? PCH_GMBUSUNIT_CLOCK_GATE_DISABLE : 0); } static void bxt_gmbus_clock_gating(struct drm_i915_private *i915, bool enable) { - u32 val; - - val = intel_de_read(i915, GEN9_CLKGATE_DIS_4); - if (!enable) - val |= BXT_GMBUS_GATING_DIS; - else - val &= ~BXT_GMBUS_GATING_DIS; - intel_de_write(i915, GEN9_CLKGATE_DIS_4, val); + intel_de_rmw(i915, GEN9_CLKGATE_DIS_4, BXT_GMBUS_GATING_DIS, + !enable ? BXT_GMBUS_GATING_DIS : 0); } static u32 get_reserved(struct intel_gmbus *bus) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 6406fd487ee5..2984d2810e42 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -943,8 +943,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector) repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port); - intel_de_write(dev_priv, HDCP_REP_CTL, - intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl); + intel_de_rmw(dev_priv, HDCP_REP_CTL, repeater_ctl, 0); ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false); if (ret) { @@ -1819,12 +1818,10 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) } if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & - LINK_AUTH_STATUS) { + LINK_AUTH_STATUS) /* Link is Authenticated. 
Now set for Encryption */ - intel_de_write(dev_priv, - HDCP2_CTL(dev_priv, cpu_transcoder, port), - intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ); - } + intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port), + 0, CTL_LINK_ENCRYPTION_REQ); ret = intel_de_wait_for_set(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, @@ -1848,8 +1845,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector) drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS)); - intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port), - intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ); + intel_de_rmw(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port), + CTL_LINK_ENCRYPTION_REQ, 0); ret = intel_de_wait_for_clear(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index c0ce6d3dc505..c7e9e1fbed37 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -238,15 +238,11 @@ static void g4x_read_infoframe(struct intel_encoder *encoder, void *frame, ssize_t len) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val, *data = frame; + u32 *data = frame; int i; - val = intel_de_read(dev_priv, VIDEO_DIP_CTL); - - val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(type); - - intel_de_write(dev_priv, VIDEO_DIP_CTL, val); + intel_de_rmw(dev_priv, VIDEO_DIP_CTL, + VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) *data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA); @@ -314,15 +310,11 @@ static void ibx_read_infoframe(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - u32 val, *data = frame; + u32 *data = frame; int i; - val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe)); - - val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(type); - - intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val); + intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), + VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe)); @@ -396,15 +388,11 @@ static void cpt_read_infoframe(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - u32 val, *data = frame; + u32 *data = frame; int i; - val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe)); - - val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(type); - - intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val); + intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), + VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe)); @@ -472,15 +460,11 @@ static void vlv_read_infoframe(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - u32 val, *data = frame; + u32 *data = frame; int i; - val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe)); - - val &= 
~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ - val |= g4x_infoframe_index(type); - - intel_de_write(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), val); + intel_de_rmw(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), + VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type)); for (i = 0; i < len; i += 4) *data++ = intel_de_read(dev_priv, @@ -1795,7 +1779,7 @@ static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder) else max_tmds_clock = 165000; - vbt_max_tmds_clock = intel_bios_max_tmds_clock(encoder); + vbt_max_tmds_clock = intel_bios_hdmi_max_tmds_clock(encoder->devdata); if (vbt_max_tmds_clock) max_tmds_clock = min(max_tmds_clock, vbt_max_tmds_clock); @@ -2152,7 +2136,7 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, * Our YCbCr output is always limited range. * crtc_state->limited_color_range only applies to RGB, * and it must never be set for YCbCr or we risk setting - * some conflicting bits in PIPECONF which will mess up + * some conflicting bits in TRANSCONF which will mess up * the colors on the monitor. */ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) @@ -2240,6 +2224,25 @@ static bool intel_hdmi_is_cloned(const struct intel_crtc_state *crtc_state) !is_power_of_2(crtc_state->uapi.encoder_mask); } +static bool source_supports_scrambling(struct intel_encoder *encoder) +{ + /* + * Gen 10+ support HDMI 2.0 : the max tmds clock is 594MHz, and + * scrambling is supported. + * But there seem to be cases where certain platforms that support + * HDMI 2.0, have an HDMI1.4 retimer chip, and the max tmds clock is + * capped by VBT to less than 340MHz. + * + * In such cases when an HDMI2.0 sink is connected, it creates a + * problem : the platform and the sink both support scrambling but the + * HDMI 1.4 retimer chip doesn't. + * + * So go for scrambling, based on the max tmds clock taking into account, + * restrictions coming from VBT. 
+ */ + return intel_hdmi_source_max_tmds_clock(encoder) > 340000; +} + int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -2302,7 +2305,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, pipe_config->lane_count = 4; - if (scdc->scrambling.supported && DISPLAY_VER(dev_priv) >= 10) { + if (scdc->scrambling.supported && source_supports_scrambling(encoder)) { if (scdc->scrambling.low_rates) pipe_config->hdmi_scrambling = true; @@ -2852,11 +2855,12 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) enum port port = encoder->port; u8 ddc_pin; - ddc_pin = intel_bios_alternate_ddc_pin(encoder); + ddc_pin = intel_bios_hdmi_ddc_pin(encoder->devdata); if (ddc_pin) { drm_dbg_kms(&dev_priv->drm, - "Using DDC pin 0x%x for port %c (VBT)\n", - ddc_pin, port_name(port)); + "[ENCODER:%d:%s] Using DDC pin 0x%x (VBT)\n", + encoder->base.base.id, encoder->base.name, + ddc_pin); return ddc_pin; } @@ -2882,8 +2886,9 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) ddc_pin = g4x_port_to_ddc_pin(dev_priv, port); drm_dbg_kms(&dev_priv->drm, - "Using DDC pin 0x%x for port %c (platform default)\n", - ddc_pin, port_name(port)); + "[ENCODER:%d:%s] Using DDC pin 0x%x (platform default)\n", + encoder->base.base.id, encoder->base.name, + ddc_pin); return ddc_pin; } @@ -2904,7 +2909,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port) dig_port->set_infoframes = g4x_set_infoframes; dig_port->infoframes_enabled = g4x_infoframes_enabled; } else if (HAS_DDI(dev_priv)) { - if (intel_bios_is_lspcon_present(dev_priv, dig_port->base.port)) { + if (intel_bios_encoder_is_lspcon(dig_port->base.devdata)) { dig_port->write_infoframe = lspcon_write_infoframe; dig_port->read_infoframe = lspcon_read_infoframe; dig_port->set_infoframes = lspcon_set_infoframes; diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c index 8aaaef4d7856..5863763de530 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c @@ -315,7 +315,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) * intel_lpe_audio_notify() - notify lpe audio event * audio driver and i915 * @dev_priv: the i915 drm device private data - * @pipe: pipe + * @cpu_transcoder: CPU transcoder * @port: port * @eld : ELD data * @ls_clock: Link symbol clock in kHz @@ -324,7 +324,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) * Notify lpe audio driver of eld change. 
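The new source_supports_scrambling() helper above replaces a bare DISPLAY_VER >= 10 check: scrambling is only attempted when the source's effective max TMDS clock, which the VBT may cap below 340 MHz to account for an HDMI 1.4 retimer, actually exceeds the HDMI 1.4 limit, and the sink advertises scrambling. A minimal stand-alone model of that decision follows; the function and parameter names are invented for illustration, only the min-with-VBT step and the 340000 kHz threshold mirror the hunk.

/* Model of the scrambling decision: sink support alone is not enough, the
 * source side (possibly capped by VBT for a retimer) must exceed 340 MHz. */
#include <stdbool.h>
#include <stdio.h>

static bool source_allows_scrambling(int platform_max_tmds_khz, int vbt_max_tmds_khz)
{
	int max_tmds_khz = platform_max_tmds_khz;

	/* VBT may lower the platform limit, e.g. for an HDMI 1.4 retimer. */
	if (vbt_max_tmds_khz && vbt_max_tmds_khz < max_tmds_khz)
		max_tmds_khz = vbt_max_tmds_khz;

	return max_tmds_khz > 340000;	/* same threshold as the helper above */
}

int main(void)
{
	bool sink_scrambling = true;

	/* HDMI 2.0 capable platform, but VBT caps TMDS to 300 MHz: no scrambling. */
	printf("%d\n", sink_scrambling && source_allows_scrambling(594000, 300000));
	/* No VBT cap: scrambling can be used. */
	printf("%d\n", sink_scrambling && source_allows_scrambling(594000, 0));
	return 0;
}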
*/ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, - enum pipe pipe, enum port port, + enum transcoder cpu_transcoder, enum port port, const void *eld, int ls_clock, bool dp_output) { unsigned long irqflags; @@ -344,7 +344,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, if (eld != NULL) { memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES); - ppdata->pipe = pipe; + ppdata->pipe = cpu_transcoder; ppdata->ls_clock = ls_clock; ppdata->dp_output = dp_output; diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.h b/drivers/gpu/drm/i915/display/intel_lpe_audio.h index f848c5038714..0beecac267ae 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.h +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.h @@ -8,15 +8,15 @@ #include <linux/types.h> -enum pipe; enum port; +enum transcoder; struct drm_i915_private; int intel_lpe_audio_init(struct drm_i915_private *dev_priv); void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv); void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv); void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, - enum pipe pipe, enum port port, + enum transcoder cpu_transcoder, enum port port, const void *eld, int ls_clock, bool dp_output); #endif /* __INTEL_LPE_AUDIO_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index 9ff1c0b223ad..bb3b5355a0d9 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -689,7 +689,7 @@ void lspcon_resume(struct intel_digital_port *dig_port) struct drm_i915_private *i915 = to_i915(dev); enum drm_lspcon_mode expected_mode; - if (!intel_bios_is_lspcon_present(i915, dig_port->base.port)) + if (!intel_bios_encoder_is_lspcon(dig_port->base.devdata)) return; if (!lspcon->active) { diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index a1557d84ce0a..a504b3a7fbd5 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -49,6 +49,7 @@ #include "intel_fdi.h" #include "intel_gmbus.h" #include "intel_lvds.h" +#include "intel_lvds_regs.h" #include "intel_panel.h" /* Private structure for the integrated LVDS support */ @@ -84,18 +85,18 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct intel_encoder *encoder) return container_of(encoder, struct intel_lvds_encoder, base); } -bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, +bool intel_lvds_port_enabled(struct drm_i915_private *i915, i915_reg_t lvds_reg, enum pipe *pipe) { u32 val; - val = intel_de_read(dev_priv, lvds_reg); + val = intel_de_read(i915, lvds_reg); /* asserts want to know the pipe even if the port is disabled */ - if (HAS_PCH_CPT(dev_priv)) - *pipe = (val & LVDS_PIPE_SEL_MASK_CPT) >> LVDS_PIPE_SEL_SHIFT_CPT; + if (HAS_PCH_CPT(i915)) + *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK_CPT, val); else - *pipe = (val & LVDS_PIPE_SEL_MASK) >> LVDS_PIPE_SEL_SHIFT; + *pipe = REG_FIELD_GET(LVDS_PIPE_SEL_MASK, val); return val & LVDS_PORT_EN; } @@ -103,31 +104,30 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv, static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); intel_wakeref_t wakeref; bool ret; - wakeref = intel_display_power_get_if_enabled(dev_priv, - 
encoder->power_domain); + wakeref = intel_display_power_get_if_enabled(i915, encoder->power_domain); if (!wakeref) return false; - ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe); + ret = intel_lvds_port_enabled(i915, lvds_encoder->reg, pipe); - intel_display_power_put(dev_priv, encoder->power_domain, wakeref); + intel_display_power_put(i915, encoder->power_domain, wakeref); return ret; } static void intel_lvds_get_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) + struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); u32 tmp, flags = 0; - pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS); + crtc_state->output_types |= BIT(INTEL_OUTPUT_LVDS); tmp = intel_de_read(dev_priv, lvds_encoder->reg); if (tmp & LVDS_HSYNC_POLARITY) @@ -139,20 +139,20 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, else flags |= DRM_MODE_FLAG_PVSYNC; - pipe_config->hw.adjusted_mode.flags |= flags; + crtc_state->hw.adjusted_mode.flags |= flags; if (DISPLAY_VER(dev_priv) < 5) - pipe_config->gmch_pfit.lvds_border_bits = + crtc_state->gmch_pfit.lvds_border_bits = tmp & LVDS_BORDER_ENABLE; /* gen2/3 store dither state in pfit control, needs to match */ if (DISPLAY_VER(dev_priv) < 4) { tmp = intel_de_read(dev_priv, PFIT_CONTROL); - pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; + crtc_state->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; } - pipe_config->hw.adjusted_mode.crtc_clock = pipe_config->port_clock; + crtc_state->hw.adjusted_mode.crtc_clock = crtc_state->port_clock; } static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv, @@ -216,41 +216,44 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv, intel_de_write(dev_priv, PP_CONTROL(0), val); intel_de_write(dev_priv, PP_ON_DELAYS(0), - REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5)); + REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | + REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | + REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5)); intel_de_write(dev_priv, PP_OFF_DELAYS(0), - REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx)); + REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | + REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx)); intel_de_write(dev_priv, PP_DIVISOR(0), - REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1)); + REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | + REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1)); } static void intel_pre_enable_lvds(struct intel_atomic_state *state, struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config, + const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct drm_display_mode *adjusted_mode = 
&crtc_state->hw.adjusted_mode; enum pipe pipe = crtc->pipe; u32 temp; - if (HAS_PCH_SPLIT(dev_priv)) { - assert_fdi_rx_pll_disabled(dev_priv, pipe); - assert_shared_dpll_disabled(dev_priv, - pipe_config->shared_dpll); + if (HAS_PCH_SPLIT(i915)) { + assert_fdi_rx_pll_disabled(i915, pipe); + assert_shared_dpll_disabled(i915, crtc_state->shared_dpll); } else { - assert_pll_disabled(dev_priv, pipe); + assert_pll_disabled(i915, pipe); } - intel_lvds_pps_init_hw(dev_priv, &lvds_encoder->init_pps); + intel_lvds_pps_init_hw(i915, &lvds_encoder->init_pps); temp = lvds_encoder->init_lvds_val; temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; - if (HAS_PCH_CPT(dev_priv)) { + if (HAS_PCH_CPT(i915)) { temp &= ~LVDS_PIPE_SEL_MASK_CPT; temp |= LVDS_PIPE_SEL_CPT(pipe); } else { @@ -260,7 +263,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state, /* set the corresponsding LVDS_BORDER bit */ temp &= ~LVDS_BORDER_ENABLE; - temp |= pipe_config->gmch_pfit.lvds_border_bits; + temp |= crtc_state->gmch_pfit.lvds_border_bits; /* * Set the B0-B3 data pairs corresponding to whether we're going to @@ -283,14 +286,14 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state, /* * Set the dithering flag on LVDS as needed, note that there is no * special lvds dither control bit on pch-split platforms, dithering is - * only controlled through the PIPECONF reg. + * only controlled through the TRANSCONF reg. */ - if (DISPLAY_VER(dev_priv) == 4) { + if (DISPLAY_VER(i915) == 4) { /* * Bspec wording suggests that LVDS port dithering only exists * for 18bpp panels. */ - if (pipe_config->dither && pipe_config->pipe_bpp == 18) + if (crtc_state->dither && crtc_state->pipe_bpp == 18) temp |= LVDS_ENABLE_DITHER; else temp &= ~LVDS_ENABLE_DITHER; @@ -301,7 +304,7 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state, if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) temp |= LVDS_VSYNC_POLARITY; - intel_de_write(dev_priv, lvds_encoder->reg, temp); + intel_de_write(i915, lvds_encoder->reg, temp); } /* @@ -309,25 +312,22 @@ static void intel_pre_enable_lvds(struct intel_atomic_state *state, */ static void intel_enable_lvds(struct intel_atomic_state *state, struct intel_encoder *encoder, - const struct intel_crtc_state *pipe_config, + const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_device *dev = encoder->base.dev; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - intel_de_write(dev_priv, lvds_encoder->reg, - intel_de_read(dev_priv, lvds_encoder->reg) | LVDS_PORT_EN); + intel_de_rmw(dev_priv, lvds_encoder->reg, 0, LVDS_PORT_EN); - intel_de_write(dev_priv, PP_CONTROL(0), - intel_de_read(dev_priv, PP_CONTROL(0)) | PANEL_POWER_ON); + intel_de_rmw(dev_priv, PP_CONTROL(0), 0, PANEL_POWER_ON); intel_de_posting_read(dev_priv, lvds_encoder->reg); if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000)) drm_err(&dev_priv->drm, "timed out waiting for panel to power on\n"); - intel_backlight_enable(pipe_config, conn_state); + intel_backlight_enable(crtc_state, conn_state); } static void intel_disable_lvds(struct intel_atomic_state *state, @@ -338,14 +338,12 @@ static void intel_disable_lvds(struct intel_atomic_state *state, struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - intel_de_write(dev_priv, PP_CONTROL(0), - 
intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON); + intel_de_rmw(dev_priv, PP_CONTROL(0), PANEL_POWER_ON, 0); if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000)) drm_err(&dev_priv->drm, "timed out waiting for panel to power off\n"); - intel_de_write(dev_priv, lvds_encoder->reg, - intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN); + intel_de_rmw(dev_priv, lvds_encoder->reg, LVDS_PORT_EN, 0); intel_de_posting_read(dev_priv, lvds_encoder->reg); } @@ -386,19 +384,19 @@ static void intel_lvds_shutdown(struct intel_encoder *encoder) } static enum drm_mode_status -intel_lvds_mode_valid(struct drm_connector *connector, +intel_lvds_mode_valid(struct drm_connector *_connector, struct drm_display_mode *mode) { - struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_connector *connector = to_intel_connector(_connector); const struct drm_display_mode *fixed_mode = - intel_panel_fixed_mode(intel_connector, mode); - int max_pixclk = to_i915(connector->dev)->max_dotclk_freq; + intel_panel_fixed_mode(connector, mode); + int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq; enum drm_mode_status status; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; - status = intel_panel_mode_valid(intel_connector, mode); + status = intel_panel_mode_valid(connector, mode); if (status != MODE_OK) return status; @@ -408,23 +406,21 @@ intel_lvds_mode_valid(struct drm_connector *connector, return MODE_OK; } -static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, - struct intel_crtc_state *pipe_config, +static int intel_lvds_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); - struct intel_lvds_encoder *lvds_encoder = - to_lvds_encoder(intel_encoder); - struct intel_connector *intel_connector = - lvds_encoder->attached_connector; - struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; - struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder); + struct intel_connector *connector = lvds_encoder->attached_connector; + struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); unsigned int lvds_bpp; int ret; /* Should never happen!! 
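intel_enable_lvds()/intel_disable_lvds() above now pair an intel_de_rmw() of the port/panel-power bits with a bounded wait on PP_STATUS (5000 ms for power-on, 1000 ms for power-off in these hunks). The sketch below models that "flip a control bit, then poll a status bit with a timeout" pattern; the fake registers, hw_tick() latency and poll counts are invented, not driver behaviour.

/* Sketch of "set a control bit, then poll a status bit with a timeout". */
#include <stdbool.h>
#include <stdio.h>

static unsigned int pp_control, pp_status;
#define PANEL_POWER_ON	(1u << 0)
#define PP_ON		(1u << 31)

/* Pretend hardware: status follows control after a few polls. */
static void hw_tick(void)
{
	static int latency = 3;

	if ((pp_control & PANEL_POWER_ON) && latency-- <= 0)
		pp_status |= PP_ON;
}

static bool wait_for_set(unsigned int *reg, unsigned int bit, int timeout_polls)
{
	while (timeout_polls--) {
		hw_tick();
		if (*reg & bit)
			return true;	/* panel reported power-on in time */
	}
	return false;			/* this is the "timed out" error path */
}

int main(void)
{
	pp_control |= PANEL_POWER_ON;	/* the rmw(PP_CONTROL, 0, PANEL_POWER_ON) step */
	printf("panel on: %s\n", wait_for_set(&pp_status, PP_ON, 10) ? "yes" : "timeout");
	return 0;
}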
*/ - if (DISPLAY_VER(dev_priv) < 4 && crtc->pipe == 0) { - drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n"); + if (DISPLAY_VER(i915) < 4 && crtc->pipe == 0) { + drm_err(&i915->drm, "Can't support LVDS on pipe A\n"); return -EINVAL; } @@ -433,14 +429,14 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, else lvds_bpp = 6*3; - if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) { - drm_dbg_kms(&dev_priv->drm, + if (lvds_bpp != crtc_state->pipe_bpp && !crtc_state->bw_constrained) { + drm_dbg_kms(&i915->drm, "forcing display bpp (was %d) to LVDS (%d)\n", - pipe_config->pipe_bpp, lvds_bpp); - pipe_config->pipe_bpp = lvds_bpp; + crtc_state->pipe_bpp, lvds_bpp); + crtc_state->pipe_bpp = lvds_bpp; } - pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; /* * We have timings from the BIOS for the panel, put them in @@ -448,17 +444,17 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, * with the panel scaling set up to source from the H/VDisplay * of the original mode. */ - ret = intel_panel_compute_config(intel_connector, adjusted_mode); + ret = intel_panel_compute_config(connector, adjusted_mode); if (ret) return ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; - if (HAS_PCH_SPLIT(dev_priv)) - pipe_config->has_pch_encoder = true; + if (HAS_PCH_SPLIT(i915)) + crtc_state->has_pch_encoder = true; - ret = intel_panel_fitting(pipe_config, conn_state); + ret = intel_panel_fitting(crtc_state, conn_state); if (ret) return ret; @@ -474,19 +470,19 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder, /* * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. */ -static int intel_lvds_get_modes(struct drm_connector *connector) +static int intel_lvds_get_modes(struct drm_connector *_connector) { - struct intel_connector *intel_connector = to_intel_connector(connector); - const struct drm_edid *fixed_edid = intel_connector->panel.fixed_edid; + struct intel_connector *connector = to_intel_connector(_connector); + const struct drm_edid *fixed_edid = connector->panel.fixed_edid; /* Use panel fixed edid if we have one */ if (!IS_ERR_OR_NULL(fixed_edid)) { - drm_edid_connector_update(connector, fixed_edid); + drm_edid_connector_update(&connector->base, fixed_edid); - return drm_edid_connector_add_modes(connector); + return drm_edid_connector_add_modes(&connector->base); } - return intel_panel_get_modes(intel_connector); + return intel_panel_get_modes(connector); } static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { @@ -585,12 +581,12 @@ static const struct dmi_system_id intel_no_lvds[] = { }, { .callback = intel_no_lvds_dmi_callback, - .ident = "AOpen i45GMx-I", - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), - DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"), - }, - }, + .ident = "AOpen i45GMx-I", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), + DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"), + }, + }, { .callback = intel_no_lvds_dmi_callback, .ident = "Aopen i945GTt-VFA", @@ -607,14 +603,14 @@ static const struct dmi_system_id intel_no_lvds[] = { }, }, { - .callback = intel_no_lvds_dmi_callback, - .ident = "Clientron E830", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), - DMI_MATCH(DMI_PRODUCT_NAME, "E830"), - }, - }, - { + .callback = intel_no_lvds_dmi_callback, + .ident = "Clientron E830", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Clientron"), + 
DMI_MATCH(DMI_PRODUCT_NAME, "E830"), + }, + }, + { .callback = intel_no_lvds_dmi_callback, .ident = "Asus EeeBox PC EB1007", .matches = { @@ -764,11 +760,11 @@ static const struct dmi_system_id intel_dual_link_lvds[] = { { } /* terminating entry */ }; -struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv) +struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *i915) { struct intel_encoder *encoder; - for_each_intel_encoder(&dev_priv->drm, encoder) { + for_each_intel_encoder(&i915->drm, encoder) { if (encoder->type == INTEL_OUTPUT_LVDS) return encoder; } @@ -776,24 +772,24 @@ struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv) return NULL; } -bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv) +bool intel_is_dual_link_lvds(struct drm_i915_private *i915) { - struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv); + struct intel_encoder *encoder = intel_get_lvds_encoder(i915); return encoder && to_lvds_encoder(encoder)->is_dual_link; } static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) { - struct drm_i915_private *dev_priv = to_i915(lvds_encoder->base.base.dev); + struct drm_i915_private *i915 = to_i915(lvds_encoder->base.base.dev); struct intel_connector *connector = lvds_encoder->attached_connector; const struct drm_display_mode *fixed_mode = intel_panel_preferred_fixed_mode(connector); unsigned int val; /* use the module option value if specified */ - if (dev_priv->params.lvds_channel_mode > 0) - return dev_priv->params.lvds_channel_mode == 2; + if (i915->params.lvds_channel_mode > 0) + return i915->params.lvds_channel_mode == 2; /* single channel LVDS is limited to 112 MHz */ if (fixed_mode->clock > 112999) @@ -808,8 +804,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) * we need to check "the value to be set" in VBT when LVDS * register is uninitialized. */ - val = intel_de_read(dev_priv, lvds_encoder->reg); - if (HAS_PCH_CPT(dev_priv)) + val = intel_de_read(i915, lvds_encoder->reg); + if (HAS_PCH_CPT(i915)) val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT); else val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK); @@ -826,56 +822,54 @@ static void intel_lvds_add_properties(struct drm_connector *connector) /** * intel_lvds_init - setup LVDS connectors on this device - * @dev_priv: i915 device + * @i915: i915 device * * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). */ -void intel_lvds_init(struct drm_i915_private *dev_priv) +void intel_lvds_init(struct drm_i915_private *i915) { struct intel_lvds_encoder *lvds_encoder; - struct intel_encoder *intel_encoder; - struct intel_connector *intel_connector; - struct drm_connector *connector; - struct drm_encoder *encoder; + struct intel_connector *connector; const struct drm_edid *drm_edid; + struct intel_encoder *encoder; i915_reg_t lvds_reg; u32 lvds; u8 pin; /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) { - drm_WARN(&dev_priv->drm, !dev_priv->display.vbt.int_lvds_support, + drm_WARN(&i915->drm, !i915->display.vbt.int_lvds_support, "Useless DMI match. 
Internal LVDS support disabled by VBT\n"); return; } - if (!dev_priv->display.vbt.int_lvds_support) { - drm_dbg_kms(&dev_priv->drm, + if (!i915->display.vbt.int_lvds_support) { + drm_dbg_kms(&i915->drm, "Internal LVDS support disabled by VBT\n"); return; } - if (HAS_PCH_SPLIT(dev_priv)) + if (HAS_PCH_SPLIT(i915)) lvds_reg = PCH_LVDS; else lvds_reg = LVDS; - lvds = intel_de_read(dev_priv, lvds_reg); + lvds = intel_de_read(i915, lvds_reg); - if (HAS_PCH_SPLIT(dev_priv)) { + if (HAS_PCH_SPLIT(i915)) { if ((lvds & LVDS_DETECTED) == 0) return; } pin = GMBUS_PIN_PANEL; - if (!intel_bios_is_lvds_present(dev_priv, &pin)) { + if (!intel_bios_is_lvds_present(i915, &pin)) { if ((lvds & LVDS_PORT_EN) == 0) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "LVDS is not present in VBT\n"); return; } - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "LVDS is not present in VBT, but enabled anyway\n"); } @@ -883,57 +877,55 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) if (!lvds_encoder) return; - intel_connector = intel_connector_alloc(); - if (!intel_connector) { + connector = intel_connector_alloc(); + if (!connector) { kfree(lvds_encoder); return; } - lvds_encoder->attached_connector = intel_connector; + lvds_encoder->attached_connector = connector; + encoder = &lvds_encoder->base; - intel_encoder = &lvds_encoder->base; - encoder = &intel_encoder->base; - connector = &intel_connector->base; - drm_connector_init(&dev_priv->drm, &intel_connector->base, &intel_lvds_connector_funcs, + drm_connector_init(&i915->drm, &connector->base, &intel_lvds_connector_funcs, DRM_MODE_CONNECTOR_LVDS); - drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_lvds_enc_funcs, + drm_encoder_init(&i915->drm, &encoder->base, &intel_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS, "LVDS"); - intel_encoder->enable = intel_enable_lvds; - intel_encoder->pre_enable = intel_pre_enable_lvds; - intel_encoder->compute_config = intel_lvds_compute_config; - if (HAS_PCH_SPLIT(dev_priv)) { - intel_encoder->disable = pch_disable_lvds; - intel_encoder->post_disable = pch_post_disable_lvds; + encoder->enable = intel_enable_lvds; + encoder->pre_enable = intel_pre_enable_lvds; + encoder->compute_config = intel_lvds_compute_config; + if (HAS_PCH_SPLIT(i915)) { + encoder->disable = pch_disable_lvds; + encoder->post_disable = pch_post_disable_lvds; } else { - intel_encoder->disable = gmch_disable_lvds; + encoder->disable = gmch_disable_lvds; } - intel_encoder->get_hw_state = intel_lvds_get_hw_state; - intel_encoder->get_config = intel_lvds_get_config; - intel_encoder->update_pipe = intel_backlight_update; - intel_encoder->shutdown = intel_lvds_shutdown; - intel_connector->get_hw_state = intel_connector_get_hw_state; - - intel_connector_attach_encoder(intel_connector, intel_encoder); - - intel_encoder->type = INTEL_OUTPUT_LVDS; - intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER; - intel_encoder->port = PORT_NONE; - intel_encoder->cloneable = 0; - if (DISPLAY_VER(dev_priv) < 4) - intel_encoder->pipe_mask = BIT(PIPE_B); + encoder->get_hw_state = intel_lvds_get_hw_state; + encoder->get_config = intel_lvds_get_config; + encoder->update_pipe = intel_backlight_update; + encoder->shutdown = intel_lvds_shutdown; + connector->get_hw_state = intel_connector_get_hw_state; + + intel_connector_attach_encoder(connector, encoder); + + encoder->type = INTEL_OUTPUT_LVDS; + encoder->power_domain = POWER_DOMAIN_PORT_OTHER; + encoder->port = PORT_NONE; + encoder->cloneable = 0; + if (DISPLAY_VER(i915) < 4) + encoder->pipe_mask = 
BIT(PIPE_B); else - intel_encoder->pipe_mask = ~0; + encoder->pipe_mask = ~0; - drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); - connector->display_info.subpixel_order = SubPixelHorizontalRGB; + drm_connector_helper_add(&connector->base, &intel_lvds_connector_helper_funcs); + connector->base.display_info.subpixel_order = SubPixelHorizontalRGB; lvds_encoder->reg = lvds_reg; - intel_lvds_add_properties(connector); + intel_lvds_add_properties(&connector->base); - intel_lvds_pps_get_hw_state(dev_priv, &lvds_encoder->init_pps); + intel_lvds_pps_get_hw_state(i915, &lvds_encoder->init_pps); lvds_encoder->init_lvds_val = lvds; /* @@ -948,13 +940,13 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) * Attempt to get the fixed panel mode from DDC. Assume that the * preferred mode is the right one. */ - mutex_lock(&dev_priv->drm.mode_config.mutex); + mutex_lock(&i915->drm.mode_config.mutex); if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) { const struct edid *edid; /* FIXME: Make drm_get_edid_switcheroo() return drm_edid */ - edid = drm_get_edid_switcheroo(connector, - intel_gmbus_get_adapter(dev_priv, pin)); + edid = drm_get_edid_switcheroo(&connector->base, + intel_gmbus_get_adapter(i915, pin)); if (edid) { drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH); kfree(edid); @@ -962,49 +954,49 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) drm_edid = NULL; } } else { - drm_edid = drm_edid_read_ddc(connector, - intel_gmbus_get_adapter(dev_priv, pin)); + drm_edid = drm_edid_read_ddc(&connector->base, + intel_gmbus_get_adapter(i915, pin)); } if (drm_edid) { - if (drm_edid_connector_update(connector, drm_edid) || - !drm_edid_connector_add_modes(connector)) { - drm_edid_connector_update(connector, NULL); + if (drm_edid_connector_update(&connector->base, drm_edid) || + !drm_edid_connector_add_modes(&connector->base)) { + drm_edid_connector_update(&connector->base, NULL); drm_edid_free(drm_edid); drm_edid = ERR_PTR(-EINVAL); } } else { drm_edid = ERR_PTR(-ENOENT); } - intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, + intel_bios_init_panel_late(i915, &connector->panel, NULL, IS_ERR(drm_edid) ? NULL : drm_edid); /* Try EDID first */ - intel_panel_add_edid_fixed_modes(intel_connector, true); + intel_panel_add_edid_fixed_modes(connector, true); /* Failed to get EDID, what about VBT? */ - if (!intel_panel_preferred_fixed_mode(intel_connector)) - intel_panel_add_vbt_lfp_fixed_mode(intel_connector); + if (!intel_panel_preferred_fixed_mode(connector)) + intel_panel_add_vbt_lfp_fixed_mode(connector); /* * If we didn't get a fixed mode from EDID or VBT, try checking * if the panel is already turned on. If so, assume that * whatever is currently programmed is the correct mode. */ - if (!intel_panel_preferred_fixed_mode(intel_connector)) - intel_panel_add_encoder_fixed_mode(intel_connector, intel_encoder); + if (!intel_panel_preferred_fixed_mode(connector)) + intel_panel_add_encoder_fixed_mode(connector, encoder); - mutex_unlock(&dev_priv->drm.mode_config.mutex); + mutex_unlock(&i915->drm.mode_config.mutex); /* If we still don't have a mode after all that, give up. 
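intel_lvds_init() above tries several sources for the fixed panel mode in a strict order: EDID read over DDC (or the vga_switcheroo path), then the VBT LFP mode, then whatever mode the encoder already has programmed, and gives up only if all three fail. A compact sketch of that fallback chain; the getter names and the example mode are invented stand-ins for the intel_panel_* helpers.

/* Fallback chain: first source that yields a mode wins; NULL means "give up". */
#include <stdio.h>

struct mode { int clock_khz; };

static const struct mode *from_edid(void)    { return NULL; }	/* pretend EDID read failed */
static const struct mode *from_vbt(void)     { static const struct mode m = { 112000 }; return &m; }
static const struct mode *from_encoder(void) { return NULL; }

static const struct mode *pick_fixed_mode(void)
{
	const struct mode *(*sources[])(void) = { from_edid, from_vbt, from_encoder };

	for (unsigned int i = 0; i < sizeof(sources) / sizeof(sources[0]); i++) {
		const struct mode *m = sources[i]();

		if (m)
			return m;
	}
	return NULL;	/* no LVDS modes found: disable the output */
}

int main(void)
{
	const struct mode *m = pick_fixed_mode();

	if (m)
		printf("fixed mode clock: %d kHz\n", m->clock_khz);
	else
		printf("no LVDS modes found, disabling\n");
	return 0;
}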
*/ - if (!intel_panel_preferred_fixed_mode(intel_connector)) + if (!intel_panel_preferred_fixed_mode(connector)) goto failed; - intel_panel_init(intel_connector, drm_edid); + intel_panel_init(connector, drm_edid); - intel_backlight_setup(intel_connector, INVALID_PIPE); + intel_backlight_setup(connector, INVALID_PIPE); lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); - drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n", + drm_dbg_kms(&i915->drm, "detected %s-link lvds configuration\n", lvds_encoder->is_dual_link ? "dual" : "single"); lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK; @@ -1012,10 +1004,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) return; failed: - drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n"); - drm_connector_cleanup(connector); - drm_encoder_cleanup(encoder); + drm_dbg_kms(&i915->drm, "No LVDS modes found, disabling.\n"); + drm_connector_cleanup(&connector->base); + drm_encoder_cleanup(&encoder->base); kfree(lvds_encoder); - intel_connector_free(intel_connector); + intel_connector_free(connector); return; } diff --git a/drivers/gpu/drm/i915/display/intel_lvds_regs.h b/drivers/gpu/drm/i915/display/intel_lvds_regs.h new file mode 100644 index 000000000000..47c1832819ee --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_lvds_regs.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_LVDS_REGS_H__ +#define __INTEL_LVDS_REGS_H__ + +#include "intel_display_reg_defs.h" + +/* LVDS port control */ +#define LVDS _MMIO(0x61180) +/* + * Enables the LVDS port. This bit must be set before DPLLs are enabled, as + * the DPLL semantics change when the LVDS is assigned to that pipe. + */ +#define LVDS_PORT_EN REG_BIT(31) +/* Selects pipe B for LVDS data. Must be set on pre-965. */ +#define LVDS_PIPE_SEL_MASK REG_BIT(30) +#define LVDS_PIPE_SEL(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK, (pipe)) +#define LVDS_PIPE_SEL_MASK_CPT REG_GENMASK(30, 29) +#define LVDS_PIPE_SEL_CPT(pipe) REG_FIELD_PREP(LVDS_PIPE_SEL_MASK_CPT, (pipe)) +/* LVDS dithering flag on 965/g4x platform */ +#define LVDS_ENABLE_DITHER REG_BIT(25) +/* LVDS sync polarity flags. Set to invert (i.e. negative) */ +#define LVDS_VSYNC_POLARITY REG_BIT(21) +#define LVDS_HSYNC_POLARITY REG_BIT(20) + +/* Enable border for unscaled (or aspect-scaled) display */ +#define LVDS_BORDER_ENABLE REG_BIT(15) +/* + * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per + * pixel. + */ +#define LVDS_A0A2_CLKA_POWER_MASK REG_GENMASK(9, 8) +#define LVDS_A0A2_CLKA_POWER_DOWN REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 0) +#define LVDS_A0A2_CLKA_POWER_UP REG_FIELD_PREP(LVDS_A0A2_CLKA_POWER_MASK, 3) +/* + * Controls the A3 data pair, which contains the additional LSBs for 24 bit + * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be + * on. + */ +#define LVDS_A3_POWER_MASK REG_GENMASK(7, 6) +#define LVDS_A3_POWER_DOWN REG_FIELD_PREP(LVDS_A3_POWER_MASK, 0) +#define LVDS_A3_POWER_UP REG_FIELD_PREP(LVDS_A3_POWER_MASK, 3) +/* + * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP + * is set. + */ +#define LVDS_CLKB_POWER_MASK REG_GENMASK(5, 4) +#define LVDS_CLKB_POWER_DOWN REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 0) +#define LVDS_CLKB_POWER_UP REG_FIELD_PREP(LVDS_CLKB_POWER_MASK, 3) +/* + * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 + * setting for whether we are in dual-channel mode. 
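The new intel_lvds_regs.h above defines the pipe-select and power fields with REG_BIT()/REG_GENMASK(), and intel_lvds_port_enabled() earlier in the series switches from open-coded mask-and-shift to REG_FIELD_GET(). Below is a tiny stand-alone look-alike of that mask/extract pairing, applied to the two pipe-select layouts visible in this patch (bit 30 pre-CPT, bits 30:29 on CPT); the MY_* macro names are invented, not the kernel's.

/* Stand-alone model of REG_GENMASK()/REG_FIELD_GET()-style field extraction. */
#include <stdint.h>
#include <stdio.h>

#define MY_GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define MY_FIELD_GET(mask, val)	(((val) & (mask)) >> __builtin_ctz(mask))

#define PIPE_SEL_MASK		MY_GENMASK(30, 30)	/* pre-CPT: one bit */
#define PIPE_SEL_MASK_CPT	MY_GENMASK(30, 29)	/* CPT: two bits */

int main(void)
{
	uint32_t lvds = (2u << 29) | (1u << 31);	/* port enabled, CPT pipe select = 2 */

	printf("pre-CPT pipe: %u\n", MY_FIELD_GET(PIPE_SEL_MASK, lvds));
	printf("CPT pipe:     %u\n", MY_FIELD_GET(PIPE_SEL_MASK_CPT, lvds));
	return 0;
}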
The B3 pair will + * additionally only be powered up when LVDS_A3_POWER_UP is set. + */ +#define LVDS_B0B3_POWER_MASK REG_GENMASK(3, 2) +#define LVDS_B0B3_POWER_DOWN REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 0) +#define LVDS_B0B3_POWER_UP REG_FIELD_PREP(LVDS_B0B3_POWER_MASK, 3) + +#define PCH_LVDS _MMIO(0xe1180) +#define LVDS_DETECTED REG_BIT(1) + +#endif /* __INTEL_LVDS_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h b/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h index 0e8248bce52d..0306ade2bc30 100644 --- a/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h +++ b/drivers/gpu/drm/i915/display/intel_mg_phy_regs.h @@ -142,7 +142,9 @@ #define FIA1_BASE 0x163000 #define FIA2_BASE 0x16E000 #define FIA3_BASE 0x16F000 -#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE) +#define _FIA(fia) _PICK_EVEN_2RANGES((fia), 1, \ + FIA1_BASE, FIA1_BASE,\ + FIA2_BASE, FIA3_BASE) #define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off)) /* ICL PHY DFLEX registers */ diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c index 52cdbd4fc2fa..1d0c9e247c42 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -11,6 +11,7 @@ #include "i915_drv.h" #include "i915_reg.h" +#include "i9xx_wm.h" #include "intel_atomic.h" #include "intel_bw.h" #include "intel_color.h" @@ -21,9 +22,11 @@ #include "intel_display.h" #include "intel_display_power.h" #include "intel_display_types.h" +#include "intel_dmc.h" +#include "intel_fifo_underrun.h" #include "intel_modeset_setup.h" #include "intel_pch_display.h" -#include "intel_pm.h" +#include "intel_wm.h" #include "skl_watermark.h" static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, @@ -234,12 +237,9 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); - if (!crtc_state->hw.active && !HAS_GMCH(i915)) - return; - /* - * We start out with underrun reporting disabled to avoid races. - * For correct bookkeeping mark this on active crtcs. + * We start out with underrun reporting disabled on active + * pipes to avoid races. * * Also on gmch platforms we dont have any hardware bits to * disable the underrun reporting. Which means we need to start @@ -250,19 +250,9 @@ static void intel_sanitize_fifo_underrun_reporting(const struct intel_crtc_state * No protection against concurrent access is required - at * worst a fifo underrun happens which also sets this to false. */ - crtc->cpu_fifo_underrun_disabled = true; - - /* - * We track the PCH trancoder underrun reporting state - * within the crtc. With crtc for pipe A housing the underrun - * reporting state for PCH transcoder A, crtc for pipe B housing - * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A, - * and marking underrun reporting as disabled for the non-existing - * PCH transcoders B and C would prevent enabling the south - * error interrupt (see cpt_can_enable_serr_int()). - */ - if (intel_has_pch_trancoder(i915, crtc->pipe)) - crtc->pch_fifo_underrun_disabled = true; + intel_init_fifo_underrun_reporting(i915, crtc, + !crtc_state->hw.active && + !HAS_GMCH(i915)); } static void intel_sanitize_crtc(struct intel_crtc *crtc, @@ -647,17 +637,14 @@ static void intel_early_display_was(struct drm_i915_private *i915) * Also known as Wa_14010480278. 
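The sanitize path above now funnels its bookkeeping through the new intel_init_fifo_underrun_reporting(), passing enable = !active && !HAS_GMCH: reporting starts out disabled on active pipes (and always on GMCH, which has no hardware disable bit), and the PCH-side flag is only tracked where a PCH transcoder actually exists, so non-existing transcoders never block the south error interrupt. A small model of that state initialization; the struct, helper names and the "pipe 0 only" PCH-transcoder rule are simplifying assumptions.

/* Model of the underrun-reporting bookkeeping done at state readout time. */
#include <stdbool.h>
#include <stdio.h>

struct fake_crtc {
	int pipe;
	bool cpu_fifo_underrun_disabled;
	bool pch_fifo_underrun_disabled;
};

static bool has_pch_transcoder(bool has_pch_split, int pipe)
{
	/* e.g. LPT-H exposes only PCH transcoder A; modeled here as pipe 0 only */
	return has_pch_split && pipe == 0;
}

static void init_fifo_underrun_reporting(struct fake_crtc *crtc, bool enable,
					 bool has_pch_split)
{
	crtc->cpu_fifo_underrun_disabled = !enable;

	/* Only track the PCH side where a PCH transcoder exists. */
	if (has_pch_transcoder(has_pch_split, crtc->pipe))
		crtc->pch_fifo_underrun_disabled = !enable;
}

int main(void)
{
	struct fake_crtc crtc = { .pipe = 0 };

	/* active pipe on a non-GMCH platform: start with reporting disabled */
	init_fifo_underrun_reporting(&crtc, false, true);
	printf("cpu disabled: %d, pch disabled: %d\n",
	       crtc.cpu_fifo_underrun_disabled, crtc.pch_fifo_underrun_disabled);
	return 0;
}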
*/ if (IS_DISPLAY_VER(i915, 10, 12)) - intel_de_write(i915, GEN9_CLKGATE_DIS_0, - intel_de_read(i915, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS); + intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS); - if (IS_HASWELL(i915)) { - /* - * WaRsPkgCStateDisplayPMReq:hsw - * System hang if this isn't done before disabling all planes! - */ - intel_de_write(i915, CHICKEN_PAR1_1, - intel_de_read(i915, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); - } + /* + * WaRsPkgCStateDisplayPMReq:hsw + * System hang if this isn't done before disabling all planes! + */ + if (IS_HASWELL(i915)) + intel_de_rmw(i915, CHICKEN_PAR1_1, 0, FORCE_ARB_IDLE_PLANES); if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) { /* Display WA #1142:kbl,cfl,cml */ @@ -723,18 +710,7 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915, intel_dpll_sanitize_state(i915); - if (IS_G4X(i915)) { - g4x_wm_get_hw_state(i915); - g4x_wm_sanitize(i915); - } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { - vlv_wm_get_hw_state(i915); - vlv_wm_sanitize(i915); - } else if (DISPLAY_VER(i915) >= 9) { - skl_wm_get_hw_state(i915); - skl_wm_sanitize(i915); - } else if (HAS_PCH_SPLIT(i915)) { - ilk_wm_get_hw_state(i915); - } + intel_wm_get_hw_state(i915); for_each_intel_crtc(&i915->drm, crtc) { struct intel_crtc_state *crtc_state = diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 42aa04bac261..ce2a34a25211 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -39,6 +39,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_drrs.h" +#include "intel_lvds_regs.h" #include "intel_panel.h" #include "intel_quirks.h" diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index cecc0d007cf3..22507da0b5f0 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -10,6 +10,7 @@ #include "intel_display_types.h" #include "intel_fdi.h" #include "intel_lvds.h" +#include "intel_lvds_regs.h" #include "intel_pch_display.h" #include "intel_pch_refclk.h" #include "intel_pps.h" @@ -219,20 +220,20 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), - intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); + intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), - intel_de_read(dev_priv, HBLANK(cpu_transcoder))); + intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), - intel_de_read(dev_priv, HSYNC(cpu_transcoder))); + intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), - intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); + intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), - intel_de_read(dev_priv, VBLANK(cpu_transcoder))); + intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), - intel_de_read(dev_priv, VSYNC(cpu_transcoder))); + intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder))); intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), - intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder))); + intel_de_read(dev_priv, 
TRANS_VSYNCSHIFT(cpu_transcoder))); } static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) @@ -266,7 +267,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) reg = PCH_TRANSCONF(pipe); val = intel_de_read(dev_priv, reg); - pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe)); + pipeconf_val = intel_de_read(dev_priv, TRANSCONF(pipe)); if (HAS_PCH_IBX(dev_priv)) { /* Configure frame start delay to match the CPU */ @@ -278,15 +279,15 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) * that in pipeconf reg. For HDMI we must use 8bpc * here for both 8bpc and 12bpc. */ - val &= ~PIPECONF_BPC_MASK; + val &= ~TRANSCONF_BPC_MASK; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - val |= PIPECONF_BPC_8; + val |= TRANSCONF_BPC_8; else - val |= pipeconf_val & PIPECONF_BPC_MASK; + val |= pipeconf_val & TRANSCONF_BPC_MASK; } val &= ~TRANS_INTERLACE_MASK; - if ((pipeconf_val & PIPECONF_INTERLACE_MASK_ILK) == PIPECONF_INTERLACE_IF_ID_ILK) { + if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_ILK) == TRANSCONF_INTERLACE_IF_ID_ILK) { if (HAS_PCH_IBX(dev_priv) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX; @@ -307,7 +308,6 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; i915_reg_t reg; - u32 val; /* FDI relies on the transcoder */ assert_fdi_tx_disabled(dev_priv, pipe); @@ -317,21 +317,16 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc) assert_pch_ports_disabled(dev_priv, pipe); reg = PCH_TRANSCONF(pipe); - val = intel_de_read(dev_priv, reg); - val &= ~TRANS_ENABLE; - intel_de_write(dev_priv, reg, val); + intel_de_rmw(dev_priv, reg, TRANS_ENABLE, 0); /* wait for PCH transcoder off, transcoder state */ if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", pipe_name(pipe)); - if (HAS_PCH_CPT(dev_priv)) { + if (HAS_PCH_CPT(dev_priv)) /* Workaround: Clear the timing override chicken bit again. 
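ilk_pch_transcoder_set_timings() above copies the seven CPU transcoder timing registers, now spelled TRANS_HTOTAL and friends, verbatim into the matching PCH_TRANS_* registers. The table-driven sketch below shows the same mirroring pattern on a fake register file; the base offsets and example values are invented.

/* Mirror a fixed set of timing registers from one block to another. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mmio[32];

enum { HTOTAL, HBLANK, HSYNC, VTOTAL, VBLANK, VSYNC, VSYNCSHIFT, NTIMINGS };

#define CPU_TRANS_BASE	0	/* invented offsets for the model */
#define PCH_TRANS_BASE	16

static void pch_transcoder_set_timings(void)
{
	for (int i = 0; i < NTIMINGS; i++)
		mmio[PCH_TRANS_BASE + i] = mmio[CPU_TRANS_BASE + i];
}

int main(void)
{
	mmio[CPU_TRANS_BASE + HTOTAL] = 0x031f027f;	/* arbitrary example timing */
	mmio[CPU_TRANS_BASE + VSYNC]  = 0x020c0209;

	pch_transcoder_set_timings();
	printf("PCH HTOTAL: 0x%08x, PCH VSYNC: 0x%08x\n",
	       mmio[PCH_TRANS_BASE + HTOTAL], mmio[PCH_TRANS_BASE + VSYNC]);
	return 0;
}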
*/ - reg = TRANS_CHICKEN2(pipe); - val = intel_de_read(dev_priv, reg); - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - intel_de_write(dev_priv, reg, val); - } + intel_de_rmw(dev_priv, TRANS_CHICKEN2(pipe), + TRANS_CHICKEN2_TIMING_OVERRIDE, 0); } void ilk_pch_pre_enable(struct intel_atomic_state *state, @@ -414,7 +409,7 @@ void ilk_pch_enable(struct intel_atomic_state *state, intel_crtc_has_dp_encoder(crtc_state)) { const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; + u32 bpc = (intel_de_read(dev_priv, TRANSCONF(pipe)) & TRANSCONF_BPC_MASK) >> 5; i915_reg_t reg = TRANS_DP_CTL(pipe); enum port port; @@ -456,21 +451,14 @@ void ilk_pch_post_disable(struct intel_atomic_state *state, ilk_disable_pch_transcoder(crtc); if (HAS_PCH_CPT(dev_priv)) { - i915_reg_t reg; - u32 temp; - /* disable TRANS_DP_CTL */ - reg = TRANS_DP_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~(TRANS_DP_OUTPUT_ENABLE | - TRANS_DP_PORT_SEL_MASK); - temp |= TRANS_DP_PORT_SEL_NONE; - intel_de_write(dev_priv, reg, temp); + intel_de_rmw(dev_priv, TRANS_DP_CTL(pipe), + TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK, + TRANS_DP_PORT_SEL_NONE); /* disable DPLL_SEL */ - temp = intel_de_read(dev_priv, PCH_DPLL_SEL); - temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); - intel_de_write(dev_priv, PCH_DPLL_SEL, temp); + intel_de_rmw(dev_priv, PCH_DPLL_SEL, + TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe), 0); } ilk_fdi_pll_disable(crtc); @@ -565,9 +553,9 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); val = TRANS_ENABLE; - pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); + pipeconf_val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)); - if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == PIPECONF_INTERLACE_IF_ID_ILK) + if ((pipeconf_val & TRANSCONF_INTERLACE_MASK_HSW) == TRANSCONF_INTERLACE_IF_ID_ILK) val |= TRANS_INTERLACE_INTERLACED; else val |= TRANS_INTERLACE_PROGRESSIVE; @@ -580,20 +568,14 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) { - u32 val; - - val = intel_de_read(dev_priv, LPT_TRANSCONF); - val &= ~TRANS_ENABLE; - intel_de_write(dev_priv, LPT_TRANSCONF, val); + intel_de_rmw(dev_priv, LPT_TRANSCONF, TRANS_ENABLE, 0); /* wait for PCH transcoder off, transcoder state */ if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, TRANS_STATE_ENABLE, 50)) drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); /* Workaround: clear timing override bit. 
*/ - val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); + intel_de_rmw(dev_priv, TRANS_CHICKEN2(PIPE_A), TRANS_CHICKEN2_TIMING_OVERRIDE, 0); } void lpt_pch_enable(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c index 3657b2940702..f4c09cc37a5e 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c @@ -12,19 +12,13 @@ static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv) { - u32 tmp; - - tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); - tmp |= FDI_MPHY_IOSFSB_RESET_CTL; - intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); + intel_de_rmw(dev_priv, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL); if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & FDI_MPHY_IOSFSB_RESET_STATUS, 100)) drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); - tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); - tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; - intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); + intel_de_rmw(dev_priv, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0); if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) & FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 7b21438edd9b..24b5b12f7732 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -13,6 +13,7 @@ #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_lvds.h" +#include "intel_lvds_regs.h" #include "intel_pps.h" #include "intel_quirks.h" @@ -1534,17 +1535,13 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd /* * Compute the divisor for the pp clock, simply match the Bspec formula. 
*/ - if (i915_mmio_reg_valid(regs.pp_div)) { + if (i915_mmio_reg_valid(regs.pp_div)) intel_de_write(dev_priv, regs.pp_div, REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000))); - } else { - u32 pp_ctl; - - pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl); - pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK; - pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)); - intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl); - } + else + intel_de_rmw(dev_priv, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK, + REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, + DIV_ROUND_UP(seq->t11_t12, 1000))); drm_dbg_kms(&dev_priv->drm, "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 7a72e15e6836..44610b20cd29 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -152,7 +152,7 @@ static void psr_irq_control(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); i915_reg_t imr_reg; - u32 mask, val; + u32 mask; if (DISPLAY_VER(dev_priv) >= 12) imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder); @@ -164,10 +164,7 @@ static void psr_irq_control(struct intel_dp *intel_dp) mask |= psr_irq_post_exit_bit_get(intel_dp) | psr_irq_pre_entry_bit_get(intel_dp); - val = intel_de_read(dev_priv, imr_reg); - val &= ~psr_irq_mask_get(intel_dp); - val |= ~mask; - intel_de_write(dev_priv, imr_reg, val); + intel_de_rmw(dev_priv, imr_reg, psr_irq_mask_get(intel_dp), ~mask); } static void psr_event_print(struct drm_i915_private *i915, @@ -245,8 +242,6 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) } if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) { - u32 val; - drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n", transcoder_name(cpu_transcoder)); @@ -260,9 +255,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) * again so we don't care about unmask the interruption * or unset irq_aux_error. 
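pps_init_registers() above packs two fields into a single PP_DIV write: the reference divider as (100 * div) / 2 - 1 and the power-cycle delay as DIV_ROUND_UP(t11_t12, 1000) (or the same delay via a rmw of PP_CONTROL on the BXT-style layout). The arithmetic is easier to see in isolation; the field positions below are invented, only the two formulas mirror the hunk.

/* Isolated model of the panel power sequencer divisor/delay packing. */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Invented field layout, just to show REG_FIELD_PREP-style packing. */
#define REF_DIVIDER_SHIFT	8
#define CYCLE_DELAY_MASK	0xffu

static uint32_t pack_pp_div(uint32_t div, uint32_t t11_t12)
{
	uint32_t ref = (100 * div) / 2 - 1;		/* same formula as the hunk */
	uint32_t delay = DIV_ROUND_UP(t11_t12, 1000);	/* round the delay up, never down */

	return (ref << REF_DIVIDER_SHIFT) | (delay & CYCLE_DELAY_MASK);
}

int main(void)
{
	/* e.g. div = 24, t11_t12 = 5100: delay rounds up to 6, not down to 5 */
	printf("PP_DIV: 0x%08x\n", pack_pp_div(24, 5100));
	return 0;
}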
*/ - val = intel_de_read(dev_priv, imr_reg); - val |= psr_irq_psr_error_bit_get(intel_dp); - intel_de_write(dev_priv, imr_reg, val); + intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp)); schedule_work(&intel_dp->psr.work); } @@ -542,6 +535,14 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2)); val |= intel_psr2_get_tp_time(intel_dp); + if (DISPLAY_VER(dev_priv) >= 12) { + if (intel_dp->psr.io_wake_lines < 9 && + intel_dp->psr.fast_wake_lines < 9) + val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; + else + val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3; + } + /* Wa_22012278275:adl-p */ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) { static const u8 map[] = { @@ -558,31 +559,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see * comments bellow for more information */ - u32 tmp, lines = 7; - - val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; + u32 tmp; - tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; + tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT; val |= tmp; - tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; + tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT; val |= tmp; } else if (DISPLAY_VER(dev_priv) >= 12) { - /* - * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default - * values from BSpec. In order to setting an optimal power - * consumption, lower than 4k resolution mode needs to decrease - * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution - * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE. - */ - val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; - val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7); - val |= TGL_EDP_PSR2_FAST_WAKE(7); + val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); + val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); } else if (DISPLAY_VER(dev_priv) >= 9) { - val |= EDP_PSR2_IO_BUFFER_WAKE(7); - val |= EDP_PSR2_FAST_WAKE(7); + val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); + val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); } if (intel_dp->psr.req_psr2_sdp_prior_scanline) @@ -591,12 +582,6 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) if (intel_dp->psr.psr2_sel_fetch_enabled) { u32 tmp; - /* Wa_1408330847 */ - if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - intel_de_rmw(dev_priv, CHICKEN_PAR1_1, - DIS_RAM_BYPASS_PSR2_MAN_TRACK, - DIS_RAM_BYPASS_PSR2_MAN_TRACK); - tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder)); drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE)); } else if (HAS_PSR2_SEL_FETCH(dev_priv)) { @@ -637,13 +622,10 @@ static void psr2_program_idle_frames(struct intel_dp *intel_dp, u32 idle_frames) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u32 val; idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT; - val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder)); - val &= ~EDP_PSR2_IDLE_FRAME_MASK; - val |= idle_frames; - intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val); + intel_de_rmw(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), + EDP_PSR2_IDLE_FRAME_MASK, idle_frames); } static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp) @@ -708,6 +690,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, { const u32 crtc_vdisplay = 
crtc_state->uapi.adjusted_mode.crtc_vdisplay; struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + struct i915_power_domains *power_domains = &dev_priv->display.power.domains; u32 exit_scanlines; /* @@ -724,7 +707,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp, if (crtc_state->enable_psr2_sel_fetch) return; - if (!(dev_priv->display.dmc.allowed_dc_mask & DC_STATE_EN_DC3CO)) + if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO)) return; if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state)) @@ -765,13 +748,6 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp, return false; } - /* Wa_14010254185 Wa_14010103792 */ - if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) { - drm_dbg_kms(&dev_priv->drm, - "PSR2 sel fetch not enabled, missing the implementation of WAs\n"); - return false; - } - return crtc_state->enable_psr2_sel_fetch = true; } @@ -842,6 +818,46 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d return true; } +static bool _compute_psr2_wake_times(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time; + u8 max_wake_lines; + + if (DISPLAY_VER(i915) >= 12) { + io_wake_time = 42; + /* + * According to Bspec it's 42us, but based on testing + * it is not enough -> use 45 us. + */ + fast_wake_time = 45; + max_wake_lines = 12; + } else { + io_wake_time = 50; + fast_wake_time = 32; + max_wake_lines = 8; + } + + io_wake_lines = intel_usecs_to_scanlines( + &crtc_state->uapi.adjusted_mode, io_wake_time); + fast_wake_lines = intel_usecs_to_scanlines( + &crtc_state->uapi.adjusted_mode, fast_wake_time); + + if (io_wake_lines > max_wake_lines || + fast_wake_lines > max_wake_lines) + return false; + + if (i915->params.psr_safest_params) + io_wake_lines = fast_wake_lines = max_wake_lines; + + /* According to Bspec lower limit should be set as 7 lines. 
*/ + intel_dp->psr.io_wake_lines = max(io_wake_lines, 7); + intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7); + + return true; +} + static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { @@ -936,6 +952,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } + if (!_compute_psr2_wake_times(intel_dp, crtc_state)) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, Unable to use long enough wake times\n"); + return false; + } + if (HAS_PSR2_SEL_FETCH(dev_priv)) { if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && !HAS_PSR_HW_TRACKING(dev_priv)) { @@ -945,13 +967,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, } } - /* Wa_2209313811 */ - if (!crtc_state->enable_psr2_sel_fetch && - IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) { - drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n"); - goto unsupported; - } - if (!psr2_granularity_check(intel_dp, crtc_state)) { drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n"); goto unsupported; @@ -1071,7 +1086,7 @@ void intel_psr_get_config(struct intel_encoder *encoder, } if (DISPLAY_VER(dev_priv) >= 12) { - val = intel_de_read(dev_priv, EXITLINE(intel_dp->psr.transcoder)); + val = intel_de_read(dev_priv, TRANS_EXITLINE(intel_dp->psr.transcoder)); val &= EXITLINE_MASK; pipe_config->dc3co_exitline = val; } @@ -1145,19 +1160,13 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, psr_irq_control(intel_dp); - if (intel_dp->psr.dc3co_exitline) { - u32 val; - - /* - * TODO: if future platforms supports DC3CO in more than one - * transcoder, EXITLINE will need to be unset when disabling PSR - */ - val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder)); - val &= ~EXITLINE_MASK; - val |= intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT; - val |= EXITLINE_ENABLE; - intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val); - } + /* + * TODO: if future platforms supports DC3CO in more than one + * transcoder, EXITLINE will need to be unset when disabling PSR + */ + if (intel_dp->psr.dc3co_exitline) + intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK, + intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE); if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv)) intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING, @@ -1170,13 +1179,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, */ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) || IS_DISPLAY_VER(dev_priv, 12, 13)) { - u16 vtotal, vblank; - - vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal - - crtc_state->uapi.adjusted_mode.crtc_vdisplay; - vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end - - crtc_state->uapi.adjusted_mode.crtc_vblank_start; - if (vblank > vtotal) + if (crtc_state->hw.adjusted_mode.crtc_vblank_start != + crtc_state->hw.adjusted_mode.crtc_vdisplay) intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, wa_16013835468_bit_get(intel_dp)); } @@ -1199,13 +1203,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0, ADLP_1_BASED_X_GRANULARITY); - /* Wa_16011168373:adl-p */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - intel_de_rmw(dev_priv, - TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder), - TRANS_SET_CONTEXT_LATENCY_MASK, - TRANS_SET_CONTEXT_LATENCY_VALUE(1)); - /* Wa_16012604467:adlp,mtl[a0,b0] */ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) 
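_compute_psr2_wake_times() above turns the required IO/fast wake times (42/45 µs on display 12+, 50/32 µs before, per the hunk) into scanlines, rejects PSR2 if either exceeds the per-platform maximum (12 vs 8 lines), and clamps the result to the Bspec minimum of 7; the resulting line counts then also pick the TGL block count (2 below 9 lines, 3 otherwise). A stand-alone sketch of that computation, assuming the usual scanline-time arithmetic (lines = usecs * pixel_clock_khz / (1000 * htotal)) instead of calling the driver helper, and leaving out the psr_safest_params override.

/* Sketch of the PSR2 wake-line computation on top of plain mode timings. */
#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* usecs -> scanlines, assuming pixel clock in kHz and htotal in pixels. */
static int usecs_to_scanlines(int usecs, int clock_khz, int htotal)
{
	return DIV_ROUND_UP(usecs * clock_khz, 1000 * htotal);
}

static bool compute_wake_lines(int clock_khz, int htotal, bool ver12plus,
			       int *io_lines, int *fast_lines)
{
	int io_us     = ver12plus ? 42 : 50;
	int fast_us   = ver12plus ? 45 : 32;	/* 42 per Bspec, bumped to 45 by testing */
	int max_lines = ver12plus ? 12 : 8;

	*io_lines   = usecs_to_scanlines(io_us, clock_khz, htotal);
	*fast_lines = usecs_to_scanlines(fast_us, clock_khz, htotal);

	if (*io_lines > max_lines || *fast_lines > max_lines)
		return false;		/* PSR2 rejected: wake times too long */

	if (*io_lines < 7)   *io_lines = 7;	/* Bspec lower limit of 7 lines */
	if (*fast_lines < 7) *fast_lines = 7;
	return true;
}

int main(void)
{
	int io, fast;

	if (compute_wake_lines(332880, 4000, true, &io, &fast))
		printf("io=%d fast=%d block count=%d\n", io, fast,
		       (io < 9 && fast < 9) ? 2 : 3);
	else
		printf("PSR2 not possible with these wake times\n");
	return 0;
}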
intel_de_rmw(dev_priv, @@ -1360,12 +1357,6 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) intel_psr_exit(intel_dp); intel_psr_wait_exit_locked(intel_dp); - /* Wa_1408330847 */ - if (intel_dp->psr.psr2_sel_fetch_enabled && - IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - intel_de_rmw(dev_priv, CHICKEN_PAR1_1, - DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0); - /* * Wa_16013835468 * Wa_14015648006 @@ -1376,12 +1367,6 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) wa_16013835468_bit_get(intel_dp), 0); if (intel_dp->psr.psr2_enabled) { - /* Wa_16011168373:adl-p */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - intel_de_rmw(dev_priv, - TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder), - TRANS_SET_CONTEXT_LATENCY_MASK, 0); - /* Wa_16012604467:adlp,mtl[a0,b0] */ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) intel_de_rmw(dev_priv, @@ -1547,8 +1532,8 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp) intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0); } -void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -1559,10 +1544,28 @@ void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0); } -void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int color_plane) +void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + + if (!crtc_state->enable_psr2_sel_fetch) + return; + + if (plane->id == PLANE_CURSOR) + intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), + plane_state->ctl); + else + intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), + PLANE_SEL_FETCH_CTL_ENABLE); +} + +void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -1573,11 +1576,8 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, if (!crtc_state->enable_psr2_sel_fetch) return; - if (plane->id == PLANE_CURSOR) { - intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), - plane_state->ctl); + if (plane->id == PLANE_CURSOR) return; - } clip = &plane_state->psr2_sel_fetch_area; @@ -1605,9 +1605,6 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, val = (drm_rect_height(clip) - 1) << 16; val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val); - - intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), - PLANE_SEL_FETCH_CTL_ENABLE); } void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state) diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 2ac3a46cccc5..7a38a9e7fa5b 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h 
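The prototype changes in the hunk below reflect the selective fetch split made in intel_psr.c above: the plane's selective fetch area registers (PLANE_SEL_FETCH_SIZE and friends) stay in the new _noarm() helper, while the PLANE_SEL_FETCH_CTL write moves into the new _arm() helper so it is issued together with the writes that actually latch the plane update. A rough sketch of the intended call order, where example_plane_update() and its arguments are made up for illustration (the real callers are the icl_plane_update_noarm()/icl_plane_update_arm() hooks updated further down in this diff):

static void example_plane_update(struct intel_plane *plane,
				 const struct intel_crtc_state *crtc_state,
				 const struct intel_plane_state *plane_state,
				 int color_plane)
{
	/* noarm phase: program the selective fetch area registers */
	intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state,
						 plane_state, color_plane);

	/*
	 * arm phase: the PLANE_SEL_FETCH_CTL write is deferred to here so it
	 * lands next to the PLANE_CTL/PLANE_SURF writes that latch the update.
	 */
	intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state);
}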
@@ -46,12 +46,16 @@ bool intel_psr_enabled(struct intel_dp *intel_dp); int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state); -void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int color_plane); -void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state); +void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane); +void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); + +void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); void intel_psr_pause(struct intel_dp *intel_dp); void intel_psr_resume(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index c65c771f5c46..1cfb94b5cedb 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -1419,6 +1419,36 @@ static const struct intel_mpllb_state dg2_hdmi_262750 = { REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; +static const struct intel_mpllb_state dg2_hdmi_267300 = { + .clock = 267300, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + static const struct intel_mpllb_state dg2_hdmi_268500 = { .clock = 268500, .ref_control = @@ -1509,6 +1539,36 @@ static const struct intel_mpllb_state dg2_hdmi_241500 = { REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; +static const struct intel_mpllb_state dg2_hdmi_319890 = { + .clock = 319890, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + 
REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + static const struct intel_mpllb_state dg2_hdmi_497750 = { .clock = 497750, .ref_control = @@ -1696,8 +1756,10 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = { &dg2_hdmi_209800, &dg2_hdmi_241500, &dg2_hdmi_262750, + &dg2_hdmi_267300, &dg2_hdmi_268500, &dg2_hdmi_296703, + &dg2_hdmi_319890, &dg2_hdmi_497750, &dg2_hdmi_592000, &dg2_hdmi_593407, diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index e6b4d24b9cd0..a16e56a60c30 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -1217,7 +1217,8 @@ g4x_sprite_update_arm(struct intel_plane *plane, } intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset); - intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), (y << 16) | x); + intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), + DVS_OFFSET_Y(y) | DVS_OFFSET_X(x)); /* * The control register self-arms if the plane was previously diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index b986bf075889..3b5ff84dc615 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -930,8 +930,7 @@ intel_enable_tv(struct intel_atomic_state *state, /* Prevents vblank waits from timing out in intel_tv_detect_type() */ intel_crtc_wait_for_next_vblank(to_intel_crtc(pipe_config->uapi.crtc)); - intel_de_write(dev_priv, TV_CTL, - intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE); + intel_de_rmw(dev_priv, TV_CTL, 0, TV_ENC_ENABLE); } static void @@ -943,8 +942,7 @@ intel_disable_tv(struct intel_atomic_state *state, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - intel_de_write(dev_priv, TV_CTL, - intel_de_read(dev_priv, TV_CTL) & ~TV_ENC_ENABLE); + intel_de_rmw(dev_priv, TV_CTL, TV_ENC_ENABLE, 0); } static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state) diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c index 4c83e2320bca..571f5dda1e66 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.c +++ b/drivers/gpu/drm/i915/display/intel_vblank.c @@ -26,7 +26,7 @@ * | * | frame start: * | generate frame start interrupt (aka. 
vblank interrupt) (gmch) - * | may be shifted forward 1-3 extra lines via PIPECONF + * | may be shifted forward 1-3 extra lines via TRANSCONF * | | * | | start of vsync: * | | generate vsync interrupt @@ -54,7 +54,7 @@ * Summary: * - most events happen at the start of horizontal sync * - frame start happens at the start of horizontal blank, 1-4 lines - * (depending on PIPECONF settings) after the start of vblank + * (depending on TRANSCONF settings) after the start of vblank * - gen3/4 pixel and frame counter are synchronized with the start * of horizontal active on the first line of vertical active */ diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 207b2a648d32..09b32ffdc552 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -17,6 +17,7 @@ #include "intel_dsi.h" #include "intel_qp_tables.h" #include "intel_vdsc.h" +#include "intel_vdsc_regs.h" enum ROW_INDEX_BPP { ROW_INDEX_6BPP = 0, diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h new file mode 100644 index 000000000000..4fd883463752 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h @@ -0,0 +1,461 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_VDSC_REGS_H__ +#define __INTEL_VDSC_REGS_H__ + +#include "intel_display_reg_defs.h" + +/* Display Stream Splitter Control */ +#define DSS_CTL1 _MMIO(0x67400) +#define SPLITTER_ENABLE (1 << 31) +#define JOINER_ENABLE (1 << 30) +#define DUAL_LINK_MODE_INTERLEAVE (1 << 24) +#define DUAL_LINK_MODE_FRONTBACK (0 << 24) +#define OVERLAP_PIXELS_MASK (0xf << 16) +#define OVERLAP_PIXELS(pixels) ((pixels) << 16) +#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) +#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) +#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0 + +#define DSS_CTL2 _MMIO(0x67404) +#define LEFT_BRANCH_VDSC_ENABLE (1 << 31) +#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15) +#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) +#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) + +#define _ICL_PIPE_DSS_CTL1_PB 0x78200 +#define _ICL_PIPE_DSS_CTL1_PC 0x78400 +#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_PIPE_DSS_CTL1_PB, \ + _ICL_PIPE_DSS_CTL1_PC) +#define BIG_JOINER_ENABLE (1 << 29) +#define MASTER_BIG_JOINER_ENABLE (1 << 28) +#define VGA_CENTERING_ENABLE (1 << 27) +#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25) +#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0) +#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1) +#define UNCOMPRESSED_JOINER_MASTER (1 << 21) +#define UNCOMPRESSED_JOINER_SLAVE (1 << 20) + +#define _ICL_PIPE_DSS_CTL2_PB 0x78204 +#define _ICL_PIPE_DSS_CTL2_PC 0x78404 +#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_PIPE_DSS_CTL2_PB, \ + _ICL_PIPE_DSS_CTL2_PC) + +/* Icelake Display Stream Compression Registers */ +#define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200) +#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570 +#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC) +#define 
ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC) +#define DSC_ALT_ICH_SEL (1 << 20) +#define DSC_VBR_ENABLE (1 << 19) +#define DSC_422_ENABLE (1 << 18) +#define DSC_COLOR_SPACE_CONVERSION (1 << 17) +#define DSC_BLOCK_PREDICTION (1 << 16) +#define DSC_LINE_BUF_DEPTH_SHIFT 12 +#define DSC_BPC_SHIFT 8 +#define DSC_VER_MIN_SHIFT 4 +#define DSC_VER_MAJ (0x1 << 0) + +#define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204) +#define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574 +#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC) +#define DSC_BPP(bpp) ((bpp) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208) +#define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578 +#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC) +#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16) +#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C) +#define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C +#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC) +#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16) +#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210) +#define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580 +#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) +#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) +#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214) +#define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14) +#define 
_ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584 +#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) +#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16) +#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_6 _MMIO(0x6B218) +#define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588 +#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC) +#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24) +#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16) +#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8) +#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C) +#define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C +#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC) +#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16) +#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220) +#define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590 +#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC) +#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16) +#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224) +#define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594 +#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC) +#define 
ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC) +#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16) +#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228) +#define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598 +#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC) +#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20) +#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16) +#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8) +#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0) + +#define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C) +#define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C +#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C +#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C +#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC) + +#define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260) +#define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0 +#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC) + +#define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264) +#define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4 +#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC) + +#define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268) +#define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8 +#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - 
PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC) + +#define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C) +#define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC +#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC +#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC +#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC +#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC) + +#define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270) +#define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70) +#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0 +#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0 +#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0 +#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \ + _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC) +#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ + _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) +#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20) +#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16) +#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) + +/* Icelake Rate Control Buffer Threshold Registers */ +#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230) +#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4) +#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30) +#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4) +#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254) +#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4) +#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354) +#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4) +#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454) +#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4) +#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554) +#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4) +#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_0_PB, \ + _ICL_DSC0_RC_BUF_THRESH_0_PC) +#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \ + _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC) +#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_0_PB, \ + _ICL_DSC1_RC_BUF_THRESH_0_PC) +#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \ + _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC) + +#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238) +#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4) +#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38) +#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4) +#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C) +#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4) +#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C) +#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4) +#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C) +#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4) +#define 
_ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C) +#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4) +#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_1_PB, \ + _ICL_DSC0_RC_BUF_THRESH_1_PC) +#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \ + _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC) +#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_1_PB, \ + _ICL_DSC1_RC_BUF_THRESH_1_PC) +#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ + _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) + +/* Icelake DSC Rate Control Range Parameter Registers */ +#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240) +#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4) +#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40) +#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC (0x78508 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC) +#define RC_BPG_OFFSET_SHIFT 10 +#define RC_MAX_QP_SHIFT 5 +#define RC_MIN_QP_SHIFT 0 + +#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248) +#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4) +#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48) +#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC) + +#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250) +#define DSCA_RC_RANGE_PARAMETERS_2_UDW 
_MMIO(0x6B250 + 4) +#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50) +#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC) + +#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258) +#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4) +#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58) +#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420) +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520) +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4) +#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC) +#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \ + _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC) +#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \ + _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC) + +#endif /* __INTEL_VDSC_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index 5ff6aed9575e..4228f26b4c11 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -144,17 +144,11 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, * is deprecated. */ if (DISPLAY_VER(i915) >= 13) { - /* - * FIXME: Subtract Window2 delay from below value. - * - * Window2 specifies time required to program DSB (Window2) in - * number of scan lines. Assuming 0 for no DSB. 
- */ crtc_state->vrr.guardband = - crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vdisplay; + crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vblank_start; } else { crtc_state->vrr.pipeline_full = - min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - + min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start - crtc_state->framestart_delay - 1); } diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c new file mode 100644 index 000000000000..bb99179cd5fd --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_wm.c @@ -0,0 +1,408 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "i915_drv.h" +#include "i9xx_wm.h" +#include "intel_display_types.h" +#include "intel_wm.h" +#include "skl_watermark.h" + +/** + * intel_update_watermarks - update FIFO watermark values based on current modes + * @dev_priv: i915 device + * + * Calculate watermark values for the various WM regs based on current mode + * and plane configuration. + * + * There are several cases to deal with here: + * - normal (i.e. non-self-refresh) + * - self-refresh (SR) mode + * - lines are large relative to FIFO size (buffer can hold up to 2) + * - lines are small relative to FIFO size (buffer can hold more than 2 + * lines), so need to account for TLB latency + * + * The normal calculation is: + * watermark = dotclock * bytes per pixel * latency + * where latency is platform & configuration dependent (we assume pessimal + * values here). + * + * The SR calculation is: + * watermark = (trunc(latency/line time)+1) * surface width * + * bytes per pixel + * where + * line time = htotal / dotclock + * surface width = hdisplay for normal plane and 64 for cursor + * and latency is assumed to be high, as above. + * + * The final value programmed to the register should always be rounded up, + * and include an extra 2 entries to account for clock crossings. + * + * We don't use the sprite, so we can ignore that. And on Crestline we have + * to set the non-SR watermarks to 8. 
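+ *
+ * Worked example, with assumed numbers purely for illustration: a 100 MHz
+ * dot clock, 4 bytes per pixel and 2 usec of latency give
+ * 100e6 * 4 * 2e-6 = 800 bytes; assuming 64-byte FIFO entries, that rounds
+ * up to 13 entries, plus the 2 extra for clock crossings = 15 programmed.
+ * For SR, htotal = 2200 at the same dot clock gives a 22 usec line time,
+ * so a 30 usec latency needs (trunc(30/22) + 1) * hdisplay * 4 bytes.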
+ */ +void intel_update_watermarks(struct drm_i915_private *i915) +{ + if (i915->display.funcs.wm->update_wm) + i915->display.funcs.wm->update_wm(i915); +} + +int intel_compute_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (i915->display.funcs.wm->compute_pipe_wm) + return i915->display.funcs.wm->compute_pipe_wm(state, crtc); + + return 0; +} + +int intel_compute_intermediate_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (!i915->display.funcs.wm->compute_intermediate_wm) + return 0; + + if (drm_WARN_ON(&i915->drm, !i915->display.funcs.wm->compute_pipe_wm)) + return 0; + + return i915->display.funcs.wm->compute_intermediate_wm(state, crtc); +} + +bool intel_initial_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (i915->display.funcs.wm->initial_watermarks) { + i915->display.funcs.wm->initial_watermarks(state, crtc); + return true; + } + + return false; +} + +void intel_atomic_update_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (i915->display.funcs.wm->atomic_update_watermarks) + i915->display.funcs.wm->atomic_update_watermarks(state, crtc); +} + +void intel_optimize_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (i915->display.funcs.wm->optimize_watermarks) + i915->display.funcs.wm->optimize_watermarks(state, crtc); +} + +int intel_compute_global_watermarks(struct intel_atomic_state *state) +{ + struct drm_i915_private *i915 = to_i915(state->base.dev); + + if (i915->display.funcs.wm->compute_global_watermarks) + return i915->display.funcs.wm->compute_global_watermarks(state); + + return 0; +} + +void intel_wm_get_hw_state(struct drm_i915_private *i915) +{ + if (i915->display.funcs.wm->get_hw_state) + return i915->display.funcs.wm->get_hw_state(i915); +} + +bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + + /* FIXME check the 'enable' instead */ + if (!crtc_state->hw.active) + return false; + + /* + * Treat cursor with fb as always visible since cursor updates + * can happen faster than the vrefresh rate, and the current + * watermark code doesn't handle that correctly. Cursor updates + * which set/clear the fb or change the cursor size are going + * to get throttled by intel_legacy_cursor_update() to work + * around this problem with the watermark code. + */ + if (plane->id == PLANE_CURSOR) + return plane_state->hw.fb != NULL; + else + return plane_state->uapi.visible; +} + +void intel_print_wm_latency(struct drm_i915_private *dev_priv, + const char *name, const u16 wm[]) +{ + int level; + + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + unsigned int latency = wm[level]; + + if (latency == 0) { + drm_dbg_kms(&dev_priv->drm, + "%s WM%d latency not provided\n", + name, level); + continue; + } + + /* + * - latencies are in us on gen9. 
+ * - before then, WM1+ latency values are in 0.5us units + */ + if (DISPLAY_VER(dev_priv) >= 9) + latency *= 10; + else if (level > 0) + latency *= 5; + + drm_dbg_kms(&dev_priv->drm, + "%s WM%d latency %u (%u.%u usec)\n", name, level, + wm[level], latency / 10, latency % 10); + } +} + +void intel_wm_init(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 9) + skl_wm_init(i915); + else + i9xx_wm_init(i915); +} + +static void wm_latency_show(struct seq_file *m, const u16 wm[8]) +{ + struct drm_i915_private *dev_priv = m->private; + int level; + + drm_modeset_lock_all(&dev_priv->drm); + + for (level = 0; level < dev_priv->display.wm.num_levels; level++) { + unsigned int latency = wm[level]; + + /* + * - WM1+ latency values in 0.5us units + * - latencies are in us on gen9/vlv/chv + */ + if (DISPLAY_VER(dev_priv) >= 9 || + IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv) || + IS_G4X(dev_priv)) + latency *= 10; + else if (level > 0) + latency *= 5; + + seq_printf(m, "WM%d %u (%u.%u usec)\n", + level, wm[level], latency / 10, latency % 10); + } + + drm_modeset_unlock_all(&dev_priv->drm); +} + +static int pri_wm_latency_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + const u16 *latencies; + + if (DISPLAY_VER(dev_priv) >= 9) + latencies = dev_priv->display.wm.skl_latency; + else + latencies = dev_priv->display.wm.pri_latency; + + wm_latency_show(m, latencies); + + return 0; +} + +static int spr_wm_latency_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + const u16 *latencies; + + if (DISPLAY_VER(dev_priv) >= 9) + latencies = dev_priv->display.wm.skl_latency; + else + latencies = dev_priv->display.wm.spr_latency; + + wm_latency_show(m, latencies); + + return 0; +} + +static int cur_wm_latency_show(struct seq_file *m, void *data) +{ + struct drm_i915_private *dev_priv = m->private; + const u16 *latencies; + + if (DISPLAY_VER(dev_priv) >= 9) + latencies = dev_priv->display.wm.skl_latency; + else + latencies = dev_priv->display.wm.cur_latency; + + wm_latency_show(m, latencies); + + return 0; +} + +static int pri_wm_latency_open(struct inode *inode, struct file *file) +{ + struct drm_i915_private *dev_priv = inode->i_private; + + if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) + return -ENODEV; + + return single_open(file, pri_wm_latency_show, dev_priv); +} + +static int spr_wm_latency_open(struct inode *inode, struct file *file) +{ + struct drm_i915_private *dev_priv = inode->i_private; + + if (HAS_GMCH(dev_priv)) + return -ENODEV; + + return single_open(file, spr_wm_latency_show, dev_priv); +} + +static int cur_wm_latency_open(struct inode *inode, struct file *file) +{ + struct drm_i915_private *dev_priv = inode->i_private; + + if (HAS_GMCH(dev_priv)) + return -ENODEV; + + return single_open(file, cur_wm_latency_show, dev_priv); +} + +static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp, u16 wm[8]) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + u16 new[8] = { 0 }; + int level; + int ret; + char tmp[32]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + + tmp[len] = '\0'; + + ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu", + &new[0], &new[1], &new[2], &new[3], + &new[4], &new[5], &new[6], &new[7]); + if (ret != dev_priv->display.wm.num_levels) + return -EINVAL; + + drm_modeset_lock_all(&dev_priv->drm); + + for (level = 0; level < 
dev_priv->display.wm.num_levels; level++) + wm[level] = new[level]; + + drm_modeset_unlock_all(&dev_priv->drm); + + return len; +} + +static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + u16 *latencies; + + if (DISPLAY_VER(dev_priv) >= 9) + latencies = dev_priv->display.wm.skl_latency; + else + latencies = dev_priv->display.wm.pri_latency; + + return wm_latency_write(file, ubuf, len, offp, latencies); +} + +static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + u16 *latencies; + + if (DISPLAY_VER(dev_priv) >= 9) + latencies = dev_priv->display.wm.skl_latency; + else + latencies = dev_priv->display.wm.spr_latency; + + return wm_latency_write(file, ubuf, len, offp, latencies); +} + +static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct drm_i915_private *dev_priv = m->private; + u16 *latencies; + + if (DISPLAY_VER(dev_priv) >= 9) + latencies = dev_priv->display.wm.skl_latency; + else + latencies = dev_priv->display.wm.cur_latency; + + return wm_latency_write(file, ubuf, len, offp, latencies); +} + +static const struct file_operations i915_pri_wm_latency_fops = { + .owner = THIS_MODULE, + .open = pri_wm_latency_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = pri_wm_latency_write +}; + +static const struct file_operations i915_spr_wm_latency_fops = { + .owner = THIS_MODULE, + .open = spr_wm_latency_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = spr_wm_latency_write +}; + +static const struct file_operations i915_cur_wm_latency_fops = { + .owner = THIS_MODULE, + .open = cur_wm_latency_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = cur_wm_latency_write +}; + +void intel_wm_debugfs_register(struct drm_i915_private *i915) +{ + struct drm_minor *minor = i915->drm.primary; + + debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root, + i915, &i915_pri_wm_latency_fops); + + debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root, + i915, &i915_spr_wm_latency_fops); + + debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root, + i915, &i915_cur_wm_latency_fops); + + skl_watermark_debugfs_register(i915); +} diff --git a/drivers/gpu/drm/i915/display/intel_wm.h b/drivers/gpu/drm/i915/display/intel_wm.h new file mode 100644 index 000000000000..48429ac140d2 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_wm.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef __INTEL_WM_H__ +#define __INTEL_WM_H__ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_atomic_state; +struct intel_crtc; +struct intel_crtc_state; +struct intel_plane_state; + +void intel_update_watermarks(struct drm_i915_private *i915); +int intel_compute_pipe_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc); +int intel_compute_intermediate_wm(struct intel_atomic_state *state, + struct intel_crtc *crtc); +bool intel_initial_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void intel_atomic_update_watermarks(struct intel_atomic_state *state, + struct intel_crtc 
*crtc); +void intel_optimize_watermarks(struct intel_atomic_state *state, + struct intel_crtc *crtc); +int intel_compute_global_watermarks(struct intel_atomic_state *state); +void intel_wm_get_hw_state(struct drm_i915_private *i915); +bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void intel_print_wm_latency(struct drm_i915_private *i915, + const char *name, const u16 wm[]); +void intel_wm_init(struct drm_i915_private *i915); +void intel_wm_debugfs_register(struct drm_i915_private *i915); + +#endif /* __INTEL_WM_H__ */ diff --git a/drivers/gpu/drm/i915/intel_pm_types.h b/drivers/gpu/drm/i915/display/intel_wm_types.h index 93152537b420..628b7c0ce484 100644 --- a/drivers/gpu/drm/i915/intel_pm_types.h +++ b/drivers/gpu/drm/i915/display/intel_wm_types.h @@ -3,12 +3,12 @@ * Copyright © 2021 Intel Corporation */ -#ifndef __INTEL_PM_TYPES_H__ -#define __INTEL_PM_TYPES_H__ +#ifndef __INTEL_WM_TYPES_H__ +#define __INTEL_WM_TYPES_H__ #include <linux/types.h> -#include "display/intel_display_limits.h" +#include "intel_display_limits.h" enum intel_ddb_partitioning { INTEL_DDB_PART_1_2, @@ -73,4 +73,4 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1, return false; } -#endif /* __INTEL_PM_TYPES_H__ */ +#endif /* __INTEL_WM_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 9b172a1e90de..ce55b8f09301 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -642,7 +642,7 @@ icl_plane_disable_arm(struct intel_plane *plane, skl_write_plane_wm(plane, crtc_state); - intel_psr2_disable_plane_sel_fetch(plane, crtc_state); + intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state); intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0); intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0); } @@ -1260,7 +1260,7 @@ icl_plane_update_noarm(struct intel_plane *plane, if (plane_state->force_black) icl_plane_csc_load_black(plane); - intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); + intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane); } static void @@ -1287,6 +1287,8 @@ icl_plane_update_arm(struct intel_plane *plane, if (plane_state->scaler_id >= 0) skl_program_plane_scaler(plane, crtc_state, plane_state); + intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state); + /* * The control register self-arms if the plane was previously * disabled. 
Try to make the plane enable atomic by writing @@ -2180,7 +2182,7 @@ static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915, if (DISPLAY_VER(i915) < 12) return false; - /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */ + /* Wa_14010477008 */ if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0)) return false; diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index d1670cc3eff2..f0af997d2a23 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -5,6 +5,10 @@ #include <drm/drm_blend.h> +#include "i915_drv.h" +#include "i915_fixed.h" +#include "i915_reg.h" +#include "i9xx_wm.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_bw.h" @@ -13,13 +17,9 @@ #include "intel_display_power.h" #include "intel_display_types.h" #include "intel_fb.h" -#include "skl_watermark.h" - -#include "i915_drv.h" -#include "i915_fixed.h" -#include "i915_reg.h" #include "intel_pcode.h" -#include "intel_pm.h" +#include "intel_wm.h" +#include "skl_watermark.h" static void skl_sagv_disable(struct drm_i915_private *i915); @@ -64,7 +64,7 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *i915) static bool intel_has_sagv(struct drm_i915_private *i915) { - return DISPLAY_VER(i915) >= 9 && !IS_LP(i915) && + return HAS_SAGV(i915) && i915->display.sagv.status != I915_SAGV_NOT_CONTROLLED; } @@ -92,7 +92,7 @@ intel_sagv_block_time(struct drm_i915_private *i915) return val; } else if (DISPLAY_VER(i915) == 11) { return 10; - } else if (DISPLAY_VER(i915) == 9 && !IS_LP(i915)) { + } else if (HAS_SAGV(i915)) { return 30; } else { return 0; @@ -101,7 +101,7 @@ intel_sagv_block_time(struct drm_i915_private *i915) static void intel_sagv_init(struct drm_i915_private *i915) { - if (!intel_has_sagv(i915)) + if (!HAS_SAGV(i915)) i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; /* @@ -359,7 +359,7 @@ static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) continue; /* Find the highest enabled wm level for this plane */ - for (level = ilk_wm_max_level(i915); + for (level = i915->display.wm.num_levels - 1; !wm->wm[level].enable; --level) { } @@ -710,10 +710,10 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, { struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor); struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - int level, max_level = ilk_wm_max_level(i915); struct skl_wm_level wm = {}; int ret, min_ddb_alloc = 0; struct skl_wm_params wp; + int level; ret = skl_compute_wm_params(crtc_state, 256, drm_format_info(DRM_FORMAT_ARGB8888), @@ -722,7 +722,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, crtc_state->pixel_rate, &wp, 0); drm_WARN_ON(&i915->drm, ret); - for (level = 0; level <= max_level; level++) { + for (level = 0; level < i915->display.wm.num_levels; level++) { unsigned int latency = i915->display.wm.skl_latency[level]; skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); @@ -1407,16 +1407,22 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm, } } -static bool icl_need_wm1_wa(struct drm_i915_private *i915, - enum plane_id plane_id) +static bool skl_need_wm_copy_wa(struct drm_i915_private *i915, int level, + const struct skl_plane_wm *wm) { /* * Wa_1408961008:icl, ehl * Wa_14012656716:tgl, adl - * Underruns with WM1+ disabled + * Wa_14017887344:icl + * Wa_14017868169:adl, tgl + * Due to some 
power saving optimizations, different subsystems + * like PSR, might still use even disabled wm level registers, + * for "reference", so lets keep at least the values sane. + * Considering amount of WA requiring us to do similar things, was + * decided to simply do it for all of the platforms, as those wm + * levels are disabled, this isn't going to do harm anyway. */ - return DISPLAY_VER(i915) == 11 || - (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR); + return level > 0 && !wm->wm[level].enable; } struct skl_plane_ddb_iter { @@ -1492,7 +1498,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, * Find the highest watermark level for which we can satisfy the block * requirement of active planes. */ - for (level = ilk_wm_max_level(i915); level >= 0; level--) { + for (level = i915->display.wm.num_levels - 1; level >= 0; level--) { blocks = 0; for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_plane_wm *wm = @@ -1568,7 +1574,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, * all levels as "enabled." Go back now and disable the ones * that aren't actually possible. */ - for (level++; level <= ilk_wm_max_level(i915); level++) { + for (level++; level < i915->display.wm.num_levels; level++) { for_each_plane_id_on_crtc(crtc, plane_id) { const struct skl_ddb_entry *ddb = &crtc_state->wm.skl.plane_ddb[plane_id]; @@ -1585,12 +1591,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state, else skl_check_wm_level(&wm->wm[level], ddb); - if (icl_need_wm1_wa(i915, plane_id) && - level == 1 && !wm->wm[level].enable && - wm->wm[0].enable) { - wm->wm[level].blocks = wm->wm[0].blocks; - wm->wm[level].lines = wm->wm[0].lines; - wm->wm[level].ignore_lines = wm->wm[0].ignore_lines; + if (skl_need_wm_copy_wa(i915, level, wm)) { + wm->wm[level].blocks = wm->wm[level - 1].blocks; + wm->wm[level].lines = wm->wm[level - 1].lines; + wm->wm[level].ignore_lines = wm->wm[level - 1].ignore_lines; } } } @@ -1967,10 +1971,10 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, struct skl_wm_level *levels) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - int level, max_level = ilk_wm_max_level(i915); struct skl_wm_level *result_prev = &levels[0]; + int level; - for (level = 0; level <= max_level; level++) { + for (level = 0; level < i915->display.wm.num_levels; level++) { struct skl_wm_level *result = &levels[level]; unsigned int latency = i915->display.wm.skl_latency[level]; @@ -2248,7 +2252,6 @@ void skl_write_plane_wm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(plane->base.dev); - int level, max_level = ilk_wm_max_level(i915); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; @@ -2256,8 +2259,9 @@ void skl_write_plane_wm(struct intel_plane *plane, &crtc_state->wm.skl.plane_ddb[plane_id]; const struct skl_ddb_entry *ddb_y = &crtc_state->wm.skl.plane_ddb_y[plane_id]; + int level; - for (level = 0; level <= max_level; level++) + for (level = 0; level < i915->display.wm.num_levels; level++) skl_write_wm_level(i915, PLANE_WM(pipe, plane_id, level), skl_plane_wm_level(pipe_wm, plane_id, level)); @@ -2285,14 +2289,14 @@ void skl_write_cursor_wm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(plane->base.dev); - int level, max_level = ilk_wm_max_level(i915); enum plane_id plane_id = plane->id; enum pipe pipe = 
plane->pipe; const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; const struct skl_ddb_entry *ddb = &crtc_state->wm.skl.plane_ddb[plane_id]; + int level; - for (level = 0; level <= max_level; level++) + for (level = 0; level < i915->display.wm.num_levels; level++) skl_write_wm_level(i915, CUR_WM(pipe, level), skl_plane_wm_level(pipe_wm, plane_id, level)); @@ -2324,9 +2328,9 @@ static bool skl_plane_wm_equals(struct drm_i915_private *i915, const struct skl_plane_wm *wm1, const struct skl_plane_wm *wm2) { - int level, max_level = ilk_wm_max_level(i915); + int level; - for (level = 0; level <= max_level; level++) { + for (level = 0; level < i915->display.wm.num_levels; level++) { /* * We don't check uv_wm as the hardware doesn't actually * use it. It only gets used for calculating the required @@ -2398,6 +2402,8 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, return PTR_ERR(plane_state); new_crtc_state->update_planes |= BIT(plane_id); + new_crtc_state->async_flip_planes = 0; + new_crtc_state->do_async_flip = false; } return 0; @@ -2674,9 +2680,9 @@ static bool skl_plane_selected_wm_equals(struct intel_plane *plane, const struct skl_pipe_wm *new_pipe_wm) { struct drm_i915_private *i915 = to_i915(plane->base.dev); - int level, max_level = ilk_wm_max_level(i915); + int level; - for (level = 0; level <= max_level; level++) { + for (level = 0; level < i915->display.wm.num_levels; level++) { /* * We don't check uv_wm as the hardware doesn't actually * use it. It only gets used for calculating the required @@ -2755,6 +2761,8 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state, return PTR_ERR(plane_state); new_crtc_state->update_planes |= BIT(plane_id); + new_crtc_state->async_flip_planes = 0; + new_crtc_state->do_async_flip = false; } return 0; @@ -2810,16 +2818,14 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, { struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - int level, max_level; enum plane_id plane_id; + int level; u32 val; - max_level = ilk_wm_max_level(i915); - for_each_plane_id_on_crtc(crtc, plane_id) { struct skl_plane_wm *wm = &out->planes[plane_id]; - for (level = 0; level <= max_level; level++) { + for (level = 0; level < i915->display.wm.num_levels; level++) { if (plane_id != PLANE_CURSOR) val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level)); else @@ -2856,7 +2862,7 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, } } -void skl_wm_get_hw_state(struct drm_i915_private *i915) +static void skl_wm_get_hw_state(struct drm_i915_private *i915) { struct intel_dbuf_state *dbuf_state = to_intel_dbuf_state(i915->display.dbuf.obj.state); @@ -2956,7 +2962,7 @@ static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915) return false; } -void skl_wm_sanitize(struct drm_i915_private *i915) +static void skl_wm_sanitize(struct drm_i915_private *i915) { struct intel_crtc *crtc; @@ -2992,6 +2998,12 @@ void skl_wm_sanitize(struct drm_i915_private *i915) } } +static void skl_wm_get_hw_state_and_sanitize(struct drm_i915_private *i915) +{ + skl_wm_get_hw_state(i915); + skl_wm_sanitize(i915); +} + void intel_wm_state_verify(struct intel_crtc *crtc, struct intel_crtc_state *new_crtc_state) { @@ -3002,9 +3014,9 @@ void intel_wm_state_verify(struct intel_crtc *crtc, struct skl_pipe_wm wm; } *hw; const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal; - int level, max_level = ilk_wm_max_level(i915); struct intel_plane *plane; u8 hw_enabled_slices; + int 
level; if (DISPLAY_VER(i915) < 9 || !new_crtc_state->hw.active) return; @@ -3031,7 +3043,7 @@ void intel_wm_state_verify(struct intel_crtc *crtc, const struct skl_wm_level *hw_wm_level, *sw_wm_level; /* Watermarks */ - for (level = 0; level <= max_level; level++) { + for (level = 0; level < i915->display.wm.num_levels; level++) { hw_wm_level = &hw->wm.planes[plane->id].wm[level]; sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level); @@ -3153,7 +3165,7 @@ void skl_watermark_ipc_init(struct drm_i915_private *i915) static void adjust_wm_latency(struct drm_i915_private *i915, - u16 wm[], int max_level, int read_latency) + u16 wm[], int num_levels, int read_latency) { bool wm_lv_0_adjust_needed = i915->dram_info.wm_lv_0_adjust_needed; int i, level; @@ -3163,12 +3175,12 @@ adjust_wm_latency(struct drm_i915_private *i915, * need to be disabled. We make sure to sanitize the values out * of the punit to satisfy this requirement. */ - for (level = 1; level <= max_level; level++) { + for (level = 1; level < num_levels; level++) { if (wm[level] == 0) { - for (i = level + 1; i <= max_level; i++) + for (i = level + 1; i < num_levels; i++) wm[i] = 0; - max_level = level - 1; + num_levels = level; break; } } @@ -3181,7 +3193,7 @@ adjust_wm_latency(struct drm_i915_private *i915, * from the punit when level 0 response data is 0us. */ if (wm[0] == 0) { - for (level = 0; level <= max_level; level++) + for (level = 0; level < num_levels; level++) wm[level] += read_latency; } @@ -3197,7 +3209,7 @@ adjust_wm_latency(struct drm_i915_private *i915, static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) { - int max_level = ilk_wm_max_level(i915); + int num_levels = i915->display.wm.num_levels; u32 val; val = intel_de_read(i915, MTL_LATENCY_LP0_LP1); @@ -3212,12 +3224,12 @@ static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val); wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val); - adjust_wm_latency(i915, wm, max_level, 6); + adjust_wm_latency(i915, wm, num_levels, 6); } static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) { - int max_level = ilk_wm_max_level(i915); + int num_levels = i915->display.wm.num_levels; int read_latency = DISPLAY_VER(i915) >= 12 ? 3 : 2; int mult = IS_DG2(i915) ? 
2 : 1; u32 val; @@ -3249,11 +3261,16 @@ static void skl_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult; wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult; - adjust_wm_latency(i915, wm, max_level, read_latency); + adjust_wm_latency(i915, wm, num_levels, read_latency); } static void skl_setup_wm_latency(struct drm_i915_private *i915) { + if (HAS_HW_SAGV_WM(i915)) + i915->display.wm.num_levels = 6; + else + i915->display.wm.num_levels = 8; + if (DISPLAY_VER(i915) >= 14) mtl_read_wm_latency(i915, i915->display.wm.skl_latency); else @@ -3264,6 +3281,7 @@ static void skl_setup_wm_latency(struct drm_i915_private *i915) static const struct intel_wm_funcs skl_wm_funcs = { .compute_global_watermarks = skl_compute_wm, + .get_hw_state = skl_wm_get_hw_state_and_sanitize, }; void skl_wm_init(struct drm_i915_private *i915) @@ -3541,13 +3559,34 @@ static const struct file_operations skl_watermark_ipc_status_fops = { .write = skl_watermark_ipc_status_write }; -void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915) +static int intel_sagv_status_show(struct seq_file *m, void *unused) +{ + struct drm_i915_private *i915 = m->private; + static const char * const sagv_status[] = { + [I915_SAGV_UNKNOWN] = "unknown", + [I915_SAGV_DISABLED] = "disabled", + [I915_SAGV_ENABLED] = "enabled", + [I915_SAGV_NOT_CONTROLLED] = "not controlled", + }; + + seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915))); + seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]); + seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(intel_sagv_status); + +void skl_watermark_debugfs_register(struct drm_i915_private *i915) { struct drm_minor *minor = i915->drm.primary; - if (!HAS_IPC(i915)) - return; + if (HAS_IPC(i915)) + debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915, + &skl_watermark_ipc_status_fops); - debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, i915, - &skl_watermark_ipc_status_fops); + if (HAS_SAGV(i915)) + debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, i915, + &intel_sagv_status_fops); } diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h index 37954c472070..f91a3d4ddc07 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.h +++ b/drivers/gpu/drm/i915/display/skl_watermark.h @@ -10,7 +10,7 @@ #include "intel_display_limits.h" #include "intel_global_state.h" -#include "intel_pm_types.h" +#include "intel_wm_types.h" struct drm_i915_private; struct intel_atomic_state; @@ -38,16 +38,13 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, const struct skl_ddb_entry *entries, int num_entries, int ignore_idx); -void skl_wm_get_hw_state(struct drm_i915_private *i915); -void skl_wm_sanitize(struct drm_i915_private *i915); - void intel_wm_state_verify(struct intel_crtc *crtc, struct intel_crtc_state *new_crtc_state); void skl_watermark_ipc_init(struct drm_i915_private *i915); void skl_watermark_ipc_update(struct drm_i915_private *i915); bool skl_watermark_ipc_enabled(struct drm_i915_private *i915); -void skl_watermark_ipc_debugfs_register(struct drm_i915_private *i915); +void skl_watermark_debugfs_register(struct drm_i915_private *i915); void skl_wm_init(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 
2289f6b1b4eb..8d2e6e151ba0 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -331,32 +331,23 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 tmp; bool cold_boot = false; /* Set the MIPI mode * If MIPI_Mode is off, then writing to LP_Wake bit is not reflecting. * Power ON MIPI IO first and then write into IO reset and LP wake bits */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); - intel_de_write(dev_priv, MIPI_CTRL(port), - tmp | GLK_MIPIIO_ENABLE); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, MIPI_CTRL(port), 0, GLK_MIPIIO_ENABLE); /* Put the IO into reset */ - tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); - tmp &= ~GLK_MIPIIO_RESET_RELEASED; - intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp); + intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0); /* Program LP Wake */ for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); - if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) - tmp &= ~GLK_LP_WAKE; - else - tmp |= GLK_LP_WAKE; - intel_de_write(dev_priv, MIPI_CTRL(port), tmp); + u32 tmp = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); + intel_de_rmw(dev_priv, MIPI_CTRL(port), + GLK_LP_WAKE, (tmp & DEVICE_READY) ? GLK_LP_WAKE : 0); } /* Wait for Pwr ACK */ @@ -380,7 +371,6 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 val; /* Wait for MIPI PHY status bit to set */ for_each_dsi_port(port, intel_dsi->ports) { @@ -390,24 +380,18 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) } /* Get IO out of reset */ - val = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); - intel_de_write(dev_priv, MIPI_CTRL(PORT_A), - val | GLK_MIPIIO_RESET_RELEASED); + intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), 0, GLK_MIPIIO_RESET_RELEASED); /* Get IO out of Low power state*/ for_each_dsi_port(port, intel_dsi->ports) { if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) { - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); - val &= ~ULPS_STATE_MASK; - val |= DEVICE_READY; - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + ULPS_STATE_MASK, DEVICE_READY); usleep_range(10, 15); } else { /* Enter ULPS */ - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); - val &= ~ULPS_STATE_MASK; - val |= (ULPS_STATE_ENTER | DEVICE_READY); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY); /* Wait for ULPS active */ if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port), @@ -415,20 +399,15 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder) drm_err(&dev_priv->drm, "ULPS not active\n"); /* Exit ULPS */ - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); - val &= ~ULPS_STATE_MASK; - val |= (ULPS_STATE_EXIT | DEVICE_READY); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + ULPS_STATE_MASK, ULPS_STATE_EXIT | DEVICE_READY); /* Enter Normal Mode */ - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); - val &= ~ULPS_STATE_MASK; 
- val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); - - val = intel_de_read(dev_priv, MIPI_CTRL(port)); - val &= ~GLK_LP_WAKE; - intel_de_write(dev_priv, MIPI_CTRL(port), val); + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + ULPS_STATE_MASK, + ULPS_STATE_NORMAL_OPERATION | DEVICE_READY); + + intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_LP_WAKE, 0); } } @@ -460,9 +439,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder) /* Enable MIPI PHY transparent latch */ for_each_dsi_port(port, intel_dsi->ports) { - val = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)); - intel_de_write(dev_priv, BXT_MIPI_PORT_CTRL(port), - val | LP_OUTPUT_HOLD); + intel_de_rmw(dev_priv, BXT_MIPI_PORT_CTRL(port), 0, LP_OUTPUT_HOLD); usleep_range(2000, 2500); } @@ -482,7 +459,6 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 val; drm_dbg_kms(&dev_priv->drm, "\n"); @@ -505,9 +481,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder) * Common bit for both MIPI Port A & MIPI Port C * No similar bit in MIPI Port C reg */ - val = intel_de_read(dev_priv, MIPI_PORT_CTRL(PORT_A)); - intel_de_write(dev_priv, MIPI_PORT_CTRL(PORT_A), - val | LP_OUTPUT_HOLD); + intel_de_rmw(dev_priv, MIPI_PORT_CTRL(PORT_A), 0, LP_OUTPUT_HOLD); usleep_range(1000, 1500); intel_de_write(dev_priv, MIPI_DEVICE_READY(port), @@ -537,15 +511,11 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 val; /* Enter ULPS */ - for_each_dsi_port(port, intel_dsi->ports) { - val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port)); - val &= ~ULPS_STATE_MASK; - val |= (ULPS_STATE_ENTER | DEVICE_READY); - intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), + ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY); /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { @@ -568,12 +538,9 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 tmp; /* Put the IO into reset */ - tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A)); - tmp &= ~GLK_MIPIIO_RESET_RELEASED; - intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp); + intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0); /* Wait for MIPI PHY status bit to unset */ for_each_dsi_port(port, intel_dsi->ports) { @@ -583,11 +550,8 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder) } /* Clear MIPI mode */ - for_each_dsi_port(port, intel_dsi->ports) { - tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); - tmp &= ~GLK_MIPIIO_ENABLE; - intel_de_write(dev_priv, MIPI_CTRL(port), tmp); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, MIPI_CTRL(port), GLK_MIPIIO_ENABLE, 0); } static void glk_dsi_clear_device_ready(struct intel_encoder *encoder) @@ -607,7 +571,6 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */ i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? 
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A); - u32 val; intel_de_write(dev_priv, MIPI_DEVICE_READY(port), DEVICE_READY | ULPS_STATE_ENTER); @@ -631,8 +594,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) drm_err(&dev_priv->drm, "DSI LP not going Low\n"); /* Disable MIPI PHY transparent latch */ - val = intel_de_read(dev_priv, port_ctrl); - intel_de_write(dev_priv, port_ctrl, val & ~LP_OUTPUT_HOLD); + intel_de_rmw(dev_priv, port_ctrl, LP_OUTPUT_HOLD, 0); usleep_range(1000, 1500); intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00); @@ -649,23 +611,17 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, enum port port; if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { - u32 temp; + u32 temp = intel_dsi->pixel_overlap; + if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { - for_each_dsi_port(port, intel_dsi->ports) { - temp = intel_de_read(dev_priv, - MIPI_CTRL(port)); - temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK | - intel_dsi->pixel_overlap << - BXT_PIXEL_OVERLAP_CNT_SHIFT; - intel_de_write(dev_priv, MIPI_CTRL(port), - temp); - } + for_each_dsi_port(port, intel_dsi->ports) + intel_de_rmw(dev_priv, MIPI_CTRL(port), + BXT_PIXEL_OVERLAP_CNT_MASK, + temp << BXT_PIXEL_OVERLAP_CNT_SHIFT); } else { - temp = intel_de_read(dev_priv, VLV_CHICKEN_3); - temp &= ~PIXEL_OVERLAP_CNT_MASK | - intel_dsi->pixel_overlap << - PIXEL_OVERLAP_CNT_SHIFT; - intel_de_write(dev_priv, VLV_CHICKEN_3, temp); + intel_de_rmw(dev_priv, VLV_CHICKEN_3, + PIXEL_OVERLAP_CNT_MASK, + temp << PIXEL_OVERLAP_CNT_SHIFT); } } @@ -709,11 +665,9 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) for_each_dsi_port(port, intel_dsi->ports) { i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); - u32 temp; /* de-assert ip_tg_enable signal */ - temp = intel_de_read(dev_priv, port_ctrl); - intel_de_write(dev_priv, port_ctrl, temp & ~DPI_ENABLE); + intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0); intel_de_posting_read(dev_priv, port_ctrl); } } @@ -787,7 +741,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum port port; - u32 val; bool glk_cold_boot = false; drm_dbg_kms(&dev_priv->drm, "\n"); @@ -810,9 +763,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, if (IS_BROXTON(dev_priv)) { /* Add MIPI IO reset programming for modeset */ - val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); - intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, - val | MIPIO_RST_CTRL); + intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL); /* Power up DSI regulator */ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); @@ -820,12 +771,9 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, } if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - u32 val; - /* Disable DPOunit clock gating, can stall pipe */ - val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); - val |= DPOUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); + intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), + 0, DPOUNIT_CLOCK_GATE_DISABLE); } if (!IS_GEMINILAKE(dev_priv)) @@ -949,7 +897,6 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 val; drm_dbg_kms(&dev_priv->drm, "\n"); @@ -987,21 
+934,16 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, HS_IO_CTRL_SELECT); /* Add MIPI IO reset programming for modeset */ - val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON); - intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, - val & ~MIPIO_RST_CTRL); + intel_de_rmw(dev_priv, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0); } if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { bxt_dsi_pll_disable(encoder); } else { - u32 val; - vlv_dsi_pll_disable(encoder); - val = intel_de_read(dev_priv, DSPCLK_GATE_D(dev_priv)); - val &= ~DPOUNIT_CLOCK_GATE_DISABLE; - intel_de_write(dev_priv, DSPCLK_GATE_D(dev_priv), val); + intel_de_rmw(dev_priv, DSPCLK_GATE_D(dev_priv), + DPOUNIT_CLOCK_GATE_DISABLE, 0); } /* Assert reset */ @@ -1058,7 +1000,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, */ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && port == PORT_C) - enabled = intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE; + enabled = intel_de_read(dev_priv, TRANSCONF(PIPE_B)) & TRANSCONF_ENABLE; /* Try command mode if video mode not enabled */ if (!enabled) { @@ -1432,11 +1374,8 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder, } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { enum pipe pipe = crtc->pipe; - tmp = intel_de_read(dev_priv, MIPI_CTRL(port)); - tmp &= ~BXT_PIPE_SELECT_MASK; - - tmp |= BXT_PIPE_SELECT(pipe); - intel_de_write(dev_priv, MIPI_CTRL(port), tmp); + intel_de_rmw(dev_priv, MIPI_CTRL(port), + BXT_PIPE_SELECT_MASK, BXT_PIPE_SELECT(pipe)); } /* XXX: why here, why like this? handling in irq handler?! */ @@ -1605,7 +1544,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 val; if (IS_GEMINILAKE(dev_priv)) return; @@ -1620,9 +1558,7 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder) vlv_dsi_reset_clocks(encoder, port); intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP); - val = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)); - val &= ~VID_MODE_FORMAT_MASK; - intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val); + intel_de_rmw(dev_priv, MIPI_DSI_FUNC_PRG(port), VID_MODE_FORMAT_MASK, 0); intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1); } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index af7402127cd9..b697badbbe71 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -302,13 +302,10 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) void bxt_dsi_pll_disable(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val; drm_dbg_kms(&dev_priv->drm, "\n"); - val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE); - val &= ~BXT_DSI_PLL_DO_ENABLE; - intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val); + intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0); /* * PLL lock should deassert within 200us. 
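[Editor's note, not part of the patch] The DSI hunks above (glk_dsi_enable_io(), glk_dsi_device_ready(), bxt_dsi_pll_disable() and friends) all apply the same mechanical conversion: an open-coded read/modify/write of a display register becomes a single intel_de_rmw(dev_priv, reg, clear_mask, set_mask) call. A minimal sketch of the before/after shape, using the GLK_MIPIIO_ENABLE case from the glk_dsi_enable_io() hunk above; the wrapper function name is hypothetical and exists only to make the fragment self-contained.

/*
 * Illustrative sketch only (not an additional change in this series).
 * The two forms below are equivalent; they are shown together purely
 * for comparison. intel_de_rmw() reads the register, clears the bits
 * in clear_mask, sets the bits in set_mask and writes the result back,
 * so a clear_mask of 0 means "only set bits".
 */
static void glk_mipi_io_enable_sketch(struct drm_i915_private *dev_priv,
				      enum port port)
{
	/* Before: explicit read, OR in the bit, write back. */
	u32 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
	intel_de_write(dev_priv, MIPI_CTRL(port), tmp | GLK_MIPIIO_ENABLE);

	/* After: one call, clearing nothing and setting GLK_MIPIIO_ENABLE. */
	intel_de_rmw(dev_priv, MIPI_CTRL(port), 0, GLK_MIPIIO_ENABLE);
}

The same pattern covers the pure-clear cases as well, e.g. intel_de_rmw(dev_priv, MIPI_CTRL(PORT_A), GLK_MIPIIO_RESET_RELEASED, 0) in place of the read/AND-NOT/write triplet.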
@@ -542,7 +539,6 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - u32 val; drm_dbg_kms(&dev_priv->drm, "\n"); @@ -559,9 +555,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder, } /* Enable DSI PLL */ - val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE); - val |= BXT_DSI_PLL_DO_ENABLE; - intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val); + intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE); /* Timeout and fail if PLL not locked */ if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE, @@ -589,13 +583,9 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port)); intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp); } else { - tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV1); - tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK; - intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, tmp); + intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV1, GLK_TX_ESC_CLK_DIV1_MASK, 0); - tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV2); - tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK; - intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, tmp); + intel_de_rmw(dev_priv, MIPIO_TXESC_CLK_DIV2, GLK_TX_ESC_CLK_DIV2_MASK, 0); } intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index f0dbfc434e07..f4f694f12907 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -28,7 +28,6 @@ #include "intel_migrate.h" #include "intel_mocs.h" #include "intel_pci_config.h" -#include "intel_pm.h" #include "intel_rc6.h" #include "intel_renderstate.h" #include "intel_rps.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index cef3d6f5c34e..85ae7dc079f2 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -17,7 +17,6 @@ #include "intel_gt_print.h" #include "intel_gt_requests.h" #include "intel_llc.h" -#include "intel_pm.h" #include "intel_rc6.h" #include "intel_rps.h" #include "intel_wakeref.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h index be0f6e305c88..df07e1e799e3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h @@ -9,8 +9,6 @@ #include "i915_reg_defs.h" #include "display/intel_display_reg_defs.h" /* VLV_DISPLAY_BASE */ -#define MCR_REG(offset) ((const i915_mcr_reg_t){ .reg = (offset) }) - /* * The perf control registers are technically multicast registers, but the * driver never needs to read/write them directly; we only use them to build diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index f3ad93db0b21..89fdfc67f8d1 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -158,7 +158,7 @@ static const struct intel_memory_region_ops intel_region_lmem_ops = { static bool get_legacy_lowmem_region(struct intel_uncore *uncore, u64 *start, u32 *size) { - if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0)) + if (!IS_DG1(uncore->i915)) return false; *start = 0; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index f5d7b5126433..4d0dc9de23f9 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -1677,7 +1677,6 @@ static void 
vlv_init_gpll_ref_freq(struct intel_rps *rps) static void vlv_rps_init(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); - u32 val; vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT) | @@ -1686,21 +1685,6 @@ static void vlv_rps_init(struct intel_rps *rps) vlv_init_gpll_ref_freq(rps); - val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); - switch ((val >> 6) & 3) { - case 0: - case 1: - i915->mem_freq = 800; - break; - case 2: - i915->mem_freq = 1066; - break; - case 3: - i915->mem_freq = 1333; - break; - } - drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq); - rps->max_freq = vlv_rps_max_freq(rps); rps->rp0_freq = rps->max_freq; drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", @@ -1727,7 +1711,6 @@ static void vlv_rps_init(struct intel_rps *rps) static void chv_rps_init(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); - u32 val; vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT) | @@ -1736,18 +1719,6 @@ static void chv_rps_init(struct intel_rps *rps) vlv_init_gpll_ref_freq(rps); - val = vlv_cck_read(i915, CCK_FUSE_REG); - - switch ((val >> 2) & 0x7) { - case 3: - i915->mem_freq = 2000; - break; - default: - i915->mem_freq = 1600; - break; - } - drm_dbg(&i915->drm, "DDR speed: %d MHz\n", i915->mem_freq); - rps->max_freq = chv_rps_max_freq(rps); rps->rp0_freq = rps->max_freq; drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n", diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 485c5cc5d0f9..8859eb118510 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1470,43 +1470,12 @@ gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) } static void -tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) -{ - struct drm_i915_private *i915 = gt->i915; - - gen12_gt_workarounds_init(gt, wal); - - /* Wa_1409420604:tgl */ - if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) - wa_mcr_write_or(wal, - SUBSLICE_UNIT_LEVEL_CLKGATE2, - CPSSUNIT_CLKGATE_DIS); - - /* Wa_1607087056:tgl also know as BUG:1409180338 */ - if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) - wa_write_or(wal, - GEN11_SLICE_UNIT_LEVEL_CLKGATE, - L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); - - /* Wa_1408615072:tgl[a0] */ - if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, - VSUNIT_CLKGATE_DIS_TGL); -} - -static void dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { struct drm_i915_private *i915 = gt->i915; gen12_gt_workarounds_init(gt, wal); - /* Wa_1607087056:dg1 */ - if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) - wa_write_or(wal, - GEN11_SLICE_UNIT_LEVEL_CLKGATE, - L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); - /* Wa_1409420604:dg1 */ if (IS_DG1(i915)) wa_mcr_write_or(wal, @@ -1779,8 +1748,6 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal) xehpsdv_gt_workarounds_init(gt, wal); else if (IS_DG1(i915)) dg1_gt_workarounds_init(gt, wal); - else if (IS_TIGERLAKE(i915)) - tgl_gt_workarounds_init(gt, wal); else if (GRAPHICS_VER(i915) == 12) gen12_gt_workarounds_init(gt, wal); else if (GRAPHICS_VER(i915) == 11) @@ -2193,20 +2160,6 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine) } } -static void dg1_whitelist_build(struct intel_engine_cs *engine) -{ - struct i915_wa_list *w = &engine->whitelist; - - tgl_whitelist_build(engine); - - /* GEN:BUG:1409280441:dg1 */ - if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) && - (engine->class 
== RENDER_CLASS || - engine->class == COPY_ENGINE_CLASS)) - whitelist_reg_ext(w, RING_ID(engine->mmio_base), - RING_FORCE_TO_NONPRIV_ACCESS_RD); -} - static void xehpsdv_whitelist_build(struct intel_engine_cs *engine) { allow_read_ctx_timestamp(engine); @@ -2286,8 +2239,6 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) dg2_whitelist_build(engine); else if (IS_XEHPSDV(i915)) xehpsdv_whitelist_build(engine); - else if (IS_DG1(i915)) - dg1_whitelist_build(engine); else if (GRAPHICS_VER(i915) == 12) tgl_whitelist_build(engine); else if (GRAPHICS_VER(i915) == 11) @@ -2482,27 +2433,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) true); } - if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || - IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) { - /* - * Wa_1607138336:tgl[a0],dg1[a0] - * Wa_1607063988:tgl[a0],dg1[a0] - */ - wa_write_or(wal, - GEN9_CTX_PREEMPT_REG, - GEN12_DISABLE_POSH_BUSY_FF_DOP_CG); - } - - if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) { - /* - * Wa_1606679103:tgl - * (see also Wa_1606682166:icl) - */ - wa_write_or(wal, - GEN7_SARCHKMD, - GEN7_DISABLE_SAMPLER_PREFETCH); - } - if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { /* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */ @@ -2532,30 +2462,22 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) } if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || - IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { - /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */ + /* Wa_1409804808 */ wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_PUSH_CONST_DEREF_HOLD_DIS); - /* - * Wa_1409085225:tgl - * Wa_14010229206:tgl,rkl,dg1[a0],adl-s,adl-p - */ + /* Wa_14010229206 */ wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH); } - if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || - IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) { + if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) { /* - * Wa_1607030317:tgl - * Wa_1607186500:tgl - * Wa_1607297627:tgl,rkl,dg1[a0],adlp + * Wa_1607297627 * * On TGL and RKL there are multiple entries for this WA in the * BSpec; some indicate this is an A0-only WA, others indicate * it applies to all steppings so we trust the "all steppings." - * For DG1 this only applies to A0. 
*/ wa_masked_en(wal, RING_PSMI_CTL(RENDER_RING_BASE), diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c index cfd736d88939..779fadcec7c4 100644 --- a/drivers/gpu/drm/i915/gt/selftest_llc.c +++ b/drivers/gpu/drm/i915/gt/selftest_llc.c @@ -3,7 +3,6 @@ * Copyright © 2019 Intel Corporation */ -#include "intel_pm.h" /* intel_gpu_freq() */ #include "selftest_llc.h" #include "intel_rps.h" diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 4d898b14de93..e0c5dfb788eb 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -63,7 +63,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; - if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE)) + if (!(vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_EDP)) & TRANSCONF_ENABLE)) return 0; if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE)) @@ -79,7 +79,7 @@ int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe) pipe < PIPE_A || pipe >= I915_MAX_PIPES)) return -EINVAL; - if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE) + if (vgpu_vreg_t(vgpu, TRANSCONF(pipe)) & TRANSCONF_ENABLE) return 1; if (edp_pipe_is_enabled(vgpu) && @@ -187,8 +187,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) GEN8_DE_PORT_HOTPLUG(HPD_PORT_C)); for_each_pipe(dev_priv, pipe) { - vgpu_vreg_t(vgpu, PIPECONF(pipe)) &= - ~(PIPECONF_ENABLE | PIPECONF_STATE_ENABLE); + vgpu_vreg_t(vgpu, TRANSCONF(pipe)) &= + ~(TRANSCONF_ENABLE | TRANSCONF_STATE_ENABLE); vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISP_ENABLE; vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK; @@ -248,8 +248,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) * TRANSCODER_A can be enabled. PORT_x depends on the input of * setup_virtual_dp_monitor. */ - vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; - vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_STATE_ENABLE; + vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE; + vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_STATE_ENABLE; /* * Golden M/N are calculated based on: @@ -506,7 +506,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; } - vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; + vgpu_vreg_t(vgpu, TRANSCONF(TRANSCODER_A)) |= TRANSCONF_ENABLE; } static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) @@ -584,7 +584,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, * @turnon: Turn ON/OFF vblank_timer * * This function is used to turn on/off or update the per-vGPU vblank_timer - * when PIPECONF is enabled or disabled. vblank_timer period is also updated + * when TRANSCONF is enabled or disabled. vblank_timer period is also updated * if guest changed the refresh rate. 
* */ diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 735fc83e7026..3c8e0d198c4f 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -666,8 +666,8 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu) link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)); /* Get H/V total from transcoder timing */ - htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT); - vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT); + htotal = (vgpu_vreg_t(vgpu, TRANS_HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT); + vtotal = (vgpu_vreg_t(vgpu, TRANS_VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT); if (dp_br && link_n && htotal && vtotal) { u64 pixel_clk = 0; @@ -697,12 +697,12 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); - if (data & PIPECONF_ENABLE) { - vgpu_vreg(vgpu, offset) |= PIPECONF_STATE_ENABLE; + if (data & TRANSCONF_ENABLE) { + vgpu_vreg(vgpu, offset) |= TRANSCONF_STATE_ENABLE; vgpu_update_refresh_rate(vgpu); vgpu_update_vblank_emulation(vgpu, true); } else { - vgpu_vreg(vgpu, offset) &= ~PIPECONF_STATE_ENABLE; + vgpu_vreg(vgpu, offset) &= ~TRANSCONF_STATE_ENABLE; vgpu_update_vblank_emulation(vgpu, false); } return 0; @@ -2262,10 +2262,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); /* display */ - MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write); - MMIO_DH(PIPECONF(PIPE_B), D_ALL, NULL, pipeconf_mmio_write); - MMIO_DH(PIPECONF(PIPE_C), D_ALL, NULL, pipeconf_mmio_write); - MMIO_DH(PIPECONF(_PIPE_EDP), D_ALL, NULL, pipeconf_mmio_write); + MMIO_DH(TRANSCONF(TRANSCODER_A), D_ALL, NULL, pipeconf_mmio_write); + MMIO_DH(TRANSCONF(TRANSCODER_B), D_ALL, NULL, pipeconf_mmio_write); + MMIO_DH(TRANSCONF(TRANSCODER_C), D_ALL, NULL, pipeconf_mmio_write); + MMIO_DH(TRANSCONF(TRANSCODER_EDP), D_ALL, NULL, pipeconf_mmio_write); MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write); MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL, reg50080_mmio_write); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 45773ce1deac..16011c0286ad 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -52,7 +52,6 @@ #include "i915_irq.h" #include "i915_scheduler.h" #include "intel_mchbar_regs.h" -#include "intel_pm.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) { diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index cf1c0970ecb4..db7a86def7e2 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -167,6 +167,8 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv) pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1; pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3; pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7; + pre |= IS_TIGERLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1; + pre |= IS_DG1(dev_priv) && INTEL_REVID(dev_priv) < 0x1; if (pre) { drm_err(&dev_priv->drm, "This is a pre-production stepping. 
" @@ -248,10 +250,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(dev_priv); - intel_pm_setup(dev_priv); - ret = intel_power_domains_init(dev_priv); - if (ret < 0) - goto err_gem; intel_irq_init(dev_priv); intel_init_display_hooks(dev_priv); intel_init_clock_gating_hooks(dev_priv); @@ -260,10 +258,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) return 0; -err_gem: - i915_gem_cleanup_early(dev_priv); - intel_gt_driver_late_release_all(dev_priv); - i915_drm_clients_fini(&dev_priv->clients); err_rootgt: intel_region_ttm_device_fini(dev_priv); err_ttm: @@ -936,7 +930,9 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file) */ static void i915_driver_lastclose(struct drm_device *dev) { - intel_fbdev_restore_mode(dev); + struct drm_i915_private *i915 = to_i915(dev); + + intel_fbdev_restore_mode(i915); vga_switcheroo_process_delayed_switch(); } @@ -1002,7 +998,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_suspend_encoders(i915); intel_shutdown_encoders(i915); - intel_dmc_ucode_suspend(i915); + intel_dmc_suspend(i915); i915_gem_suspend(i915); @@ -1032,6 +1028,13 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv) return false; } +static void i915_drm_complete(struct drm_device *dev) +{ + struct drm_i915_private *i915 = to_i915(dev); + + intel_pxp_resume_complete(i915->pxp); +} + static int i915_drm_prepare(struct drm_device *dev) { struct drm_i915_private *i915 = to_i915(dev); @@ -1072,8 +1075,6 @@ static int i915_drm_suspend(struct drm_device *dev) intel_suspend_encoders(dev_priv); - intel_suspend_hw(dev_priv); - /* Must be called before GGTT is suspended. */ intel_dpt_suspend(dev_priv); i915_ggtt_suspend(to_gt(dev_priv)->ggtt); @@ -1087,7 +1088,7 @@ static int i915_drm_suspend(struct drm_device *dev) dev_priv->suspend_count++; - intel_dmc_ucode_suspend(dev_priv); + intel_dmc_suspend(dev_priv); enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); @@ -1208,7 +1209,7 @@ static int i915_drm_resume(struct drm_device *dev) /* Must be called after GGTT is resumed. 
*/ intel_dpt_resume(dev_priv); - intel_dmc_ucode_resume(dev_priv); + intel_dmc_resume(dev_priv); i915_restore_display(dev_priv); intel_pps_unlock_regs_wa(dev_priv); @@ -1232,8 +1233,6 @@ static int i915_drm_resume(struct drm_device *dev) i915_gem_resume(dev_priv); - intel_pxp_resume(dev_priv->pxp); - intel_modeset_init_hw(dev_priv); intel_init_clock_gating(dev_priv); intel_hpd_init(dev_priv); @@ -1425,6 +1424,16 @@ static int i915_pm_resume(struct device *kdev) return i915_drm_resume(&i915->drm); } +static void i915_pm_complete(struct device *kdev) +{ + struct drm_i915_private *i915 = kdev_to_i915(kdev); + + if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF) + return; + + i915_drm_complete(&i915->drm); +} + /* freeze: before creating the hibernation_image */ static int i915_pm_freeze(struct device *kdev) { @@ -1645,6 +1654,7 @@ const struct dev_pm_ops i915_pm_ops = { .suspend_late = i915_pm_suspend_late, .resume_early = i915_pm_resume_early, .resume = i915_pm_resume, + .complete = i915_pm_complete, /* * S4 event handlers diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4295306487c7..6254aa977398 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -580,6 +580,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N) #define IS_ADLP_RPLP(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPL) +#define IS_ADLP_RPLU(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_RPLU) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev_priv) \ @@ -653,22 +655,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_TIGERLAKE(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) -#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \ - (IS_TGL_UY(__i915) && \ - IS_GRAPHICS_STEP(__i915, since, until)) - -#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \ - (IS_TIGERLAKE(__i915) && !IS_TGL_UY(__i915)) && \ - IS_GRAPHICS_STEP(__i915, since, until)) - #define IS_RKL_DISPLAY_STEP(p, since, until) \ (IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until)) -#define IS_DG1_GRAPHICS_STEP(p, since, until) \ - (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until)) -#define IS_DG1_DISPLAY_STEP(p, since, until) \ - (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until)) - #define IS_ADLS_DISPLAY_STEP(__i915, since, until) \ (IS_ALDERLAKE_S(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) @@ -876,7 +865,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, */ #define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages) -#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) +#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) +#define HAS_SAGV(dev_priv) (DISPLAY_VER(dev_priv) >= 9 && !IS_LP(dev_priv)) #define HAS_REGION(i915, i) (RUNTIME_INFO(i915)->memory_regions & (i)) #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM) diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c index 1225bc432f0d..596dd2c07010 100644 --- a/drivers/gpu/drm/i915/i915_hwmon.c +++ b/drivers/gpu/drm/i915/i915_hwmon.c @@ -99,20 +99,6 @@ hwm_field_read_and_scale(struct hwm_drvdata *ddat, i915_reg_t rgadr, return mul_u64_u32_shr(reg_value, scale_factor, nshift); } -static void -hwm_field_scale_and_write(struct hwm_drvdata *ddat, i915_reg_t rgadr, - int nshift, unsigned int scale_factor, long lval) -{ - u32 nval; - - /* 
Computation in 64-bits to avoid overflow. Round to nearest. */ - nval = DIV_ROUND_CLOSEST_ULL((u64)lval << nshift, scale_factor); - - hwm_locked_with_pm_intel_uncore_rmw(ddat, rgadr, - PKG_PWR_LIM_1, - REG_FIELD_PREP(PKG_PWR_LIM_1, nval)); -} - /* * hwm_energy - Obtain energy value * @@ -232,11 +218,15 @@ hwm_power1_max_interval_store(struct device *dev, /* val in hw units */ val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME); /* Convert to 1.x * power(2,y) */ - if (!val) - return -EINVAL; - y = ilog2(val); - /* x = (val - (1 << y)) >> (y - 2); */ - x = (val - (1ul << y)) << x_w >> y; + if (!val) { + /* Avoid ilog2(0) */ + y = 0; + x = 0; + } else { + y = ilog2(val); + /* x = (val - (1 << y)) >> (y - 2); */ + x = (val - (1ul << y)) << x_w >> y; + } rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y); @@ -392,6 +382,22 @@ hwm_power_max_read(struct hwm_drvdata *ddat, long *val) } static int +hwm_power_max_write(struct hwm_drvdata *ddat, long val) +{ + struct i915_hwmon *hwmon = ddat->hwmon; + u32 nval; + + /* Computation in 64-bits to avoid overflow. Round to nearest. */ + nval = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_power, SF_POWER); + nval = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, nval); + + hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit, + PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, + nval); + return 0; +} + +static int hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val) { struct i915_hwmon *hwmon = ddat->hwmon; @@ -425,16 +431,11 @@ hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val) static int hwm_power_write(struct hwm_drvdata *ddat, u32 attr, int chan, long val) { - struct i915_hwmon *hwmon = ddat->hwmon; u32 uval; switch (attr) { case hwmon_power_max: - hwm_field_scale_and_write(ddat, - hwmon->rg.pkg_rapl_limit, - hwmon->scl_shift_power, - SF_POWER, val); - return 0; + return hwm_power_max_write(ddat, val); case hwmon_power_crit: uval = DIV_ROUND_CLOSEST_ULL(val << POWER_SETUP_I1_SHIFT, SF_POWER); return hwm_pcode_write_i1(ddat->uncore->i915, uval); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 240d5e198904..31271c30a8cf 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -52,7 +52,6 @@ #include "i915_driver.h" #include "i915_drv.h" #include "i915_irq.h" -#include "intel_pm.h" /** * DOC: interrupt handling @@ -81,8 +80,7 @@ static inline void pmu_irq_stats(struct drm_i915_private *i915, } typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val); -typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915, - enum hpd_pin pin); +typedef u32 (*hotplug_enables_func)(struct intel_encoder *encoder); static const u32 hpd_ilk[HPD_NUM_PINS] = { [HPD_PORT_A] = DE_DP_A_HOTPLUG, @@ -199,6 +197,8 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) hpd->hpd = hpd_gen11; else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) hpd->hpd = hpd_bxt; + else if (DISPLAY_VER(dev_priv) == 9) + hpd->hpd = NULL; /* no north HPD on SKL */ else if (DISPLAY_VER(dev_priv) >= 8) hpd->hpd = hpd_bdw; else if (DISPLAY_VER(dev_priv) >= 7) @@ -884,7 +884,7 @@ static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915, u32 hotplug = 0; for_each_intel_encoder(&i915->drm, encoder) - hotplug |= hotplug_enables(i915, encoder->hpd_pin); + hotplug |= hotplug_enables(encoder); return hotplug; } @@ -2835,10 +2835,11 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv) 
spin_unlock_irq(&dev_priv->irq_lock); } -static u32 ibx_hotplug_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 ibx_hotplug_enables(struct intel_encoder *encoder) { - switch (pin) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + switch (encoder->hpd_pin) { case HPD_PORT_A: /* * When CPU and PCH are on the same package, port A @@ -2890,31 +2891,29 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) ibx_hpd_detection_setup(dev_priv); } -static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 icp_ddi_hotplug_enables(struct intel_encoder *encoder) { - switch (pin) { + switch (encoder->hpd_pin) { case HPD_PORT_A: case HPD_PORT_B: case HPD_PORT_C: case HPD_PORT_D: - return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin); + return SHOTPLUG_CTL_DDI_HPD_ENABLE(encoder->hpd_pin); default: return 0; } } -static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder) { - switch (pin) { + switch (encoder->hpd_pin) { case HPD_PORT_TC1: case HPD_PORT_TC2: case HPD_PORT_TC3: case HPD_PORT_TC4: case HPD_PORT_TC5: case HPD_PORT_TC6: - return ICP_TC_HPD_ENABLE(pin); + return ICP_TC_HPD_ENABLE(encoder->hpd_pin); default: return 0; } @@ -2958,17 +2957,16 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) icp_tc_hpd_detection_setup(dev_priv); } -static u32 gen11_hotplug_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 gen11_hotplug_enables(struct intel_encoder *encoder) { - switch (pin) { + switch (encoder->hpd_pin) { case HPD_PORT_TC1: case HPD_PORT_TC2: case HPD_PORT_TC3: case HPD_PORT_TC4: case HPD_PORT_TC5: case HPD_PORT_TC6: - return GEN11_HOTPLUG_CTL_ENABLE(pin); + return GEN11_HOTPLUG_CTL_ENABLE(encoder->hpd_pin); default: return 0; } @@ -3031,10 +3029,9 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) icp_hpd_irq_setup(dev_priv); } -static u32 spt_hotplug_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 spt_hotplug_enables(struct intel_encoder *encoder) { - switch (pin) { + switch (encoder->hpd_pin) { case HPD_PORT_A: return PORTA_HOTPLUG_ENABLE; case HPD_PORT_B: @@ -3048,10 +3045,9 @@ static u32 spt_hotplug_enables(struct drm_i915_private *i915, } } -static u32 spt_hotplug2_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 spt_hotplug2_enables(struct intel_encoder *encoder) { - switch (pin) { + switch (encoder->hpd_pin) { case HPD_PORT_E: return PORTE_HOTPLUG_ENABLE; default: @@ -3094,10 +3090,9 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) spt_hpd_detection_setup(dev_priv); } -static u32 ilk_hotplug_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 ilk_hotplug_enables(struct intel_encoder *encoder) { - switch (pin) { + switch (encoder->hpd_pin) { case HPD_PORT_A: return DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; @@ -3135,25 +3130,24 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) ibx_hpd_irq_setup(dev_priv); } -static u32 bxt_hotplug_enables(struct drm_i915_private *i915, - enum hpd_pin pin) +static u32 bxt_hotplug_enables(struct intel_encoder *encoder) { u32 hotplug; - switch (pin) { + switch (encoder->hpd_pin) { case HPD_PORT_A: hotplug = PORTA_HOTPLUG_ENABLE; - if (intel_bios_is_port_hpd_inverted(i915, PORT_A)) + if (intel_bios_encoder_hpd_invert(encoder->devdata)) hotplug |= BXT_DDIA_HPD_INVERT; return hotplug; case HPD_PORT_B: hotplug = 
PORTB_HOTPLUG_ENABLE; - if (intel_bios_is_port_hpd_inverted(i915, PORT_B)) + if (intel_bios_encoder_hpd_invert(encoder->devdata)) hotplug |= BXT_DDIB_HPD_INVERT; return hotplug; case HPD_PORT_C: hotplug = PORTC_HOTPLUG_ENABLE; - if (intel_bios_is_port_hpd_inverted(i915, PORT_C)) + if (intel_bios_encoder_hpd_invert(encoder->devdata)) hotplug |= BXT_DDIC_HPD_INVERT; return hotplug; default: @@ -3471,15 +3465,33 @@ static void i8xx_irq_reset(struct drm_i915_private *dev_priv) dev_priv->irq_mask = ~0u; } +static u32 i9xx_error_mask(struct drm_i915_private *i915) +{ + /* + * On gen2/3 FBC generates (seemingly spurious) + * display INVALID_GTT/INVALID_GTT_PTE table errors. + * + * Also gen3 bspec has this to say: + * "DISPA_INVALID_GTT_PTE + " [DevNapa] : Reserved. This bit does not reflect the page + " table error for the display plane A." + * + * Unfortunately we can't mask off individual PGTBL_ER bits, + * so we just have to mask off all page table errors via EMR. + */ + if (HAS_FBC(i915)) + return ~I915_ERROR_MEMORY_REFRESH; + else + return ~(I915_ERROR_PAGE_TABLE | + I915_ERROR_MEMORY_REFRESH); +} + static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; u16 enable_mask; - intel_uncore_write16(uncore, - EMR, - ~(I915_ERROR_PAGE_TABLE | - I915_ERROR_MEMORY_REFRESH)); + intel_uncore_write16(uncore, EMR, i9xx_error_mask(dev_priv)); /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = @@ -3510,9 +3522,7 @@ static void i8xx_error_irq_ack(struct drm_i915_private *i915, u16 emr; *eir = intel_uncore_read16(uncore, EIR); - - if (*eir) - intel_uncore_write16(uncore, EIR, *eir); + intel_uncore_write16(uncore, EIR, *eir); *eir_stuck = intel_uncore_read16(uncore, EIR); if (*eir_stuck == 0) @@ -3541,6 +3551,9 @@ static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, if (eir_stuck) drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n", eir_stuck); + + drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n", + intel_uncore_read(&dev_priv->uncore, PGTBL_ER)); } static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, @@ -3548,7 +3561,8 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, { u32 emr; - *eir = intel_uncore_rmw(&dev_priv->uncore, EIR, 0, 0); + *eir = intel_uncore_read(&dev_priv->uncore, EIR); + intel_uncore_write(&dev_priv->uncore, EIR, *eir); *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR); if (*eir_stuck == 0) @@ -3564,7 +3578,8 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, * (or by a GPU reset) so we mask any bit that * remains set. */ - emr = intel_uncore_rmw(&dev_priv->uncore, EMR, ~0, 0xffffffff); + emr = intel_uncore_read(&dev_priv->uncore, EMR); + intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff); intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck); } @@ -3576,6 +3591,9 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, if (eir_stuck) drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n", eir_stuck); + + drm_dbg(&dev_priv->drm, "PGTBL_ER: 0x%08x\n", + intel_uncore_read(&dev_priv->uncore, PGTBL_ER)); } static irqreturn_t i8xx_irq_handler(int irq, void *arg) @@ -3645,8 +3663,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv) struct intel_uncore *uncore = &dev_priv->uncore; u32 enable_mask; - intel_uncore_write(uncore, EMR, ~(I915_ERROR_PAGE_TABLE | - I915_ERROR_MEMORY_REFRESH)); + intel_uncore_write(uncore, EMR, i9xx_error_mask(dev_priv)); /* Unmask the interrupts that we always want on. 
*/ dev_priv->irq_mask = @@ -3749,26 +3766,31 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv) dev_priv->irq_mask = ~0u; } -static void i965_irq_postinstall(struct drm_i915_private *dev_priv) +static u32 i965_error_mask(struct drm_i915_private *i915) { - struct intel_uncore *uncore = &dev_priv->uncore; - u32 enable_mask; - u32 error_mask; - /* * Enable some error detection, note the instruction error mask * bit is reserved, so we leave it masked. + * + * i965 FBC no longer generates spurious GTT errors, + * so we can always enable the page table errors. */ - if (IS_G4X(dev_priv)) { - error_mask = ~(GM45_ERROR_PAGE_TABLE | - GM45_ERROR_MEM_PRIV | - GM45_ERROR_CP_PRIV | - I915_ERROR_MEMORY_REFRESH); - } else { - error_mask = ~(I915_ERROR_PAGE_TABLE | - I915_ERROR_MEMORY_REFRESH); - } - intel_uncore_write(uncore, EMR, error_mask); + if (IS_G4X(i915)) + return ~(GM45_ERROR_PAGE_TABLE | + GM45_ERROR_MEM_PRIV | + GM45_ERROR_CP_PRIV | + I915_ERROR_MEMORY_REFRESH); + else + return ~(I915_ERROR_PAGE_TABLE | + I915_ERROR_MEMORY_REFRESH); +} + +static void i965_irq_postinstall(struct drm_i915_private *dev_priv) +{ + struct intel_uncore *uncore = &dev_priv->uncore; + u32 enable_mask; + + intel_uncore_write(uncore, EMR, i965_error_mask(dev_priv)); /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 52531ab28c5f..a76c5ce9513d 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -17,7 +17,6 @@ #include "i915_drv.h" #include "i915_pmu.h" -#include "intel_pm.h" /* Frequency for the sampling timer for events which need it. */ #define FREQUENCY 200 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3b2642397b82..1757fb8fdf5b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -116,6 +116,9 @@ * #define GEN8_BAR _MMIO(0xb888) */ +#define GU_CNTL_PROTECTED _MMIO(0x10100C) +#define DEPRESENT REG_BIT(9) + #define GU_CNTL _MMIO(0x101010) #define LMEM_INIT REG_BIT(7) #define DRIVERFLR REG_BIT(31) @@ -541,9 +544,10 @@ #define _BXT_PHY0_BASE 0x6C000 #define _BXT_PHY1_BASE 0x162000 #define _BXT_PHY2_BASE 0x163000 -#define BXT_PHY_BASE(phy) _PHY3((phy), _BXT_PHY0_BASE, \ - _BXT_PHY1_BASE, \ - _BXT_PHY2_BASE) +#define BXT_PHY_BASE(phy) \ + _PICK_EVEN_2RANGES(phy, 1, \ + _BXT_PHY0_BASE, _BXT_PHY0_BASE, \ + _BXT_PHY1_BASE, _BXT_PHY2_BASE) #define _BXT_PHY(phy, reg) \ _MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg)) @@ -566,13 +570,14 @@ #define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \ _BXT_PHY_CTL_DDI_B) -#define _PHY_CTL_FAMILY_EDP 0x64C80 #define _PHY_CTL_FAMILY_DDI 0x64C90 +#define _PHY_CTL_FAMILY_EDP 0x64C80 #define _PHY_CTL_FAMILY_DDI_C 0x64CA0 #define COMMON_RESET_DIS (1 << 31) -#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PHY3((phy), _PHY_CTL_FAMILY_DDI, \ - _PHY_CTL_FAMILY_EDP, \ - _PHY_CTL_FAMILY_DDI_C) +#define BXT_PHY_CTL_FAMILY(phy) \ + _MMIO(_PICK_EVEN_2RANGES(phy, 1, \ + _PHY_CTL_FAMILY_DDI, _PHY_CTL_FAMILY_DDI, \ + _PHY_CTL_FAMILY_EDP, _PHY_CTL_FAMILY_DDI_C)) /* BXT PHY PLL registers */ #define _PORT_PLL_A 0x46074 @@ -1038,9 +1043,11 @@ #define _MBUS_ABOX0_CTL 0x45038 #define _MBUS_ABOX1_CTL 0x45048 #define _MBUS_ABOX2_CTL 0x4504C -#define MBUS_ABOX_CTL(x) _MMIO(_PICK(x, _MBUS_ABOX0_CTL, \ - _MBUS_ABOX1_CTL, \ - _MBUS_ABOX2_CTL)) +#define MBUS_ABOX_CTL(x) \ + _MMIO(_PICK_EVEN_2RANGES(x, 2, \ + _MBUS_ABOX0_CTL, _MBUS_ABOX1_CTL, \ + _MBUS_ABOX2_CTL, _MBUS_ABOX2_CTL)) + #define 
MBUS_ABOX_BW_CREDIT_MASK (3 << 20) #define MBUS_ABOX_BW_CREDIT(x) ((x) << 20) #define MBUS_ABOX_B_CREDIT_MASK (0xF << 16) @@ -1730,10 +1737,11 @@ #define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6) #define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2) #define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0) -#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \ - _PICK((pipe), _PALETTE_A, \ - _PALETTE_B, _CHV_PALETTE_C) + \ - (i) * 4) +#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \ + _PICK_EVEN_2RANGES(pipe, 2, \ + _PALETTE_A, _PALETTE_B, \ + _CHV_PALETTE_C, _CHV_PALETTE_C) + \ + (i) * 4) #define PEG_BAND_GAP_DATA _MMIO(0x14d68) @@ -1906,48 +1914,72 @@ #define PIPE_CRC_RES_RES1_I915(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES1_A_I915) #define PIPE_CRC_RES_RES2_G4X(pipe) _MMIO_TRANS2(pipe, _PIPE_CRC_RES_RES2_A_G4X) -/* Pipe A timing regs */ -#define _HTOTAL_A 0x60000 -#define _HBLANK_A 0x60004 -#define _HSYNC_A 0x60008 -#define _VTOTAL_A 0x6000c -#define _VBLANK_A 0x60010 -#define _VSYNC_A 0x60014 -#define _EXITLINE_A 0x60018 -#define _PIPEASRC 0x6001c +/* Pipe/transcoder A timing regs */ +#define _TRANS_HTOTAL_A 0x60000 +#define HTOTAL_MASK REG_GENMASK(31, 16) +#define HTOTAL(htotal) REG_FIELD_PREP(HTOTAL_MASK, (htotal)) +#define HACTIVE_MASK REG_GENMASK(15, 0) +#define HACTIVE(hdisplay) REG_FIELD_PREP(HACTIVE_MASK, (hdisplay)) +#define _TRANS_HBLANK_A 0x60004 +#define HBLANK_END_MASK REG_GENMASK(31, 16) +#define HBLANK_END(hblank_end) REG_FIELD_PREP(HBLANK_END_MASK, (hblank_end)) +#define HBLANK_START_MASK REG_GENMASK(15, 0) +#define HBLANK_START(hblank_start) REG_FIELD_PREP(HBLANK_START_MASK, (hblank_start)) +#define _TRANS_HSYNC_A 0x60008 +#define HSYNC_END_MASK REG_GENMASK(31, 16) +#define HSYNC_END(hsync_end) REG_FIELD_PREP(HSYNC_END_MASK, (hsync_end)) +#define HSYNC_START_MASK REG_GENMASK(15, 0) +#define HSYNC_START(hsync_start) REG_FIELD_PREP(HSYNC_START_MASK, (hsync_start)) +#define _TRANS_VTOTAL_A 0x6000c +#define VTOTAL_MASK REG_GENMASK(31, 16) +#define VTOTAL(vtotal) REG_FIELD_PREP(VTOTAL_MASK, (vtotal)) +#define VACTIVE_MASK REG_GENMASK(15, 0) +#define VACTIVE(vdisplay) REG_FIELD_PREP(VACTIVE_MASK, (vdisplay)) +#define _TRANS_VBLANK_A 0x60010 +#define VBLANK_END_MASK REG_GENMASK(31, 16) +#define VBLANK_END(vblank_end) REG_FIELD_PREP(VBLANK_END_MASK, (vblank_end)) +#define VBLANK_START_MASK REG_GENMASK(15, 0) +#define VBLANK_START(vblank_start) REG_FIELD_PREP(VBLANK_START_MASK, (vblank_start)) +#define _TRANS_VSYNC_A 0x60014 +#define VSYNC_END_MASK REG_GENMASK(31, 16) +#define VSYNC_END(vsync_end) REG_FIELD_PREP(VSYNC_END_MASK, (vsync_end)) +#define VSYNC_START_MASK REG_GENMASK(15, 0) +#define VSYNC_START(vsync_start) REG_FIELD_PREP(VSYNC_START_MASK, (vsync_start)) +#define _TRANS_EXITLINE_A 0x60018 +#define _PIPEASRC 0x6001c #define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16) #define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w)) #define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0) #define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h)) -#define _BCLRPAT_A 0x60020 -#define _VSYNCSHIFT_A 0x60028 -#define _PIPE_MULT_A 0x6002c - -/* Pipe B timing regs */ -#define _HTOTAL_B 0x61000 -#define _HBLANK_B 0x61004 -#define _HSYNC_B 0x61008 -#define _VTOTAL_B 0x6100c -#define _VBLANK_B 0x61010 -#define _VSYNC_B 0x61014 -#define _PIPEBSRC 0x6101c -#define _BCLRPAT_B 0x61020 -#define _VSYNCSHIFT_B 0x61028 -#define _PIPE_MULT_B 0x6102c +#define _BCLRPAT_A 0x60020 +#define _TRANS_VSYNCSHIFT_A 0x60028 +#define _TRANS_MULT_A 0x6002c + +/* Pipe/transcoder B 
timing regs */ +#define _TRANS_HTOTAL_B 0x61000 +#define _TRANS_HBLANK_B 0x61004 +#define _TRANS_HSYNC_B 0x61008 +#define _TRANS_VTOTAL_B 0x6100c +#define _TRANS_VBLANK_B 0x61010 +#define _TRANS_VSYNC_B 0x61014 +#define _PIPEBSRC 0x6101c +#define _BCLRPAT_B 0x61020 +#define _TRANS_VSYNCSHIFT_B 0x61028 +#define _TRANS_MULT_B 0x6102c /* DSI 0 timing regs */ -#define _HTOTAL_DSI0 0x6b000 -#define _HSYNC_DSI0 0x6b008 -#define _VTOTAL_DSI0 0x6b00c -#define _VSYNC_DSI0 0x6b014 -#define _VSYNCSHIFT_DSI0 0x6b028 +#define _TRANS_HTOTAL_DSI0 0x6b000 +#define _TRANS_HSYNC_DSI0 0x6b008 +#define _TRANS_VTOTAL_DSI0 0x6b00c +#define _TRANS_VSYNC_DSI0 0x6b014 +#define _TRANS_VSYNCSHIFT_DSI0 0x6b028 /* DSI 1 timing regs */ -#define _HTOTAL_DSI1 0x6b800 -#define _HSYNC_DSI1 0x6b808 -#define _VTOTAL_DSI1 0x6b80c -#define _VSYNC_DSI1 0x6b814 -#define _VSYNCSHIFT_DSI1 0x6b828 +#define _TRANS_HTOTAL_DSI1 0x6b800 +#define _TRANS_HSYNC_DSI1 0x6b808 +#define _TRANS_VTOTAL_DSI1 0x6b80c +#define _TRANS_VSYNC_DSI1 0x6b814 +#define _TRANS_VSYNCSHIFT_DSI1 0x6b828 #define TRANSCODER_A_OFFSET 0x60000 #define TRANSCODER_B_OFFSET 0x61000 @@ -1958,18 +1990,18 @@ #define TRANSCODER_DSI0_OFFSET 0x6b000 #define TRANSCODER_DSI1_OFFSET 0x6b800 -#define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A) -#define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A) -#define HSYNC(trans) _MMIO_TRANS2(trans, _HSYNC_A) -#define VTOTAL(trans) _MMIO_TRANS2(trans, _VTOTAL_A) -#define VBLANK(trans) _MMIO_TRANS2(trans, _VBLANK_A) -#define VSYNC(trans) _MMIO_TRANS2(trans, _VSYNC_A) -#define BCLRPAT(trans) _MMIO_TRANS2(trans, _BCLRPAT_A) -#define VSYNCSHIFT(trans) _MMIO_TRANS2(trans, _VSYNCSHIFT_A) -#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC) -#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A) - -#define EXITLINE(trans) _MMIO_TRANS2(trans, _EXITLINE_A) +#define TRANS_HTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_HTOTAL_A) +#define TRANS_HBLANK(trans) _MMIO_TRANS2((trans), _TRANS_HBLANK_A) +#define TRANS_HSYNC(trans) _MMIO_TRANS2((trans), _TRANS_HSYNC_A) +#define TRANS_VTOTAL(trans) _MMIO_TRANS2((trans), _TRANS_VTOTAL_A) +#define TRANS_VBLANK(trans) _MMIO_TRANS2((trans), _TRANS_VBLANK_A) +#define TRANS_VSYNC(trans) _MMIO_TRANS2((trans), _TRANS_VSYNC_A) +#define BCLRPAT(trans) _MMIO_TRANS2((trans), _BCLRPAT_A) +#define TRANS_VSYNCSHIFT(trans) _MMIO_TRANS2((trans), _TRANS_VSYNCSHIFT_A) +#define PIPESRC(pipe) _MMIO_TRANS2((pipe), _PIPEASRC) +#define TRANS_MULT(trans) _MMIO_TRANS2((trans), _TRANS_MULT_A) + +#define TRANS_EXITLINE(trans) _MMIO_TRANS2((trans), _TRANS_EXITLINE_A) #define EXITLINE_ENABLE REG_BIT(31) #define EXITLINE_MASK REG_GENMASK(12, 0) #define EXITLINE_SHIFT 0 @@ -2266,110 +2298,6 @@ #define ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME REG_BIT(14) #define ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME REG_BIT(13) -/* Icelake DSC Rate Control Range Parameter Registers */ -#define DSCA_RC_RANGE_PARAMETERS_0 _MMIO(0x6B240) -#define DSCA_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6B240 + 4) -#define DSCC_RC_RANGE_PARAMETERS_0 _MMIO(0x6BA40) -#define DSCC_RC_RANGE_PARAMETERS_0_UDW _MMIO(0x6BA40 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB (0x78208) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB (0x78208 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB (0x78308) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB (0x78308 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC (0x78408) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC (0x78408 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC (0x78508) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC 
(0x78508 + 4) -#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC) -#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC) -#define RC_BPG_OFFSET_SHIFT 10 -#define RC_MAX_QP_SHIFT 5 -#define RC_MIN_QP_SHIFT 0 - -#define DSCA_RC_RANGE_PARAMETERS_1 _MMIO(0x6B248) -#define DSCA_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6B248 + 4) -#define DSCC_RC_RANGE_PARAMETERS_1 _MMIO(0x6BA48) -#define DSCC_RC_RANGE_PARAMETERS_1_UDW _MMIO(0x6BA48 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB (0x78210) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB (0x78210 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB (0x78310) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB (0x78310 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC (0x78410) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC (0x78410 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC (0x78510) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC (0x78510 + 4) -#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC) -#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC) - -#define DSCA_RC_RANGE_PARAMETERS_2 _MMIO(0x6B250) -#define DSCA_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6B250 + 4) -#define DSCC_RC_RANGE_PARAMETERS_2 _MMIO(0x6BA50) -#define DSCC_RC_RANGE_PARAMETERS_2_UDW _MMIO(0x6BA50 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB (0x78218) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB (0x78218 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB (0x78318) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB (0x78318 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC (0x78418) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC (0x78418 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC (0x78518) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC (0x78518 + 4) -#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC) -#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC) - -#define DSCA_RC_RANGE_PARAMETERS_3 _MMIO(0x6B258) -#define DSCA_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6B258 + 4) -#define DSCC_RC_RANGE_PARAMETERS_3 _MMIO(0x6BA58) 
-#define DSCC_RC_RANGE_PARAMETERS_3_UDW _MMIO(0x6BA58 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB (0x78220) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB (0x78220 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB (0x78320) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB (0x78320 + 4) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC (0x78420) -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC (0x78420 + 4) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC (0x78520) -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC (0x78520 + 4) -#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC) -#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \ - _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC) -#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \ - _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC) - /* VGA port control */ #define ADPA _MMIO(0x61100) #define PCH_ADPA _MMIO(0xe1100) @@ -2451,18 +2379,7 @@ #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) #define PORT_HOTPLUG_STAT _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114) -/* - * HDMI/DP bits are g4x+ - * - * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. - * Please check the detailed lore in the commit message for for experimental - * evidence. - */ -/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */ -#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29) -#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28) -#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27) -/* G4X/VLV/CHV DP/HDMI bits again match Bspec */ +/* HDMI/DP bits are g4x+ */ #define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27) #define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28) #define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29) @@ -2592,59 +2509,6 @@ #define SDVO_PIPE_SEL_MASK_CHV (3 << 24) #define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24) -/* LVDS port control */ -#define LVDS _MMIO(0x61180) -/* - * Enables the LVDS port. This bit must be set before DPLLs are enabled, as - * the DPLL semantics change when the LVDS is assigned to that pipe. - */ -#define LVDS_PORT_EN (1 << 31) -/* Selects pipe B for LVDS data. Must be set on pre-965. */ -#define LVDS_PIPE_SEL_SHIFT 30 -#define LVDS_PIPE_SEL_MASK (1 << 30) -#define LVDS_PIPE_SEL(pipe) ((pipe) << 30) -#define LVDS_PIPE_SEL_SHIFT_CPT 29 -#define LVDS_PIPE_SEL_MASK_CPT (3 << 29) -#define LVDS_PIPE_SEL_CPT(pipe) ((pipe) << 29) -/* LVDS dithering flag on 965/g4x platform */ -#define LVDS_ENABLE_DITHER (1 << 25) -/* LVDS sync polarity flags. Set to invert (i.e. negative) */ -#define LVDS_VSYNC_POLARITY (1 << 21) -#define LVDS_HSYNC_POLARITY (1 << 20) - -/* Enable border for unscaled (or aspect-scaled) display */ -#define LVDS_BORDER_ENABLE (1 << 15) -/* - * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per - * pixel. - */ -#define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) -#define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) -#define LVDS_A0A2_CLKA_POWER_UP (3 << 8) -/* - * Controls the A3 data pair, which contains the additional LSBs for 24 bit - * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be - * on. 
- */ -#define LVDS_A3_POWER_MASK (3 << 6) -#define LVDS_A3_POWER_DOWN (0 << 6) -#define LVDS_A3_POWER_UP (3 << 6) -/* - * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP - * is set. - */ -#define LVDS_CLKB_POWER_MASK (3 << 4) -#define LVDS_CLKB_POWER_DOWN (0 << 4) -#define LVDS_CLKB_POWER_UP (3 << 4) -/* - * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 - * setting for whether we are in dual-channel mode. The B3 pair will - * additionally only be powered up when LVDS_A3_POWER_UP is set. - */ -#define LVDS_B0B3_POWER_MASK (3 << 2) -#define LVDS_B0B3_POWER_DOWN (0 << 2) -#define LVDS_B0B3_POWER_UP (3 << 2) - /* Video Data Island Packet control */ #define VIDEO_DIP_DATA _MMIO(0x61178) /* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC @@ -3492,61 +3356,61 @@ #define _PIPEADSL 0x70000 #define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */ #define PIPEDSL_LINE_MASK REG_GENMASK(19, 0) -#define _PIPEACONF 0x70008 -#define PIPECONF_ENABLE REG_BIT(31) -#define PIPECONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */ -#define PIPECONF_STATE_ENABLE REG_BIT(30) /* i965+ */ -#define PIPECONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */ -#define PIPECONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */ -#define PIPECONF_FRAME_START_DELAY(x) REG_FIELD_PREP(PIPECONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */ -#define PIPECONF_PIPE_LOCKED REG_BIT(25) -#define PIPECONF_FORCE_BORDER REG_BIT(25) -#define PIPECONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */ -#define PIPECONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */ -#define PIPECONF_GAMMA_MODE_8BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 0) -#define PIPECONF_GAMMA_MODE_10BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 1) -#define PIPECONF_GAMMA_MODE_12BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */ -#define PIPECONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */ -#define PIPECONF_GAMMA_MODE(x) REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */ -#define PIPECONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */ -#define PIPECONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 0) -#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 4) /* gen4 only */ -#define PIPECONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 5) /* gen4 only */ -#define PIPECONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 6) -#define PIPECONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 7) /* gen3 only */ +#define _TRANSACONF 0x70008 +#define TRANSCONF_ENABLE REG_BIT(31) +#define TRANSCONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */ +#define TRANSCONF_STATE_ENABLE REG_BIT(30) /* i965+ */ +#define TRANSCONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */ +#define TRANSCONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */ +#define TRANSCONF_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANSCONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */ +#define TRANSCONF_PIPE_LOCKED REG_BIT(25) +#define TRANSCONF_FORCE_BORDER REG_BIT(25) +#define TRANSCONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */ +#define TRANSCONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */ +#define TRANSCONF_GAMMA_MODE_8BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 0) +#define TRANSCONF_GAMMA_MODE_10BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK, 1) +#define TRANSCONF_GAMMA_MODE_12BIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */ +#define 
TRANSCONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */ +#define TRANSCONF_GAMMA_MODE(x) REG_FIELD_PREP(TRANSCONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */ +#define TRANSCONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */ +#define TRANSCONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 0) +#define TRANSCONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 4) /* gen4 only */ +#define TRANSCONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 5) /* gen4 only */ +#define TRANSCONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 6) +#define TRANSCONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK, 7) /* gen3 only */ /* * ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display, * DBL=power saving pixel doubling, PF-ID* requires panel fitter */ -#define PIPECONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */ -#define PIPECONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */ -#define PIPECONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 0) -#define PIPECONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 1) -#define PIPECONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 3) -#define PIPECONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */ -#define PIPECONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */ -#define PIPECONF_REFRESH_RATE_ALT_ILK REG_BIT(20) -#define PIPECONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */ -#define PIPECONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(PIPECONF_MSA_TIMING_DELAY_MASK, (x)) -#define PIPECONF_CXSR_DOWNCLOCK REG_BIT(16) -#define PIPECONF_REFRESH_RATE_ALT_VLV REG_BIT(14) -#define PIPECONF_COLOR_RANGE_SELECT REG_BIT(13) -#define PIPECONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */ -#define PIPECONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */ -#define PIPECONF_BPC_8 REG_FIELD_PREP(PIPECONF_BPC_MASK, 0) -#define PIPECONF_BPC_10 REG_FIELD_PREP(PIPECONF_BPC_MASK, 1) -#define PIPECONF_BPC_6 REG_FIELD_PREP(PIPECONF_BPC_MASK, 2) -#define PIPECONF_BPC_12 REG_FIELD_PREP(PIPECONF_BPC_MASK, 3) -#define PIPECONF_DITHER_EN REG_BIT(4) -#define PIPECONF_DITHER_TYPE_MASK REG_GENMASK(3, 2) -#define PIPECONF_DITHER_TYPE_SP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 0) -#define PIPECONF_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 1) -#define PIPECONF_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 2) -#define PIPECONF_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 3) +#define TRANSCONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */ +#define TRANSCONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */ +#define TRANSCONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 0) +#define TRANSCONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 1) +#define TRANSCONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 3) +#define TRANSCONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only 
*/ +#define TRANSCONF_INTERLACE_PF_ID_DBL_ILK REG_FIELD_PREP(TRANSCONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */ +#define TRANSCONF_REFRESH_RATE_ALT_ILK REG_BIT(20) +#define TRANSCONF_MSA_TIMING_DELAY_MASK REG_GENMASK(19, 18) /* ilk/snb/ivb */ +#define TRANSCONF_MSA_TIMING_DELAY(x) REG_FIELD_PREP(TRANSCONF_MSA_TIMING_DELAY_MASK, (x)) +#define TRANSCONF_CXSR_DOWNCLOCK REG_BIT(16) +#define TRANSCONF_REFRESH_RATE_ALT_VLV REG_BIT(14) +#define TRANSCONF_COLOR_RANGE_SELECT REG_BIT(13) +#define TRANSCONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */ +#define TRANSCONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */ +#define TRANSCONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */ +#define TRANSCONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(TRANSCONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */ +#define TRANSCONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */ +#define TRANSCONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */ +#define TRANSCONF_BPC_8 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 0) +#define TRANSCONF_BPC_10 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 1) +#define TRANSCONF_BPC_6 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 2) +#define TRANSCONF_BPC_12 REG_FIELD_PREP(TRANSCONF_BPC_MASK, 3) +#define TRANSCONF_DITHER_EN REG_BIT(4) +#define TRANSCONF_DITHER_TYPE_MASK REG_GENMASK(3, 2) +#define TRANSCONF_DITHER_TYPE_SP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 0) +#define TRANSCONF_DITHER_TYPE_ST1 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 1) +#define TRANSCONF_DITHER_TYPE_ST2 REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 2) +#define TRANSCONF_DITHER_TYPE_TEMP REG_FIELD_PREP(TRANSCONF_DITHER_TYPE_MASK, 3) #define _PIPEASTAT 0x70024 #define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31) #define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30) @@ -3615,7 +3479,7 @@ #define PIPE_DSI0_OFFSET 0x7b000 #define PIPE_DSI1_OFFSET 0x7b800 -#define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF) +#define TRANSCONF(trans) _MMIO_PIPE2((trans), _TRANSACONF) #define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL) #define PIPEFRAME(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEHIGH) #define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL) @@ -4255,7 +4119,7 @@ /* Pipe B */ #define _PIPEBDSL (DISPLAY_MMIO_BASE(dev_priv) + 0x71000) -#define _PIPEBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008) +#define _TRANSBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008) #define _PIPEBSTAT (DISPLAY_MMIO_BASE(dev_priv) + 0x71024) #define _PIPEBFRAMEHIGH 0x71040 #define _PIPEBFRAMEPIXEL 0x71044 @@ -5432,6 +5296,7 @@ #define GEN8_PIPE_CDCLK_CRC_DONE (1 << 28) #define XELPD_PIPE_SOFT_UNDERRUN (1 << 22) #define XELPD_PIPE_HARD_UNDERRUN (1 << 21) +#define GEN12_PIPE_VBLANK_UNMOD (1 << 19) #define GEN8_PIPE_CURSOR_FAULT (1 << 10) #define GEN8_PIPE_SPRITE_FAULT (1 << 9) #define GEN8_PIPE_PRIMARY_FAULT (1 << 8) @@ -6392,9 +6257,6 @@ #define FDI_PLL_CTL_1 _MMIO(0xfe000) #define FDI_PLL_CTL_2 _MMIO(0xfe004) -#define PCH_LVDS _MMIO(0xe1180) -#define LVDS_DETECTED (1 << 1) - #define _PCH_DP_B 0xe4100 #define PCH_DP_B _MMIO(_PCH_DP_B) #define _PCH_DPB_AUX_CH_CTL 0xe4110 @@ -7224,21 +7086,23 @@ enum skl_power_gate { ADLS_DPCLKA_DDIK_SEL_MASK) /* ICL PLL */ -#define DPLL0_ENABLE 0x46010 -#define DPLL1_ENABLE 0x46014 +#define _DPLL0_ENABLE 0x46010 +#define _DPLL1_ENABLE 0x46014 #define _ADLS_DPLL2_ENABLE 0x46018 #define _ADLS_DPLL3_ENABLE 0x46030 -#define PLL_ENABLE (1 << 31) -#define PLL_LOCK (1 << 30) -#define PLL_POWER_ENABLE (1 << 27) -#define PLL_POWER_STATE (1 << 26) -#define ICL_DPLL_ENABLE(pll) 
_MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \ - _ADLS_DPLL2_ENABLE, _ADLS_DPLL3_ENABLE) +#define PLL_ENABLE REG_BIT(31) +#define PLL_LOCK REG_BIT(30) +#define PLL_POWER_ENABLE REG_BIT(27) +#define PLL_POWER_STATE REG_BIT(26) +#define ICL_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \ + _DPLL0_ENABLE, _DPLL1_ENABLE, \ + _ADLS_DPLL3_ENABLE, _ADLS_DPLL3_ENABLE)) #define _DG2_PLL3_ENABLE 0x4601C -#define DG2_PLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \ - _ADLS_DPLL2_ENABLE, _DG2_PLL3_ENABLE) +#define DG2_PLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 3, \ + _DPLL0_ENABLE, _DPLL1_ENABLE, \ + _DG2_PLL3_ENABLE, _DG2_PLL3_ENABLE)) #define TBT_PLL_ENABLE _MMIO(0x46020) @@ -7246,13 +7110,14 @@ enum skl_power_gate { #define _MG_PLL2_ENABLE 0x46034 #define _MG_PLL3_ENABLE 0x46038 #define _MG_PLL4_ENABLE 0x4603C -/* Bits are the same as DPLL0_ENABLE */ +/* Bits are the same as _DPLL0_ENABLE */ #define MG_PLL_ENABLE(tc_port) _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \ _MG_PLL2_ENABLE) /* DG1 PLL */ -#define DG1_DPLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \ - _MG_PLL1_ENABLE, _MG_PLL2_ENABLE) +#define DG1_DPLL_ENABLE(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ + _DPLL0_ENABLE, _DPLL1_ENABLE, \ + _MG_PLL1_ENABLE, _MG_PLL2_ENABLE)) /* ADL-P Type C PLL */ #define PORTTC1_PLL_ENABLE 0x46038 @@ -7312,9 +7177,9 @@ enum skl_power_gate { #define _TGL_DPLL0_CFGCR0 0x164284 #define _TGL_DPLL1_CFGCR0 0x16428C #define _TGL_TBTPLL_CFGCR0 0x16429C -#define TGL_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \ - _TGL_DPLL1_CFGCR0, \ - _TGL_TBTPLL_CFGCR0) +#define TGL_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ + _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \ + _TGL_TBTPLL_CFGCR0, _TGL_TBTPLL_CFGCR0)) #define RKL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR0, \ _TGL_DPLL1_CFGCR0) @@ -7327,40 +7192,36 @@ enum skl_power_gate { #define _TGL_DPLL0_CFGCR1 0x164288 #define _TGL_DPLL1_CFGCR1 0x164290 #define _TGL_TBTPLL_CFGCR1 0x1642A0 -#define TGL_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \ - _TGL_DPLL1_CFGCR1, \ - _TGL_TBTPLL_CFGCR1) +#define TGL_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ + _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \ + _TGL_TBTPLL_CFGCR1, _TGL_TBTPLL_CFGCR1)) #define RKL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \ _TGL_DPLL1_CFGCR1) #define _DG1_DPLL2_CFGCR0 0x16C284 #define _DG1_DPLL3_CFGCR0 0x16C28C -#define DG1_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \ - _TGL_DPLL1_CFGCR0, \ - _DG1_DPLL2_CFGCR0, \ - _DG1_DPLL3_CFGCR0) +#define DG1_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ + _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \ + _DG1_DPLL2_CFGCR0, _DG1_DPLL3_CFGCR0)) #define _DG1_DPLL2_CFGCR1 0x16C288 #define _DG1_DPLL3_CFGCR1 0x16C290 -#define DG1_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \ - _TGL_DPLL1_CFGCR1, \ - _DG1_DPLL2_CFGCR1, \ - _DG1_DPLL3_CFGCR1) +#define DG1_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ + _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \ + _DG1_DPLL2_CFGCR1, _DG1_DPLL3_CFGCR1)) /* For ADL-S DPLL4_CFGCR0/1 are used to control DPLL2 */ -#define _ADLS_DPLL3_CFGCR0 0x1642C0 #define _ADLS_DPLL4_CFGCR0 0x164294 -#define ADLS_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \ - _TGL_DPLL1_CFGCR0, \ - _ADLS_DPLL4_CFGCR0, \ - _ADLS_DPLL3_CFGCR0) +#define _ADLS_DPLL3_CFGCR0 0x1642C0 +#define ADLS_DPLL_CFGCR0(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ + _TGL_DPLL0_CFGCR0, _TGL_DPLL1_CFGCR0, \ + _ADLS_DPLL4_CFGCR0, _ADLS_DPLL3_CFGCR0)) -#define _ADLS_DPLL3_CFGCR1 0x1642C4 #define _ADLS_DPLL4_CFGCR1 0x164298 
-#define ADLS_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \ - _TGL_DPLL1_CFGCR1, \ - _ADLS_DPLL4_CFGCR1, \ - _ADLS_DPLL3_CFGCR1) +#define _ADLS_DPLL3_CFGCR1 0x1642C4 +#define ADLS_DPLL_CFGCR1(pll) _MMIO(_PICK_EVEN_2RANGES(pll, 2, \ + _TGL_DPLL0_CFGCR1, _TGL_DPLL1_CFGCR1, \ + _ADLS_DPLL4_CFGCR1, _ADLS_DPLL3_CFGCR1)) /* BXT display engine PLL */ #define BXT_DE_PLL_CTL _MMIO(0x6d000) @@ -7693,44 +7554,6 @@ enum skl_power_gate { #define PIPE_FRMTMSTMP(pipe) \ _MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A) -/* Display Stream Splitter Control */ -#define DSS_CTL1 _MMIO(0x67400) -#define SPLITTER_ENABLE (1 << 31) -#define JOINER_ENABLE (1 << 30) -#define DUAL_LINK_MODE_INTERLEAVE (1 << 24) -#define DUAL_LINK_MODE_FRONTBACK (0 << 24) -#define OVERLAP_PIXELS_MASK (0xf << 16) -#define OVERLAP_PIXELS(pixels) ((pixels) << 16) -#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) -#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) -#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0 - -#define DSS_CTL2 _MMIO(0x67404) -#define LEFT_BRANCH_VDSC_ENABLE (1 << 31) -#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15) -#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0) -#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0) - -#define _ICL_PIPE_DSS_CTL1_PB 0x78200 -#define _ICL_PIPE_DSS_CTL1_PC 0x78400 -#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_PIPE_DSS_CTL1_PB, \ - _ICL_PIPE_DSS_CTL1_PC) -#define BIG_JOINER_ENABLE (1 << 29) -#define MASTER_BIG_JOINER_ENABLE (1 << 28) -#define VGA_CENTERING_ENABLE (1 << 27) -#define SPLITTER_CONFIGURATION_MASK REG_GENMASK(26, 25) -#define SPLITTER_CONFIGURATION_2_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 0) -#define SPLITTER_CONFIGURATION_4_SEGMENT REG_FIELD_PREP(SPLITTER_CONFIGURATION_MASK, 1) -#define UNCOMPRESSED_JOINER_MASTER (1 << 21) -#define UNCOMPRESSED_JOINER_SLAVE (1 << 20) - -#define _ICL_PIPE_DSS_CTL2_PB 0x78204 -#define _ICL_PIPE_DSS_CTL2_PC 0x78404 -#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_PIPE_DSS_CTL2_PB, \ - _ICL_PIPE_DSS_CTL2_PC) - #define GGC _MMIO(0x108040) #define GMS_MASK REG_GENMASK(15, 8) #define GGMS_MASK REG_GENMASK(7, 6) @@ -7754,314 +7577,6 @@ enum skl_power_gate { #define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23) #define DG2_PHY_DP_TX_ACK_MASK REG_GENMASK(23, 20) -/* Icelake Display Stream Compression Registers */ -#define DSCA_PICTURE_PARAMETER_SET_0 _MMIO(0x6B200) -#define DSCC_PICTURE_PARAMETER_SET_0 _MMIO(0x6BA00) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570 -#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC) -#define DSC_ALT_ICH_SEL (1 << 20) -#define DSC_VBR_ENABLE (1 << 19) -#define DSC_422_ENABLE (1 << 18) -#define DSC_COLOR_SPACE_CONVERSION (1 << 17) -#define DSC_BLOCK_PREDICTION (1 << 16) -#define DSC_LINE_BUF_DEPTH_SHIFT 12 -#define DSC_BPC_SHIFT 8 -#define DSC_VER_MIN_SHIFT 4 -#define DSC_VER_MAJ (0x1 << 0) - -#define DSCA_PICTURE_PARAMETER_SET_1 _MMIO(0x6B204) -#define DSCC_PICTURE_PARAMETER_SET_1 _MMIO(0x6BA04) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374 -#define 
_ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574 -#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC) -#define DSC_BPP(bpp) ((bpp) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_2 _MMIO(0x6B208) -#define DSCC_PICTURE_PARAMETER_SET_2 _MMIO(0x6BA08) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578 -#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC) -#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16) -#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_3 _MMIO(0x6B20C) -#define DSCC_PICTURE_PARAMETER_SET_3 _MMIO(0x6BA0C) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C -#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C -#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C -#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C -#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC) -#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16) -#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_4 _MMIO(0x6B210) -#define DSCC_PICTURE_PARAMETER_SET_4 _MMIO(0x6BA10) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580 -#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC) -#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16) -#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_5 _MMIO(0x6B214) -#define DSCC_PICTURE_PARAMETER_SET_5 _MMIO(0x6BA14) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584 -#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC) -#define DSC_SCALE_DEC_INT(scale_dec) ((scale_dec) << 16) -#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_6 
_MMIO(0x6B218) -#define DSCC_PICTURE_PARAMETER_SET_6 _MMIO(0x6BA18) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588 -#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC) -#define DSC_FLATNESS_MAX_QP(max_qp) ((max_qp) << 24) -#define DSC_FLATNESS_MIN_QP(min_qp) ((min_qp) << 16) -#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8) -#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_7 _MMIO(0x6B21C) -#define DSCC_PICTURE_PARAMETER_SET_7 _MMIO(0x6BA1C) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C -#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C -#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C -#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C -#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC) -#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16) -#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_8 _MMIO(0x6B220) -#define DSCC_PICTURE_PARAMETER_SET_8 _MMIO(0x6BA20) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590 -#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC) -#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16) -#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_9 _MMIO(0x6B224) -#define DSCC_PICTURE_PARAMETER_SET_9 _MMIO(0x6BA24) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594 -#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC) -#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16) -#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_10 _MMIO(0x6B228) -#define DSCC_PICTURE_PARAMETER_SET_10 _MMIO(0x6BA28) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598 -#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - 
_ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC) -#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20) -#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16) -#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8) -#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0) - -#define DSCA_PICTURE_PARAMETER_SET_11 _MMIO(0x6B22C) -#define DSCC_PICTURE_PARAMETER_SET_11 _MMIO(0x6BA2C) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C -#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C -#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C -#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C -#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC) - -#define DSCA_PICTURE_PARAMETER_SET_12 _MMIO(0x6B260) -#define DSCC_PICTURE_PARAMETER_SET_12 _MMIO(0x6BA60) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0 -#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC) - -#define DSCA_PICTURE_PARAMETER_SET_13 _MMIO(0x6B264) -#define DSCC_PICTURE_PARAMETER_SET_13 _MMIO(0x6BA64) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4 -#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC) - -#define DSCA_PICTURE_PARAMETER_SET_14 _MMIO(0x6B268) -#define DSCC_PICTURE_PARAMETER_SET_14 _MMIO(0x6BA68) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8 -#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC) - -#define DSCA_PICTURE_PARAMETER_SET_15 _MMIO(0x6B26C) -#define DSCC_PICTURE_PARAMETER_SET_15 _MMIO(0x6BA6C) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB 0x782AC -#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB 0x783AC -#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC 0x784AC -#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC 0x785AC -#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - 
_ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC) - -#define DSCA_PICTURE_PARAMETER_SET_16 _MMIO(0x6B270) -#define DSCC_PICTURE_PARAMETER_SET_16 _MMIO(0x6BA70) -#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB 0x782B0 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB 0x783B0 -#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC 0x784B0 -#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC 0x785B0 -#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \ - _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC) -#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \ - _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC) -#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20) -#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16) -#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0) - -/* Icelake Rate Control Buffer Threshold Registers */ -#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230) -#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4) -#define DSCC_RC_BUF_THRESH_0 _MMIO(0x6BA30) -#define DSCC_RC_BUF_THRESH_0_UDW _MMIO(0x6BA30 + 4) -#define _ICL_DSC0_RC_BUF_THRESH_0_PB (0x78254) -#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB (0x78254 + 4) -#define _ICL_DSC1_RC_BUF_THRESH_0_PB (0x78354) -#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB (0x78354 + 4) -#define _ICL_DSC0_RC_BUF_THRESH_0_PC (0x78454) -#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC (0x78454 + 4) -#define _ICL_DSC1_RC_BUF_THRESH_0_PC (0x78554) -#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC (0x78554 + 4) -#define ICL_DSC0_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_BUF_THRESH_0_PB, \ - _ICL_DSC0_RC_BUF_THRESH_0_PC) -#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \ - _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC) -#define ICL_DSC1_RC_BUF_THRESH_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_BUF_THRESH_0_PB, \ - _ICL_DSC1_RC_BUF_THRESH_0_PC) -#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \ - _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC) - -#define DSCA_RC_BUF_THRESH_1 _MMIO(0x6B238) -#define DSCA_RC_BUF_THRESH_1_UDW _MMIO(0x6B238 + 4) -#define DSCC_RC_BUF_THRESH_1 _MMIO(0x6BA38) -#define DSCC_RC_BUF_THRESH_1_UDW _MMIO(0x6BA38 + 4) -#define _ICL_DSC0_RC_BUF_THRESH_1_PB (0x7825C) -#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB (0x7825C + 4) -#define _ICL_DSC1_RC_BUF_THRESH_1_PB (0x7835C) -#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB (0x7835C + 4) -#define _ICL_DSC0_RC_BUF_THRESH_1_PC (0x7845C) -#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC (0x7845C + 4) -#define _ICL_DSC1_RC_BUF_THRESH_1_PC (0x7855C) -#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC (0x7855C + 4) -#define ICL_DSC0_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_BUF_THRESH_1_PB, \ - _ICL_DSC0_RC_BUF_THRESH_1_PC) -#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \ - _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC) -#define ICL_DSC1_RC_BUF_THRESH_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_BUF_THRESH_1_PB, \ - _ICL_DSC1_RC_BUF_THRESH_1_PC) -#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe) _MMIO_PIPE((pipe) - PIPE_B, \ - _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ - _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) - 
#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0) #define MODULAR_FIA_MASK (1 << 4) #define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6)) @@ -8105,8 +7620,54 @@ enum skl_power_gate { #define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0) #define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4) #define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8) -#define DSB_ENABLE (1 << 31) -#define DSB_STATUS_BUSY (1 << 0) +#define DSB_ENABLE REG_BIT(31) +#define DSB_BUF_REITERATE REG_BIT(29) +#define DSB_WAIT_FOR_VBLANK REG_BIT(28) +#define DSB_WAIT_FOR_LINE_IN REG_BIT(27) +#define DSB_HALT REG_BIT(16) +#define DSB_NON_POSTED REG_BIT(8) +#define DSB_STATUS_BUSY REG_BIT(0) +#define DSB_MMIOCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xc) +#define DSB_MMIO_DEAD_CLOCKS_ENABLE REG_BIT(31) +#define DSB_MMIO_DEAD_CLOCKS_COUNT_MASK REG_GENMASK(15, 8) +#define DSB_MMIO_DEAD_CLOCKS_COUNT(x) REG_FIELD_PREP(DSB_MMIO_DEAD_CLOCK_COUNT_MASK, (x)) +#define DSB_MMIO_CYCLES_MASK REG_GENMASK(7, 0) +#define DSB_MMIO_CYCLES(x) REG_FIELD_PREP(DSB_MMIO_CYCLES_MASK, (x)) +#define DSB_POLLFUNC(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x10) +#define DSB_POLL_ENABLE REG_BIT(31) +#define DSB_POLL_WAIT_MASK REG_GENMASK(30, 23) +#define DSB_POLL_WAIT(x) REG_FIELD_PREP(DSB_POLL_WAIT_MASK, (x)) /* usec */ +#define DSB_POLL_COUNT_MASK REG_GENMASK(22, 15) +#define DSB_POLL_COUNT(x) REG_FIELD_PREP(DSB_POLL_COUNT_MASK, (x)) +#define DSB_DEBUG(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x14) +#define DSB_POLLMASK(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x1c) +#define DSB_STATUS(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x24) +#define DSB_INTERRUPT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x28) +#define DSB_ATS_FAULT_INT_EN REG_BIT(20) +#define DSB_GTT_FAULT_INT_EN REG_BIT(19) +#define DSB_RSPTIMEOUT_INT_EN REG_BIT(18) +#define DSB_POLL_ERR_INT_EN REG_BIT(17) +#define DSB_PROG_INT_EN REG_BIT(16) +#define DSB_ATS_FAULT_INT_STATUS REG_BIT(4) +#define DSB_GTT_FAULT_INT_STATUS REG_BIT(3) +#define DSB_RSPTIMEOUT_INT_STATUS REG_BIT(2) +#define DSB_POLL_ERR_INT_STATUS REG_BIT(1) +#define DSB_PROG_INT_STATUS REG_BIT(0) +#define DSB_CURRENT_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x2c) +#define DSB_RM_TIMEOUT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x30) +#define DSB_RM_CLAIM_TIMEOUT REG_BIT(31) +#define DSB_RM_READY_TIMEOUT REG_BIT(30) +#define DSB_RM_CLAIM_TIMEOUT_COUNT_MASK REG_GENMASK(23, 16) +#define DSB_RM_CLAIM_TIMEOUT_COUNT(x) REG_FIELD_PREP(DSB_RM_CLAIM_TIMEOUT_COUNT_MASK, (x)) /* clocks */ +#define DSB_RM_READY_TIMEOUT_VALUE_MASK REG_GENMASK(15, 0) +#define DSB_RM_READY_TIMEOUT_VALUE(x) REG_FIELD_PREP(DSB_RM_READY_TIMEOUT_VALUE, (x)) /* usec */ +#define DSB_RMTIMEOUTREG_CAPTURE(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x34) +#define DSB_PMCTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x38) +#define DSB_PMCTRL_2(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x3c) +#define DSB_PF_LN_LOWER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x40) +#define DSB_PF_LN_UPPER(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x44) +#define DSB_BUFRPT_CNT(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x48) +#define DSB_CHICKEN(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0xf0) #define CLKREQ_POLICY _MMIO(0x101038) #define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1) diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h index be43580a6979..db26de6b57bc 100644 --- a/drivers/gpu/drm/i915/i915_reg_defs.h +++ b/drivers/gpu/drm/i915/i915_reg_defs.h @@ -120,6 +120,35 @@ #define 
_PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a))) /* + * Like _PICK_EVEN(), but supports 2 ranges of evenly spaced address offsets. + * @__c_index corresponds to the index in which the second range starts to be + * used. Using math interval notation, the first range is used for indexes [ 0, + * @__c_index), while the second range is used for [ @__c_index, ... ). Example: + * + * #define _FOO_A 0xf000 + * #define _FOO_B 0xf004 + * #define _FOO_C 0xf008 + * #define _SUPER_FOO_A 0xa000 + * #define _SUPER_FOO_B 0xa100 + * #define FOO(x) _MMIO(_PICK_EVEN_2RANGES(x, 3, \ + * _FOO_A, _FOO_B, \ + * _SUPER_FOO_A, _SUPER_FOO_B)) + * + * This expands to: + * 0: 0xf000, + * 1: 0xf004, + * 2: 0xf008, + * 3: 0xa000, + * 4: 0xa100, + * 5: 0xa200, + * ... + */ +#define _PICK_EVEN_2RANGES(__index, __c_index, __a, __b, __c, __d) \ + (BUILD_BUG_ON_ZERO(!__is_constexpr(__c_index)) + \ + ((__index) < (__c_index) ? _PICK_EVEN(__index, __a, __b) : \ + _PICK_EVEN((__index) - (__c_index), __c, __d))) + +/* * Given the arbitrary numbers in varargs, pick the 0-based __index'th number. * * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced. @@ -136,6 +165,8 @@ typedef struct { u32 reg; } i915_mcr_reg_t; +#define MCR_REG(offset) ((const i915_mcr_reg_t){ .reg = (offset) }) + #define INVALID_MMIO_REG _MMIO(0) /* diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 7503dcb9043b..630a732aaecc 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -48,7 +48,6 @@ #include "i915_driver.h" #include "i915_drv.h" #include "i915_trace.h" -#include "intel_pm.h" struct execute_cb { struct irq_work work; diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 595e8b574990..e88bb4f04305 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -37,7 +37,6 @@ #include "i915_drv.h" #include "i915_sysfs.h" -#include "intel_pm.h" struct drm_i915_private *kdev_minor_to_i915(struct device *kdev) { diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 98769e5f2c3d..fc5cd14adfcc 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -119,9 +119,14 @@ void intel_device_info_print(const struct intel_device_info *info, drm_printf(p, "display version: %u\n", runtime->display.ip.ver); + drm_printf(p, "graphics stepping: %s\n", intel_step_name(runtime->step.graphics_step)); + drm_printf(p, "media stepping: %s\n", intel_step_name(runtime->step.media_step)); + drm_printf(p, "display stepping: %s\n", intel_step_name(runtime->step.display_step)); + drm_printf(p, "base die stepping: %s\n", intel_step_name(runtime->step.basedie_step)); + drm_printf(p, "gt: %d\n", info->gt); - drm_printf(p, "memory-regions: %x\n", runtime->memory_regions); - drm_printf(p, "page-sizes: %x\n", runtime->page_sizes); + drm_printf(p, "memory-regions: 0x%x\n", runtime->memory_regions); + drm_printf(p, "page-sizes: 0x%x\n", runtime->page_sizes); drm_printf(p, "platform: %s\n", intel_platform_name(info->platform)); drm_printf(p, "ppgtt-size: %d\n", runtime->ppgtt_size); drm_printf(p, "ppgtt-type: %d\n", runtime->ppgtt_type); @@ -202,6 +207,10 @@ static const u16 subplatform_rpl_ids[] = { INTEL_RPLP_IDS(0), }; +static const u16 subplatform_rplu_ids[] = { + INTEL_RPLU_IDS(0), +}; + static const u16 subplatform_g10_ids[] = { INTEL_DG2_G10_IDS(0), INTEL_ATS_M150_IDS(0), @@ -269,6 +278,9 @@ static void 
intel_device_info_subplatform_init(struct drm_i915_private *i915) } else if (find_devid(devid, subplatform_rpl_ids, ARRAY_SIZE(subplatform_rpl_ids))) { mask = BIT(INTEL_SUBPLATFORM_RPL); + if (find_devid(devid, subplatform_rplu_ids, + ARRAY_SIZE(subplatform_rplu_ids))) + mask |= BIT(INTEL_SUBPLATFORM_RPLU); } else if (find_devid(devid, subplatform_g10_ids, ARRAY_SIZE(subplatform_g10_ids))) { mask = BIT(INTEL_SUBPLATFORM_G10); @@ -436,6 +448,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) runtime->num_sprites[pipe] = 1; } + if (HAS_DISPLAY(dev_priv) && + (IS_DGFX(dev_priv) || DISPLAY_VER(dev_priv) >= 14) && + !(intel_de_read(dev_priv, GU_CNTL_PROTECTED) & DEPRESENT)) { + drm_info(&dev_priv->drm, "Display not present, disabling\n"); + + runtime->pipe_mask = 0; + } + if (HAS_DISPLAY(dev_priv) && IS_GRAPHICS_VER(dev_priv, 7, 8) && HAS_PCH_SPLIT(dev_priv)) { u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); @@ -457,8 +477,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) drm_info(&dev_priv->drm, "Display fused off, disabling\n"); runtime->pipe_mask = 0; - runtime->cpu_transcoder_mask = 0; - runtime->fbc_mask = 0; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { drm_info(&dev_priv->drm, "PipeC fused off\n"); runtime->pipe_mask &= ~BIT(PIPE_C); @@ -535,5 +553,5 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps, { drm_printf(p, "Has logical contexts? %s\n", str_yes_no(caps->has_logical_contexts)); - drm_printf(p, "scheduler: %x\n", caps->scheduler); + drm_printf(p, "scheduler: 0x%x\n", caps->scheduler); } diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 80bda653d61b..b30cc8b97c3a 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -127,6 +127,7 @@ enum intel_platform { * bit set */ #define INTEL_SUBPLATFORM_N 1 +#define INTEL_SUBPLATFORM_RPLU 2 /* MTL */ #define INTEL_SUBPLATFORM_M 0 diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c index 1f4805aa2b08..2b3fe469b360 100644 --- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c +++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c @@ -8,6 +8,7 @@ #include "display/intel_display_types.h" #include "display/intel_dmc_regs.h" #include "display/intel_dpio_phy.h" +#include "display/intel_lvds_regs.h" #include "display/vlv_dsi_pll_regs.h" #include "gt/intel_gt_regs.h" #include "gvt/gvt.h" @@ -117,10 +118,10 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_D(PIPEDSL(PIPE_B)); MMIO_D(PIPEDSL(PIPE_C)); MMIO_D(PIPEDSL(_PIPE_EDP)); - MMIO_D(PIPECONF(PIPE_A)); - MMIO_D(PIPECONF(PIPE_B)); - MMIO_D(PIPECONF(PIPE_C)); - MMIO_D(PIPECONF(_PIPE_EDP)); + MMIO_D(TRANSCONF(TRANSCODER_A)); + MMIO_D(TRANSCONF(TRANSCODER_B)); + MMIO_D(TRANSCONF(TRANSCODER_C)); + MMIO_D(TRANSCONF(TRANSCODER_EDP)); MMIO_D(PIPESTAT(PIPE_A)); MMIO_D(PIPESTAT(PIPE_B)); MMIO_D(PIPESTAT(PIPE_C)); @@ -218,41 +219,41 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_D(SPRSCALE(PIPE_C)); MMIO_D(SPRSURFLIVE(PIPE_C)); MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0)); - MMIO_D(HTOTAL(TRANSCODER_A)); - MMIO_D(HBLANK(TRANSCODER_A)); - MMIO_D(HSYNC(TRANSCODER_A)); - MMIO_D(VTOTAL(TRANSCODER_A)); - MMIO_D(VBLANK(TRANSCODER_A)); - MMIO_D(VSYNC(TRANSCODER_A)); + MMIO_D(TRANS_HTOTAL(TRANSCODER_A)); + MMIO_D(TRANS_HBLANK(TRANSCODER_A)); + MMIO_D(TRANS_HSYNC(TRANSCODER_A)); + MMIO_D(TRANS_VTOTAL(TRANSCODER_A)); + 
MMIO_D(TRANS_VBLANK(TRANSCODER_A)); + MMIO_D(TRANS_VSYNC(TRANSCODER_A)); MMIO_D(BCLRPAT(TRANSCODER_A)); - MMIO_D(VSYNCSHIFT(TRANSCODER_A)); + MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_A)); MMIO_D(PIPESRC(TRANSCODER_A)); - MMIO_D(HTOTAL(TRANSCODER_B)); - MMIO_D(HBLANK(TRANSCODER_B)); - MMIO_D(HSYNC(TRANSCODER_B)); - MMIO_D(VTOTAL(TRANSCODER_B)); - MMIO_D(VBLANK(TRANSCODER_B)); - MMIO_D(VSYNC(TRANSCODER_B)); + MMIO_D(TRANS_HTOTAL(TRANSCODER_B)); + MMIO_D(TRANS_HBLANK(TRANSCODER_B)); + MMIO_D(TRANS_HSYNC(TRANSCODER_B)); + MMIO_D(TRANS_VTOTAL(TRANSCODER_B)); + MMIO_D(TRANS_VBLANK(TRANSCODER_B)); + MMIO_D(TRANS_VSYNC(TRANSCODER_B)); MMIO_D(BCLRPAT(TRANSCODER_B)); - MMIO_D(VSYNCSHIFT(TRANSCODER_B)); + MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_B)); MMIO_D(PIPESRC(TRANSCODER_B)); - MMIO_D(HTOTAL(TRANSCODER_C)); - MMIO_D(HBLANK(TRANSCODER_C)); - MMIO_D(HSYNC(TRANSCODER_C)); - MMIO_D(VTOTAL(TRANSCODER_C)); - MMIO_D(VBLANK(TRANSCODER_C)); - MMIO_D(VSYNC(TRANSCODER_C)); + MMIO_D(TRANS_HTOTAL(TRANSCODER_C)); + MMIO_D(TRANS_HBLANK(TRANSCODER_C)); + MMIO_D(TRANS_HSYNC(TRANSCODER_C)); + MMIO_D(TRANS_VTOTAL(TRANSCODER_C)); + MMIO_D(TRANS_VBLANK(TRANSCODER_C)); + MMIO_D(TRANS_VSYNC(TRANSCODER_C)); MMIO_D(BCLRPAT(TRANSCODER_C)); - MMIO_D(VSYNCSHIFT(TRANSCODER_C)); + MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_C)); MMIO_D(PIPESRC(TRANSCODER_C)); - MMIO_D(HTOTAL(TRANSCODER_EDP)); - MMIO_D(HBLANK(TRANSCODER_EDP)); - MMIO_D(HSYNC(TRANSCODER_EDP)); - MMIO_D(VTOTAL(TRANSCODER_EDP)); - MMIO_D(VBLANK(TRANSCODER_EDP)); - MMIO_D(VSYNC(TRANSCODER_EDP)); + MMIO_D(TRANS_HTOTAL(TRANSCODER_EDP)); + MMIO_D(TRANS_HBLANK(TRANSCODER_EDP)); + MMIO_D(TRANS_HSYNC(TRANSCODER_EDP)); + MMIO_D(TRANS_VTOTAL(TRANSCODER_EDP)); + MMIO_D(TRANS_VBLANK(TRANSCODER_EDP)); + MMIO_D(TRANS_VSYNC(TRANSCODER_EDP)); MMIO_D(BCLRPAT(TRANSCODER_EDP)); - MMIO_D(VSYNCSHIFT(TRANSCODER_EDP)); + MMIO_D(TRANS_VSYNCSHIFT(TRANSCODER_EDP)); MMIO_D(PIPE_DATA_M1(TRANSCODER_A)); MMIO_D(PIPE_DATA_N1(TRANSCODER_A)); MMIO_D(PIPE_DATA_M2(TRANSCODER_A)); @@ -493,9 +494,9 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_D(GAMMA_MODE(PIPE_A)); MMIO_D(GAMMA_MODE(PIPE_B)); MMIO_D(GAMMA_MODE(PIPE_C)); - MMIO_D(PIPE_MULT(PIPE_A)); - MMIO_D(PIPE_MULT(PIPE_B)); - MMIO_D(PIPE_MULT(PIPE_C)); + MMIO_D(TRANS_MULT(TRANSCODER_A)); + MMIO_D(TRANS_MULT(TRANSCODER_B)); + MMIO_D(TRANS_MULT(TRANSCODER_C)); MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_A)); MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_B)); MMIO_D(HSW_TVIDEO_DIP_CTL(TRANSCODER_C)); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 59714b1080d4..c45af0d981fd 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -44,13 +44,6 @@ struct drm_i915_clock_gating_funcs { void (*init_clock_gating)(struct drm_i915_private *i915); }; -/* used in computing the new watermarks state */ -struct intel_wm_config { - unsigned int num_pipes_active; - bool sprites_enabled; - bool sprites_scaled; -}; - static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) { if (HAS_LLC(dev_priv)) { @@ -131,3961 +124,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv) PWM1_GATING_DIS | PWM2_GATING_DIS); } -static void pnv_get_mem_freq(struct drm_i915_private *dev_priv) -{ - u32 tmp; - - tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG); - - switch (tmp & CLKCFG_FSB_MASK) { - case CLKCFG_FSB_533: - dev_priv->fsb_freq = 533; /* 133*4 */ - break; - case CLKCFG_FSB_800: - dev_priv->fsb_freq = 800; /* 200*4 */ - break; - case CLKCFG_FSB_667: - dev_priv->fsb_freq = 667; 
/* 167*4 */ - break; - case CLKCFG_FSB_400: - dev_priv->fsb_freq = 400; /* 100*4 */ - break; - } - - switch (tmp & CLKCFG_MEM_MASK) { - case CLKCFG_MEM_533: - dev_priv->mem_freq = 533; - break; - case CLKCFG_MEM_667: - dev_priv->mem_freq = 667; - break; - case CLKCFG_MEM_800: - dev_priv->mem_freq = 800; - break; - } - - /* detect pineview DDR3 setting */ - tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL); - dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; -} - -static void ilk_get_mem_freq(struct drm_i915_private *dev_priv) -{ - u16 ddrpll, csipll; - - ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1); - csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0); - - switch (ddrpll & 0xff) { - case 0xc: - dev_priv->mem_freq = 800; - break; - case 0x10: - dev_priv->mem_freq = 1066; - break; - case 0x14: - dev_priv->mem_freq = 1333; - break; - case 0x18: - dev_priv->mem_freq = 1600; - break; - default: - drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n", - ddrpll & 0xff); - dev_priv->mem_freq = 0; - break; - } - - switch (csipll & 0x3ff) { - case 0x00c: - dev_priv->fsb_freq = 3200; - break; - case 0x00e: - dev_priv->fsb_freq = 3733; - break; - case 0x010: - dev_priv->fsb_freq = 4266; - break; - case 0x012: - dev_priv->fsb_freq = 4800; - break; - case 0x014: - dev_priv->fsb_freq = 5333; - break; - case 0x016: - dev_priv->fsb_freq = 5866; - break; - case 0x018: - dev_priv->fsb_freq = 6400; - break; - default: - drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n", - csipll & 0x3ff); - dev_priv->fsb_freq = 0; - break; - } -} - -static const struct cxsr_latency cxsr_latency_table[] = { - {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ - {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ - {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ - {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ - {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ - - {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ - {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ - {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ - {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ - {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ - - {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ - {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ - {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ - {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ - {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ - - {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ - {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ - {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ - {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ - {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ - - {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ - {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ - {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ - {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ - {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ - - {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ - {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ - {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ - {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ - {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* 
DDR3-800 SC */ -}; - -static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop, - bool is_ddr3, - int fsb, - int mem) -{ - const struct cxsr_latency *latency; - int i; - - if (fsb == 0 || mem == 0) - return NULL; - - for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { - latency = &cxsr_latency_table[i]; - if (is_desktop == latency->is_desktop && - is_ddr3 == latency->is_ddr3 && - fsb == latency->fsb_freq && mem == latency->mem_freq) - return latency; - } - - DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); - - return NULL; -} - -static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) -{ - u32 val; - - vlv_punit_get(dev_priv); - - val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); - if (enable) - val &= ~FORCE_DDR_HIGH_FREQ; - else - val |= FORCE_DDR_HIGH_FREQ; - val &= ~FORCE_DDR_LOW_FREQ; - val |= FORCE_DDR_FREQ_REQ_ACK; - vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); - - if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & - FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) - drm_err(&dev_priv->drm, - "timed out waiting for Punit DDR DVFS request\n"); - - vlv_punit_put(dev_priv); -} - -static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) -{ - u32 val; - - vlv_punit_get(dev_priv); - - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); - if (enable) - val |= DSP_MAXFIFO_PM5_ENABLE; - else - val &= ~DSP_MAXFIFO_PM5_ENABLE; - vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val); - - vlv_punit_put(dev_priv); -} - -#define FW_WM(value, plane) \ - (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) - -static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) -{ - bool was_enabled; - u32 val; - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); - intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV); - } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) { - was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); - intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); - } else if (IS_PINEVIEW(dev_priv)) { - val = intel_uncore_read(&dev_priv->uncore, DSPFW3); - was_enabled = val & PINEVIEW_SELF_REFRESH_EN; - if (enable) - val |= PINEVIEW_SELF_REFRESH_EN; - else - val &= ~PINEVIEW_SELF_REFRESH_EN; - intel_uncore_write(&dev_priv->uncore, DSPFW3, val); - intel_uncore_posting_read(&dev_priv->uncore, DSPFW3); - } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) { - was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; - val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : - _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val); - intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); - } else if (IS_I915GM(dev_priv)) { - /* - * FIXME can't find a bit like this for 915G, and - * and yet it does have the related watermark in - * FW_BLC_SELF. What's going on? - */ - was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN; - val = enable ? 
_MASKED_BIT_ENABLE(INSTPM_SELF_EN) : - _MASKED_BIT_DISABLE(INSTPM_SELF_EN); - intel_uncore_write(&dev_priv->uncore, INSTPM, val); - intel_uncore_posting_read(&dev_priv->uncore, INSTPM); - } else { - return false; - } - - trace_intel_memory_cxsr(dev_priv, was_enabled, enable); - - drm_dbg_kms(&dev_priv->drm, "memory self-refresh is %s (was %s)\n", - str_enabled_disabled(enable), - str_enabled_disabled(was_enabled)); - - return was_enabled; -} - -/** - * intel_set_memory_cxsr - Configure CxSR state - * @dev_priv: i915 device - * @enable: Allow vs. disallow CxSR - * - * Allow or disallow the system to enter a special CxSR - * (C-state self refresh) state. What typically happens in CxSR mode - * is that several display FIFOs may get combined into a single larger - * FIFO for a particular plane (so called max FIFO mode) to allow the - * system to defer memory fetches longer, and the memory will enter - * self refresh. - * - * Note that enabling CxSR does not guarantee that the system enter - * this special mode, nor does it guarantee that the system stays - * in that mode once entered. So this just allows/disallows the system - * to autonomously utilize the CxSR mode. Other factors such as core - * C-states will affect when/if the system actually enters/exits the - * CxSR mode. - * - * Note that on VLV/CHV this actually only controls the max FIFO mode, - * and the system is free to enter/exit memory self refresh at any time - * even when the use of CxSR has been disallowed. - * - * While the system is actually in the CxSR/max FIFO mode, some plane - * control registers will not get latched on vblank. Thus in order to - * guarantee the system will respond to changes in the plane registers - * we must always disallow CxSR prior to making changes to those registers. - * Unfortunately the system will re-evaluate the CxSR conditions at - * frame start which happens after vblank start (which is when the plane - * registers would get latched), so we can't proceed with the plane update - * during the same frame where we disallowed CxSR. - * - * Certain platforms also have a deeper HPLL SR mode. Fortunately the - * HPLL SR mode depends on CxSR itself, so we don't have to hand hold - * the hardware w.r.t. HPLL SR when writing to plane registers. - * Disallowing just CxSR is sufficient. - */ -bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) -{ - bool ret; - - mutex_lock(&dev_priv->display.wm.wm_mutex); - ret = _intel_set_memory_cxsr(dev_priv, enable); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - dev_priv->display.wm.vlv.cxsr = enable; - else if (IS_G4X(dev_priv)) - dev_priv->display.wm.g4x.cxsr = enable; - mutex_unlock(&dev_priv->display.wm.wm_mutex); - - return ret; -} - -/* - * Latency for FIFO fetches is dependent on several factors: - * - memory configuration (speed, channels) - * - chipset - * - current MCH state - * It can be fairly high in some situations, so here we assume a fairly - * pessimal value. It's a tradeoff between extra memory fetches (if we - * set this value too high, the FIFO will fetch frequently to stay full) - * and power consumption (set it too low to save power and we might see - * FIFO underruns and display "flicker"). - * - * A value of 5us seems to be a good balance; safe for very low end - * platforms but not overly aggressive on lower latency configs. 
- */ -static const int pessimal_latency_ns = 5000; - -#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \ - ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8)) - -static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; - enum pipe pipe = crtc->pipe; - int sprite0_start, sprite1_start; - u32 dsparb, dsparb2, dsparb3; - - switch (pipe) { - case PIPE_A: - dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); - dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); - sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0); - sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4); - break; - case PIPE_B: - dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); - dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); - sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8); - sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12); - break; - case PIPE_C: - dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); - dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3); - sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16); - sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20); - break; - default: - MISSING_CASE(pipe); - return; - } - - fifo_state->plane[PLANE_PRIMARY] = sprite0_start; - fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start; - fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start; - fifo_state->plane[PLANE_CURSOR] = 63; -} - -static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, - enum i9xx_plane_id i9xx_plane) -{ - u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); - int size; - - size = dsparb & 0x7f; - if (i9xx_plane == PLANE_B) - size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; - - drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", - dsparb, plane_name(i9xx_plane), size); - - return size; -} - -static int i830_get_fifo_size(struct drm_i915_private *dev_priv, - enum i9xx_plane_id i9xx_plane) -{ - u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); - int size; - - size = dsparb & 0x1ff; - if (i9xx_plane == PLANE_B) - size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; - size >>= 1; /* Convert to cachelines */ - - drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", - dsparb, plane_name(i9xx_plane), size); - - return size; -} - -static int i845_get_fifo_size(struct drm_i915_private *dev_priv, - enum i9xx_plane_id i9xx_plane) -{ - u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); - int size; - - size = dsparb & 0x7f; - size >>= 2; /* Convert to cachelines */ - - drm_dbg_kms(&dev_priv->drm, "FIFO size - (0x%08x) %c: %d\n", - dsparb, plane_name(i9xx_plane), size); - - return size; -} - -/* Pineview has different values for various configs */ -static const struct intel_watermark_params pnv_display_wm = { - .fifo_size = PINEVIEW_DISPLAY_FIFO, - .max_wm = PINEVIEW_MAX_WM, - .default_wm = PINEVIEW_DFT_WM, - .guard_size = PINEVIEW_GUARD_WM, - .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params pnv_display_hplloff_wm = { - .fifo_size = PINEVIEW_DISPLAY_FIFO, - .max_wm = PINEVIEW_MAX_WM, - .default_wm = PINEVIEW_DFT_HPLLOFF_WM, - .guard_size = PINEVIEW_GUARD_WM, - .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params pnv_cursor_wm = { - .fifo_size = PINEVIEW_CURSOR_FIFO, - 
.max_wm = PINEVIEW_CURSOR_MAX_WM, - .default_wm = PINEVIEW_CURSOR_DFT_WM, - .guard_size = PINEVIEW_CURSOR_GUARD_WM, - .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params pnv_cursor_hplloff_wm = { - .fifo_size = PINEVIEW_CURSOR_FIFO, - .max_wm = PINEVIEW_CURSOR_MAX_WM, - .default_wm = PINEVIEW_CURSOR_DFT_WM, - .guard_size = PINEVIEW_CURSOR_GUARD_WM, - .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params i965_cursor_wm_info = { - .fifo_size = I965_CURSOR_FIFO, - .max_wm = I965_CURSOR_MAX_WM, - .default_wm = I965_CURSOR_DFT_WM, - .guard_size = 2, - .cacheline_size = I915_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params i945_wm_info = { - .fifo_size = I945_FIFO_SIZE, - .max_wm = I915_MAX_WM, - .default_wm = 1, - .guard_size = 2, - .cacheline_size = I915_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params i915_wm_info = { - .fifo_size = I915_FIFO_SIZE, - .max_wm = I915_MAX_WM, - .default_wm = 1, - .guard_size = 2, - .cacheline_size = I915_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params i830_a_wm_info = { - .fifo_size = I855GM_FIFO_SIZE, - .max_wm = I915_MAX_WM, - .default_wm = 1, - .guard_size = 2, - .cacheline_size = I830_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params i830_bc_wm_info = { - .fifo_size = I855GM_FIFO_SIZE, - .max_wm = I915_MAX_WM/2, - .default_wm = 1, - .guard_size = 2, - .cacheline_size = I830_FIFO_LINE_SIZE, -}; - -static const struct intel_watermark_params i845_wm_info = { - .fifo_size = I830_FIFO_SIZE, - .max_wm = I915_MAX_WM, - .default_wm = 1, - .guard_size = 2, - .cacheline_size = I830_FIFO_LINE_SIZE, -}; - -/** - * intel_wm_method1 - Method 1 / "small buffer" watermark formula - * @pixel_rate: Pipe pixel rate in kHz - * @cpp: Plane bytes per pixel - * @latency: Memory wakeup latency in 0.1us units - * - * Compute the watermark using the method 1 or "small buffer" - * formula. The caller may additonally add extra cachelines - * to account for TLB misses and clock crossings. - * - * This method is concerned with the short term drain rate - * of the FIFO, ie. it does not account for blanking periods - * which would effectively reduce the average drain rate across - * a longer period. The name "small" refers to the fact the - * FIFO is relatively small compared to the amount of data - * fetched. - * - * The FIFO level vs. time graph might look something like: - * - * |\ |\ - * | \ | \ - * __---__---__ (- plane active, _ blanking) - * -> time - * - * or perhaps like this: - * - * |\|\ |\|\ - * __----__----__ (- plane active, _ blanking) - * -> time - * - * Returns: - * The watermark in bytes - */ -static unsigned int intel_wm_method1(unsigned int pixel_rate, - unsigned int cpp, - unsigned int latency) -{ - u64 ret; - - ret = mul_u32_u32(pixel_rate, cpp * latency); - ret = DIV_ROUND_UP_ULL(ret, 10000); - - return ret; -} - -/** - * intel_wm_method2 - Method 2 / "large buffer" watermark formula - * @pixel_rate: Pipe pixel rate in kHz - * @htotal: Pipe horizontal total - * @width: Plane width in pixels - * @cpp: Plane bytes per pixel - * @latency: Memory wakeup latency in 0.1us units - * - * Compute the watermark using the method 2 or "large buffer" - * formula. The caller may additonally add extra cachelines - * to account for TLB misses and clock crossings. - * - * This method is concerned with the long term drain rate - * of the FIFO, ie. 
it does account for blanking periods - * which effectively reduce the average drain rate across - * a longer period. The name "large" refers to the fact the - * FIFO is relatively large compared to the amount of data - * fetched. - * - * The FIFO level vs. time graph might look something like: - * - * |\___ |\___ - * | \___ | \___ - * | \ | \ - * __ --__--__--__--__--__--__ (- plane active, _ blanking) - * -> time - * - * Returns: - * The watermark in bytes - */ -static unsigned int intel_wm_method2(unsigned int pixel_rate, - unsigned int htotal, - unsigned int width, - unsigned int cpp, - unsigned int latency) -{ - unsigned int ret; - - /* - * FIXME remove once all users are computing - * watermarks in the correct place. - */ - if (WARN_ON_ONCE(htotal == 0)) - htotal = 1; - - ret = (latency * pixel_rate) / (htotal * 10000); - ret = (ret + 1) * width * cpp; - - return ret; -} - -/** - * intel_calculate_wm - calculate watermark level - * @pixel_rate: pixel clock - * @wm: chip FIFO params - * @fifo_size: size of the FIFO buffer - * @cpp: bytes per pixel - * @latency_ns: memory latency for the platform - * - * Calculate the watermark level (the level at which the display plane will - * start fetching from memory again). Each chip has a different display - * FIFO size and allocation, so the caller needs to figure that out and pass - * in the correct intel_watermark_params structure. - * - * As the pixel clock runs, the FIFO will be drained at a rate that depends - * on the pixel size. When it reaches the watermark level, it'll start - * fetching FIFO line sized based chunks from memory until the FIFO fills - * past the watermark point. If the FIFO drains completely, a FIFO underrun - * will occur, and a display engine hang could result. - */ -static unsigned int intel_calculate_wm(int pixel_rate, - const struct intel_watermark_params *wm, - int fifo_size, int cpp, - unsigned int latency_ns) -{ - int entries, wm_size; - - /* - * Note: we need to make sure we don't overflow for various clock & - * latency values. - * clocks go from a few thousand to several hundred thousand. - * latency is usually a few thousand - */ - entries = intel_wm_method1(pixel_rate, cpp, - latency_ns / 100); - entries = DIV_ROUND_UP(entries, wm->cacheline_size) + - wm->guard_size; - DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries); - - wm_size = fifo_size - entries; - DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size); - - /* Don't promote wm_size to unsigned... */ - if (wm_size > wm->max_wm) - wm_size = wm->max_wm; - if (wm_size <= 0) - wm_size = wm->default_wm; - - /* - * Bspec seems to indicate that the value shouldn't be lower than - * 'burst size + 1'. Certainly 830 is quite unhappy with low values. - * Lets go for 8 which is the burst size since certain platforms - * already use a hardcoded 8 (which is what the spec says should be - * done). 
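The two formulas above can be sanity checked with a standalone sketch. The numbers below are hypothetical (a 148500 kHz pixel clock, htotal 2200, a 1920 pixel wide 32bpp plane) and are only meant to show the unit handling: pixel rate in kHz, latency in 0.1 us units, result in bytes.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the math of intel_wm_method1(): short term drain rate, no blanking credit. */
static unsigned int wm_method1(unsigned int pixel_rate_khz, unsigned int cpp,
                               unsigned int latency_01us)
{
        uint64_t ret = (uint64_t)pixel_rate_khz * cpp * latency_01us;

        return (unsigned int)((ret + 10000 - 1) / 10000);       /* DIV_ROUND_UP(ret, 10000) */
}

/* Mirrors the math of intel_wm_method2(): long term drain rate, counts whole lines. */
static unsigned int wm_method2(unsigned int pixel_rate_khz, unsigned int htotal,
                               unsigned int width, unsigned int cpp,
                               unsigned int latency_01us)
{
        unsigned int lines = (latency_01us * pixel_rate_khz) / (htotal * 10000);

        return (lines + 1) * width * cpp;
}

int main(void)
{
        printf("method1, 5 us : %u bytes\n", wm_method1(148500, 4, 50));              /* 2970 */
        printf("method2, 12 us: %u bytes\n", wm_method2(148500, 2200, 1920, 4, 120)); /* 7680 */
        return 0;
}

At 12 us the latency is still below one line time at this clock (2200 / 148500 kHz is roughly 14.8 us), so method 2 charges a single line worth of data.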
- */ - if (wm_size <= 8) - wm_size = 8; - - return wm_size; -} - -static bool is_disabling(int old, int new, int threshold) -{ - return old >= threshold && new < threshold; -} - -static bool is_enabling(int old, int new, int threshold) -{ - return old < threshold && new >= threshold; -} - -static int intel_wm_num_levels(struct drm_i915_private *dev_priv) -{ - return dev_priv->display.wm.max_level + 1; -} - -bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - - /* FIXME check the 'enable' instead */ - if (!crtc_state->hw.active) - return false; - - /* - * Treat cursor with fb as always visible since cursor updates - * can happen faster than the vrefresh rate, and the current - * watermark code doesn't handle that correctly. Cursor updates - * which set/clear the fb or change the cursor size are going - * to get throttled by intel_legacy_cursor_update() to work - * around this problem with the watermark code. - */ - if (plane->id == PLANE_CURSOR) - return plane_state->hw.fb != NULL; - else - return plane_state->uapi.visible; -} - -static bool intel_crtc_active(struct intel_crtc *crtc) -{ - /* Be paranoid as we can arrive here with only partial - * state retrieved from the hardware during setup. - * - * We can ditch the adjusted_mode.crtc_clock check as soon - * as Haswell has gained clock readout/fastboot support. - * - * We can ditch the crtc->primary->state->fb check as soon as we can - * properly reconstruct framebuffers. - * - * FIXME: The intel_crtc->active here should be switched to - * crtc->state->active once we have proper CRTC states wired up - * for atomic. - */ - return crtc && crtc->active && crtc->base.primary->state->fb && - crtc->config->hw.adjusted_mode.crtc_clock; -} - -static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv) -{ - struct intel_crtc *crtc, *enabled = NULL; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - if (intel_crtc_active(crtc)) { - if (enabled) - return NULL; - enabled = crtc; - } - } - - return enabled; -} - -static void pnv_update_wm(struct drm_i915_private *dev_priv) -{ - struct intel_crtc *crtc; - const struct cxsr_latency *latency; - u32 reg; - unsigned int wm; - - latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv), - dev_priv->is_ddr3, - dev_priv->fsb_freq, - dev_priv->mem_freq); - if (!latency) { - drm_dbg_kms(&dev_priv->drm, - "Unknown FSB/MEM found, disable CxSR\n"); - intel_set_memory_cxsr(dev_priv, false); - return; - } - - crtc = single_enabled_crtc(dev_priv); - if (crtc) { - const struct drm_framebuffer *fb = - crtc->base.primary->state->fb; - int pixel_rate = crtc->config->pixel_rate; - int cpp = fb->format->cpp[0]; - - /* Display SR */ - wm = intel_calculate_wm(pixel_rate, &pnv_display_wm, - pnv_display_wm.fifo_size, - cpp, latency->display_sr); - reg = intel_uncore_read(&dev_priv->uncore, DSPFW1); - reg &= ~DSPFW_SR_MASK; - reg |= FW_WM(wm, SR); - intel_uncore_write(&dev_priv->uncore, DSPFW1, reg); - drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg); - - /* cursor SR */ - wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm, - pnv_display_wm.fifo_size, - 4, latency->cursor_sr); - intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK, - FW_WM(wm, CURSOR_SR)); - - /* Display HPLL off SR */ - wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm, - pnv_display_hplloff_wm.fifo_size, - cpp, latency->display_hpll_disable); - 
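intel_calculate_wm() above is what pnv_update_wm() here calls once per self refresh level: method 1 gives bytes, which are rounded up to cachelines, padded by the guard size, and subtracted from the FIFO size. A standalone walk-through with made-up FIFO parameters (none of these are real Pineview values):

#include <stdio.h>

/* Illustrative FIFO parameters only; the real ones come from the
 * intel_watermark_params tables above. */
#define EXAMPLE_FIFO_SIZE       96      /* cachelines */
#define EXAMPLE_CACHELINE       64      /* bytes */
#define EXAMPLE_GUARD           2       /* cachelines */
#define EXAMPLE_MAX_WM          0x7f
#define EXAMPLE_DEFAULT_WM      1

static int calculate_wm(int pixel_rate_khz, int cpp, unsigned int latency_ns)
{
        /* method 1: bytes fetched while riding out the wakeup latency */
        unsigned long long prod = (unsigned long long)pixel_rate_khz * cpp *
                                  (latency_ns / 100);
        unsigned int bytes = (unsigned int)((prod + 9999) / 10000);
        /* convert to FIFO entries (cachelines) and add the guard band */
        int entries = (bytes + EXAMPLE_CACHELINE - 1) / EXAMPLE_CACHELINE + EXAMPLE_GUARD;
        /* watermark = remaining FIFO space, clamped like intel_calculate_wm() */
        int wm_size = EXAMPLE_FIFO_SIZE - entries;

        if (wm_size > EXAMPLE_MAX_WM)
                wm_size = EXAMPLE_MAX_WM;
        if (wm_size <= 0)
                wm_size = EXAMPLE_DEFAULT_WM;
        if (wm_size <= 8)
                wm_size = 8;    /* never go below the burst size */

        return wm_size;
}

int main(void)
{
        /* 108000 kHz pixel clock, 32bpp, 5000 ns latency:
         * 108000 * 4 * 50 / 10000 = 2160 bytes -> 34 cachelines + 2 guard = 36,
         * leaving 96 - 36 = 60 entries of FIFO space. */
        printf("wm = %d\n", calculate_wm(108000, 4, 5000));
        return 0;
}

The result is the FIFO space left over after reserving enough entries to cover the wakeup latency.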
intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR)); - - /* cursor HPLL off SR */ - wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm, - pnv_display_hplloff_wm.fifo_size, - 4, latency->cursor_hpll_disable); - reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); - reg &= ~DSPFW_HPLL_CURSOR_MASK; - reg |= FW_WM(wm, HPLL_CURSOR); - intel_uncore_write(&dev_priv->uncore, DSPFW3, reg); - drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg); - - intel_set_memory_cxsr(dev_priv, true); - } else { - intel_set_memory_cxsr(dev_priv, false); - } -} - -/* - * Documentation says: - * "If the line size is small, the TLB fetches can get in the way of the - * data fetches, causing some lag in the pixel data return which is not - * accounted for in the above formulas. The following adjustment only - * needs to be applied if eight whole lines fit in the buffer at once. - * The WM is adjusted upwards by the difference between the FIFO size - * and the size of 8 whole lines. This adjustment is always performed - * in the actual pixel depth regardless of whether FBC is enabled or not." - */ -static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp) -{ - int tlb_miss = fifo_size * 64 - width * cpp * 8; - - return max(0, tlb_miss); -} - -static void g4x_write_wm_values(struct drm_i915_private *dev_priv, - const struct g4x_wm_values *wm) -{ - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) - trace_g4x_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); - - intel_uncore_write(&dev_priv->uncore, DSPFW1, - FW_WM(wm->sr.plane, SR) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW2, - (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) | - FW_WM(wm->sr.fbc, FBC_SR) | - FW_WM(wm->hpll.fbc, FBC_HPLL_SR) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW3, - (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) | - FW_WM(wm->sr.cursor, CURSOR_SR) | - FW_WM(wm->hpll.cursor, HPLL_CURSOR) | - FW_WM(wm->hpll.plane, HPLL_SR)); - - intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); -} - -#define FW_WM_VLV(value, plane) \ - (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) - -static void vlv_write_wm_values(struct drm_i915_private *dev_priv, - const struct vlv_wm_values *wm) -{ - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) { - trace_vlv_wm(intel_crtc_for_pipe(dev_priv, pipe), wm); - - intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe), - (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | - (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | - (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | - (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT)); - } - - /* - * Zero the (unused) WM1 watermarks, and also clear all the - * high order bits so that there are no out of bounds values - * present in the registers during the reprogramming. 
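The TLB adjustment quoted above is easy to check in isolation: it only kicks in when eight full lines fit in the plane's FIFO, otherwise the max(0, ...) clamps it away. A standalone version of the same math with two hypothetical plane widths against a hypothetical 127 cacheline FIFO:

#include <stdio.h>

static int tlb_miss_wa(int fifo_size_cachelines, int width, int cpp)
{
        int tlb_miss = fifo_size_cachelines * 64 - width * cpp * 8;

        return tlb_miss > 0 ? tlb_miss : 0;
}

int main(void)
{
        /* 1920 pixels * 4 bytes * 8 lines = 61440 bytes, far more than
         * 127 * 64 = 8128 bytes: eight lines do not fit, no adjustment. */
        printf("wide plane  : +%d bytes\n", tlb_miss_wa(127, 1920, 4));
        /* 240 pixels * 4 bytes * 8 lines = 7680 bytes < 8128 bytes:
         * the watermark is bumped by the 448 byte difference. */
        printf("narrow plane: +%d bytes\n", tlb_miss_wa(127, 240, 4));
        return 0;
}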
- */ - intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0); - intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0); - intel_uncore_write(&dev_priv->uncore, DSPFW4, 0); - intel_uncore_write(&dev_priv->uncore, DSPFW5, 0); - intel_uncore_write(&dev_priv->uncore, DSPFW6, 0); - - intel_uncore_write(&dev_priv->uncore, DSPFW1, - FW_WM(wm->sr.plane, SR) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | - FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW2, - FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | - FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW3, - FW_WM(wm->sr.cursor, CURSOR_SR)); - - if (IS_CHERRYVIEW(dev_priv)) { - intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV, - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); - intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV, - FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) | - FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE)); - intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV, - FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) | - FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC)); - intel_uncore_write(&dev_priv->uncore, DSPHOWM, - FW_WM(wm->sr.plane >> 9, SR_HI) | - FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) | - FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) | - FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); - } else { - intel_uncore_write(&dev_priv->uncore, DSPFW7, - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | - FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); - intel_uncore_write(&dev_priv->uncore, DSPHOWM, - FW_WM(wm->sr.plane >> 9, SR_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | - FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | - FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); - } - - intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); -} - -#undef FW_WM_VLV - -static void g4x_setup_wm_latency(struct drm_i915_private *dev_priv) -{ - /* all latencies in usec */ - dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_NORMAL] = 5; - dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_SR] = 12; - dev_priv->display.wm.pri_latency[G4X_WM_LEVEL_HPLL] = 35; - - dev_priv->display.wm.max_level = G4X_WM_LEVEL_HPLL; -} - -static int g4x_plane_fifo_size(enum plane_id plane_id, int level) -{ - /* - * DSPCNTR[13] supposedly controls whether the - * primary plane can use the FIFO space otherwise - * reserved for the sprite plane. It's not 100% clear - * what the actual FIFO size is, but it looks like we - * can happily set both primary and sprite watermarks - * up to 127 cachelines. 
So that would seem to mean - * that either DSPCNTR[13] doesn't do anything, or that - * the total FIFO is >= 256 cachelines in size. Either - * way, we don't seem to have to worry about this - * repartitioning as the maximum watermark value the - * register can hold for each plane is lower than the - * minimum FIFO size. - */ - switch (plane_id) { - case PLANE_CURSOR: - return 63; - case PLANE_PRIMARY: - return level == G4X_WM_LEVEL_NORMAL ? 127 : 511; - case PLANE_SPRITE0: - return level == G4X_WM_LEVEL_NORMAL ? 127 : 0; - default: - MISSING_CASE(plane_id); - return 0; - } -} - -static int g4x_fbc_fifo_size(int level) -{ - switch (level) { - case G4X_WM_LEVEL_SR: - return 7; - case G4X_WM_LEVEL_HPLL: - return 15; - default: - MISSING_CASE(level); - return 0; - } -} - -static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int level) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_display_mode *pipe_mode = - &crtc_state->hw.pipe_mode; - unsigned int latency = dev_priv->display.wm.pri_latency[level] * 10; - unsigned int pixel_rate, htotal, cpp, width, wm; - - if (latency == 0) - return USHRT_MAX; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - return 0; - - cpp = plane_state->hw.fb->format->cpp[0]; - - /* - * WaUse32BppForSRWM:ctg,elk - * - * The spec fails to list this restriction for the - * HPLL watermark, which seems a little strange. - * Let's use 32bpp for the HPLL watermark as well. - */ - if (plane->id == PLANE_PRIMARY && - level != G4X_WM_LEVEL_NORMAL) - cpp = max(cpp, 4u); - - pixel_rate = crtc_state->pixel_rate; - htotal = pipe_mode->crtc_htotal; - width = drm_rect_width(&plane_state->uapi.src) >> 16; - - if (plane->id == PLANE_CURSOR) { - wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); - } else if (plane->id == PLANE_PRIMARY && - level == G4X_WM_LEVEL_NORMAL) { - wm = intel_wm_method1(pixel_rate, cpp, latency); - } else { - unsigned int small, large; - - small = intel_wm_method1(pixel_rate, cpp, latency); - large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); - - wm = min(small, large); - } - - wm += g4x_tlb_miss_wa(g4x_plane_fifo_size(plane->id, level), - width, cpp); - - wm = DIV_ROUND_UP(wm, 64) + 2; - - return min_t(unsigned int, wm, USHRT_MAX); -} - -static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state, - int level, enum plane_id plane_id, u16 value) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - bool dirty = false; - - for (; level < intel_wm_num_levels(dev_priv); level++) { - struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; - - dirty |= raw->plane[plane_id] != value; - raw->plane[plane_id] = value; - } - - return dirty; -} - -static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state, - int level, u16 value) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - bool dirty = false; - - /* NORMAL level doesn't have an FBC watermark */ - level = max(level, G4X_WM_LEVEL_SR); - - for (; level < intel_wm_num_levels(dev_priv); level++) { - struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; - - dirty |= raw->fbc != value; - raw->fbc = value; - } - - return dirty; -} - -static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - u32 pri_val); - -static bool g4x_raw_plane_wm_compute(struct intel_crtc_state 
*crtc_state, - const struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); - enum plane_id plane_id = plane->id; - bool dirty = false; - int level; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) { - dirty |= g4x_raw_plane_wm_set(crtc_state, 0, plane_id, 0); - if (plane_id == PLANE_PRIMARY) - dirty |= g4x_raw_fbc_wm_set(crtc_state, 0, 0); - goto out; - } - - for (level = 0; level < num_levels; level++) { - struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; - int wm, max_wm; - - wm = g4x_compute_wm(crtc_state, plane_state, level); - max_wm = g4x_plane_fifo_size(plane_id, level); - - if (wm > max_wm) - break; - - dirty |= raw->plane[plane_id] != wm; - raw->plane[plane_id] = wm; - - if (plane_id != PLANE_PRIMARY || - level == G4X_WM_LEVEL_NORMAL) - continue; - - wm = ilk_compute_fbc_wm(crtc_state, plane_state, - raw->plane[plane_id]); - max_wm = g4x_fbc_fifo_size(level); - - /* - * FBC wm is not mandatory as we - * can always just disable its use. - */ - if (wm > max_wm) - wm = USHRT_MAX; - - dirty |= raw->fbc != wm; - raw->fbc = wm; - } - - /* mark watermarks as invalid */ - dirty |= g4x_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); - - if (plane_id == PLANE_PRIMARY) - dirty |= g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); - - out: - if (dirty) { - drm_dbg_kms(&dev_priv->drm, - "%s watermarks: normal=%d, SR=%d, HPLL=%d\n", - plane->base.name, - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_NORMAL].plane[plane_id], - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].plane[plane_id], - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].plane[plane_id]); - - if (plane_id == PLANE_PRIMARY) - drm_dbg_kms(&dev_priv->drm, - "FBC watermarks: SR=%d, HPLL=%d\n", - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_SR].fbc, - crtc_state->wm.g4x.raw[G4X_WM_LEVEL_HPLL].fbc); - } - - return dirty; -} - -static bool g4x_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, - enum plane_id plane_id, int level) -{ - const struct g4x_pipe_wm *raw = &crtc_state->wm.g4x.raw[level]; - - return raw->plane[plane_id] <= g4x_plane_fifo_size(plane_id, level); -} - -static bool g4x_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, - int level) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - - if (level > dev_priv->display.wm.max_level) - return false; - - return g4x_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && - g4x_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && - g4x_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); -} - -/* mark all levels starting from 'level' as invalid */ -static void g4x_invalidate_wms(struct intel_crtc *crtc, - struct g4x_wm_state *wm_state, int level) -{ - if (level <= G4X_WM_LEVEL_NORMAL) { - enum plane_id plane_id; - - for_each_plane_id_on_crtc(crtc, plane_id) - wm_state->wm.plane[plane_id] = USHRT_MAX; - } - - if (level <= G4X_WM_LEVEL_SR) { - wm_state->cxsr = false; - wm_state->sr.cursor = USHRT_MAX; - wm_state->sr.plane = USHRT_MAX; - wm_state->sr.fbc = USHRT_MAX; - } - - if (level <= G4X_WM_LEVEL_HPLL) { - wm_state->hpll_en = false; - wm_state->hpll.cursor = USHRT_MAX; - wm_state->hpll.plane = USHRT_MAX; - wm_state->hpll.fbc = USHRT_MAX; - } -} - -static bool g4x_compute_fbc_en(const struct g4x_wm_state *wm_state, - int level) -{ - if (level < G4X_WM_LEVEL_SR) - return false; - - if (level >= G4X_WM_LEVEL_SR && - 
wm_state->sr.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_SR)) - return false; - - if (level >= G4X_WM_LEVEL_HPLL && - wm_state->hpll.fbc > g4x_fbc_fifo_size(G4X_WM_LEVEL_HPLL)) - return false; - - return true; -} - -static int _g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; - u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); - const struct g4x_pipe_wm *raw; - enum plane_id plane_id; - int level; - - level = G4X_WM_LEVEL_NORMAL; - if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) - goto out; - - raw = &crtc_state->wm.g4x.raw[level]; - for_each_plane_id_on_crtc(crtc, plane_id) - wm_state->wm.plane[plane_id] = raw->plane[plane_id]; - - level = G4X_WM_LEVEL_SR; - if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) - goto out; - - raw = &crtc_state->wm.g4x.raw[level]; - wm_state->sr.plane = raw->plane[PLANE_PRIMARY]; - wm_state->sr.cursor = raw->plane[PLANE_CURSOR]; - wm_state->sr.fbc = raw->fbc; - - wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY); - - level = G4X_WM_LEVEL_HPLL; - if (!g4x_raw_crtc_wm_is_valid(crtc_state, level)) - goto out; - - raw = &crtc_state->wm.g4x.raw[level]; - wm_state->hpll.plane = raw->plane[PLANE_PRIMARY]; - wm_state->hpll.cursor = raw->plane[PLANE_CURSOR]; - wm_state->hpll.fbc = raw->fbc; - - wm_state->hpll_en = wm_state->cxsr; - - level++; - - out: - if (level == G4X_WM_LEVEL_NORMAL) - return -EINVAL; - - /* invalidate the higher levels */ - g4x_invalidate_wms(crtc, wm_state, level); - - /* - * Determine if the FBC watermark(s) can be used. IF - * this isn't the case we prefer to disable the FBC - * watermark(s) rather than disable the SR/HPLL - * level(s) entirely. 'level-1' is the highest valid - * level here. 
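The per-plane numbers that feed this level walk come from g4x_compute_wm() above: the cursor always uses method 2, the primary plane uses method 1 for the normal level, and everything else takes the smaller of the two results before the cacheline conversion and the +2 pad. A sketch for the primary plane at the SR level, reusing the hypothetical mode from the earlier sketches and the 12 us SR latency from g4x_setup_wm_latency():

#include <stdio.h>

static unsigned int method1(unsigned int rate, unsigned int cpp, unsigned int lat)
{
        return (unsigned int)(((unsigned long long)rate * cpp * lat + 9999) / 10000);
}

static unsigned int method2(unsigned int rate, unsigned int htotal,
                            unsigned int width, unsigned int cpp, unsigned int lat)
{
        return ((lat * rate) / (htotal * 10000) + 1) * width * cpp;
}

int main(void)
{
        unsigned int rate = 148500, htotal = 2200, width = 1920, cpp = 4;
        unsigned int lat = 12 * 10;     /* SR latency in 0.1 us units */
        unsigned int small = method1(rate, cpp, lat);                   /* 7128 bytes */
        unsigned int large = method2(rate, htotal, width, cpp, lat);    /* 7680 bytes */
        unsigned int wm = small < large ? small : large;

        /* g4x_tlb_miss_wa() adds nothing here: eight lines of a 1920 pixel
         * plane never fit in the FIFO, so the max(0, ...) clamps it away. */
        wm = (wm + 63) / 64 + 2;        /* 114 cachelines, well under the 511 SR cap */
        printf("primary SR wm = %u cachelines\n", wm);
        return 0;
}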
- */ - wm_state->fbc_en = g4x_compute_fbc_en(wm_state, level - 1); - - return 0; -} - -static int g4x_compute_pipe_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_plane_state *old_plane_state; - const struct intel_plane_state *new_plane_state; - struct intel_plane *plane; - unsigned int dirty = 0; - int i; - - for_each_oldnew_intel_plane_in_state(state, plane, - old_plane_state, - new_plane_state, i) { - if (new_plane_state->hw.crtc != &crtc->base && - old_plane_state->hw.crtc != &crtc->base) - continue; - - if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state)) - dirty |= BIT(plane->id); - } - - if (!dirty) - return 0; - - return _g4x_compute_pipe_wm(crtc_state); -} - -static int g4x_compute_intermediate_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc_state *new_crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate; - const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal; - const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal; - enum plane_id plane_id; - - if (!new_crtc_state->hw.active || - intel_crtc_needs_modeset(new_crtc_state)) { - *intermediate = *optimal; - - intermediate->cxsr = false; - intermediate->hpll_en = false; - goto out; - } - - intermediate->cxsr = optimal->cxsr && active->cxsr && - !new_crtc_state->disable_cxsr; - intermediate->hpll_en = optimal->hpll_en && active->hpll_en && - !new_crtc_state->disable_cxsr; - intermediate->fbc_en = optimal->fbc_en && active->fbc_en; - - for_each_plane_id_on_crtc(crtc, plane_id) { - intermediate->wm.plane[plane_id] = - max(optimal->wm.plane[plane_id], - active->wm.plane[plane_id]); - - drm_WARN_ON(&dev_priv->drm, intermediate->wm.plane[plane_id] > - g4x_plane_fifo_size(plane_id, G4X_WM_LEVEL_NORMAL)); - } - - intermediate->sr.plane = max(optimal->sr.plane, - active->sr.plane); - intermediate->sr.cursor = max(optimal->sr.cursor, - active->sr.cursor); - intermediate->sr.fbc = max(optimal->sr.fbc, - active->sr.fbc); - - intermediate->hpll.plane = max(optimal->hpll.plane, - active->hpll.plane); - intermediate->hpll.cursor = max(optimal->hpll.cursor, - active->hpll.cursor); - intermediate->hpll.fbc = max(optimal->hpll.fbc, - active->hpll.fbc); - - drm_WARN_ON(&dev_priv->drm, - (intermediate->sr.plane > - g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_SR) || - intermediate->sr.cursor > - g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_SR)) && - intermediate->cxsr); - drm_WARN_ON(&dev_priv->drm, - (intermediate->sr.plane > - g4x_plane_fifo_size(PLANE_PRIMARY, G4X_WM_LEVEL_HPLL) || - intermediate->sr.cursor > - g4x_plane_fifo_size(PLANE_CURSOR, G4X_WM_LEVEL_HPLL)) && - intermediate->hpll_en); - - drm_WARN_ON(&dev_priv->drm, - intermediate->sr.fbc > g4x_fbc_fifo_size(1) && - intermediate->fbc_en && intermediate->cxsr); - drm_WARN_ON(&dev_priv->drm, - intermediate->hpll.fbc > g4x_fbc_fifo_size(2) && - intermediate->fbc_en && intermediate->hpll_en); - -out: - /* - * If our intermediate WM are identical to the final WM, then we can - * omit the post-vblank programming; only update if it's different. 
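g4x_compute_intermediate_wm() above exists because the freshly computed watermarks only become safe once the new plane state has latched at vblank; until then the programmed values must also cover the old state. Hence the per-plane max() and the ANDing of the cxsr/hpll/fbc enables (the disable_cxsr override is omitted in this reduced model, and the struct below is purely illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct demo_wm { unsigned int primary, cursor; bool cxsr; };

/* The intermediate state must be safe both before and after the vblank at
 * which the new plane state latches, so it takes the worst case of the two. */
static struct demo_wm merge_intermediate(struct demo_wm active, struct demo_wm optimal)
{
        struct demo_wm m;

        m.primary = active.primary > optimal.primary ? active.primary : optimal.primary;
        m.cursor = active.cursor > optimal.cursor ? active.cursor : optimal.cursor;
        m.cxsr = active.cxsr && optimal.cxsr;   /* only if both states allow it */
        return m;
}

int main(void)
{
        struct demo_wm active = { .primary = 40, .cursor = 10, .cxsr = true };
        struct demo_wm optimal = { .primary = 90, .cursor = 4, .cxsr = false };
        struct demo_wm mid = merge_intermediate(active, optimal);

        printf("primary=%u cursor=%u cxsr=%d\n", mid.primary, mid.cursor, mid.cxsr);
        return 0;
}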
- */ - if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) - new_crtc_state->wm.need_postvbl_update = true; - - return 0; -} - -static void g4x_merge_wm(struct drm_i915_private *dev_priv, - struct g4x_wm_values *wm) -{ - struct intel_crtc *crtc; - int num_active_pipes = 0; - - wm->cxsr = true; - wm->hpll_en = true; - wm->fbc_en = true; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; - - if (!crtc->active) - continue; - - if (!wm_state->cxsr) - wm->cxsr = false; - if (!wm_state->hpll_en) - wm->hpll_en = false; - if (!wm_state->fbc_en) - wm->fbc_en = false; - - num_active_pipes++; - } - - if (num_active_pipes != 1) { - wm->cxsr = false; - wm->hpll_en = false; - wm->fbc_en = false; - } - - for_each_intel_crtc(&dev_priv->drm, crtc) { - const struct g4x_wm_state *wm_state = &crtc->wm.active.g4x; - enum pipe pipe = crtc->pipe; - - wm->pipe[pipe] = wm_state->wm; - if (crtc->active && wm->cxsr) - wm->sr = wm_state->sr; - if (crtc->active && wm->hpll_en) - wm->hpll = wm_state->hpll; - } -} - -static void g4x_program_watermarks(struct drm_i915_private *dev_priv) -{ - struct g4x_wm_values *old_wm = &dev_priv->display.wm.g4x; - struct g4x_wm_values new_wm = {}; - - g4x_merge_wm(dev_priv, &new_wm); - - if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) - return; - - if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) - _intel_set_memory_cxsr(dev_priv, false); - - g4x_write_wm_values(dev_priv, &new_wm); - - if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) - _intel_set_memory_cxsr(dev_priv, true); - - *old_wm = new_wm; -} - -static void g4x_initial_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - - mutex_lock(&dev_priv->display.wm.wm_mutex); - crtc->wm.active.g4x = crtc_state->wm.g4x.intermediate; - g4x_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->display.wm.wm_mutex); -} - -static void g4x_optimize_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - - if (!crtc_state->wm.need_postvbl_update) - return; - - mutex_lock(&dev_priv->display.wm.wm_mutex); - crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; - g4x_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->display.wm.wm_mutex); -} - -/* latency must be in 0.1us units. 
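g4x_program_watermarks() above brackets the register write with the is_disabling()/is_enabling() helpers so that self refresh is never allowed while watermarks that do not support it are still programmed: disallow before the write when CxSR is going away, re-allow after the write when it is coming back. A trivial standalone mirror of that ordering (the transition below is hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Same logic as the is_disabling()/is_enabling() helpers above. */
static bool is_disabling(int old, int cur, int threshold)
{
        return old >= threshold && cur < threshold;
}

static bool is_enabling(int old, int cur, int threshold)
{
        return old < threshold && cur >= threshold;
}

int main(void)
{
        bool old_cxsr = true, new_cxsr = false;

        if (is_disabling(old_cxsr, new_cxsr, true))
                printf("disallow CxSR before writing the new watermarks\n");

        /* ... the DSPFW register writes would happen here ... */

        if (is_enabling(old_cxsr, new_cxsr, true))
                printf("re-allow CxSR after the new watermarks are in place\n");

        return 0;
}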
*/ -static unsigned int vlv_wm_method2(unsigned int pixel_rate, - unsigned int htotal, - unsigned int width, - unsigned int cpp, - unsigned int latency) -{ - unsigned int ret; - - ret = intel_wm_method2(pixel_rate, htotal, - width, cpp, latency); - ret = DIV_ROUND_UP(ret, 64); - - return ret; -} - -static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv) -{ - /* all latencies in usec */ - dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; - - dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM2; - - if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_PM5] = 12; - dev_priv->display.wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33; - - dev_priv->display.wm.max_level = VLV_WM_LEVEL_DDR_DVFS; - } -} - -static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int level) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_display_mode *pipe_mode = - &crtc_state->hw.pipe_mode; - unsigned int pixel_rate, htotal, cpp, width, wm; - - if (dev_priv->display.wm.pri_latency[level] == 0) - return USHRT_MAX; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - return 0; - - cpp = plane_state->hw.fb->format->cpp[0]; - pixel_rate = crtc_state->pixel_rate; - htotal = pipe_mode->crtc_htotal; - width = drm_rect_width(&plane_state->uapi.src) >> 16; - - if (plane->id == PLANE_CURSOR) { - /* - * FIXME the formula gives values that are - * too big for the cursor FIFO, and hence we - * would never be able to use cursors. For - * now just hardcode the watermark. - */ - wm = 63; - } else { - wm = vlv_wm_method2(pixel_rate, htotal, width, cpp, - dev_priv->display.wm.pri_latency[level] * 10); - } - - return min_t(unsigned int, wm, USHRT_MAX); -} - -static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes) -{ - return (active_planes & (BIT(PLANE_SPRITE0) | - BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1); -} - -static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct g4x_pipe_wm *raw = - &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2]; - struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; - u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); - int num_active_planes = hweight8(active_planes); - const int fifo_size = 511; - int fifo_extra, fifo_left = fifo_size; - int sprite0_fifo_extra = 0; - unsigned int total_rate; - enum plane_id plane_id; - - /* - * When enabling sprite0 after sprite1 has already been enabled - * we tend to get an underrun unless sprite0 already has some - * FIFO space allcoated. Hence we always allocate at least one - * cacheline for sprite0 whenever sprite1 is enabled. - * - * All other plane enable sequences appear immune to this problem. 
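vlv_compute_wm_level() above is method 2 again, rounded up to cachelines by vlv_wm_method2() and fed with the 3/12/33 us PM2/PM5/DDR DVFS latencies set up by vlv_setup_wm_latency() (the deeper two only exist on CHV). With the same hypothetical mode as in the earlier sketches:

#include <stdio.h>

/* Mirrors vlv_wm_method2(): method 2 in bytes, then converted to cachelines. */
static unsigned int vlv_wm(unsigned int rate_khz, unsigned int htotal,
                           unsigned int width, unsigned int cpp,
                           unsigned int latency_01us)
{
        unsigned int bytes = ((latency_01us * rate_khz) / (htotal * 10000) + 1) *
                             width * cpp;

        return (bytes + 63) / 64;       /* DIV_ROUND_UP(bytes, 64) */
}

int main(void)
{
        /* Hypothetical 148500 kHz mode, htotal 2200, 1920 wide, 32bpp. */
        printf("PM2     : %u cachelines\n", vlv_wm(148500, 2200, 1920, 4, 30));  /* 120 */
        printf("PM5     : %u cachelines\n", vlv_wm(148500, 2200, 1920, 4, 120)); /* 120 */
        printf("DDR DVFS: %u cachelines\n", vlv_wm(148500, 2200, 1920, 4, 330)); /* 360 */
        return 0;
}

PM2 and PM5 land on the same value here because both latencies are shorter than one line time at this clock.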
- */ - if (vlv_need_sprite0_fifo_workaround(active_planes)) - sprite0_fifo_extra = 1; - - total_rate = raw->plane[PLANE_PRIMARY] + - raw->plane[PLANE_SPRITE0] + - raw->plane[PLANE_SPRITE1] + - sprite0_fifo_extra; - - if (total_rate > fifo_size) - return -EINVAL; - - if (total_rate == 0) - total_rate = 1; - - for_each_plane_id_on_crtc(crtc, plane_id) { - unsigned int rate; - - if ((active_planes & BIT(plane_id)) == 0) { - fifo_state->plane[plane_id] = 0; - continue; - } - - rate = raw->plane[plane_id]; - fifo_state->plane[plane_id] = fifo_size * rate / total_rate; - fifo_left -= fifo_state->plane[plane_id]; - } - - fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra; - fifo_left -= sprite0_fifo_extra; - - fifo_state->plane[PLANE_CURSOR] = 63; - - fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1); - - /* spread the remainder evenly */ - for_each_plane_id_on_crtc(crtc, plane_id) { - int plane_extra; - - if (fifo_left == 0) - break; - - if ((active_planes & BIT(plane_id)) == 0) - continue; - - plane_extra = min(fifo_extra, fifo_left); - fifo_state->plane[plane_id] += plane_extra; - fifo_left -= plane_extra; - } - - drm_WARN_ON(&dev_priv->drm, active_planes != 0 && fifo_left != 0); - - /* give it all to the first plane if none are active */ - if (active_planes == 0) { - drm_WARN_ON(&dev_priv->drm, fifo_left != fifo_size); - fifo_state->plane[PLANE_PRIMARY] = fifo_left; - } - - return 0; -} - -/* mark all levels starting from 'level' as invalid */ -static void vlv_invalidate_wms(struct intel_crtc *crtc, - struct vlv_wm_state *wm_state, int level) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - for (; level < intel_wm_num_levels(dev_priv); level++) { - enum plane_id plane_id; - - for_each_plane_id_on_crtc(crtc, plane_id) - wm_state->wm[level].plane[plane_id] = USHRT_MAX; - - wm_state->sr[level].cursor = USHRT_MAX; - wm_state->sr[level].plane = USHRT_MAX; - } -} - -static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size) -{ - if (wm > fifo_size) - return USHRT_MAX; - else - return fifo_size - wm; -} - -/* - * Starting from 'level' set all higher - * levels to 'value' in the "raw" watermarks. - */ -static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state, - int level, enum plane_id plane_id, u16 value) -{ - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - int num_levels = intel_wm_num_levels(dev_priv); - bool dirty = false; - - for (; level < num_levels; level++) { - struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; - - dirty |= raw->plane[plane_id] != value; - raw->plane[plane_id] = value; - } - - return dirty; -} - -static bool vlv_raw_plane_wm_compute(struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); - enum plane_id plane_id = plane->id; - int num_levels = intel_wm_num_levels(to_i915(plane->base.dev)); - int level; - bool dirty = false; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) { - dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0); - goto out; - } - - for (level = 0; level < num_levels; level++) { - struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; - int wm = vlv_compute_wm_level(crtc_state, plane_state, level); - int max_wm = plane_id == PLANE_CURSOR ? 
63 : 511; - - if (wm > max_wm) - break; - - dirty |= raw->plane[plane_id] != wm; - raw->plane[plane_id] = wm; - } - - /* mark all higher levels as invalid */ - dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX); - -out: - if (dirty) - drm_dbg_kms(&dev_priv->drm, - "%s watermarks: PM2=%d, PM5=%d, DDR DVFS=%d\n", - plane->base.name, - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id], - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id], - crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]); - - return dirty; -} - -static bool vlv_raw_plane_wm_is_valid(const struct intel_crtc_state *crtc_state, - enum plane_id plane_id, int level) -{ - const struct g4x_pipe_wm *raw = - &crtc_state->wm.vlv.raw[level]; - const struct vlv_fifo_state *fifo_state = - &crtc_state->wm.vlv.fifo_state; - - return raw->plane[plane_id] <= fifo_state->plane[plane_id]; -} - -static bool vlv_raw_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level) -{ - return vlv_raw_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) && - vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) && - vlv_raw_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) && - vlv_raw_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level); -} - -static int _vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; - const struct vlv_fifo_state *fifo_state = - &crtc_state->wm.vlv.fifo_state; - u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); - int num_active_planes = hweight8(active_planes); - enum plane_id plane_id; - int level; - - /* initially allow all levels */ - wm_state->num_levels = intel_wm_num_levels(dev_priv); - /* - * Note that enabling cxsr with no primary/sprite planes - * enabled can wedge the pipe. Hence we only allow cxsr - * with exactly one enabled primary/sprite plane. 
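The FIFO split computed by vlv_compute_fifo() above is a proportional share of the 511 entries keyed on the PM2 raw watermarks, with the leftovers spread over the active planes in plane id order and the cursor pinned at 63. Worked through with two hypothetical active planes (primary raw 100, sprite0 raw 50, sprite1 off, so no extra sprite0 workaround cacheline):

#include <stdio.h>

int main(void)
{
        const int fifo_size = 511;
        int primary_rate = 100, sprite0_rate = 50;
        int total_rate = primary_rate + sprite0_rate;           /* 150 */

        int primary = fifo_size * primary_rate / total_rate;    /* 340 */
        int sprite0 = fifo_size * sprite0_rate / total_rate;    /* 170 */
        int fifo_left = fifo_size - primary - sprite0;          /* 1 */

        /* spread the remainder over the active planes, in plane id order,
         * until it runs out */
        int fifo_extra = (fifo_left + 2 - 1) / 2;               /* DIV_ROUND_UP(1, 2) = 1 */
        primary += fifo_extra < fifo_left ? fifo_extra : fifo_left;    /* 341 */

        printf("primary=%d sprite0=%d sprite1=0 cursor=63\n", primary, sprite0);
        return 0;
}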
- */ - wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1; - - for (level = 0; level < wm_state->num_levels; level++) { - const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; - const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1; - - if (!vlv_raw_crtc_wm_is_valid(crtc_state, level)) - break; - - for_each_plane_id_on_crtc(crtc, plane_id) { - wm_state->wm[level].plane[plane_id] = - vlv_invert_wm_value(raw->plane[plane_id], - fifo_state->plane[plane_id]); - } - - wm_state->sr[level].plane = - vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY], - raw->plane[PLANE_SPRITE0], - raw->plane[PLANE_SPRITE1]), - sr_fifo_size); - - wm_state->sr[level].cursor = - vlv_invert_wm_value(raw->plane[PLANE_CURSOR], - 63); - } - - if (level == 0) - return -EINVAL; - - /* limit to only levels we can actually handle */ - wm_state->num_levels = level; - - /* invalidate the higher levels */ - vlv_invalidate_wms(crtc, wm_state, level); - - return 0; -} - -static int vlv_compute_pipe_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_plane_state *old_plane_state; - const struct intel_plane_state *new_plane_state; - struct intel_plane *plane; - unsigned int dirty = 0; - int i; - - for_each_oldnew_intel_plane_in_state(state, plane, - old_plane_state, - new_plane_state, i) { - if (new_plane_state->hw.crtc != &crtc->base && - old_plane_state->hw.crtc != &crtc->base) - continue; - - if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state)) - dirty |= BIT(plane->id); - } - - /* - * DSPARB registers may have been reset due to the - * power well being turned off. Make sure we restore - * them to a consistent state even if no primary/sprite - * planes are initially active. We also force a FIFO - * recomputation so that we are sure to sanitize the - * FIFO setting we took over from the BIOS even if there - * are no active planes on the crtc. 
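_vlv_compute_pipe_wm() above stores each level in inverted form: vlv_invert_wm_value() turns "cachelines needed" into "FIFO headroom left", and anything that does not fit becomes USHRT_MAX so the level is later treated as unusable. Reusing the numbers from the two sketches above:

#include <limits.h>
#include <stdio.h>

/* Mirror of vlv_invert_wm_value(): registers hold the headroom, not the requirement. */
static unsigned short invert_wm(unsigned short wm, unsigned short fifo_size)
{
        return wm > fifo_size ? USHRT_MAX : fifo_size - wm;
}

int main(void)
{
        /* Needing 120 cachelines out of a 341 entry allocation leaves 221
         * entries of headroom; needing more than the allocation invalidates
         * the level. */
        printf("%u\n", invert_wm(120, 341));    /* 221 */
        printf("%u\n", invert_wm(360, 341));    /* 65535 */
        return 0;
}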
- */ - if (intel_crtc_needs_modeset(crtc_state)) - dirty = ~0; - - if (!dirty) - return 0; - - /* cursor changes don't warrant a FIFO recompute */ - if (dirty & ~BIT(PLANE_CURSOR)) { - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - const struct vlv_fifo_state *old_fifo_state = - &old_crtc_state->wm.vlv.fifo_state; - const struct vlv_fifo_state *new_fifo_state = - &crtc_state->wm.vlv.fifo_state; - int ret; - - ret = vlv_compute_fifo(crtc_state); - if (ret) - return ret; - - if (intel_crtc_needs_modeset(crtc_state) || - memcmp(old_fifo_state, new_fifo_state, - sizeof(*new_fifo_state)) != 0) - crtc_state->fifo_changed = true; - } - - return _vlv_compute_pipe_wm(crtc_state); -} - -#define VLV_FIFO(plane, value) \ - (((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV) - -static void vlv_atomic_update_fifo(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_uncore *uncore = &dev_priv->uncore; - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct vlv_fifo_state *fifo_state = - &crtc_state->wm.vlv.fifo_state; - int sprite0_start, sprite1_start, fifo_size; - u32 dsparb, dsparb2, dsparb3; - - if (!crtc_state->fifo_changed) - return; - - sprite0_start = fifo_state->plane[PLANE_PRIMARY]; - sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start; - fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start; - - drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63); - drm_WARN_ON(&dev_priv->drm, fifo_size != 511); - - trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size); - - /* - * uncore.lock serves a double purpose here. It allows us to - * use the less expensive I915_{READ,WRITE}_FW() functions, and - * it protects the DSPARB registers from getting clobbered by - * parallel updates from multiple pipes. - * - * intel_pipe_update_start() has already disabled interrupts - * for us, so a plain spin_lock() is sufficient here. 
- */ - spin_lock(&uncore->lock); - - switch (crtc->pipe) { - case PIPE_A: - dsparb = intel_uncore_read_fw(uncore, DSPARB); - dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); - - dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) | - VLV_FIFO(SPRITEB, 0xff)); - dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) | - VLV_FIFO(SPRITEB, sprite1_start)); - - dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) | - VLV_FIFO(SPRITEB_HI, 0x1)); - dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) | - VLV_FIFO(SPRITEB_HI, sprite1_start >> 8)); - - intel_uncore_write_fw(uncore, DSPARB, dsparb); - intel_uncore_write_fw(uncore, DSPARB2, dsparb2); - break; - case PIPE_B: - dsparb = intel_uncore_read_fw(uncore, DSPARB); - dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); - - dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) | - VLV_FIFO(SPRITED, 0xff)); - dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) | - VLV_FIFO(SPRITED, sprite1_start)); - - dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) | - VLV_FIFO(SPRITED_HI, 0xff)); - dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) | - VLV_FIFO(SPRITED_HI, sprite1_start >> 8)); - - intel_uncore_write_fw(uncore, DSPARB, dsparb); - intel_uncore_write_fw(uncore, DSPARB2, dsparb2); - break; - case PIPE_C: - dsparb3 = intel_uncore_read_fw(uncore, DSPARB3); - dsparb2 = intel_uncore_read_fw(uncore, DSPARB2); - - dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) | - VLV_FIFO(SPRITEF, 0xff)); - dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) | - VLV_FIFO(SPRITEF, sprite1_start)); - - dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) | - VLV_FIFO(SPRITEF_HI, 0xff)); - dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) | - VLV_FIFO(SPRITEF_HI, sprite1_start >> 8)); - - intel_uncore_write_fw(uncore, DSPARB3, dsparb3); - intel_uncore_write_fw(uncore, DSPARB2, dsparb2); - break; - default: - break; - } - - intel_uncore_posting_read_fw(uncore, DSPARB); - - spin_unlock(&uncore->lock); -} - -#undef VLV_FIFO - -static int vlv_compute_intermediate_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct intel_crtc_state *new_crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate; - const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal; - const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal; - int level; - - if (!new_crtc_state->hw.active || - intel_crtc_needs_modeset(new_crtc_state)) { - *intermediate = *optimal; - - intermediate->cxsr = false; - goto out; - } - - intermediate->num_levels = min(optimal->num_levels, active->num_levels); - intermediate->cxsr = optimal->cxsr && active->cxsr && - !new_crtc_state->disable_cxsr; - - for (level = 0; level < intermediate->num_levels; level++) { - enum plane_id plane_id; - - for_each_plane_id_on_crtc(crtc, plane_id) { - intermediate->wm[level].plane[plane_id] = - min(optimal->wm[level].plane[plane_id], - active->wm[level].plane[plane_id]); - } - - intermediate->sr[level].plane = min(optimal->sr[level].plane, - active->sr[level].plane); - intermediate->sr[level].cursor = min(optimal->sr[level].cursor, - active->sr[level].cursor); - } - - vlv_invalidate_wms(crtc, intermediate, level); - -out: - /* - * If our intermediate WM are identical to the final WM, then we can - * omit the post-vblank programming; only update if it's different. 
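vlv_compute_intermediate_wm() above takes min() where the g4x variant took max(): because the VLV values are the inverted headroom form, the smaller number is the stricter requirement, so the intermediate state again covers both sides of the vblank. A minimal illustration with hypothetical headroom values:

#include <stdio.h>

static unsigned short min_u16(unsigned short a, unsigned short b)
{
        return a < b ? a : b;
}

int main(void)
{
        /* Inverted (headroom) values: the old state leaves 221 entries free,
         * the new one only 100. Keeping the smaller headroom keeps the
         * stricter of the two raw watermarks in effect across the update. */
        unsigned short active = 221, optimal = 100;

        printf("intermediate = %u\n", min_u16(active, optimal));        /* 100 */
        return 0;
}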
- */ - if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0) - new_crtc_state->wm.need_postvbl_update = true; - - return 0; -} - -static void vlv_merge_wm(struct drm_i915_private *dev_priv, - struct vlv_wm_values *wm) -{ - struct intel_crtc *crtc; - int num_active_pipes = 0; - - wm->level = dev_priv->display.wm.max_level; - wm->cxsr = true; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; - - if (!crtc->active) - continue; - - if (!wm_state->cxsr) - wm->cxsr = false; - - num_active_pipes++; - wm->level = min_t(int, wm->level, wm_state->num_levels - 1); - } - - if (num_active_pipes != 1) - wm->cxsr = false; - - if (num_active_pipes > 1) - wm->level = VLV_WM_LEVEL_PM2; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv; - enum pipe pipe = crtc->pipe; - - wm->pipe[pipe] = wm_state->wm[wm->level]; - if (crtc->active && wm->cxsr) - wm->sr = wm_state->sr[wm->level]; - - wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2; - wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2; - wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2; - wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2; - } -} - -static void vlv_program_watermarks(struct drm_i915_private *dev_priv) -{ - struct vlv_wm_values *old_wm = &dev_priv->display.wm.vlv; - struct vlv_wm_values new_wm = {}; - - vlv_merge_wm(dev_priv, &new_wm); - - if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) - return; - - if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) - chv_set_memory_dvfs(dev_priv, false); - - if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) - chv_set_memory_pm5(dev_priv, false); - - if (is_disabling(old_wm->cxsr, new_wm.cxsr, true)) - _intel_set_memory_cxsr(dev_priv, false); - - vlv_write_wm_values(dev_priv, &new_wm); - - if (is_enabling(old_wm->cxsr, new_wm.cxsr, true)) - _intel_set_memory_cxsr(dev_priv, true); - - if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5)) - chv_set_memory_pm5(dev_priv, true); - - if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS)) - chv_set_memory_dvfs(dev_priv, true); - - *old_wm = new_wm; -} - -static void vlv_initial_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - - mutex_lock(&dev_priv->display.wm.wm_mutex); - crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate; - vlv_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->display.wm.wm_mutex); -} - -static void vlv_optimize_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - - if (!crtc_state->wm.need_postvbl_update) - return; - - mutex_lock(&dev_priv->display.wm.wm_mutex); - crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; - vlv_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->display.wm.wm_mutex); -} - -static void i965_update_wm(struct drm_i915_private *dev_priv) -{ - struct intel_crtc *crtc; - int srwm = 1; - int cursor_sr = 16; - bool cxsr_enabled; - - /* Calc sr entries for one plane configs */ - crtc = single_enabled_crtc(dev_priv); - if (crtc) { - /* self-refresh has much higher latency */ - static const int sr_latency_ns = 12000; - const struct 
drm_display_mode *pipe_mode = - &crtc->config->hw.pipe_mode; - const struct drm_framebuffer *fb = - crtc->base.primary->state->fb; - int pixel_rate = crtc->config->pixel_rate; - int htotal = pipe_mode->crtc_htotal; - int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; - int cpp = fb->format->cpp[0]; - int entries; - - entries = intel_wm_method2(pixel_rate, htotal, - width, cpp, sr_latency_ns / 100); - entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); - srwm = I965_FIFO_SIZE - entries; - if (srwm < 0) - srwm = 1; - srwm &= 0x1ff; - drm_dbg_kms(&dev_priv->drm, - "self-refresh entries: %d, wm: %d\n", - entries, srwm); - - entries = intel_wm_method2(pixel_rate, htotal, - crtc->base.cursor->state->crtc_w, 4, - sr_latency_ns / 100); - entries = DIV_ROUND_UP(entries, - i965_cursor_wm_info.cacheline_size) + - i965_cursor_wm_info.guard_size; - - cursor_sr = i965_cursor_wm_info.fifo_size - entries; - if (cursor_sr > i965_cursor_wm_info.max_wm) - cursor_sr = i965_cursor_wm_info.max_wm; - - drm_dbg_kms(&dev_priv->drm, - "self-refresh watermark: display plane %d " - "cursor %d\n", srwm, cursor_sr); - - cxsr_enabled = true; - } else { - cxsr_enabled = false; - /* Turn off self refresh if both pipes are enabled */ - intel_set_memory_cxsr(dev_priv, false); - } - - drm_dbg_kms(&dev_priv->drm, - "Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", - srwm); - - /* 965 has limitations... */ - intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) | - FW_WM(8, CURSORB) | - FW_WM(8, PLANEB) | - FW_WM(8, PLANEA)); - intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) | - FW_WM(8, PLANEC_OLD)); - /* update cursor SR watermark */ - intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); - - if (cxsr_enabled) - intel_set_memory_cxsr(dev_priv, true); -} - -#undef FW_WM - -static struct intel_crtc *intel_crtc_for_plane(struct drm_i915_private *i915, - enum i9xx_plane_id i9xx_plane) -{ - struct intel_plane *plane; - - for_each_intel_plane(&i915->drm, plane) { - if (plane->id == PLANE_PRIMARY && - plane->i9xx_plane == i9xx_plane) - return intel_crtc_for_pipe(i915, plane->pipe); - } - - return NULL; -} - -static void i9xx_update_wm(struct drm_i915_private *dev_priv) -{ - const struct intel_watermark_params *wm_info; - u32 fwater_lo; - u32 fwater_hi; - int cwm, srwm = 1; - int fifo_size; - int planea_wm, planeb_wm; - struct intel_crtc *crtc; - - if (IS_I945GM(dev_priv)) - wm_info = &i945_wm_info; - else if (DISPLAY_VER(dev_priv) != 2) - wm_info = &i915_wm_info; - else - wm_info = &i830_a_wm_info; - - if (DISPLAY_VER(dev_priv) == 2) - fifo_size = i830_get_fifo_size(dev_priv, PLANE_A); - else - fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A); - crtc = intel_crtc_for_plane(dev_priv, PLANE_A); - if (intel_crtc_active(crtc)) { - const struct drm_framebuffer *fb = - crtc->base.primary->state->fb; - int cpp; - - if (DISPLAY_VER(dev_priv) == 2) - cpp = 4; - else - cpp = fb->format->cpp[0]; - - planea_wm = intel_calculate_wm(crtc->config->pixel_rate, - wm_info, fifo_size, cpp, - pessimal_latency_ns); - } else { - planea_wm = fifo_size - wm_info->guard_size; - if (planea_wm > (long)wm_info->max_wm) - planea_wm = wm_info->max_wm; - } - - if (DISPLAY_VER(dev_priv) == 2) - wm_info = &i830_bc_wm_info; - - if (DISPLAY_VER(dev_priv) == 2) - fifo_size = i830_get_fifo_size(dev_priv, PLANE_B); - else - fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B); - crtc = intel_crtc_for_plane(dev_priv, PLANE_B); - if (intel_crtc_active(crtc)) { - const struct drm_framebuffer *fb = - 
crtc->base.primary->state->fb; - int cpp; - - if (DISPLAY_VER(dev_priv) == 2) - cpp = 4; - else - cpp = fb->format->cpp[0]; - - planeb_wm = intel_calculate_wm(crtc->config->pixel_rate, - wm_info, fifo_size, cpp, - pessimal_latency_ns); - } else { - planeb_wm = fifo_size - wm_info->guard_size; - if (planeb_wm > (long)wm_info->max_wm) - planeb_wm = wm_info->max_wm; - } - - drm_dbg_kms(&dev_priv->drm, - "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); - - crtc = single_enabled_crtc(dev_priv); - if (IS_I915GM(dev_priv) && crtc) { - struct drm_i915_gem_object *obj; - - obj = intel_fb_obj(crtc->base.primary->state->fb); - - /* self-refresh seems busted with untiled */ - if (!i915_gem_object_is_tiled(obj)) - crtc = NULL; - } - - /* - * Overlay gets an aggressive default since video jitter is bad. - */ - cwm = 2; - - /* Play safe and disable self-refresh before adjusting watermarks. */ - intel_set_memory_cxsr(dev_priv, false); - - /* Calc sr entries for one plane configs */ - if (HAS_FW_BLC(dev_priv) && crtc) { - /* self-refresh has much higher latency */ - static const int sr_latency_ns = 6000; - const struct drm_display_mode *pipe_mode = - &crtc->config->hw.pipe_mode; - const struct drm_framebuffer *fb = - crtc->base.primary->state->fb; - int pixel_rate = crtc->config->pixel_rate; - int htotal = pipe_mode->crtc_htotal; - int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; - int cpp; - int entries; - - if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) - cpp = 4; - else - cpp = fb->format->cpp[0]; - - entries = intel_wm_method2(pixel_rate, htotal, width, cpp, - sr_latency_ns / 100); - entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); - drm_dbg_kms(&dev_priv->drm, - "self-refresh entries: %d\n", entries); - srwm = wm_info->fifo_size - entries; - if (srwm < 0) - srwm = 1; - - if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, - FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); - else - intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f); - } - - drm_dbg_kms(&dev_priv->drm, - "Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", - planea_wm, planeb_wm, cwm, srwm); - - fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); - fwater_hi = (cwm & 0x1f); - - /* Set request length to 8 cachelines per fetch */ - fwater_lo = fwater_lo | (1 << 24) | (1 << 8); - fwater_hi = fwater_hi | (1 << 8); - - intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); - intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi); - - if (crtc) - intel_set_memory_cxsr(dev_priv, true); -} - -static void i845_update_wm(struct drm_i915_private *dev_priv) -{ - struct intel_crtc *crtc; - u32 fwater_lo; - int planea_wm; - - crtc = single_enabled_crtc(dev_priv); - if (crtc == NULL) - return; - - planea_wm = intel_calculate_wm(crtc->config->pixel_rate, - &i845_wm_info, - i845_get_fifo_size(dev_priv, PLANE_A), - 4, pessimal_latency_ns); - fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff; - fwater_lo |= (3<<8) | planea_wm; - - drm_dbg_kms(&dev_priv->drm, - "Setting FIFO watermarks - A: %d\n", planea_wm); - - intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); -} - -/* latency must be in 0.1us units. */ -static unsigned int ilk_wm_method1(unsigned int pixel_rate, - unsigned int cpp, - unsigned int latency) -{ - unsigned int ret; - - ret = intel_wm_method1(pixel_rate, cpp, latency); - ret = DIV_ROUND_UP(ret, 64) + 2; - - return ret; -} - -/* latency must be in 0.1us units. 
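The self-refresh math in the i965/i9xx paths above and the ILK helpers around here all lean on the same two "method" formulas, whose bodies are defined elsewhere in the driver. A rough stand-alone sketch of the idea, assuming the pixel rate is in kHz and the latency in 0.1 us units (the unit the comments call out); the _sketch names are made up, and the real intel_wm_method1()/intel_wm_method2() differ in rounding and overflow handling. The ILK wrappers then divide the resulting byte count by 64 and add two, as ilk_wm_method1() and ilk_wm_method2() show.

/* Sketch only: pixel_rate in kHz, latency in 0.1 us units, cpp in bytes. */
static unsigned int wm_method1_sketch(unsigned int pixel_rate,
                                      unsigned int cpp,
                                      unsigned int latency)
{
        /* bytes fetched while one memory latency window elapses */
        unsigned long long bytes =
                (unsigned long long)pixel_rate * cpp * latency;

        return (bytes + 10000 - 1) / 10000;
}

static unsigned int wm_method2_sketch(unsigned int pixel_rate,
                                      unsigned int htotal,
                                      unsigned int width,
                                      unsigned int cpp,
                                      unsigned int latency)
{
        /* whole lines scanned out during the latency window, plus one */
        unsigned int lines = latency * pixel_rate / (htotal * 10000);

        return (lines + 1) * width * cpp;
}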
*/ -static unsigned int ilk_wm_method2(unsigned int pixel_rate, - unsigned int htotal, - unsigned int width, - unsigned int cpp, - unsigned int latency) -{ - unsigned int ret; - - ret = intel_wm_method2(pixel_rate, htotal, - width, cpp, latency); - ret = DIV_ROUND_UP(ret, 64) + 2; - - return ret; -} - -static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp) -{ - /* - * Neither of these should be possible since this function shouldn't be - * called if the CRTC is off or the plane is invisible. But let's be - * extra paranoid to avoid a potential divide-by-zero if we screw up - * elsewhere in the driver. - */ - if (WARN_ON(!cpp)) - return 0; - if (WARN_ON(!horiz_pixels)) - return 0; - - return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2; -} - -struct ilk_wm_maximums { - u16 pri; - u16 spr; - u16 cur; - u16 fbc; -}; - -/* - * For both WM_PIPE and WM_LP. - * mem_value must be in 0.1us units. - */ -static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - u32 mem_value, bool is_lp) -{ - u32 method1, method2; - int cpp; - - if (mem_value == 0) - return U32_MAX; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - return 0; - - cpp = plane_state->hw.fb->format->cpp[0]; - - method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); - - if (!is_lp) - return method1; - - method2 = ilk_wm_method2(crtc_state->pixel_rate, - crtc_state->hw.pipe_mode.crtc_htotal, - drm_rect_width(&plane_state->uapi.src) >> 16, - cpp, mem_value); - - return min(method1, method2); -} - -/* - * For both WM_PIPE and WM_LP. - * mem_value must be in 0.1us units. - */ -static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - u32 mem_value) -{ - u32 method1, method2; - int cpp; - - if (mem_value == 0) - return U32_MAX; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - return 0; - - cpp = plane_state->hw.fb->format->cpp[0]; - - method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); - method2 = ilk_wm_method2(crtc_state->pixel_rate, - crtc_state->hw.pipe_mode.crtc_htotal, - drm_rect_width(&plane_state->uapi.src) >> 16, - cpp, mem_value); - return min(method1, method2); -} - -/* - * For both WM_PIPE and WM_LP. - * mem_value must be in 0.1us units. - */ -static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - u32 mem_value) -{ - int cpp; - - if (mem_value == 0) - return U32_MAX; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - return 0; - - cpp = plane_state->hw.fb->format->cpp[0]; - - return ilk_wm_method2(crtc_state->pixel_rate, - crtc_state->hw.pipe_mode.crtc_htotal, - drm_rect_width(&plane_state->uapi.src) >> 16, - cpp, mem_value); -} - -/* Only for WM_LP. 
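A worked example for ilk_wm_fbc() defined above, with made-up numbers: pri_val = 128 cachelines on a 1920-pixel-wide plane at 4 bytes per pixel gives DIV_ROUND_UP(128 * 64, 1920 * 4) + 2 = DIV_ROUND_UP(8192, 7680) + 2 = 4.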
*/ -static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - u32 pri_val) -{ - int cpp; - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - return 0; - - cpp = plane_state->hw.fb->format->cpp[0]; - - return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16, - cpp); -} - -static unsigned int -ilk_display_fifo_size(const struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 8) - return 3072; - else if (DISPLAY_VER(dev_priv) >= 7) - return 768; - else - return 512; -} - -static unsigned int -ilk_plane_wm_reg_max(const struct drm_i915_private *dev_priv, - int level, bool is_sprite) -{ - if (DISPLAY_VER(dev_priv) >= 8) - /* BDW primary/sprite plane watermarks */ - return level == 0 ? 255 : 2047; - else if (DISPLAY_VER(dev_priv) >= 7) - /* IVB/HSW primary/sprite plane watermarks */ - return level == 0 ? 127 : 1023; - else if (!is_sprite) - /* ILK/SNB primary plane watermarks */ - return level == 0 ? 127 : 511; - else - /* ILK/SNB sprite plane watermarks */ - return level == 0 ? 63 : 255; -} - -static unsigned int -ilk_cursor_wm_reg_max(const struct drm_i915_private *dev_priv, int level) -{ - if (DISPLAY_VER(dev_priv) >= 7) - return level == 0 ? 63 : 255; - else - return level == 0 ? 31 : 63; -} - -static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 8) - return 31; - else - return 15; -} - -/* Calculate the maximum primary/sprite plane watermark */ -static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, - int level, - const struct intel_wm_config *config, - enum intel_ddb_partitioning ddb_partitioning, - bool is_sprite) -{ - unsigned int fifo_size = ilk_display_fifo_size(dev_priv); - - /* if sprites aren't enabled, sprites get nothing */ - if (is_sprite && !config->sprites_enabled) - return 0; - - /* HSW allows LP1+ watermarks even with multiple pipes */ - if (level == 0 || config->num_pipes_active > 1) { - fifo_size /= INTEL_NUM_PIPES(dev_priv); - - /* - * For some reason the non self refresh - * FIFO size is only half of the self - * refresh FIFO size on ILK/SNB. 
- */ - if (DISPLAY_VER(dev_priv) <= 6) - fifo_size /= 2; - } - - if (config->sprites_enabled) { - /* level 0 is always calculated with 1:1 split */ - if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { - if (is_sprite) - fifo_size *= 5; - fifo_size /= 6; - } else { - fifo_size /= 2; - } - } - - /* clamp to max that the registers can hold */ - return min(fifo_size, ilk_plane_wm_reg_max(dev_priv, level, is_sprite)); -} - -/* Calculate the maximum cursor plane watermark */ -static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv, - int level, - const struct intel_wm_config *config) -{ - /* HSW LP1+ watermarks w/ multiple pipes */ - if (level > 0 && config->num_pipes_active > 1) - return 64; - - /* otherwise just report max that registers can hold */ - return ilk_cursor_wm_reg_max(dev_priv, level); -} - -static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv, - int level, - const struct intel_wm_config *config, - enum intel_ddb_partitioning ddb_partitioning, - struct ilk_wm_maximums *max) -{ - max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false); - max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true); - max->cur = ilk_cursor_wm_max(dev_priv, level, config); - max->fbc = ilk_fbc_wm_reg_max(dev_priv); -} - -static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv, - int level, - struct ilk_wm_maximums *max) -{ - max->pri = ilk_plane_wm_reg_max(dev_priv, level, false); - max->spr = ilk_plane_wm_reg_max(dev_priv, level, true); - max->cur = ilk_cursor_wm_reg_max(dev_priv, level); - max->fbc = ilk_fbc_wm_reg_max(dev_priv); -} - -static bool ilk_validate_wm_level(int level, - const struct ilk_wm_maximums *max, - struct intel_wm_level *result) -{ - bool ret; - - /* already determined to be invalid? */ - if (!result->enable) - return false; - - result->enable = result->pri_val <= max->pri && - result->spr_val <= max->spr && - result->cur_val <= max->cur; - - ret = result->enable; - - /* - * HACK until we can pre-compute everything, - * and thus fail gracefully if LP0 watermarks - * are exceeded... 
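A worked example for ilk_plane_wm_max() above, using the constants from the code: on IVB (768-entry FIFO) with a single active pipe, sprites enabled and an LP1+ level, the FIFO is not divided per pipe; a 5/6 DDB split then gives the sprite 768 * 5 / 6 = 640 entries and the primary 768 / 6 = 128, both under the 1023 register maximum, while the default 1/2 split gives each plane 384.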
- */ - if (level == 0 && !result->enable) { - if (result->pri_val > max->pri) - DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n", - level, result->pri_val, max->pri); - if (result->spr_val > max->spr) - DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n", - level, result->spr_val, max->spr); - if (result->cur_val > max->cur) - DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n", - level, result->cur_val, max->cur); - - result->pri_val = min_t(u32, result->pri_val, max->pri); - result->spr_val = min_t(u32, result->spr_val, max->spr); - result->cur_val = min_t(u32, result->cur_val, max->cur); - result->enable = true; - } - - return ret; -} - -static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, - const struct intel_crtc *crtc, - int level, - struct intel_crtc_state *crtc_state, - const struct intel_plane_state *pristate, - const struct intel_plane_state *sprstate, - const struct intel_plane_state *curstate, - struct intel_wm_level *result) -{ - u16 pri_latency = dev_priv->display.wm.pri_latency[level]; - u16 spr_latency = dev_priv->display.wm.spr_latency[level]; - u16 cur_latency = dev_priv->display.wm.cur_latency[level]; - - /* WM1+ latency values stored in 0.5us units */ - if (level > 0) { - pri_latency *= 5; - spr_latency *= 5; - cur_latency *= 5; - } - - if (pristate) { - result->pri_val = ilk_compute_pri_wm(crtc_state, pristate, - pri_latency, level); - result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val); - } - - if (sprstate) - result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency); - - if (curstate) - result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency); - - result->enable = true; -} - -static void hsw_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) -{ - u64 sskpd; - - sskpd = intel_uncore_read64(&i915->uncore, MCH_SSKPD); - - wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd); - if (wm[0] == 0) - wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd); - wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd); - wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd); - wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd); - wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd); -} - -static void snb_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) -{ - u32 sskpd; - - sskpd = intel_uncore_read(&i915->uncore, MCH_SSKPD); - - wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd); - wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd); - wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd); - wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd); -} - -static void ilk_read_wm_latency(struct drm_i915_private *i915, u16 wm[]) -{ - u32 mltr; - - mltr = intel_uncore_read(&i915->uncore, MLTR_ILK); - - /* ILK primary LP0 latency is 700 ns */ - wm[0] = 7; - wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr); - wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr); -} - -static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv, - u16 wm[5]) -{ - /* ILK sprite LP0 latency is 1300 ns */ - if (DISPLAY_VER(dev_priv) == 5) - wm[0] = 13; -} - -static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv, - u16 wm[5]) -{ - /* ILK cursor LP0 latency is 1300 ns */ - if (DISPLAY_VER(dev_priv) == 5) - wm[0] = 13; -} - -int ilk_wm_max_level(const struct drm_i915_private *dev_priv) -{ - /* how many WM levels are we expecting */ - if (HAS_HW_SAGV_WM(dev_priv)) - return 5; - else if (DISPLAY_VER(dev_priv) >= 9) - return 7; - else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - return 4; - else if (DISPLAY_VER(dev_priv) >= 
6) - return 3; - else - return 2; -} - -void intel_print_wm_latency(struct drm_i915_private *dev_priv, - const char *name, const u16 wm[]) -{ - int level, max_level = ilk_wm_max_level(dev_priv); - - for (level = 0; level <= max_level; level++) { - unsigned int latency = wm[level]; - - if (latency == 0) { - drm_dbg_kms(&dev_priv->drm, - "%s WM%d latency not provided\n", - name, level); - continue; - } - - /* - * - latencies are in us on gen9. - * - before then, WM1+ latency values are in 0.5us units - */ - if (DISPLAY_VER(dev_priv) >= 9) - latency *= 10; - else if (level > 0) - latency *= 5; - - drm_dbg_kms(&dev_priv->drm, - "%s WM%d latency %u (%u.%u usec)\n", name, level, - wm[level], latency / 10, latency % 10); - } -} - -static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, - u16 wm[5], u16 min) -{ - int level, max_level = ilk_wm_max_level(dev_priv); - - if (wm[0] >= min) - return false; - - wm[0] = max(wm[0], min); - for (level = 1; level <= max_level; level++) - wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5)); - - return true; -} - -static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) -{ - bool changed; - - /* - * The BIOS provided WM memory latency values are often - * inadequate for high resolution displays. Adjust them. - */ - changed = ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.pri_latency, 12); - changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.spr_latency, 12); - changed |= ilk_increase_wm_latency(dev_priv, dev_priv->display.wm.cur_latency, 12); - - if (!changed) - return; - - drm_dbg_kms(&dev_priv->drm, - "WM latency values increased to avoid potential underruns\n"); - intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); -} - -static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) -{ - /* - * On some SNB machines (Thinkpad X220 Tablet at least) - * LP3 usage can cause vblank interrupts to be lost. - * The DEIIR bit will go high but it looks like the CPU - * never gets interrupted. - * - * It's not clear whether other interrupt source could - * be affected or if this is somehow limited to vblank - * interrupts only. To play it safe we disable LP3 - * watermarks entirely. 
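To make the unit handling above concrete: on SNB a raw wm[2] value of 4 is printed as 2.0 usec, since WM1+ entries are stored in 0.5 us steps, and snb_wm_latency_quirk() passing a floor of 12 raises WM0 to at least 1.2 us and every WM1+ entry to at least DIV_ROUND_UP(12, 5) = 3, i.e. 1.5 us.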
- */ - if (dev_priv->display.wm.pri_latency[3] == 0 && - dev_priv->display.wm.spr_latency[3] == 0 && - dev_priv->display.wm.cur_latency[3] == 0) - return; - - dev_priv->display.wm.pri_latency[3] = 0; - dev_priv->display.wm.spr_latency[3] = 0; - dev_priv->display.wm.cur_latency[3] = 0; - - drm_dbg_kms(&dev_priv->drm, - "LP3 watermarks disabled due to potential for lost interrupts\n"); - intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); -} - -static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) -{ - if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) - hsw_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); - else if (DISPLAY_VER(dev_priv) >= 6) - snb_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); - else - ilk_read_wm_latency(dev_priv, dev_priv->display.wm.pri_latency); - - memcpy(dev_priv->display.wm.spr_latency, dev_priv->display.wm.pri_latency, - sizeof(dev_priv->display.wm.pri_latency)); - memcpy(dev_priv->display.wm.cur_latency, dev_priv->display.wm.pri_latency, - sizeof(dev_priv->display.wm.pri_latency)); - - intel_fixup_spr_wm_latency(dev_priv, dev_priv->display.wm.spr_latency); - intel_fixup_cur_wm_latency(dev_priv, dev_priv->display.wm.cur_latency); - - intel_print_wm_latency(dev_priv, "Primary", dev_priv->display.wm.pri_latency); - intel_print_wm_latency(dev_priv, "Sprite", dev_priv->display.wm.spr_latency); - intel_print_wm_latency(dev_priv, "Cursor", dev_priv->display.wm.cur_latency); - - if (DISPLAY_VER(dev_priv) == 6) { - snb_wm_latency_quirk(dev_priv); - snb_wm_lp3_irq_quirk(dev_priv); - } -} - -static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv, - struct intel_pipe_wm *pipe_wm) -{ - /* LP0 watermark maximums depend on this pipe alone */ - const struct intel_wm_config config = { - .num_pipes_active = 1, - .sprites_enabled = pipe_wm->sprites_enabled, - .sprites_scaled = pipe_wm->sprites_scaled, - }; - struct ilk_wm_maximums max; - - /* LP0 watermarks always use 1/2 DDB partitioning */ - ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max); - - /* At least LP0 must be valid */ - if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { - drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n"); - return false; - } - - return true; -} - -/* Compute new watermarks for the pipe */ -static int ilk_compute_pipe_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - struct intel_pipe_wm *pipe_wm; - struct intel_plane *plane; - const struct intel_plane_state *plane_state; - const struct intel_plane_state *pristate = NULL; - const struct intel_plane_state *sprstate = NULL; - const struct intel_plane_state *curstate = NULL; - int level, max_level = ilk_wm_max_level(dev_priv), usable_level; - struct ilk_wm_maximums max; - - pipe_wm = &crtc_state->wm.ilk.optimal; - - intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { - if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) - pristate = plane_state; - else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY) - sprstate = plane_state; - else if (plane->base.type == DRM_PLANE_TYPE_CURSOR) - curstate = plane_state; - } - - pipe_wm->pipe_enabled = crtc_state->hw.active; - pipe_wm->sprites_enabled = 
crtc_state->active_planes & BIT(PLANE_SPRITE0); - pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0); - - usable_level = max_level; - - /* ILK/SNB: LP2+ watermarks only w/o sprites */ - if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled) - usable_level = 1; - - /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ - if (pipe_wm->sprites_scaled) - usable_level = 0; - - memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); - ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state, - pristate, sprstate, curstate, &pipe_wm->wm[0]); - - if (!ilk_validate_pipe_wm(dev_priv, pipe_wm)) - return -EINVAL; - - ilk_compute_wm_reg_maximums(dev_priv, 1, &max); - - for (level = 1; level <= usable_level; level++) { - struct intel_wm_level *wm = &pipe_wm->wm[level]; - - ilk_compute_wm_level(dev_priv, crtc, level, crtc_state, - pristate, sprstate, curstate, wm); - - /* - * Disable any watermark level that exceeds the - * register maximums since such watermarks are - * always invalid. - */ - if (!ilk_validate_wm_level(level, &max, wm)) { - memset(wm, 0, sizeof(*wm)); - break; - } - } - - return 0; -} - -/* - * Build a set of 'intermediate' watermark values that satisfy both the old - * state and the new state. These can be programmed to the hardware - * immediately. - */ -static int ilk_compute_intermediate_wm(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc_state *new_crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - struct intel_pipe_wm *a = &new_crtc_state->wm.ilk.intermediate; - const struct intel_pipe_wm *b = &old_crtc_state->wm.ilk.optimal; - int level, max_level = ilk_wm_max_level(dev_priv); - - /* - * Start with the final, target watermarks, then combine with the - * currently active watermarks to get values that are safe both before - * and after the vblank. - */ - *a = new_crtc_state->wm.ilk.optimal; - if (!new_crtc_state->hw.active || - intel_crtc_needs_modeset(new_crtc_state) || - state->skip_intermediate_wm) - return 0; - - a->pipe_enabled |= b->pipe_enabled; - a->sprites_enabled |= b->sprites_enabled; - a->sprites_scaled |= b->sprites_scaled; - - for (level = 0; level <= max_level; level++) { - struct intel_wm_level *a_wm = &a->wm[level]; - const struct intel_wm_level *b_wm = &b->wm[level]; - - a_wm->enable &= b_wm->enable; - a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val); - a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val); - a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val); - a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val); - } - - /* - * We need to make sure that these merged watermark values are - * actually a valid configuration themselves. If they're not, - * there's no safe way to transition from the old state to - * the new state, so we need to fail the atomic transaction. - */ - if (!ilk_validate_pipe_wm(dev_priv, a)) - return -EINVAL; - - /* - * If our intermediate WM are identical to the final WM, then we can - * omit the post-vblank programming; only update if it's different. - */ - if (memcmp(a, &new_crtc_state->wm.ilk.optimal, sizeof(*a)) != 0) - new_crtc_state->wm.need_postvbl_update = true; - - return 0; -} - -/* - * Merge the watermarks from all active pipes for a specific level. 
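The intermediate merge above is deliberately conservative: if, say, the old state used pri_val 40 at some level and the new state only needs 25, the intermediate keeps 40, and a level remains enabled only if it was valid in both states, so whatever is programmed before the vblank stays safe for whichever configuration is still scanning out.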
- */ -static void ilk_merge_wm_level(struct drm_i915_private *dev_priv, - int level, - struct intel_wm_level *ret_wm) -{ - const struct intel_crtc *crtc; - - ret_wm->enable = true; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - const struct intel_pipe_wm *active = &crtc->wm.active.ilk; - const struct intel_wm_level *wm = &active->wm[level]; - - if (!active->pipe_enabled) - continue; - - /* - * The watermark values may have been used in the past, - * so we must maintain them in the registers for some - * time even if the level is now disabled. - */ - if (!wm->enable) - ret_wm->enable = false; - - ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); - ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); - ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); - ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); - } -} - -/* - * Merge all low power watermarks for all active pipes. - */ -static void ilk_wm_merge(struct drm_i915_private *dev_priv, - const struct intel_wm_config *config, - const struct ilk_wm_maximums *max, - struct intel_pipe_wm *merged) -{ - int level, max_level = ilk_wm_max_level(dev_priv); - int last_enabled_level = max_level; - - /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ - if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) && - config->num_pipes_active > 1) - last_enabled_level = 0; - - /* ILK: FBC WM must be disabled always */ - merged->fbc_wm_enabled = DISPLAY_VER(dev_priv) >= 6; - - /* merge each WM1+ level */ - for (level = 1; level <= max_level; level++) { - struct intel_wm_level *wm = &merged->wm[level]; - - ilk_merge_wm_level(dev_priv, level, wm); - - if (level > last_enabled_level) - wm->enable = false; - else if (!ilk_validate_wm_level(level, max, wm)) - /* make sure all following levels get disabled */ - last_enabled_level = level - 1; - - /* - * The spec says it is preferred to disable - * FBC WMs instead of disabling a WM level. - */ - if (wm->fbc_val > max->fbc) { - if (wm->enable) - merged->fbc_wm_enabled = false; - wm->fbc_val = 0; - } - } - - /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ - if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) && - dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) { - for (level = 2; level <= max_level; level++) { - struct intel_wm_level *wm = &merged->wm[level]; - - wm->enable = false; - } - } -} - -static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) -{ - /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ - return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); -} - -/* The value we need to program into the WM_LPx latency field */ -static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv, - int level) -{ - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - return 2 * level; - else - return dev_priv->display.wm.pri_latency[level]; -} - -static void ilk_compute_wm_results(struct drm_i915_private *dev_priv, - const struct intel_pipe_wm *merged, - enum intel_ddb_partitioning partitioning, - struct ilk_wm_values *results) -{ - struct intel_crtc *crtc; - int level, wm_lp; - - results->enable_fbc_wm = merged->fbc_wm_enabled; - results->partitioning = partitioning; - - /* LP1+ register values */ - for (wm_lp = 1; wm_lp <= 3; wm_lp++) { - const struct intel_wm_level *r; - - level = ilk_wm_lp_to_level(wm_lp, merged); - - r = &merged->wm[level]; - - /* - * Maintain the watermark values even if the level is - * disabled. Doing otherwise could cause underruns. 
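Concretely, for the helpers above: when wm[4] is enabled (possible only on HSW/BDW, where ilk_wm_max_level() returns 4), ilk_wm_lp_to_level() maps WM_LP1/2/3 to levels 1, 3 and 4; otherwise they map straight to levels 1, 2 and 3. On HSW/BDW ilk_wm_lp_latency() likewise programs 2 * level into the WM_LPx latency field rather than the raw pri_latency value.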
- */ - results->wm_lp[wm_lp - 1] = - WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) | - WM_LP_PRIMARY(r->pri_val) | - WM_LP_CURSOR(r->cur_val); - - if (r->enable) - results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE; - - if (DISPLAY_VER(dev_priv) >= 8) - results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val); - else - results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val); - - results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val); - - /* - * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the - * level is disabled. Doing otherwise could cause underruns. - */ - if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) { - drm_WARN_ON(&dev_priv->drm, wm_lp != 1); - results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE; - } - } - - /* LP0 register values */ - for_each_intel_crtc(&dev_priv->drm, crtc) { - enum pipe pipe = crtc->pipe; - const struct intel_pipe_wm *pipe_wm = &crtc->wm.active.ilk; - const struct intel_wm_level *r = &pipe_wm->wm[0]; - - if (drm_WARN_ON(&dev_priv->drm, !r->enable)) - continue; - - results->wm_pipe[pipe] = - WM0_PIPE_PRIMARY(r->pri_val) | - WM0_PIPE_SPRITE(r->spr_val) | - WM0_PIPE_CURSOR(r->cur_val); - } -} - -/* Find the result with the highest level enabled. Check for enable_fbc_wm in - * case both are at the same level. Prefer r1 in case they're the same. */ -static struct intel_pipe_wm * -ilk_find_best_result(struct drm_i915_private *dev_priv, - struct intel_pipe_wm *r1, - struct intel_pipe_wm *r2) -{ - int level, max_level = ilk_wm_max_level(dev_priv); - int level1 = 0, level2 = 0; - - for (level = 1; level <= max_level; level++) { - if (r1->wm[level].enable) - level1 = level; - if (r2->wm[level].enable) - level2 = level; - } - - if (level1 == level2) { - if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) - return r2; - else - return r1; - } else if (level1 > level2) { - return r1; - } else { - return r2; - } -} - -/* dirty bits used to track which watermarks need changes */ -#define WM_DIRTY_PIPE(pipe) (1 << (pipe)) -#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) -#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) -#define WM_DIRTY_FBC (1 << 24) -#define WM_DIRTY_DDB (1 << 25) - -static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv, - const struct ilk_wm_values *old, - const struct ilk_wm_values *new) -{ - unsigned int dirty = 0; - enum pipe pipe; - int wm_lp; - - for_each_pipe(dev_priv, pipe) { - if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { - dirty |= WM_DIRTY_PIPE(pipe); - /* Must disable LP1+ watermarks too */ - dirty |= WM_DIRTY_LP_ALL; - } - } - - if (old->enable_fbc_wm != new->enable_fbc_wm) { - dirty |= WM_DIRTY_FBC; - /* Must disable LP1+ watermarks too */ - dirty |= WM_DIRTY_LP_ALL; - } - - if (old->partitioning != new->partitioning) { - dirty |= WM_DIRTY_DDB; - /* Must disable LP1+ watermarks too */ - dirty |= WM_DIRTY_LP_ALL; - } - - /* LP1+ watermarks already deemed dirty, no need to continue */ - if (dirty & WM_DIRTY_LP_ALL) - return dirty; - - /* Find the lowest numbered LP1+ watermark in need of an update... 
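Two details above are easy to miss: ilk_find_best_result() returns whichever merge enables the higher LP level, preferring r1 on a tie unless only r2 still has the FBC watermark enabled; and any pipe WM0, FBC-enable or DDB-partitioning change also sets WM_DIRTY_LP_ALL, so the LP1+ watermarks are always taken down and rewritten around such an update.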
*/ - for (wm_lp = 1; wm_lp <= 3; wm_lp++) { - if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || - old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) - break; - } - - /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ - for (; wm_lp <= 3; wm_lp++) - dirty |= WM_DIRTY_LP(wm_lp); - - return dirty; -} - -static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, - unsigned int dirty) -{ - struct ilk_wm_values *previous = &dev_priv->display.wm.hw; - bool changed = false; - - if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) { - previous->wm_lp[2] &= ~WM_LP_ENABLE; - intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]); - changed = true; - } - if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) { - previous->wm_lp[1] &= ~WM_LP_ENABLE; - intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]); - changed = true; - } - if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) { - previous->wm_lp[0] &= ~WM_LP_ENABLE; - intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]); - changed = true; - } - - /* - * Don't touch WM_LP_SPRITE_ENABLE here. - * Doing so could cause underruns. - */ - - return changed; -} - -/* - * The spec says we shouldn't write when we don't need, because every write - * causes WMs to be re-evaluated, expending some power. - */ -static void ilk_write_wm_values(struct drm_i915_private *dev_priv, - struct ilk_wm_values *results) -{ - struct ilk_wm_values *previous = &dev_priv->display.wm.hw; - unsigned int dirty; - - dirty = ilk_compute_wm_dirty(dev_priv, previous, results); - if (!dirty) - return; - - _ilk_disable_lp_wm(dev_priv, dirty); - - if (dirty & WM_DIRTY_PIPE(PIPE_A)) - intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]); - if (dirty & WM_DIRTY_PIPE(PIPE_B)) - intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]); - if (dirty & WM_DIRTY_PIPE(PIPE_C)) - intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]); - - if (dirty & WM_DIRTY_DDB) { - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - intel_uncore_rmw(&dev_priv->uncore, WM_MISC, WM_MISC_DATA_PARTITION_5_6, - results->partitioning == INTEL_DDB_PART_1_2 ? 0 : - WM_MISC_DATA_PARTITION_5_6); - else - intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL2, DISP_DATA_PARTITION_5_6, - results->partitioning == INTEL_DDB_PART_1_2 ? 0 : - DISP_DATA_PARTITION_5_6); - } - - if (dirty & WM_DIRTY_FBC) - intel_uncore_rmw(&dev_priv->uncore, DISP_ARB_CTL, DISP_FBC_WM_DIS, - results->enable_fbc_wm ? 
0 : DISP_FBC_WM_DIS); - - if (dirty & WM_DIRTY_LP(1) && - previous->wm_lp_spr[0] != results->wm_lp_spr[0]) - intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]); - - if (DISPLAY_VER(dev_priv) >= 7) { - if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) - intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]); - if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) - intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]); - } - - if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) - intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]); - if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) - intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]); - if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) - intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]); - - dev_priv->display.wm.hw = *results; -} - -bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv) -{ - return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); -} - -static void ilk_compute_wm_config(struct drm_i915_private *dev_priv, - struct intel_wm_config *config) -{ - struct intel_crtc *crtc; - - /* Compute the currently _active_ config */ - for_each_intel_crtc(&dev_priv->drm, crtc) { - const struct intel_pipe_wm *wm = &crtc->wm.active.ilk; - - if (!wm->pipe_enabled) - continue; - - config->sprites_enabled |= wm->sprites_enabled; - config->sprites_scaled |= wm->sprites_scaled; - config->num_pipes_active++; - } -} - -static void ilk_program_watermarks(struct drm_i915_private *dev_priv) -{ - struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; - struct ilk_wm_maximums max; - struct intel_wm_config config = {}; - struct ilk_wm_values results = {}; - enum intel_ddb_partitioning partitioning; - - ilk_compute_wm_config(dev_priv, &config); - - ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max); - ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2); - - /* 5/6 split only in single pipe config on IVB+ */ - if (DISPLAY_VER(dev_priv) >= 7 && - config.num_pipes_active == 1 && config.sprites_enabled) { - ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max); - ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6); - - best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6); - } else { - best_lp_wm = &lp_wm_1_2; - } - - partitioning = (best_lp_wm == &lp_wm_1_2) ? 
- INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; - - ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results); - - ilk_write_wm_values(dev_priv, &results); -} - -static void ilk_initial_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - - mutex_lock(&dev_priv->display.wm.wm_mutex); - crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate; - ilk_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->display.wm.wm_mutex); -} - -static void ilk_optimize_watermarks(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_crtc_state *crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - - if (!crtc_state->wm.need_postvbl_update) - return; - - mutex_lock(&dev_priv->display.wm.wm_mutex); - crtc->wm.active.ilk = crtc_state->wm.ilk.optimal; - ilk_program_watermarks(dev_priv); - mutex_unlock(&dev_priv->display.wm.wm_mutex); -} - -static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct ilk_wm_values *hw = &dev_priv->display.wm.hw; - struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); - struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal; - enum pipe pipe = crtc->pipe; - - hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe)); - - memset(active, 0, sizeof(*active)); - - active->pipe_enabled = crtc->active; - - if (active->pipe_enabled) { - u32 tmp = hw->wm_pipe[pipe]; - - /* - * For active pipes LP0 watermark is marked as - * enabled, and LP1+ watermaks as disabled since - * we can't really reverse compute them in case - * multiple pipes are active. - */ - active->wm[0].enable = true; - active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp); - active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp); - active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp); - } else { - int level, max_level = ilk_wm_max_level(dev_priv); - - /* - * For inactive pipes, all watermark levels - * should be marked as enabled but zeroed, - * which is what we'd compute them to. 
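As with the VLV path earlier, programming is split in two phases: ilk_initial_watermarks() commits the intermediate values under wm_mutex before the plane update, and ilk_optimize_watermarks() writes the optimal values after the vblank only when need_postvbl_update was set.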
- */ - for (level = 0; level <= max_level; level++) - active->wm[level].enable = true; - } - - crtc->wm.active.ilk = *active; -} - -#define _FW_WM(value, plane) \ - (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT) -#define _FW_WM_VLV(value, plane) \ - (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT) - -static void g4x_read_wm_values(struct drm_i915_private *dev_priv, - struct g4x_wm_values *wm) -{ - u32 tmp; - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); - wm->sr.plane = _FW_WM(tmp, SR); - wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); - wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB); - wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); - wm->fbc_en = tmp & DSPFW_FBC_SR_EN; - wm->sr.fbc = _FW_WM(tmp, FBC_SR); - wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR); - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEB); - wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); - wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); - wm->hpll_en = tmp & DSPFW_HPLL_SR_EN; - wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); - wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR); - wm->hpll.plane = _FW_WM(tmp, HPLL_SR); -} - -static void vlv_read_wm_values(struct drm_i915_private *dev_priv, - struct vlv_wm_values *wm) -{ - enum pipe pipe; - u32 tmp; - - for_each_pipe(dev_priv, pipe) { - tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe)); - - wm->ddl[pipe].plane[PLANE_PRIMARY] = - (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); - wm->ddl[pipe].plane[PLANE_CURSOR] = - (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); - wm->ddl[pipe].plane[PLANE_SPRITE0] = - (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); - wm->ddl[pipe].plane[PLANE_SPRITE1] = - (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); - } - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); - wm->sr.plane = _FW_WM(tmp, SR); - wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); - wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB); - wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); - wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB); - wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); - wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); - wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); - - if (IS_CHERRYVIEW(dev_priv)) { - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV); - wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV); - wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF); - wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV); - wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC); - wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); - wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; - wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8; - wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8; - wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= 
_FW_WM(tmp, PLANEC_HI) << 8; - wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; - wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; - wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; - wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; - wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; - } else { - tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7); - wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); - - tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); - wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; - wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; - wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; - wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8; - wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8; - wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; - wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; - } -} - -#undef _FW_WM -#undef _FW_WM_VLV - -void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) -{ - struct g4x_wm_values *wm = &dev_priv->display.wm.g4x; - struct intel_crtc *crtc; - - g4x_read_wm_values(dev_priv, wm); - - wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; - - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - struct g4x_wm_state *active = &crtc->wm.active.g4x; - struct g4x_pipe_wm *raw; - enum pipe pipe = crtc->pipe; - enum plane_id plane_id; - int level, max_level; - - active->cxsr = wm->cxsr; - active->hpll_en = wm->hpll_en; - active->fbc_en = wm->fbc_en; - - active->sr = wm->sr; - active->hpll = wm->hpll; - - for_each_plane_id_on_crtc(crtc, plane_id) { - active->wm.plane[plane_id] = - wm->pipe[pipe].plane[plane_id]; - } - - if (wm->cxsr && wm->hpll_en) - max_level = G4X_WM_LEVEL_HPLL; - else if (wm->cxsr) - max_level = G4X_WM_LEVEL_SR; - else - max_level = G4X_WM_LEVEL_NORMAL; - - level = G4X_WM_LEVEL_NORMAL; - raw = &crtc_state->wm.g4x.raw[level]; - for_each_plane_id_on_crtc(crtc, plane_id) - raw->plane[plane_id] = active->wm.plane[plane_id]; - - level = G4X_WM_LEVEL_SR; - if (level > max_level) - goto out; - - raw = &crtc_state->wm.g4x.raw[level]; - raw->plane[PLANE_PRIMARY] = active->sr.plane; - raw->plane[PLANE_CURSOR] = active->sr.cursor; - raw->plane[PLANE_SPRITE0] = 0; - raw->fbc = active->sr.fbc; - - level = G4X_WM_LEVEL_HPLL; - if (level > max_level) - goto out; - - raw = &crtc_state->wm.g4x.raw[level]; - raw->plane[PLANE_PRIMARY] = active->hpll.plane; - raw->plane[PLANE_CURSOR] = active->hpll.cursor; - raw->plane[PLANE_SPRITE0] = 0; - raw->fbc = active->hpll.fbc; - - level++; - out: - for_each_plane_id_on_crtc(crtc, plane_id) - g4x_raw_plane_wm_set(crtc_state, level, - plane_id, USHRT_MAX); - g4x_raw_fbc_wm_set(crtc_state, level, USHRT_MAX); - - g4x_invalidate_wms(crtc, active, level); - - crtc_state->wm.g4x.optimal = *active; - crtc_state->wm.g4x.intermediate = *active; - - drm_dbg_kms(&dev_priv->drm, - "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite=%d\n", - pipe_name(pipe), - wm->pipe[pipe].plane[PLANE_PRIMARY], - wm->pipe[pipe].plane[PLANE_CURSOR], - wm->pipe[pipe].plane[PLANE_SPRITE0]); - } - - drm_dbg_kms(&dev_priv->drm, - "Initial SR watermarks: 
plane=%d, cursor=%d fbc=%d\n", - wm->sr.plane, wm->sr.cursor, wm->sr.fbc); - drm_dbg_kms(&dev_priv->drm, - "Initial HPLL watermarks: plane=%d, SR cursor=%d fbc=%d\n", - wm->hpll.plane, wm->hpll.cursor, wm->hpll.fbc); - drm_dbg_kms(&dev_priv->drm, "Initial SR=%s HPLL=%s FBC=%s\n", - str_yes_no(wm->cxsr), str_yes_no(wm->hpll_en), - str_yes_no(wm->fbc_en)); -} - -void g4x_wm_sanitize(struct drm_i915_private *dev_priv) -{ - struct intel_plane *plane; - struct intel_crtc *crtc; - - mutex_lock(&dev_priv->display.wm.wm_mutex); - - for_each_intel_plane(&dev_priv->drm, plane) { - struct intel_crtc *crtc = - intel_crtc_for_pipe(dev_priv, plane->pipe); - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - enum plane_id plane_id = plane->id; - int level, num_levels = intel_wm_num_levels(dev_priv); - - if (plane_state->uapi.visible) - continue; - - for (level = 0; level < num_levels; level++) { - struct g4x_pipe_wm *raw = - &crtc_state->wm.g4x.raw[level]; - - raw->plane[plane_id] = 0; - - if (plane_id == PLANE_PRIMARY) - raw->fbc = 0; - } - } - - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - int ret; - - ret = _g4x_compute_pipe_wm(crtc_state); - drm_WARN_ON(&dev_priv->drm, ret); - - crtc_state->wm.g4x.intermediate = - crtc_state->wm.g4x.optimal; - crtc->wm.active.g4x = crtc_state->wm.g4x.optimal; - } - - g4x_program_watermarks(dev_priv); - - mutex_unlock(&dev_priv->display.wm.wm_mutex); -} - -void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) -{ - struct vlv_wm_values *wm = &dev_priv->display.wm.vlv; - struct intel_crtc *crtc; - u32 val; - - vlv_read_wm_values(dev_priv, wm); - - wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; - wm->level = VLV_WM_LEVEL_PM2; - - if (IS_CHERRYVIEW(dev_priv)) { - vlv_punit_get(dev_priv); - - val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); - if (val & DSP_MAXFIFO_PM5_ENABLE) - wm->level = VLV_WM_LEVEL_PM5; - - /* - * If DDR DVFS is disabled in the BIOS, Punit - * will never ack the request. So if that happens - * assume we don't have to enable/disable DDR DVFS - * dynamically. To test that just set the REQ_ACK - * bit to poke the Punit, but don't change the - * HIGH/LOW bits so that we don't actually change - * the current state. 
- */ - val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); - val |= FORCE_DDR_FREQ_REQ_ACK; - vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); - - if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & - FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { - drm_dbg_kms(&dev_priv->drm, - "Punit not acking DDR DVFS request, " - "assuming DDR DVFS is disabled\n"); - dev_priv->display.wm.max_level = VLV_WM_LEVEL_PM5; - } else { - val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); - if ((val & FORCE_DDR_HIGH_FREQ) == 0) - wm->level = VLV_WM_LEVEL_DDR_DVFS; - } - - vlv_punit_put(dev_priv); - } - - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - struct vlv_wm_state *active = &crtc->wm.active.vlv; - const struct vlv_fifo_state *fifo_state = - &crtc_state->wm.vlv.fifo_state; - enum pipe pipe = crtc->pipe; - enum plane_id plane_id; - int level; - - vlv_get_fifo_size(crtc_state); - - active->num_levels = wm->level + 1; - active->cxsr = wm->cxsr; - - for (level = 0; level < active->num_levels; level++) { - struct g4x_pipe_wm *raw = - &crtc_state->wm.vlv.raw[level]; - - active->sr[level].plane = wm->sr.plane; - active->sr[level].cursor = wm->sr.cursor; - - for_each_plane_id_on_crtc(crtc, plane_id) { - active->wm[level].plane[plane_id] = - wm->pipe[pipe].plane[plane_id]; - - raw->plane[plane_id] = - vlv_invert_wm_value(active->wm[level].plane[plane_id], - fifo_state->plane[plane_id]); - } - } - - for_each_plane_id_on_crtc(crtc, plane_id) - vlv_raw_plane_wm_set(crtc_state, level, - plane_id, USHRT_MAX); - vlv_invalidate_wms(crtc, active, level); - - crtc_state->wm.vlv.optimal = *active; - crtc_state->wm.vlv.intermediate = *active; - - drm_dbg_kms(&dev_priv->drm, - "Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n", - pipe_name(pipe), - wm->pipe[pipe].plane[PLANE_PRIMARY], - wm->pipe[pipe].plane[PLANE_CURSOR], - wm->pipe[pipe].plane[PLANE_SPRITE0], - wm->pipe[pipe].plane[PLANE_SPRITE1]); - } - - drm_dbg_kms(&dev_priv->drm, - "Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n", - wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr); -} - -void vlv_wm_sanitize(struct drm_i915_private *dev_priv) -{ - struct intel_plane *plane; - struct intel_crtc *crtc; - - mutex_lock(&dev_priv->display.wm.wm_mutex); - - for_each_intel_plane(&dev_priv->drm, plane) { - struct intel_crtc *crtc = - intel_crtc_for_pipe(dev_priv, plane->pipe); - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - enum plane_id plane_id = plane->id; - int level, num_levels = intel_wm_num_levels(dev_priv); - - if (plane_state->uapi.visible) - continue; - - for (level = 0; level < num_levels; level++) { - struct g4x_pipe_wm *raw = - &crtc_state->wm.vlv.raw[level]; - - raw->plane[plane_id] = 0; - } - } - - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - int ret; - - ret = _vlv_compute_pipe_wm(crtc_state); - drm_WARN_ON(&dev_priv->drm, ret); - - crtc_state->wm.vlv.intermediate = - crtc_state->wm.vlv.optimal; - crtc->wm.active.vlv = crtc_state->wm.vlv.optimal; - } - - vlv_program_watermarks(dev_priv); - - mutex_unlock(&dev_priv->display.wm.wm_mutex); -} - -/* - * FIXME should probably kill this and improve - * the real watermark readout/sanitation instead - */ -static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) -{ - 
intel_uncore_rmw(&dev_priv->uncore, WM3_LP_ILK, WM_LP_ENABLE, 0); - intel_uncore_rmw(&dev_priv->uncore, WM2_LP_ILK, WM_LP_ENABLE, 0); - intel_uncore_rmw(&dev_priv->uncore, WM1_LP_ILK, WM_LP_ENABLE, 0); - - /* - * Don't touch WM_LP_SPRITE_ENABLE here. - * Doing so could cause underruns. - */ -} - -void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) -{ - struct ilk_wm_values *hw = &dev_priv->display.wm.hw; - struct intel_crtc *crtc; - - ilk_init_lp_watermarks(dev_priv); - - for_each_intel_crtc(&dev_priv->drm, crtc) - ilk_pipe_wm_get_hw_state(crtc); - - hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK); - hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK); - hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK); - - hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK); - if (DISPLAY_VER(dev_priv) >= 7) { - hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB); - hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB); - } - - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? - INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; - else if (IS_IVYBRIDGE(dev_priv)) - hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? - INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; - - hw->enable_fbc_wm = - !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS); -} - static void ibx_init_clock_gating(struct drm_i915_private *dev_priv) { /* @@ -4282,16 +320,6 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv) 0, TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); } -static void lpt_suspend_hw(struct drm_i915_private *dev_priv) -{ - if (HAS_PCH_LPT_LP(dev_priv)) { - u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D); - - val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; - intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val); - } -} - static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, int general_prio_credits, int high_prio_credits) @@ -4336,10 +364,6 @@ static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), DPFC_CHICKEN_COMP_DUMMY_PIXEL); - /* Wa_1409825376:tgl (pre-prod)*/ - if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) - intel_uncore_rmw(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, 0, TGL_VRH_GATING_DIS); - /* Wa_14013723622:tgl,rkl,dg1,adl-s */ if (DISPLAY_VER(dev_priv) == 12) intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY, @@ -4357,15 +381,6 @@ static void adlp_init_clock_gating(struct drm_i915_private *dev_priv) intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0); } -static void dg1_init_clock_gating(struct drm_i915_private *dev_priv) -{ - gen12lp_init_clock_gating(dev_priv); - - /* Wa_1409836686:dg1[a0] */ - if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0)) - intel_uncore_rmw(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, 0, DPT_GATING_DIS); -} - static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv) { /* Wa_22010146351:xehpsdv */ @@ -4764,12 +779,6 @@ void intel_init_clock_gating(struct drm_i915_private *dev_priv) dev_priv->clock_gating_funcs->init_clock_gating(dev_priv); } -void intel_suspend_hw(struct drm_i915_private *dev_priv) -{ - if (HAS_PCH_LPT(dev_priv)) - lpt_suspend_hw(dev_priv); -} - static void nop_init_clock_gating(struct drm_i915_private *dev_priv) { drm_dbg_kms(&dev_priv->drm, @@ -4785,7 +794,6 
@@ CG_FUNCS(pvc); CG_FUNCS(dg2); CG_FUNCS(xehpsdv); CG_FUNCS(adlp); -CG_FUNCS(dg1); CG_FUNCS(gen12lp); CG_FUNCS(icl); CG_FUNCS(cfl); @@ -4820,7 +828,9 @@ CG_FUNCS(nop); */ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) { - if (IS_PONTEVECCHIO(dev_priv)) + if (IS_METEORLAKE(dev_priv)) + dev_priv->clock_gating_funcs = &nop_clock_gating_funcs; + else if (IS_PONTEVECCHIO(dev_priv)) dev_priv->clock_gating_funcs = &pvc_clock_gating_funcs; else if (IS_DG2(dev_priv)) dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs; @@ -4828,8 +838,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs; else if (IS_ALDERLAKE_P(dev_priv)) dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs; - else if (IS_DG1(dev_priv)) - dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs; else if (GRAPHICS_VER(dev_priv) == 12) dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs; else if (GRAPHICS_VER(dev_priv) == 11) @@ -4875,117 +883,3 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) dev_priv->clock_gating_funcs = &nop_clock_gating_funcs; } } - -static const struct intel_wm_funcs ilk_wm_funcs = { - .compute_pipe_wm = ilk_compute_pipe_wm, - .compute_intermediate_wm = ilk_compute_intermediate_wm, - .initial_watermarks = ilk_initial_watermarks, - .optimize_watermarks = ilk_optimize_watermarks, -}; - -static const struct intel_wm_funcs vlv_wm_funcs = { - .compute_pipe_wm = vlv_compute_pipe_wm, - .compute_intermediate_wm = vlv_compute_intermediate_wm, - .initial_watermarks = vlv_initial_watermarks, - .optimize_watermarks = vlv_optimize_watermarks, - .atomic_update_watermarks = vlv_atomic_update_fifo, -}; - -static const struct intel_wm_funcs g4x_wm_funcs = { - .compute_pipe_wm = g4x_compute_pipe_wm, - .compute_intermediate_wm = g4x_compute_intermediate_wm, - .initial_watermarks = g4x_initial_watermarks, - .optimize_watermarks = g4x_optimize_watermarks, -}; - -static const struct intel_wm_funcs pnv_wm_funcs = { - .update_wm = pnv_update_wm, -}; - -static const struct intel_wm_funcs i965_wm_funcs = { - .update_wm = i965_update_wm, -}; - -static const struct intel_wm_funcs i9xx_wm_funcs = { - .update_wm = i9xx_update_wm, -}; - -static const struct intel_wm_funcs i845_wm_funcs = { - .update_wm = i845_update_wm, -}; - -static const struct intel_wm_funcs nop_funcs = { -}; - -/* Set up chip specific power management-related functions */ -void intel_init_pm(struct drm_i915_private *dev_priv) -{ - if (DISPLAY_VER(dev_priv) >= 9) { - skl_wm_init(dev_priv); - return; - } - - /* For cxsr */ - if (IS_PINEVIEW(dev_priv)) - pnv_get_mem_freq(dev_priv); - else if (GRAPHICS_VER(dev_priv) == 5) - ilk_get_mem_freq(dev_priv); - - /* For FIFO watermark updates */ - if (HAS_PCH_SPLIT(dev_priv)) { - ilk_setup_wm_latency(dev_priv); - - if ((DISPLAY_VER(dev_priv) == 5 && dev_priv->display.wm.pri_latency[1] && - dev_priv->display.wm.spr_latency[1] && dev_priv->display.wm.cur_latency[1]) || - (DISPLAY_VER(dev_priv) != 5 && dev_priv->display.wm.pri_latency[0] && - dev_priv->display.wm.spr_latency[0] && dev_priv->display.wm.cur_latency[0])) { - dev_priv->display.funcs.wm = &ilk_wm_funcs; - } else { - drm_dbg_kms(&dev_priv->drm, - "Failed to read display plane latency. 
" - "Disable CxSR\n"); - dev_priv->display.funcs.wm = &nop_funcs; - } - } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - vlv_setup_wm_latency(dev_priv); - dev_priv->display.funcs.wm = &vlv_wm_funcs; - } else if (IS_G4X(dev_priv)) { - g4x_setup_wm_latency(dev_priv); - dev_priv->display.funcs.wm = &g4x_wm_funcs; - } else if (IS_PINEVIEW(dev_priv)) { - if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv), - dev_priv->is_ddr3, - dev_priv->fsb_freq, - dev_priv->mem_freq)) { - drm_info(&dev_priv->drm, - "failed to find known CxSR latency " - "(found ddr%s fsb freq %d, mem freq %d), " - "disabling CxSR\n", - (dev_priv->is_ddr3 == 1) ? "3" : "2", - dev_priv->fsb_freq, dev_priv->mem_freq); - /* Disable CxSR and never update its watermark again */ - intel_set_memory_cxsr(dev_priv, false); - dev_priv->display.funcs.wm = &nop_funcs; - } else - dev_priv->display.funcs.wm = &pnv_wm_funcs; - } else if (DISPLAY_VER(dev_priv) == 4) { - dev_priv->display.funcs.wm = &i965_wm_funcs; - } else if (DISPLAY_VER(dev_priv) == 3) { - dev_priv->display.funcs.wm = &i9xx_wm_funcs; - } else if (DISPLAY_VER(dev_priv) == 2) { - if (INTEL_NUM_PIPES(dev_priv) == 1) - dev_priv->display.funcs.wm = &i845_wm_funcs; - else - dev_priv->display.funcs.wm = &i9xx_wm_funcs; - } else { - drm_err(&dev_priv->drm, - "unexpected fall-through in %s\n", __func__); - dev_priv->display.funcs.wm = &nop_funcs; - } -} - -void intel_pm_setup(struct drm_i915_private *dev_priv) -{ - dev_priv->runtime_pm.suspended = false; - atomic_set(&dev_priv->runtime_pm.wakeref_count, 0); -} diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index c09b872d65c8..f774bddcdca6 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -13,22 +13,6 @@ struct intel_crtc_state; struct intel_plane_state; void intel_init_clock_gating(struct drm_i915_private *dev_priv); -void intel_suspend_hw(struct drm_i915_private *dev_priv); -int ilk_wm_max_level(const struct drm_i915_private *dev_priv); -void intel_init_pm(struct drm_i915_private *dev_priv); void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); -void intel_pm_setup(struct drm_i915_private *dev_priv); -void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv); -void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv); -void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv); -void g4x_wm_sanitize(struct drm_i915_private *dev_priv); -void vlv_wm_sanitize(struct drm_i915_private *dev_priv); -bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv); -bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state); -void intel_print_wm_latency(struct drm_i915_private *dev_priv, - const char *name, const u16 wm[]); - -bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); #endif /* __INTEL_PM_H__ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 129746713d07..cf5122299b6b 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -652,6 +652,8 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm) rpm->kdev = kdev; rpm->available = HAS_RUNTIME_PM(i915); + rpm->suspended = false; + atomic_set(&rpm->wakeref_count, 0); init_intel_runtime_pm_wakeref(rpm); INIT_LIST_HEAD(&rpm->lmem_userfault_list); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 8dee9e62a73e..f4b3b2063018 100644 --- 
a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -32,7 +32,6 @@ #include "i915_reg.h" #include "i915_trace.h" #include "i915_vgpu.h" -#include "intel_pm.h" #define FORCEWAKE_ACK_TIMEOUT_MS 50 #define GT_FIFO_TIMEOUT_MS 10 @@ -2460,7 +2459,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, static void uncore_unmap_mmio(struct drm_device *drm, void *regs) { - iounmap(regs); + iounmap((void __iomem *)regs); } int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr) @@ -2491,7 +2490,8 @@ int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr) return -EIO; } - return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, uncore->regs); + return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio, + (void __force *)uncore->regs); } void intel_uncore_init_early(struct intel_uncore *uncore, diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c index cfc9af8b3d21..9d4c7724e98e 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c @@ -270,6 +270,60 @@ static bool pxp_component_bound(struct intel_pxp *pxp) return bound; } +static int __pxp_global_teardown_final(struct intel_pxp *pxp) +{ + if (!pxp->arb_is_valid) + return 0; + /* + * To ensure synchronous and coherent session teardown completion + * in response to suspend or shutdown triggers, don't use a worker. + */ + intel_pxp_mark_termination_in_progress(pxp); + intel_pxp_terminate(pxp, false); + + if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250))) + return -ETIMEDOUT; + + return 0; +} + +static int __pxp_global_teardown_restart(struct intel_pxp *pxp) +{ + if (pxp->arb_is_valid) + return 0; + /* + * The arb-session is currently inactive and we are doing a reset and restart + * due to a runtime event. Use the worker that was designed for this. 
+ */ + pxp_queue_termination(pxp); + + if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(250))) + return -ETIMEDOUT; + + return 0; +} + +void intel_pxp_end(struct intel_pxp *pxp) +{ + struct drm_i915_private *i915 = pxp->ctrl_gt->i915; + intel_wakeref_t wakeref; + + if (!intel_pxp_is_enabled(pxp)) + return; + + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + mutex_lock(&pxp->arb_mutex); + + if (__pxp_global_teardown_final(pxp)) + drm_dbg(&i915->drm, "PXP end timed out\n"); + + mutex_unlock(&pxp->arb_mutex); + + intel_pxp_fini_hw(pxp); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); +} + /* * the arb session is restarted from the irq work when we receive the * termination completion interrupt @@ -286,16 +340,9 @@ int intel_pxp_start(struct intel_pxp *pxp) mutex_lock(&pxp->arb_mutex); - if (pxp->arb_is_valid) - goto unlock; - - pxp_queue_termination(pxp); - - if (!wait_for_completion_timeout(&pxp->termination, - msecs_to_jiffies(250))) { - ret = -ETIMEDOUT; + ret = __pxp_global_teardown_restart(pxp); + if (ret) goto unlock; - } /* make sure the compiler doesn't optimize the double access */ barrier(); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h index 04440fada711..3ded0890cd27 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h @@ -24,8 +24,10 @@ void intel_pxp_init_hw(struct intel_pxp *pxp); void intel_pxp_fini_hw(struct intel_pxp *pxp); void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp); +void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id); int intel_pxp_start(struct intel_pxp *pxp); +void intel_pxp_end(struct intel_pxp *pxp); int intel_pxp_key_check(struct intel_pxp *pxp, struct drm_i915_gem_object *obj, diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h index 739f9072fa5f..26f7d9f01bf3 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_42.h @@ -12,6 +12,9 @@ /* PXP-Opcode for Init Session */ #define PXP42_CMDID_INIT_SESSION 0x1e +/* PXP-Opcode for Invalidate Stream Key */ +#define PXP42_CMDID_INVALIDATE_STREAM_KEY 0x00000007 + /* PXP-Input-Packet: Init Session (Arb-Session) */ struct pxp42_create_arb_in { struct pxp_cmd_header header; @@ -25,4 +28,16 @@ struct pxp42_create_arb_out { struct pxp_cmd_header header; } __packed; +/* PXP-Input-Packet: Invalidate Stream Key */ +struct pxp42_inv_stream_key_in { + struct pxp_cmd_header header; + u32 rsvd[3]; +} __packed; + +/* PXP-Output-Packet: Invalidate Stream Key */ +struct pxp42_inv_stream_key_out { + struct pxp_cmd_header header; + u32 rsvd; +} __packed; + #endif /* __INTEL_PXP_FW_INTERFACE_42_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h index aaa8187a0afb..ae9b151b7cb7 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h @@ -28,6 +28,9 @@ struct pxp_cmd_header { union { u32 status; /* out */ u32 stream_id; /* in */ +#define PXP_CMDHDR_EXTDATA_SESSION_VALID GENMASK(0, 0) +#define PXP_CMDHDR_EXTDATA_APP_TYPE GENMASK(1, 1) +#define PXP_CMDHDR_EXTDATA_SESSION_ID GENMASK(17, 2) }; /* Length of the message (excluding the header) */ u32 buffer_len; diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c index 892d39cc61c1..4f836b317424 100644 --- 
a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c @@ -16,7 +16,7 @@ void intel_pxp_suspend_prepare(struct intel_pxp *pxp) if (!intel_pxp_is_enabled(pxp)) return; - pxp->arb_is_valid = false; + intel_pxp_end(pxp); intel_pxp_invalidate(pxp); } @@ -34,7 +34,7 @@ void intel_pxp_suspend(struct intel_pxp *pxp) } } -void intel_pxp_resume(struct intel_pxp *pxp) +void intel_pxp_resume_complete(struct intel_pxp *pxp) { if (!intel_pxp_is_enabled(pxp)) return; diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h index 586be769104f..06b46f535b42 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h @@ -11,7 +11,7 @@ struct intel_pxp; #ifdef CONFIG_DRM_I915_PXP void intel_pxp_suspend_prepare(struct intel_pxp *pxp); void intel_pxp_suspend(struct intel_pxp *pxp); -void intel_pxp_resume(struct intel_pxp *pxp); +void intel_pxp_resume_complete(struct intel_pxp *pxp); void intel_pxp_runtime_suspend(struct intel_pxp *pxp); #else static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp) @@ -22,7 +22,7 @@ static inline void intel_pxp_suspend(struct intel_pxp *pxp) { } -static inline void intel_pxp_resume(struct intel_pxp *pxp) +static inline void intel_pxp_resume_complete(struct intel_pxp *pxp) { } @@ -32,6 +32,6 @@ static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp) #endif static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp) { - intel_pxp_resume(pxp); + intel_pxp_resume_complete(pxp); } #endif /* __INTEL_PXP_PM_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c index ae413580b81a..448cacb0465d 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c @@ -110,14 +110,16 @@ static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp) intel_uncore_write(gt->uncore, PXP_GLOBAL_TERMINATE, 1); + intel_pxp_tee_end_arb_fw_session(pxp, ARB_SESSION); + return ret; } -static void pxp_terminate(struct intel_pxp *pxp) +void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart) { int ret; - pxp->hw_state_invalidated = true; + pxp->hw_state_invalidated = post_invalidation_needs_restart; /* * if we fail to submit the termination there is no point in waiting for @@ -165,7 +167,7 @@ static void pxp_session_work(struct work_struct *work) if (events & PXP_TERMINATION_REQUEST) { events &= ~PXP_TERMINATION_COMPLETE; - pxp_terminate(pxp); + intel_pxp_terminate(pxp, true); } if (events & PXP_TERMINATION_COMPLETE) diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h index 903ac52cffa1..ba5788127109 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h @@ -12,9 +12,14 @@ struct intel_pxp; #ifdef CONFIG_DRM_I915_PXP void intel_pxp_session_management_init(struct intel_pxp *pxp); +void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart); #else static inline void intel_pxp_session_management_init(struct intel_pxp *pxp) { } + +static inline void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_restart) +{ +} #endif #endif /* __INTEL_PXP_SESSION_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c index 73aa8015f828..d9d248b48093 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c 
@@ -127,6 +127,12 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev, intel_wakeref_t wakeref; int ret = 0; + if (!HAS_HECI_PXP(i915)) { + pxp->dev_link = device_link_add(i915_kdev, tee_kdev, DL_FLAG_STATELESS); + if (drm_WARN_ON(&i915->drm, !pxp->dev_link)) + return -ENODEV; + } + mutex_lock(&pxp->tee_mutex); pxp->pxp_component = data; pxp->pxp_component->tee_dev = tee_kdev; @@ -169,6 +175,11 @@ static void i915_pxp_tee_component_unbind(struct device *i915_kdev, mutex_lock(&pxp->tee_mutex); pxp->pxp_component = NULL; mutex_unlock(&pxp->tee_mutex); + + if (pxp->dev_link) { + device_link_del(pxp->dev_link); + pxp->dev_link = NULL; + } } static const struct component_ops i915_pxp_tee_component_ops = { @@ -308,3 +319,38 @@ int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp, return ret; } + +void intel_pxp_tee_end_arb_fw_session(struct intel_pxp *pxp, u32 session_id) +{ + struct drm_i915_private *i915 = pxp->ctrl_gt->i915; + struct pxp42_inv_stream_key_in msg_in = {0}; + struct pxp42_inv_stream_key_out msg_out = {0}; + int ret, trials = 0; + +try_again: + memset(&msg_in, 0, sizeof(msg_in)); + memset(&msg_out, 0, sizeof(msg_out)); + msg_in.header.api_version = PXP_APIVER(4, 2); + msg_in.header.command_id = PXP42_CMDID_INVALIDATE_STREAM_KEY; + msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header); + + msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1); + msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0); + msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id); + + ret = intel_pxp_tee_io_message(pxp, + &msg_in, sizeof(msg_in), + &msg_out, sizeof(msg_out), + NULL); + + /* Cleanup coherency between GT and Firmware is critical, so try again if it fails */ + if ((ret || msg_out.header.status != 0x0) && ++trials < 3) + goto try_again; + + if (ret) + drm_err(&i915->drm, "Failed to send tee msg for inv-stream-key-%d, ret=[%d]\n", + session_id, ret); + else if (msg_out.header.status != 0x0) + drm_warn(&i915->drm, "PXP firmware failed inv-stream-key-%d with status 0x%08x\n", + session_id, msg_out.header.status); +} diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h index 7dc5f08d1583..007de49e1ea4 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h @@ -32,6 +32,9 @@ struct intel_pxp { * which are protected by &tee_mutex. */ struct i915_pxp_component *pxp_component; + + /* @dev_link: Enforce module relationship for power management ordering. */ + struct device_link *dev_link; /** * @pxp_component_added: track if the pxp component has been added. * Set and cleared in tee init and fini functions respectively. 
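The PXP changes above add extended-data bitfields to pxp_cmd_header.stream_id and use them when invalidating the arb session's stream key. A minimal sketch of that packing pattern, using only the PXP_CMDHDR_EXTDATA_* masks introduced in this series (the helper name pxp_pack_stream_id is hypothetical, not part of the patch):

#include <linux/bitfield.h>

/*
 * Build the extended-data form of pxp_cmd_header.stream_id the way
 * intel_pxp_tee_end_arb_fw_session() does: flag the session as valid,
 * leave the app-type field at 0 (as the patch does for the arb session)
 * and place the session id in bits 17:2.
 */
static u32 pxp_pack_stream_id(u32 session_id)
{
	u32 stream_id;

	stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1);
	stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0);
	stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, session_id);

	return stream_id;
}

The resulting value is what the new hunk assigns to msg_in.header.stream_id before sending the PXP42_CMDID_INVALIDATE_STREAM_KEY message through intel_pxp_tee_io_message(), with up to three attempts if the firmware reports a failure.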
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c index bba8cb6e8ae4..9f0651d48d41 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.c +++ b/drivers/gpu/drm/i915/soc/intel_dram.c @@ -10,6 +10,7 @@ #include "intel_dram.h" #include "intel_mchbar_regs.h" #include "intel_pcode.h" +#include "vlv_sideband.h" struct dram_dimm_info { u16 size; @@ -42,6 +43,155 @@ static const char *intel_dram_type_str(enum intel_dram_type type) #undef DRAM_TYPE_STR +static void pnv_detect_mem_freq(struct drm_i915_private *dev_priv) +{ + u32 tmp; + + tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG); + + switch (tmp & CLKCFG_FSB_MASK) { + case CLKCFG_FSB_533: + dev_priv->fsb_freq = 533; /* 133*4 */ + break; + case CLKCFG_FSB_800: + dev_priv->fsb_freq = 800; /* 200*4 */ + break; + case CLKCFG_FSB_667: + dev_priv->fsb_freq = 667; /* 167*4 */ + break; + case CLKCFG_FSB_400: + dev_priv->fsb_freq = 400; /* 100*4 */ + break; + } + + switch (tmp & CLKCFG_MEM_MASK) { + case CLKCFG_MEM_533: + dev_priv->mem_freq = 533; + break; + case CLKCFG_MEM_667: + dev_priv->mem_freq = 667; + break; + case CLKCFG_MEM_800: + dev_priv->mem_freq = 800; + break; + } + + /* detect pineview DDR3 setting */ + tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL); + dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; +} + +static void ilk_detect_mem_freq(struct drm_i915_private *dev_priv) +{ + u16 ddrpll, csipll; + + ddrpll = intel_uncore_read16(&dev_priv->uncore, DDRMPLL1); + switch (ddrpll & 0xff) { + case 0xc: + dev_priv->mem_freq = 800; + break; + case 0x10: + dev_priv->mem_freq = 1066; + break; + case 0x14: + dev_priv->mem_freq = 1333; + break; + case 0x18: + dev_priv->mem_freq = 1600; + break; + default: + drm_dbg(&dev_priv->drm, "unknown memory frequency 0x%02x\n", + ddrpll & 0xff); + dev_priv->mem_freq = 0; + break; + } + + csipll = intel_uncore_read16(&dev_priv->uncore, CSIPLL0); + switch (csipll & 0x3ff) { + case 0x00c: + dev_priv->fsb_freq = 3200; + break; + case 0x00e: + dev_priv->fsb_freq = 3733; + break; + case 0x010: + dev_priv->fsb_freq = 4266; + break; + case 0x012: + dev_priv->fsb_freq = 4800; + break; + case 0x014: + dev_priv->fsb_freq = 5333; + break; + case 0x016: + dev_priv->fsb_freq = 5866; + break; + case 0x018: + dev_priv->fsb_freq = 6400; + break; + default: + drm_dbg(&dev_priv->drm, "unknown fsb frequency 0x%04x\n", + csipll & 0x3ff); + dev_priv->fsb_freq = 0; + break; + } +} + +static void chv_detect_mem_freq(struct drm_i915_private *i915) +{ + u32 val; + + vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK)); + val = vlv_cck_read(i915, CCK_FUSE_REG); + vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK)); + + switch ((val >> 2) & 0x7) { + case 3: + i915->mem_freq = 2000; + break; + default: + i915->mem_freq = 1600; + break; + } +} + +static void vlv_detect_mem_freq(struct drm_i915_private *i915) +{ + u32 val; + + vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_PUNIT)); + val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); + vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT)); + + switch ((val >> 6) & 3) { + case 0: + case 1: + i915->mem_freq = 800; + break; + case 2: + i915->mem_freq = 1066; + break; + case 3: + i915->mem_freq = 1333; + break; + } +} + +static void detect_mem_freq(struct drm_i915_private *i915) +{ + if (IS_PINEVIEW(i915)) + pnv_detect_mem_freq(i915); + else if (GRAPHICS_VER(i915) == 5) + ilk_detect_mem_freq(i915); + else if (IS_CHERRYVIEW(i915)) + chv_detect_mem_freq(i915); + else if (IS_VALLEYVIEW(i915)) + vlv_detect_mem_freq(i915); + + if (i915->mem_freq) + drm_dbg(&i915->drm, "DDR 
speed: %d MHz\n", i915->mem_freq); +} + static int intel_dimm_num_devices(const struct dram_dimm_info *dimm) { return dimm->ranks * 64 / (dimm->width ?: 1); @@ -507,6 +657,8 @@ void intel_dram_detect(struct drm_i915_private *i915) struct dram_info *dram_info = &i915->dram_info; int ret; + detect_mem_freq(i915); + if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915)) return; diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 9ddb854b8155..5c19097266fe 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -1343,7 +1343,9 @@ static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag) struct mei_cl_vtag *vtag_l; list_for_each_entry(vtag_l, &cl->vtag_map, list) { - if (vtag_l->vtag == vtag) { + /* The client on bus has one fixed vtag map */ + if ((cl->cldev && mei_cldev_enabled(cl->cldev)) || + vtag_l->vtag == vtag) { vtag_l->pending_read = false; break; } diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 5bf0d50d55a0..676d566f38dd 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -342,6 +342,12 @@ static void mei_me_remove(struct pci_dev *pdev) } #ifdef CONFIG_PM_SLEEP +static int mei_me_pci_prepare(struct device *device) +{ + pm_runtime_resume(device); + return 0; +} + static int mei_me_pci_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); @@ -398,7 +404,17 @@ static int mei_me_pci_resume(struct device *device) return 0; } -#endif /* CONFIG_PM_SLEEP */ + +static void mei_me_pci_complete(struct device *device) +{ + pm_runtime_suspend(device); +} +#else /* CONFIG_PM_SLEEP */ + +#define mei_me_pci_prepare NULL +#define mei_me_pci_complete NULL + +#endif /* !CONFIG_PM_SLEEP */ #ifdef CONFIG_PM static int mei_me_pm_runtime_idle(struct device *device) @@ -501,6 +517,8 @@ static inline void mei_me_unset_pm_domain(struct mei_device *dev) } static const struct dev_pm_ops mei_me_pm_ops = { + .prepare = mei_me_pci_prepare, + .complete = mei_me_pci_complete, SET_SYSTEM_SLEEP_PM_OPS(mei_me_pci_suspend, mei_me_pci_resume) SET_RUNTIME_PM_OPS( diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 4a4c190f7698..e1e10dfbb661 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -588,6 +588,7 @@ INTEL_VGA_DEVICE(0x4551, info), \ INTEL_VGA_DEVICE(0x4555, info), \ INTEL_VGA_DEVICE(0x4557, info), \ + INTEL_VGA_DEVICE(0x4570, info), \ INTEL_VGA_DEVICE(0x4571, info) /* JSL */ @@ -684,14 +685,18 @@ INTEL_VGA_DEVICE(0xA78A, info), \ INTEL_VGA_DEVICE(0xA78B, info) +/* RPL-U */ +#define INTEL_RPLU_IDS(info) \ + INTEL_VGA_DEVICE(0xA721, info), \ + INTEL_VGA_DEVICE(0xA7A1, info), \ + INTEL_VGA_DEVICE(0xA7A9, info) + /* RPL-P */ #define INTEL_RPLP_IDS(info) \ + INTEL_RPLU_IDS(info), \ INTEL_VGA_DEVICE(0xA720, info), \ - INTEL_VGA_DEVICE(0xA721, info), \ INTEL_VGA_DEVICE(0xA7A0, info), \ - INTEL_VGA_DEVICE(0xA7A1, info), \ - INTEL_VGA_DEVICE(0xA7A8, info), \ - INTEL_VGA_DEVICE(0xA7A9, info) + INTEL_VGA_DEVICE(0xA7A8, info) /* DG2 */ #define INTEL_DG2_G10_IDS(info) \ @@ -706,7 +711,6 @@ INTEL_VGA_DEVICE(0x5693, info), \ INTEL_VGA_DEVICE(0x5694, info), \ INTEL_VGA_DEVICE(0x5695, info), \ - INTEL_VGA_DEVICE(0x5698, info), \ INTEL_VGA_DEVICE(0x56A5, info), \ INTEL_VGA_DEVICE(0x56A6, info), \ INTEL_VGA_DEVICE(0x56B0, info), \ |
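The mei_me_pci_prepare()/mei_me_pci_complete() pair added to mei_me_pm_ops follows a common PCI driver pattern: force a runtime resume before the system sleep transition, then let the device drop back into runtime suspend once system resume has completed. A generic sketch of that pattern, with example_-prefixed names as placeholders rather than mei symbols:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_pci_prepare(struct device *dev)
{
	/* make sure the device is at full power before system suspend */
	pm_runtime_resume(dev);
	return 0;
}

static void example_pci_complete(struct device *dev)
{
	/* allow runtime suspend again once system resume has finished */
	pm_runtime_suspend(dev);
}

static const struct dev_pm_ops example_pm_ops = {
	.prepare = example_pci_prepare,
	.complete = example_pci_complete,
	/* SET_SYSTEM_SLEEP_PM_OPS() / SET_RUNTIME_PM_OPS() entries follow here */
};

As in the patch, the prepare/complete hooks only apply when CONFIG_PM_SLEEP is enabled; otherwise they can be stubbed out to NULL exactly as the new #else branch does.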
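The i915_pciids.h hunk splits the RPL-U device IDs into their own group while keeping them reachable through the RPL-P macro, so existing ID tables that bind INTEL_RPLP_IDS() continue to match RPL-U parts. A hypothetical table fragment showing that composition (example_info is a stand-in for whatever driver_data pointer the consumer passes):

#include <linux/pci.h>
#include <drm/i915_pciids.h>

static const int example_info;	/* placeholder driver_data target */

static const struct pci_device_id example_ids[] = {
	/* expands INTEL_RPLU_IDS() first, then the remaining RPL-P IDs */
	INTEL_RPLP_IDS(&example_info),
	{ }
};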