From 7801f3b792b0fd171b02f2cb974e758295e68e0f Mon Sep 17 00:00:00 2001
From: Lucas De Marchi
Date: Tue, 30 Jun 2020 21:50:54 -0700
Subject: drm/i915/display: prefer dig_port to reference intel_digital_port

We have a mix of dport, intel_dport, intel_dig_port and dig_port to
reference an intel_digital_port struct. Numbers are around

	    5 intel_dport
	   36 dport
	  479 intel_dig_port
	  352 dig_port

Since we already removed the intel_ prefix from most of our other
structs, do the same here and prefer dig_port.

v2: rename everything in i915, not just a few display sources, and
    reword commit message (from Matt Roper)

Signed-off-by: Lucas De Marchi
Reviewed-by: Rodrigo Vivi
Link: https://patchwork.freedesktop.org/patch/msgid/20200701045054.23357-1-lucas.demarchi@intel.com
---
 drivers/gpu/drm/i915/display/intel_ddi.c           | 143 +++++----
 drivers/gpu/drm/i915/display/intel_display.c       |   6 +-
 drivers/gpu/drm/i915/display/intel_display.h       |   2 +-
 .../gpu/drm/i915/display/intel_display_debugfs.c   |  12 +-
 drivers/gpu/drm/i915/display/intel_display_power.c |   4 +-
 drivers/gpu/drm/i915/display/intel_display_types.h |  40 +--
 drivers/gpu/drm/i915/display/intel_dp.c            | 338 ++++++++++-----------
 drivers/gpu/drm/i915/display/intel_dp.h            |   4 +-
 drivers/gpu/drm/i915/display/intel_dp_mst.c        |  74 ++---
 drivers/gpu/drm/i915/display/intel_dp_mst.h        |   6 +-
 drivers/gpu/drm/i915/display/intel_dpio_phy.c      |  38 +--
 drivers/gpu/drm/i915/display/intel_hdcp.c          | 118 +++----
 drivers/gpu/drm/i915/display/intel_hdmi.c          | 258 ++++++++--------
 drivers/gpu/drm/i915/display/intel_hdmi.h          |   4 +-
 drivers/gpu/drm/i915/display/intel_lspcon.c        |   8 +-
 drivers/gpu/drm/i915/display/intel_lspcon.h        |   2 +-
 drivers/gpu/drm/i915/display/intel_psr.c           |   4 +-
 drivers/gpu/drm/i915/display/intel_vdsc.c          |   8 +-
 18 files changed, 533 insertions(+), 536 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 025d4052f6f8..583170e73881 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1394,10 +1394,9 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
 static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct intel_digital_port *intel_dig_port =
-		enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 
-	intel_dp->DP = intel_dig_port->saved_port_bits |
+	intel_dp->DP = dig_port->saved_port_bits |
 		DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
 	intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
 }
@@ -2070,7 +2069,7 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
 static void skl_ddi_set_iboost(struct intel_encoder *encoder,
 			       int level, enum intel_output_type type)
 {
-	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 	enum port port = encoder->port;
 	u8 iboost;
@@ -2107,7 +2106,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
 
 	_skl_ddi_set_iboost(dev_priv, port, iboost);
 
-	if (port == PORT_A && intel_dig_port->max_lanes == 4)
+	if (port == PORT_A && dig_port->max_lanes == 4)
 		_skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
 }
 
@@ -3000,15 +2999,15 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
 }
 
 static void
-icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
+icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
 		       const struct
intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(dev_priv, intel_dig_port->base.port); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); u32 ln0, ln1, pin_assignment; u8 width; - if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT) + if (dig_port->tc_mode == TC_PORT_TBT_ALT) return; if (INTEL_GEN(dev_priv) >= 12) { @@ -3027,13 +3026,13 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port, ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); /* DPPATC */ - pin_assignment = intel_tc_port_get_pin_assignment_mask(intel_dig_port); + pin_assignment = intel_tc_port_get_pin_assignment_mask(dig_port); width = crtc_state->lane_count; switch (pin_assignment) { case 0x0: drm_WARN_ON(&dev_priv->drm, - intel_dig_port->tc_mode != TC_PORT_LEGACY); + dig_port->tc_mode != TC_PORT_LEGACY); if (width == 1) { ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; } else { @@ -3978,10 +3977,9 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state, static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = - to_i915(intel_dig_port->base.base.dev); - enum port port = intel_dig_port->base.port; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum port port = dig_port->base.port; u32 dp_tp_ctl, ddi_buf_ctl; bool wait = false; @@ -4536,42 +4534,41 @@ static const struct drm_encoder_funcs intel_ddi_funcs = { }; static struct intel_connector * -intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port) +intel_ddi_init_dp_connector(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); struct intel_connector *connector; - enum port port = intel_dig_port->base.port; + enum port port = dig_port->base.port; connector = intel_connector_alloc(); if (!connector) return NULL; - intel_dig_port->dp.output_reg = DDI_BUF_CTL(port); - intel_dig_port->dp.prepare_link_retrain = - intel_ddi_prepare_link_retrain; - intel_dig_port->dp.set_link_train = intel_ddi_set_link_train; - intel_dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train; + dig_port->dp.output_reg = DDI_BUF_CTL(port); + dig_port->dp.prepare_link_retrain = intel_ddi_prepare_link_retrain; + dig_port->dp.set_link_train = intel_ddi_set_link_train; + dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train; if (INTEL_GEN(dev_priv) >= 12) - intel_dig_port->dp.set_signal_levels = tgl_set_signal_levels; + dig_port->dp.set_signal_levels = tgl_set_signal_levels; else if (INTEL_GEN(dev_priv) >= 11) - intel_dig_port->dp.set_signal_levels = icl_set_signal_levels; + dig_port->dp.set_signal_levels = icl_set_signal_levels; else if (IS_CANNONLAKE(dev_priv)) - intel_dig_port->dp.set_signal_levels = cnl_set_signal_levels; + dig_port->dp.set_signal_levels = cnl_set_signal_levels; else if (IS_GEN9_LP(dev_priv)) - intel_dig_port->dp.set_signal_levels = bxt_set_signal_levels; + dig_port->dp.set_signal_levels = bxt_set_signal_levels; else - intel_dig_port->dp.set_signal_levels = hsw_set_signal_levels; + dig_port->dp.set_signal_levels = hsw_set_signal_levels; - intel_dig_port->dp.voltage_max = intel_ddi_dp_voltage_max; 
- intel_dig_port->dp.preemph_max = intel_ddi_dp_preemph_max; + dig_port->dp.voltage_max = intel_ddi_dp_voltage_max; + dig_port->dp.preemph_max = intel_ddi_dp_preemph_max; if (INTEL_GEN(dev_priv) < 12) { - intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); - intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); + dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); + dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); } - if (!intel_dp_init_connector(intel_dig_port, connector)) { + if (!intel_dp_init_connector(dig_port, connector)) { kfree(connector); return NULL; } @@ -4770,29 +4767,29 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder) } static struct intel_connector * -intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port) +intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port) { struct intel_connector *connector; - enum port port = intel_dig_port->base.port; + enum port port = dig_port->base.port; connector = intel_connector_alloc(); if (!connector) return NULL; - intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); - intel_hdmi_init_connector(intel_dig_port, connector); + dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port); + intel_hdmi_init_connector(dig_port, connector); return connector; } -static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport) +static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - if (dport->base.port != PORT_A) + if (dig_port->base.port != PORT_A) return false; - if (dport->saved_port_bits & DDI_A_4_LANES) + if (dig_port->saved_port_bits & DDI_A_4_LANES) return false; /* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only @@ -4814,10 +4811,10 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dport) } static int -intel_ddi_max_lanes(struct intel_digital_port *intel_dport) +intel_ddi_max_lanes(struct intel_digital_port *dig_port) { - struct drm_i915_private *dev_priv = to_i915(intel_dport->base.base.dev); - enum port port = intel_dport->base.port; + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum port port = dig_port->base.port; int max_lanes = 4; if (INTEL_GEN(dev_priv) >= 11) @@ -4836,10 +4833,10 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport) * wasn't lit up at boot. Force this bit set when needed * so we use the proper lane count for our calculations. 
*/ - if (intel_ddi_a_force_4_lanes(intel_dport)) { + if (intel_ddi_a_force_4_lanes(dig_port)) { drm_dbg_kms(&dev_priv->drm, "Forcing DDI_A_4_LANES for port A\n"); - intel_dport->saved_port_bits |= DDI_A_4_LANES; + dig_port->saved_port_bits |= DDI_A_4_LANES; max_lanes = 4; } @@ -4848,7 +4845,7 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport) void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) { - struct intel_digital_port *intel_dig_port; + struct intel_digital_port *dig_port; struct intel_encoder *encoder; bool init_hdmi, init_dp, init_lspcon = false; enum phy phy = intel_port_to_phy(dev_priv, port); @@ -4877,11 +4874,11 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) return; } - intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); - if (!intel_dig_port) + dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); + if (!dig_port) return; - encoder = &intel_dig_port->base; + encoder = &dig_port->base; drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_ddi_funcs, DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port)); @@ -4908,49 +4905,49 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->pipe_mask = ~0; if (INTEL_GEN(dev_priv) >= 11) - intel_dig_port->saved_port_bits = intel_de_read(dev_priv, - DDI_BUF_CTL(port)) & - DDI_BUF_PORT_REVERSAL; + dig_port->saved_port_bits = + intel_de_read(dev_priv, DDI_BUF_CTL(port)) + & DDI_BUF_PORT_REVERSAL; else - intel_dig_port->saved_port_bits = intel_de_read(dev_priv, - DDI_BUF_CTL(port)) & - (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); + dig_port->saved_port_bits = + intel_de_read(dev_priv, DDI_BUF_CTL(port)) + & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); - intel_dig_port->dp.output_reg = INVALID_MMIO_REG; - intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port); - intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); + dig_port->dp.output_reg = INVALID_MMIO_REG; + dig_port->max_lanes = intel_ddi_max_lanes(dig_port); + dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); if (intel_phy_is_tc(dev_priv, phy)) { bool is_legacy = !intel_bios_port_supports_typec_usb(dev_priv, port) && !intel_bios_port_supports_tbt(dev_priv, port); - intel_tc_port_init(intel_dig_port, is_legacy); + intel_tc_port_init(dig_port, is_legacy); encoder->update_prepare = intel_ddi_update_prepare; encoder->update_complete = intel_ddi_update_complete; } drm_WARN_ON(&dev_priv->drm, port > PORT_I); - intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO + + dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO + port - PORT_A; if (init_dp) { - if (!intel_ddi_init_dp_connector(intel_dig_port)) + if (!intel_ddi_init_dp_connector(dig_port)) goto err; - intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; + dig_port->hpd_pulse = intel_dp_hpd_pulse; } /* In theory we don't need the encoder->type check, but leave it just in * case we have some really bad VBTs... 
*/ if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { - if (!intel_ddi_init_hdmi_connector(intel_dig_port)) + if (!intel_ddi_init_hdmi_connector(dig_port)) goto err; } if (init_lspcon) { - if (lspcon_init(intel_dig_port)) + if (lspcon_init(dig_port)) /* TODO: handle hdmi info frame part */ drm_dbg_kms(&dev_priv->drm, "LSPCON init success on port %c\n", @@ -4967,26 +4964,26 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) if (INTEL_GEN(dev_priv) >= 11) { if (intel_phy_is_tc(dev_priv, phy)) - intel_dig_port->connected = intel_tc_port_connected; + dig_port->connected = intel_tc_port_connected; else - intel_dig_port->connected = lpt_digital_port_connected; + dig_port->connected = lpt_digital_port_connected; } else if (INTEL_GEN(dev_priv) >= 8) { if (port == PORT_A || IS_GEN9_LP(dev_priv)) - intel_dig_port->connected = bdw_digital_port_connected; + dig_port->connected = bdw_digital_port_connected; else - intel_dig_port->connected = lpt_digital_port_connected; + dig_port->connected = lpt_digital_port_connected; } else { if (port == PORT_A) - intel_dig_port->connected = hsw_digital_port_connected; + dig_port->connected = hsw_digital_port_connected; else - intel_dig_port->connected = lpt_digital_port_connected; + dig_port->connected = lpt_digital_port_connected; } - intel_infoframe_init(intel_dig_port); + intel_infoframe_init(dig_port); return; err: drm_encoder_cleanup(&encoder->base); - kfree(intel_dig_port); + kfree(dig_port); } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 84e2a17b5ecb..98a59e014b75 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1612,13 +1612,13 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) } void vlv_wait_port_ready(struct drm_i915_private *dev_priv, - struct intel_digital_port *dport, + struct intel_digital_port *dig_port, unsigned int expected_mask) { u32 port_mask; i915_reg_t dpll_reg; - switch (dport->base.port) { + switch (dig_port->base.port) { case PORT_B: port_mask = DPLL_PORTB_READY_MASK; dpll_reg = DPLL(0); @@ -1640,7 +1640,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, port_mask, expected_mask, 1000)) drm_WARN(&dev_priv->drm, 1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n", - dport->base.base.base.id, dport->base.base.name, + dig_port->base.base.base.id, dig_port->base.base.name, intel_de_read(dev_priv, dpll_reg) & port_mask, expected_mask); } diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index f68007ff8a13..e890c8fb779b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -542,7 +542,7 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state); int ilk_get_lanes_required(int target_clock, int link_bw, int bpp); void vlv_wait_port_ready(struct drm_i915_private *dev_priv, - struct intel_digital_port *dport, + struct intel_digital_port *dig_port, unsigned int expected_mask); int intel_get_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old, diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index d1cb48b3f462..3644752cc5ec 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -1194,7 +1194,7 @@ static int 
i915_dp_mst_info(struct seq_file *m, void *unused) struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_device *dev = &dev_priv->drm; struct intel_encoder *intel_encoder; - struct intel_digital_port *intel_dig_port; + struct intel_digital_port *dig_port; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; @@ -1207,14 +1207,14 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused) if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) continue; - intel_dig_port = enc_to_dig_port(intel_encoder); - if (!intel_dig_port->dp.can_mst) + dig_port = enc_to_dig_port(intel_encoder); + if (!dig_port->dp.can_mst) continue; seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); - drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); + dig_port->base.base.base.id, + dig_port->base.base.name); + drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr); } drm_connector_list_iter_end(&conn_iter); diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 8a277dfbc070..0c713e83274d 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -1817,8 +1817,8 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct i915_power_domains *power_domains = &dev_priv->power_domains; - enum dpio_phy phy = vlv_dport_to_phy(enc_to_dig_port(encoder)); - enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder)); + enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); + enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); mutex_lock(&power_domains->lock); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 4b0aaa3081c9..e8f809161c75 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -279,10 +279,10 @@ enum check_link_response { */ struct intel_hdcp_shim { /* Outputs the transmitter's An and Aksv values to the receiver. */ - int (*write_an_aksv)(struct intel_digital_port *intel_dig_port, u8 *an); + int (*write_an_aksv)(struct intel_digital_port *dig_port, u8 *an); /* Reads the receiver's key selection vector */ - int (*read_bksv)(struct intel_digital_port *intel_dig_port, u8 *bksv); + int (*read_bksv)(struct intel_digital_port *dig_port, u8 *bksv); /* * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The @@ -290,52 +290,52 @@ struct intel_hdcp_shim { * different. Call it BSTATUS since that's the name the HDMI spec * uses and it was there first. 
*/ - int (*read_bstatus)(struct intel_digital_port *intel_dig_port, + int (*read_bstatus)(struct intel_digital_port *dig_port, u8 *bstatus); /* Determines whether a repeater is present downstream */ - int (*repeater_present)(struct intel_digital_port *intel_dig_port, + int (*repeater_present)(struct intel_digital_port *dig_port, bool *repeater_present); /* Reads the receiver's Ri' value */ - int (*read_ri_prime)(struct intel_digital_port *intel_dig_port, u8 *ri); + int (*read_ri_prime)(struct intel_digital_port *dig_port, u8 *ri); /* Determines if the receiver's KSV FIFO is ready for consumption */ - int (*read_ksv_ready)(struct intel_digital_port *intel_dig_port, + int (*read_ksv_ready)(struct intel_digital_port *dig_port, bool *ksv_ready); /* Reads the ksv fifo for num_downstream devices */ - int (*read_ksv_fifo)(struct intel_digital_port *intel_dig_port, + int (*read_ksv_fifo)(struct intel_digital_port *dig_port, int num_downstream, u8 *ksv_fifo); /* Reads a 32-bit part of V' from the receiver */ - int (*read_v_prime_part)(struct intel_digital_port *intel_dig_port, + int (*read_v_prime_part)(struct intel_digital_port *dig_port, int i, u32 *part); /* Enables HDCP signalling on the port */ - int (*toggle_signalling)(struct intel_digital_port *intel_dig_port, + int (*toggle_signalling)(struct intel_digital_port *dig_port, bool enable); /* Ensures the link is still protected */ - bool (*check_link)(struct intel_digital_port *intel_dig_port); + bool (*check_link)(struct intel_digital_port *dig_port); /* Detects panel's hdcp capability. This is optional for HDMI. */ - int (*hdcp_capable)(struct intel_digital_port *intel_dig_port, + int (*hdcp_capable)(struct intel_digital_port *dig_port, bool *hdcp_capable); /* HDCP adaptation(DP/HDMI) required on the port */ enum hdcp_wired_protocol protocol; /* Detects whether sink is HDCP2.2 capable */ - int (*hdcp_2_2_capable)(struct intel_digital_port *intel_dig_port, + int (*hdcp_2_2_capable)(struct intel_digital_port *dig_port, bool *capable); /* Write HDCP2.2 messages */ - int (*write_2_2_msg)(struct intel_digital_port *intel_dig_port, + int (*write_2_2_msg)(struct intel_digital_port *dig_port, void *buf, size_t size); /* Read HDCP2.2 messages */ - int (*read_2_2_msg)(struct intel_digital_port *intel_dig_port, + int (*read_2_2_msg)(struct intel_digital_port *dig_port, u8 msg_id, void *buf, size_t size); /* @@ -343,11 +343,11 @@ struct intel_hdcp_shim { * type to Receivers. In DP HDCP2.2 Stream type is one of the input to * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI. 
*/ - int (*config_stream_type)(struct intel_digital_port *intel_dig_port, + int (*config_stream_type)(struct intel_digital_port *dig_port, bool is_repeater, u8 type); /* HDCP2.2 Link Integrity Check */ - int (*check_2_2_link)(struct intel_digital_port *intel_dig_port); + int (*check_2_2_link)(struct intel_digital_port *dig_port); }; struct intel_hdcp { @@ -1434,9 +1434,9 @@ struct intel_dp_mst_encoder { }; static inline enum dpio_channel -vlv_dport_to_channel(struct intel_digital_port *dport) +vlv_dig_port_to_channel(struct intel_digital_port *dig_port) { - switch (dport->base.port) { + switch (dig_port->base.port) { case PORT_B: case PORT_D: return DPIO_CH0; @@ -1448,9 +1448,9 @@ vlv_dport_to_channel(struct intel_digital_port *dport) } static inline enum dpio_phy -vlv_dport_to_phy(struct intel_digital_port *dport) +vlv_dig_port_to_phy(struct intel_digital_port *dig_port) { - switch (dport->base.port) { + switch (dig_port->base.port) { case PORT_B: case PORT_C: return DPIO_PHY0; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index c9b93c5706af..a5ab405d3a12 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -142,9 +142,9 @@ static const u8 valid_dsc_slicecount[] = {1, 2, 4}; */ bool intel_dp_is_edp(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - return intel_dig_port->base.type == INTEL_OUTPUT_EDP; + return dig_port->base.type == INTEL_OUTPUT_EDP; } static void intel_dp_link_down(struct intel_encoder *encoder, @@ -218,10 +218,10 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp) /* Theoretical max between source and sink */ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - int source_max = intel_dig_port->max_lanes; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + int source_max = dig_port->max_lanes; int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); - int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port); + int fia_max = intel_tc_port_fia_max_lane_count(dig_port); return min3(source_max, sink_max, fia_max); } @@ -253,8 +253,8 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes) static int intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct intel_encoder *encoder = &intel_dig_port->base; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &dig_port->base; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int max_dotclk = dev_priv->max_dotclk_freq; int ds_max_dotclk; @@ -780,7 +780,7 @@ static void vlv_power_sequencer_kick(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum pipe pipe = intel_dp->pps_pipe; bool pll_enabled, release_cl_override = false; enum dpio_phy phy = DPIO_PHY(pipe); @@ -790,14 +790,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) if (drm_WARN(&dev_priv->drm, intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN, "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n", - pipe_name(pipe), intel_dig_port->base.base.base.id, - 
intel_dig_port->base.base.name)) + pipe_name(pipe), dig_port->base.base.base.id, + dig_port->base.base.name)) return; drm_dbg_kms(&dev_priv->drm, "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n", - pipe_name(pipe), intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + pipe_name(pipe), dig_port->base.base.base.id, + dig_port->base.base.name); /* Preserve the BIOS-computed detected bit. This is * supposed to be read-only. @@ -893,7 +893,7 @@ static enum pipe vlv_power_sequencer_pipe(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); enum pipe pipe; lockdep_assert_held(&dev_priv->pps_mutex); @@ -922,8 +922,8 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) drm_dbg_kms(&dev_priv->drm, "picked pipe %c power sequencer for [ENCODER:%d:%s]\n", pipe_name(intel_dp->pps_pipe), - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + dig_port->base.base.base.id, + dig_port->base.base.name); /* init power sequencer on this pipe and port */ intel_dp_init_panel_power_sequencer(intel_dp); @@ -1011,8 +1011,8 @@ static void vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - enum port port = intel_dig_port->base.port; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + enum port port = dig_port->base.port; lockdep_assert_held(&dev_priv->pps_mutex); @@ -1033,15 +1033,15 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) if (intel_dp->pps_pipe == INVALID_PIPE) { drm_dbg_kms(&dev_priv->drm, "no initial power sequencer for [ENCODER:%d:%s]\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + dig_port->base.base.base.id, + dig_port->base.base.name); return; } drm_dbg_kms(&dev_priv->drm, "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name, + dig_port->base.base.base.id, + dig_port->base.base.name, pipe_name(intel_dp->pps_pipe)); intel_dp_init_panel_power_sequencer(intel_dp); @@ -1306,9 +1306,9 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp, int send_bytes, u32 aux_clock_divider) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = - to_i915(intel_dig_port->base.base.dev); + to_i915(dig_port->base.base.dev); u32 precharge, timeout; if (IS_GEN(dev_priv, 6)) @@ -1336,10 +1336,10 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, int send_bytes, u32 unused) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *i915 = - to_i915(intel_dig_port->base.base.dev); - enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port); + to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); u32 ret; ret = DP_AUX_CH_CTL_SEND_BUSY | @@ -1353,7 +1353,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); if (intel_phy_is_tc(i915, phy) && - intel_dig_port->tc_mode == TC_PORT_TBT_ALT) + dig_port->tc_mode == TC_PORT_TBT_ALT) ret |= DP_AUX_CH_CTL_TBT_IO; return ret; @@ -1365,11 +1365,11 @@ 
intel_dp_aux_xfer(struct intel_dp *intel_dp, u8 *recv, int recv_size, u32 aux_send_ctl_flags) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *i915 = - to_i915(intel_dig_port->base.base.dev); + to_i915(dig_port->base.base.dev); struct intel_uncore *uncore = &i915->uncore; - enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); bool is_tc_port = intel_phy_is_tc(i915, phy); i915_reg_t ch_ctl, ch_data[5]; u32 aux_clock_divider; @@ -1386,9 +1386,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i); if (is_tc_port) - intel_tc_port_lock(intel_dig_port); + intel_tc_port_lock(dig_port); - aux_domain = intel_aux_power_domain(intel_dig_port); + aux_domain = intel_aux_power_domain(dig_port); aux_wakeref = intel_display_power_get(i915, aux_domain); pps_wakeref = pps_lock(intel_dp); @@ -1547,7 +1547,7 @@ out: intel_display_power_put_async(i915, aux_domain, aux_wakeref); if (is_tc_port) - intel_tc_port_unlock(intel_dig_port); + intel_tc_port_unlock(dig_port); return ret; } @@ -2893,7 +2893,7 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp) static bool edp_panel_vdd_on(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); u32 pp; i915_reg_t pp_stat_reg, pp_ctrl_reg; bool need_to_disable = !intel_dp->want_panel_vdd; @@ -2910,11 +2910,11 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) return need_to_disable; intel_display_power_get(dev_priv, - intel_aux_power_domain(intel_dig_port)); + intel_aux_power_domain(dig_port)); drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + dig_port->base.base.base.id, + dig_port->base.base.name); if (!edp_have_panel_power(intel_dp)) wait_panel_power_cycle(intel_dp); @@ -2936,8 +2936,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) if (!edp_have_panel_power(intel_dp)) { drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] panel power wasn't enabled\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + dig_port->base.base.base.id, + dig_port->base.base.name); msleep(intel_dp->panel_power_up_delay); } @@ -2970,7 +2970,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - struct intel_digital_port *intel_dig_port = + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); u32 pp; i915_reg_t pp_stat_reg, pp_ctrl_reg; @@ -2983,8 +2983,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) return; drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + dig_port->base.base.base.id, + dig_port->base.base.name); pp = ilk_get_pp_control(intel_dp); pp &= ~EDP_FORCE_VDD; @@ -3004,7 +3004,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) intel_dp->panel_power_off_time = ktime_get_boottime(); intel_display_power_put_unchecked(dev_priv, - intel_aux_power_domain(intel_dig_port)); + intel_aux_power_domain(dig_port)); } static void edp_panel_vdd_work(struct work_struct *__work) @@ -3835,8 +3835,8 @@ static void 
g4x_pre_enable_dp(struct intel_atomic_state *state, static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum pipe pipe = intel_dp->pps_pipe; i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe); @@ -3858,8 +3858,8 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) */ drm_dbg_kms(&dev_priv->drm, "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n", - pipe_name(pipe), intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + pipe_name(pipe), dig_port->base.base.base.id, + dig_port->base.base.name); intel_de_write(dev_priv, pp_on_reg, 0); intel_de_posting_read(dev_priv, pp_on_reg); @@ -4925,7 +4925,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, unsigned int type) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct dp_sdp sdp = {}; ssize_t len; @@ -4951,14 +4951,14 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder, if (drm_WARN_ON(&dev_priv->drm, len < 0)) return; - intel_dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); + dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); } void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, struct drm_dp_vsc_sdp *vsc) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct dp_sdp sdp = {}; ssize_t len; @@ -4968,7 +4968,7 @@ void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, if (drm_WARN_ON(&dev_priv->drm, len < 0)) return; - intel_dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC, + dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC, &sdp, len); } @@ -5128,7 +5128,7 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct drm_dp_vsc_sdp *vsc) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); unsigned int type = DP_SDP_VSC; @@ -5143,7 +5143,7 @@ static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, intel_hdmi_infoframe_enable(type)) == 0) return; - intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); + dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp)); @@ -5155,7 +5155,7 @@ static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encod struct intel_crtc_state *crtc_state, struct hdmi_drm_infoframe *drm_infoframe) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; struct dp_sdp sdp = {}; @@ -5165,8 +5165,8 @@ static void 
intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encod intel_hdmi_infoframe_enable(type)) == 0) return; - intel_dig_port->read_infoframe(encoder, crtc_state, type, &sdp, - sizeof(sdp)); + dig_port->read_infoframe(encoder, crtc_state, type, &sdp, + sizeof(sdp)); ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp, sizeof(sdp)); @@ -5368,10 +5368,10 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = to_i915(dp_to_dig_port(intel_dp)->base.base.dev); - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_dp_phy_test_params *data = &intel_dp->compliance.test_data.phytest; - struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); + struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); enum pipe pipe = crtc->pipe; u32 pattern_val; @@ -5433,10 +5433,10 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp) static void intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); + struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); enum pipe pipe = crtc->pipe; u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; @@ -5459,11 +5459,11 @@ intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp) static void intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, uint8_t lane_cnt) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - enum port port = intel_dig_port->base.port; - struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); + enum port port = dig_port->base.port; + struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); enum pipe pipe = crtc->pipe; u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; @@ -6334,10 +6334,10 @@ intel_dp_connector_unregister(struct drm_connector *connector) void intel_dp_encoder_flush_work(struct drm_encoder *encoder) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(to_intel_encoder(encoder)); - struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); + struct intel_dp *intel_dp = &dig_port->dp; - intel_dp_mst_encoder_cleanup(intel_dig_port); + intel_dp_mst_encoder_cleanup(dig_port); if (intel_dp_is_edp(intel_dp)) { intel_wakeref_t wakeref; @@ -6396,11 +6396,11 @@ static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout) } static -int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, +int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *dig_port, u8 *an) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); - struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(&intel_dig_port->base.base)); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_dp *intel_dp = 
enc_to_intel_dp(to_intel_encoder(&dig_port->base.base)); static const struct drm_dp_aux_msg msg = { .request = DP_AUX_NATIVE_WRITE, .address = DP_AUX_HDCP_AKSV, @@ -6411,7 +6411,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, int ret; /* Output An first, that's easy */ - dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN, + dpcd_ret = drm_dp_dpcd_write(&dig_port->dp.aux, DP_AUX_HDCP_AN, an, DRM_HDCP_AN_LEN); if (dpcd_ret != DRM_HDCP_AN_LEN) { drm_dbg_kms(&i915->drm, @@ -6450,13 +6450,13 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, return 0; } -static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, +static int intel_dp_hdcp_read_bksv(struct intel_digital_port *dig_port, u8 *bksv) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; - ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv, DRM_HDCP_KSV_LEN); if (ret != DRM_HDCP_KSV_LEN) { drm_dbg_kms(&i915->drm, @@ -6466,10 +6466,10 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, return 0; } -static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, +static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port, u8 *bstatus) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; /* @@ -6477,7 +6477,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, * definition by different names. In the HDMI spec, it's called BSTATUS, * but in DP it's called BINFO. 
*/ - ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO, + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BINFO, bstatus, DRM_HDCP_BSTATUS_LEN); if (ret != DRM_HDCP_BSTATUS_LEN) { drm_dbg_kms(&i915->drm, @@ -6488,13 +6488,13 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, } static -int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port, +int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port, u8 *bcaps) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; - ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS, + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS, bcaps, 1); if (ret != 1) { drm_dbg_kms(&i915->drm, @@ -6506,13 +6506,13 @@ int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port, } static -int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, +int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port, bool *repeater_present) { ssize_t ret; u8 bcaps; - ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); + ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); if (ret) return ret; @@ -6521,13 +6521,13 @@ int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, } static -int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, +int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *dig_port, u8 *ri_prime) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; - ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME, ri_prime, DRM_HDCP_RI_LEN); if (ret != DRM_HDCP_RI_LEN) { drm_dbg_kms(&i915->drm, "Read Ri' from DP/AUX failed (%zd)\n", @@ -6538,14 +6538,14 @@ int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, } static -int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, +int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *dig_port, bool *ksv_ready) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; u8 bstatus; - ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, &bstatus, 1); if (ret != 1) { drm_dbg_kms(&i915->drm, @@ -6557,17 +6557,17 @@ int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, } static -int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, +int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, int num_downstream, u8 *ksv_fifo) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; int i; /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */ for (i = 0; i < num_downstream; i += 3) { size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN; - ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_KSV_FIFO, ksv_fifo + i * DRM_HDCP_KSV_LEN, len); @@ -6582,16 +6582,16 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, } static -int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, 
+int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, int i, u32 *part) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) return -EINVAL; - ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_V_PRIME(i), part, DRM_HDCP_V_PRIME_PART_LEN); if (ret != DRM_HDCP_V_PRIME_PART_LEN) { @@ -6603,7 +6603,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, } static -int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, +int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *dig_port, bool enable) { /* Not used for single stream DisplayPort setups */ @@ -6611,13 +6611,13 @@ int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, } static -bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port) +bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; u8 bstatus; - ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BSTATUS, &bstatus, 1); if (ret != 1) { drm_dbg_kms(&i915->drm, @@ -6629,13 +6629,13 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port) } static -int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port, +int intel_dp_hdcp_capable(struct intel_digital_port *dig_port, bool *hdcp_capable) { ssize_t ret; u8 bcaps; - ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps); + ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps); if (ret) return ret; @@ -6693,13 +6693,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = { }; static int -intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, +intel_dp_hdcp2_read_rx_status(struct intel_digital_port *dig_port, u8 *rx_status) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); ssize_t ret; - ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status, HDCP_2_2_DP_RXSTATUS_LEN); if (ret != HDCP_2_2_DP_RXSTATUS_LEN) { @@ -6712,14 +6712,14 @@ intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, } static -int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, +int hdcp2_detect_msg_availability(struct intel_digital_port *dig_port, u8 msg_id, bool *msg_ready) { u8 rx_status; int ret; *msg_ready = false; - ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); + ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); if (ret < 0) return ret; @@ -6745,11 +6745,11 @@ int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, } static ssize_t -intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, +intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *dig_port, const struct hdcp2_dp_msg_data *hdcp2_msg_data) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); - struct intel_dp *dp = &intel_dig_port->dp; + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_dp *dp = &dig_port->dp; struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; u8 msg_id = 
hdcp2_msg_data->msg_id; int ret, timeout; @@ -6773,7 +6773,7 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, * the timeout at wait for CP_IRQ. */ intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout); - ret = hdcp2_detect_msg_availability(intel_dig_port, + ret = hdcp2_detect_msg_availability(dig_port, msg_id, &msg_ready); if (!msg_ready) ret = -ETIMEDOUT; @@ -6799,10 +6799,10 @@ static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id) } static -int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, +int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port, void *buf, size_t size) { - struct intel_dp *dp = &intel_dig_port->dp; + struct intel_dp *dp = &dig_port->dp; struct intel_hdcp *hdcp = &dp->attached_connector->hdcp; unsigned int offset; u8 *byte = buf; @@ -6825,7 +6825,7 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ? DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write; - ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, + ret = drm_dp_dpcd_write(&dig_port->dp.aux, offset, (void *)byte, len); if (ret < 0) return ret; @@ -6839,13 +6839,13 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, } static -ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port) +ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port) { u8 rx_info[HDCP_2_2_RXINFO_LEN]; u32 dev_cnt; ssize_t ret; - ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_HDCP_2_2_REG_RXINFO_OFFSET, (void *)rx_info, HDCP_2_2_RXINFO_LEN); if (ret != HDCP_2_2_RXINFO_LEN) @@ -6865,10 +6865,10 @@ ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port) } static -int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, +int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port, u8 msg_id, void *buf, size_t size) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); unsigned int offset; u8 *byte = buf; ssize_t ret, bytes_to_recv, len; @@ -6879,12 +6879,12 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, return -EINVAL; offset = hdcp2_msg_data->offset; - ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data); + ret = intel_dp_hdcp2_wait_for_msg(dig_port, hdcp2_msg_data); if (ret < 0) return ret; if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) { - ret = get_receiver_id_list_size(intel_dig_port); + ret = get_receiver_id_list_size(dig_port); if (ret < 0) return ret; @@ -6899,7 +6899,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ? 
DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv; - ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset, + ret = drm_dp_dpcd_read(&dig_port->dp.aux, offset, (void *)byte, len); if (ret < 0) { drm_dbg_kms(&i915->drm, "msg_id %d, ret %zd\n", @@ -6918,7 +6918,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, } static -int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port, +int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *dig_port, bool is_repeater, u8 content_type) { int ret; @@ -6937,7 +6937,7 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port, stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE; stream_type_msg.stream_type = content_type; - ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg, + ret = intel_dp_hdcp2_write_msg(dig_port, &stream_type_msg, sizeof(stream_type_msg)); return ret < 0 ? ret : 0; @@ -6945,12 +6945,12 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port, } static -int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port) +int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port) { u8 rx_status; int ret; - ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status); + ret = intel_dp_hdcp2_read_rx_status(dig_port, &rx_status); if (ret) return ret; @@ -6965,14 +6965,14 @@ int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port) } static -int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port, +int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port, bool *capable) { u8 rx_caps[3]; int ret; *capable = false; - ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, + ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_HDCP_2_2_REG_RX_CAPS_OFFSET, rx_caps, HDCP_2_2_RXCAPS_LEN); if (ret != HDCP_2_2_RXCAPS_LEN) @@ -7251,12 +7251,12 @@ static bool intel_edp_have_power(struct intel_dp *intel_dp) } enum irqreturn -intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) +intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); - struct intel_dp *intel_dp = &intel_dig_port->dp; + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_dp *intel_dp = &dig_port->dp; - if (intel_dig_port->base.type == INTEL_OUTPUT_EDP && + if (dig_port->base.type == INTEL_OUTPUT_EDP && (long_hpd || !intel_edp_have_power(intel_dp))) { /* * vdd off can generate a long/short pulse on eDP which @@ -7267,14 +7267,14 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) drm_dbg_kms(&i915->drm, "ignoring %s hpd on eDP [ENCODER:%d:%s]\n", long_hpd ? "long" : "short", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name); + dig_port->base.base.base.id, + dig_port->base.base.name); return IRQ_HANDLED; } drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n", - intel_dig_port->base.base.base.id, - intel_dig_port->base.base.name, + dig_port->base.base.base.id, + dig_port->base.base.name, long_hpd ? 
"long" : "short"); if (long_hpd) { @@ -8137,12 +8137,12 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work) } bool -intel_dp_init_connector(struct intel_digital_port *intel_dig_port, +intel_dp_init_connector(struct intel_digital_port *dig_port, struct intel_connector *intel_connector) { struct drm_connector *connector = &intel_connector->base; - struct intel_dp *intel_dp = &intel_dig_port->dp; - struct intel_encoder *intel_encoder = &intel_dig_port->base; + struct intel_dp *intel_dp = &dig_port->dp; + struct intel_encoder *intel_encoder = &dig_port->base; struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_encoder->port; @@ -8153,9 +8153,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, INIT_WORK(&intel_connector->modeset_retry_work, intel_dp_modeset_retry_work_fn); - if (drm_WARN(dev, intel_dig_port->max_lanes < 1, + if (drm_WARN(dev, dig_port->max_lanes < 1, "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", - intel_dig_port->max_lanes, intel_encoder->base.base.id, + dig_port->max_lanes, intel_encoder->base.base.id, intel_encoder->base.name)) return false; @@ -8218,12 +8218,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_connector->get_hw_state = intel_connector_get_hw_state; /* init MST on ports that can support it */ - intel_dp_mst_encoder_init(intel_dig_port, + intel_dp_mst_encoder_init(dig_port, intel_connector->base.base.id); if (!intel_edp_init_connector(intel_dp, intel_connector)) { intel_dp_aux_fini(intel_dp); - intel_dp_mst_encoder_cleanup(intel_dig_port); + intel_dp_mst_encoder_cleanup(dig_port); goto fail; } @@ -8258,20 +8258,20 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, enum port port) { - struct intel_digital_port *intel_dig_port; + struct intel_digital_port *dig_port; struct intel_encoder *intel_encoder; struct drm_encoder *encoder; struct intel_connector *intel_connector; - intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); - if (!intel_dig_port) + dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); + if (!dig_port) return false; intel_connector = intel_connector_alloc(); if (!intel_connector) goto err_connector_alloc; - intel_encoder = &intel_dig_port->base; + intel_encoder = &dig_port->base; encoder = &intel_encoder->base; if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base, @@ -8307,34 +8307,34 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || (HAS_PCH_CPT(dev_priv) && port != PORT_A)) - intel_dig_port->dp.set_link_train = cpt_set_link_train; + dig_port->dp.set_link_train = cpt_set_link_train; else - intel_dig_port->dp.set_link_train = g4x_set_link_train; + dig_port->dp.set_link_train = g4x_set_link_train; if (IS_CHERRYVIEW(dev_priv)) - intel_dig_port->dp.set_signal_levels = chv_set_signal_levels; + dig_port->dp.set_signal_levels = chv_set_signal_levels; else if (IS_VALLEYVIEW(dev_priv)) - intel_dig_port->dp.set_signal_levels = vlv_set_signal_levels; + dig_port->dp.set_signal_levels = vlv_set_signal_levels; else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) - intel_dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels; + dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels; else if (IS_GEN(dev_priv, 6) && port == PORT_A) - intel_dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels; + dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels; else - intel_dig_port->dp.set_signal_levels 
= g4x_set_signal_levels; + dig_port->dp.set_signal_levels = g4x_set_signal_levels; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) || (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) { - intel_dig_port->dp.preemph_max = intel_dp_preemph_max_3; - intel_dig_port->dp.voltage_max = intel_dp_voltage_max_3; + dig_port->dp.preemph_max = intel_dp_preemph_max_3; + dig_port->dp.voltage_max = intel_dp_voltage_max_3; } else { - intel_dig_port->dp.preemph_max = intel_dp_preemph_max_2; - intel_dig_port->dp.voltage_max = intel_dp_voltage_max_2; + dig_port->dp.preemph_max = intel_dp_preemph_max_2; + dig_port->dp.voltage_max = intel_dp_voltage_max_2; } - intel_dig_port->dp.output_reg = output_reg; - intel_dig_port->max_lanes = 4; - intel_dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); - intel_dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); + dig_port->dp.output_reg = output_reg; + dig_port->max_lanes = 4; + dig_port->dp.regs.dp_tp_ctl = DP_TP_CTL(port); + dig_port->dp.regs.dp_tp_status = DP_TP_STATUS(port); intel_encoder->type = INTEL_OUTPUT_DP; intel_encoder->power_domain = intel_port_to_power_domain(port); @@ -8349,25 +8349,25 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_encoder->cloneable = 0; intel_encoder->port = port; - intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; + dig_port->hpd_pulse = intel_dp_hpd_pulse; if (HAS_GMCH(dev_priv)) { if (IS_GM45(dev_priv)) - intel_dig_port->connected = gm45_digital_port_connected; + dig_port->connected = gm45_digital_port_connected; else - intel_dig_port->connected = g4x_digital_port_connected; + dig_port->connected = g4x_digital_port_connected; } else { if (port == PORT_A) - intel_dig_port->connected = ilk_digital_port_connected; + dig_port->connected = ilk_digital_port_connected; else - intel_dig_port->connected = ibx_digital_port_connected; + dig_port->connected = ibx_digital_port_connected; } if (port != PORT_A) - intel_infoframe_init(intel_dig_port); + intel_infoframe_init(dig_port); - intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); - if (!intel_dp_init_connector(intel_dig_port, intel_connector)) + dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); + if (!intel_dp_init_connector(dig_port, intel_connector)) goto err_init_connector; return true; @@ -8377,7 +8377,7 @@ err_init_connector: err_encoder_init: kfree(intel_connector); err_connector_alloc: - kfree(intel_dig_port); + kfree(dig_port); return false; } diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 0a8950f744f6..b901ab850cbd 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -40,7 +40,7 @@ bool intel_dp_port_enabled(struct drm_i915_private *dev_priv, enum pipe *pipe); bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg, enum port port); -bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, +bool intel_dp_init_connector(struct intel_digital_port *dig_port, struct intel_connector *intel_connector); void intel_dp_set_link_params(struct intel_dp *intel_dp, int link_rate, u8 lane_count, @@ -61,7 +61,7 @@ int intel_dp_compute_config(struct intel_encoder *encoder, struct drm_connector_state *conn_state); bool intel_dp_is_edp(struct intel_dp *intel_dp); bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port); -enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, +enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd); void 
intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 8273f2e07427..bdc19b04b2c1 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -342,8 +342,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state, const struct drm_connector_state *old_conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); - struct intel_digital_port *intel_dig_port = intel_mst->primary; - struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_digital_port *dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &dig_port->dp; struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); @@ -369,8 +369,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, const struct drm_connector_state *old_conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); - struct intel_digital_port *intel_dig_port = intel_mst->primary; - struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_digital_port *dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &dig_port->dp; struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -421,7 +421,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, * the transcoder clock select is set to none. */ if (last_mst_stream) - intel_dp_set_infoframes(&intel_dig_port->base, false, + intel_dp_set_infoframes(&dig_port->base, false, old_crtc_state, NULL); /* * From TGL spec: "If multi-stream slave transcoder: Configure @@ -436,7 +436,7 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, intel_mst->connector = NULL; if (last_mst_stream) - intel_dig_port->base.post_disable(state, &intel_dig_port->base, + dig_port->base.post_disable(state, &dig_port->base, old_crtc_state, NULL); drm_dbg_kms(&dev_priv->drm, "active links %d\n", @@ -449,11 +449,11 @@ static void intel_mst_pre_pll_enable_dp(struct intel_atomic_state *state, const struct drm_connector_state *conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); - struct intel_digital_port *intel_dig_port = intel_mst->primary; - struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_digital_port *dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &dig_port->dp; if (intel_dp->active_mst_links == 0) - intel_dig_port->base.pre_pll_enable(state, &intel_dig_port->base, + dig_port->base.pre_pll_enable(state, &dig_port->base, pipe_config, NULL); } @@ -463,8 +463,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, const struct drm_connector_state *conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); - struct intel_digital_port *intel_dig_port = intel_mst->primary; - struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_digital_port *dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_connector *connector = to_intel_connector(conn_state->connector); @@ -490,7 +490,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true); if (first_mst_stream) - 
intel_dig_port->base.pre_enable(state, &intel_dig_port->base, + dig_port->base.pre_enable(state, &dig_port->base, pipe_config, NULL); ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr, @@ -506,7 +506,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, /* * Before Gen 12 this is not done as part of - * intel_dig_port->base.pre_enable() and should be done here. For + * dig_port->base.pre_enable() and should be done here. For * Gen 12+ the step in which this should be done is different for the * first MST stream, so it's done on the DDI for the first stream and * here for the following ones. @@ -525,8 +525,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, const struct drm_connector_state *conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); - struct intel_digital_port *intel_dig_port = intel_mst->primary; - struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_digital_port *dig_port = intel_mst->primary; + struct intel_dp *intel_dp = &dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 val; @@ -572,9 +572,9 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); - struct intel_digital_port *intel_dig_port = intel_mst->primary; + struct intel_digital_port *dig_port = intel_mst->primary; - intel_ddi_get_config(&intel_dig_port->base, pipe_config); + intel_ddi_get_config(&dig_port->base, pipe_config); } static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) @@ -732,8 +732,8 @@ static bool intel_dp_mst_get_hw_state(struct intel_connector *connector) static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop) { struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_connector *intel_connector; struct drm_connector *connector; @@ -808,11 +808,11 @@ static const struct drm_dp_mst_topology_cbs mst_cbs = { }; static struct intel_dp_mst_encoder * -intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe) +intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe pipe) { struct intel_dp_mst_encoder *intel_mst; struct intel_encoder *intel_encoder; - struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_device *dev = dig_port->base.base.dev; intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL); @@ -821,14 +821,14 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum intel_mst->pipe = pipe; intel_encoder = &intel_mst->base; - intel_mst->primary = intel_dig_port; + intel_mst->primary = dig_port; drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe)); intel_encoder->type = INTEL_OUTPUT_DP_MST; - intel_encoder->power_domain = intel_dig_port->base.power_domain; - intel_encoder->port = intel_dig_port->base.port; + intel_encoder->power_domain = dig_port->base.power_domain; + intel_encoder->port = dig_port->base.port; intel_encoder->cloneable = 0; /* * This is wrong, but broken userspace uses the 
intersection @@ -855,29 +855,29 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum } static bool -intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port) +intel_dp_create_fake_mst_encoders(struct intel_digital_port *dig_port) { - struct intel_dp *intel_dp = &intel_dig_port->dp; - struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); + struct intel_dp *intel_dp = &dig_port->dp; + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum pipe pipe; for_each_pipe(dev_priv, pipe) - intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(intel_dig_port, pipe); + intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(dig_port, pipe); return true; } int -intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port) +intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port) { - return intel_dig_port->dp.active_mst_links; + return dig_port->dp.active_mst_links; } int -intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id) +intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); - struct intel_dp *intel_dp = &intel_dig_port->dp; - enum port port = intel_dig_port->base.port; + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_dp *intel_dp = &dig_port->dp; + enum port port = dig_port->base.port; int ret; if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp)) @@ -892,7 +892,7 @@ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_ba intel_dp->mst_mgr.cbs = &mst_cbs; /* create encoders */ - intel_dp_create_fake_mst_encoders(intel_dig_port); + intel_dp_create_fake_mst_encoders(dig_port); ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm, &intel_dp->aux, 16, 3, conn_base_id); if (ret) @@ -904,9 +904,9 @@ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_ba } void -intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port) +intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port) { - struct intel_dp *intel_dp = &intel_dig_port->dp; + struct intel_dp *intel_dp = &dig_port->dp; if (!intel_dp->can_mst) return; diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h index 854724f68f09..6afda4e86b3c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.h +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h @@ -11,9 +11,9 @@ struct intel_digital_port; struct intel_crtc_state; -int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); -void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); -int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port); +int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id); +void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port); +int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port); bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state); bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c index 399a7edb4568..7910522273b2 100644 --- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c @@ -650,9 +650,9 @@ void 
chv_set_phy_signal_level(struct intel_encoder *encoder, bool uniq_trans_scale) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dport = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); - enum dpio_channel ch = vlv_dport_to_channel(dport); + enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); enum pipe pipe = intel_crtc->pipe; u32 val; int i; @@ -746,7 +746,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, bool reset) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(encoder)); + enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); enum pipe pipe = crtc->pipe; u32 val; @@ -789,10 +789,10 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder, void chv_phy_pre_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_digital_port *dport = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum dpio_channel ch = vlv_dport_to_channel(dport); + enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); enum pipe pipe = crtc->pipe; unsigned int lane_mask = intel_dp_unused_lane_mask(crtc_state->lane_count); @@ -803,7 +803,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder, * Otherwise we can't even access the PLL. */ if (ch == DPIO_CH0 && pipe == PIPE_B) - dport->release_cl2_override = + dig_port->release_cl2_override = !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true); chv_phy_powergate_lanes(encoder, true, lane_mask); @@ -870,10 +870,10 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum dpio_channel ch = vlv_dport_to_channel(dport); + enum dpio_channel ch = vlv_dig_port_to_channel(dig_port); enum pipe pipe = crtc->pipe; int data, i, stagger; u32 val; @@ -948,12 +948,12 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder, void chv_phy_release_cl2_override(struct intel_encoder *encoder) { - struct intel_digital_port *dport = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (dport->release_cl2_override) { + if (dig_port->release_cl2_override) { chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false); - dport->release_cl2_override = false; + dig_port->release_cl2_override = false; } } @@ -997,8 +997,8 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); - struct intel_digital_port *dport = enc_to_dig_port(encoder); - enum dpio_channel port = vlv_dport_to_channel(dport); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + enum dpio_channel port = 
vlv_dig_port_to_channel(dig_port); enum pipe pipe = intel_crtc->pipe; vlv_dpio_get(dev_priv); @@ -1022,10 +1022,10 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder, void vlv_phy_pre_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct intel_digital_port *dport = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); + enum dpio_channel port = vlv_dig_port_to_channel(dig_port); enum pipe pipe = crtc->pipe; /* Program Tx lane resets to default */ @@ -1052,10 +1052,10 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); + enum dpio_channel port = vlv_dig_port_to_channel(dig_port); enum pipe pipe = crtc->pipe; u32 val; @@ -1081,10 +1081,10 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder, void vlv_phy_reset_lanes(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state) { - struct intel_digital_port *dport = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); - enum dpio_channel port = vlv_dport_to_channel(dport); + enum dpio_channel port = vlv_dig_port_to_channel(dig_port); enum pipe pipe = crtc->pipe; vlv_dpio_get(dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 815b054bb167..1b6dadfce4eb 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -40,15 +40,15 @@ bool intel_hdcp_is_ksv_valid(u8 *ksv) } static -int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port, +int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim, u8 *bksv) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret, i, tries = 2; /* HDCP spec states that we must retry the bksv if it is invalid */ for (i = 0; i < tries; i++) { - ret = shim->read_bksv(intel_dig_port, bksv); + ret = shim->read_bksv(dig_port, bksv); if (ret) return ret; if (intel_hdcp_is_ksv_valid(bksv)) @@ -65,7 +65,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port, /* Is HDCP1.4 capable on Platform and Sink */ bool intel_hdcp_capable(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); const struct intel_hdcp_shim *shim = connector->hdcp.shim; bool capable = false; u8 bksv[5]; @@ -74,9 +74,9 @@ bool intel_hdcp_capable(struct intel_connector *connector) return capable; if (shim->hdcp_capable) { - shim->hdcp_capable(intel_dig_port, &capable); + shim->hdcp_capable(dig_port, &capable); } else { - if 
(!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv)) + if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv)) capable = true; } @@ -86,7 +86,7 @@ bool intel_hdcp_capable(struct intel_connector *connector) /* Is HDCP2.2 capable on Platform and Sink */ bool intel_hdcp2_capable(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; bool capable = false; @@ -104,7 +104,7 @@ bool intel_hdcp2_capable(struct intel_connector *connector) mutex_unlock(&dev_priv->hdcp_comp_mutex); /* Sink's capability for HDCP2.2 */ - hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable); + hdcp->shim->hdcp_2_2_capable(dig_port, &capable); return capable; } @@ -125,14 +125,14 @@ static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv, LINK_ENCRYPTION_STATUS; } -static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port, +static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, const struct intel_hdcp_shim *shim) { int ret, read_ret; bool ksv_ready; /* Poll for ksv list ready (spec says max time allowed is 5s) */ - ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port, + ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port, &ksv_ready), read_ret || ksv_ready, 5 * 1000 * 1000, 1000, 100 * 1000); @@ -300,16 +300,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, const struct intel_hdcp_shim *shim, u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) { - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; - enum port port = intel_dig_port->base.port; + enum port port = dig_port->base.port; u32 vprime, sha_text, sha_leftovers, rep_ctl; int ret, i, j, sha_idx; /* Process V' values from the receiver */ for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) { - ret = shim->read_v_prime_part(intel_dig_port, i, &vprime); + ret = shim->read_v_prime_part(dig_port, i, &vprime); if (ret) return ret; intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime); @@ -528,20 +528,20 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector, static int intel_hdcp_auth_downstream(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); const struct intel_hdcp_shim *shim = connector->hdcp.shim; u8 bstatus[2], num_downstream, *ksv_fifo; int ret, i, tries = 3; - ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); + ret = intel_hdcp_poll_ksv_fifo(dig_port, shim); if (ret) { drm_dbg_kms(&dev_priv->drm, "KSV list failed to become ready (%d)\n", ret); return ret; } - ret = shim->read_bstatus(intel_dig_port, bstatus); + ret = shim->read_bstatus(dig_port, bstatus); if (ret) return ret; @@ -571,7 +571,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) return -ENOMEM; } - ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo); + ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo); if (ret) goto err; @@ -611,12 +611,12 @@ err: /* Implements Part 1 of 
the HDCP authorization procedure */ static int intel_hdcp_auth(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; const struct intel_hdcp_shim *shim = hdcp->shim; enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; - enum port port = intel_dig_port->base.port; + enum port port = dig_port->base.port; unsigned long r0_prime_gen_start; int ret, i, tries = 2; union { @@ -640,7 +640,7 @@ static int intel_hdcp_auth(struct intel_connector *connector) * displays, this is not necessary. */ if (shim->hdcp_capable) { - ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable); + ret = shim->hdcp_capable(dig_port, &hdcp_capable); if (ret) return ret; if (!hdcp_capable) { @@ -670,7 +670,7 @@ static int intel_hdcp_auth(struct intel_connector *connector) HDCP_ANLO(dev_priv, cpu_transcoder, port)); an.reg[1] = intel_de_read(dev_priv, HDCP_ANHI(dev_priv, cpu_transcoder, port)); - ret = shim->write_an_aksv(intel_dig_port, an.shim); + ret = shim->write_an_aksv(dig_port, an.shim); if (ret) return ret; @@ -678,7 +678,7 @@ static int intel_hdcp_auth(struct intel_connector *connector) memset(&bksv, 0, sizeof(bksv)); - ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim); + ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim); if (ret < 0) return ret; @@ -692,14 +692,14 @@ static int intel_hdcp_auth(struct intel_connector *connector) intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]); - ret = shim->repeater_present(intel_dig_port, &repeater_present); + ret = shim->repeater_present(dig_port, &repeater_present); if (ret) return ret; if (repeater_present) intel_de_write(dev_priv, HDCP_REP_CTL, intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port)); - ret = shim->toggle_signalling(intel_dig_port, true); + ret = shim->toggle_signalling(dig_port, true); if (ret) return ret; @@ -732,7 +732,7 @@ static int intel_hdcp_auth(struct intel_connector *connector) */ for (i = 0; i < tries; i++) { ri.reg = 0; - ret = shim->read_ri_prime(intel_dig_port, ri.shim); + ret = shim->read_ri_prime(dig_port, ri.shim); if (ret) return ret; intel_de_write(dev_priv, @@ -776,10 +776,10 @@ static int intel_hdcp_auth(struct intel_connector *connector) static int _intel_hdcp_disable(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - enum port port = intel_dig_port->base.port; + enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; @@ -796,7 +796,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector) return -ETIMEDOUT; } - ret = hdcp->shim->toggle_signalling(intel_dig_port, false); + ret = hdcp->shim->toggle_signalling(dig_port, false); if (ret) { drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n"); return ret; @@ -859,10 +859,10 @@ static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp) /* Implements Part 3 of the HDCP authorization procedure */ static int intel_hdcp_check_link(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = 
intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - enum port port = intel_dig_port->base.port; + enum port port = dig_port->base.port; enum transcoder cpu_transcoder; int ret = 0; @@ -888,7 +888,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector) goto out; } - if (hdcp->shim->check_link(intel_dig_port)) { + if (hdcp->shim->check_link(dig_port)) { if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; schedule_work(&hdcp->prop_work); @@ -1242,7 +1242,7 @@ static int hdcp2_deauthenticate_port(struct intel_connector *connector) /* Authentication flow starts from here */ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; union { @@ -1264,12 +1264,12 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) if (ret < 0) return ret; - ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init, + ret = shim->write_2_2_msg(dig_port, &msgs.ake_init, sizeof(msgs.ake_init)); if (ret < 0) return ret; - ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT, + ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT, &msgs.send_cert, sizeof(msgs.send_cert)); if (ret < 0) return ret; @@ -1298,11 +1298,11 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) if (ret < 0) return ret; - ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size); + ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size); if (ret < 0) return ret; - ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME, + ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME, &msgs.send_hprime, sizeof(msgs.send_hprime)); if (ret < 0) return ret; @@ -1313,7 +1313,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) if (!hdcp->is_paired) { /* Pairing is required */ - ret = shim->read_2_2_msg(intel_dig_port, + ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_PAIRING_INFO, &msgs.pairing_info, sizeof(msgs.pairing_info)); @@ -1331,7 +1331,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) static int hdcp2_locality_check(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; union { struct hdcp2_lc_init lc_init; @@ -1345,12 +1345,12 @@ static int hdcp2_locality_check(struct intel_connector *connector) if (ret < 0) continue; - ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init, + ret = shim->write_2_2_msg(dig_port, &msgs.lc_init, sizeof(msgs.lc_init)); if (ret < 0) continue; - ret = shim->read_2_2_msg(intel_dig_port, + ret = shim->read_2_2_msg(dig_port, HDCP_2_2_LC_SEND_LPRIME, &msgs.send_lprime, sizeof(msgs.send_lprime)); @@ -1367,7 +1367,7 @@ static int hdcp2_locality_check(struct intel_connector *connector) static int hdcp2_session_key_exchange(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); 
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; struct hdcp2_ske_send_eks send_eks; int ret; @@ -1376,7 +1376,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector) if (ret < 0) return ret; - ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks, + ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks, sizeof(send_eks)); if (ret < 0) return ret; @@ -1387,7 +1387,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector) static int hdcp2_propagate_stream_management_info(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; union { @@ -1409,12 +1409,12 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector) msgs.stream_manage.streams[0].stream_type = hdcp->content_type; /* Send it to Repeater */ - ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage, + ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage, sizeof(msgs.stream_manage)); if (ret < 0) return ret; - ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY, + ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY, &msgs.stream_ready, sizeof(msgs.stream_ready)); if (ret < 0) return ret; @@ -1439,7 +1439,7 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector) static int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; union { @@ -1451,7 +1451,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) u8 *rx_info; int ret; - ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST, + ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST, &msgs.recvid_list, sizeof(msgs.recvid_list)); if (ret < 0) return ret; @@ -1496,7 +1496,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) return ret; hdcp->seq_num_v = seq_num_v; - ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack, + ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack, sizeof(msgs.rep_ack)); if (ret < 0) return ret; @@ -1517,7 +1517,7 @@ static int hdcp2_authenticate_repeater(struct intel_connector *connector) static int hdcp2_authenticate_sink(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; const struct intel_hdcp_shim *shim = hdcp->shim; @@ -1543,7 +1543,7 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector) } if (shim->config_stream_type) { - ret = shim->config_stream_type(intel_dig_port, + ret = shim->config_stream_type(dig_port, hdcp->is_repeater, hdcp->content_type); if (ret < 0) @@ -1569,10 +1569,10 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector) static int hdcp2_enable_encryption(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = 
intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - enum port port = intel_dig_port->base.port; + enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; @@ -1580,7 +1580,7 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & LINK_ENCRYPTION_STATUS); if (hdcp->shim->toggle_signalling) { - ret = hdcp->shim->toggle_signalling(intel_dig_port, true); + ret = hdcp->shim->toggle_signalling(dig_port, true); if (ret) { drm_err(&dev_priv->drm, "Failed to enable HDCP signalling. %d\n", @@ -1608,10 +1608,10 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) static int hdcp2_disable_encryption(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - enum port port = intel_dig_port->base.port; + enum port port = dig_port->base.port; enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; @@ -1630,7 +1630,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector) drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout"); if (hdcp->shim->toggle_signalling) { - ret = hdcp->shim->toggle_signalling(intel_dig_port, false); + ret = hdcp->shim->toggle_signalling(dig_port, false); if (ret) { drm_err(&dev_priv->drm, "Failed to disable HDCP signalling. %d\n", @@ -1723,10 +1723,10 @@ static int _intel_hdcp2_disable(struct intel_connector *connector) /* Implements the Link Integrity Check for HDCP2.2 */ static int intel_hdcp2_check_link(struct intel_connector *connector) { - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; - enum port port = intel_dig_port->base.port; + enum port port = dig_port->base.port; enum transcoder cpu_transcoder; int ret = 0; @@ -1751,7 +1751,7 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) goto out; } - ret = hdcp->shim->check_2_2_link(intel_dig_port); + ret = hdcp->shim->check_2_2_link(dig_port); if (ret == HDCP_LINK_PROTECTED) { if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED; diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 864a1642e81c..973450a393d2 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -88,10 +88,10 @@ assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv, struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder) { - struct intel_digital_port *intel_dig_port = + struct intel_digital_port *dig_port = container_of(&encoder->base, struct intel_digital_port, base.base); - return &intel_dig_port->hdmi; + return &dig_port->hdmi; } static struct intel_hdmi *intel_attached_hdmi(struct intel_connector *connector) @@ -660,7 +660,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder, enum hdmi_infoframe_type type, const union 
hdmi_infoframe *frame) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); u8 buffer[VIDEO_DIP_DATA_SIZE]; ssize_t len; @@ -681,7 +681,7 @@ static void intel_write_infoframe(struct intel_encoder *encoder, buffer[3] = 0; len++; - intel_dig_port->write_infoframe(encoder, crtc_state, type, buffer, len); + dig_port->write_infoframe(encoder, crtc_state, type, buffer, len); } void intel_read_infoframe(struct intel_encoder *encoder, @@ -689,7 +689,7 @@ void intel_read_infoframe(struct intel_encoder *encoder, enum hdmi_infoframe_type type, union hdmi_infoframe *frame) { - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); u8 buffer[VIDEO_DIP_DATA_SIZE]; int ret; @@ -697,7 +697,7 @@ void intel_read_infoframe(struct intel_encoder *encoder, intel_hdmi_infoframe_enable(type)) == 0) return; - intel_dig_port->read_infoframe(encoder, crtc_state, + dig_port->read_infoframe(encoder, crtc_state, type, buffer, sizeof(buffer)); /* Fill the 'hole' (see big comment above) at position 3 */ @@ -872,8 +872,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + struct intel_hdmi *intel_hdmi = &dig_port->hdmi; i915_reg_t reg = VIDEO_DIP_CTL; u32 val = intel_de_read(dev_priv, reg); u32 port = VIDEO_DIP_PORT(encoder->port); @@ -1057,8 +1057,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); - struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + struct intel_hdmi *intel_hdmi = &dig_port->hdmi; i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); u32 val = intel_de_read(dev_priv, reg); u32 port = VIDEO_DIP_PORT(encoder->port); @@ -1275,11 +1275,11 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) adapter, enable); } -static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port, +static int intel_hdmi_hdcp_read(struct intel_digital_port *dig_port, unsigned int offset, void *buffer, size_t size) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); - struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_hdmi *hdmi = &dig_port->hdmi; struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915, hdmi->ddc_bus); int ret; @@ -1304,11 +1304,11 @@ static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port, return ret >= 0 ? 
-EIO : ret; } -static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, +static int intel_hdmi_hdcp_write(struct intel_digital_port *dig_port, unsigned int offset, void *buffer, size_t size) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); - struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_hdmi *hdmi = &dig_port->hdmi; struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915, hdmi->ddc_bus); int ret; @@ -1338,16 +1338,16 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port, } static -int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, +int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port, u8 *an) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); - struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_hdmi *hdmi = &dig_port->hdmi; struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915, hdmi->ddc_bus); int ret; - ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an, + ret = intel_hdmi_hdcp_write(dig_port, DRM_HDCP_DDC_AN, an, DRM_HDCP_AN_LEN); if (ret) { drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n", @@ -1363,13 +1363,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port, return 0; } -static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, +static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *dig_port, u8 *bksv) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret; - ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv, + ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BKSV, bksv, DRM_HDCP_KSV_LEN); if (ret) drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n", @@ -1378,13 +1378,13 @@ static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port, } static -int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, +int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *dig_port, u8 *bstatus) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret; - ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS, + ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BSTATUS, bstatus, DRM_HDCP_BSTATUS_LEN); if (ret) drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n", @@ -1393,14 +1393,14 @@ int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port, } static -int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, +int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *dig_port, bool *repeater_present) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret; u8 val; - ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); + ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); if (ret) { drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n", ret); @@ -1411,13 +1411,13 @@ int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port, } static -int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, +int 
intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *dig_port, u8 *ri_prime) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret; - ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME, + ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_RI_PRIME, ri_prime, DRM_HDCP_RI_LEN); if (ret) drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n", @@ -1426,14 +1426,14 @@ int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port, } static -int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, +int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *dig_port, bool *ksv_ready) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret; u8 val; - ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); + ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1); if (ret) { drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n", ret); @@ -1444,12 +1444,12 @@ int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port, } static -int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, +int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port, int num_downstream, u8 *ksv_fifo) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret; - ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO, + ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_KSV_FIFO, ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN); if (ret) { drm_dbg_kms(&i915->drm, @@ -1460,16 +1460,16 @@ int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port, } static -int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, +int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port, int i, u32 *part) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int ret; if (i >= DRM_HDCP_V_PRIME_NUM_PARTS) return -EINVAL; - ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i), + ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_V_PRIME(i), part, DRM_HDCP_V_PRIME_PART_LEN); if (ret) drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n", @@ -1480,7 +1480,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port, static int kbl_repositioning_enc_en_signal(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector); + struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct drm_crtc *crtc = connector->base.state->crtc; struct intel_crtc *intel_crtc = container_of(crtc, struct intel_crtc, base); @@ -1494,13 +1494,13 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector) usleep_range(25, 50); } - ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false); + ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, false); if (ret) { drm_err(&dev_priv->drm, "Disable HDCP signalling failed (%d)\n", ret); return ret; } - ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true); + ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, true); if (ret) 
{ drm_err(&dev_priv->drm, "Enable HDCP signalling failed (%d)\n", ret); @@ -1511,10 +1511,10 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector) } static -int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, +int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port, bool enable) { - struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct intel_hdmi *hdmi = &dig_port->hdmi; struct intel_connector *connector = hdmi->attached_connector; struct drm_i915_private *dev_priv = to_i915(connector->base.dev); int ret; @@ -1522,7 +1522,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, if (!enable) usleep_range(6, 60); /* Bspec says >= 6us */ - ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, enable); + ret = intel_ddi_toggle_hdcp_signalling(&dig_port->base, enable); if (ret) { drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n", enable ? "Enable" : "Disable", ret); @@ -1540,12 +1540,12 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port, } static -bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *intel_dig_port) +bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_connector *connector = - intel_dig_port->hdmi.attached_connector; - enum port port = intel_dig_port->base.port; + dig_port->hdmi.attached_connector; + enum port port = dig_port->base.port; enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; int ret; union { @@ -1553,7 +1553,7 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *intel_dig_port) u8 shim[DRM_HDCP_RI_LEN]; } ri; - ret = intel_hdmi_hdcp_read_ri_prime(intel_dig_port, ri.shim); + ret = intel_hdmi_hdcp_read_ri_prime(dig_port, ri.shim); if (ret) return false; @@ -1572,13 +1572,13 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *intel_dig_port) } static -bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) +bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); int retry; for (retry = 0; retry < 3; retry++) - if (intel_hdmi_hdcp_check_link_once(intel_dig_port)) + if (intel_hdmi_hdcp_check_link_once(dig_port)) return true; drm_err(&i915->drm, "Link check failed\n"); @@ -1599,10 +1599,10 @@ static const struct hdcp2_hdmi_msg_timeout hdcp2_msg_timeout[] = { }; static -int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port, +int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *dig_port, u8 *rx_status) { - return intel_hdmi_hdcp_read(intel_dig_port, + return intel_hdmi_hdcp_read(dig_port, HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET, rx_status, HDCP_2_2_HDMI_RXSTATUS_LEN); @@ -1628,15 +1628,15 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired) } static int -hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, +hdcp2_detect_msg_availability(struct intel_digital_port *dig_port, u8 msg_id, bool *msg_ready, ssize_t *msg_sz) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN]; int ret; - ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, 
rx_status); + ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status); if (ret < 0) { drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n", ret); @@ -1656,10 +1656,10 @@ hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port, } static ssize_t -intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, +intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port, u8 msg_id, bool paired) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); bool msg_ready = false; int timeout, ret; ssize_t msg_sz = 0; @@ -1668,7 +1668,7 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, if (timeout < 0) return timeout; - ret = __wait_for(ret = hdcp2_detect_msg_availability(intel_dig_port, + ret = __wait_for(ret = hdcp2_detect_msg_availability(dig_port, msg_id, &msg_ready, &msg_sz), !ret && msg_ready && msg_sz, timeout * 1000, @@ -1681,26 +1681,26 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port, } static -int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *intel_dig_port, +int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *dig_port, void *buf, size_t size) { unsigned int offset; offset = HDCP_2_2_HDMI_REG_WR_MSG_OFFSET; - return intel_hdmi_hdcp_write(intel_dig_port, offset, buf, size); + return intel_hdmi_hdcp_write(dig_port, offset, buf, size); } static -int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, +int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port, u8 msg_id, void *buf, size_t size) { - struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); - struct intel_hdmi *hdmi = &intel_dig_port->hdmi; + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_hdmi *hdmi = &dig_port->hdmi; struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp; unsigned int offset; ssize_t ret; - ret = intel_hdmi_hdcp2_wait_for_msg(intel_dig_port, msg_id, + ret = intel_hdmi_hdcp2_wait_for_msg(dig_port, msg_id, hdcp->is_paired); if (ret < 0) return ret; @@ -1717,7 +1717,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, } offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET; - ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret); + ret = intel_hdmi_hdcp_read(dig_port, offset, buf, ret); if (ret) drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n", msg_id, ret); @@ -1726,12 +1726,12 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port, } static -int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port) +int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port) { u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN]; int ret; - ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status); + ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status); if (ret) return ret; @@ -1748,14 +1748,14 @@ int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port) } static -int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port, +int intel_hdmi_hdcp2_capable(struct intel_digital_port *dig_port, bool *capable) { u8 hdcp2_version; int ret; *capable = false; - ret = intel_hdmi_hdcp_read(intel_dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET, + ret = intel_hdmi_hdcp_read(dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET, &hdcp2_version, sizeof(hdcp2_version)); if (!ret && hdcp2_version & HDCP_2_2_HDMI_SUPPORT_MASK) *capable = true; @@ -2063,7 +2063,7 @@ static void intel_disable_hdmi(struct 
intel_atomic_state *state, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); - struct intel_digital_port *intel_dig_port = + struct intel_digital_port *dig_port = hdmi_to_dig_port(intel_hdmi); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); u32 temp; @@ -2107,7 +2107,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state, intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } - intel_dig_port->set_infoframes(encoder, + dig_port->set_infoframes(encoder, false, old_crtc_state, old_conn_state); @@ -2722,12 +2722,12 @@ static void intel_hdmi_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_digital_port *intel_dig_port = + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); intel_hdmi_prepare(encoder, pipe_config); - intel_dig_port->set_infoframes(encoder, + dig_port->set_infoframes(encoder, pipe_config->has_infoframe, pipe_config, conn_state); } @@ -2737,7 +2737,7 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_digital_port *dport = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); vlv_phy_pre_encoder_enable(encoder, pipe_config); @@ -2746,13 +2746,13 @@ static void vlv_hdmi_pre_enable(struct intel_atomic_state *state, vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a, 0x2b247878); - dport->set_infoframes(encoder, + dig_port->set_infoframes(encoder, pipe_config->has_infoframe, pipe_config, conn_state); g4x_enable_hdmi(state, encoder, pipe_config, conn_state); - vlv_wait_port_ready(dev_priv, dport, 0x0); + vlv_wait_port_ready(dev_priv, dig_port, 0x0); } static void vlv_hdmi_pre_pll_enable(struct intel_atomic_state *state, @@ -2813,7 +2813,7 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *pipe_config, const struct drm_connector_state *conn_state) { - struct intel_digital_port *dport = enc_to_dig_port(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -2823,13 +2823,13 @@ static void chv_hdmi_pre_enable(struct intel_atomic_state *state, /* Use 800mV-0dB */ chv_set_phy_signal_level(encoder, 128, 102, false); - dport->set_infoframes(encoder, + dig_port->set_infoframes(encoder, pipe_config->has_infoframe, pipe_config, conn_state); g4x_enable_hdmi(state, encoder, pipe_config, conn_state); - vlv_wait_port_ready(dev_priv, dport, 0x0); + vlv_wait_port_ready(dev_priv, dig_port, 0x0); /* Second common lane will stay alive on its own now */ chv_phy_release_cl2_override(encoder); @@ -2923,7 +2923,7 @@ static void intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_digital_port *intel_dig_port = + struct intel_digital_port *dig_port = hdmi_to_dig_port(intel_hdmi); intel_attach_force_audio_property(connector); @@ -2935,7 +2935,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c * ToDo: This needs to be extended for LSPCON implementation * as well. Will be implemented separately. 
*/ - if (!intel_dig_port->lspcon.active) + if (!dig_port->lspcon.active) intel_attach_colorspace_property(connector); drm_connector_attach_content_type_property(connector); @@ -3172,52 +3172,52 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder) return ddc_pin; } -void intel_infoframe_init(struct intel_digital_port *intel_dig_port) +void intel_infoframe_init(struct intel_digital_port *dig_port) { struct drm_i915_private *dev_priv = - to_i915(intel_dig_port->base.base.dev); + to_i915(dig_port->base.base.dev); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - intel_dig_port->write_infoframe = vlv_write_infoframe; - intel_dig_port->read_infoframe = vlv_read_infoframe; - intel_dig_port->set_infoframes = vlv_set_infoframes; - intel_dig_port->infoframes_enabled = vlv_infoframes_enabled; + dig_port->write_infoframe = vlv_write_infoframe; + dig_port->read_infoframe = vlv_read_infoframe; + dig_port->set_infoframes = vlv_set_infoframes; + dig_port->infoframes_enabled = vlv_infoframes_enabled; } else if (IS_G4X(dev_priv)) { - intel_dig_port->write_infoframe = g4x_write_infoframe; - intel_dig_port->read_infoframe = g4x_read_infoframe; - intel_dig_port->set_infoframes = g4x_set_infoframes; - intel_dig_port->infoframes_enabled = g4x_infoframes_enabled; + dig_port->write_infoframe = g4x_write_infoframe; + dig_port->read_infoframe = g4x_read_infoframe; + dig_port->set_infoframes = g4x_set_infoframes; + dig_port->infoframes_enabled = g4x_infoframes_enabled; } else if (HAS_DDI(dev_priv)) { - if (intel_dig_port->lspcon.active) { - intel_dig_port->write_infoframe = lspcon_write_infoframe; - intel_dig_port->read_infoframe = lspcon_read_infoframe; - intel_dig_port->set_infoframes = lspcon_set_infoframes; - intel_dig_port->infoframes_enabled = lspcon_infoframes_enabled; + if (dig_port->lspcon.active) { + dig_port->write_infoframe = lspcon_write_infoframe; + dig_port->read_infoframe = lspcon_read_infoframe; + dig_port->set_infoframes = lspcon_set_infoframes; + dig_port->infoframes_enabled = lspcon_infoframes_enabled; } else { - intel_dig_port->write_infoframe = hsw_write_infoframe; - intel_dig_port->read_infoframe = hsw_read_infoframe; - intel_dig_port->set_infoframes = hsw_set_infoframes; - intel_dig_port->infoframes_enabled = hsw_infoframes_enabled; + dig_port->write_infoframe = hsw_write_infoframe; + dig_port->read_infoframe = hsw_read_infoframe; + dig_port->set_infoframes = hsw_set_infoframes; + dig_port->infoframes_enabled = hsw_infoframes_enabled; } } else if (HAS_PCH_IBX(dev_priv)) { - intel_dig_port->write_infoframe = ibx_write_infoframe; - intel_dig_port->read_infoframe = ibx_read_infoframe; - intel_dig_port->set_infoframes = ibx_set_infoframes; - intel_dig_port->infoframes_enabled = ibx_infoframes_enabled; + dig_port->write_infoframe = ibx_write_infoframe; + dig_port->read_infoframe = ibx_read_infoframe; + dig_port->set_infoframes = ibx_set_infoframes; + dig_port->infoframes_enabled = ibx_infoframes_enabled; } else { - intel_dig_port->write_infoframe = cpt_write_infoframe; - intel_dig_port->read_infoframe = cpt_read_infoframe; - intel_dig_port->set_infoframes = cpt_set_infoframes; - intel_dig_port->infoframes_enabled = cpt_infoframes_enabled; + dig_port->write_infoframe = cpt_write_infoframe; + dig_port->read_infoframe = cpt_read_infoframe; + dig_port->set_infoframes = cpt_set_infoframes; + dig_port->infoframes_enabled = cpt_infoframes_enabled; } } -void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, +void intel_hdmi_init_connector(struct intel_digital_port 
*dig_port, struct intel_connector *intel_connector) { struct drm_connector *connector = &intel_connector->base; - struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; - struct intel_encoder *intel_encoder = &intel_dig_port->base; + struct intel_hdmi *intel_hdmi = &dig_port->hdmi; + struct intel_encoder *intel_encoder = &dig_port->base; struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct i2c_adapter *ddc; @@ -3231,9 +3231,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A)) return; - if (drm_WARN(dev, intel_dig_port->max_lanes < 4, + if (drm_WARN(dev, dig_port->max_lanes < 4, "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n", - intel_dig_port->max_lanes, intel_encoder->base.base.id, + dig_port->max_lanes, intel_encoder->base.base.id, intel_encoder->base.name)) return; @@ -3322,21 +3322,21 @@ intel_hdmi_hotplug(struct intel_encoder *encoder, void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port) { - struct intel_digital_port *intel_dig_port; + struct intel_digital_port *dig_port; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; - intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); - if (!intel_dig_port) + dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); + if (!dig_port) return; intel_connector = intel_connector_alloc(); if (!intel_connector) { - kfree(intel_dig_port); + kfree(dig_port); return; } - intel_encoder = &intel_dig_port->base; + intel_encoder = &dig_port->base; drm_encoder_init(&dev_priv->drm, &intel_encoder->base, &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS, @@ -3393,12 +3393,12 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, if (IS_G4X(dev_priv)) intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI; - intel_dig_port->hdmi.hdmi_reg = hdmi_reg; - intel_dig_port->dp.output_reg = INVALID_MMIO_REG; - intel_dig_port->max_lanes = 4; + dig_port->hdmi.hdmi_reg = hdmi_reg; + dig_port->dp.output_reg = INVALID_MMIO_REG; + dig_port->max_lanes = 4; - intel_infoframe_init(intel_dig_port); + intel_infoframe_init(dig_port); - intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); - intel_hdmi_init_connector(intel_dig_port, intel_connector); + dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); + intel_hdmi_init_connector(dig_port, intel_connector); } diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index 8ff1f76a63df..213ff24befde 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -25,7 +25,7 @@ enum port; void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port); -void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, +void intel_hdmi_init_connector(struct intel_digital_port *dig_port, struct intel_connector *intel_connector); struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder); int intel_hdmi_compute_config(struct intel_encoder *encoder, @@ -36,7 +36,7 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder, bool high_tmds_clock_ratio, bool scrambling); void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); -void intel_infoframe_init(struct intel_digital_port *intel_dig_port); +void intel_infoframe_init(struct intel_digital_port *dig_port); u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder, const struct 
intel_crtc_state *crtc_state); u32 intel_hdmi_infoframe_enable(unsigned int type); diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index 6ff7b226f0a1..b781bf469644 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -550,11 +550,11 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon) lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON); } -bool lspcon_init(struct intel_digital_port *intel_dig_port) +bool lspcon_init(struct intel_digital_port *dig_port) { - struct intel_dp *dp = &intel_dig_port->dp; - struct intel_lspcon *lspcon = &intel_dig_port->lspcon; - struct drm_device *dev = intel_dig_port->base.base.dev; + struct intel_dp *dp = &dig_port->dp; + struct intel_lspcon *lspcon = &dig_port->lspcon; + struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_connector *connector = &dp->attached_connector->base; diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h b/drivers/gpu/drm/i915/display/intel_lspcon.h index 37cfddf8a9c5..1cffe8a42a08 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.h +++ b/drivers/gpu/drm/i915/display/intel_lspcon.h @@ -15,7 +15,7 @@ struct intel_digital_port; struct intel_encoder; struct intel_lspcon; -bool lspcon_init(struct intel_digital_port *intel_dig_port); +bool lspcon_init(struct intel_digital_port *dig_port); void lspcon_resume(struct intel_lspcon *lspcon); void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); void lspcon_write_infoframe(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 611cb8d74811..bf9e320c547d 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -905,8 +905,8 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, const struct drm_connector_state *conn_state) { struct intel_dp *intel_dp = dev_priv->psr.dp; - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct intel_encoder *encoder = &intel_dig_port->base; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &dig_port->base; u32 val; drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled); diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index d145fe2bed81..c5735c365659 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -1045,7 +1045,7 @@ static void intel_dsc_dp_pps_write(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; @@ -1055,9 +1055,9 @@ static void intel_dsc_dp_pps_write(struct intel_encoder *encoder, /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */ drm_dsc_pps_payload_pack(&dp_dsc_pps_sdp.pps_payload, vdsc_cfg); - intel_dig_port->write_infoframe(encoder, crtc_state, - DP_SDP_PPS, &dp_dsc_pps_sdp, - sizeof(dp_dsc_pps_sdp)); + dig_port->write_infoframe(encoder, crtc_state, + DP_SDP_PPS, &dp_dsc_pps_sdp, + sizeof(dp_dsc_pps_sdp)); } void intel_dsc_enable(struct intel_encoder *encoder, -- cgit v1.2.3 From cb2baf42dcec630acc15f9ff2f8c09c8b70b03da Mon 
Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 2 Jul 2020 17:36:22 +0100 Subject: drm/i915/gem: Only revoke the GGTT mmappings on aperture detiling changes Only a GGTT mmapping will use the aperture detiling registers, so on a tiling change for an object, we only need to revoke those mmappings and not the CPU mmappings (which are always linear irrespective of the tiling). Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200702163623.6402-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_mman.h | 5 ++++- drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 2 +- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index fe27c5b344e3..7c2650cfb070 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -448,7 +448,7 @@ void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj) * mapping will then trigger a page fault on the next user access, allowing * fixup by vm_fault_gtt(). */ -static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj) +void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); intel_wakeref_t wakeref; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h index 862e01b7cb69..7c5ccdf59359 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h @@ -24,8 +24,11 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv, struct drm_device *dev, u32 handle, u64 *offset); -void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj); void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj); + +void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj); +void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj); + void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj); #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index 0158e49bf9bb..ff72ee2fd9cd 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -299,7 +299,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, i915_gem_object_unlock(obj); /* Force the fence to be reacquired for GTT access */ - i915_gem_object_release_mmap(obj); + i915_gem_object_release_mmap_gtt(obj); /* Try to preallocate memory required to save swizzling on put-pages */ if (i915_gem_object_needs_bit17_swizzle(obj)) { -- cgit v1.2.3 From db8337853b834f806ab537200d35f0d1909e6b03 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 2 Jul 2020 17:36:23 +0100 Subject: drm/i915/gem: Only revoke mmap handlers if active Avoid waking up the device and taking stale locks if we know that the object is not currently mmapped. This is particularly useful as not many objects are actually mmapped and so we can destroy them without waking the device up, and it gives us a little more freedom of workqueue ordering during shutdown. v2: Pull the release_mmap() into its single user in freeing the objects, where there can not be any race with a concurrent user of the freed object. Or so one hopes!
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin , Reviewed-by: Tvrtko Ursulin , Link: https://patchwork.freedesktop.org/patch/msgid/20200702163623.6402-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 13 ----------- drivers/gpu/drm/i915/gem/i915_gem_mman.h | 2 -- drivers/gpu/drm/i915/gem/i915_gem_object.c | 37 +++++++++++++++++++----------- 3 files changed, 24 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 7c2650cfb070..b23368529a40 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -507,19 +507,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj) spin_unlock(&obj->mmo.lock); } -/** - * i915_gem_object_release_mmap - remove physical page mappings - * @obj: obj in question - * - * Preserve the reservation of the mmapping with the DRM core code, but - * relinquish ownership of the pages back to the system. - */ -void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) -{ - i915_gem_object_release_mmap_gtt(obj); - i915_gem_object_release_mmap_offset(obj); -} - static struct i915_mmap_offset * lookup_mmo(struct drm_i915_gem_object *obj, enum i915_mmap_type mmap_type) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h index 7c5ccdf59359..efee9e0d2508 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h @@ -24,8 +24,6 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv, struct drm_device *dev, u32 handle, u64 *offset); -void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj); - void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj); void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 6b69191c5543..eb35bdd10c09 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -171,14 +171,35 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head) atomic_dec(&i915->mm.free_count); } +static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj) +{ + /* Skip serialisation and waking the device if known to be not used. 
*/ + + if (obj->userfault_count) + i915_gem_object_release_mmap_gtt(obj); + + if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) { + struct i915_mmap_offset *mmo, *mn; + + i915_gem_object_release_mmap_offset(obj); + + rbtree_postorder_for_each_entry_safe(mmo, mn, + &obj->mmo.offsets, + offset) { + drm_vma_offset_remove(obj->base.dev->vma_offset_manager, + &mmo->vma_node); + kfree(mmo); + } + obj->mmo.offsets = RB_ROOT; + } +} + static void __i915_gem_free_objects(struct drm_i915_private *i915, struct llist_node *freed) { struct drm_i915_gem_object *obj, *on; llist_for_each_entry_safe(obj, on, freed, freed) { - struct i915_mmap_offset *mmo, *mn; - trace_i915_gem_object_destroy(obj); if (!list_empty(&obj->vma.list)) { @@ -204,18 +225,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, spin_unlock(&obj->vma.lock); } - i915_gem_object_release_mmap(obj); - - rbtree_postorder_for_each_entry_safe(mmo, mn, - &obj->mmo.offsets, - offset) { - drm_vma_offset_remove(obj->base.dev->vma_offset_manager, - &mmo->vma_node); - kfree(mmo); - } - obj->mmo.offsets = RB_ROOT; + __i915_gem_object_free_mmaps(obj); - GEM_BUG_ON(obj->userfault_count); GEM_BUG_ON(!list_empty(&obj->lut_list)); atomic_set(&obj->mm.pages_pin_count, 0); -- cgit v1.2.3 From a85f22288d4a338f598c9f3e822a89fe71f891bf Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 2 Jul 2020 09:32:05 +0100 Subject: drm/i915/gem: Drop forced struct_mutex from shrinker_taints_mutex Since we no longer always take struct_mutex around everything, and want the freedom to create GEM objects, actually taking struct_mutex inside the lock creation ends up pulling the mutex inside other locks. Since we don't generally use struct_mutex, we can relax the tainting. Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200702083225.20044-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 5b65ce738b16..1ced1e5d2ec0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -408,26 +408,15 @@ void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915) void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, struct mutex *mutex) { - bool unlock = false; - if (!IS_ENABLED(CONFIG_LOCKDEP)) return; - if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) { - mutex_acquire(&i915->drm.struct_mutex.dep_map, - I915_MM_NORMAL, 0, _RET_IP_); - unlock = true; - } - fs_reclaim_acquire(GFP_KERNEL); mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_); mutex_release(&mutex->dep_map, _RET_IP_); fs_reclaim_release(GFP_KERNEL); - - if (unlock) - mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_); } #define obj_to_i915(obj__) to_i915((obj__)->base.dev) -- cgit v1.2.3 From 03fca66b7a36b52da8915341eee388267f6d5b73 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 2 Jul 2020 22:10:15 +0100 Subject: drm/i915: Also drop vm.ref along error paths for vma construction Not only do we need to release the vm.ref we acquired for the vma on the duplicate insert branch, but also for the normal error paths, so roll them all into one.
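The construction path now follows the usual single-unwind idiom. As a stripped-down sketch (plain C with illustrative names, not the i915 code itself, though it mirrors the shape of vma_create()'s unwind): initialise the result to the error value, send the duplicate-found case and ordinary failures through the same labels, and drop the reference taken up front exactly once on the way out.

  #include <stdlib.h>

  struct obj { int refs; };

  static void put_ref(struct obj *o) { o->refs--; }

  static struct obj *create(struct obj *parent, int fail, struct obj *duplicate)
  {
          struct obj *ret = NULL;                 /* default error result */
          struct obj *o;

          parent->refs++;                         /* reference taken up front */

          o = malloc(sizeof(*o));
          if (!o)
                  goto err;

          if (duplicate) {                        /* duplicate already inserted */
                  ret = duplicate;
                  goto err_free;
          }
          if (fail)
                  goto err_free;

          return o;                               /* success keeps the reference */

  err_free:
          free(o);
  err:
          put_ref(parent);                        /* dropped once on every error exit */
          return ret;
  }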
Reported-by: Andi Shyti Suggested-by: Andi Shyti Fixes: 2850748ef876 ("drm/i915: Pull i915_vma_pin under the vm->mutex") Signed-off-by: Chris Wilson Cc: Andi Shyti Cc: # v5.5+ Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200702211015.29604-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_vma.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 7fe1f317cd2b..f4e22e256ac6 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -104,6 +104,7 @@ vma_create(struct drm_i915_gem_object *obj, struct i915_address_space *vm, const struct i915_ggtt_view *view) { + struct i915_vma *pos = ERR_PTR(-E2BIG); struct i915_vma *vma; struct rb_node *rb, **p; @@ -184,7 +185,6 @@ vma_create(struct drm_i915_gem_object *obj, rb = NULL; p = &obj->vma.tree.rb_node; while (*p) { - struct i915_vma *pos; long cmp; rb = *p; @@ -196,17 +196,12 @@ vma_create(struct drm_i915_gem_object *obj, * and dispose of ours. */ cmp = i915_vma_compare(pos, vm, view); - if (cmp == 0) { - spin_unlock(&obj->vma.lock); - i915_vm_put(vm); - i915_vma_free(vma); - return pos; - } - if (cmp < 0) p = &rb->rb_right; - else + else if (cmp > 0) p = &rb->rb_left; + else + goto err_unlock; } rb_link_node(&vma->obj_node, rb, p); rb_insert_color(&vma->obj_node, &obj->vma.tree); @@ -229,8 +224,9 @@ vma_create(struct drm_i915_gem_object *obj, err_unlock: spin_unlock(&obj->vma.lock); err_vma: + i915_vm_put(vm); i915_vma_free(vma); - return ERR_PTR(-E2BIG); + return pos; } static struct i915_vma * -- cgit v1.2.3 From f7ce8639f6ff7f9445cb18114282371a41ceb840 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 3 Jul 2020 01:43:06 +0100 Subject: drm/i915/gem: Split the context's obj:vma lut into its own mutex Rather than reuse the common ctx->mutex for locking the execbuffer LUT, split it into its own lock to avoid being taken [as part of ctx->mutex] at inappropriate times. In particular to avoid the inversion from taking the timeline->mutex for the whole execbuf submission in the next patch. 
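The inversion is easiest to see in a stripped-down form. In the sketch below (plain C with pthreads; the lock names and call paths are illustrative, not the actual i915 code) the LUT initially hides behind ctx_mutex, so one path orders ctx_mutex before timeline_mutex while execbuf would order them the other way round just to touch the LUT; giving the LUT its own leaf lock removes ctx_mutex from the execbuf path and breaks the cycle.

  #include <pthread.h>

  static pthread_mutex_t ctx_mutex = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t timeline_mutex = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t lut_mutex = PTHREAD_MUTEX_INITIALIZER;

  static void context_path(void)          /* e.g. context teardown */
  {
          pthread_mutex_lock(&ctx_mutex);         /* ctx -> timeline */
          pthread_mutex_lock(&timeline_mutex);
          pthread_mutex_unlock(&timeline_mutex);
          pthread_mutex_unlock(&ctx_mutex);
  }

  static void execbuf_before(void)        /* can deadlock against the above */
  {
          pthread_mutex_lock(&timeline_mutex);    /* timeline -> ctx: inversion */
          pthread_mutex_lock(&ctx_mutex);         /* only needed for the LUT */
          pthread_mutex_unlock(&ctx_mutex);
          pthread_mutex_unlock(&timeline_mutex);
  }

  static void execbuf_after(void)         /* with a dedicated lut_mutex */
  {
          pthread_mutex_lock(&timeline_mutex);
          pthread_mutex_lock(&lut_mutex);         /* leaf lock, no cycle with ctx */
          pthread_mutex_unlock(&lut_mutex);
          pthread_mutex_unlock(&timeline_mutex);
  }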
Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200703004306.11117-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 11 +++++++---- drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 1 + drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 19 +++++++++++++------ drivers/gpu/drm/i915/gem/i915_gem_object.c | 4 ++-- drivers/gpu/drm/i915/gem/selftests/mock_context.c | 4 +++- 5 files changed, 26 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 6675447a47b9..41784df51e58 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -101,8 +101,7 @@ static void lut_close(struct i915_gem_context *ctx) struct radix_tree_iter iter; void __rcu **slot; - lockdep_assert_held(&ctx->mutex); - + mutex_lock(&ctx->lut_mutex); rcu_read_lock(); radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { struct i915_vma *vma = rcu_dereference_raw(*slot); @@ -135,6 +134,7 @@ static void lut_close(struct i915_gem_context *ctx) i915_gem_object_put(obj); } rcu_read_unlock(); + mutex_unlock(&ctx->lut_mutex); } static struct intel_context * @@ -342,6 +342,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) spin_unlock(&ctx->i915->gem.contexts.lock); mutex_destroy(&ctx->engines_mutex); + mutex_destroy(&ctx->lut_mutex); if (ctx->timeline) intel_timeline_put(ctx->timeline); @@ -725,6 +726,7 @@ __create_context(struct drm_i915_private *i915) RCU_INIT_POINTER(ctx->engines, e); INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); + mutex_init(&ctx->lut_mutex); /* NB: Mark all slices as needing a remap so that when the context first * loads it will restore whatever remap state already exists. If there @@ -1312,11 +1314,11 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, if (vm == rcu_access_pointer(ctx->vm)) goto unlock; + old = __set_ppgtt(ctx, vm); + /* Teardown the existing obj:vma cache, it will have to be rebuilt. 
*/ lut_close(ctx); - old = __set_ppgtt(ctx, vm); - /* * We need to flush any requests using the current ppgtt before * we release it as the requests do not hold a reference themselves, @@ -1330,6 +1332,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, if (err) { i915_vm_close(__set_ppgtt(ctx, old)); i915_vm_close(old); + lut_close(ctx); /* force a rebuild of the old obj:vma cache */ } unlock: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index 28760bd03265..ae14ca24a11f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -170,6 +170,7 @@ struct i915_gem_context { * per vm, which may be one per context or shared with the global GTT) */ struct radix_tree_root handles_vma; + struct mutex lut_mutex; /** * @name: arbitrary name, used for user debug diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index b4862afaaf28..dd15a799f9d6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -782,10 +782,15 @@ static int __eb_add_lut(struct i915_execbuffer *eb, /* Check that the context hasn't been closed in the meantime */ err = -EINTR; - if (!mutex_lock_interruptible(&ctx->mutex)) { - err = -ENOENT; - if (likely(!i915_gem_context_is_closed(ctx))) + if (!mutex_lock_interruptible(&ctx->lut_mutex)) { + struct i915_address_space *vm = rcu_access_pointer(ctx->vm); + + if (unlikely(vm && vma->vm != vm)) + err = -EAGAIN; /* user racing with ctx set-vm */ + else if (likely(!i915_gem_context_is_closed(ctx))) err = radix_tree_insert(&ctx->handles_vma, handle, vma); + else + err = -ENOENT; if (err == 0) { /* And nor has this handle */ struct drm_i915_gem_object *obj = vma->obj; @@ -798,7 +803,7 @@ static int __eb_add_lut(struct i915_execbuffer *eb, } spin_unlock(&obj->lut_lock); } - mutex_unlock(&ctx->mutex); + mutex_unlock(&ctx->lut_mutex); } if (unlikely(err)) goto err; @@ -814,6 +819,8 @@ err: static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle) { + struct i915_address_space *vm = eb->context->vm; + do { struct drm_i915_gem_object *obj; struct i915_vma *vma; @@ -821,7 +828,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle) rcu_read_lock(); vma = radix_tree_lookup(&eb->gem_context->handles_vma, handle); - if (likely(vma)) + if (likely(vma && vma->vm == vm)) vma = i915_vma_tryget(vma); rcu_read_unlock(); if (likely(vma)) @@ -831,7 +838,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle) if (unlikely(!obj)) return ERR_PTR(-ENOENT); - vma = i915_vma_instance(obj, eb->context->vm, NULL); + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { i915_gem_object_put(obj); return vma; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index eb35bdd10c09..c8421fd9d2dc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -143,14 +143,14 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) * vma, in the same fd namespace, by virtue of flink/open. 
*/ - mutex_lock(&ctx->mutex); + mutex_lock(&ctx->lut_mutex); vma = radix_tree_delete(&ctx->handles_vma, lut->handle); if (vma) { GEM_BUG_ON(vma->obj != obj); GEM_BUG_ON(!atomic_read(&vma->open_count)); i915_vma_close(vma); } - mutex_unlock(&ctx->mutex); + mutex_unlock(&ctx->lut_mutex); i915_gem_context_put(lut->ctx); i915_lut_handle_free(lut); diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c index aa0d06cf1903..51b5a3421b40 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -23,6 +23,8 @@ mock_context(struct drm_i915_private *i915, INIT_LIST_HEAD(&ctx->link); ctx->i915 = i915; + mutex_init(&ctx->mutex); + spin_lock_init(&ctx->stale.lock); INIT_LIST_HEAD(&ctx->stale.engines); @@ -35,7 +37,7 @@ mock_context(struct drm_i915_private *i915, RCU_INIT_POINTER(ctx->engines, e); INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); - mutex_init(&ctx->mutex); + mutex_init(&ctx->lut_mutex); if (name) { struct i915_ppgtt *ppgtt; -- cgit v1.2.3 From 1fe541ccef9ffaf654a29d1bfd9206a2c350d11b Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 2 Jul 2020 13:07:14 -0700 Subject: drm/i915: do not read swizzle info if unavailable Since gen8 we don't use swizzle anymore. Don't dump registers related to it: registers may or may not be there. v2: pull the rest of driver state reporting before the read out (Chris) Cc: Matt Roper Signed-off-by: Lucas De Marchi Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200702200714.1278-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 9ca94a435b75..94ed442910d6 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1138,13 +1138,20 @@ static int i915_swizzle_info(struct seq_file *m, void *data) struct intel_uncore *uncore = &dev_priv->uncore; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - seq_printf(m, "bit6 swizzle for X-tiling = %s\n", swizzle_string(dev_priv->ggtt.bit_6_swizzle_x)); seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", swizzle_string(dev_priv->ggtt.bit_6_swizzle_y)); + if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) + seq_puts(m, "L-shaped memory detected\n"); + + /* On BDW+, swizzling is not used. 
See detect_bit_6_swizzle() */ + if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) + return 0; + + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + if (IS_GEN_RANGE(dev_priv, 3, 4)) { seq_printf(m, "DDC = 0x%08x\n", intel_uncore_read(uncore, DCC)); @@ -1173,9 +1180,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data) intel_uncore_read(uncore, DISP_ARB_CTL)); } - if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) - seq_puts(m, "L-shaped memory detected\n"); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; -- cgit v1.2.3 From ba06216d00275f07dc7750bd21e9c3385e1188aa Mon Sep 17 00:00:00 2001 From: Michał Winiarski Date: Wed, 1 Jul 2020 16:27:52 +0200 Subject: drm/i915/guc: Expand guc_info debugfs with more information MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The information about platform/driver/user view of GuC firmware usage currently requires user to either go through kernel log or parse the combination of "enable_guc" modparam and various debugfs entries. Let's keep things simple and add a "supported/used/wanted" matrix (already used internally by i915) in guc_info debugfs. Signed-off-by: Michał Winiarski Cc: Daniele Ceraolo Spurio Cc: Lukasz Fiedorowicz Cc: Michal Wajdeczko Reviewed-by: Lukasz Fiedorowicz Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200701142752.419878-1-michal@hardline.pl --- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 861657897c0f..446a41946f56 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -733,19 +733,28 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size, */ void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p) { + struct intel_uc *uc = container_of(guc, struct intel_uc, guc); struct intel_gt *gt = guc_to_gt(guc); struct intel_uncore *uncore = gt->uncore; intel_wakeref_t wakeref; - if (!intel_guc_is_supported(guc)) { - drm_printf(p, "GuC not supported\n"); + drm_printf(p, "[guc] supported:%s wanted:%s used:%s\n", + yesno(intel_uc_supports_guc(uc)), + yesno(intel_uc_wants_guc(uc)), + yesno(intel_uc_uses_guc(uc))); + drm_printf(p, "[huc] supported:%s wanted:%s used:%s\n", + yesno(intel_uc_supports_huc(uc)), + yesno(intel_uc_wants_huc(uc)), + yesno(intel_uc_uses_huc(uc))); + drm_printf(p, "[submission] supported:%s wanted:%s used:%s\n", + yesno(intel_uc_supports_guc_submission(uc)), + yesno(intel_uc_wants_guc_submission(uc)), + yesno(intel_uc_uses_guc_submission(uc))); + + if (!intel_guc_is_supported(guc) || !intel_guc_is_wanted(guc)) return; - } - if (!intel_guc_is_wanted(guc)) { - drm_printf(p, "GuC disabled\n"); - return; - } + drm_puts(p, "\n"); intel_uc_fw_dump(&guc->fw, p); -- cgit v1.2.3 From 3fe4818e5dbcad466d4f70dba8790a9fc978b9c5 Mon Sep 17 00:00:00 2001 From: Stanislav Lisovskiy Date: Thu, 2 Jul 2020 12:15:26 +0300 Subject: drm/i915/tgl: Clamp min_cdclk to max_cdclk_freq to unblock 8K MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We still need "Bump up CDCLK" workaround otherwise getting underruns - however currently it blocks 8K as CDCLK = Pixel rate, in 8K case would require CDCLK to be around 1 Ghz which is not possible. 
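The fix is therefore to clamp rather than reject: min_cdclk = max(min_cdclk, min(pixel_rate, max_cdclk)). A worked example with illustrative numbers (kHz; the real inputs are crtc_state->pixel_rate and dev_priv->max_cdclk_freq) shows the workaround still bumping CDCLK up to the pixel rate when it fits, while an 8K-class pixel rate is capped at the maximum CDCLK instead of tripping the later min_cdclk > max_cdclk_freq rejection:

  #include <stdio.h>

  /* Sketch of the clamp; the numbers below are only for illustration. */
  static int clamp_min_cdclk(int min_cdclk, int pixel_rate, int max_cdclk)
  {
          int bumped = pixel_rate < max_cdclk ? pixel_rate : max_cdclk;

          return min_cdclk > bumped ? min_cdclk : bumped;
  }

  int main(void)
  {
          /* 4K-class mode: W/A still bumps CDCLK to the pixel rate */
          printf("%d\n", clamp_min_cdclk(300000, 533250, 652800));  /* 533250 */
          /* 8K-class mode: capped at max CDCLK instead of being rejected */
          printf("%d\n", clamp_min_cdclk(300000, 1068000, 652800)); /* 652800 */
          return 0;
  }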
v2: - Convert to expression max(min_cdclk, min(pixel_rate, max_cdclk)) (Ville Syrjälä) - Use type specific min_t, max_t(Ville Syrjälä) Fixes: 46d53e271cea ("Revert "drm/i915: Remove unneeded hack now for CDCLK"") Signed-off-by: Stanislav Lisovskiy Reviewed-by: Manasi Navare Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20200702091526.10096-1-stanislav.lisovskiy@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 45f7f33d1144..bb91dace304a 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2080,8 +2080,15 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) * Explicitly stating here that this seems to be currently * rather a Hack, than final solution. */ - if (IS_TIGERLAKE(dev_priv)) - min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate); + if (IS_TIGERLAKE(dev_priv)) { + /* + * Clamp to max_cdclk_freq in case pixel rate is higher, + * in order not to break an 8K, but still leave W/A at place. + */ + min_cdclk = max_t(int, min_cdclk, + min_t(int, crtc_state->pixel_rate, + dev_priv->max_cdclk_freq)); + } if (min_cdclk > dev_priv->max_cdclk_freq) { drm_dbg_kms(&dev_priv->drm, -- cgit v1.2.3 From e5ec1f95486957f9668da4e1f2b1666c085d307f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 2 Jul 2020 18:37:20 +0300 Subject: drm/i915/fbc: Use the correct plane stride MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Consult the actual plane stride instead of the fb stride. The two will disagree when we remap the gtt. The plane stride is what the hw will be fed so that's what we should look at for the FBC restrictions/cfb allocation. Since we no longer require a fence we are going to attempt using FBC with remapping, and so we should look at the correct stride. With 90/270 degree rotation the plane stride is stored in units of pixels, so we need to convert it to bytes for the purposes of calculating the cfb stride. Not entirely sure if this matches the hw behaviour though. Need to reverse engineer that at some point... We also need to reorder the pixel format check vs. stride check to avoid triggering a spurious WARN(stride & 63) with cpp==1 and plane stride==32. v2: Try to deal with rotated stride and related WARN Cc: José Roberto de Souza Fixes: 691f7ba58d52 ("drm/i915/display/fbc: Make fences a nice-to-have for GEN9+") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200702153723.24327-2-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_fbc.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 69a0682ddb6a..d30c2a389294 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -695,9 +695,13 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode; cache->fb.format = fb->format; - cache->fb.stride = fb->pitches[0]; cache->fb.modifier = fb->modifier; + /* FIXME is this correct?
*/ + cache->fb.stride = plane_state->color_plane[0].stride; + if (drm_rotation_90_or_270(plane_state->hw.rotation)) + cache->fb.stride *= fb->format->cpp[0]; + /* FBC1 compression interval: arbitrary choice of 1 second */ cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode); @@ -797,6 +801,11 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) return false; } + if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) { + fbc->no_fbc_reason = "pixel format is invalid"; + return false; + } + if (!rotation_is_valid(dev_priv, cache->fb.format->format, cache->plane.rotation)) { fbc->no_fbc_reason = "rotation unsupported"; @@ -813,11 +822,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) return false; } - if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) { - fbc->no_fbc_reason = "pixel format is invalid"; - return false; - } - if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && cache->fb.format->has_alpha) { fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC"; -- cgit v1.2.3 From 2a4d632cc03e5a881a1e44bbc9e2f0248ea434d5 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 2 Jul 2020 18:37:21 +0300 Subject: drm/i915/fbc: Fix nuke for pre-snb platforms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The MSG_FBC_REND_STATE register only exists on snb+. For older platforms (would also work for snb+) we can simply rewite DSPSURF to trigger a flip nuke. While generally RMW is considered harmful we'll use it here for simplicity. And since FBC doesn't exist in i830 we don't have to worry about the DSPSURF double buffering hardware fails present on that platform. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200702153723.24327-3-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_fbc.c | 34 +++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index d30c2a389294..036546ce8db8 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -187,8 +187,30 @@ static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv) return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN; } +static void i8xx_fbc_recompress(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; + + spin_lock_irq(&dev_priv->uncore.lock); + intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), + intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane))); + spin_unlock_irq(&dev_priv->uncore.lock); +} + +static void i965_fbc_recompress(struct drm_i915_private *dev_priv) +{ + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; + + spin_lock_irq(&dev_priv->uncore.lock); + intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), + intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane))); + spin_unlock_irq(&dev_priv->uncore.lock); +} + /* This function forces a CFB recompression through the nuke operation. 
*/ -static void intel_fbc_recompress(struct drm_i915_private *dev_priv) +static void snb_fbc_recompress(struct drm_i915_private *dev_priv) { struct intel_fbc *fbc = &dev_priv->fbc; @@ -198,6 +220,16 @@ static void intel_fbc_recompress(struct drm_i915_private *dev_priv) intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE); } +static void intel_fbc_recompress(struct drm_i915_private *dev_priv) +{ + if (INTEL_GEN(dev_priv) >= 6) + snb_fbc_recompress(dev_priv); + else if (INTEL_GEN(dev_priv) >= 4) + i965_fbc_recompress(dev_priv); + else + i8xx_fbc_recompress(dev_priv); +} + static void ilk_fbc_activate(struct drm_i915_private *dev_priv) { struct intel_fbc_reg_params *params = &dev_priv->fbc.params; -- cgit v1.2.3 From ddf08d320de6f210adb4f17c8cb91b7543dd99db Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 2 Jul 2020 18:37:22 +0300 Subject: drm/i915/fbc: Enable fbc on i865 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Unlike all the other pre-snb desktop platforms i865 actually supports FBC. Let's enable it. Quote from the spec: "DevSDG provides the same Run-Length Encoded Frame Buffer Compression (RLEFBC) function as exists in DevMGM." As i865 only has the one pipe we want to skip massaging the plane<->pipe assignment aimed at getting FBC+LVDS working on the mobile platforms. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200702153723.24327-4-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_display.c | 3 ++- drivers/gpu/drm/i915/i915_pci.c | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 98a59e014b75..dff7c17f3d2b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -16332,7 +16332,8 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS * port is hooked to pipe B. Hence we want plane A feeding pipe B. */ - if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) + if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 && + INTEL_NUM_PIPES(dev_priv) == 2) plane->i9xx_plane = (enum i9xx_plane_id) !pipe; else plane->i9xx_plane = (enum i9xx_plane_id) pipe; diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index e5fdf17cd9cd..0be3b66ce666 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -217,6 +217,7 @@ static const struct intel_device_info i85x_info = { static const struct intel_device_info i865g_info = { I845_FEATURES, PLATFORM(INTEL_I865G), + .display.has_fbc = 1, }; #define GEN3_FEATURES \ -- cgit v1.2.3 From 5cecf5070fd83796ee88189bdd60f9545dfb7d35 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 2 Jul 2020 18:37:23 +0300 Subject: drm/i915/fbc: Allow FBC to recompress after a 3D workload on i85x/i865 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Normally i85x/i865 3D activity will block FBC until a 2D blit occurs. I suppose this was meant to avoid recompression while 3D activity is still going on but the frame hasn't yet been presented. Unfortunately that also means that a page flipped 3D workload will permanently block FBC even if it only renders a single frame and then does nothing. 
Since we are using software render tracking anyway we might as well flip the chicken bit so that 3D does not block FBC. This will avoid the permanent FBC blockage in the aforementioned use case, but thanks to the software tracking the compressor will not disturb 3D rendering activity. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200702153723.24327-5-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9d6536afc94b..03590d2d75f7 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2827,6 +2827,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030) #define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034) #define SCPD0 _MMIO(0x209c) /* 915+ only */ +#define SCPD_FBC_IGNORE_3D (1 << 6) #define CSTATE_RENDER_CLOCK_GATE_DISABLE (1 << 5) #define GEN2_IER _MMIO(0x20a0) #define GEN2_IIR _MMIO(0x20a4) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 565a2b9da3b3..2d980b83a1f1 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7471,6 +7471,16 @@ static void i85x_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(MEM_MODE, _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE)); + + /* + * Have FBC ignore 3D activity since we use software + * render tracking, and otherwise a pure 3D workload + * (even if it just renders a single frame and then does + * abosultely nothing) would not allow FBC to recompress + * until a 2D blit occurs. + */ + I915_WRITE(SCPD0, + _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D)); } static void i830_init_clock_gating(struct drm_i915_private *dev_priv) -- cgit v1.2.3 From 12b07256c22399e10590c994f3e93970b7600053 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 3 Jul 2020 11:25:19 +0100 Subject: drm/i915: Export ppgtt_bind_vma Reuse ppgtt_bind_vma() for aliasing_ppgtt_bind_vma() so we can reduce some code near-duplication. The catch is that we need to then pass along the i915_address_space and not rely on vma->vm, as they differ with the aliasing-ppgtt.
Signed-off-by: Chris Wilson Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20200703102519.26539-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 9 ++--- drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 7 ++-- drivers/gpu/drm/i915/gt/intel_ggtt.c | 49 +++++++++----------------- drivers/gpu/drm/i915/gt/intel_gtt.h | 13 +++++-- drivers/gpu/drm/i915/gt/intel_ppgtt.c | 19 +++++----- drivers/gpu/drm/i915/i915_vma.c | 8 ++--- drivers/gpu/drm/i915/i915_vma_types.h | 1 - drivers/gpu/drm/i915/selftests/mock_gtt.c | 12 ++++--- 8 files changed, 58 insertions(+), 60 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c index d3a86a4d5c04..278664f831e7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c @@ -32,16 +32,17 @@ static void vma_clear_pages(struct i915_vma *vma) vma->pages = NULL; } -static int vma_bind(struct i915_vma *vma, +static int vma_bind(struct i915_address_space *vm, + struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { - return vma->vm->vma_ops.bind_vma(vma, cache_level, flags); + return vm->vma_ops.bind_vma(vm, vma, cache_level, flags); } -static void vma_unbind(struct i915_vma *vma) +static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma) { - vma->vm->vma_ops.unbind_vma(vma); + vm->vma_ops.unbind_vma(vm, vma); } static const struct i915_vma_ops proxy_vma_ops = { diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index f4fec7eb4064..05497b50103f 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -299,11 +299,12 @@ static void pd_vma_clear_pages(struct i915_vma *vma) vma->pages = NULL; } -static int pd_vma_bind(struct i915_vma *vma, +static int pd_vma_bind(struct i915_address_space *vm, + struct i915_vma *vma, enum i915_cache_level cache_level, u32 unused) { - struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); + struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); struct gen6_ppgtt *ppgtt = vma->private; u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE; @@ -314,7 +315,7 @@ static int pd_vma_bind(struct i915_vma *vma, return 0; } -static void pd_vma_unbind(struct i915_vma *vma) +static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma) { struct gen6_ppgtt *ppgtt = vma->private; struct i915_page_directory * const pd = ppgtt->base.pd; diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 323c328d444a..62979ea591f0 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -436,7 +436,8 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm, intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); } -static int ggtt_bind_vma(struct i915_vma *vma, +static int ggtt_bind_vma(struct i915_address_space *vm, + struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { @@ -451,15 +452,15 @@ static int ggtt_bind_vma(struct i915_vma *vma, if (i915_gem_object_is_readonly(obj)) pte_flags |= PTE_READ_ONLY; - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); + vm->insert_entries(vm, vma, cache_level, pte_flags); vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; return 0; } -static void ggtt_unbind_vma(struct i915_vma *vma) +static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) { - vma->vm->clear_range(vma->vm, vma->node.start, vma->size); + 
vm->clear_range(vm, vma->node.start, vma->size); } static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) @@ -567,7 +568,8 @@ err: return ret; } -static int aliasing_gtt_bind_vma(struct i915_vma *vma, +static int aliasing_gtt_bind_vma(struct i915_address_space *vm, + struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { @@ -580,44 +582,27 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, pte_flags |= PTE_READ_ONLY; if (flags & I915_VMA_LOCAL_BIND) { - struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias; + struct i915_ppgtt *alias = i915_vm_to_ggtt(vm)->alias; - if (flags & I915_VMA_ALLOC) { - ret = alias->vm.allocate_va_range(&alias->vm, - vma->node.start, - vma->size); - if (ret) - return ret; - - set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)); - } - - GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, - __i915_vma_flags(vma))); - alias->vm.insert_entries(&alias->vm, vma, - cache_level, pte_flags); + ret = ppgtt_bind_vma(&alias->vm, vma, cache_level, flags); + if (ret) + return ret; } if (flags & I915_VMA_GLOBAL_BIND) - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); + vm->insert_entries(vm, vma, cache_level, pte_flags); return 0; } -static void aliasing_gtt_unbind_vma(struct i915_vma *vma) +static void aliasing_gtt_unbind_vma(struct i915_address_space *vm, + struct i915_vma *vma) { - if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) { - struct i915_address_space *vm = vma->vm; - + if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) vm->clear_range(vm, vma->node.start, vma->size); - } - - if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) { - struct i915_address_space *vm = - &i915_vm_to_ggtt(vma->vm)->alias->vm; - vm->clear_range(vm, vma->node.start, vma->size); - } + if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND)) + ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma); } static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index d93ebdf3fa0e..f2b75078e05f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -198,14 +198,16 @@ struct intel_gt; struct i915_vma_ops { /* Map an object into an address space with the given cache flags. */ - int (*bind_vma)(struct i915_vma *vma, + int (*bind_vma)(struct i915_address_space *vm, + struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags); /* * Unmap an object from an address space. This usually consists of * setting the valid PTE entries to a reserved scratch page. 
*/ - void (*unbind_vma)(struct i915_vma *vma); + void (*unbind_vma)(struct i915_address_space *vm, + struct i915_vma *vma); int (*set_pages)(struct i915_vma *vma); void (*clear_pages)(struct i915_vma *vma); @@ -566,6 +568,13 @@ int ggtt_set_pages(struct i915_vma *vma); int ppgtt_set_pages(struct i915_vma *vma); void clear_pages(struct i915_vma *vma); +int ppgtt_bind_vma(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags); +void ppgtt_unbind_vma(struct i915_address_space *vm, + struct i915_vma *vma); + void gtt_write_workarounds(struct intel_gt *gt); void setup_private_pat(struct intel_uncore *uncore); diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c index f86f7e68ce5e..f0862e924d11 100644 --- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c @@ -155,16 +155,16 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt) return ppgtt; } -static int ppgtt_bind_vma(struct i915_vma *vma, - enum i915_cache_level cache_level, - u32 flags) +int ppgtt_bind_vma(struct i915_address_space *vm, + struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags) { u32 pte_flags; int err; - if (flags & I915_VMA_ALLOC) { - err = vma->vm->allocate_va_range(vma->vm, - vma->node.start, vma->size); + if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) { + err = vm->allocate_va_range(vm, vma->node.start, vma->size); if (err) return err; @@ -176,17 +176,16 @@ static int ppgtt_bind_vma(struct i915_vma *vma, if (i915_gem_object_is_readonly(vma->obj)) pte_flags |= PTE_READ_ONLY; - GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))); - vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); + vm->insert_entries(vm, vma, cache_level, pte_flags); wmb(); return 0; } -static void ppgtt_unbind_vma(struct i915_vma *vma) +void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma) { if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) - vma->vm->clear_range(vma->vm, vma->node.start, vma->size); + vm->clear_range(vm, vma->node.start, vma->size); } int ppgtt_set_pages(struct i915_vma *vma) diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index f4e22e256ac6..bc64f773dcdb 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -304,7 +304,7 @@ static int __vma_bind(struct dma_fence_work *work) struct i915_vma *vma = vw->vma; int err; - err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags); + err = vma->ops->bind_vma(vma->vm, vma, vw->cache_level, vw->flags); if (err) atomic_or(I915_VMA_ERROR, &vma->flags); @@ -407,7 +407,7 @@ int i915_vma_bind(struct i915_vma *vma, work->vma = vma; work->cache_level = cache_level; - work->flags = bind_flags | I915_VMA_ALLOC; + work->flags = bind_flags; /* * Note we only want to chain up to the migration fence on @@ -433,7 +433,7 @@ int i915_vma_bind(struct i915_vma *vma, work->pinned = vma->obj; } } else { - ret = vma->ops->bind_vma(vma, cache_level, bind_flags); + ret = vma->ops->bind_vma(vma->vm, vma, cache_level, bind_flags); if (ret) return ret; } @@ -1261,7 +1261,7 @@ void __i915_vma_evict(struct i915_vma *vma) if (likely(atomic_read(&vma->vm->open))) { trace_i915_vma_unbind(vma); - vma->ops->unbind_vma(vma); + vma->ops->unbind_vma(vma->vm, vma); } atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE), &vma->flags); diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h index 
63831cdb7402..9e9082dc8f4b 100644 --- a/drivers/gpu/drm/i915/i915_vma_types.h +++ b/drivers/gpu/drm/i915/i915_vma_types.h @@ -235,7 +235,6 @@ struct i915_vma { #define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND) #define I915_VMA_ALLOC_BIT 12 -#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT)) #define I915_VMA_ERROR_BIT 13 #define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT)) diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index edc5e3dda8ca..b173086411ef 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -38,7 +38,8 @@ static void mock_insert_entries(struct i915_address_space *vm, { } -static int mock_bind_ppgtt(struct i915_vma *vma, +static int mock_bind_ppgtt(struct i915_address_space *vm, + struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { @@ -47,7 +48,8 @@ static int mock_bind_ppgtt(struct i915_vma *vma, return 0; } -static void mock_unbind_ppgtt(struct i915_vma *vma) +static void mock_unbind_ppgtt(struct i915_address_space *vm, + struct i915_vma *vma) { } @@ -88,7 +90,8 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) return ppgtt; } -static int mock_bind_ggtt(struct i915_vma *vma, +static int mock_bind_ggtt(struct i915_address_space *vm, + struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { @@ -96,7 +99,8 @@ static int mock_bind_ggtt(struct i915_vma *vma, return 0; } -static void mock_unbind_ggtt(struct i915_vma *vma) +static void mock_unbind_ggtt(struct i915_address_space *vm, + struct i915_vma *vma) { } -- cgit v1.2.3 From d3913019602e32ef6fbba8eb0167e83250cdab22 Mon Sep 17 00:00:00 2001 From: Matt Atwood Date: Thu, 2 Jul 2020 16:09:57 -0700 Subject: Revert "drm/i915/dp: Correctly advertise HBR3 for GEN11+" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The initial CI results did not include a TGL system which includes a panel that is having issues with patch. Revert while we triage. This reverts commit 680c45c767f63e35f063d3ea04f388a9f7ae7079. Signed-off-by: Matt Atwood Reviewed-by: Manasi Navare Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200702230957.30536-1-matthew.s.atwood@intel.com --- drivers/gpu/drm/i915/display/intel_dp.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index a5ab405d3a12..d6295eb20b63 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -137,8 +137,6 @@ static const u8 valid_dsc_slicecount[] = {1, 2, 4}; * * If a CPU or PCH DP output is attached to an eDP panel, this function * will return true, and false otherwise. - * - * This function is not safe to use prior to encoder type being set. 
*/ bool intel_dp_is_edp(struct intel_dp *intel_dp) { @@ -8159,6 +8157,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, intel_encoder->base.name)) return false; + intel_dp_set_source_rates(intel_dp); + intel_dp->reset_link_params = true; intel_dp->pps_pipe = INVALID_PIPE; intel_dp->active_pipe = INVALID_PIPE; @@ -8174,22 +8174,28 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, */ drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); type = DRM_MODE_CONNECTOR_eDP; - intel_encoder->type = INTEL_OUTPUT_EDP; - - /* eDP only on port B and/or C on vlv/chv */ - if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || - IS_CHERRYVIEW(dev_priv)) && - port != PORT_B && port != PORT_C)) - return false; } else { type = DRM_MODE_CONNECTOR_DisplayPort; } - intel_dp_set_source_rates(intel_dp); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) intel_dp->active_pipe = vlv_active_pipe(intel_dp); + /* + * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but + * for DP the encoder type can be set by the caller to + * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. + */ + if (type == DRM_MODE_CONNECTOR_eDP) + intel_encoder->type = INTEL_OUTPUT_EDP; + + /* eDP only on port B and/or C on vlv/chv */ + if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv)) && + intel_dp_is_edp(intel_dp) && + port != PORT_B && port != PORT_C)) + return false; + drm_dbg_kms(&dev_priv->drm, "Adding %s connector on [ENCODER:%d:%s]\n", type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", -- cgit v1.2.3 From 3f04bdce72408eb4ff578f96abe599588fac0bbb Mon Sep 17 00:00:00 2001 From: Michał Winiarski Date: Mon, 6 Jul 2020 16:41:05 +0200 Subject: drm/i915: Reboot CI if we get wedged during driver init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Getting wedged device on driver init is pretty much unrecoverable. Since we're running various scenarios that may potentially hit this in CI (module reload / selftests / hotunplug), and if it happens, it means that we can't trust any subsequent CI results, we should just apply the taint to let the CI know that it should reboot (CI checks taint between test runs). 
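The distinction this introduces, wedged-on-init versus wedged-on-fini versus plain wedged, comes down to which of the top bits of the reset flags word are set. A rough standalone model (plain C bit operations standing in for the kernel's test_bit()/set_bit(), bit layout copied from the intel_reset_types.h hunk in this patch):

#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Same layout as intel_reset_types.h after this patch (top of the word). */
#define BITS_PER_LONG        (sizeof(unsigned long) * CHAR_BIT)
#define I915_WEDGED_ON_INIT  (BITS_PER_LONG - 3)
#define I915_WEDGED_ON_FINI  (BITS_PER_LONG - 2)
#define I915_WEDGED          (BITS_PER_LONG - 1)

static bool has_unrecoverable_error(unsigned long flags)
{
        /* Either init-time or fini-time wedging means there is no recovery path. */
        return flags & ((1UL << I915_WEDGED_ON_INIT) |
                        (1UL << I915_WEDGED_ON_FINI));
}

static bool is_wedged(unsigned long flags)
{
        /* An unrecoverable error implies the generic wedged bit is also set. */
        assert(!has_unrecoverable_error(flags) ||
               (flags & (1UL << I915_WEDGED)));
        return flags & (1UL << I915_WEDGED);
}

int main(void)
{
        unsigned long flags = 0;

        /* set_wedged_on_init: wedge first, then record why. */
        flags |= 1UL << I915_WEDGED;
        flags |= 1UL << I915_WEDGED_ON_INIT;

        printf("wedged=%d unrecoverable=%d\n",
               is_wedged(flags), has_unrecoverable_error(flags));
        return 0;
}

The assert mirrors the GEM_BUG_ON added to intel_gt_is_wedged() below: an unrecoverable error is only ever reported on a GT that is also marked wedged.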
v2: Comment that WEDGED_ON_INIT is non-recoverable, distinguish WEDGED_ON_INIT from WEDGED_ON_FINI (Chris) v3: Appease checkpatch, fixup search-replace logic expression mindbomb in assert (Chris) Signed-off-by: Michał Winiarski Cc: Chris Wilson Cc: Michal Wajdeczko Cc: Petri Latvala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200706144107.204821-1-michal@hardline.pl --- drivers/gpu/drm/i915/gt/intel_engine_user.c | 2 +- drivers/gpu/drm/i915/gt/intel_gt.c | 2 +- drivers/gpu/drm/i915/gt/intel_gt.h | 12 ++++++++---- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 2 +- drivers/gpu/drm/i915/gt/intel_reset.c | 13 +++++++++++-- drivers/gpu/drm/i915/gt/intel_reset.h | 10 ++-------- drivers/gpu/drm/i915/gt/intel_reset_types.h | 7 ++++++- 7 files changed, 30 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c index 848decee9066..34e6096f196e 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_user.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c @@ -201,7 +201,7 @@ void intel_engines_driver_register(struct drm_i915_private *i915) uabi_node); char old[sizeof(engine->name)]; - if (intel_gt_has_init_error(engine->gt)) + if (intel_gt_has_unrecoverable_error(engine->gt)) continue; /* ignore incomplete engines */ GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes)); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index ebc29b6ee86c..876f78759095 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -510,7 +510,7 @@ static int __engines_verify_workarounds(struct intel_gt *gt) static void __intel_gt_disable(struct intel_gt *gt) { - intel_gt_set_wedged_on_init(gt); + intel_gt_set_wedged_on_fini(gt); intel_gt_suspend_prepare(gt); intel_gt_suspend_late(gt); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 4fac043750aa..982957ca4e62 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -58,14 +58,18 @@ static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt, return i915_ggtt_offset(gt->scratch) + field; } -static inline bool intel_gt_is_wedged(const struct intel_gt *gt) +static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt) { - return __intel_reset_failed(>->reset); + return test_bit(I915_WEDGED_ON_INIT, >->reset.flags) || + test_bit(I915_WEDGED_ON_FINI, >->reset.flags); } -static inline bool intel_gt_has_init_error(const struct intel_gt *gt) +static inline bool intel_gt_is_wedged(const struct intel_gt *gt) { - return test_bit(I915_WEDGED_ON_INIT, >->reset.flags); + GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) && + !test_bit(I915_WEDGED, >->reset.flags)); + + return unlikely(test_bit(I915_WEDGED, >->reset.flags)); } #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index f1d5333f9456..274aa0dd7050 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -188,7 +188,7 @@ int intel_gt_resume(struct intel_gt *gt) enum intel_engine_id id; int err; - err = intel_gt_has_init_error(gt); + err = intel_gt_has_unrecoverable_error(gt); if (err) return err; diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 0156f1f5c736..6f94b6479a2f 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -880,7 
+880,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) return true; /* Never fully initialised, recovery impossible */ - if (test_bit(I915_WEDGED_ON_INIT, >->reset.flags)) + if (intel_gt_has_unrecoverable_error(gt)) return false; GT_TRACE(gt, "start\n"); @@ -1342,7 +1342,7 @@ int intel_gt_terminally_wedged(struct intel_gt *gt) if (!intel_gt_is_wedged(gt)) return 0; - if (intel_gt_has_init_error(gt)) + if (intel_gt_has_unrecoverable_error(gt)) return -EIO; /* Reset still in progress? Maybe we will recover? */ @@ -1360,6 +1360,15 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt) I915_WEDGED_ON_INIT); intel_gt_set_wedged(gt); set_bit(I915_WEDGED_ON_INIT, >->reset.flags); + + /* Wedged on init is non-recoverable */ + add_taint_for_CI(TAINT_WARN); +} + +void intel_gt_set_wedged_on_fini(struct intel_gt *gt) +{ + intel_gt_set_wedged(gt); + set_bit(I915_WEDGED_ON_FINI, >->reset.flags); } void intel_gt_init_reset(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h index 8e8d5f761166..a0eec7c11c0c 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.h +++ b/drivers/gpu/drm/i915/gt/intel_reset.h @@ -47,8 +47,10 @@ int intel_gt_terminally_wedged(struct intel_gt *gt); /* * There's no unset_wedged_on_init paired with this one. * Once we're wedged on init, there's no going back. + * Same thing for unset_wedged_on_fini. */ void intel_gt_set_wedged_on_init(struct intel_gt *gt); +void intel_gt_set_wedged_on_fini(struct intel_gt *gt); int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask); @@ -71,14 +73,6 @@ void __intel_fini_wedge(struct intel_wedge_me *w); (W)->gt; \ __intel_fini_wedge((W))) -static inline bool __intel_reset_failed(const struct intel_reset *reset) -{ - GEM_BUG_ON(test_bit(I915_WEDGED_ON_INIT, &reset->flags) ? - !test_bit(I915_WEDGED, &reset->flags) : false); - - return unlikely(test_bit(I915_WEDGED, &reset->flags)); -} - bool intel_has_gpu_reset(const struct intel_gt *gt); bool intel_has_reset_engine(const struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h index f43bc3a0fe4f..add6b86d9d03 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset_types.h +++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h @@ -34,12 +34,17 @@ struct intel_reset { * longer use the GPU - similar to #I915_WEDGED bit. The difference in * in the way we're handling "forced" unwedged (e.g. through debugfs), * which is not allowed in case we failed to initialize. + * + * #I915_WEDGED_ON_FINI - Similar to #I915_WEDGED_ON_INIT, except we + * use it to mark that the GPU is no longer available (and prevent + * users from using it). */ unsigned long flags; #define I915_RESET_BACKOFF 0 #define I915_RESET_MODESET 1 #define I915_RESET_ENGINE 2 -#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 2) +#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 3) +#define I915_WEDGED_ON_FINI (BITS_PER_LONG - 2) #define I915_WEDGED (BITS_PER_LONG - 1) struct mutex mutex; /* serialises wedging/unwedging */ -- cgit v1.2.3 From 65706203d159db3353839fe97561397e8c188aa1 Mon Sep 17 00:00:00 2001 From: Michał Winiarski Date: Mon, 6 Jul 2020 16:41:06 +0200 Subject: drm/i915: Print caller when tainting for CI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can add taint from multiple places, printing the caller allows us to have a better overview of what exactly caused us to do the tainting. 
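The caller is recovered from the helper's return address, which is essentially what the kernel's _RET_IP_ wraps (__builtin_return_address(0)); %pS then symbolizes that address. A minimal userspace sketch of the same attribution pattern, printing the raw pointer instead of a symbol name:

#include <stdio.h>

/*
 * Standalone sketch of caller attribution: a noinline helper reports who
 * called it via its own return address.  The kernel version prints the
 * address with %pS to get symbol+offset; here we print the raw pointer.
 */
__attribute__((noinline))
static void add_taint_for_ci(unsigned int taint)
{
        printf("CI tainted:%#x by %p\n", taint, __builtin_return_address(0));
        /* ...then actually apply the taint (add_taint() in the kernel). */
}

static void driver_probe_failure(void)
{
        add_taint_for_ci(9);    /* stand-in id; the kernel passes TAINT_WARN */
}

int main(void)
{
        driver_probe_failure();
        return 0;
}

The helper must not be inlined, otherwise the recorded return address would point at the caller's caller instead of the site that actually requested the taint.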
v2: Tweak format and print the device (Chris) v3: Move things around (Chris) Suggested-by: Michal Wajdeczko Signed-off-by: Michał Winiarski Cc: Chris Wilson Cc: Michal Wajdeczko Cc: Petri Latvala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200706144107.204821-2-michal@hardline.pl --- drivers/gpu/drm/i915/gt/intel_reset.c | 6 +++--- drivers/gpu/drm/i915/gt/selftest_rc6.c | 2 +- drivers/gpu/drm/i915/i915_gem.h | 2 +- drivers/gpu/drm/i915/i915_utils.c | 7 +++++++ drivers/gpu/drm/i915/i915_utils.h | 3 ++- drivers/gpu/drm/i915/intel_uncore.c | 4 ++-- 6 files changed, 16 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 6f94b6479a2f..121bf39a6f3e 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -930,7 +930,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) * Warn CI about the unrecoverable wedged condition. * Time for a reboot. */ - add_taint_for_CI(TAINT_WARN); + add_taint_for_CI(gt->i915, TAINT_WARN); return false; } @@ -1097,7 +1097,7 @@ taint: * rather than continue on into oblivion. For everyone else, * the system should still plod along, but they have been warned! */ - add_taint_for_CI(TAINT_WARN); + add_taint_for_CI(gt->i915, TAINT_WARN); error: __intel_gt_set_wedged(gt); goto finish; @@ -1362,7 +1362,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt) set_bit(I915_WEDGED_ON_INIT, >->reset.flags); /* Wedged on init is non-recoverable */ - add_taint_for_CI(TAINT_WARN); + add_taint_for_CI(gt->i915, TAINT_WARN); } void intel_gt_set_wedged_on_fini(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c index 3c8434846fa1..64ef5ee5decf 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rc6.c +++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c @@ -233,7 +233,7 @@ int live_rc6_ctx_wa(void *arg) i915_reset_engine_count(error, engine)) { pr_err("%s: GPU reset required\n", engine->name); - add_taint_for_CI(TAINT_WARN); + add_taint_for_CI(gt->i915, TAINT_WARN); err = -EIO; goto out; } diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index 1753c84d6c0d..f333e88a2b6e 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -72,7 +72,7 @@ struct drm_i915_private; trace_printk(__VA_ARGS__); \ } while (0) #define GEM_TRACE_DUMP() \ - do { ftrace_dump(DUMP_ALL); add_taint_for_CI(TAINT_WARN); } while (0) + do { ftrace_dump(DUMP_ALL); __add_taint_for_CI(TAINT_WARN); } while (0) #define GEM_TRACE_DUMP_ON(expr) \ do { if (expr) GEM_TRACE_DUMP(); } while (0) #else diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index f42a9e9a0b4f..01a3d3c941bf 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ b/drivers/gpu/drm/i915/i915_utils.c @@ -49,6 +49,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, } } +void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint) +{ + __i915_printk(i915, KERN_NOTICE, "CI tainted:%#x by %pS\n", + taint, (void *)_RET_IP_); + __add_taint_for_CI(taint); +} + #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) static unsigned int i915_probe_fail_count; diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 03a73d2bd50d..b1c5955a52e1 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -436,7 +436,8 @@ static inline const char *enableddisabled(bool v) return 
v ? "enabled" : "disabled"; } -static inline void add_taint_for_CI(unsigned int taint) +void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint); +static inline void __add_taint_for_CI(unsigned int taint) { /* * The system is "ok", just about surviving for the user, but diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 592364aed2da..8e2c073da1aa 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -142,7 +142,7 @@ fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d) if (wait_ack_clear(d, FORCEWAKE_KERNEL)) { DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n", intel_uncore_forcewake_domain_to_str(d->id)); - add_taint_for_CI(TAINT_WARN); /* CI now unreliable */ + add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */ } } @@ -219,7 +219,7 @@ fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d) if (wait_ack_set(d, FORCEWAKE_KERNEL)) { DRM_ERROR("%s: timed out waiting for forcewake ack request.\n", intel_uncore_forcewake_domain_to_str(d->id)); - add_taint_for_CI(TAINT_WARN); /* CI now unreliable */ + add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */ } } -- cgit v1.2.3 From fcab594a3030d49db933d68bdad398ab8fea265c Mon Sep 17 00:00:00 2001 From: Michał Winiarski Date: Mon, 6 Jul 2020 16:41:07 +0200 Subject: drm/i915: Don't taint when using fault injection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is not really unexpected to hit wedge on init this way. We're already downgrading error printk when running with fault injection, let's use the same approach for CI tainting. v2: Don't check fault inject in trace dump (Chris) Signed-off-by: Michał Winiarski Cc: Chris Wilson Cc: Michal Wajdeczko Cc: Petri Latvala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200706144107.204821-3-michal@hardline.pl --- drivers/gpu/drm/i915/i915_utils.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index 01a3d3c941bf..4c305d838016 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ b/drivers/gpu/drm/i915/i915_utils.c @@ -53,7 +53,10 @@ void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint) { __i915_printk(i915, KERN_NOTICE, "CI tainted:%#x by %pS\n", taint, (void *)_RET_IP_); - __add_taint_for_CI(taint); + + /* Failures that occur during fault injection testing are expected */ + if (!i915_error_injected()) + __add_taint_for_CI(taint); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) -- cgit v1.2.3 From 6f48fd8a4e99118377bdabf3cb38fc95aa097a2f Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Fri, 3 Jul 2020 14:50:46 +0200 Subject: drm/i915: Fix spelling mistake in i915_reg.h Fix typo: "TRIGER" --> "TRIGGER" The two misplelled macros: 1) OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 2) OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK are not used in any other sources of the kernel, so this change can be consider only a local change for the i915_reg.h file. 
Signed-off-by: Flavio Suligoi Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200703125046.8395-1-f.suligoi@asem.it --- drivers/gpu/drm/i915/i915_reg.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 03590d2d75f7..5bee4e212866 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -868,7 +868,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OAREPORTTRIG1 _MMIO(0x2740) #define OAREPORTTRIG1_THRESHOLD_MASK 0xffff -#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */ +#define OAREPORTTRIG1_EDGE_LEVEL_TRIGGER_SELECT_MASK 0xffff0000 /* 0=level */ #define OAREPORTTRIG2 _MMIO(0x2744) #define OAREPORTTRIG2_INVERT_A_0 (1 << 0) @@ -921,7 +921,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OAREPORTTRIG5 _MMIO(0x2750) #define OAREPORTTRIG5_THRESHOLD_MASK 0xffff -#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */ +#define OAREPORTTRIG5_EDGE_LEVEL_TRIGGER_SELECT_MASK 0xffff0000 /* 0=level */ #define OAREPORTTRIG6 _MMIO(0x2754) #define OAREPORTTRIG6_INVERT_A_0 (1 << 0) -- cgit v1.2.3 From 8567774e87e23a57155e5102f81208729b992ae6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 6 Jul 2020 18:01:38 +0100 Subject: drm/i915/gt: Pin the rings before marking active On eviction, we acquire the vm->mutex and then wait on the vma->active. Therefore when binding and pinning the vma, we must follow the same sequence, lock/pin the vma then mark it active. Otherwise, we mark the vma as active, then wait for the vm->mutex, and meanwhile the evictor holding the mutex waits upon us to complete our activity. Fixes: 8ccfc20a7d56 ("drm/i915/gt: Mark ring->vma as active while pinned") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: # v5.6+ Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20200706170138.8993-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_context.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index e4aece20bc80..52db2bde44a3 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -204,25 +204,25 @@ static int __ring_active(struct intel_ring *ring) { int err; - err = i915_active_acquire(&ring->vma->active); + err = intel_ring_pin(ring); if (err) return err; - err = intel_ring_pin(ring); + err = i915_active_acquire(&ring->vma->active); if (err) - goto err_active; + goto err_pin; return 0; -err_active: - i915_active_release(&ring->vma->active); +err_pin: + intel_ring_unpin(ring); return err; } static void __ring_retire(struct intel_ring *ring) { - intel_ring_unpin(ring); i915_active_release(&ring->vma->active); + intel_ring_unpin(ring); } __i915_active_call -- cgit v1.2.3 From a3a402840662a7449b1ca0baa58eddf36ef23158 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 6 Jul 2020 23:43:08 +0100 Subject: drm/i915: Update dma-attributes for our sg DMA Looking through the attributes for DMA mappings, it appears that by default dma_map_sg will try and create a kernel accessible map of the page. We never access this, as we either have a struct page already or an iomap, so we can request that the dma mapper does not create one. Without a kernel map in place, one presumes the rest of the memory control attributes do not apply. 
We also explicitly control the caches around the mappings, so we can ask it not to bother synchronising itself. Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20200706224308.22636-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index cb43381b0d37..c5ee1567f3d1 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -31,6 +31,8 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, if (dma_map_sg_attrs(&obj->base.dev->pdev->dev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC | + DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN)) return 0; -- cgit v1.2.3 From 239bef676d8e7118f6d75f208e6d6081ff16a5a1 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Thu, 25 Jun 2020 12:52:52 -0700 Subject: drm/i915/display: Implement new combo phy initialization step MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is new step that was recently added to the combo phy initialization. v2: - using intel_de_rmw() v3: - going back to read() modify and write() as group register can't be read BSpec: 49291 Cc: Clinton A Taylor Cc: Lucas De Marchi Reviewed-by: Matt Roper Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200625195252.39312-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_combo_phy.c | 25 +++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 9 +++++++++ 2 files changed, 34 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 77b04bb3ec62..eccaa79cb4a9 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -264,6 +264,18 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv, if (!icl_combo_phy_enabled(dev_priv, phy)) return false; + if (INTEL_GEN(dev_priv) >= 12) { + ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN0(phy), + ICL_PORT_TX_DW8_ODCC_CLK_SEL | + ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, + ICL_PORT_TX_DW8_ODCC_CLK_SEL | + ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2); + + ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN0(phy), + DCC_MODE_SELECT_MASK, + DCC_MODE_SELECT_CONTINUOSLY); + } + ret = cnl_verify_procmon_ref_values(dev_priv, phy); if (phy_is_master(dev_priv, phy)) { @@ -375,6 +387,19 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv) intel_de_write(dev_priv, ICL_PHY_MISC(phy), val); skip_phy_misc: + if (INTEL_GEN(dev_priv) >= 12) { + val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN0(phy)); + val &= ~ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK; + val |= ICL_PORT_TX_DW8_ODCC_CLK_SEL; + val |= ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2; + intel_de_write(dev_priv, ICL_PORT_TX_DW8_GRP(phy), val); + + val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy)); + val &= ~DCC_MODE_SELECT_MASK; + val |= DCC_MODE_SELECT_CONTINUOSLY; + intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val); + } + cnl_set_procmon_ref_values(dev_priv, phy); if (phy_is_master(dev_priv, phy)) { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5bee4e212866..86a23ced051b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1974,6 +1974,8 @@ static inline bool 
i915_mmio_reg_valid(i915_reg_t reg) #define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy)) #define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy)) #define ICL_PORT_PCS_DW1_LN0(phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, phy)) +#define DCC_MODE_SELECT_MASK (0x3 << 20) +#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20) #define COMMON_KEEPER_EN (1 << 26) #define LATENCY_OPTIM_MASK (0x3 << 2) #define LATENCY_OPTIM_VAL(x) ((x) << 2) @@ -2072,6 +2074,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define N_SCALAR(x) ((x) << 24) #define N_SCALAR_MASK (0x7F << 24) +#define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy)) +#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy)) +#define ICL_PORT_TX_DW8_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(8, 0, phy)) +#define ICL_PORT_TX_DW8_ODCC_CLK_SEL REG_BIT(31) +#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK REG_GENMASK(30, 29) +#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2 REG_FIELD_PREP(ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, 0x1) + #define _ICL_DPHY_CHKN_REG 0x194 #define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG) #define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7) -- cgit v1.2.3 From 52797a8e8529507e3831ed9b4ed6fd7d8671c63f Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Tue, 7 Jul 2020 13:45:30 -0700 Subject: drm/i915/ehl: Add new PCI ids MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Two new PCI ids added to ehl. v2: added two additional PCI ids BSpec: 29153 Cc: Matt Roper Cc: Anusha Srivatsa Signed-off-by: José Roberto de Souza Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20200707204530.42289-1-jose.souza@intel.com --- include/drm/i915_pciids.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index bc989de2aac2..d6cb28992ba0 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -588,7 +588,11 @@ INTEL_VGA_DEVICE(0x4551, info), \ INTEL_VGA_DEVICE(0x4541, info), \ INTEL_VGA_DEVICE(0x4E71, info), \ + INTEL_VGA_DEVICE(0x4557, info), \ + INTEL_VGA_DEVICE(0x4555, info), \ INTEL_VGA_DEVICE(0x4E61, info), \ + INTEL_VGA_DEVICE(0x4E57, info), \ + INTEL_VGA_DEVICE(0x4E55, info), \ INTEL_VGA_DEVICE(0x4E51, info) /* TGL */ -- cgit v1.2.3 From 33f9a623bfc6bed3b9586f68cb4a9ea332589791 Mon Sep 17 00:00:00 2001 From: Anshuman Gupta Date: Tue, 30 Jun 2020 13:50:48 +0530 Subject: drm/i915/hdcp: Update CP as per the kernel internal state Content Protection property should be updated as per the kernel internal state. Let's say if Content protection is disabled by userspace, CP property should be set to UNDESIRED so that reauthentication will not happen until userspace request it again, but when kernel disables the HDCP due to any DDI disabling sequences like modeset/DPMS operation, kernel should set the property to DESIRED, so that when opportunity arises, kernel will start the HDCP authentication on its own. Somewhere in the line, state machine to set content protection to DESIRED from kernel was broken and IGT coverage was missing for it. This patch fixes it. v2: - Fixing hdcp CP state in connector atomic check function intel_hdcp_atomic_check(). [Maarten] This will require to check hdcp->value in intel_hdcp_update_pipe() in order to avoid enabling hdcp, if it was already enabled. v3: - Rebased. 
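The rule being restored reduces to a small decision at atomic-check time: a kernel-initiated teardown (anything that needs a modeset) downgrades an ENABLED connector to DESIRED so authentication is retried later, while an explicit userspace request for UNDESIRED is left untouched. A standalone sketch of that decision, with an enum whose names mirror the DRM_MODE_CONTENT_PROTECTION_* constants:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors DRM_MODE_CONTENT_PROTECTION_{UNDESIRED,DESIRED,ENABLED}. */
enum cp_state { CP_UNDESIRED, CP_DESIRED, CP_ENABLED };

/*
 * What the new connector state should carry after atomic check.  Only a
 * kernel-initiated teardown (needs_modeset) downgrades ENABLED to DESIRED;
 * a userspace request for UNDESIRED is always honoured as-is.
 */
static enum cp_state fixup_cp_state(enum cp_state old_cp, enum cp_state new_cp,
                                    bool needs_modeset)
{
        if (needs_modeset && old_cp == CP_ENABLED && new_cp != CP_UNDESIRED)
                return CP_DESIRED;
        return new_cp;
}

int main(void)
{
        /* Modeset while enabled: kernel re-arms authentication later. */
        printf("%d\n", fixup_cp_state(CP_ENABLED, CP_ENABLED, true));   /* 1 = DESIRED */
        /* Userspace turned it off: stays off until asked again. */
        printf("%d\n", fixup_cp_state(CP_ENABLED, CP_UNDESIRED, true)); /* 0 = UNDESIRED */
        return 0;
}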
Cc: Ramalingam C Cc: Maarten Lankhorst Reviewed-by: Uma Shankar Signed-off-by: Anshuman Gupta Link: https://patchwork.freedesktop.org/patch/350962/?series=72664&rev=2 #v1 Link: https://patchwork.freedesktop.org/patch/359396/?series=72251&rev=3 #v2 Reviewed-by: Ramalingam C Signed-off-by: Ramalingam C Link: https://patchwork.freedesktop.org/patch/msgid/20200630082048.22308-1-anshuman.gupta@intel.com --- drivers/gpu/drm/i915/display/intel_hdcp.c | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 1b6dadfce4eb..3d42888d2020 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -2086,6 +2086,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, (conn_state->hdcp_content_type != hdcp->content_type && conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED); + bool desired_and_not_enabled = false; /* * During the HDCP encryption session if Type change is requested, @@ -2108,8 +2109,15 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, } if (conn_state->content_protection == - DRM_MODE_CONTENT_PROTECTION_DESIRED || - content_protection_type_changed) + DRM_MODE_CONTENT_PROTECTION_DESIRED) { + mutex_lock(&hdcp->mutex); + /* Avoid enabling hdcp, if it already ENABLED */ + desired_and_not_enabled = + hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED; + mutex_unlock(&hdcp->mutex); + } + + if (desired_and_not_enabled || content_protection_type_changed) intel_hdcp_enable(connector, crtc_state->cpu_transcoder, (u8)conn_state->hdcp_content_type); @@ -2158,6 +2166,19 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, return; } + crtc_state = drm_atomic_get_new_crtc_state(new_state->state, + new_state->crtc); + /* + * Fix the HDCP uapi content protection state in case of modeset. + * FIXME: As per HDCP content protection property uapi doc, an uevent() + * need to be sent if there is transition from ENABLED->DESIRED. + */ + if (drm_atomic_crtc_needs_modeset(crtc_state) && + (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED && + new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) + new_state->content_protection = + DRM_MODE_CONTENT_PROTECTION_DESIRED; + /* * Nothing to do if the state didn't change, or HDCP was activated since * the last commit. And also no change in hdcp content type. @@ -2170,8 +2191,6 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, return; } - crtc_state = drm_atomic_get_new_crtc_state(new_state->state, - new_state->crtc); crtc_state->mode_changed = true; } -- cgit v1.2.3 From 018532e940557d606d849139d515b4cfbc221f09 Mon Sep 17 00:00:00 2001 From: Ramalingam C Date: Wed, 29 Apr 2020 19:15:55 +0530 Subject: drm/i915/hdcp: Fix the return handling of drm_hdcp_check_ksvs_revoked drm_hdcp_check_ksvs_revoked() returns the number of revoked keys and error codes when the SRM parsing is failed. Errors in SRM parsing can't affect the HDCP auth, hence with this patch, I915 will look out for revoked key count alone. 
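In other words the helper follows the usual "negative errno or a count" convention, so the old truthiness check also failed authentication on SRM parse errors. A minimal illustration of the two checks, using a stand-in for the DRM helper (the real function takes the DRM device and a receiver-ID list):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in for drm_hdcp_check_ksvs_revoked(): returns the number of revoked
 * KSVs found, or a negative errno if the SRM blob could not be parsed.
 */
static int check_ksvs_revoked_stub(bool srm_broken, int revoked)
{
        return srm_broken ? -EINVAL : revoked;
}

int main(void)
{
        int ret = check_ksvs_revoked_stub(true, 0);     /* parse failure */

        /* Old check: any non-zero value aborts auth, including -EINVAL. */
        printf("old check rejects: %d\n", ret != 0);

        /* New check: only an actual revoked-key count aborts auth. */
        printf("new check rejects: %d\n", ret > 0);

        return 0;
}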
Signed-off-by: Ramalingam C cc: Sean Paul Reviewed-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20200429134555.22106-1-ramalingam.c@intel.com --- drivers/gpu/drm/i915/display/intel_hdcp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 3d42888d2020..89a4d294822d 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -576,7 +576,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) goto err; if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo, - num_downstream)) { + num_downstream) > 0) { drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n"); ret = -EPERM; goto err; @@ -682,7 +682,7 @@ static int intel_hdcp_auth(struct intel_connector *connector) if (ret < 0) return ret; - if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) { + if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) { drm_err(&dev_priv->drm, "BKSV is revoked\n"); return -EPERM; } @@ -1283,7 +1283,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector) if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, msgs.send_cert.cert_rx.receiver_id, - 1)) { + 1) > 0) { drm_err(&dev_priv->drm, "Receiver ID is revoked\n"); return -EPERM; } @@ -1484,7 +1484,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector) HDCP_2_2_DEV_COUNT_LO(rx_info[1])); if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, msgs.recvid_list.receiver_ids, - device_cnt)) { + device_cnt) > 0) { drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n"); return -EPERM; } -- cgit v1.2.3 From 7f67deeb7f42030cee03348d43a2743f001b5a3d Mon Sep 17 00:00:00 2001 From: Michał Winiarski Date: Wed, 8 Jul 2020 12:08:42 +0200 Subject: drm/i915/uc: Extract uc usage details into separate debugfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It has been pointed out that information about HuC usage doesn't belong in guc_info debugfs. Let's move "supported/used/wanted" matrix to a separate debugfs file, keeping guc_info strictly about GuC. 
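The dump goes through a struct drm_printer rather than seq_printf() directly so that the same routine can be aimed at different sinks (a seq_file via drm_seq_file_printer() here; other drm_printer constructors target e.g. the kernel log). Stripped to its essence, that indirection is just an output callback plus private data, sketched standalone below:

#include <stdarg.h>
#include <stdio.h>

/* Minimal model of drm_printer: an output callback plus private data. */
struct printer {
        void (*emit)(void *priv, const char *line);
        void *priv;
};

static void pr_printf(struct printer *p, const char *fmt, ...)
{
        char buf[256];
        va_list ap;

        va_start(ap, fmt);
        vsnprintf(buf, sizeof(buf), fmt, ap);
        va_end(ap);
        p->emit(p->priv, buf);
}

static void emit_stdio(void *priv, const char *line)
{
        fputs(line, priv);              /* priv is a FILE * for this backend */
}

/* The same dump function works for any backend the printer wraps. */
static void uc_usage_dump(struct printer *p)
{
        pr_printf(p, "[guc] supported:%s wanted:%s used:%s\n", "yes", "yes", "yes");
        pr_printf(p, "[huc] supported:%s wanted:%s used:%s\n", "yes", "yes", "no");
}

int main(void)
{
        struct printer p = { .emit = emit_stdio, .priv = stdout };

        uc_usage_dump(&p);
        return 0;
}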
Suggested-by: Daniele Ceraolo Spurio Signed-off-by: Michał Winiarski Cc: Daniele Ceraolo Spurio Cc: Lukasz Fiedorowicz Cc: Michal Wajdeczko Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200708100843.297655-1-michal@hardline.pl --- drivers/gpu/drm/i915/gt/uc/intel_guc.c | 23 +++++++-------------- drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c | 29 +++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 446a41946f56..861657897c0f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -733,28 +733,19 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size, */ void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p) { - struct intel_uc *uc = container_of(guc, struct intel_uc, guc); struct intel_gt *gt = guc_to_gt(guc); struct intel_uncore *uncore = gt->uncore; intel_wakeref_t wakeref; - drm_printf(p, "[guc] supported:%s wanted:%s used:%s\n", - yesno(intel_uc_supports_guc(uc)), - yesno(intel_uc_wants_guc(uc)), - yesno(intel_uc_uses_guc(uc))); - drm_printf(p, "[huc] supported:%s wanted:%s used:%s\n", - yesno(intel_uc_supports_huc(uc)), - yesno(intel_uc_wants_huc(uc)), - yesno(intel_uc_uses_huc(uc))); - drm_printf(p, "[submission] supported:%s wanted:%s used:%s\n", - yesno(intel_uc_supports_guc_submission(uc)), - yesno(intel_uc_wants_guc_submission(uc)), - yesno(intel_uc_uses_guc_submission(uc))); - - if (!intel_guc_is_supported(guc) || !intel_guc_is_wanted(guc)) + if (!intel_guc_is_supported(guc)) { + drm_printf(p, "GuC not supported\n"); return; + } - drm_puts(p, "\n"); + if (!intel_guc_is_wanted(guc)) { + drm_printf(p, "GuC disabled\n"); + return; + } intel_uc_fw_dump(&guc->fw, p); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c index 9d16b784aa0d..089d98662f46 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c @@ -4,14 +4,41 @@ */ #include +#include +#include "gt/debugfs_gt.h" #include "intel_guc_debugfs.h" #include "intel_huc_debugfs.h" #include "intel_uc.h" #include "intel_uc_debugfs.h" +static int uc_usage_show(struct seq_file *m, void *data) +{ + struct intel_uc *uc = m->private; + struct drm_printer p = drm_seq_file_printer(m); + + drm_printf(&p, "[guc] supported:%s wanted:%s used:%s\n", + yesno(intel_uc_supports_guc(uc)), + yesno(intel_uc_wants_guc(uc)), + yesno(intel_uc_uses_guc(uc))); + drm_printf(&p, "[huc] supported:%s wanted:%s used:%s\n", + yesno(intel_uc_supports_huc(uc)), + yesno(intel_uc_wants_huc(uc)), + yesno(intel_uc_uses_huc(uc))); + drm_printf(&p, "[submission] supported:%s wanted:%s used:%s\n", + yesno(intel_uc_supports_guc_submission(uc)), + yesno(intel_uc_wants_guc_submission(uc)), + yesno(intel_uc_uses_guc_submission(uc))); + + return 0; +} +DEFINE_GT_DEBUGFS_ATTRIBUTE(uc_usage); + void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root) { + static const struct debugfs_gt_file files[] = { + { "usage", &uc_usage_fops, NULL }, + }; struct dentry *root; if (!gt_root) @@ -25,6 +52,8 @@ void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root) if (IS_ERR(root)) return; + intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), uc); + intel_guc_debugfs_register(&uc->guc, root); intel_huc_debugfs_register(&uc->huc, root); } -- cgit v1.2.3 From 
9459fd5945f66d9d2b85824fea4ce57723e04ab8 Mon Sep 17 00:00:00 2001 From: Michał Winiarski Date: Wed, 8 Jul 2020 12:08:43 +0200 Subject: drm/i915/huc: Adjust HuC state accordingly after GuC fetch error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Firmware "Selected" state is a transient state - we don't expect to see it after finishing driver probe, we even have asserts sprinkled over i915 to confirm whether that's the case. Unfortunately - we don't handle the transition out of "Selected" in case of GuC fetch error, leading those asserts to fire when calling "intel_huc_is_used()". v2: Add dbg print when moving HuC into error state (Daniele) Reported-by: Marcin Bernatowicz Signed-off-by: Michał Winiarski Cc: Chris Wilson Cc: Daniele Ceraolo Spurio Cc: Marcin Bernatowicz Cc: Michal Wajdeczko Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200708100843.297655-2-michal@hardline.pl --- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 1c2d6358826c..d6f55f70889d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -267,8 +267,17 @@ static void __uc_fetch_firmwares(struct intel_uc *uc) GEM_BUG_ON(!intel_uc_wants_guc(uc)); err = intel_uc_fw_fetch(&uc->guc.fw); - if (err) + if (err) { + /* Make sure we transition out of transient "SELECTED" state */ + if (intel_uc_wants_huc(uc)) { + drm_dbg(&uc_to_gt(uc)->i915->drm, + "Failed to fetch GuC: %d disabling HuC\n", err); + intel_uc_fw_change_status(&uc->huc.fw, + INTEL_UC_FIRMWARE_ERROR); + } + return; + } if (intel_uc_wants_huc(uc)) intel_uc_fw_fetch(&uc->huc.fw); -- cgit v1.2.3 From 5a2ad99bcedf0ebbfb29e5d7a9764aae301ac64b Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Wed, 1 Jul 2020 15:10:51 -0700 Subject: drm/i915/dp: Helper for checking DDI_BUF_CTL Idle status MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Modify the helper to add a fixed delay or poll with timeout based on platform specification to check for either Idle bit set (DDI_BUF_CTL is idle for disable case) v2: * Use 2 separate functions or idle and active (Ville) v3: * Change the timeout to 16usecs (Ville) v4: * Change the timeout 8, follow spec (Ville) Cc: Ville Syrjälä Cc: Imre Deak Signed-off-by: Manasi Navare Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200701221052.8946-1-manasi.d.navare@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 583170e73881..2120d6da3fcc 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1184,16 +1184,15 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, enum port port) { - i915_reg_t reg = DDI_BUF_CTL(port); - int i; - - for (i = 0; i < 16; i++) { - udelay(1); - if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE) - return; + if (IS_BROXTON(dev_priv)) { + udelay(16); + return; } - drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c idle bit\n", - port_name(port)); + + if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) & + 
DDI_BUF_IS_IDLE), 8)) + drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get idle\n", + port_name(port)); } static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) -- cgit v1.2.3 From e828da3028b06cba43d4d91b94cb884d8927be09 Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Wed, 1 Jul 2020 15:10:52 -0700 Subject: drm/i915/dp: Helper to check for DDI BUF status to get active MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Based on the platform, Bspec expects us to wait or poll with timeout for DDI BUF IDLE bit to be set to 0 (non idle) or get active after enabling DDI_BUF_CTL. v2: * Based on platform, fixed delay or poll (Ville) * Use a helper to do this (Imre, Ville) v3: * Add a new function _active for DDI BUF CTL to be non idle (Ville) v4: * Use the timeout for GLK (Ville) v5: * Add bspec quote, change timeout to 500us (Ville) Cc: Ville Syrjälä Cc: Imre Deak Signed-off-by: Manasi Navare Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200701221052.8946-2-manasi.d.navare@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 2120d6da3fcc..5773ebefffc7 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1195,6 +1195,21 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, port_name(port)); } +static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv, + enum port port) +{ + /* Wait > 518 usecs for DDI_BUF_CTL to be non idle */ + if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) { + usleep_range(518, 1000); + return; + } + + if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) & + DDI_BUF_IS_IDLE), 500)) + drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n", + port_name(port)); +} + static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) { switch (pll->info->id) { @@ -4017,7 +4032,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP); intel_de_posting_read(dev_priv, DDI_BUF_CTL(port)); - udelay(600); + intel_wait_ddi_buf_active(dev_priv, port); } static void intel_ddi_set_link_train(struct intel_dp *intel_dp, -- cgit v1.2.3 From fdeb6d02686f0ca88c12a4e73c926eab8f728f2a Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 7 Jul 2020 17:39:44 -0700 Subject: drm/i915: Convert device_info to uncore/de_read Use intel__read instead of I915_READ to read the informational registers. Extended from an original sseu-only patch by Sandeep. 
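I915_READ() is the legacy form that implicitly picks up a dev_priv from the surrounding scope, while intel_uncore_read()/intel_de_read() make the MMIO owner an explicit argument, which matters once more than one GT or display handle is in play. The difference is the usual implicit-context macro versus explicit-handle API, sketched here with stand-in names:

#include <stdint.h>
#include <stdio.h>

struct uncore { uint32_t regs[16]; };   /* stand-in MMIO window */

/* Explicit-handle accessor: the caller says which device it touches. */
static uint32_t uncore_read(struct uncore *uncore, unsigned int reg)
{
        return uncore->regs[reg];
}

/*
 * Implicit-context macro in the old style: only works if a variable named
 * dev_uncore happens to be in scope, which hides the dependency and breaks
 * down once more than one device (or GT) is involved.
 */
#define OLD_READ(reg) uncore_read(dev_uncore, (reg))

int main(void)
{
        struct uncore gt0 = { .regs = { [3] = 0xdeadbeef } };
        struct uncore *dev_uncore = &gt0;

        printf("%#x\n", (unsigned int)OLD_READ(3));             /* implicit dev_uncore */
        printf("%#x\n", (unsigned int)uncore_read(&gt0, 3));    /* explicit handle */
        return 0;
}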
Signed-off-by: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Cc: Andi Shyti Cc: Venkata Sandeep Dhanalakota Reviewed-by: Tvrtko Ursulin Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200708003952.21831-2-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/intel_device_info.c | 77 +++++++++++++++++++------------- 1 file changed, 47 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 544ac61fbc36..c27a56aff5de 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -26,6 +26,7 @@ #include #include "display/intel_cdclk.h" +#include "display/intel_de.h" #include "intel_device_info.h" #include "i915_drv.h" @@ -237,6 +238,7 @@ static void gen11_compute_sseu_info(struct sseu_dev_info *sseu, static void gen12_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + struct intel_uncore *uncore = &dev_priv->uncore; u8 s_en; u32 dss_en; u16 eu_en = 0; @@ -250,12 +252,14 @@ static void gen12_sseu_info_init(struct drm_i915_private *dev_priv) */ intel_sseu_set_info(sseu, 1, 6, 16); - s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; + s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) & + GEN11_GT_S_ENA_MASK; - dss_en = I915_READ(GEN12_GT_DSS_ENABLE); + dss_en = intel_uncore_read(uncore, GEN12_GT_DSS_ENABLE); /* one bit per pair of EUs */ - eu_en_fuse = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK); + eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) & + GEN11_EU_DIS_MASK); for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++) if (eu_en_fuse & BIT(eu)) eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1); @@ -269,6 +273,7 @@ static void gen12_sseu_info_init(struct drm_i915_private *dev_priv) static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + struct intel_uncore *uncore = &dev_priv->uncore; u8 s_en; u32 ss_en; u8 eu_en; @@ -278,9 +283,12 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) else intel_sseu_set_info(sseu, 1, 8, 8); - s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; - ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE); - eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK); + s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) & + GEN11_GT_S_ENA_MASK; + ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE); + + eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) & + GEN11_EU_DIS_MASK); gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en); @@ -292,8 +300,9 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) { + struct intel_uncore *uncore = &dev_priv->uncore; struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; - const u32 fuse2 = I915_READ(GEN8_FUSE2); + const u32 fuse2 = intel_uncore_read(uncore, GEN8_FUSE2); int s, ss; const int eu_mask = 0xff; u32 subslice_mask, eu_en; @@ -304,26 +313,26 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) GEN10_F2_S_ENA_SHIFT; /* Slice0 */ - eu_en = ~I915_READ(GEN8_EU_DISABLE0); + eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE0); for (ss = 0; ss < sseu->max_subslices; ss++) sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask); /* Slice1 */ sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask); - eu_en = ~I915_READ(GEN8_EU_DISABLE1); + eu_en = ~intel_uncore_read(uncore, 
GEN8_EU_DISABLE1); sseu_set_eus(sseu, 1, 1, eu_en & eu_mask); /* Slice2 */ sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask); sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask); /* Slice3 */ sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask); - eu_en = ~I915_READ(GEN8_EU_DISABLE2); + eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE2); sseu_set_eus(sseu, 3, 1, eu_en & eu_mask); /* Slice4 */ sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask); sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask); /* Slice5 */ sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask); - eu_en = ~I915_READ(GEN10_EU_DISABLE3); + eu_en = ~intel_uncore_read(uncore, GEN10_EU_DISABLE3); sseu_set_eus(sseu, 5, 1, eu_en & eu_mask); subslice_mask = (1 << 4) - 1; @@ -372,7 +381,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) u32 fuse; u8 subslice_mask = 0; - fuse = I915_READ(CHV_FUSE_GT); + fuse = intel_uncore_read(&dev_priv->uncore, CHV_FUSE_GT); sseu->slice_mask = BIT(0); intel_sseu_set_info(sseu, 1, 2, 8); @@ -425,11 +434,12 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) { struct intel_device_info *info = mkwrite_device_info(dev_priv); struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + struct intel_uncore *uncore = &dev_priv->uncore; int s, ss; u32 fuse2, eu_disable, subslice_mask; const u8 eu_mask = 0xff; - fuse2 = I915_READ(GEN8_FUSE2); + fuse2 = intel_uncore_read(uncore, GEN8_FUSE2); sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; /* BXT has a single slice and at most 3 subslices. */ @@ -455,7 +465,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) intel_sseu_set_subslices(sseu, s, subslice_mask); - eu_disable = I915_READ(GEN9_EU_DISABLE(s)); + eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s)); for (ss = 0; ss < sseu->max_subslices; ss++) { int eu_per_ss; u8 eu_disabled_mask; @@ -528,10 +538,12 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) static void bdw_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + struct intel_uncore *uncore = &dev_priv->uncore; int s, ss; u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */ + u32 eu_disable0, eu_disable1, eu_disable2; - fuse2 = I915_READ(GEN8_FUSE2); + fuse2 = intel_uncore_read(uncore, GEN8_FUSE2); sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; intel_sseu_set_info(sseu, 3, 3, 8); @@ -542,13 +554,15 @@ static void bdw_sseu_info_init(struct drm_i915_private *dev_priv) subslice_mask = GENMASK(sseu->max_subslices - 1, 0); subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT); - - eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK; - eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) | - ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) << + eu_disable0 = intel_uncore_read(uncore, GEN8_EU_DISABLE0); + eu_disable1 = intel_uncore_read(uncore, GEN8_EU_DISABLE1); + eu_disable2 = intel_uncore_read(uncore, GEN8_EU_DISABLE2); + eu_disable[0] = eu_disable0 & GEN8_EU_DIS0_S0_MASK; + eu_disable[1] = (eu_disable0 >> GEN8_EU_DIS0_S1_SHIFT) | + ((eu_disable1 & GEN8_EU_DIS1_S1_MASK) << (32 - GEN8_EU_DIS0_S1_SHIFT)); - eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) | - ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) << + eu_disable[2] = (eu_disable1 >> GEN8_EU_DIS1_S2_SHIFT) | + ((eu_disable2 & GEN8_EU_DIS2_S2_MASK) << (32 - GEN8_EU_DIS1_S2_SHIFT)); /* @@ -635,7 +649,7 @@ static void 
hsw_sseu_info_init(struct drm_i915_private *dev_priv) break; } - fuse1 = I915_READ(HSW_PAVP_FUSE1); + fuse1 = intel_uncore_read(&dev_priv->uncore, HSW_PAVP_FUSE1); switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) { default: MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >> @@ -675,7 +689,8 @@ static void hsw_sseu_info_init(struct drm_i915_private *dev_priv) static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv) { - u32 ts_override = I915_READ(GEN9_TIMESTAMP_OVERRIDE); + u32 ts_override = intel_uncore_read(&dev_priv->uncore, + GEN9_TIMESTAMP_OVERRIDE); u32 base_freq, frac_freq; base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >> @@ -738,6 +753,7 @@ static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv, static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv) { + struct intel_uncore *uncore = &dev_priv->uncore; u32 f12_5_mhz = 12500000; u32 f19_2_mhz = 19200000; u32 f24_mhz = 24000000; @@ -759,7 +775,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv) */ return f12_5_mhz; } else if (INTEL_GEN(dev_priv) <= 9) { - u32 ctc_reg = I915_READ(CTC_MODE); + u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); u32 freq = 0; if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { @@ -777,7 +793,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv) return freq; } else if (INTEL_GEN(dev_priv) <= 12) { - u32 ctc_reg = I915_READ(CTC_MODE); + u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); u32 freq = 0; /* First figure out the reference frequency. There are 2 ways @@ -788,7 +804,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv) if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { freq = read_reference_ts_freq(dev_priv); } else { - u32 rpm_config_reg = I915_READ(RPM_CONFIG0); + u32 rpm_config_reg = intel_uncore_read(uncore, RPM_CONFIG0); if (INTEL_GEN(dev_priv) <= 10) freq = gen10_get_crystal_clock_freq(dev_priv, @@ -967,8 +983,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) && HAS_PCH_SPLIT(dev_priv)) { - u32 fuse_strap = I915_READ(FUSE_STRAP); - u32 sfuse_strap = I915_READ(SFUSE_STRAP); + u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); + u32 sfuse_strap = intel_de_read(dev_priv, SFUSE_STRAP); /* * SFUSE_STRAP is supposed to have a bit signalling the display @@ -993,7 +1009,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C); } } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) { - u32 dfsm = I915_READ(SKL_DFSM); + u32 dfsm = intel_de_read(dev_priv, SKL_DFSM); if (dfsm & SKL_DFSM_PIPE_A_DISABLE) { info->pipe_mask &= ~BIT(PIPE_A); @@ -1083,6 +1099,7 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps, void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) { struct intel_device_info *info = mkwrite_device_info(dev_priv); + struct intel_uncore *uncore = &dev_priv->uncore; unsigned int logical_vdbox = 0; unsigned int i; u32 media_fuse; @@ -1092,7 +1109,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) < 11) return; - media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE); + media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE); vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> -- cgit v1.2.3 From 
242613af557fbb3842c3e496ffca232103955bc2 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 7 Jul 2020 17:39:45 -0700 Subject: drm/i915: Use the gt in HAS_ENGINE A follow up patch will move the engine mask under the gt structure, so get ready for that. v2: switch the remaining gvt case using dev_priv->gt to gvt->gt (Chris) Signed-off-by: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Cc: Andi Shyti Cc: Chris Wilson Reviewed-by: Tvrtko Ursulin #v1 Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200708003952.21831-3-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +- drivers/gpu/drm/i915/gt/intel_gt_irq.c | 2 +- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 7 ++++--- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- drivers/gpu/drm/i915/gvt/interrupt.c | 2 +- drivers/gpu/drm/i915/gvt/mmio_context.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 15 ++++++++------- drivers/gpu/drm/i915/intel_device_info.c | 13 +++++++------ drivers/gpu/drm/i915/intel_pm.c | 2 +- drivers/gpu/drm/i915/intel_uncore.c | 16 +++++++++------- drivers/gpu/drm/i915/intel_uncore.h | 4 +++- 12 files changed, 38 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 7bf2f76212f0..be92d1ef9aa9 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -473,7 +473,7 @@ int intel_engines_init_mmio(struct intel_gt *gt) return -ENODEV; for (i = 0; i < ARRAY_SIZE(intel_engines); i++) { - if (!HAS_ENGINE(i915, i)) + if (!HAS_ENGINE(gt, i)) continue; err = intel_engine_setup(gt, i); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index 0cc7dd54f4f9..e1964cf40fd6 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -457,7 +457,7 @@ void gen5_gt_irq_postinstall(struct intel_gt *gt) * RPS interrupts will get enabled/disabled on demand when RPS * itself is enabled/disabled. 
*/ - if (HAS_ENGINE(gt->i915, VECS0)) { + if (HAS_ENGINE(gt, VECS0)) { pm_irqs |= PM_VEBOX_USER_INTERRUPT; gt->pm_ier |= PM_VEBOX_USER_INTERRUPT; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 101728006ae9..fbdd6b0677db 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -67,7 +67,8 @@ struct __guc_ads_blob { static void __guc_ads_init(struct intel_guc *guc) { - struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915; + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *dev_priv = gt->i915; struct __guc_ads_blob *blob = guc->ads_blob; const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; u32 base; @@ -103,8 +104,8 @@ static void __guc_ads_init(struct intel_guc *guc) blob->system_info.rcs_enabled = 1; blob->system_info.bcs_enabled = 1; - blob->system_info.vdbox_enable_mask = VDBOX_MASK(dev_priv); - blob->system_info.vebox_enable_mask = VEBOX_MASK(dev_priv); + blob->system_info.vdbox_enable_mask = VDBOX_MASK(gt); + blob->system_info.vebox_enable_mask = VEBOX_MASK(gt); blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; base = intel_guc_ggtt_offset(guc, guc->ads_vma); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 26cae4846c82..686013f4a2f8 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1867,7 +1867,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu, MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \ MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \ MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \ - if (HAS_ENGINE(dev_priv, VCS1)) \ + if (HAS_ENGINE(gvt->gt, VCS1)) \ MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \ } while (0) diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 540017fed908..7498878e6289 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -540,7 +540,7 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1); - if (HAS_ENGINE(gvt->gt->i915, VCS1)) { + if (HAS_ENGINE(gvt->gt, VCS1)) { SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW, diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 2ccaf78f96e8..86a60bdf0818 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -171,7 +171,7 @@ static void load_render_mocs(const struct intel_engine_cs *engine) return; for (ring_id = 0; ring_id < cnt; ring_id++) { - if (!HAS_ENGINE(engine->i915, ring_id)) + if (!HAS_ENGINE(engine->gt, ring_id)) continue; offset.reg = regs[ring_id]; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 67102dc26fce..1f9c40cf10ae 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -533,7 +533,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) intel_device_info_init_mmio(dev_priv); - intel_uncore_prune_mmio_domains(&dev_priv->uncore); + intel_uncore_prune_engine_fw_domains(&dev_priv->uncore, &dev_priv->gt); intel_uc_init_mmio(&dev_priv->gt.uc); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2c2e88d49f3e..b3968beb7048 
100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1562,18 +1562,19 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv)) #define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv)) -#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id)) +#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id)) +#define HAS_ENGINE(gt, id) __HAS_ENGINE(INTEL_INFO((gt)->i915)->engine_mask, id) -#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \ +#define ENGINE_INSTANCES_MASK(gt, first, count) ({ \ unsigned int first__ = (first); \ unsigned int count__ = (count); \ - (INTEL_INFO(dev_priv)->engine_mask & \ + (INTEL_INFO((gt)->i915)->engine_mask & \ GENMASK(first__ + count__ - 1, first__)) >> first__; \ }) -#define VDBOX_MASK(dev_priv) \ - ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS) -#define VEBOX_MASK(dev_priv) \ - ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS) +#define VDBOX_MASK(gt) \ + ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS) +#define VEBOX_MASK(gt) \ + ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS) /* * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index c27a56aff5de..c0443afa12b9 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -1100,6 +1100,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) { struct intel_device_info *info = mkwrite_device_info(dev_priv); struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &dev_priv->gt; unsigned int logical_vdbox = 0; unsigned int i; u32 media_fuse; @@ -1116,7 +1117,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) GEN11_GT_VEBOX_DISABLE_SHIFT; for (i = 0; i < I915_MAX_VCS; i++) { - if (!HAS_ENGINE(dev_priv, _VCS(i))) { + if (!HAS_ENGINE(gt, _VCS(i))) { vdbox_mask &= ~BIT(i); continue; } @@ -1136,11 +1137,11 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i); } drm_dbg(&dev_priv->drm, "vdbox enable: %04x, instances: %04lx\n", - vdbox_mask, VDBOX_MASK(dev_priv)); - GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv)); + vdbox_mask, VDBOX_MASK(gt)); + GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt)); for (i = 0; i < I915_MAX_VECS; i++) { - if (!HAS_ENGINE(dev_priv, _VECS(i))) { + if (!HAS_ENGINE(gt, _VECS(i))) { vebox_mask &= ~BIT(i); continue; } @@ -1151,6 +1152,6 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) } } drm_dbg(&dev_priv->drm, "vebox enable: %04x, instances: %04lx\n", - vebox_mask, VEBOX_MASK(dev_priv)); - GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv)); + vebox_mask, VEBOX_MASK(gt)); + GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt)); } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2d980b83a1f1..ea1f79d36f7c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7114,7 +7114,7 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) /* This is not a WA. 
Enable VD HCP & MFX_ENC powergate */ for (i = 0; i < I915_MAX_VCS; i++) { - if (HAS_ENGINE(dev_priv, _VCS(i))) + if (HAS_ENGINE(&dev_priv->gt, _VCS(i))) vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) | VDN_MFX_POWERGATE_ENABLE(i); } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 8e2c073da1aa..83e576cff161 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1529,6 +1529,8 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__)))) if (INTEL_GEN(i915) >= 11) { + /* we'll prune the domains of missing engines later */ + intel_engine_mask_t emask = INTEL_INFO(i915)->engine_mask; int i; uncore->funcs.force_wake_get = fw_domains_get_with_fallback; @@ -1541,7 +1543,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) FORCEWAKE_ACK_BLITTER_GEN9); for (i = 0; i < I915_MAX_VCS; i++) { - if (!HAS_ENGINE(i915, _VCS(i))) + if (!__HAS_ENGINE(emask, _VCS(i))) continue; fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i, @@ -1549,7 +1551,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i)); } for (i = 0; i < I915_MAX_VECS; i++) { - if (!HAS_ENGINE(i915, _VECS(i))) + if (!__HAS_ENGINE(emask, _VECS(i))) continue; fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i, @@ -1844,20 +1846,20 @@ out_mmio_cleanup: * the forcewake domains. Prune them, to make sure they only reference existing * engines. */ -void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore) +void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, + struct intel_gt *gt) { - struct drm_i915_private *i915 = uncore->i915; enum forcewake_domains fw_domains = uncore->fw_domains; enum forcewake_domain_id domain_id; int i; - if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11) + if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(uncore->i915) < 11) return; for (i = 0; i < I915_MAX_VCS; i++) { domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i; - if (HAS_ENGINE(i915, _VCS(i))) + if (HAS_ENGINE(gt, _VCS(i))) continue; if (fw_domains & BIT(domain_id)) @@ -1867,7 +1869,7 @@ void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore) for (i = 0; i < I915_MAX_VECS; i++) { domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i; - if (HAS_ENGINE(i915, _VECS(i))) + if (HAS_ENGINE(gt, _VECS(i))) continue; if (fw_domains & BIT(domain_id)) diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 8d3aa8b9acf9..c4b22d9d0b45 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -35,6 +35,7 @@ struct drm_i915_private; struct intel_runtime_pm; struct intel_uncore; +struct intel_gt; struct intel_uncore_mmio_debug { spinlock_t lock; /** lock is also taken in irq contexts. 
*/ @@ -186,7 +187,8 @@ intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug); void intel_uncore_init_early(struct intel_uncore *uncore, struct drm_i915_private *i915); int intel_uncore_init_mmio(struct intel_uncore *uncore); -void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore); +void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, + struct intel_gt *gt); bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore); bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore); void intel_uncore_fini_mmio(struct intel_uncore *uncore); -- cgit v1.2.3 From f6beb38100778b7b2b18900b039dcb5118714a0a Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 7 Jul 2020 17:39:46 -0700 Subject: drm/i915: Move engine-related mmio init to engines_init_mmio All the info we read in intel_device_info_init_mmio are engine-related and since we already have an engine_init_mmio function we can just perform the operations from there. v2: clarify comment about forcewake requirements and pruning (Chris) Signed-off-by: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Cc: Andi Shyti Cc: Chris Wilson Reviewed-by: Tvrtko Ursulin #v1 Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200708003952.21831-4-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 76 ++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/i915_drv.c | 4 -- drivers/gpu/drm/i915/intel_device_info.c | 66 --------------------------- drivers/gpu/drm/i915/intel_device_info.h | 2 - 4 files changed, 75 insertions(+), 73 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index be92d1ef9aa9..04114de15fe3 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -450,6 +450,78 @@ void intel_engines_free(struct intel_gt *gt) } } +/* + * Determine which engines are fused off in our particular hardware. + * Note that we have a catch-22 situation where we need to be able to access + * the blitter forcewake domain to read the engine fuses, but at the same time + * we need to know which engines are available on the system to know which + * forcewake domains are present. We solve this by intializing the forcewake + * domains based on the full engine mask in the platform capabilities before + * calling this function and pruning the domains for fused-off engines + * afterwards. + */ +static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_device_info *info = mkwrite_device_info(i915); + struct intel_uncore *uncore = gt->uncore; + unsigned int logical_vdbox = 0; + unsigned int i; + u32 media_fuse; + u16 vdbox_mask; + u16 vebox_mask; + + if (INTEL_GEN(i915) < 11) + return info->engine_mask; + + media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE); + + vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; + vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> + GEN11_GT_VEBOX_DISABLE_SHIFT; + + for (i = 0; i < I915_MAX_VCS; i++) { + if (!HAS_ENGINE(gt, _VCS(i))) { + vdbox_mask &= ~BIT(i); + continue; + } + + if (!(BIT(i) & vdbox_mask)) { + info->engine_mask &= ~BIT(_VCS(i)); + drm_dbg(&i915->drm, "vcs%u fused off\n", i); + continue; + } + + /* + * In Gen11, only even numbered logical VDBOXes are + * hooked up to an SFC (Scaler & Format Converter) unit. + * In TGL each VDBOX has access to an SFC. 
+ */ + if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0) + RUNTIME_INFO(i915)->vdbox_sfc_access |= BIT(i); + } + drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n", + vdbox_mask, VDBOX_MASK(gt)); + GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt)); + + for (i = 0; i < I915_MAX_VECS; i++) { + if (!HAS_ENGINE(gt, _VECS(i))) { + vebox_mask &= ~BIT(i); + continue; + } + + if (!(BIT(i) & vebox_mask)) { + info->engine_mask &= ~BIT(_VECS(i)); + drm_dbg(&i915->drm, "vecs%u fused off\n", i); + } + } + drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n", + vebox_mask, VEBOX_MASK(gt)); + GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt)); + + return info->engine_mask; +} + /** * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers * @gt: pointer to struct intel_gt @@ -460,7 +532,7 @@ int intel_engines_init_mmio(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; struct intel_device_info *device_info = mkwrite_device_info(i915); - const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask; + const unsigned int engine_mask = init_engine_mask(gt); unsigned int mask = 0; unsigned int i; int err; @@ -497,6 +569,8 @@ int intel_engines_init_mmio(struct intel_gt *gt) intel_setup_engine_capabilities(gt); + intel_uncore_prune_engine_fw_domains(gt->uncore, gt); + return 0; cleanup: diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 1f9c40cf10ae..611287353420 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -531,10 +531,6 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev_priv); - intel_device_info_init_mmio(dev_priv); - - intel_uncore_prune_engine_fw_domains(&dev_priv->uncore, &dev_priv->gt); - intel_uc_init_mmio(&dev_priv->gt.uc); ret = intel_engines_init_mmio(&dev_priv->gt); diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index c0443afa12b9..92ebea35c752 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -1089,69 +1089,3 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps, yesno(caps->has_logical_contexts)); drm_printf(p, "scheduler: %x\n", caps->scheduler); } - -/* - * Determine which engines are fused off in our particular hardware. Since the - * fuse register is in the blitter powerwell, we need forcewake to be ready at - * this point (but later we need to prune the forcewake domains for engines that - * are indeed fused off). 
- */ -void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) -{ - struct intel_device_info *info = mkwrite_device_info(dev_priv); - struct intel_uncore *uncore = &dev_priv->uncore; - struct intel_gt *gt = &dev_priv->gt; - unsigned int logical_vdbox = 0; - unsigned int i; - u32 media_fuse; - u16 vdbox_mask; - u16 vebox_mask; - - if (INTEL_GEN(dev_priv) < 11) - return; - - media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE); - - vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; - vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> - GEN11_GT_VEBOX_DISABLE_SHIFT; - - for (i = 0; i < I915_MAX_VCS; i++) { - if (!HAS_ENGINE(gt, _VCS(i))) { - vdbox_mask &= ~BIT(i); - continue; - } - - if (!(BIT(i) & vdbox_mask)) { - info->engine_mask &= ~BIT(_VCS(i)); - drm_dbg(&dev_priv->drm, "vcs%u fused off\n", i); - continue; - } - - /* - * In Gen11, only even numbered logical VDBOXes are - * hooked up to an SFC (Scaler & Format Converter) unit. - * In TGL each VDBOX has access to an SFC. - */ - if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0) - RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i); - } - drm_dbg(&dev_priv->drm, "vdbox enable: %04x, instances: %04lx\n", - vdbox_mask, VDBOX_MASK(gt)); - GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt)); - - for (i = 0; i < I915_MAX_VECS; i++) { - if (!HAS_ENGINE(gt, _VECS(i))) { - vebox_mask &= ~BIT(i); - continue; - } - - if (!(BIT(i) & vebox_mask)) { - info->engine_mask &= ~BIT(_VECS(i)); - drm_dbg(&dev_priv->drm, "vecs%u fused off\n", i); - } - } - drm_dbg(&dev_priv->drm, "vebox enable: %04x, instances: %04lx\n", - vebox_mask, VEBOX_MASK(gt)); - GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt)); -} diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 8d62b8538585..fa60fdc1d75a 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -250,8 +250,6 @@ void intel_device_info_print_runtime(const struct intel_runtime_info *info, void intel_device_info_print_topology(const struct sseu_dev_info *sseu, struct drm_printer *p); -void intel_device_info_init_mmio(struct drm_i915_private *dev_priv); - void intel_driver_caps_print(const struct intel_driver_caps *caps, struct drm_printer *p); -- cgit v1.2.3 From 792592e72aba4162c35cd3155d4d5b99d5fb5762 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 7 Jul 2020 17:39:47 -0700 Subject: drm/i915: Move the engine mask to intel_gt_info Since the engines belong to the GT, move the runtime-updated list of available engines to the intel_gt struct. The original mask has been renamed to indicate it contains the maximum engine list that can be found on a matching device. In preparation for other info being moved to the gt in follow up patches (sseu), introduce an intel_gt_info structure to group all gt-related runtime info. 
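Reviewer aside, not part of the patch: a minimal, compilable userspace sketch of the layering this commit introduces — a constant platform_engine_mask in the device info versus a runtime-pruned engine_mask in the per-GT info that HAS_ENGINE() now consults. The struct and macro names mirror the diff below, but the fused_off parameter, the hweight() helper and the main() driver are invented for illustration; the real init_engine_mask() reads fuse registers and the real HAS_ENGINE() takes a struct intel_gt rather than the info struct directly.

/* sketch.c: standalone illustration of platform vs runtime engine masks */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t intel_engine_mask_t;

enum { RCS0, BCS0, VCS0, VCS1, VECS0 };            /* subset of engine ids */

#define BIT(n)                  (1u << (n))
#define __HAS_ENGINE(mask, id)  ((mask) & BIT(id))
/* simplified: takes the info struct, not a struct intel_gt as in the patch */
#define HAS_ENGINE(info, id)    __HAS_ENGINE((info)->engine_mask, id)

struct intel_device_info {                          /* per-platform, const */
	intel_engine_mask_t platform_engine_mask;   /* max engines for this device */
};

struct intel_gt_info {                              /* per-GT, runtime */
	intel_engine_mask_t engine_mask;            /* engines actually present */
	uint8_t num_engines;
};

/* portable stand-in for hweight32() */
static unsigned int hweight(intel_engine_mask_t m)
{
	unsigned int n = 0;

	for (; m; m &= m - 1)
		n++;
	return n;
}

/*
 * Stand-in for init_engine_mask(): start from the platform mask and prune
 * engines reported as fused off (here faked via the fused_off argument).
 */
static void init_engine_mask(struct intel_gt_info *info,
			     const struct intel_device_info *devinfo,
			     intel_engine_mask_t fused_off)
{
	info->engine_mask = devinfo->platform_engine_mask & ~fused_off;
	info->num_engines = hweight(info->engine_mask);
}

int main(void)
{
	const struct intel_device_info devinfo = {
		.platform_engine_mask = BIT(RCS0) | BIT(BCS0) |
					BIT(VCS0) | BIT(VCS1) | BIT(VECS0),
	};
	struct intel_gt_info gt_info;

	/* pretend VCS1 is fused off on this particular part */
	init_engine_mask(&gt_info, &devinfo, BIT(VCS1));

	printf("VCS0 present: %d\n", !!HAS_ENGINE(&gt_info, VCS0)); /* 1 */
	printf("VCS1 present: %d\n", !!HAS_ENGINE(&gt_info, VCS1)); /* 0 */
	printf("engines: %d\n", gt_info.num_engines);               /* 4 */
	return 0;
}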
v2: s/max_engine_mask/platform_engine_mask (tvrtko), fix selftest Signed-off-by: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Cc: Andi Shyti Cc: Venkata Sandeep Dhanalakota Reviewed-by: Tvrtko Ursulin #v1 Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200708003952.21831-5-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 3 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 13 ++++---- drivers/gpu/drm/i915/gt/intel_gt.c | 6 ++++ drivers/gpu/drm/i915/gt/intel_gt.h | 4 +++ drivers/gpu/drm/i915/gt/intel_gt_types.h | 8 +++++ drivers/gpu/drm/i915/gt/intel_reset.c | 6 ++-- drivers/gpu/drm/i915/gt/intel_ring_submission.c | 2 +- drivers/gpu/drm/i915/gt/selftest_lrc.c | 8 ++--- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 2 +- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- drivers/gpu/drm/i915/i915_debugfs.c | 2 ++ drivers/gpu/drm/i915/i915_drv.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 6 ++-- drivers/gpu/drm/i915/i915_gpu_error.c | 23 ++++++++----- drivers/gpu/drm/i915/i915_gpu_error.h | 3 ++ drivers/gpu/drm/i915/i915_pci.c | 42 ++++++++++++------------ drivers/gpu/drm/i915/intel_device_info.c | 1 - drivers/gpu/drm/i915/intel_device_info.h | 7 +--- drivers/gpu/drm/i915/intel_uncore.c | 2 +- drivers/gpu/drm/i915/selftests/i915_request.c | 2 +- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 3 +- 21 files changed, 84 insertions(+), 62 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index dd15a799f9d6..6b4ec66cb558 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -1980,8 +1980,7 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) static int num_vcs_engines(const struct drm_i915_private *i915) { - return hweight64(INTEL_INFO(i915)->engine_mask & - GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0)); + return hweight64(VDBOX_MASK(&i915->gt)); } /* diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 04114de15fe3..fca3c2348e5e 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -370,7 +370,7 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine) * instances. */ if ((INTEL_GEN(i915) >= 11 && - RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) || + engine->gt->info.vdbox_sfc_access & engine->mask) || (INTEL_GEN(i915) >= 9 && engine->instance == 0)) engine->uabi_capabilities |= I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC; @@ -463,7 +463,7 @@ void intel_engines_free(struct intel_gt *gt) static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; - struct intel_device_info *info = mkwrite_device_info(i915); + struct intel_gt_info *info = >->info; struct intel_uncore *uncore = gt->uncore; unsigned int logical_vdbox = 0; unsigned int i; @@ -471,6 +471,8 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) u16 vdbox_mask; u16 vebox_mask; + info->engine_mask = INTEL_INFO(i915)->platform_engine_mask; + if (INTEL_GEN(i915) < 11) return info->engine_mask; @@ -498,7 +500,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) * In TGL each VDBOX has access to an SFC. 
*/ if (INTEL_GEN(i915) >= 12 || logical_vdbox++ % 2 == 0) - RUNTIME_INFO(i915)->vdbox_sfc_access |= BIT(i); + gt->info.vdbox_sfc_access |= BIT(i); } drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n", vdbox_mask, VDBOX_MASK(gt)); @@ -531,7 +533,6 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt) int intel_engines_init_mmio(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; - struct intel_device_info *device_info = mkwrite_device_info(i915); const unsigned int engine_mask = init_engine_mask(gt); unsigned int mask = 0; unsigned int i; @@ -561,9 +562,9 @@ int intel_engines_init_mmio(struct intel_gt *gt) * engines. */ if (drm_WARN_ON(&i915->drm, mask != engine_mask)) - device_info->engine_mask = mask; + gt->info.engine_mask = mask; - RUNTIME_INFO(i915)->num_engines = hweight32(mask); + gt->info.num_engines = hweight32(mask); intel_gt_check_and_clear_faults(gt); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 876f78759095..6a268c6d6a6f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -642,3 +642,9 @@ void intel_gt_driver_late_release(struct intel_gt *gt) intel_gt_fini_timelines(gt); intel_engines_free(gt); } + +void intel_gt_info_print(const struct intel_gt_info *info, + struct drm_printer *p) +{ + drm_printf(p, "available engines: %x\n", info->engine_mask); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 982957ca4e62..908fc5dea885 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -11,6 +11,7 @@ #include "intel_reset.h" struct drm_i915_private; +struct drm_printer; #define GT_TRACE(gt, fmt, ...) do { \ const struct intel_gt *gt__ __maybe_unused = (gt); \ @@ -72,4 +73,7 @@ static inline bool intel_gt_is_wedged(const struct intel_gt *gt) return unlikely(test_bit(I915_WEDGED, >->reset.flags)); } +void intel_gt_info_print(const struct intel_gt_info *info, + struct drm_printer *p); + #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 0cc1d6b185dc..bb7551867c00 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -109,6 +109,14 @@ struct intel_gt { struct intel_gt_buffer_pool buffer_pool; struct i915_vma *scratch; + + struct intel_gt_info { + intel_engine_mask_t engine_mask; + u8 num_engines; + + /* Media engine access to SFC per instance */ + u8 vdbox_sfc_access; + } info; }; enum intel_gt_scratch_field { diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 121bf39a6f3e..46a5ceffc22f 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -342,7 +342,7 @@ static int gen6_reset_engines(struct intel_gt *gt, static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask) { struct intel_uncore *uncore = engine->uncore; - u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access; + u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access; i915_reg_t sfc_forced_lock, sfc_forced_lock_ack; u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit; i915_reg_t sfc_usage; @@ -417,7 +417,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask) static void gen11_unlock_sfc(struct intel_engine_cs *engine) { struct intel_uncore *uncore = engine->uncore; - u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access; + u8 vdbox_sfc_access = 
engine->gt->info.vdbox_sfc_access; i915_reg_t sfc_forced_lock; u32 sfc_forced_lock_bit; @@ -1246,7 +1246,7 @@ void intel_gt_handle_error(struct intel_gt *gt, */ wakeref = intel_runtime_pm_get(gt->uncore->rpm); - engine_mask &= INTEL_INFO(gt->i915)->engine_mask; + engine_mask &= gt->info.engine_mask; if (flags & I915_ERROR_CAPTURE) { i915_capture_error_state(gt->i915); diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 68a08486fc87..b09b83deecef 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -649,7 +649,7 @@ static inline int mi_set_context(struct i915_request *rq, struct drm_i915_private *i915 = engine->i915; enum intel_engine_id id; const int num_engines = - IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0; + IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0; bool force_restore = false; int len; u32 *cs; diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index daa4aabab9a7..3fc5de961280 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -963,7 +963,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer, goto out; if (i915_request_wait(head, 0, - 2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) { + 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) { pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n", count, n); GEM_TRACE_DUMP(); @@ -3569,8 +3569,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) } pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", - count, flags, - RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext); + count, flags, smoke->gt->info.num_engines, smoke->ncontext); return 0; } @@ -3597,8 +3596,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags) } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL)); pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", - count, flags, - RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext); + count, flags, smoke->gt->info.num_engines, smoke->ncontext); return 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index fbdd6b0677db..c10ae1660e53 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -106,7 +106,7 @@ static void __guc_ads_init(struct intel_guc *guc) blob->system_info.vdbox_enable_mask = VDBOX_MASK(gt); blob->system_info.vebox_enable_mask = VEBOX_MASK(gt); - blob->system_info.vdbox_sfc_support_mask = RUNTIME_INFO(dev_priv)->vdbox_sfc_access; + blob->system_info.vdbox_sfc_support_mask = gt->info.vdbox_sfc_access; base = intel_guc_ggtt_offset(guc, guc->ads_vma); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 686013f4a2f8..b58177f0858a 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -347,7 +347,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id); vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET; } - engine_mask &= INTEL_INFO(vgpu->gvt->gt->i915)->engine_mask; + engine_mask &= vgpu->gvt->gt->info.engine_mask; } /* vgpu_lock already hold by emulate mmio r/w */ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c 
b/drivers/gpu/drm/i915/i915_debugfs.c index 94ed442910d6..41ca8ff2aa16 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -34,6 +34,7 @@ #include "gem/i915_gem_context.h" #include "gt/intel_gt_buffer_pool.h" #include "gt/intel_gt_clock_utils.h" +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_gt_requests.h" #include "gt/intel_reset.h" @@ -61,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data) intel_device_info_print_static(INTEL_INFO(i915), &p); intel_device_info_print_runtime(RUNTIME_INFO(i915), &p); + intel_gt_info_print(&i915->gt.info, &p); intel_driver_caps_print(&i915->caps, &p); kernel_param_lock(THIS_MODULE); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 611287353420..67789df42be8 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -886,6 +886,7 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) intel_device_info_print_static(INTEL_INFO(dev_priv), &p); intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p); + intel_gt_info_print(&dev_priv->gt.info, &p); } if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b3968beb7048..21bb9f7cc452 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1256,7 +1256,7 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) /* Iterator over subset of engines selected by mask */ #define for_each_engine_masked(engine__, gt__, mask__, tmp__) \ - for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \ + for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \ (tmp__) ? \ ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \ 0;) @@ -1563,12 +1563,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv)) #define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id)) -#define HAS_ENGINE(gt, id) __HAS_ENGINE(INTEL_INFO((gt)->i915)->engine_mask, id) +#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id) #define ENGINE_INSTANCES_MASK(gt, first, count) ({ \ unsigned int first__ = (first); \ unsigned int count__ = (count); \ - (INTEL_INFO((gt)->i915)->engine_mask & \ + ((gt)->info.engine_mask & \ GENMASK(first__ + count__ - 1, first__)) >> first__; \ }) #define VDBOX_MASK(gt) \ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 866166ada10e..9cb9aa39c33d 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -42,6 +42,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_lmem.h" +#include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "i915_drv.h" @@ -619,16 +620,15 @@ static void print_error_vma(struct drm_i915_error_state_buf *m, } static void err_print_capabilities(struct drm_i915_error_state_buf *m, - const struct intel_device_info *info, - const struct intel_runtime_info *runtime, - const struct intel_driver_caps *caps) + struct i915_gpu_coredump *error) { struct drm_printer p = i915_error_printer(m); - intel_device_info_print_static(info, &p); - intel_device_info_print_runtime(runtime, &p); - intel_device_info_print_topology(&runtime->sseu, &p); - intel_driver_caps_print(caps, &p); + intel_device_info_print_static(&error->device_info, &p); + intel_device_info_print_runtime(&error->runtime_info, &p); + 
intel_device_info_print_topology(&error->runtime_info.sseu, &p); + intel_gt_info_print(&error->gt->info, &p); + intel_driver_caps_print(&error->driver_caps, &p); } static void err_print_params(struct drm_i915_error_state_buf *m, @@ -798,8 +798,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, if (error->display) intel_display_print_error_state(m, error->display); - err_print_capabilities(m, &error->device_info, &error->runtime_info, - &error->driver_caps); + err_print_capabilities(m, error); err_print_params(m, &error->params); } @@ -1630,6 +1629,11 @@ static void gt_record_regs(struct intel_gt_coredump *gt) gt->pgtbl_er = intel_uncore_read(uncore, PGTBL_ER); } +static void gt_record_info(struct intel_gt_coredump *gt) +{ + memcpy(>->info, >->_gt->info, sizeof(struct intel_gt_info)); +} + /* * Generate a semi-unique error code. The code is not meant to have meaning, The * code's only purpose is to try to prevent false duplicated bug reports by @@ -1808,6 +1812,7 @@ struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915) return ERR_PTR(-ENOMEM); } + gt_record_info(error->gt); gt_record_engines(error->gt, compress); if (INTEL_INFO(i915)->has_gt_uc) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 76b80fbfb7e9..0220b0992808 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -15,6 +15,7 @@ #include #include "gt/intel_engine.h" +#include "gt/intel_gt_types.h" #include "gt/uc/intel_uc_fw.h" #include "intel_device_info.h" @@ -118,6 +119,8 @@ struct intel_gt_coredump { bool awake; bool simulated; + struct intel_gt_info info; + /* Generic register state */ u32 eir; u32 pgtbl_er; diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 0be3b66ce666..db916fff3f0d 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -168,7 +168,7 @@ .gpu_reset_clobbers_display = true, \ .hws_needs_physical = 1, \ .unfenced_needs_alignment = 1, \ - .engine_mask = BIT(RCS0), \ + .platform_engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = false, \ .dma_mask_size = 32, \ @@ -188,7 +188,7 @@ .gpu_reset_clobbers_display = true, \ .hws_needs_physical = 1, \ .unfenced_needs_alignment = 1, \ - .engine_mask = BIT(RCS0), \ + .platform_engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = false, \ .dma_mask_size = 32, \ @@ -226,7 +226,7 @@ static const struct intel_device_info i865g_info = { .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ - .engine_mask = BIT(RCS0), \ + .platform_engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = true, \ .dma_mask_size = 32, \ @@ -317,7 +317,7 @@ static const struct intel_device_info pnv_m_info = { .display.has_hotplug = 1, \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ - .engine_mask = BIT(RCS0), \ + .platform_engine_mask = BIT(RCS0), \ .has_snoop = true, \ .has_coherent_ggtt = true, \ .dma_mask_size = 36, \ @@ -349,7 +349,7 @@ static const struct intel_device_info i965gm_info = { static const struct intel_device_info g45_info = { GEN4_FEATURES, PLATFORM(INTEL_G45), - .engine_mask = BIT(RCS0) | BIT(VCS0), + .platform_engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; @@ -359,7 +359,7 @@ static const struct intel_device_info gm45_info = { .is_mobile = 1, .display.has_fbc = 1, .display.supports_tv = 1, - .engine_mask = BIT(RCS0) | 
BIT(VCS0), + .platform_engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, }; @@ -368,7 +368,7 @@ static const struct intel_device_info gm45_info = { .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ - .engine_mask = BIT(RCS0) | BIT(VCS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0), \ .has_snoop = true, \ .has_coherent_ggtt = true, \ /* ilk does support rc6, but we do not implement [power] contexts */ \ @@ -398,7 +398,7 @@ static const struct intel_device_info ilk_m_info = { .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_coherent_ggtt = true, \ .has_llc = 1, \ .has_rc6 = 1, \ @@ -449,7 +449,7 @@ static const struct intel_device_info snb_m_gt2_info = { .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_coherent_ggtt = true, \ .has_llc = 1, \ .has_rc6 = 1, \ @@ -520,7 +520,7 @@ static const struct intel_device_info vlv_info = { .ppgtt_size = 31, .has_snoop = true, .has_coherent_ggtt = false, - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), .display_mmio_offset = VLV_DISPLAY_BASE, I9XX_PIPE_OFFSETS, I9XX_CURSOR_OFFSETS, @@ -531,7 +531,7 @@ static const struct intel_device_info vlv_info = { #define G75_FEATURES \ GEN7_FEATURES, \ - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \ .display.has_ddi = 1, \ @@ -598,7 +598,7 @@ static const struct intel_device_info bdw_rsvd_info = { static const struct intel_device_info bdw_gt3_info = { BDW_PLATFORM, .gt = 3, - .engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; @@ -609,7 +609,7 @@ static const struct intel_device_info chv_info = { .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), .display.has_hotplug = 1, .is_lp = 1, - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), .has_64bit_reloc = 1, .has_runtime_pm = 1, .has_rc6 = 1, @@ -662,7 +662,7 @@ static const struct intel_device_info skl_gt2_info = { #define SKL_GT3_PLUS_PLATFORM \ SKL_PLATFORM, \ - .engine_mask = \ + .platform_engine_mask = \ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1) @@ -681,7 +681,7 @@ static const struct intel_device_info skl_gt4_info = { .is_lp = 1, \ .num_supported_dbuf_slices = 1, \ .display.has_hotplug = 1, \ - .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \ @@ -744,7 +744,7 @@ static const struct intel_device_info kbl_gt2_info = { static const struct intel_device_info kbl_gt3_info = { KBL_PLATFORM, .gt = 3, - .engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | 
BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; @@ -765,7 +765,7 @@ static const struct intel_device_info cfl_gt2_info = { static const struct intel_device_info cfl_gt3_info = { CFL_PLATFORM, .gt = 3, - .engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1), }; @@ -834,7 +834,7 @@ static const struct intel_device_info cnl_info = { static const struct intel_device_info icl_info = { GEN11_FEATURES, PLATFORM(INTEL_ICELAKE), - .engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; @@ -842,7 +842,7 @@ static const struct intel_device_info ehl_info = { GEN11_FEATURES, PLATFORM(INTEL_ELKHARTLAKE), .require_force_probe = 1, - .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0), .ppgtt_size = 36, }; @@ -878,7 +878,7 @@ static const struct intel_device_info tgl_info = { GEN12_FEATURES, PLATFORM(INTEL_TIGERLAKE), .display.has_modular_fia = 1, - .engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), }; @@ -891,7 +891,7 @@ static const struct intel_device_info rkl_info = { BIT(TRANSCODER_C), .require_force_probe = 1, .display.has_psr_hw_tracking = 0, - .engine_mask = + .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0), }; diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 92ebea35c752..a362a66fce11 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -92,7 +92,6 @@ static const char *iommu_name(void) void intel_device_info_print_static(const struct intel_device_info *info, struct drm_printer *p) { - drm_printf(p, "engines: %x\n", info->engine_mask); drm_printf(p, "gen: %d\n", info->gen); drm_printf(p, "gt: %d\n", info->gt); drm_printf(p, "iommu: %s\n", iommu_name()); diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index fa60fdc1d75a..b010b6728432 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -157,7 +157,7 @@ struct intel_device_info { u8 gen; u8 gt; /* GT number, 0 if undefined */ - intel_engine_mask_t engine_mask; /* Engines supported by the HW */ + intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */ enum intel_platform platform; @@ -219,8 +219,6 @@ struct intel_runtime_info { u8 num_sprites[I915_MAX_PIPES]; u8 num_scalers[I915_MAX_PIPES]; - u8 num_engines; - /* Slice/subslice/EU info */ struct sseu_dev_info sseu; @@ -228,9 +226,6 @@ struct intel_runtime_info { u32 cs_timestamp_frequency_hz; u32 cs_timestamp_period_ns; - - /* Media engine access to SFC per instance */ - u8 vdbox_sfc_access; }; struct intel_driver_caps { diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 83e576cff161..f5edee17902a 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1530,7 +1530,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore) if (INTEL_GEN(i915) >= 11) { /* we'll prune the domains of missing engines later */ - intel_engine_mask_t emask = INTEL_INFO(i915)->engine_mask; + intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask; int i; uncore->funcs.force_wake_get = fw_domains_get_with_fallback; diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 9271aad7f779..57dd6f5122ee 100644 
--- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -1454,7 +1454,7 @@ out_flush: idx++; } pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n", - num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus); + num_waits, num_fences, idx, ncpus); ret = igt_live_test_end(&live) ?: ret; out_contexts: diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 9b105b811f1f..9a46be05425a 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -190,7 +190,8 @@ struct drm_i915_private *mock_gem_device(void) mock_init_ggtt(i915, &i915->ggtt); i915->gt.vm = i915_vm_get(&i915->ggtt.vm); - mkwrite_device_info(i915)->engine_mask = BIT(0); + mkwrite_device_info(i915)->platform_engine_mask = BIT(0); + i915->gt.info.engine_mask = BIT(0); i915->gt.engine[RCS0] = mock_engine(i915, "mock", RCS0); if (!i915->gt.engine[RCS0]) -- cgit v1.2.3 From d0eb6866879fbd14f798aacfa4379c206249c1a6 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 7 Jul 2020 17:39:48 -0700 Subject: drm/i915: Introduce gt_init_mmio We already call 2 gt-related init_mmio functions in driver_mmio_probe and a 3rd one will be added by a follow-up patch, so pre-emptively introduce a gt_init_mmio function to group them. Signed-off-by: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Cc: Andi Shyti Reviewed-by: Tvrtko Ursulin Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200708003952.21831-6-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/intel_gt.c | 7 +++++++ drivers/gpu/drm/i915/gt/intel_gt.h | 1 + drivers/gpu/drm/i915/i915_drv.c | 4 +--- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 6a268c6d6a6f..d96c34802e2b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -44,6 +44,13 @@ void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt) gt->ggtt = ggtt; } +int intel_gt_init_mmio(struct intel_gt *gt) +{ + intel_uc_init_mmio(>->uc); + + return intel_engines_init_mmio(gt); +} + static void init_unused_ring(struct intel_gt *gt, u32 base) { struct intel_uncore *uncore = gt->uncore; diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 908fc5dea885..9157c7411f60 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -36,6 +36,7 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc) void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt); +int intel_gt_init_mmio(struct intel_gt *gt); int __must_check intel_gt_init_hw(struct intel_gt *gt); int intel_gt_init(struct intel_gt *gt); void intel_gt_driver_register(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 67789df42be8..5fd5af4bc855 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -531,9 +531,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev_priv); - intel_uc_init_mmio(&dev_priv->gt.uc); - - ret = intel_engines_init_mmio(&dev_priv->gt); + ret = intel_gt_init_mmio(&dev_priv->gt); if (ret) goto err_uncore; -- cgit v1.2.3 From 
9b413f011c2c62f4d7e1a02ac8add4899a4bc79b Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 7 Jul 2020 17:39:49 -0700 Subject: drm/i915/sseu: Move sseu detection and dump to intel_sseu Keep all the SSEU code in the relevant file. The code has also been updated to use intel_gt instead of dev_priv. Based on an original patch by Sandeep. Signed-off-by: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Cc: Andi Shyti Cc: Venkata Sandeep Dhanalakota Reviewed-by: Tvrtko Ursulin Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200708003952.21831-7-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/intel_gt.c | 1 + drivers/gpu/drm/i915/gt/intel_sseu.c | 586 +++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_sseu.h | 8 + drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_gpu_error.c | 2 +- drivers/gpu/drm/i915/intel_device_info.c | 584 +----------------------------- drivers/gpu/drm/i915/intel_device_info.h | 2 - 7 files changed, 599 insertions(+), 586 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index d96c34802e2b..2c20fe693714 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -47,6 +47,7 @@ void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt) int intel_gt_init_mmio(struct intel_gt *gt) { intel_uc_init_mmio(>->uc); + intel_sseu_info_init(gt); return intel_engines_init_mmio(gt); } diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index d173271c7397..14c15cca3371 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -60,6 +60,547 @@ intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice) return hweight32(intel_sseu_get_subslices(sseu, slice)); } +static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice, + int subslice) +{ + int slice_stride = sseu->max_subslices * sseu->eu_stride; + + return slice * slice_stride + subslice * sseu->eu_stride; +} + +static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice, + int subslice) +{ + int i, offset = sseu_eu_idx(sseu, slice, subslice); + u16 eu_mask = 0; + + for (i = 0; i < sseu->eu_stride; i++) + eu_mask |= + ((u16)sseu->eu_mask[offset + i]) << (i * BITS_PER_BYTE); + + return eu_mask; +} + +static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice, + u16 eu_mask) +{ + int i, offset = sseu_eu_idx(sseu, slice, subslice); + + for (i = 0; i < sseu->eu_stride; i++) + sseu->eu_mask[offset + i] = + (eu_mask >> (BITS_PER_BYTE * i)) & 0xff; +} + +static u16 compute_eu_total(const struct sseu_dev_info *sseu) +{ + u16 i, total = 0; + + for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++) + total += hweight8(sseu->eu_mask[i]); + + return total; +} + +static void gen11_compute_sseu_info(struct sseu_dev_info *sseu, + u8 s_en, u32 ss_en, u16 eu_en) +{ + int s, ss; + + /* ss_en represents entire subslice mask across all slices */ + GEM_BUG_ON(sseu->max_slices * sseu->max_subslices > + sizeof(ss_en) * BITS_PER_BYTE); + + for (s = 0; s < sseu->max_slices; s++) { + if ((s_en & BIT(s)) == 0) + continue; + + sseu->slice_mask |= BIT(s); + + intel_sseu_set_subslices(sseu, s, ss_en); + + for (ss = 0; ss < sseu->max_subslices; ss++) + if (intel_sseu_has_subslice(sseu, s, ss)) + sseu_set_eus(sseu, s, ss, eu_en); + } + sseu->eu_per_subslice = hweight16(eu_en); + sseu->eu_total = compute_eu_total(sseu); +} + +static void gen12_sseu_info_init(struct intel_gt *gt) +{ 
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu; + struct intel_uncore *uncore = gt->uncore; + u32 dss_en; + u16 eu_en = 0; + u8 eu_en_fuse; + u8 s_en; + int eu; + + /* + * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS. + * Instead of splitting these, provide userspace with an array + * of DSS to more closely represent the hardware resource. + */ + intel_sseu_set_info(sseu, 1, 6, 16); + + s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) & + GEN11_GT_S_ENA_MASK; + + dss_en = intel_uncore_read(uncore, GEN12_GT_DSS_ENABLE); + + /* one bit per pair of EUs */ + eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) & + GEN11_EU_DIS_MASK); + for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++) + if (eu_en_fuse & BIT(eu)) + eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1); + + gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en); + + /* TGL only supports slice-level power gating */ + sseu->has_slice_pg = 1; +} + +static void gen11_sseu_info_init(struct intel_gt *gt) +{ + struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu; + struct intel_uncore *uncore = gt->uncore; + u32 ss_en; + u8 eu_en; + u8 s_en; + + if (IS_ELKHARTLAKE(gt->i915)) + intel_sseu_set_info(sseu, 1, 4, 8); + else + intel_sseu_set_info(sseu, 1, 8, 8); + + s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) & + GEN11_GT_S_ENA_MASK; + ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE); + + eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) & + GEN11_EU_DIS_MASK); + + gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en); + + /* ICL has no power gating restrictions. */ + sseu->has_slice_pg = 1; + sseu->has_subslice_pg = 1; + sseu->has_eu_pg = 1; +} + +static void gen10_sseu_info_init(struct intel_gt *gt) +{ + struct intel_uncore *uncore = gt->uncore; + struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu; + const u32 fuse2 = intel_uncore_read(uncore, GEN8_FUSE2); + const int eu_mask = 0xff; + u32 subslice_mask, eu_en; + int s, ss; + + intel_sseu_set_info(sseu, 6, 4, 8); + + sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >> + GEN10_F2_S_ENA_SHIFT; + + /* Slice0 */ + eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE0); + for (ss = 0; ss < sseu->max_subslices; ss++) + sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask); + /* Slice1 */ + sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask); + eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE1); + sseu_set_eus(sseu, 1, 1, eu_en & eu_mask); + /* Slice2 */ + sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask); + sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask); + /* Slice3 */ + sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask); + eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE2); + sseu_set_eus(sseu, 3, 1, eu_en & eu_mask); + /* Slice4 */ + sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask); + sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask); + /* Slice5 */ + sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask); + eu_en = ~intel_uncore_read(uncore, GEN10_EU_DISABLE3); + sseu_set_eus(sseu, 5, 1, eu_en & eu_mask); + + subslice_mask = (1 << 4) - 1; + subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> + GEN10_F2_SS_DIS_SHIFT); + + for (s = 0; s < sseu->max_slices; s++) { + u32 subslice_mask_with_eus = subslice_mask; + + for (ss = 0; ss < sseu->max_subslices; ss++) { + if (sseu_get_eus(sseu, s, ss) == 0) + subslice_mask_with_eus &= ~BIT(ss); + } + + /* + * Slice0 can have up to 3 subslices, but there are only 2 in + * slice1/2. + */ + intel_sseu_set_subslices(sseu, s, s == 0 ? 
+ subslice_mask_with_eus : + subslice_mask_with_eus & 0x3); + } + + sseu->eu_total = compute_eu_total(sseu); + + /* + * CNL is expected to always have a uniform distribution + * of EU across subslices with the exception that any one + * EU in any one subslice may be fused off for die + * recovery. + */ + sseu->eu_per_subslice = + intel_sseu_subslice_total(sseu) ? + DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) : + 0; + + /* No restrictions on Power Gating */ + sseu->has_slice_pg = 1; + sseu->has_subslice_pg = 1; + sseu->has_eu_pg = 1; +} + +static void cherryview_sseu_info_init(struct intel_gt *gt) +{ + struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu; + u32 fuse; + u8 subslice_mask = 0; + + fuse = intel_uncore_read(gt->uncore, CHV_FUSE_GT); + + sseu->slice_mask = BIT(0); + intel_sseu_set_info(sseu, 1, 2, 8); + + if (!(fuse & CHV_FGT_DISABLE_SS0)) { + u8 disabled_mask = + ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >> + CHV_FGT_EU_DIS_SS0_R0_SHIFT) | + (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >> + CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4); + + subslice_mask |= BIT(0); + sseu_set_eus(sseu, 0, 0, ~disabled_mask); + } + + if (!(fuse & CHV_FGT_DISABLE_SS1)) { + u8 disabled_mask = + ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >> + CHV_FGT_EU_DIS_SS1_R0_SHIFT) | + (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >> + CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4); + + subslice_mask |= BIT(1); + sseu_set_eus(sseu, 0, 1, ~disabled_mask); + } + + intel_sseu_set_subslices(sseu, 0, subslice_mask); + + sseu->eu_total = compute_eu_total(sseu); + + /* + * CHV expected to always have a uniform distribution of EU + * across subslices. + */ + sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ? + sseu->eu_total / + intel_sseu_subslice_total(sseu) : + 0; + /* + * CHV supports subslice power gating on devices with more than + * one subslice, and supports EU power gating on devices with + * more than one EU pair per subslice. + */ + sseu->has_slice_pg = 0; + sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1; + sseu->has_eu_pg = (sseu->eu_per_subslice > 2); +} + +static void gen9_sseu_info_init(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_device_info *info = mkwrite_device_info(i915); + struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; + struct intel_uncore *uncore = gt->uncore; + u32 fuse2, eu_disable, subslice_mask; + const u8 eu_mask = 0xff; + int s, ss; + + fuse2 = intel_uncore_read(uncore, GEN8_FUSE2); + sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; + + /* BXT has a single slice and at most 3 subslices. */ + intel_sseu_set_info(sseu, IS_GEN9_LP(i915) ? 1 : 3, + IS_GEN9_LP(i915) ? 3 : 4, 8); + + /* + * The subslice disable field is global, i.e. it applies + * to each of the enabled slices. + */ + subslice_mask = (1 << sseu->max_subslices) - 1; + subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >> + GEN9_F2_SS_DIS_SHIFT); + + /* + * Iterate through enabled slices and subslices to + * count the total enabled EU. 
+ */ + for (s = 0; s < sseu->max_slices; s++) { + if (!(sseu->slice_mask & BIT(s))) + /* skip disabled slice */ + continue; + + intel_sseu_set_subslices(sseu, s, subslice_mask); + + eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s)); + for (ss = 0; ss < sseu->max_subslices; ss++) { + int eu_per_ss; + u8 eu_disabled_mask; + + if (!intel_sseu_has_subslice(sseu, s, ss)) + /* skip disabled subslice */ + continue; + + eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask; + + sseu_set_eus(sseu, s, ss, ~eu_disabled_mask); + + eu_per_ss = sseu->max_eus_per_subslice - + hweight8(eu_disabled_mask); + + /* + * Record which subslice(s) has(have) 7 EUs. we + * can tune the hash used to spread work among + * subslices if they are unbalanced. + */ + if (eu_per_ss == 7) + sseu->subslice_7eu[s] |= BIT(ss); + } + } + + sseu->eu_total = compute_eu_total(sseu); + + /* + * SKL is expected to always have a uniform distribution + * of EU across subslices with the exception that any one + * EU in any one subslice may be fused off for die + * recovery. BXT is expected to be perfectly uniform in EU + * distribution. + */ + sseu->eu_per_subslice = + intel_sseu_subslice_total(sseu) ? + DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) : + 0; + + /* + * SKL+ supports slice power gating on devices with more than + * one slice, and supports EU power gating on devices with + * more than one EU pair per subslice. BXT+ supports subslice + * power gating on devices with more than one subslice, and + * supports EU power gating on devices with more than one EU + * pair per subslice. + */ + sseu->has_slice_pg = + !IS_GEN9_LP(i915) && hweight8(sseu->slice_mask) > 1; + sseu->has_subslice_pg = + IS_GEN9_LP(i915) && intel_sseu_subslice_total(sseu) > 1; + sseu->has_eu_pg = sseu->eu_per_subslice > 2; + + if (IS_GEN9_LP(i915)) { +#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask[0] & BIT(ss))) + info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3; + + sseu->min_eu_in_pool = 0; + if (info->has_pooled_eu) { + if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0)) + sseu->min_eu_in_pool = 3; + else if (IS_SS_DISABLED(1)) + sseu->min_eu_in_pool = 6; + else + sseu->min_eu_in_pool = 9; + } +#undef IS_SS_DISABLED + } +} + +static void bdw_sseu_info_init(struct intel_gt *gt) +{ + struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu; + struct intel_uncore *uncore = gt->uncore; + int s, ss; + u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */ + u32 eu_disable0, eu_disable1, eu_disable2; + + fuse2 = intel_uncore_read(uncore, GEN8_FUSE2); + sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; + intel_sseu_set_info(sseu, 3, 3, 8); + + /* + * The subslice disable field is global, i.e. it applies + * to each of the enabled slices. + */ + subslice_mask = GENMASK(sseu->max_subslices - 1, 0); + subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >> + GEN8_F2_SS_DIS_SHIFT); + eu_disable0 = intel_uncore_read(uncore, GEN8_EU_DISABLE0); + eu_disable1 = intel_uncore_read(uncore, GEN8_EU_DISABLE1); + eu_disable2 = intel_uncore_read(uncore, GEN8_EU_DISABLE2); + eu_disable[0] = eu_disable0 & GEN8_EU_DIS0_S0_MASK; + eu_disable[1] = (eu_disable0 >> GEN8_EU_DIS0_S1_SHIFT) | + ((eu_disable1 & GEN8_EU_DIS1_S1_MASK) << + (32 - GEN8_EU_DIS0_S1_SHIFT)); + eu_disable[2] = (eu_disable1 >> GEN8_EU_DIS1_S2_SHIFT) | + ((eu_disable2 & GEN8_EU_DIS2_S2_MASK) << + (32 - GEN8_EU_DIS1_S2_SHIFT)); + + /* + * Iterate through enabled slices and subslices to + * count the total enabled EU. 
+ */ + for (s = 0; s < sseu->max_slices; s++) { + if (!(sseu->slice_mask & BIT(s))) + /* skip disabled slice */ + continue; + + intel_sseu_set_subslices(sseu, s, subslice_mask); + + for (ss = 0; ss < sseu->max_subslices; ss++) { + u8 eu_disabled_mask; + u32 n_disabled; + + if (!intel_sseu_has_subslice(sseu, s, ss)) + /* skip disabled subslice */ + continue; + + eu_disabled_mask = + eu_disable[s] >> (ss * sseu->max_eus_per_subslice); + + sseu_set_eus(sseu, s, ss, ~eu_disabled_mask); + + n_disabled = hweight8(eu_disabled_mask); + + /* + * Record which subslices have 7 EUs. + */ + if (sseu->max_eus_per_subslice - n_disabled == 7) + sseu->subslice_7eu[s] |= 1 << ss; + } + } + + sseu->eu_total = compute_eu_total(sseu); + + /* + * BDW is expected to always have a uniform distribution of EU across + * subslices with the exception that any one EU in any one subslice may + * be fused off for die recovery. + */ + sseu->eu_per_subslice = + intel_sseu_subslice_total(sseu) ? + DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) : + 0; + + /* + * BDW supports slice power gating on devices with more than + * one slice. + */ + sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1; + sseu->has_subslice_pg = 0; + sseu->has_eu_pg = 0; +} + +static void hsw_sseu_info_init(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu; + u32 fuse1; + u8 subslice_mask = 0; + int s, ss; + + /* + * There isn't a register to tell us how many slices/subslices. We + * work off the PCI-ids here. + */ + switch (INTEL_INFO(i915)->gt) { + default: + MISSING_CASE(INTEL_INFO(i915)->gt); + fallthrough; + case 1: + sseu->slice_mask = BIT(0); + subslice_mask = BIT(0); + break; + case 2: + sseu->slice_mask = BIT(0); + subslice_mask = BIT(0) | BIT(1); + break; + case 3: + sseu->slice_mask = BIT(0) | BIT(1); + subslice_mask = BIT(0) | BIT(1); + break; + } + + fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1); + switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) { + default: + MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >> + HSW_F1_EU_DIS_SHIFT); + fallthrough; + case HSW_F1_EU_DIS_10EUS: + sseu->eu_per_subslice = 10; + break; + case HSW_F1_EU_DIS_8EUS: + sseu->eu_per_subslice = 8; + break; + case HSW_F1_EU_DIS_6EUS: + sseu->eu_per_subslice = 6; + break; + } + + intel_sseu_set_info(sseu, hweight8(sseu->slice_mask), + hweight8(subslice_mask), + sseu->eu_per_subslice); + + for (s = 0; s < sseu->max_slices; s++) { + intel_sseu_set_subslices(sseu, s, subslice_mask); + + for (ss = 0; ss < sseu->max_subslices; ss++) { + sseu_set_eus(sseu, s, ss, + (1UL << sseu->eu_per_subslice) - 1); + } + } + + sseu->eu_total = compute_eu_total(sseu); + + /* No powergating for you. 
*/ + sseu->has_slice_pg = 0; + sseu->has_subslice_pg = 0; + sseu->has_eu_pg = 0; +} + +void intel_sseu_info_init(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + + if (IS_HASWELL(i915)) + hsw_sseu_info_init(gt); + else if (IS_CHERRYVIEW(i915)) + cherryview_sseu_info_init(gt); + else if (IS_BROADWELL(i915)) + bdw_sseu_info_init(gt); + else if (IS_GEN(i915, 9)) + gen9_sseu_info_init(gt); + else if (IS_GEN(i915, 10)) + gen10_sseu_info_init(gt); + else if (IS_GEN(i915, 11)) + gen11_sseu_info_init(gt); + else if (INTEL_GEN(i915) >= 12) + gen12_sseu_info_init(gt); +} + u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, const struct intel_sseu *req_sseu) { @@ -173,3 +714,48 @@ u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, return rpcs; } + +void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) +{ + int s; + + drm_printf(p, "slice total: %u, mask=%04x\n", + hweight8(sseu->slice_mask), sseu->slice_mask); + drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu)); + for (s = 0; s < sseu->max_slices; s++) { + drm_printf(p, "slice%d: %u subslices, mask=%08x\n", + s, intel_sseu_subslices_per_slice(sseu, s), + intel_sseu_get_subslices(sseu, s)); + } + drm_printf(p, "EU total: %u\n", sseu->eu_total); + drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice); + drm_printf(p, "has slice power gating: %s\n", + yesno(sseu->has_slice_pg)); + drm_printf(p, "has subslice power gating: %s\n", + yesno(sseu->has_subslice_pg)); + drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg)); +} + +void intel_sseu_print_topology(const struct sseu_dev_info *sseu, + struct drm_printer *p) +{ + int s, ss; + + if (sseu->max_slices == 0) { + drm_printf(p, "Unavailable\n"); + return; + } + + for (s = 0; s < sseu->max_slices; s++) { + drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n", + s, intel_sseu_subslices_per_slice(sseu, s), + intel_sseu_get_subslices(sseu, s)); + + for (ss = 0; ss < sseu->max_subslices; ss++) { + u16 enabled_eus = sseu_get_eus(sseu, s, ss); + + drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n", + ss, hweight16(enabled_eus), enabled_eus); + } + } +} diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index d1d225204f09..f9c007f001e7 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -13,6 +13,8 @@ #include "i915_gem.h" struct drm_i915_private; +struct intel_gt; +struct drm_printer; #define GEN_MAX_SLICES (6) /* CNL upper bound */ #define GEN_MAX_SUBSLICES (8) /* ICL upper bound */ @@ -94,7 +96,13 @@ u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice); void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, u32 ss_mask); +void intel_sseu_info_init(struct intel_gt *gt); + u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, const struct intel_sseu *req_sseu); +void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p); +void intel_sseu_print_topology(const struct sseu_dev_info *sseu, + struct drm_printer *p); + #endif /* __INTEL_SSEU_H__ */ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 41ca8ff2aa16..bfba0dff0b85 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1327,7 +1327,7 @@ static int i915_rcs_topology(struct seq_file *m, void *unused) struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_printer p = drm_seq_file_printer(m); - 
intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p); + intel_sseu_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 9cb9aa39c33d..99b4a0261b13 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -626,7 +626,7 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m, intel_device_info_print_static(&error->device_info, &p); intel_device_info_print_runtime(&error->runtime_info, &p); - intel_device_info_print_topology(&error->runtime_info.sseu, &p); + intel_sseu_print_topology(&error->runtime_info.sseu, &p); intel_gt_info_print(&error->gt->info, &p); intel_driver_caps_print(&error->driver_caps, &p); } diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index a362a66fce11..d8daf224cbd3 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -29,6 +29,7 @@ #include "display/intel_de.h" #include "intel_device_info.h" #include "i915_drv.h" +#include "gt/intel_sseu.h" #define PLATFORM_NAME(x) [INTEL_##x] = #x static const char * const platform_names[] = { @@ -111,581 +112,16 @@ void intel_device_info_print_static(const struct intel_device_info *info, #undef PRINT_FLAG } -static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) -{ - int s; - - drm_printf(p, "slice total: %u, mask=%04x\n", - hweight8(sseu->slice_mask), sseu->slice_mask); - drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu)); - for (s = 0; s < sseu->max_slices; s++) { - drm_printf(p, "slice%d: %u subslices, mask=%08x\n", - s, intel_sseu_subslices_per_slice(sseu, s), - intel_sseu_get_subslices(sseu, s)); - } - drm_printf(p, "EU total: %u\n", sseu->eu_total); - drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice); - drm_printf(p, "has slice power gating: %s\n", - yesno(sseu->has_slice_pg)); - drm_printf(p, "has subslice power gating: %s\n", - yesno(sseu->has_subslice_pg)); - drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg)); -} - void intel_device_info_print_runtime(const struct intel_runtime_info *info, struct drm_printer *p) { - sseu_dump(&info->sseu, p); + intel_sseu_dump(&info->sseu, p); drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq); drm_printf(p, "CS timestamp frequency: %u Hz\n", info->cs_timestamp_frequency_hz); } -static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice, - int subslice) -{ - int slice_stride = sseu->max_subslices * sseu->eu_stride; - - return slice * slice_stride + subslice * sseu->eu_stride; -} - -static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice, - int subslice) -{ - int i, offset = sseu_eu_idx(sseu, slice, subslice); - u16 eu_mask = 0; - - for (i = 0; i < sseu->eu_stride; i++) { - eu_mask |= ((u16)sseu->eu_mask[offset + i]) << - (i * BITS_PER_BYTE); - } - - return eu_mask; -} - -static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice, - u16 eu_mask) -{ - int i, offset = sseu_eu_idx(sseu, slice, subslice); - - for (i = 0; i < sseu->eu_stride; i++) { - sseu->eu_mask[offset + i] = - (eu_mask >> (BITS_PER_BYTE * i)) & 0xff; - } -} - -void intel_device_info_print_topology(const struct sseu_dev_info *sseu, - struct drm_printer *p) -{ - int s, ss; - - if (sseu->max_slices == 0) { - drm_printf(p, "Unavailable\n"); - return; - } - - for (s = 0; s < sseu->max_slices; s++) { - drm_printf(p, "slice%d: %u subslice(s) 
(0x%08x):\n", - s, intel_sseu_subslices_per_slice(sseu, s), - intel_sseu_get_subslices(sseu, s)); - - for (ss = 0; ss < sseu->max_subslices; ss++) { - u16 enabled_eus = sseu_get_eus(sseu, s, ss); - - drm_printf(p, "\tsubslice%d: %u EUs (0x%hx)\n", - ss, hweight16(enabled_eus), enabled_eus); - } - } -} - -static u16 compute_eu_total(const struct sseu_dev_info *sseu) -{ - u16 i, total = 0; - - for (i = 0; i < ARRAY_SIZE(sseu->eu_mask); i++) - total += hweight8(sseu->eu_mask[i]); - - return total; -} - -static void gen11_compute_sseu_info(struct sseu_dev_info *sseu, - u8 s_en, u32 ss_en, u16 eu_en) -{ - int s, ss; - - /* ss_en represents entire subslice mask across all slices */ - GEM_BUG_ON(sseu->max_slices * sseu->max_subslices > - sizeof(ss_en) * BITS_PER_BYTE); - - for (s = 0; s < sseu->max_slices; s++) { - if ((s_en & BIT(s)) == 0) - continue; - - sseu->slice_mask |= BIT(s); - - intel_sseu_set_subslices(sseu, s, ss_en); - - for (ss = 0; ss < sseu->max_subslices; ss++) - if (intel_sseu_has_subslice(sseu, s, ss)) - sseu_set_eus(sseu, s, ss, eu_en); - } - sseu->eu_per_subslice = hweight16(eu_en); - sseu->eu_total = compute_eu_total(sseu); -} - -static void gen12_sseu_info_init(struct drm_i915_private *dev_priv) -{ - struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; - struct intel_uncore *uncore = &dev_priv->uncore; - u8 s_en; - u32 dss_en; - u16 eu_en = 0; - u8 eu_en_fuse; - int eu; - - /* - * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS. - * Instead of splitting these, provide userspace with an array - * of DSS to more closely represent the hardware resource. - */ - intel_sseu_set_info(sseu, 1, 6, 16); - - s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) & - GEN11_GT_S_ENA_MASK; - - dss_en = intel_uncore_read(uncore, GEN12_GT_DSS_ENABLE); - - /* one bit per pair of EUs */ - eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) & - GEN11_EU_DIS_MASK); - for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++) - if (eu_en_fuse & BIT(eu)) - eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1); - - gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en); - - /* TGL only supports slice-level power gating */ - sseu->has_slice_pg = 1; -} - -static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) -{ - struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; - struct intel_uncore *uncore = &dev_priv->uncore; - u8 s_en; - u32 ss_en; - u8 eu_en; - - if (IS_ELKHARTLAKE(dev_priv)) - intel_sseu_set_info(sseu, 1, 4, 8); - else - intel_sseu_set_info(sseu, 1, 8, 8); - - s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) & - GEN11_GT_S_ENA_MASK; - ss_en = ~intel_uncore_read(uncore, GEN11_GT_SUBSLICE_DISABLE); - - eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) & - GEN11_EU_DIS_MASK); - - gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en); - - /* ICL has no power gating restrictions. 
*/ - sseu->has_slice_pg = 1; - sseu->has_subslice_pg = 1; - sseu->has_eu_pg = 1; -} - -static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; - const u32 fuse2 = intel_uncore_read(uncore, GEN8_FUSE2); - int s, ss; - const int eu_mask = 0xff; - u32 subslice_mask, eu_en; - - intel_sseu_set_info(sseu, 6, 4, 8); - - sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >> - GEN10_F2_S_ENA_SHIFT; - - /* Slice0 */ - eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE0); - for (ss = 0; ss < sseu->max_subslices; ss++) - sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask); - /* Slice1 */ - sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask); - eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE1); - sseu_set_eus(sseu, 1, 1, eu_en & eu_mask); - /* Slice2 */ - sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask); - sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask); - /* Slice3 */ - sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask); - eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE2); - sseu_set_eus(sseu, 3, 1, eu_en & eu_mask); - /* Slice4 */ - sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask); - sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask); - /* Slice5 */ - sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask); - eu_en = ~intel_uncore_read(uncore, GEN10_EU_DISABLE3); - sseu_set_eus(sseu, 5, 1, eu_en & eu_mask); - - subslice_mask = (1 << 4) - 1; - subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> - GEN10_F2_SS_DIS_SHIFT); - - for (s = 0; s < sseu->max_slices; s++) { - u32 subslice_mask_with_eus = subslice_mask; - - for (ss = 0; ss < sseu->max_subslices; ss++) { - if (sseu_get_eus(sseu, s, ss) == 0) - subslice_mask_with_eus &= ~BIT(ss); - } - - /* - * Slice0 can have up to 3 subslices, but there are only 2 in - * slice1/2. - */ - intel_sseu_set_subslices(sseu, s, s == 0 ? - subslice_mask_with_eus : - subslice_mask_with_eus & 0x3); - } - - sseu->eu_total = compute_eu_total(sseu); - - /* - * CNL is expected to always have a uniform distribution - * of EU across subslices with the exception that any one - * EU in any one subslice may be fused off for die - * recovery. - */ - sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ? 
- DIV_ROUND_UP(sseu->eu_total, - intel_sseu_subslice_total(sseu)) : - 0; - - /* No restrictions on Power Gating */ - sseu->has_slice_pg = 1; - sseu->has_subslice_pg = 1; - sseu->has_eu_pg = 1; -} - -static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) -{ - struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; - u32 fuse; - u8 subslice_mask = 0; - - fuse = intel_uncore_read(&dev_priv->uncore, CHV_FUSE_GT); - - sseu->slice_mask = BIT(0); - intel_sseu_set_info(sseu, 1, 2, 8); - - if (!(fuse & CHV_FGT_DISABLE_SS0)) { - u8 disabled_mask = - ((fuse & CHV_FGT_EU_DIS_SS0_R0_MASK) >> - CHV_FGT_EU_DIS_SS0_R0_SHIFT) | - (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >> - CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4); - - subslice_mask |= BIT(0); - sseu_set_eus(sseu, 0, 0, ~disabled_mask); - } - - if (!(fuse & CHV_FGT_DISABLE_SS1)) { - u8 disabled_mask = - ((fuse & CHV_FGT_EU_DIS_SS1_R0_MASK) >> - CHV_FGT_EU_DIS_SS1_R0_SHIFT) | - (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >> - CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4); - - subslice_mask |= BIT(1); - sseu_set_eus(sseu, 0, 1, ~disabled_mask); - } - - intel_sseu_set_subslices(sseu, 0, subslice_mask); - - sseu->eu_total = compute_eu_total(sseu); - - /* - * CHV expected to always have a uniform distribution of EU - * across subslices. - */ - sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ? - sseu->eu_total / - intel_sseu_subslice_total(sseu) : - 0; - /* - * CHV supports subslice power gating on devices with more than - * one subslice, and supports EU power gating on devices with - * more than one EU pair per subslice. - */ - sseu->has_slice_pg = 0; - sseu->has_subslice_pg = intel_sseu_subslice_total(sseu) > 1; - sseu->has_eu_pg = (sseu->eu_per_subslice > 2); -} - -static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) -{ - struct intel_device_info *info = mkwrite_device_info(dev_priv); - struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; - struct intel_uncore *uncore = &dev_priv->uncore; - int s, ss; - u32 fuse2, eu_disable, subslice_mask; - const u8 eu_mask = 0xff; - - fuse2 = intel_uncore_read(uncore, GEN8_FUSE2); - sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; - - /* BXT has a single slice and at most 3 subslices. */ - intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3, - IS_GEN9_LP(dev_priv) ? 3 : 4, 8); - - /* - * The subslice disable field is global, i.e. it applies - * to each of the enabled slices. - */ - subslice_mask = (1 << sseu->max_subslices) - 1; - subslice_mask &= ~((fuse2 & GEN9_F2_SS_DIS_MASK) >> - GEN9_F2_SS_DIS_SHIFT); - - /* - * Iterate through enabled slices and subslices to - * count the total enabled EU. - */ - for (s = 0; s < sseu->max_slices; s++) { - if (!(sseu->slice_mask & BIT(s))) - /* skip disabled slice */ - continue; - - intel_sseu_set_subslices(sseu, s, subslice_mask); - - eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s)); - for (ss = 0; ss < sseu->max_subslices; ss++) { - int eu_per_ss; - u8 eu_disabled_mask; - - if (!intel_sseu_has_subslice(sseu, s, ss)) - /* skip disabled subslice */ - continue; - - eu_disabled_mask = (eu_disable >> (ss * 8)) & eu_mask; - - sseu_set_eus(sseu, s, ss, ~eu_disabled_mask); - - eu_per_ss = sseu->max_eus_per_subslice - - hweight8(eu_disabled_mask); - - /* - * Record which subslice(s) has(have) 7 EUs. we - * can tune the hash used to spread work among - * subslices if they are unbalanced. 
- */ - if (eu_per_ss == 7) - sseu->subslice_7eu[s] |= BIT(ss); - } - } - - sseu->eu_total = compute_eu_total(sseu); - - /* - * SKL is expected to always have a uniform distribution - * of EU across subslices with the exception that any one - * EU in any one subslice may be fused off for die - * recovery. BXT is expected to be perfectly uniform in EU - * distribution. - */ - sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ? - DIV_ROUND_UP(sseu->eu_total, - intel_sseu_subslice_total(sseu)) : - 0; - /* - * SKL+ supports slice power gating on devices with more than - * one slice, and supports EU power gating on devices with - * more than one EU pair per subslice. BXT+ supports subslice - * power gating on devices with more than one subslice, and - * supports EU power gating on devices with more than one EU - * pair per subslice. - */ - sseu->has_slice_pg = - !IS_GEN9_LP(dev_priv) && hweight8(sseu->slice_mask) > 1; - sseu->has_subslice_pg = - IS_GEN9_LP(dev_priv) && intel_sseu_subslice_total(sseu) > 1; - sseu->has_eu_pg = sseu->eu_per_subslice > 2; - - if (IS_GEN9_LP(dev_priv)) { -#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask[0] & BIT(ss))) - info->has_pooled_eu = hweight8(sseu->subslice_mask[0]) == 3; - - sseu->min_eu_in_pool = 0; - if (info->has_pooled_eu) { - if (IS_SS_DISABLED(2) || IS_SS_DISABLED(0)) - sseu->min_eu_in_pool = 3; - else if (IS_SS_DISABLED(1)) - sseu->min_eu_in_pool = 6; - else - sseu->min_eu_in_pool = 9; - } -#undef IS_SS_DISABLED - } -} - -static void bdw_sseu_info_init(struct drm_i915_private *dev_priv) -{ - struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; - struct intel_uncore *uncore = &dev_priv->uncore; - int s, ss; - u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */ - u32 eu_disable0, eu_disable1, eu_disable2; - - fuse2 = intel_uncore_read(uncore, GEN8_FUSE2); - sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; - intel_sseu_set_info(sseu, 3, 3, 8); - - /* - * The subslice disable field is global, i.e. it applies - * to each of the enabled slices. - */ - subslice_mask = GENMASK(sseu->max_subslices - 1, 0); - subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >> - GEN8_F2_SS_DIS_SHIFT); - eu_disable0 = intel_uncore_read(uncore, GEN8_EU_DISABLE0); - eu_disable1 = intel_uncore_read(uncore, GEN8_EU_DISABLE1); - eu_disable2 = intel_uncore_read(uncore, GEN8_EU_DISABLE2); - eu_disable[0] = eu_disable0 & GEN8_EU_DIS0_S0_MASK; - eu_disable[1] = (eu_disable0 >> GEN8_EU_DIS0_S1_SHIFT) | - ((eu_disable1 & GEN8_EU_DIS1_S1_MASK) << - (32 - GEN8_EU_DIS0_S1_SHIFT)); - eu_disable[2] = (eu_disable1 >> GEN8_EU_DIS1_S2_SHIFT) | - ((eu_disable2 & GEN8_EU_DIS2_S2_MASK) << - (32 - GEN8_EU_DIS1_S2_SHIFT)); - - /* - * Iterate through enabled slices and subslices to - * count the total enabled EU. - */ - for (s = 0; s < sseu->max_slices; s++) { - if (!(sseu->slice_mask & BIT(s))) - /* skip disabled slice */ - continue; - - intel_sseu_set_subslices(sseu, s, subslice_mask); - - for (ss = 0; ss < sseu->max_subslices; ss++) { - u8 eu_disabled_mask; - u32 n_disabled; - - if (!intel_sseu_has_subslice(sseu, s, ss)) - /* skip disabled subslice */ - continue; - - eu_disabled_mask = - eu_disable[s] >> (ss * sseu->max_eus_per_subslice); - - sseu_set_eus(sseu, s, ss, ~eu_disabled_mask); - - n_disabled = hweight8(eu_disabled_mask); - - /* - * Record which subslices have 7 EUs. 
- */ - if (sseu->max_eus_per_subslice - n_disabled == 7) - sseu->subslice_7eu[s] |= 1 << ss; - } - } - - sseu->eu_total = compute_eu_total(sseu); - - /* - * BDW is expected to always have a uniform distribution of EU across - * subslices with the exception that any one EU in any one subslice may - * be fused off for die recovery. - */ - sseu->eu_per_subslice = intel_sseu_subslice_total(sseu) ? - DIV_ROUND_UP(sseu->eu_total, - intel_sseu_subslice_total(sseu)) : - 0; - - /* - * BDW supports slice power gating on devices with more than - * one slice. - */ - sseu->has_slice_pg = hweight8(sseu->slice_mask) > 1; - sseu->has_subslice_pg = 0; - sseu->has_eu_pg = 0; -} - -static void hsw_sseu_info_init(struct drm_i915_private *dev_priv) -{ - struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; - u32 fuse1; - u8 subslice_mask = 0; - int s, ss; - - /* - * There isn't a register to tell us how many slices/subslices. We - * work off the PCI-ids here. - */ - switch (INTEL_INFO(dev_priv)->gt) { - default: - MISSING_CASE(INTEL_INFO(dev_priv)->gt); - /* fall through */ - case 1: - sseu->slice_mask = BIT(0); - subslice_mask = BIT(0); - break; - case 2: - sseu->slice_mask = BIT(0); - subslice_mask = BIT(0) | BIT(1); - break; - case 3: - sseu->slice_mask = BIT(0) | BIT(1); - subslice_mask = BIT(0) | BIT(1); - break; - } - - fuse1 = intel_uncore_read(&dev_priv->uncore, HSW_PAVP_FUSE1); - switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) { - default: - MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >> - HSW_F1_EU_DIS_SHIFT); - /* fall through */ - case HSW_F1_EU_DIS_10EUS: - sseu->eu_per_subslice = 10; - break; - case HSW_F1_EU_DIS_8EUS: - sseu->eu_per_subslice = 8; - break; - case HSW_F1_EU_DIS_6EUS: - sseu->eu_per_subslice = 6; - break; - } - - intel_sseu_set_info(sseu, hweight8(sseu->slice_mask), - hweight8(subslice_mask), - sseu->eu_per_subslice); - - for (s = 0; s < sseu->max_slices; s++) { - intel_sseu_set_subslices(sseu, s, subslice_mask); - - for (ss = 0; ss < sseu->max_subslices; ss++) { - sseu_set_eus(sseu, s, ss, - (1UL << sseu->eu_per_subslice) - 1); - } - } - - sseu->eu_total = compute_eu_total(sseu); - - /* No powergating for you. 
*/ - sseu->has_slice_pg = 0; - sseu->has_subslice_pg = 0; - sseu->has_eu_pg = 0; -} - static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv) { u32 ts_override = intel_uncore_read(&dev_priv->uncore, @@ -1042,22 +478,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) info->display.has_dsc = 0; } - /* Initialize slice/subslice/EU info */ - if (IS_HASWELL(dev_priv)) - hsw_sseu_info_init(dev_priv); - else if (IS_CHERRYVIEW(dev_priv)) - cherryview_sseu_info_init(dev_priv); - else if (IS_BROADWELL(dev_priv)) - bdw_sseu_info_init(dev_priv); - else if (IS_GEN(dev_priv, 9)) - gen9_sseu_info_init(dev_priv); - else if (IS_GEN(dev_priv, 10)) - gen10_sseu_info_init(dev_priv); - else if (IS_GEN(dev_priv, 11)) - gen11_sseu_info_init(dev_priv); - else if (INTEL_GEN(dev_priv) >= 12) - gen12_sseu_info_init(dev_priv); - if (IS_GEN(dev_priv, 6) && intel_vtd_active()) { drm_info(&dev_priv->drm, "Disabling ppGTT for VT-d support\n"); diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index b010b6728432..40057abbb662 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -242,8 +242,6 @@ void intel_device_info_print_static(const struct intel_device_info *info, struct drm_printer *p); void intel_device_info_print_runtime(const struct intel_runtime_info *info, struct drm_printer *p); -void intel_device_info_print_topology(const struct sseu_dev_info *sseu, - struct drm_printer *p); void intel_driver_caps_print(const struct intel_driver_caps *caps, struct drm_printer *p); -- cgit v1.2.3 From 0b6613c6b91ec5f437495c438ca047af4e47b423 Mon Sep 17 00:00:00 2001 From: Venkata Sandeep Dhanalakota Date: Tue, 7 Jul 2020 17:39:50 -0700 Subject: drm/i915/sseu: Move sseu_info under gt_info SSEUs are a GT capability, so track them under gt_info. 
Signed-off-by: Venkata Sandeep Dhanalakota Signed-off-by: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Cc: Andi Shyti Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200708003952.21831-8-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 7 ++++--- drivers/gpu/drm/i915/gem/i915_gem_context.h | 2 +- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 5 ++++- drivers/gpu/drm/i915/gt/intel_context_sseu.c | 2 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 4 ++-- drivers/gpu/drm/i915/gt/intel_gt.c | 2 ++ drivers/gpu/drm/i915/gt/intel_gt_types.h | 3 +++ drivers/gpu/drm/i915/gt/intel_lrc.c | 2 +- drivers/gpu/drm/i915/gt/intel_rps.c | 3 ++- drivers/gpu/drm/i915/gt/intel_sseu.c | 19 ++++++++++--------- drivers/gpu/drm/i915/gt/intel_sseu.h | 2 +- drivers/gpu/drm/i915/gt/intel_workarounds.c | 8 ++++---- drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 3 +-- drivers/gpu/drm/i915/i915_debugfs.c | 10 +++++----- drivers/gpu/drm/i915/i915_getparam.c | 2 +- drivers/gpu/drm/i915/i915_gpu_error.c | 4 ++-- drivers/gpu/drm/i915/i915_perf.c | 9 ++++----- drivers/gpu/drm/i915/i915_query.c | 2 +- drivers/gpu/drm/i915/intel_device_info.c | 3 --- drivers/gpu/drm/i915/intel_device_info.h | 3 --- 20 files changed, 49 insertions(+), 46 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 41784df51e58..d0bdb6d447ed 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -1400,11 +1400,12 @@ static int get_ringsize(struct i915_gem_context *ctx, } int -i915_gem_user_to_context_sseu(struct drm_i915_private *i915, +i915_gem_user_to_context_sseu(struct intel_gt *gt, const struct drm_i915_gem_context_param_sseu *user, struct intel_sseu *context) { - const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu; + const struct sseu_dev_info *device = >->info.sseu; + struct drm_i915_private *i915 = gt->i915; /* No zeros in any field. 
*/ if (!user->slice_mask || !user->subslice_mask || @@ -1537,7 +1538,7 @@ static int set_sseu(struct i915_gem_context *ctx, goto out_ce; } - ret = i915_gem_user_to_context_sseu(i915, &user_sseu, &sseu); + ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu); if (ret) goto out_ce; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index 3702b2fb27ab..a133f92bbedb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -225,7 +225,7 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it); struct i915_lut_handle *i915_lut_handle_alloc(void); void i915_lut_handle_free(struct i915_lut_handle *lut); -int i915_gem_user_to_context_sseu(struct drm_i915_private *i915, +int i915_gem_user_to_context_sseu(struct intel_gt *gt, const struct drm_i915_gem_context_param_sseu *user, struct intel_sseu *context); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index b81978890641..7ffc3c751432 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -1229,7 +1229,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, int inst = 0; int ret = 0; - if (INTEL_GEN(i915) < 9 || !RUNTIME_INFO(i915)->sseu.has_slice_pg) + if (INTEL_GEN(i915) < 9) return 0; if (flags & TEST_RESET) @@ -1255,6 +1255,9 @@ __igt_ctx_sseu(struct drm_i915_private *i915, if (hweight32(engine->sseu.slice_mask) < 2) continue; + if (!engine->gt->info.sseu.has_slice_pg) + continue; + /* * Gen11 VME friendly power-gated configuration with * half enabled sub-slices. diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c index 27ae48049239..b9c8163978a3 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c @@ -30,7 +30,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq, *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = lower_32_bits(offset); *cs++ = upper_32_bits(offset); - *cs++ = intel_sseu_make_rpcs(rq->engine->i915, &sseu); + *cs++ = intel_sseu_make_rpcs(rq->engine->gt, &sseu); intel_ring_advance(rq, cs); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index fca3c2348e5e..dd1a42c4d344 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -709,7 +709,7 @@ static int engine_setup_common(struct intel_engine_cs *engine) /* Use the whole device by default */ engine->sseu = - intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu); + intel_sseu_from_device_info(&engine->gt->info.sseu); intel_engine_init_workarounds(engine); intel_engine_init_whitelist(engine); @@ -1075,7 +1075,7 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine, struct intel_instdone *instdone) { struct drm_i915_private *i915 = engine->i915; - const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; + const struct sseu_dev_info *sseu = &engine->gt->info.sseu; struct intel_uncore *uncore = engine->uncore; u32 mmio_base = engine->mmio_base; int slice; diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 2c20fe693714..e0755f1a904b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -655,4 +655,6 @@ void intel_gt_info_print(const struct intel_gt_info *info, struct drm_printer *p) { 
drm_printf(p, "available engines: %x\n", info->engine_mask); + + intel_sseu_dump(&info->sseu, p); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index bb7551867c00..6d39a4a11bf3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -116,6 +116,9 @@ struct intel_gt { /* Media engine access to SFC per instance */ u8 vdbox_sfc_access; + + /* Slice/subslice/EU info */ + struct sseu_dev_info sseu; } info; }; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index e866b8d721ed..9e28b2f9df72 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3422,7 +3422,7 @@ __execlists_update_reg_state(const struct intel_context *ce, /* RPCS */ if (engine->class == RENDER_CLASS) { regs[CTX_R_PWR_CLK_STATE] = - intel_sseu_make_rpcs(engine->i915, &ce->sseu); + intel_sseu_make_rpcs(engine->gt, &ce->sseu); i915_oa_init_reg_state(ce, engine); } diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 296391deeb94..97ba14ad52e4 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -1062,11 +1062,12 @@ static bool gen6_rps_enable(struct intel_rps *rps) static int chv_rps_max_freq(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_gt *gt = rps_to_gt(rps); u32 val; val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); - switch (RUNTIME_INFO(i915)->sseu.eu_total) { + switch (gt->info.sseu.eu_total) { case 8: /* (2 * 4) config */ val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT; diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index 14c15cca3371..f1c039e1b5ad 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -128,7 +128,7 @@ static void gen11_compute_sseu_info(struct sseu_dev_info *sseu, static void gen12_sseu_info_init(struct intel_gt *gt) { - struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu; + struct sseu_dev_info *sseu = >->info.sseu; struct intel_uncore *uncore = gt->uncore; u32 dss_en; u16 eu_en = 0; @@ -163,7 +163,7 @@ static void gen12_sseu_info_init(struct intel_gt *gt) static void gen11_sseu_info_init(struct intel_gt *gt) { - struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu; + struct sseu_dev_info *sseu = >->info.sseu; struct intel_uncore *uncore = gt->uncore; u32 ss_en; u8 eu_en; @@ -192,7 +192,7 @@ static void gen11_sseu_info_init(struct intel_gt *gt) static void gen10_sseu_info_init(struct intel_gt *gt) { struct intel_uncore *uncore = gt->uncore; - struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu; + struct sseu_dev_info *sseu = >->info.sseu; const u32 fuse2 = intel_uncore_read(uncore, GEN8_FUSE2); const int eu_mask = 0xff; u32 subslice_mask, eu_en; @@ -268,7 +268,7 @@ static void gen10_sseu_info_init(struct intel_gt *gt) static void cherryview_sseu_info_init(struct intel_gt *gt) { - struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu; + struct sseu_dev_info *sseu = >->info.sseu; u32 fuse; u8 subslice_mask = 0; @@ -325,7 +325,7 @@ static void gen9_sseu_info_init(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; struct intel_device_info *info = mkwrite_device_info(i915); - struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; + struct sseu_dev_info *sseu = >->info.sseu; struct intel_uncore *uncore = gt->uncore; u32 fuse2, eu_disable, subslice_mask; const u8 eu_mask = 0xff; @@ -430,7 
+430,7 @@ static void gen9_sseu_info_init(struct intel_gt *gt) static void bdw_sseu_info_init(struct intel_gt *gt) { - struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu; + struct sseu_dev_info *sseu = >->info.sseu; struct intel_uncore *uncore = gt->uncore; int s, ss; u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */ @@ -516,7 +516,7 @@ static void bdw_sseu_info_init(struct intel_gt *gt) static void hsw_sseu_info_init(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; - struct sseu_dev_info *sseu = &RUNTIME_INFO(gt->i915)->sseu; + struct sseu_dev_info *sseu = >->info.sseu; u32 fuse1; u8 subslice_mask = 0; int s, ss; @@ -601,10 +601,11 @@ void intel_sseu_info_init(struct intel_gt *gt) gen12_sseu_info_init(gt); } -u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, +u32 intel_sseu_make_rpcs(struct intel_gt *gt, const struct intel_sseu *req_sseu) { - const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; + struct drm_i915_private *i915 = gt->i915; + const struct sseu_dev_info *sseu = >->info.sseu; bool subslice_pg = sseu->has_subslice_pg; u8 slices, subslices; u32 rpcs = 0; diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index f9c007f001e7..23ba6c2ebe70 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -98,7 +98,7 @@ void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, void intel_sseu_info_init(struct intel_gt *gt); -u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, +u32 intel_sseu_make_rpcs(struct intel_gt *gt, const struct intel_sseu *req_sseu); void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 2da366821dda..dbafd923e5a1 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -404,7 +404,7 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine, static void skl_tune_iz_hashing(struct intel_engine_cs *engine, struct i915_wa_list *wal) { - struct drm_i915_private *i915 = engine->i915; + struct intel_gt *gt = engine->gt; u8 vals[3] = { 0, 0, 0 }; unsigned int i; @@ -415,7 +415,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine, * Only consider slices where one, and only one, subslice has 7 * EUs */ - if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i])) + if (!is_power_of_2(gt->info.sseu.subslice_7eu[i])) continue; /* @@ -424,7 +424,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine, * * -> 0 <= ss <= 3; */ - ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1; + ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1; vals[i] = 3 - ss; } @@ -1036,7 +1036,7 @@ cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) static void wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) { - const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; + const struct sseu_dev_info *sseu = &i915->gt.info.sseu; unsigned int slice, subslice; u32 l3_en, mcr, mcr_mask; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index c10ae1660e53..d44061033f23 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -68,7 +68,6 @@ struct __guc_ads_blob { static void __guc_ads_init(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); - struct drm_i915_private *dev_priv = gt->i915; struct 
__guc_ads_blob *blob = guc->ads_blob; const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; u32 base; @@ -100,7 +99,7 @@ static void __guc_ads_init(struct intel_guc *guc) } /* System info */ - blob->system_info.slice_enabled = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask); + blob->system_info.slice_enabled = hweight8(gt->info.sseu.slice_mask); blob->system_info.rcs_enabled = 1; blob->system_info.bcs_enabled = 1; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index bfba0dff0b85..69acc6990a66 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1327,7 +1327,7 @@ static int i915_rcs_topology(struct seq_file *m, void *unused) struct drm_i915_private *dev_priv = node_to_i915(m->private); struct drm_printer p = drm_seq_file_printer(m); - intel_sseu_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p); + intel_sseu_print_topology(&dev_priv->gt.info.sseu, &p); return 0; } @@ -1628,7 +1628,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, struct sseu_dev_info *sseu) { #define SS_MAX 6 - const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); + const struct intel_gt_info *info = &dev_priv->gt.info; u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2]; int s, ss; @@ -1685,7 +1685,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, struct sseu_dev_info *sseu) { #define SS_MAX 3 - const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); + const struct intel_gt_info *info = &dev_priv->gt.info; u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2]; int s, ss; @@ -1743,7 +1743,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, static void bdw_sseu_device_status(struct drm_i915_private *dev_priv, struct sseu_dev_info *sseu) { - const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); + const struct intel_gt_info *info = &dev_priv->gt.info; u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); int s; @@ -1806,7 +1806,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info, static int i915_sseu_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); + const struct intel_gt_info *info = &dev_priv->gt.info; struct sseu_dev_info sseu; intel_wakeref_t wakeref; diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index 40390b2352b1..421613219ae9 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -12,7 +12,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_i915_private *i915 = to_i915(dev); - const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; + const struct sseu_dev_info *sseu = &i915->gt.info.sseu; drm_i915_getparam_t *param = data; int value; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 99b4a0261b13..678ddec3237f 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -426,7 +426,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) static void error_print_instdone(struct drm_i915_error_state_buf *m, const struct intel_engine_coredump *ee) { - const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu; + const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu; int slice; int subslice; @@ -626,8 +626,8 @@ static void 
err_print_capabilities(struct drm_i915_error_state_buf *m, intel_device_info_print_static(&error->device_info, &p); intel_device_info_print_runtime(&error->runtime_info, &p); - intel_sseu_print_topology(&error->runtime_info.sseu, &p); intel_gt_info_print(&error->gt->info, &p); + intel_sseu_print_topology(&error->gt->info.sseu, &p); intel_driver_caps_print(&error->driver_caps, &p); } diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 25329b7600c9..37631ce0699b 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -2196,7 +2196,7 @@ static int gen8_configure_context(struct i915_gem_context *ctx, if (!intel_context_pin_if_active(ce)) continue; - flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu); + flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu); err = gen8_modify_context(ce, flex, count); intel_context_unpin(ce); @@ -2340,7 +2340,7 @@ oa_configure_all_contexts(struct i915_perf_stream *stream, if (engine->class != RENDER_CLASS) continue; - regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu); + regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu); err = gen8_modify_self(ce, regs, num_regs, active); if (err) @@ -2740,8 +2740,7 @@ static void get_default_sseu_config(struct intel_sseu *out_sseu, struct intel_engine_cs *engine) { - const struct sseu_dev_info *devinfo_sseu = - &RUNTIME_INFO(engine->i915)->sseu; + const struct sseu_dev_info *devinfo_sseu = &engine->gt->info.sseu; *out_sseu = intel_sseu_from_device_info(devinfo_sseu); @@ -2766,7 +2765,7 @@ get_sseu_config(struct intel_sseu *out_sseu, drm_sseu->engine.engine_instance != engine->uabi_instance) return -EINVAL; - return i915_gem_user_to_context_sseu(engine->i915, drm_sseu, out_sseu); + return i915_gem_user_to_context_sseu(engine->gt, drm_sseu, out_sseu); } /** diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index c1ebda9b5627..fed337ad7b68 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -31,7 +31,7 @@ static int copy_query_item(void *query_hdr, size_t query_sz, static int query_topology_info(struct drm_i915_private *dev_priv, struct drm_i915_query_item *query_item) { - const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + const struct sseu_dev_info *sseu = &dev_priv->gt.info.sseu; struct drm_i915_query_topology_info topo; u32 slice_length, subslice_length, eu_length, total_length; int ret; diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index d8daf224cbd3..3f5dc37d2b7c 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -29,7 +29,6 @@ #include "display/intel_de.h" #include "intel_device_info.h" #include "i915_drv.h" -#include "gt/intel_sseu.h" #define PLATFORM_NAME(x) [INTEL_##x] = #x static const char * const platform_names[] = { @@ -115,8 +114,6 @@ void intel_device_info_print_static(const struct intel_device_info *info, void intel_device_info_print_runtime(const struct intel_runtime_info *info, struct drm_printer *p) { - intel_sseu_dump(&info->sseu, p); - drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq); drm_printf(p, "CS timestamp frequency: %u Hz\n", info->cs_timestamp_frequency_hz); diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 40057abbb662..242d00862b1a 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -219,9 +219,6 @@ struct 
intel_runtime_info { u8 num_sprites[I915_MAX_PIPES]; u8 num_scalers[I915_MAX_PIPES]; - /* Slice/subslice/EU info */ - struct sseu_dev_info sseu; - u32 rawclk_freq; u32 cs_timestamp_frequency_hz; -- cgit v1.2.3 From 5df79ff13554e82feb301da485077aa6680020ea Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 7 Jul 2020 17:39:51 -0700 Subject: drm/i915: gt-fy sseu debugfs Ahead of moving the sseu debugfs logic under gt/, update the functions to use intel_gt where possible to make the move cleaner. Signed-off-by: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Cc: Andi Shyti Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200708003952.21831-9-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 95 ++++++++++++++++++++----------------- 1 file changed, 51 insertions(+), 44 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 69acc6990a66..1ba94dc176fb 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1578,31 +1578,31 @@ i915_cache_sharing_set(void *data, u64 val) return 0; } -static void -intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice, - u8 *to_mask) +DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, + i915_cache_sharing_get, i915_cache_sharing_set, + "%llu\n"); + +static void sseu_copy_subslices(const struct sseu_dev_info *sseu, + int slice, u8 *to_mask) { int offset = slice * sseu->ss_stride; memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride); } -DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, - i915_cache_sharing_get, i915_cache_sharing_set, - "%llu\n"); - -static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv, +static void cherryview_sseu_device_status(struct intel_gt *gt, struct sseu_dev_info *sseu) { #define SS_MAX 2 + struct intel_uncore *uncore = gt->uncore; const int ss_max = SS_MAX; u32 sig1[SS_MAX], sig2[SS_MAX]; int ss; - sig1[0] = I915_READ(CHV_POWER_SS0_SIG1); - sig1[1] = I915_READ(CHV_POWER_SS1_SIG1); - sig2[0] = I915_READ(CHV_POWER_SS0_SIG2); - sig2[1] = I915_READ(CHV_POWER_SS1_SIG2); + sig1[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG1); + sig1[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG1); + sig2[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG2); + sig2[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG2); for (ss = 0; ss < ss_max; ss++) { unsigned int eu_cnt; @@ -1624,11 +1624,12 @@ static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv, #undef SS_MAX } -static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, +static void gen10_sseu_device_status(struct intel_gt *gt, struct sseu_dev_info *sseu) { #define SS_MAX 6 - const struct intel_gt_info *info = &dev_priv->gt.info; + struct intel_uncore *uncore = gt->uncore; + const struct intel_gt_info *info = >->info; u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2]; int s, ss; @@ -1639,10 +1640,12 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, * although this seems wrong because it would leave many * subslices without ACK. 
*/ - s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) & + s_reg[s] = intel_uncore_read(uncore, GEN10_SLICE_PGCTL_ACK(s)) & GEN10_PGCTL_VALID_SS_MASK(s); - eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s)); - eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s)); + eu_reg[2 * s] = intel_uncore_read(uncore, + GEN10_SS01_EU_PGCTL_ACK(s)); + eu_reg[2 * s + 1] = intel_uncore_read(uncore, + GEN10_SS23_EU_PGCTL_ACK(s)); } eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | @@ -1660,7 +1663,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, continue; sseu->slice_mask |= BIT(s); - intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask); + sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask); for (ss = 0; ss < info->sseu.max_subslices; ss++) { unsigned int eu_cnt; @@ -1681,18 +1684,21 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, #undef SS_MAX } -static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, +static void gen9_sseu_device_status(struct intel_gt *gt, struct sseu_dev_info *sseu) { #define SS_MAX 3 - const struct intel_gt_info *info = &dev_priv->gt.info; + struct intel_uncore *uncore = gt->uncore; + const struct intel_gt_info *info = >->info; u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2]; int s, ss; for (s = 0; s < info->sseu.max_slices; s++) { - s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s)); - eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s)); - eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s)); + s_reg[s] = intel_uncore_read(uncore, GEN9_SLICE_PGCTL_ACK(s)); + eu_reg[2 * s] = + intel_uncore_read(uncore, GEN9_SS01_EU_PGCTL_ACK(s)); + eu_reg[2 * s + 1] = + intel_uncore_read(uncore, GEN9_SS23_EU_PGCTL_ACK(s)); } eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | @@ -1711,16 +1717,16 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, sseu->slice_mask |= BIT(s); - if (IS_GEN9_BC(dev_priv)) - intel_sseu_copy_subslices(&info->sseu, s, - sseu->subslice_mask); + if (IS_GEN9_BC(gt->i915)) + sseu_copy_subslices(&info->sseu, s, + sseu->subslice_mask); for (ss = 0; ss < info->sseu.max_subslices; ss++) { unsigned int eu_cnt; u8 ss_idx = s * info->sseu.ss_stride + ss / BITS_PER_BYTE; - if (IS_GEN9_LP(dev_priv)) { + if (IS_GEN9_LP(gt->i915)) { if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) /* skip disabled subslice */ continue; @@ -1740,11 +1746,11 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, #undef SS_MAX } -static void bdw_sseu_device_status(struct drm_i915_private *dev_priv, +static void bdw_sseu_device_status(struct intel_gt *gt, struct sseu_dev_info *sseu) { - const struct intel_gt_info *info = &dev_priv->gt.info; - u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); + const struct intel_gt_info *info = >->info; + u32 slice_info = intel_uncore_read(gt->uncore, GEN8_GT_SLICE_INFO); int s; sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK; @@ -1752,8 +1758,8 @@ static void bdw_sseu_device_status(struct drm_i915_private *dev_priv, if (sseu->slice_mask) { sseu->eu_per_subslice = info->sseu.eu_per_subslice; for (s = 0; s < fls(sseu->slice_mask); s++) - intel_sseu_copy_subslices(&info->sseu, s, - sseu->subslice_mask); + sseu_copy_subslices(&info->sseu, s, + sseu->subslice_mask); sseu->eu_total = sseu->eu_per_subslice * intel_sseu_subslice_total(sseu); @@ -1805,12 +1811,13 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info, static int i915_sseu_status(struct seq_file *m, void *unused) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - const struct 
intel_gt_info *info = &dev_priv->gt.info; + struct drm_i915_private *i915 = node_to_i915(m->private); + struct intel_gt *gt = &i915->gt; + const struct intel_gt_info *info = >->info; struct sseu_dev_info sseu; intel_wakeref_t wakeref; - if (INTEL_GEN(dev_priv) < 8) + if (INTEL_GEN(i915) < 8) return -ENODEV; seq_puts(m, "SSEU Device Info\n"); @@ -1822,15 +1829,15 @@ static int i915_sseu_status(struct seq_file *m, void *unused) info->sseu.max_subslices, info->sseu.max_eus_per_subslice); - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - if (IS_CHERRYVIEW(dev_priv)) - cherryview_sseu_device_status(dev_priv, &sseu); - else if (IS_BROADWELL(dev_priv)) - bdw_sseu_device_status(dev_priv, &sseu); - else if (IS_GEN(dev_priv, 9)) - gen9_sseu_device_status(dev_priv, &sseu); - else if (INTEL_GEN(dev_priv) >= 10) - gen10_sseu_device_status(dev_priv, &sseu); + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + if (IS_CHERRYVIEW(i915)) + cherryview_sseu_device_status(gt, &sseu); + else if (IS_BROADWELL(i915)) + bdw_sseu_device_status(gt, &sseu); + else if (IS_GEN(i915, 9)) + gen9_sseu_device_status(gt, &sseu); + else if (INTEL_GEN(i915) >= 10) + gen10_sseu_device_status(gt, &sseu); } i915_print_sseu_info(m, false, &sseu); -- cgit v1.2.3 From a00eda7d8996803320bc63c574ee78f8a2fb72ee Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 7 Jul 2020 17:39:52 -0700 Subject: drm/i915: Move sseu debugfs under gt/ In line with what happened for other gt-related features, move the sseu debugfs files under gt/. The sseu_status debugfs has also been kept at the top level as we do have tests that use it; it will be removed once we teach the tests to look into the new path. Suggested-by: Tvrtko Ursulin Signed-off-by: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Cc: Andi Shyti Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200708003952.21831-10-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gt/debugfs_gt.c | 2 + drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c | 306 +++++++++++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h | 17 ++ drivers/gpu/drm/i915/i915_debugfs.c | 269 +---------------------- 5 files changed, 328 insertions(+), 267 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c create mode 100644 drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 41a27fd5dbc7..bda4c0e408f8 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -112,6 +112,7 @@ gt-y += \ gt/intel_ring_submission.o \ gt/intel_rps.o \ gt/intel_sseu.o \ + gt/intel_sseu_debugfs.o \ gt/intel_timeline.o \ gt/intel_workarounds.o \ gt/shmem_utils.o \ diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c index 1de5fbaa1cf9..3a21cf63b3f0 100644 --- a/drivers/gpu/drm/i915/gt/debugfs_gt.c +++ b/drivers/gpu/drm/i915/gt/debugfs_gt.c @@ -9,6 +9,7 @@ #include "debugfs_engines.h" #include "debugfs_gt.h" #include "debugfs_gt_pm.h" +#include "intel_sseu_debugfs.h" #include "uc/intel_uc_debugfs.h" #include "i915_drv.h" @@ -25,6 +26,7 @@ void debugfs_gt_register(struct intel_gt *gt) debugfs_engines_register(gt, root); debugfs_gt_pm_register(gt, root); + intel_sseu_debugfs_register(gt, root); intel_uc_debugfs_register(>->uc, root); } diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c new file mode 100644 index 
000000000000..51780282d872 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c @@ -0,0 +1,306 @@ +// SPDX-License-Identifier: MIT + +/* + * Copyright © 2020 Intel Corporation + */ + +#include "debugfs_gt.h" +#include "intel_sseu_debugfs.h" +#include "i915_drv.h" + +static void sseu_copy_subslices(const struct sseu_dev_info *sseu, + int slice, u8 *to_mask) +{ + int offset = slice * sseu->ss_stride; + + memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride); +} + +static void cherryview_sseu_device_status(struct intel_gt *gt, + struct sseu_dev_info *sseu) +{ +#define SS_MAX 2 + struct intel_uncore *uncore = gt->uncore; + const int ss_max = SS_MAX; + u32 sig1[SS_MAX], sig2[SS_MAX]; + int ss; + + sig1[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG1); + sig1[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG1); + sig2[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG2); + sig2[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG2); + + for (ss = 0; ss < ss_max; ss++) { + unsigned int eu_cnt; + + if (sig1[ss] & CHV_SS_PG_ENABLE) + /* skip disabled subslice */ + continue; + + sseu->slice_mask = BIT(0); + sseu->subslice_mask[0] |= BIT(ss); + eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + + ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + + ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + + ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2); + sseu->eu_total += eu_cnt; + sseu->eu_per_subslice = max_t(unsigned int, + sseu->eu_per_subslice, eu_cnt); + } +#undef SS_MAX +} + +static void gen10_sseu_device_status(struct intel_gt *gt, + struct sseu_dev_info *sseu) +{ +#define SS_MAX 6 + struct intel_uncore *uncore = gt->uncore; + const struct intel_gt_info *info = >->info; + u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2]; + int s, ss; + + for (s = 0; s < info->sseu.max_slices; s++) { + /* + * FIXME: Valid SS Mask respects the spec and read + * only valid bits for those registers, excluding reserved + * although this seems wrong because it would leave many + * subslices without ACK. 
+ */ + s_reg[s] = intel_uncore_read(uncore, GEN10_SLICE_PGCTL_ACK(s)) & + GEN10_PGCTL_VALID_SS_MASK(s); + eu_reg[2 * s] = intel_uncore_read(uncore, + GEN10_SS01_EU_PGCTL_ACK(s)); + eu_reg[2 * s + 1] = intel_uncore_read(uncore, + GEN10_SS23_EU_PGCTL_ACK(s)); + } + + eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | + GEN9_PGCTL_SSA_EU19_ACK | + GEN9_PGCTL_SSA_EU210_ACK | + GEN9_PGCTL_SSA_EU311_ACK; + eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | + GEN9_PGCTL_SSB_EU19_ACK | + GEN9_PGCTL_SSB_EU210_ACK | + GEN9_PGCTL_SSB_EU311_ACK; + + for (s = 0; s < info->sseu.max_slices; s++) { + if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) + /* skip disabled slice */ + continue; + + sseu->slice_mask |= BIT(s); + sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask); + + for (ss = 0; ss < info->sseu.max_subslices; ss++) { + unsigned int eu_cnt; + + if (info->sseu.has_subslice_pg && + !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) + /* skip disabled subslice */ + continue; + + eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] & + eu_mask[ss % 2]); + sseu->eu_total += eu_cnt; + sseu->eu_per_subslice = max_t(unsigned int, + sseu->eu_per_subslice, + eu_cnt); + } + } +#undef SS_MAX +} + +static void gen9_sseu_device_status(struct intel_gt *gt, + struct sseu_dev_info *sseu) +{ +#define SS_MAX 3 + struct intel_uncore *uncore = gt->uncore; + const struct intel_gt_info *info = >->info; + u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2]; + int s, ss; + + for (s = 0; s < info->sseu.max_slices; s++) { + s_reg[s] = intel_uncore_read(uncore, GEN9_SLICE_PGCTL_ACK(s)); + eu_reg[2 * s] = + intel_uncore_read(uncore, GEN9_SS01_EU_PGCTL_ACK(s)); + eu_reg[2 * s + 1] = + intel_uncore_read(uncore, GEN9_SS23_EU_PGCTL_ACK(s)); + } + + eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | + GEN9_PGCTL_SSA_EU19_ACK | + GEN9_PGCTL_SSA_EU210_ACK | + GEN9_PGCTL_SSA_EU311_ACK; + eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | + GEN9_PGCTL_SSB_EU19_ACK | + GEN9_PGCTL_SSB_EU210_ACK | + GEN9_PGCTL_SSB_EU311_ACK; + + for (s = 0; s < info->sseu.max_slices; s++) { + if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) + /* skip disabled slice */ + continue; + + sseu->slice_mask |= BIT(s); + + if (IS_GEN9_BC(gt->i915)) + sseu_copy_subslices(&info->sseu, s, + sseu->subslice_mask); + + for (ss = 0; ss < info->sseu.max_subslices; ss++) { + unsigned int eu_cnt; + u8 ss_idx = s * info->sseu.ss_stride + + ss / BITS_PER_BYTE; + + if (IS_GEN9_LP(gt->i915)) { + if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) + /* skip disabled subslice */ + continue; + + sseu->subslice_mask[ss_idx] |= + BIT(ss % BITS_PER_BYTE); + } + + eu_cnt = eu_reg[2 * s + ss / 2] & eu_mask[ss % 2]; + eu_cnt = 2 * hweight32(eu_cnt); + + sseu->eu_total += eu_cnt; + sseu->eu_per_subslice = max_t(unsigned int, + sseu->eu_per_subslice, + eu_cnt); + } + } +#undef SS_MAX +} + +static void bdw_sseu_device_status(struct intel_gt *gt, + struct sseu_dev_info *sseu) +{ + const struct intel_gt_info *info = >->info; + u32 slice_info = intel_uncore_read(gt->uncore, GEN8_GT_SLICE_INFO); + int s; + + sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK; + + if (sseu->slice_mask) { + sseu->eu_per_subslice = info->sseu.eu_per_subslice; + for (s = 0; s < fls(sseu->slice_mask); s++) + sseu_copy_subslices(&info->sseu, s, + sseu->subslice_mask); + sseu->eu_total = sseu->eu_per_subslice * + intel_sseu_subslice_total(sseu); + + /* subtract fused off EU(s) from enabled slice(s) */ + for (s = 0; s < fls(sseu->slice_mask); s++) { + u8 subslice_7eu = info->sseu.subslice_7eu[s]; + + sseu->eu_total -= hweight8(subslice_7eu); + } + } +} + +static void 
i915_print_sseu_info(struct seq_file *m, + bool is_available_info, + bool has_pooled_eu, + const struct sseu_dev_info *sseu) +{ + const char *type = is_available_info ? "Available" : "Enabled"; + int s; + + seq_printf(m, " %s Slice Mask: %04x\n", type, + sseu->slice_mask); + seq_printf(m, " %s Slice Total: %u\n", type, + hweight8(sseu->slice_mask)); + seq_printf(m, " %s Subslice Total: %u\n", type, + intel_sseu_subslice_total(sseu)); + for (s = 0; s < fls(sseu->slice_mask); s++) { + seq_printf(m, " %s Slice%i subslices: %u\n", type, + s, intel_sseu_subslices_per_slice(sseu, s)); + } + seq_printf(m, " %s EU Total: %u\n", type, + sseu->eu_total); + seq_printf(m, " %s EU Per Subslice: %u\n", type, + sseu->eu_per_subslice); + + if (!is_available_info) + return; + + seq_printf(m, " Has Pooled EU: %s\n", yesno(has_pooled_eu)); + if (has_pooled_eu) + seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool); + + seq_printf(m, " Has Slice Power Gating: %s\n", + yesno(sseu->has_slice_pg)); + seq_printf(m, " Has Subslice Power Gating: %s\n", + yesno(sseu->has_subslice_pg)); + seq_printf(m, " Has EU Power Gating: %s\n", + yesno(sseu->has_eu_pg)); +} + +/* + * this is called from top-level debugfs as well, so we can't get the gt from + * the seq_file. + */ +int intel_sseu_status(struct seq_file *m, struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + const struct intel_gt_info *info = >->info; + struct sseu_dev_info sseu; + intel_wakeref_t wakeref; + + if (INTEL_GEN(i915) < 8) + return -ENODEV; + + seq_puts(m, "SSEU Device Info\n"); + i915_print_sseu_info(m, true, HAS_POOLED_EU(i915), &info->sseu); + + seq_puts(m, "SSEU Device Status\n"); + memset(&sseu, 0, sizeof(sseu)); + intel_sseu_set_info(&sseu, info->sseu.max_slices, + info->sseu.max_subslices, + info->sseu.max_eus_per_subslice); + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + if (IS_CHERRYVIEW(i915)) + cherryview_sseu_device_status(gt, &sseu); + else if (IS_BROADWELL(i915)) + bdw_sseu_device_status(gt, &sseu); + else if (IS_GEN(i915, 9)) + gen9_sseu_device_status(gt, &sseu); + else if (INTEL_GEN(i915) >= 10) + gen10_sseu_device_status(gt, &sseu); + } + + i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), &sseu); + + return 0; +} + +static int sseu_status_show(struct seq_file *m, void *unused) +{ + struct intel_gt *gt = m->private; + + return intel_sseu_status(m, gt); +} +DEFINE_GT_DEBUGFS_ATTRIBUTE(sseu_status); + +static int rcs_topology_show(struct seq_file *m, void *unused) +{ + struct intel_gt *gt = m->private; + struct drm_printer p = drm_seq_file_printer(m); + + intel_sseu_print_topology(>->info.sseu, &p); + + return 0; +} +DEFINE_GT_DEBUGFS_ATTRIBUTE(rcs_topology); + +void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root) +{ + static const struct debugfs_gt_file files[] = { + { "sseu_status", &sseu_status_fops, NULL }, + { "rcs_topology", &rcs_topology_fops, NULL }, + }; + + intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt); +} diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h new file mode 100644 index 000000000000..73f001589e90 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ + +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef INTEL_SSEU_DEBUGFS_H +#define INTEL_SSEU_DEBUGFS_H + +struct intel_gt; +struct dentry; +struct seq_file; + +int intel_sseu_status(struct seq_file *m, struct intel_gt *gt); +void intel_sseu_debugfs_register(struct 
intel_gt *gt, struct dentry *root); + +#endif /* INTEL_SSEU_DEBUGFS_H */ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1ba94dc176fb..78ebede51fb3 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -40,6 +40,7 @@ #include "gt/intel_reset.h" #include "gt/intel_rc6.h" #include "gt/intel_rps.h" +#include "gt/intel_sseu_debugfs.h" #include "i915_debugfs.h" #include "i915_debugfs_params.h" @@ -1322,16 +1323,6 @@ static int i915_engine_info(struct seq_file *m, void *unused) return 0; } -static int i915_rcs_topology(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_printer p = drm_seq_file_printer(m); - - intel_sseu_print_topology(&dev_priv->gt.info.sseu, &p); - - return 0; -} - static int i915_shrinker_info(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); @@ -1582,267 +1573,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, i915_cache_sharing_get, i915_cache_sharing_set, "%llu\n"); -static void sseu_copy_subslices(const struct sseu_dev_info *sseu, - int slice, u8 *to_mask) -{ - int offset = slice * sseu->ss_stride; - - memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride); -} - -static void cherryview_sseu_device_status(struct intel_gt *gt, - struct sseu_dev_info *sseu) -{ -#define SS_MAX 2 - struct intel_uncore *uncore = gt->uncore; - const int ss_max = SS_MAX; - u32 sig1[SS_MAX], sig2[SS_MAX]; - int ss; - - sig1[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG1); - sig1[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG1); - sig2[0] = intel_uncore_read(uncore, CHV_POWER_SS0_SIG2); - sig2[1] = intel_uncore_read(uncore, CHV_POWER_SS1_SIG2); - - for (ss = 0; ss < ss_max; ss++) { - unsigned int eu_cnt; - - if (sig1[ss] & CHV_SS_PG_ENABLE) - /* skip disabled subslice */ - continue; - - sseu->slice_mask = BIT(0); - sseu->subslice_mask[0] |= BIT(ss); - eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + - ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + - ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + - ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2); - sseu->eu_total += eu_cnt; - sseu->eu_per_subslice = max_t(unsigned int, - sseu->eu_per_subslice, eu_cnt); - } -#undef SS_MAX -} - -static void gen10_sseu_device_status(struct intel_gt *gt, - struct sseu_dev_info *sseu) -{ -#define SS_MAX 6 - struct intel_uncore *uncore = gt->uncore; - const struct intel_gt_info *info = >->info; - u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2]; - int s, ss; - - for (s = 0; s < info->sseu.max_slices; s++) { - /* - * FIXME: Valid SS Mask respects the spec and read - * only valid bits for those registers, excluding reserved - * although this seems wrong because it would leave many - * subslices without ACK. 
- */ - s_reg[s] = intel_uncore_read(uncore, GEN10_SLICE_PGCTL_ACK(s)) & - GEN10_PGCTL_VALID_SS_MASK(s); - eu_reg[2 * s] = intel_uncore_read(uncore, - GEN10_SS01_EU_PGCTL_ACK(s)); - eu_reg[2 * s + 1] = intel_uncore_read(uncore, - GEN10_SS23_EU_PGCTL_ACK(s)); - } - - eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | - GEN9_PGCTL_SSA_EU19_ACK | - GEN9_PGCTL_SSA_EU210_ACK | - GEN9_PGCTL_SSA_EU311_ACK; - eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | - GEN9_PGCTL_SSB_EU19_ACK | - GEN9_PGCTL_SSB_EU210_ACK | - GEN9_PGCTL_SSB_EU311_ACK; - - for (s = 0; s < info->sseu.max_slices; s++) { - if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) - /* skip disabled slice */ - continue; - - sseu->slice_mask |= BIT(s); - sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask); - - for (ss = 0; ss < info->sseu.max_subslices; ss++) { - unsigned int eu_cnt; - - if (info->sseu.has_subslice_pg && - !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) - /* skip disabled subslice */ - continue; - - eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] & - eu_mask[ss % 2]); - sseu->eu_total += eu_cnt; - sseu->eu_per_subslice = max_t(unsigned int, - sseu->eu_per_subslice, - eu_cnt); - } - } -#undef SS_MAX -} - -static void gen9_sseu_device_status(struct intel_gt *gt, - struct sseu_dev_info *sseu) -{ -#define SS_MAX 3 - struct intel_uncore *uncore = gt->uncore; - const struct intel_gt_info *info = >->info; - u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2]; - int s, ss; - - for (s = 0; s < info->sseu.max_slices; s++) { - s_reg[s] = intel_uncore_read(uncore, GEN9_SLICE_PGCTL_ACK(s)); - eu_reg[2 * s] = - intel_uncore_read(uncore, GEN9_SS01_EU_PGCTL_ACK(s)); - eu_reg[2 * s + 1] = - intel_uncore_read(uncore, GEN9_SS23_EU_PGCTL_ACK(s)); - } - - eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK | - GEN9_PGCTL_SSA_EU19_ACK | - GEN9_PGCTL_SSA_EU210_ACK | - GEN9_PGCTL_SSA_EU311_ACK; - eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK | - GEN9_PGCTL_SSB_EU19_ACK | - GEN9_PGCTL_SSB_EU210_ACK | - GEN9_PGCTL_SSB_EU311_ACK; - - for (s = 0; s < info->sseu.max_slices; s++) { - if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0) - /* skip disabled slice */ - continue; - - sseu->slice_mask |= BIT(s); - - if (IS_GEN9_BC(gt->i915)) - sseu_copy_subslices(&info->sseu, s, - sseu->subslice_mask); - - for (ss = 0; ss < info->sseu.max_subslices; ss++) { - unsigned int eu_cnt; - u8 ss_idx = s * info->sseu.ss_stride + - ss / BITS_PER_BYTE; - - if (IS_GEN9_LP(gt->i915)) { - if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) - /* skip disabled subslice */ - continue; - - sseu->subslice_mask[ss_idx] |= - BIT(ss % BITS_PER_BYTE); - } - - eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & - eu_mask[ss%2]); - sseu->eu_total += eu_cnt; - sseu->eu_per_subslice = max_t(unsigned int, - sseu->eu_per_subslice, - eu_cnt); - } - } -#undef SS_MAX -} - -static void bdw_sseu_device_status(struct intel_gt *gt, - struct sseu_dev_info *sseu) -{ - const struct intel_gt_info *info = >->info; - u32 slice_info = intel_uncore_read(gt->uncore, GEN8_GT_SLICE_INFO); - int s; - - sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK; - - if (sseu->slice_mask) { - sseu->eu_per_subslice = info->sseu.eu_per_subslice; - for (s = 0; s < fls(sseu->slice_mask); s++) - sseu_copy_subslices(&info->sseu, s, - sseu->subslice_mask); - sseu->eu_total = sseu->eu_per_subslice * - intel_sseu_subslice_total(sseu); - - /* subtract fused off EU(s) from enabled slice(s) */ - for (s = 0; s < fls(sseu->slice_mask); s++) { - u8 subslice_7eu = info->sseu.subslice_7eu[s]; - - sseu->eu_total -= hweight8(subslice_7eu); - } - } -} - -static void i915_print_sseu_info(struct seq_file *m, bool 
is_available_info, - const struct sseu_dev_info *sseu) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - const char *type = is_available_info ? "Available" : "Enabled"; - int s; - - seq_printf(m, " %s Slice Mask: %04x\n", type, - sseu->slice_mask); - seq_printf(m, " %s Slice Total: %u\n", type, - hweight8(sseu->slice_mask)); - seq_printf(m, " %s Subslice Total: %u\n", type, - intel_sseu_subslice_total(sseu)); - for (s = 0; s < fls(sseu->slice_mask); s++) { - seq_printf(m, " %s Slice%i subslices: %u\n", type, - s, intel_sseu_subslices_per_slice(sseu, s)); - } - seq_printf(m, " %s EU Total: %u\n", type, - sseu->eu_total); - seq_printf(m, " %s EU Per Subslice: %u\n", type, - sseu->eu_per_subslice); - - if (!is_available_info) - return; - - seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv))); - if (HAS_POOLED_EU(dev_priv)) - seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool); - - seq_printf(m, " Has Slice Power Gating: %s\n", - yesno(sseu->has_slice_pg)); - seq_printf(m, " Has Subslice Power Gating: %s\n", - yesno(sseu->has_subslice_pg)); - seq_printf(m, " Has EU Power Gating: %s\n", - yesno(sseu->has_eu_pg)); -} - static int i915_sseu_status(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); struct intel_gt *gt = &i915->gt; - const struct intel_gt_info *info = >->info; - struct sseu_dev_info sseu; - intel_wakeref_t wakeref; - - if (INTEL_GEN(i915) < 8) - return -ENODEV; - seq_puts(m, "SSEU Device Info\n"); - i915_print_sseu_info(m, true, &info->sseu); - - seq_puts(m, "SSEU Device Status\n"); - memset(&sseu, 0, sizeof(sseu)); - intel_sseu_set_info(&sseu, info->sseu.max_slices, - info->sseu.max_subslices, - info->sseu.max_eus_per_subslice); - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - if (IS_CHERRYVIEW(i915)) - cherryview_sseu_device_status(gt, &sseu); - else if (IS_BROADWELL(i915)) - bdw_sseu_device_status(gt, &sseu); - else if (IS_GEN(i915, 9)) - gen9_sseu_device_status(gt, &sseu); - else if (INTEL_GEN(i915) >= 10) - gen10_sseu_device_status(gt, &sseu); - } - - i915_print_sseu_info(m, false, &sseu); - - return 0; + return intel_sseu_status(m, gt); } static int i915_forcewake_open(struct inode *inode, struct file *file) @@ -1889,7 +1625,6 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_llc", i915_llc, 0}, {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, {"i915_engine_info", i915_engine_info, 0}, - {"i915_rcs_topology", i915_rcs_topology, 0}, {"i915_shrinker_info", i915_shrinker_info, 0}, {"i915_wa_registers", i915_wa_registers, 0}, {"i915_sseu_status", i915_sseu_status, 0}, -- cgit v1.2.3 From 09137e94543761154f464433c019d51d6ac32fcf Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Jul 2020 18:37:45 +0100 Subject: drm/i915/gem: Unpin idle contexts from kswapd reclaim We removed retiring requests from the shrinker in order to decouple the mutexes from reclaim in preparation for unravelling the struct_mutex. The impact of not retiring is that we are much less agressive in making global objects available for shrinking, as such objects remain pinned until they are flushed by a heartbeat pulse following the last retired request along their timeline. In order to ensure that pulse occurs in time for memory reclamation, we should kick it from kswapd. 
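As an illustrative aside, here is a rough sketch of what "kicking" retirement from the shrinker path looks like, using hypothetical names rather than the driver's actual code (the real change simply calls intel_gt_retire_requests() from i915_gem_shrink(), as the diff below shows):

#include <linux/kernel.h>
#include <linux/shrinker.h>

struct my_dev {
	struct shrinker shrinker;
};

/* Hypothetical stand-ins for intel_gt_retire_requests() and the
 * object-freeing pass; they only mark the shape of the flow. */
static void my_retire_requests(struct my_dev *dev) { }
static unsigned long my_free_unpinned(struct my_dev *dev, unsigned long nr) { return 0; }

static unsigned long my_shrink_scan(struct shrinker *s, struct shrink_control *sc)
{
	struct my_dev *dev = container_of(s, struct my_dev, shrinker);

	/* Give idle contexts a kick: retiring completed requests drops their
	 * pins, so their backing pages become reclaimable in the pass below. */
	my_retire_requests(dev);

	return my_free_unpinned(dev, sc->nr_to_scan);
}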
The catch is that we have added some flush_work() into the retirement phase (to ensure that we reach a global idle in a timely manner), but these flush_work() are not eligible (i.e. do not belong to WQ_MEM_RECLAIM) for use from inside kswapd. To avoid flushing those workqueues, we teach the retirer not to do so unless we are actually waiting, and only do the plain retire from inside the shrinker. Note that for execlists, we already retire completed contexts as they are scheduled out, so it should not be keeping global state unnecessarily pinned. The legacy ringbuffer however... References: 9e9539800dd4 ("drm/i915: Remove waiting & retiring from shrinker paths") Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20200708173748.32734-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 25 ++++++++++++++++--------- drivers/gpu/drm/i915/gt/intel_gt_requests.c | 9 ++++++--- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 1ced1e5d2ec0..dc8f052a0ffe 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -13,6 +13,8 @@ #include #include +#include "gt/intel_gt_requests.h" + #include "i915_trace.h" static bool swap_available(void) @@ -111,15 +113,6 @@ i915_gem_shrink(struct drm_i915_private *i915, unsigned long count = 0; unsigned long scanned = 0; - /* - * When shrinking the active list, we should also consider active - * contexts. Active contexts are pinned until they are retired, and - * so can not be simply unbound to retire and unpin their pages. To - * shrink the contexts, we must wait until the gpu is idle and - * completed its switch to the kernel context. In short, we do - * not have a good mechanism for idling a specific context. - */ - trace_i915_gem_shrink(i915, target, shrink); /* @@ -133,6 +126,20 @@ i915_gem_shrink(struct drm_i915_private *i915, shrink &= ~I915_SHRINK_BOUND; } + /* + * When shrinking the active list, we should also consider active + * contexts. Active contexts are pinned until they are retired, and + * so can not be simply unbound to retire and unpin their pages. To + * shrink the contexts, we must wait until the gpu is idle and + * completed its switch to the kernel context. In short, we do + * not have a good mechanism for idling a specific context, but + * what we can do is give them a kick so that we do not keep idle + * contexts around longer than is necessary.
+ */ + if (shrink & I915_SHRINK_ACTIVE) + /* Retire requests to unpin all idle contexts */ + intel_gt_retire_requests(&i915->gt); + /* * As we may completely rewrite the (un)bound list whilst unbinding * (due to retiring requests) we have to strictly process only diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index 16ff47c83bd5..66fcbf9d0fdd 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -31,12 +31,15 @@ static bool engine_active(const struct intel_engine_cs *engine) return !list_empty(&engine->kernel_context->timeline->requests); } -static bool flush_submission(struct intel_gt *gt) +static bool flush_submission(struct intel_gt *gt, long timeout) { struct intel_engine_cs *engine; enum intel_engine_id id; bool active = false; + if (!timeout) + return false; + if (!intel_gt_pm_is_awake(gt)) return false; @@ -139,7 +142,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) if (unlikely(timeout < 0)) timeout = -timeout, interruptible = false; - flush_submission(gt); /* kick the ksoftirqd tasklets */ + flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */ spin_lock(&timelines->lock); list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { if (!mutex_trylock(&tl->mutex)) { @@ -194,7 +197,7 @@ out_active: spin_lock(&timelines->lock); list_for_each_entry_safe(tl, tn, &free, link) __intel_timeline_free(&tl->kref); - if (flush_submission(gt)) /* Wait, there's more! */ + if (flush_submission(gt, timeout)) /* Wait, there's more! */ active_count++; return active_count ? timeout : 0; -- cgit v1.2.3 From 59c94b9d26dfee413238b012238837e7028e40cc Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Jul 2020 18:37:46 +0100 Subject: drm/i915/gt: Replace opencoded i915_gem_object_pin_map() As we have a pin_map interface, that knows how to flush the data to the device, use it. The only downside is that we keep the kmap around, as once acquired we keep the mapping cached until the object's backing store is released. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20200708173748.32734-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 9e28b2f9df72..890ad041ea5c 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3880,7 +3880,6 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx, &wa_ctx->per_ctx }; wa_bb_func_t wa_bb_fn[2]; - struct page *page; void *batch, *batch_ptr; unsigned int i; int ret; @@ -3916,14 +3915,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) return ret; } - page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0); - batch = batch_ptr = kmap_atomic(page); + batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB); /* * Emit the two workaround batch buffers, recording the offset from the * start of the workaround batch buffer object for each and their * respective sizes. 
*/ + batch_ptr = batch; for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) { wa_bb[i]->offset = batch_ptr - batch; if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, @@ -3935,10 +3934,10 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) batch_ptr = wa_bb_fn[i](engine, batch_ptr); wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset); } + GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE); - BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE); - - kunmap_atomic(batch); + __i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch); + i915_gem_object_unpin_map(wa_ctx->vma->obj); if (ret) lrc_destroy_wa_ctx(engine); -- cgit v1.2.3 From 89d19b2b456af1b8e009eee5966bf29c850b200a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Jul 2020 18:37:47 +0100 Subject: drm/i915: Release shortlived maps of longlived objects Some objects we map once during their construction, and then never access their mappings again, even if they are kept around for the duration of the driver. Keeping those pages mapped, often vmapped, is therefore wasteful and we should release the maps as soon as we no longer need them. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20200708173748.32734-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_object.h | 2 ++ drivers/gpu/drm/i915/gem/i915_gem_pages.c | 15 +++++++++++++++ drivers/gpu/drm/i915/gt/gen7_renderclear.c | 2 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 2 +- drivers/gpu/drm/i915/gt/intel_renderstate.c | 2 +- drivers/gpu/drm/i915/gt/intel_ring_submission.c | 2 +- drivers/gpu/drm/i915/i915_perf.c | 4 ++-- 7 files changed, 23 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 2faa481cc18f..9cf4ad78ece6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -394,6 +394,8 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) i915_gem_object_unpin_pages(obj); } +void __i915_gem_object_release_map(struct drm_i915_gem_object *obj); + void i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index af9e48ee4a33..7e54657cf67b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -408,6 +408,21 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj, } } +void __i915_gem_object_release_map(struct drm_i915_gem_object *obj) +{ + GEM_BUG_ON(!obj->mm.mapping); + + /* + * We allow removing the mapping from underneath pinned pages! + * + * Furthermore, since this is an unsafe operation reserved only + * for construction time manipulation, we ignore locking prudence. 
+ */ + unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping))); + + i915_gem_object_unpin_map(obj); +} + struct scatterlist * i915_gem_object_get_sg(struct drm_i915_gem_object *obj, unsigned int n, diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c index de595b66a746..d93d85cd3027 100644 --- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c +++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c @@ -396,7 +396,7 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine, emit_batch(vma, memset(batch, 0, bv.max_size), &bv); i915_gem_object_flush_map(vma->obj); - i915_gem_object_unpin_map(vma->obj); + __i915_gem_object_release_map(vma->obj); return 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 890ad041ea5c..fbcfeaed6441 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3937,7 +3937,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE); __i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch); - i915_gem_object_unpin_map(wa_ctx->vma->obj); + __i915_gem_object_release_map(wa_ctx->vma->obj); if (ret) lrc_destroy_wa_ctx(engine); diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c index 6db23389e427..1bfad589c63b 100644 --- a/drivers/gpu/drm/i915/gt/intel_renderstate.c +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -150,7 +150,7 @@ static int render_state_setup(struct intel_renderstate *so, ret = 0; out: __i915_gem_object_flush_map(so->vma->obj, 0, i * sizeof(u32)); - i915_gem_object_unpin_map(so->vma->obj); + __i915_gem_object_release_map(so->vma->obj); return ret; } diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index b09b83deecef..94915f668715 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -543,7 +543,7 @@ alloc_context_vma(struct intel_engine_cs *engine) vaddr, engine->context_size); i915_gem_object_flush_map(obj); - i915_gem_object_unpin_map(obj); + __i915_gem_object_release_map(obj); } vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 37631ce0699b..de69d430b1ed 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1772,7 +1772,7 @@ static int alloc_noa_wait(struct i915_perf_stream *stream) GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch)); i915_gem_object_flush_map(bo); - i915_gem_object_unpin_map(bo); + __i915_gem_object_release_map(bo); stream->noa_wait = vma; return 0; @@ -1867,7 +1867,7 @@ alloc_oa_config_buffer(struct i915_perf_stream *stream, *cs++ = 0; i915_gem_object_flush_map(obj); - i915_gem_object_unpin_map(obj); + __i915_gem_object_release_map(obj); oa_bo->vma = i915_vma_instance(obj, &stream->engine->gt->ggtt->vm, -- cgit v1.2.3 From 763fedd6a216f94c2eb98d2f7ca21be3d3806e69 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 8 Jul 2020 18:37:48 +0100 Subject: drm/i915: Remove i915_gem_object_get_dirty_page() Last user removed, remove the get_dirty_page convenience function. 
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20200708173748.32734-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_object.h | 4 ---- drivers/gpu/drm/i915/gem/i915_gem_pages.c | 14 -------------- 2 files changed, 18 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 9cf4ad78ece6..e5b9276d254c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -258,10 +258,6 @@ struct page * i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n); -struct page * -i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, - unsigned int n); - dma_addr_t i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, unsigned long n, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 7e54657cf67b..7050519c87a4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -548,20 +548,6 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n) return nth_page(sg_page(sg), offset); } -/* Like i915_gem_object_get_page(), but mark the returned page dirty */ -struct page * -i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, - unsigned int n) -{ - struct page *page; - - page = i915_gem_object_get_page(obj, n); - if (!obj->mm.dirty) - set_page_dirty(page); - - return page; -} - dma_addr_t i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, unsigned long n, -- cgit v1.2.3 From 80f5ad62b6ecef424d25da2680150c828cbe7227 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 8 Jan 2020 20:12:37 +0200 Subject: drm/i915/sdvo: Fix SDVO colorimetry bit defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix up the SDVO colorimetry bits to match the spec. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200108181242.13650-4-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_sdvo_regs.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_sdvo_regs.h b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h index 13b9a8e257bb..74dc6c042b6e 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo_regs.h +++ b/drivers/gpu/drm/i915/display/intel_sdvo_regs.h @@ -705,10 +705,10 @@ struct intel_sdvo_enhancements_arg { #define SDVO_CMD_GET_PIXEL_REPLI 0x8c #define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d #define SDVO_CMD_SET_COLORIMETRY 0x8e - #define SDVO_COLORIMETRY_RGB256 0x0 - #define SDVO_COLORIMETRY_RGB220 0x1 - #define SDVO_COLORIMETRY_YCrCb422 0x3 - #define SDVO_COLORIMETRY_YCrCb444 0x4 + #define SDVO_COLORIMETRY_RGB256 (1 << 0) + #define SDVO_COLORIMETRY_RGB220 (1 << 1) + #define SDVO_COLORIMETRY_YCrCb422 (1 << 2) + #define SDVO_COLORIMETRY_YCrCb444 (1 << 3) #define SDVO_CMD_GET_COLORIMETRY 0x8f #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90 #define SDVO_CMD_SET_AUDIO_STAT 0x91 -- cgit v1.2.3 From 90f8ed85c6f31a53d8bdf04d80900d87296f6701 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 8 Jan 2020 20:12:38 +0200 Subject: drm/i915/sdvo: Implement limited color range for SDVO HDMI properly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The SDVO/HDMI port register limited color range bit can only be used with TMDS encoding and not SDVO encoding, ie. 
to be used only when using the port as a HDMI port as opposed to a SDVO port. The SDVO spec does have a note that some GMCHs might allow that, but gen4 bspec vehemently disagrees. I suppose on ILK+ it might work since the color range handling is on the CPU side rather than on the PCH side, so there is no clear linkage between the TMDS vs. SDVO encoding and color range. Alas, I have no hardware to test that theory. To implement limited color range support for SDVO->HDMI we need to ask the SDVO device to do the range compression. Do so, but first check if the device even supports the colorimetry selection. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200108181242.13650-5-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_display.c | 3 +- drivers/gpu/drm/i915/display/intel_hdmi.c | 4 +- drivers/gpu/drm/i915/display/intel_hdmi.h | 2 + drivers/gpu/drm/i915/display/intel_sdvo.c | 62 +++++++++++++++++----------- 4 files changed, 45 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index dff7c17f3d2b..729ec6e0d43a 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -10073,7 +10073,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range && crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); - if (crtc_state->limited_color_range) + if (crtc_state->limited_color_range && + !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) val |= PIPECONF_COLOR_RANGE_SELECT; if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 973450a393d2..8e9abb28072c 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2428,8 +2428,8 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, return 0; } -static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { const struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index 213ff24befde..5b348dcab77a 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -46,5 +46,7 @@ void intel_read_infoframe(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, enum hdmi_infoframe_type type, union hdmi_infoframe *frame); +bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); #endif /* __INTEL_HDMI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 773523dcd107..fe681c7e3936 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -94,6 +94,8 @@ struct intel_sdvo { */ struct intel_sdvo_caps caps; + u8 colorimetry_cap; + /* Pixel clock limitations reported by the SDVO device, in kHz */ int pixel_clock_min, pixel_clock_max; @@ -1277,6 +1279,18 @@ static bool intel_has_hdmi_sink(struct intel_sdvo *sdvo, 
READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI; } +static bool intel_sdvo_limited_color_range(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_sdvo *intel_sdvo = to_sdvo(encoder); + + if ((intel_sdvo->colorimetry_cap & SDVO_COLORIMETRY_RGB220) == 0) + return false; + + return intel_hdmi_limited_color_range(crtc_state, conn_state); +} + static int intel_sdvo_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -1342,21 +1356,9 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder, intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON; } - if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { - /* - * See CEA-861-E - 5.1 Default Encoding Parameters - * - * FIXME: This bit is only valid when using TMDS encoding and 8 - * bit per color mode. - */ - if (pipe_config->has_hdmi_sink && - drm_match_cea_mode(adjusted_mode) > 1) - pipe_config->limited_color_range = true; - } else { - if (pipe_config->has_hdmi_sink && - intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED) - pipe_config->limited_color_range = true; - } + pipe_config->limited_color_range = + intel_sdvo_limited_color_range(encoder, pipe_config, + conn_state); /* Clock computation needs to happen after pixel multiplier. */ if (IS_TV(intel_sdvo_connector)) @@ -1495,6 +1497,8 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state, if (crtc_state->has_hdmi_sink) { intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); intel_sdvo_set_colorimetry(intel_sdvo, + crtc_state->limited_color_range ? + SDVO_COLORIMETRY_RGB220 : SDVO_COLORIMETRY_RGB256); intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state); } else @@ -1530,8 +1534,6 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state, /* The real mode polarity is set by the SDVO commands, using * struct intel_sdvo_dtd. 
*/ sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; - if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range) - sdvox |= HDMI_COLOR_RANGE_16_235; if (INTEL_GEN(dev_priv) < 5) sdvox |= SDVO_BORDER_ENABLE; } else { @@ -1689,8 +1691,11 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n", pipe_config->pixel_multiplier, encoder_pixel_multiplier); - if (sdvox & HDMI_COLOR_RANGE_16_235) - pipe_config->limited_color_range = true; + if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_COLORIMETRY, + &val, 1)) { + if (val == SDVO_COLORIMETRY_RGB220) + pipe_config->limited_color_range = true; + } if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT, &val, 1)) { @@ -1914,6 +1919,17 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in return true; } +static u8 intel_sdvo_get_colorimetry_cap(struct intel_sdvo *intel_sdvo) +{ + u8 cap; + + if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_COLORIMETRY_CAP, + &cap, sizeof(cap))) + return SDVO_COLORIMETRY_RGB256; + + return cap; +} + static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) { struct drm_i915_private *dev_priv = to_i915(intel_sdvo->base.base.dev); @@ -2669,12 +2685,9 @@ static void intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo, struct intel_sdvo_connector *connector) { - struct drm_i915_private *dev_priv = to_i915(connector->base.base.dev); - intel_attach_force_audio_property(&connector->base.base); - if (INTEL_GEN(dev_priv) >= 4 && IS_MOBILE(dev_priv)) { + if (intel_sdvo->colorimetry_cap & SDVO_COLORIMETRY_RGB220) intel_attach_broadcast_rgb_property(&connector->base.base); - } intel_attach_aspect_ratio_property(&connector->base.base); } @@ -3315,6 +3328,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) goto err; + intel_sdvo->colorimetry_cap = + intel_sdvo_get_colorimetry_cap(intel_sdvo); + if (intel_sdvo_output_setup(intel_sdvo, intel_sdvo->caps.output_flags) != true) { drm_dbg_kms(&dev_priv->drm, -- cgit v1.2.3 From c35ad31401c079e1fe1bebb1b7d5be2940668676 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 8 Jan 2020 20:12:39 +0200 Subject: drm/i915: Reject DRM_MODE_FLAG_DBLCLK with DVI sinks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The code assumes that DRM_MODE_FLAG_DBLCLK means that we enable the pixel repeat feature. That only works with HDMI since it requires AVI infoframe to signal the information to the sink. Hence even if the mode dotclock would be valid we cannot currently assume that we can just ignore the DBLCLK flag. Reject it for DVI sinks. 
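A minimal sketch of the intended check, assuming only the core DRM mode types (the actual change to intel_hdmi_mode_valid() follows in the diff below):

#include <drm/drm_modes.h>

static enum drm_mode_status
dblclk_mode_check(const struct drm_display_mode *mode, bool has_hdmi_sink,
		  int max_dotclk)
{
	int clock = mode->clock;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
		/* Pixel repetition is signalled via the AVI infoframe,
		 * which a DVI sink never receives. */
		if (!has_hdmi_sink)
			return MODE_CLOCK_LOW;
		clock *= 2;
	}

	return clock > max_dotclk ? MODE_CLOCK_HIGH : MODE_OK;
}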
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200108181242.13650-6-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_hdmi.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 8e9abb28072c..7dc11ea4c6e4 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2242,8 +2242,11 @@ intel_hdmi_mode_valid(struct drm_connector *connector, if (clock > max_dotclk) return MODE_CLOCK_HIGH; - if (mode->flags & DRM_MODE_FLAG_DBLCLK) + if (mode->flags & DRM_MODE_FLAG_DBLCLK) { + if (!has_hdmi_sink) + return MODE_CLOCK_LOW; clock *= 2; + } if (drm_mode_is_420_only(&connector->display_info, mode)) clock /= 2; -- cgit v1.2.3 From d97571938ef34a7ab02fb3a1bd9323b825f7f7e8 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 8 Jan 2020 20:12:40 +0200 Subject: drm/i915/sdvo: Make SDVO deal with HDMI pixel repeat MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With SDVO the pipe config pixel_multiplier only concerns itself with the data on the SDVO bus. Any HDMI specific pixel repeat must be handled by the SDVO device itself. To do that simply configure the SDVO pixel replication factor appropriately. We already set up the infoframe PRB values correctly via the infoframe helpers. There is no cap we can check for this. The spec says that 1X,2X,4X are mandatory, anything else is optional. 1X and 2X are all we need so we should be able to assume they work. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200108181242.13650-7-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_sdvo.c | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index fe681c7e3936..5df08b78b577 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -944,6 +944,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo, return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1); } +static bool intel_sdvo_set_pixel_replication(struct intel_sdvo *intel_sdvo, + u8 pixel_repeat) +{ + return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_PIXEL_REPLI, + &pixel_repeat, 1); +} + static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo, u8 audio_state) { @@ -1501,6 +1508,9 @@ static void intel_sdvo_pre_enable(struct intel_atomic_state *state, SDVO_COLORIMETRY_RGB220 : SDVO_COLORIMETRY_RGB256); intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state); + intel_sdvo_set_pixel_replication(intel_sdvo, + !!(adjusted_mode->flags & + DRM_MODE_FLAG_DBLCLK)); } else intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); @@ -1855,17 +1865,26 @@ intel_sdvo_mode_valid(struct drm_connector *connector, struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, connector->state); + int clock = mode->clock; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; - if (intel_sdvo->pixel_clock_min > mode->clock) - return MODE_CLOCK_LOW; - if (intel_sdvo->pixel_clock_max < mode->clock) + if (clock > max_dotclk) return MODE_CLOCK_HIGH; - if (mode->clock > 
max_dotclk) + if (mode->flags & DRM_MODE_FLAG_DBLCLK) { + if (!has_hdmi_sink) + return MODE_CLOCK_LOW; + clock *= 2; + } + + if (intel_sdvo->pixel_clock_min > clock) + return MODE_CLOCK_LOW; + + if (intel_sdvo->pixel_clock_max < clock) return MODE_CLOCK_HIGH; if (IS_LVDS(intel_sdvo_connector)) { -- cgit v1.2.3 From 7407ec6e5567ac3a1adf451bd5c1d4718341f104 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 8 Jan 2020 20:12:41 +0200 Subject: drm/i915/sdvo: Make .get_modes() return the number of modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit .get_modes() is supposed to return the number of modes added to the probed_modes list (not that anyone actually checks for anything except zero vs. not zero). Let's do that. Also switch over to using intel_connector_update_modes() instead of hand rolling it. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200108181242.13650-8-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_sdvo.c | 56 ++++++++++++++++++------------- 1 file changed, 33 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 5df08b78b577..2da4388e1540 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2135,8 +2135,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) return ret; } -static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) +static int intel_sdvo_get_ddc_modes(struct drm_connector *connector) { + int num_modes = 0; struct edid *edid; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", @@ -2151,18 +2152,19 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) * DDC fails, check to see if the analog output is disconnected, in * which case we'll look there for the digital DDC data. 
*/ - if (edid == NULL) + if (!edid) edid = intel_sdvo_get_analog_edid(connector); - if (edid != NULL) { - if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector), - edid)) { - drm_connector_update_edid_property(connector, edid); - drm_add_edid_modes(connector, edid); - } + if (!edid) + return 0; - kfree(edid); - } + if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector), + edid)) + num_modes += intel_connector_update_modes(connector, edid); + + kfree(edid); + + return num_modes; } /* @@ -2230,12 +2232,13 @@ static const struct drm_display_mode sdvo_tv_modes[] = { DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, }; -static void intel_sdvo_get_tv_modes(struct drm_connector *connector) +static int intel_sdvo_get_tv_modes(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); const struct drm_connector_state *conn_state = connector->state; struct intel_sdvo_sdtv_resolution_request tv_res; u32 reply = 0, format_map = 0; + int num_modes = 0; int i; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", @@ -2250,31 +2253,37 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector) min(sizeof(format_map), sizeof(struct intel_sdvo_sdtv_resolution_request))); if (!intel_sdvo_set_target_output(intel_sdvo, intel_sdvo->attached_output)) - return; + return 0; BUILD_BUG_ON(sizeof(tv_res) != 3); if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT, &tv_res, sizeof(tv_res))) - return; + return 0; if (!intel_sdvo_read_response(intel_sdvo, &reply, 3)) - return; + return 0; - for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) + for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) { if (reply & (1 << i)) { struct drm_display_mode *nmode; nmode = drm_mode_duplicate(connector->dev, &sdvo_tv_modes[i]); - if (nmode) + if (nmode) { drm_mode_probed_add(connector, nmode); + num_modes++; + } } + } + + return num_modes; } -static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) +static int intel_sdvo_get_lvds_modes(struct drm_connector *connector) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector)); struct drm_i915_private *dev_priv = to_i915(connector->dev); struct drm_display_mode *newmode; + int num_modes = 0; drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); @@ -2291,6 +2300,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) newmode->type = (DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER); drm_mode_probed_add(connector, newmode); + num_modes++; } } @@ -2299,7 +2309,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) * Assume that the preferred modes are * arranged in priority order. 
*/ - intel_ddc_get_modes(connector, &intel_sdvo->ddc); + num_modes += intel_ddc_get_modes(connector, &intel_sdvo->ddc); + + return num_modes; } static int intel_sdvo_get_modes(struct drm_connector *connector) @@ -2307,13 +2319,11 @@ static int intel_sdvo_get_modes(struct drm_connector *connector) struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); if (IS_TV(intel_sdvo_connector)) - intel_sdvo_get_tv_modes(connector); + return intel_sdvo_get_tv_modes(connector); else if (IS_LVDS(intel_sdvo_connector)) - intel_sdvo_get_lvds_modes(connector); + return intel_sdvo_get_lvds_modes(connector); else - intel_sdvo_get_ddc_modes(connector); - - return !list_empty(&connector->probed_modes); + return intel_sdvo_get_ddc_modes(connector); } static int -- cgit v1.2.3 From 4a2236f9a74a37d1737d3ceff980b1945ded43be Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 8 Jan 2020 20:12:42 +0200 Subject: drm/i915/dvo: Make .get_modes() return the number of modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit .get_modes() is supposed to return the number of modes added to the probed_modes list (not that anyone actually checks for anything except zero vs. not zero). Let's do that. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200108181242.13650-9-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/display/intel_dvo.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 5cd09034519b..307ed8ae9a19 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -324,6 +324,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector) struct drm_i915_private *dev_priv = to_i915(connector->dev); const struct drm_display_mode *fixed_mode = to_intel_connector(connector)->panel.fixed_mode; + int num_modes; /* * We should probably have an i2c driver get_modes function for those @@ -331,21 +332,22 @@ static int intel_dvo_get_modes(struct drm_connector *connector) * (TV-out, for example), but for now with just TMDS and LVDS, * that's not the case. */ - intel_ddc_get_modes(connector, - intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC)); - if (!list_empty(&connector->probed_modes)) - return 1; + num_modes = intel_ddc_get_modes(connector, + intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC)); + if (num_modes) + return num_modes; if (fixed_mode) { struct drm_display_mode *mode; + mode = drm_mode_duplicate(connector->dev, fixed_mode); if (mode) { drm_mode_probed_add(connector, mode); - return 1; + num_modes++; } } - return 0; + return num_modes; } static const struct drm_connector_funcs intel_dvo_connector_funcs = { -- cgit v1.2.3 From 885f182cd6ec94f69a6ff9aa08e6e56371adce72 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 8 Jul 2020 16:12:20 +0300 Subject: drm/i915: Move all FBC w/as to .init_clock_gating() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some platforms apply the FBC w/as in .init_clock_gating(), some in fbc_activate(). Move them all to .init_clock_gating() for consistentce. Also safer since we don't have to worry about the RMWs clashing with any other runtime use of the same registers. 
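To illustrate the RMW concern, a simplified sketch with hypothetical register helpers (not the driver's MMIO accessors): if two unserialized runtime paths perform this read-modify-write on the same register, both can read the same old value and the last writer silently drops the other's bit; applying the workaround bits once from .init_clock_gating() avoids racing with other runtime users.

#include <linux/types.h>

static u32 fake_reg;				/* hypothetical chicken register */
static u32 reg_read(void) { return fake_reg; }
static void reg_write(u32 val) { fake_reg = val; }

static void set_chicken_bit(u32 bit)
{
	u32 tmp = reg_read();	/* read */

	tmp |= bit;		/* modify */
	reg_write(tmp);		/* write: may overwrite a concurrent update */
}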
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200708131223.9519-1-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_fbc.c | 15 --------------- drivers/gpu/drm/i915/intel_pm.c | 23 +++++++++++++++++++++++ 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 036546ce8db8..ef2eb14f6157 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -347,21 +347,6 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) if (dev_priv->fbc.false_color) dpfc_ctl |= FBC_CTL_FALSE_COLOR; - if (IS_IVYBRIDGE(dev_priv)) { - /* WaFbcAsynchFlipDisableFbcQueue:ivb */ - intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1, - intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS); - } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ - intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe), - intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS); - } - - if (INTEL_GEN(dev_priv) >= 11) - /* Wa_1409120013:icl,ehl,tgl */ - intel_de_write(dev_priv, ILK_DPFC_CHICKEN, - ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); - intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); intel_fbc_recompress(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ea1f79d36f7c..eda7626cf61d 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7098,6 +7098,10 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, static void icl_init_clock_gating(struct drm_i915_private *dev_priv) { + /* Wa_1409120013:icl,ehl */ + I915_WRITE(ILK_DPFC_CHICKEN, + ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + /* This is not an Wa. Enable to reduce Sampler power */ I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN, I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE); @@ -7112,6 +7116,10 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) u32 vd_pg_enable = 0; unsigned int i; + /* Wa_1409120013:tgl */ + I915_WRITE(ILK_DPFC_CHICKEN, + ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + /* This is not a WA. 
Enable VD HCP & MFX_ENC powergate */ for (i = 0; i < I915_MAX_VCS; i++) { if (HAS_ENGINE(&dev_priv->gt, _VCS(i))) @@ -7222,6 +7230,11 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) { enum pipe pipe; + /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ + I915_WRITE(CHICKEN_PIPESL_1(PIPE_A), + I915_READ(CHICKEN_PIPESL_1(PIPE_A)) | + HSW_FBCQ_DIS); + /* WaSwitchSolVfFArbitrationPriority:bdw */ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); @@ -7269,6 +7282,11 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) { + /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ + I915_WRITE(CHICKEN_PIPESL_1(PIPE_A), + I915_READ(CHICKEN_PIPESL_1(PIPE_A)) | + HSW_FBCQ_DIS); + /* This is required by WaCatErrorRejectionIssue:hsw */ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | @@ -7286,6 +7304,11 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); + /* WaFbcAsynchFlipDisableFbcQueue:ivb */ + I915_WRITE(ILK_DISPLAY_CHICKEN1, + I915_READ(ILK_DISPLAY_CHICKEN1) | + ILK_FBCQ_DIS); + /* WaDisableBackToBackFlipFix:ivb */ I915_WRITE(IVB_CHICKEN3, CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | -- cgit v1.2.3 From c4615b2b4f3e5789b48395321f679519fd4f2b81 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 8 Jul 2020 16:12:21 +0300 Subject: drm/i915: Don't do WaFbcTurnOffFbcWatermark for glk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GLK supposedly does not need WaFbcTurnOffFbcWatermark, so let's not apply it. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200708131223.9519-2-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/intel_pm.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index eda7626cf61d..7aef450481ea 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -94,10 +94,8 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); - /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl,cfl */ /* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | - DISP_FBC_WM_DIS | DISP_FBC_MEMORY_WAKE); /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */ @@ -140,6 +138,10 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) * application, using batch buffers or any other means. 
*/ I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950)); + + /* WaFbcTurnOffFbcWatermark:bxt */ + I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + DISP_FBC_WM_DIS); } static void glk_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7189,6 +7191,10 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) cnp_init_clock_gating(dev_priv); gen9_init_clock_gating(dev_priv); + /* WaFbcTurnOffFbcWatermark:cfl */ + I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + DISP_FBC_WM_DIS); + /* WaFbcNukeOnHostModify:cfl */ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); @@ -7208,6 +7214,10 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | GEN6_GAMUNIT_CLOCK_GATE_DISABLE); + /* WaFbcTurnOffFbcWatermark:kbl */ + I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + DISP_FBC_WM_DIS); + /* WaFbcNukeOnHostModify:kbl */ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); @@ -7221,6 +7231,10 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | FBC_LLC_FULLY_OPEN); + /* WaFbcTurnOffFbcWatermark:skl */ + I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + DISP_FBC_WM_DIS); + /* WaFbcNukeOnHostModify:skl */ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); -- cgit v1.2.3 From cd7a88113d102e42320b897c306bb52018669a21 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 8 Jul 2020 16:12:22 +0300 Subject: drm/i915: Limit WaFbcHighMemBwCorruptionAvoidance to skl and bxt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Supposedly only skl/bxt need WaFbcHighMemBwCorruptionAvoidance. Do not apply to the other gen9 platforms. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200708131223.9519-3-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/intel_pm.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 7aef450481ea..88e7d6b3212e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -98,10 +98,6 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | DISP_FBC_MEMORY_WAKE); - /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl,cfl */ - I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | - ILK_DPFC_DISABLE_DUMMY0); - if (IS_SKYLAKE(dev_priv)) { /* WaDisableDopClockGating */ I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL) @@ -142,6 +138,10 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) /* WaFbcTurnOffFbcWatermark:bxt */ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); + + /* WaFbcHighMemBwCorruptionAvoidance:bxt */ + I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | + ILK_DPFC_DISABLE_DUMMY0); } static void glk_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7238,6 +7238,10 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv) /* WaFbcNukeOnHostModify:skl */ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); + + /* WaFbcHighMemBwCorruptionAvoidance:skl */ + I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | + ILK_DPFC_DISABLE_DUMMY0); } static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) -- cgit v1.2.3 From 99bcf64e1c1460ac6e0275e7024a3abb5f96ac14 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 8 Jul 2020 16:12:23 +0300 Subject: drm/i915: Document FBC related w/as more thoroughly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pimp the comments for the FBC related workarounds. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200708131223.9519-4-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/intel_pm.c | 55 ++++++++++++++++++++++++++++++++--------- 1 file changed, 44 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 88e7d6b3212e..cfabbe0481ab 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -94,7 +94,10 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); - /* WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl */ + /* + * WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl + * Display WA #0859: skl,bxt,kbl,glk,cfl + */ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | DISP_FBC_MEMORY_WAKE); @@ -135,11 +138,17 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) */ I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950)); - /* WaFbcTurnOffFbcWatermark:bxt */ + /* + * WaFbcTurnOffFbcWatermark:bxt + * Display WA #0562: bxt + */ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); - /* WaFbcHighMemBwCorruptionAvoidance:bxt */ + /* + * WaFbcHighMemBwCorruptionAvoidance:bxt + * Display WA #0883: bxt + */ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | ILK_DPFC_DISABLE_DUMMY0); } @@ -7165,7 +7174,10 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); - /* WaFbcWakeMemOn:cnl */ + /* + * WaFbcWakeMemOn:cnl + * Display WA #0859: cnl + */ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | DISP_FBC_MEMORY_WAKE); @@ -7191,11 +7203,17 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) cnp_init_clock_gating(dev_priv); gen9_init_clock_gating(dev_priv); - /* WaFbcTurnOffFbcWatermark:cfl */ + /* + * WaFbcTurnOffFbcWatermark:cfl + * Display WA #0562: cfl + */ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); - /* WaFbcNukeOnHostModify:cfl */ + /* + * WaFbcNukeOnHostModify:cfl + * Display WA #0873: cfl + */ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); } @@ -7214,11 +7232,17 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | GEN6_GAMUNIT_CLOCK_GATE_DISABLE); - /* WaFbcTurnOffFbcWatermark:kbl */ + /* + * WaFbcTurnOffFbcWatermark:kbl + * Display WA #0562: kbl + */ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); - /* WaFbcNukeOnHostModify:kbl */ + /* + * WaFbcNukeOnHostModify:kbl + * Display WA #0873: kbl + */ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); } @@ -7231,15 +7255,24 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | FBC_LLC_FULLY_OPEN); - /* WaFbcTurnOffFbcWatermark:skl */ + /* + * WaFbcTurnOffFbcWatermark:skl + * Display WA #0562: skl + */ I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); - /* WaFbcNukeOnHostModify:skl */ + /* + * WaFbcNukeOnHostModify:skl + * Display WA #0873: skl + */ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); - /* WaFbcHighMemBwCorruptionAvoidance:skl */ + /* + * WaFbcHighMemBwCorruptionAvoidance:skl + * Display WA #0883: skl + */ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 
ILK_DPFC_DISABLE_DUMMY0); } -- cgit v1.2.3 From 3d702d06cb3c06b34a58f01471e094d8156181ad Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Wed, 8 Jul 2020 14:29:47 -0700 Subject: drm/i915/tgl: Implement WAs 18011464164 and 22010931296 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As today those 2 WAs have different implementation between TGL and DG1 WA pages but checking the HSD it is clear that DG1 implementation should be used for both, also to do so is easier as we just need to extend WA 1407928979 to B* stepping. Both WAs are need to fix some possible render corruptions. DG1 initial patches were not merged yet, as soon it is this WAs should be applied to DG1 as well. BSpec: 53508 BSpec: 52890 Cc: Lucas De Marchi Cc: Ville Syrjälä Signed-off-by: José Roberto de Souza Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200708212947.40178-1-jose.souza@intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index dbafd923e5a1..5726cd0a37e0 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1649,11 +1649,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN7_SARCHKMD, GEN7_DISABLE_SAMPLER_PREFETCH); - /* Wa_1407928979:tgl */ - wa_write_or(wal, - GEN7_FF_THREAD_MODE, - GEN12_FF_TESSELATION_DOP_GATE_DISABLE); - /* Wa_1408615072:tgl */ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL); @@ -1677,6 +1672,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) * Wa_14010229206:tgl */ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH); + + /* + * Wa_1407928979:tgl A* + * Wa_18011464164:tgl B0+ + * Wa_22010931296:tgl B0+ + */ + wa_write_or(wal, GEN7_FF_THREAD_MODE, + GEN12_FF_TESSELATION_DOP_GATE_DISABLE); } if (IS_GEN(i915, 11)) { -- cgit v1.2.3 From 2196dfea896f7027b43bae848890ce4aec5c8724 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 9 Jul 2020 16:49:31 +0100 Subject: drm/i915/selftests: Fix compare functions provided for sorting Both cmp_u32 and cmp_u64 are comparing the pointers instead of the value at those pointers. This will result in incorrect/unsorted list. Fix it by deferencing the pointers before comparison. 
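For illustration only (not part of the patch): the same comparator pattern in a minimal, self-contained userspace program, with qsort() standing in for the kernel's sort(). The point is that a comparator receives pointers to the elements, so it must compare the pointed-to values; comparing the pointers themselves tells the sort nothing about the values, and the array comes back effectively unsorted.

	#include <stdio.h>
	#include <stdlib.h>

	/* Compare the pointed-to values, not the pointers. */
	static int cmp_u64(const void *A, const void *B)
	{
		const unsigned long long *a = A, *b = B;

		if (*a < *b)
			return -1;
		else if (*a > *b)
			return 1;
		else
			return 0;
	}

	int main(void)
	{
		unsigned long long v[] = { 30, 10, 20 };

		qsort(v, 3, sizeof(v[0]), cmp_u64);
		printf("%llu %llu %llu\n", v[0], v[1], v[2]); /* prints: 10 20 30 */
		return 0;
	}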
Fixes: 4ba74e53ada3 ("drm/i915/selftests: Verify frequency scaling with RPS") Fixes: 8757797ff9c9 ("drm/i915/selftests: Repeat the rps clock frequency measurement") Cc: Chris Wilson Cc: Mika Kuoppala Signed-off-by: Sudeep Holla Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200709154931.23310-1-sudeep.holla@arm.com --- drivers/gpu/drm/i915/gt/selftest_rps.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index bb753f0c12eb..8624f5d2a1f3 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -29,9 +29,9 @@ static int cmp_u64(const void *A, const void *B) { const u64 *a = A, *b = B; - if (a < b) + if (*a < *b) return -1; - else if (a > b) + else if (*a > *b) return 1; else return 0; @@ -41,9 +41,9 @@ static int cmp_u32(const void *A, const void *B) { const u32 *a = A, *b = B; - if (a < b) + if (*a < *b) return -1; - else if (a > b) + else if (*a > *b) return 1; else return 0; -- cgit v1.2.3 From a8143150faa73c424c4d63fbff774a9c06a98ddc Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Wed, 8 Jul 2020 13:55:08 -0700 Subject: drm/i915/display: Replace drm_i915_private in voltage swing functions by intel_encoder MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit intel_encoder will be needed inside of vswing functions in a future patch, so here doing this change in all vswing functions since HSW. Signed-off-by: José Roberto de Souza Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200708205512.21625-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 164 ++++++++++++++++++------------- 1 file changed, 95 insertions(+), 69 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 5773ebefffc7..e80319aa7cf0 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -707,8 +707,10 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = }; static const struct ddi_buf_trans * -bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) +bdw_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + if (dev_priv->vbt.edp.low_vswing) { *n_entries = ARRAY_SIZE(bdw_ddi_translations_edp); return bdw_ddi_translations_edp; @@ -719,8 +721,10 @@ bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) } static const struct ddi_buf_trans * -skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) +skl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + if (IS_SKL_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp); return skl_y_ddi_translations_dp; @@ -734,8 +738,10 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) } static const struct ddi_buf_trans * -kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) +kbl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + if (IS_KBL_ULX(dev_priv) || IS_CFL_ULX(dev_priv) || IS_CML_ULX(dev_priv)) { @@ -753,8 +759,10 @@ kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) } static 
const struct ddi_buf_trans * -skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) +skl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + if (dev_priv->vbt.edp.low_vswing) { if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || @@ -777,9 +785,9 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) - return kbl_get_buf_trans_dp(dev_priv, n_entries); + return kbl_get_buf_trans_dp(encoder, n_entries); else - return skl_get_buf_trans_dp(dev_priv, n_entries); + return skl_get_buf_trans_dp(encoder, n_entries); } static const struct ddi_buf_trans * @@ -807,19 +815,21 @@ static int skl_buf_trans_num_entries(enum port port, int n_entries) } static const struct ddi_buf_trans * -intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv, +intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, enum port port, int *n_entries) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) { const struct ddi_buf_trans *ddi_translations = - kbl_get_buf_trans_dp(dev_priv, n_entries); + kbl_get_buf_trans_dp(encoder, n_entries); *n_entries = skl_buf_trans_num_entries(port, *n_entries); return ddi_translations; } else if (IS_SKYLAKE(dev_priv)) { const struct ddi_buf_trans *ddi_translations = - skl_get_buf_trans_dp(dev_priv, n_entries); + skl_get_buf_trans_dp(encoder, n_entries); *n_entries = skl_buf_trans_num_entries(port, *n_entries); return ddi_translations; } else if (IS_BROADWELL(dev_priv)) { @@ -835,16 +845,18 @@ intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv, } static const struct ddi_buf_trans * -intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv, +intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, enum port port, int *n_entries) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + if (IS_GEN9_BC(dev_priv)) { const struct ddi_buf_trans *ddi_translations = - skl_get_buf_trans_edp(dev_priv, n_entries); + skl_get_buf_trans_edp(encoder, n_entries); *n_entries = skl_buf_trans_num_entries(port, *n_entries); return ddi_translations; } else if (IS_BROADWELL(dev_priv)) { - return bdw_get_buf_trans_edp(dev_priv, n_entries); + return bdw_get_buf_trans_edp(encoder, n_entries); } else if (IS_HASWELL(dev_priv)) { *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp); return hsw_ddi_translations_dp; @@ -871,9 +883,11 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv, } static const struct ddi_buf_trans * -intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, +intel_ddi_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + if (IS_GEN9_BC(dev_priv)) { return skl_get_buf_trans_hdmi(dev_priv, n_entries); } else if (IS_BROADWELL(dev_priv)) { @@ -889,33 +903,36 @@ intel_ddi_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, } static const struct bxt_ddi_buf_trans * -bxt_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) +bxt_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) { *n_entries = ARRAY_SIZE(bxt_ddi_translations_dp); return bxt_ddi_translations_dp; } static const struct bxt_ddi_buf_trans * -bxt_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) +bxt_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) { + struct 
drm_i915_private *dev_priv = to_i915(encoder->base.dev); + if (dev_priv->vbt.edp.low_vswing) { *n_entries = ARRAY_SIZE(bxt_ddi_translations_edp); return bxt_ddi_translations_edp; } - return bxt_get_buf_trans_dp(dev_priv, n_entries); + return bxt_get_buf_trans_dp(encoder, n_entries); } static const struct bxt_ddi_buf_trans * -bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) +bxt_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries) { *n_entries = ARRAY_SIZE(bxt_ddi_translations_hdmi); return bxt_ddi_translations_hdmi; } static const struct cnl_ddi_buf_trans * -cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) +cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; if (voltage == VOLTAGE_INFO_0_85V) { @@ -935,8 +952,9 @@ cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) } static const struct cnl_ddi_buf_trans * -cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) +cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; if (voltage == VOLTAGE_INFO_0_85V) { @@ -956,8 +974,9 @@ cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) } static const struct cnl_ddi_buf_trans * -cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) +cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; if (dev_priv->vbt.edp.low_vswing) { @@ -976,14 +995,16 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) } return NULL; } else { - return cnl_get_buf_trans_dp(dev_priv, n_entries); + return cnl_get_buf_trans_dp(encoder, n_entries); } } static const struct cnl_ddi_buf_trans * -icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, +icl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate, int *n_entries) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + if (type == INTEL_OUTPUT_HDMI) { *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); return icl_combo_phy_ddi_translations_hdmi; @@ -1000,7 +1021,7 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, } static const struct icl_mg_phy_ddi_buf_trans * -icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, +icl_get_mg_buf_trans(struct intel_encoder *encoder, int type, int rate, int *n_entries) { if (type == INTEL_OUTPUT_HDMI) { @@ -1016,7 +1037,7 @@ icl_get_mg_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, } static const struct cnl_ddi_buf_trans * -ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, +ehl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate, int *n_entries) { if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP) { @@ -1024,15 +1045,15 @@ ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, return ehl_combo_phy_ddi_translations_dp; } - return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries); + return icl_get_combo_buf_trans(encoder, type, rate, n_entries); } static const struct cnl_ddi_buf_trans * 
-tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, +tgl_get_combo_buf_trans(struct intel_encoder *encoder, int type, int rate, int *n_entries) { if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) { - return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries); + return icl_get_combo_buf_trans(encoder, type, rate, n_entries); } else if (rate > 270000) { *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2); return tgl_combo_phy_ddi_translations_dp_hbr2; @@ -1043,7 +1064,7 @@ tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, } static const struct tgl_dkl_phy_ddi_buf_trans * -tgl_get_dkl_buf_trans(struct drm_i915_private *dev_priv, int type, int rate, +tgl_get_dkl_buf_trans(struct intel_encoder *encoder, int type, int rate, int *n_entries) { if (type == INTEL_OUTPUT_HDMI) { @@ -1066,34 +1087,34 @@ static int intel_ddi_hdmi_level(struct intel_encoder *encoder) if (INTEL_GEN(dev_priv) >= 12) { if (intel_phy_is_combo(dev_priv, phy)) - tgl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, + tgl_get_combo_buf_trans(encoder, INTEL_OUTPUT_HDMI, 0, &n_entries); else - tgl_get_dkl_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0, + tgl_get_dkl_buf_trans(encoder, INTEL_OUTPUT_HDMI, 0, &n_entries); default_entry = n_entries - 1; } else if (INTEL_GEN(dev_priv) == 11) { if (intel_phy_is_combo(dev_priv, phy)) - icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, + icl_get_combo_buf_trans(encoder, INTEL_OUTPUT_HDMI, 0, &n_entries); else - icl_get_mg_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0, + icl_get_mg_buf_trans(encoder, INTEL_OUTPUT_HDMI, 0, &n_entries); default_entry = n_entries - 1; } else if (IS_CANNONLAKE(dev_priv)) { - cnl_get_buf_trans_hdmi(dev_priv, &n_entries); + cnl_get_buf_trans_hdmi(encoder, &n_entries); default_entry = n_entries - 1; } else if (IS_GEN9_LP(dev_priv)) { - bxt_get_buf_trans_hdmi(dev_priv, &n_entries); + bxt_get_buf_trans_hdmi(encoder, &n_entries); default_entry = n_entries - 1; } else if (IS_GEN9_BC(dev_priv)) { - intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries); + intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); default_entry = 8; } else if (IS_BROADWELL(dev_priv)) { - intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries); + intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); default_entry = 7; } else if (IS_HASWELL(dev_priv)) { - intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries); + intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); default_entry = 6; } else { drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n"); @@ -1131,10 +1152,10 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder, ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv, &n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port, + ddi_translations = intel_ddi_get_buf_trans_edp(encoder, port, &n_entries); else - ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, + ddi_translations = intel_ddi_get_buf_trans_dp(encoder, port, &n_entries); /* If we're boosting the current, set bit 31 of trans1 */ @@ -1163,7 +1184,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder, enum port port = encoder->port; const struct ddi_buf_trans *ddi_translations; - ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries); + ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; @@ -2098,11 +2119,15 @@ static void 
skl_ddi_set_iboost(struct intel_encoder *encoder, int n_entries; if (type == INTEL_OUTPUT_HDMI) - ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries); + ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); else if (type == INTEL_OUTPUT_EDP) - ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries); + ddi_translations = intel_ddi_get_buf_trans_edp(encoder, + port, + &n_entries); else - ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries); + ddi_translations = intel_ddi_get_buf_trans_dp(encoder, + port, + &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; @@ -2133,11 +2158,11 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, int n_entries; if (type == INTEL_OUTPUT_HDMI) - ddi_translations = bxt_get_buf_trans_hdmi(dev_priv, &n_entries); + ddi_translations = bxt_get_buf_trans_hdmi(encoder, &n_entries); else if (type == INTEL_OUTPUT_EDP) - ddi_translations = bxt_get_buf_trans_edp(dev_priv, &n_entries); + ddi_translations = bxt_get_buf_trans_edp(encoder, &n_entries); else - ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries); + ddi_translations = bxt_get_buf_trans_dp(encoder, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; @@ -2161,36 +2186,36 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp) if (INTEL_GEN(dev_priv) >= 12) { if (intel_phy_is_combo(dev_priv, phy)) - tgl_get_combo_buf_trans(dev_priv, encoder->type, + tgl_get_combo_buf_trans(encoder, encoder->type, intel_dp->link_rate, &n_entries); else - tgl_get_dkl_buf_trans(dev_priv, encoder->type, + tgl_get_dkl_buf_trans(encoder, encoder->type, intel_dp->link_rate, &n_entries); } else if (INTEL_GEN(dev_priv) == 11) { if (IS_ELKHARTLAKE(dev_priv)) - ehl_get_combo_buf_trans(dev_priv, encoder->type, + ehl_get_combo_buf_trans(encoder, encoder->type, intel_dp->link_rate, &n_entries); else if (intel_phy_is_combo(dev_priv, phy)) - icl_get_combo_buf_trans(dev_priv, encoder->type, + icl_get_combo_buf_trans(encoder, encoder->type, intel_dp->link_rate, &n_entries); else - icl_get_mg_buf_trans(dev_priv, encoder->type, + icl_get_mg_buf_trans(encoder, encoder->type, intel_dp->link_rate, &n_entries); } else if (IS_CANNONLAKE(dev_priv)) { if (encoder->type == INTEL_OUTPUT_EDP) - cnl_get_buf_trans_edp(dev_priv, &n_entries); + cnl_get_buf_trans_edp(encoder, &n_entries); else - cnl_get_buf_trans_dp(dev_priv, &n_entries); + cnl_get_buf_trans_dp(encoder, &n_entries); } else if (IS_GEN9_LP(dev_priv)) { if (encoder->type == INTEL_OUTPUT_EDP) - bxt_get_buf_trans_edp(dev_priv, &n_entries); + bxt_get_buf_trans_edp(encoder, &n_entries); else - bxt_get_buf_trans_dp(dev_priv, &n_entries); + bxt_get_buf_trans_dp(encoder, &n_entries); } else { if (encoder->type == INTEL_OUTPUT_EDP) - intel_ddi_get_buf_trans_edp(dev_priv, port, &n_entries); + intel_ddi_get_buf_trans_edp(encoder, port, &n_entries); else - intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries); + intel_ddi_get_buf_trans_dp(encoder, port, &n_entries); } if (drm_WARN_ON(&dev_priv->drm, n_entries < 1)) @@ -2223,11 +2248,11 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder, u32 val; if (type == INTEL_OUTPUT_HDMI) - ddi_translations = cnl_get_buf_trans_hdmi(dev_priv, &n_entries); + ddi_translations = cnl_get_buf_trans_hdmi(encoder, &n_entries); else if (type == INTEL_OUTPUT_EDP) - ddi_translations = cnl_get_buf_trans_edp(dev_priv, &n_entries); + ddi_translations = cnl_get_buf_trans_edp(encoder, &n_entries); else - 
ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries); + ddi_translations = cnl_get_buf_trans_dp(encoder, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations)) return; @@ -2344,22 +2369,23 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val); } -static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, - u32 level, enum phy phy, int type, - int rate) +static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, + u32 level, enum phy phy, int type, + int rate) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); const struct cnl_ddi_buf_trans *ddi_translations = NULL; u32 n_entries, val; int ln; if (INTEL_GEN(dev_priv) >= 12) - ddi_translations = tgl_get_combo_buf_trans(dev_priv, type, rate, + ddi_translations = tgl_get_combo_buf_trans(encoder, type, rate, &n_entries); else if (IS_ELKHARTLAKE(dev_priv)) - ddi_translations = ehl_get_combo_buf_trans(dev_priv, type, rate, + ddi_translations = ehl_get_combo_buf_trans(encoder, type, rate, &n_entries); else - ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate, + ddi_translations = icl_get_combo_buf_trans(encoder, type, rate, &n_entries); if (!ddi_translations) return; @@ -2471,7 +2497,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); /* 5. Program swing and de-emphasis */ - icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate); + icl_ddi_combo_vswing_program(encoder, level, phy, type, rate); /* 6. Set training enable to trigger update */ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); @@ -2495,7 +2521,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, rate = intel_dp->link_rate; } - ddi_translations = icl_get_mg_buf_trans(dev_priv, type, rate, + ddi_translations = icl_get_mg_buf_trans(encoder, type, rate, &n_entries); /* The table does not have values for level 3 and level 9. */ if (level >= n_entries || level == 3 || level == 9) { @@ -2640,7 +2666,7 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock, rate = intel_dp->link_rate; } - ddi_translations = tgl_get_dkl_buf_trans(dev_priv, encoder->type, rate, + ddi_translations = tgl_get_dkl_buf_trans(encoder, encoder->type, rate, &n_entries); if (level >= n_entries) -- cgit v1.2.3 From f0e86e0520977eda55324ae0e0505e2836fad633 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Wed, 8 Jul 2020 13:55:09 -0700 Subject: drm/i915/display: Remove port and phy from voltage swing functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This information can be get directly from intel_encoder so no need of those parameters. 
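Condensed kernel-context sketch of the point being made (every helper below already appears in the surrounding diffs; this is not standalone-compilable code): the port and the phy are both derivable from the encoder, so passing them as extra parameters only duplicates state.

	/* Inside any vswing helper that already receives the encoder: */
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum port port = encoder->port;
	enum phy phy = intel_port_to_phy(dev_priv, port);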
Signed-off-by: José Roberto de Souza Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200708205512.21625-2-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 33 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index e80319aa7cf0..2c484b55bcdf 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -815,8 +815,7 @@ static int skl_buf_trans_num_entries(enum port port, int n_entries) } static const struct ddi_buf_trans * -intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, - enum port port, int *n_entries) +intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -825,12 +824,12 @@ intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, IS_COMETLAKE(dev_priv)) { const struct ddi_buf_trans *ddi_translations = kbl_get_buf_trans_dp(encoder, n_entries); - *n_entries = skl_buf_trans_num_entries(port, *n_entries); + *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); return ddi_translations; } else if (IS_SKYLAKE(dev_priv)) { const struct ddi_buf_trans *ddi_translations = skl_get_buf_trans_dp(encoder, n_entries); - *n_entries = skl_buf_trans_num_entries(port, *n_entries); + *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); return ddi_translations; } else if (IS_BROADWELL(dev_priv)) { *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp); @@ -845,15 +844,14 @@ intel_ddi_get_buf_trans_dp(struct intel_encoder *encoder, } static const struct ddi_buf_trans * -intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, - enum port port, int *n_entries) +intel_ddi_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (IS_GEN9_BC(dev_priv)) { const struct ddi_buf_trans *ddi_translations = skl_get_buf_trans_edp(encoder, n_entries); - *n_entries = skl_buf_trans_num_entries(port, *n_entries); + *n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries); return ddi_translations; } else if (IS_BROADWELL(dev_priv)) { return bdw_get_buf_trans_edp(encoder, n_entries); @@ -1152,10 +1150,10 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder, ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv, &n_entries); else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) - ddi_translations = intel_ddi_get_buf_trans_edp(encoder, port, + ddi_translations = intel_ddi_get_buf_trans_edp(encoder, &n_entries); else - ddi_translations = intel_ddi_get_buf_trans_dp(encoder, port, + ddi_translations = intel_ddi_get_buf_trans_dp(encoder, &n_entries); /* If we're boosting the current, set bit 31 of trans1 */ @@ -2106,7 +2104,6 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, { struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; u8 iboost; if (type == INTEL_OUTPUT_HDMI) @@ -2122,11 +2119,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries); else if (type == INTEL_OUTPUT_EDP) ddi_translations = intel_ddi_get_buf_trans_edp(encoder, - port, &n_entries); else ddi_translations = intel_ddi_get_buf_trans_dp(encoder, - port, &n_entries); if (drm_WARN_ON_ONCE(&dev_priv->drm, 
!ddi_translations)) @@ -2143,9 +2138,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder, return; } - _skl_ddi_set_iboost(dev_priv, port, iboost); + _skl_ddi_set_iboost(dev_priv, encoder->port, iboost); - if (port == PORT_A && dig_port->max_lanes == 4) + if (encoder->port == PORT_A && dig_port->max_lanes == 4) _skl_ddi_set_iboost(dev_priv, PORT_E, iboost); } @@ -2213,9 +2208,9 @@ static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp) bxt_get_buf_trans_dp(encoder, &n_entries); } else { if (encoder->type == INTEL_OUTPUT_EDP) - intel_ddi_get_buf_trans_edp(encoder, port, &n_entries); + intel_ddi_get_buf_trans_edp(encoder, &n_entries); else - intel_ddi_get_buf_trans_dp(encoder, port, &n_entries); + intel_ddi_get_buf_trans_dp(encoder, &n_entries); } if (drm_WARN_ON(&dev_priv->drm, n_entries < 1)) @@ -2370,10 +2365,10 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, } static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, - u32 level, enum phy phy, int type, - int rate) + u32 level, int type, int rate) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); const struct cnl_ddi_buf_trans *ddi_translations = NULL; u32 n_entries, val; int ln; @@ -2497,7 +2492,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); /* 5. Program swing and de-emphasis */ - icl_ddi_combo_vswing_program(encoder, level, phy, type, rate); + icl_ddi_combo_vswing_program(encoder, level, type, rate); /* 6. Set training enable to trigger update */ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy)); -- cgit v1.2.3 From f615cb6a8a42c5497b88fbf152f791a1699de9aa Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Wed, 8 Jul 2020 13:55:10 -0700 Subject: drm/i915/bios: Parse HOBL parameter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit HOBL means hours of battery life, it is a power-saving feature were supported motherboards can use a special voltage swing table that uses less power. So here parsing the VBT to check if this feature is supported. 
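A hypothetical consumer sketch, to show what the parsed bit is for (the real user lands later in this series; select_hobl_vswing_table() is a made-up placeholder, and only dev_priv->vbt.edp.hobl comes from this patch): the flag simply gates whether the special low-power eDP vswing table may be chosen.

	/* Placeholder illustration, not real i915 code. */
	if (dev_priv->vbt.edp.hobl)
		ddi_translations = select_hobl_vswing_table(encoder, &n_entries);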
BSpec: 20150 Reviewed-by: Anusha Srivatsa Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200708205512.21625-3-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 3 +++ drivers/gpu/drm/i915/display/intel_vbt_defs.h | 1 + drivers/gpu/drm/i915/i915_drv.h | 1 + 3 files changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 6593e2c38043..c53c85d38fa5 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -722,6 +722,9 @@ parse_power_conservation_features(struct drm_i915_private *dev_priv, */ if (!(power->drrs & BIT(panel_type))) dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED; + + if (bdb->version >= 232) + dev_priv->vbt.edp.hobl = power->hobl & BIT(panel_type); } static void diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index aef7fe932d1a..6faabd4f6d49 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -820,6 +820,7 @@ struct bdb_lfp_power { u16 adb; u16 lace_enabled_status; struct agressiveness_profile_entry aggressivenes[16]; + u16 hobl; /* 232+ */ } __packed; /* diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 21bb9f7cc452..87973dedf8e7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -692,6 +692,7 @@ struct intel_vbt_data { bool initialized; int bpp; struct edp_power_seq pps; + bool hobl; } edp; struct { -- cgit v1.2.3 From b297bde16c0fb0052e53aa0cfc2ea8ca6ef210aa Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 9 Jul 2020 20:01:11 +0100 Subject: drm/i915/gt: Optimise aliasing-ppgtt allocations Since the aliasing-ppgtt remains the default for gen6/gen7, it is worth optimising the ppgtt allocation for it. In this case, we do not need to flush the GGTT page directories entries as they are fixed during setup. 
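Condensed sketch of the resulting control flow (kernel context, not standalone-compilable; all identifiers except the placeholder installed_new_pt are taken from the diff below): the runtime-PM wakeref is no longer taken for every VA range allocation, but only when a new page table was actually installed and the GGTT page directory therefore needs flushing.

	bool flush = false;

	spin_lock(&pd->lock);
	gen6_for_each_pde(pt, pd, start, length, pde) {
		/* allocate and install a page table if this PDE was empty */
		if (installed_new_pt)	/* placeholder for the real check */
			flush = true;
	}
	spin_unlock(&pd->lock);

	if (flush && i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
			gen6_flush_pd(ppgtt, from, start);
	}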
Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20200709190111.5492-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index 05497b50103f..cdc0b9c54305 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -183,13 +183,11 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); struct i915_page_directory * const pd = ppgtt->base.pd; struct i915_page_table *pt, *alloc = NULL; - intel_wakeref_t wakeref; + bool flush = false; u64 from = start; unsigned int pde; int ret = 0; - wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); - spin_lock(&pd->lock); gen6_for_each_pde(pt, pd, start, length, pde) { const unsigned int count = gen6_pte_count(start, length); @@ -214,14 +212,20 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, alloc = pt; pt = pd->entry[pde]; } + + flush = true; } atomic_add(count, &pt->used); } spin_unlock(&pd->lock); - if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) - gen6_flush_pd(ppgtt, from, start); + if (flush && i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) { + intel_wakeref_t wakeref; + + with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref) + gen6_flush_pd(ppgtt, from, start); + } goto out; @@ -230,7 +234,6 @@ unwind_out: out: if (alloc) free_px(vm, alloc); - intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); return ret; } -- cgit v1.2.3 From e43ff99c8deda85234e6233e0f4af6cb09566a37 Mon Sep 17 00:00:00 2001 From: Umesh Nerlige Ramappa Date: Thu, 9 Jul 2020 23:45:03 +0100 Subject: drm/i915/perf: Use GTT when saving/restoring engine GPR MI_STORE_REGISTER_MEM and MI_LOAD_REGISTER_MEM need to know which translation to use when saving restoring the engine general purpose registers to and from the GT scratch. Since GT scratch is mapped to ggtt, we need to set an additional bit in the command to use GTT. Fixes: daed3e44396d17 ("drm/i915/perf: implement active wait for noa configurations") Suggested-by: Prathap Kumar Valsan Signed-off-by: Umesh Nerlige Ramappa Reviewed-by: Lionel Landwerlin Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200709224504.11345-1-chris@chris-wilson.co.uk Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/i915_perf.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index de69d430b1ed..c6f6370283cf 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1592,6 +1592,7 @@ static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs, u32 d; cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM; + cmd |= MI_SRM_LRM_GLOBAL_GTT; if (INTEL_GEN(stream->perf->i915) >= 8) cmd++; -- cgit v1.2.3 From ed2690a9ca896882a124ee0bd4eaff9678ed1162 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 9 Jul 2020 23:45:04 +0100 Subject: drm/i915/selftest: Check that GPR are restored across noa_wait Perf implements a GPU delay (noa_wait) by looping until the CS timestamp has passed a certain point. This use MI_MATH and the general purpose registers of the user's context, and since it is clobbering the user state it must carefully save and restore the user's data around the noa_wait. 
We can verify this by loading some values in the GPR that we know will be clobbered by the noa_wait, and then inspecting the GPR after the noa_wait completes and confirming that they have been restored. Signed-off-by: Chris Wilson Cc: Umesh Nerlige Ramappa Cc: Lionel Landwerlin Reviewed-by: Lionel Landwerlin Link: https://patchwork.freedesktop.org/patch/msgid/20200709224504.11345-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_perf.c | 131 +++++++++++++++++++++++++++++ 1 file changed, 131 insertions(+) diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c index be54570c407c..deb6dec1b5ab 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf.c +++ b/drivers/gpu/drm/i915/selftests/i915_perf.c @@ -280,11 +280,142 @@ out: return err; } +static int live_noa_gpr(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_perf_stream *stream; + struct intel_context *ce; + struct i915_request *rq; + u32 *cs, *store; + void *scratch; + u32 gpr0; + int err; + int i; + + /* Check that the delay does not clobber user context state (GPR) */ + + stream = test_stream(&i915->perf); + if (!stream) + return -ENOMEM; + + gpr0 = i915_mmio_reg_offset(GEN8_RING_CS_GPR(stream->engine->mmio_base, 0)); + + ce = intel_context_create(stream->engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto out; + } + + /* Poison the ce->vm so we detect writes not to the GGTT gt->scratch */ + scratch = kmap(ce->vm->scratch[0].base.page); + memset(scratch, POISON_FREE, PAGE_SIZE); + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_ce; + } + i915_request_get(rq); + + if (rq->engine->emit_init_breadcrumb) { + err = rq->engine->emit_init_breadcrumb(rq); + if (err) { + i915_request_add(rq); + goto out_rq; + } + } + + /* Fill the 16 qword [32 dword] GPR with a known unlikely value */ + cs = intel_ring_begin(rq, 2 * 32 + 2); + if (IS_ERR(cs)) { + i915_request_add(rq); + goto out_rq; + } + + *cs++ = MI_LOAD_REGISTER_IMM(32); + for (i = 0; i < 32; i++) { + *cs++ = gpr0 + i * sizeof(u32); + *cs++ = STACK_MAGIC; + } + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + + /* Execute the GPU delay */ + err = rq->engine->emit_bb_start(rq, + i915_ggtt_offset(stream->noa_wait), 0, + I915_DISPATCH_SECURE); + if (err) { + i915_request_add(rq); + goto out_rq; + } + + /* Read the GPR back, using the pinned global HWSP for convenience */ + store = memset32(rq->engine->status_page.addr + 512, 0, 32); + for (i = 0; i < 32; i++) { + u32 cmd; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) { + i915_request_add(rq); + goto out_rq; + } + + cmd = MI_STORE_REGISTER_MEM; + if (INTEL_GEN(i915) >= 8) + cmd++; + cmd |= MI_USE_GGTT; + + *cs++ = cmd; + *cs++ = gpr0 + i * sizeof(u32); + *cs++ = i915_ggtt_offset(rq->engine->status_page.vma) + + offset_in_page(store) + + i * sizeof(u32); + *cs++ = 0; + intel_ring_advance(rq, cs); + } + + i915_request_add(rq); + + if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ / 2) < 0) { + pr_err("noa_wait timed out\n"); + intel_gt_set_wedged(stream->engine->gt); + err = -EIO; + goto out_rq; + } + + /* Verify that the GPR contain our expected values */ + for (i = 0; i < 32; i++) { + if (store[i] == STACK_MAGIC) + continue; + + pr_err("GPR[%d] lost, found:%08x, expected:%08x!\n", + i, store[i], STACK_MAGIC); + err = -EINVAL; + } + + /* Verify that the user's scratch page was not used for GPR storage */ + if (memchr_inv(scratch, POISON_FREE, PAGE_SIZE)) { + pr_err("Scratch page overwritten!\n"); + 
igt_hexdump(scratch, 4096); + err = -EINVAL; + } + +out_rq: + i915_request_put(rq); +out_ce: + kunmap(ce->vm->scratch[0].base.page); + intel_context_put(ce); +out: + stream_destroy(stream); + return err; +} + int i915_perf_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_sanitycheck), SUBTEST(live_noa_delay), + SUBTEST(live_noa_gpr), }; struct i915_perf *perf = &i915->perf; int err; -- cgit v1.2.3 From b2295e2ecc04d189477cb08a96129ff1b3606f3a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 10 Jul 2020 14:31:25 +0100 Subject: drm/i915/gt: Be defensive in the face of false CS events If the HW throws a curve ball and reports either en event before it is possible, or just a completely impossible event, we have to grin and bear it. The first few events, we will likely not notice as we would be expecting some event, but as soon as we stop expecting an event and yet they still keep coming, then we enter into undefined state territory. In which case, bail out, stop processing the events, and reset the engine and our set of queued requests to recover. The sporadic hangs and warnings will continue to plague CI, but at least system stability should not be compromised. v2: Commentary and force the reset-on-error. v3: Customised user facing message for forced resets from internal errors. Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2045 Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200710133125.30194-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_types.h | 4 +++ drivers/gpu/drm/i915/gt/intel_gt_irq.c | 3 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 45 ++++++++++++++++++++++++---- 3 files changed, 45 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 490af81bd6f3..8de92fd7d392 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -177,8 +177,12 @@ struct intel_engine_execlists { * the first error interrupt, record the EIR and schedule the tasklet. * In the tasklet, we process the pending CS events to ensure we have * the guilty request, and then reset the engine. + * + * Low 16b are used by HW, with the upper 16b used as the enabling mask. + * Reserve the upper 16b for tracking internal errors. 
*/ u32 error_interrupt; +#define ERROR_CSB BIT(31) /** * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index e1964cf40fd6..b05da68e52f4 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -27,7 +27,8 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir) if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) { u32 eir; - eir = ENGINE_READ(engine, RING_EIR); + /* Upper 16b are the enabling mask, rsvd for internal errors */ + eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0); ENGINE_TRACE(engine, "CS error: %x\n", eir); /* Disable the error interrupt until after the reset */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index fbcfeaed6441..cd4262cc96e2 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2568,6 +2568,25 @@ static void process_csb(struct intel_engine_cs *engine) if (unlikely(head == tail)) return; + /* + * We will consume all events from HW, or at least pretend to. + * + * The sequence of events from the HW is deterministic, and derived + * from our writes to the ELSP, with a smidgen of variability for + * the arrival of the asynchronous requests wrt to the inflight + * execution. If the HW sends an event that does not correspond with + * the one we are expecting, we have to abandon all hope as we lose + * all tracking of what the engine is actually executing. We will + * only detect we are out of sequence with the HW when we get an + * 'impossible' event because we have already drained our own + * preemption/promotion queue. If this occurs, we know that we likely + * lost track of execution earlier and must unwind and restart, the + * simplest way is by stop processing the event queue and force the + * engine to reset. + */ + execlists->csb_head = tail; + ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail); + /* * Hopefully paired with a wmb() in HW! * @@ -2577,8 +2596,6 @@ static void process_csb(struct intel_engine_cs *engine) * we perform the READ_ONCE(*csb_write). */ rmb(); - - ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail); do { bool promote; @@ -2613,6 +2630,11 @@ static void process_csb(struct intel_engine_cs *engine) if (promote) { struct i915_request * const *old = execlists->active; + if (GEM_WARN_ON(!*execlists->pending)) { + execlists->error_interrupt |= ERROR_CSB; + break; + } + ring_set_paused(engine, 0); /* Point active to the new ELSP; prevent overwriting */ @@ -2635,7 +2657,10 @@ static void process_csb(struct intel_engine_cs *engine) WRITE_ONCE(execlists->pending[0], NULL); } else { - GEM_BUG_ON(!*execlists->active); + if (GEM_WARN_ON(!*execlists->active)) { + execlists->error_interrupt |= ERROR_CSB; + break; + } /* port0 completed, advanced to port1 */ trace_ports(execlists, "completed", execlists->active); @@ -2686,7 +2711,6 @@ static void process_csb(struct intel_engine_cs *engine) } } while (head != tail); - execlists->csb_head = head; set_timeslice(engine); /* @@ -3117,9 +3141,18 @@ static void execlists_submission_tasklet(unsigned long data) process_csb(engine); if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) { + const char *msg; + + /* Generate the error message in priority wrt to the user! 
*/ + if (engine->execlists.error_interrupt & GENMASK(15, 0)) + msg = "CS error"; /* thrown by a user payload */ + else if (engine->execlists.error_interrupt & ERROR_CSB) + msg = "invalid CSB event"; + else + msg = "internal error"; + engine->execlists.error_interrupt = 0; - if (ENGINE_READ(engine, RING_ESR)) /* confirm the error */ - execlists_reset(engine, "CS error"); + execlists_reset(engine, msg); } if (!READ_ONCE(engine->execlists.pending[0]) || timeout) { -- cgit v1.2.3 From 68172f2c0b8acc5609284651f1316246f3a3a65b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 10 Jul 2020 20:32:39 +0100 Subject: drm/i915: Pull printing GT capabilities on error to err_print_gt We try not to assume that we captured any information, and so have to check that error->gt exists before reporting. This check was missed in err_print_capabilities, so lets break up the capability info and push it into the GT dump. We are still a long way from yamlifying this output! Reported-by: Dan Carpenter Fixes: 792592e72aba ("drm/i915: Move the engine mask to intel_gt_info") Signed-off-by: Chris Wilson Cc: Daniele Ceraolo Spurio Cc: Dan Carpenter Reviewed-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20200710193239.5419-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gpu_error.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 678ddec3237f..6a3a2ce0b394 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -626,8 +626,6 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m, intel_device_info_print_static(&error->device_info, &p); intel_device_info_print_runtime(&error->runtime_info, &p); - intel_gt_info_print(&error->gt->info, &p); - intel_sseu_print_topology(&error->gt->info.sseu, &p); intel_driver_caps_print(&error->driver_caps, &p); } @@ -678,6 +676,15 @@ static void err_free_sgl(struct scatterlist *sgl) } } +static void err_print_gt_info(struct drm_i915_error_state_buf *m, + struct intel_gt_coredump *gt) +{ + struct drm_printer p = i915_error_printer(m); + + intel_gt_info_print(>->info, &p); + intel_sseu_print_topology(>->info.sseu, &p); +} + static void err_print_gt(struct drm_i915_error_state_buf *m, struct intel_gt_coredump *gt) { @@ -734,6 +741,8 @@ static void err_print_gt(struct drm_i915_error_state_buf *m, if (gt->uc) err_print_uc(m, gt->uc); + + err_print_gt_info(m, gt); } static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, -- cgit v1.2.3 From 2730055dcb9a07f8aca27e6911defc29b41c71df Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 11 Jul 2020 10:13:49 +0100 Subject: drm/i915/gt: Always reset the engine, even if inactive, on execlists failure If something has gone awry with the CSB processing, we need to pause, unwind and restart the request submission and event processing. However, currently we skip the engine reset if we raise an error but discover no active context, in the mistaken belief that it was merely a glitch in the matrix. The glitches are real enough, and we do need to unwind even if the engine appears idle (as it has gone permanently idle!) The simplest way to unwind and recover is simply do the engine reset, which should be very fast and _safe_ as nothing is active. 
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200711091349.28865-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index cd4262cc96e2..3ea05a86dc95 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3029,12 +3029,12 @@ static u32 active_ccid(struct intel_engine_cs *engine) return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI); } -static bool execlists_capture(struct intel_engine_cs *engine) +static void execlists_capture(struct intel_engine_cs *engine) { struct execlists_capture *cap; if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)) - return true; + return; /* * We need to _quickly_ capture the engine state before we reset. @@ -3043,7 +3043,7 @@ static bool execlists_capture(struct intel_engine_cs *engine) */ cap = capture_regs(engine); if (!cap) - return true; + return; spin_lock_irq(&engine->active.lock); cap->rq = active_context(engine, active_ccid(engine)); @@ -3080,14 +3080,13 @@ static bool execlists_capture(struct intel_engine_cs *engine) INIT_WORK(&cap->work, execlists_capture_work); schedule_work(&cap->work); - return true; + return; err_rq: i915_request_put(cap->rq); err_free: i915_gpu_coredump_put(cap->error); kfree(cap); - return false; } static void execlists_reset(struct intel_engine_cs *engine, const char *msg) @@ -3107,10 +3106,8 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg) tasklet_disable_nosync(&engine->execlists.tasklet); ring_set_paused(engine, 1); /* Freeze the current request in place */ - if (execlists_capture(engine)) - intel_engine_reset(engine, msg); - else - ring_set_paused(engine, 0); + execlists_capture(engine); + intel_engine_reset(engine, msg); tasklet_enable(&engine->execlists.tasklet); clear_and_wake_up_bit(bit, lock); -- cgit v1.2.3 From 4fe6abb8f51355224808ab02a9febf65d184c40b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 11 Jul 2020 21:32:36 +0100 Subject: drm/i915/gt: Ignore irq enabling on the virtual engines We do not use the virtual engines for interrupts (they have physical components), but we do use them to decouple the fence signaling during submission. Currently, when we submit a completed request, we try to enable the interrupt handler for the virtual engine, but we never disarm it. A quick fix is then to mark the irq as enabled, and it will then remain enabled -- and this prevents us from waking the device and never letting it sleep again. 
Fixes: f8db4d051b5e ("drm/i915: Initialise breadcrumb lists on the virtual engine") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Tvrtko Ursulin Cc: # v5.5+ Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200711203236.12330-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 3ea05a86dc95..028d698a7f98 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -5724,6 +5724,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings, intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); intel_engine_init_breadcrumbs(&ve->base); intel_engine_init_execlists(&ve->base); + ve->base.breadcrumbs.irq_armed = true; /* fake HW, used for irq_work */ ve->base.cops = &virtual_context_ops; ve->base.request_alloc = execlists_request_alloc; -- cgit v1.2.3 From d2921096e707bdde92bc635cd89ec1fe85a9faee Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 13 Jul 2020 15:25:51 +0100 Subject: drm/i915/selftest: fix an error return path where err is not being set There is an error condition where err is not being set and an uninitialized garbage value in err is being returned. Fix this by assigning err to an appropriate error return value before taking the error exit path. Addresses-Coverity: ("Uninitialized scalar value") Fixes: ed2690a9ca89 ("drm/i915/selftest: Check that GPR are restored across noa_wait") Signed-off-by: Colin Ian King Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200713142551.423649-1-colin.king@canonical.com --- drivers/gpu/drm/i915/selftests/i915_perf.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c index deb6dec1b5ab..0aa151501fb3 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf.c +++ b/drivers/gpu/drm/i915/selftests/i915_perf.c @@ -328,6 +328,7 @@ static int live_noa_gpr(void *arg) /* Fill the 16 qword [32 dword] GPR with a known unlikely value */ cs = intel_ring_begin(rq, 2 * 32 + 2); if (IS_ERR(cs)) { + err = PTR_ERR(cs); i915_request_add(rq); goto out_rq; } -- cgit v1.2.3 From 90a987205c6cf74116a102ed446d22d92cdaf915 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 13 Jul 2020 17:05:49 +0100 Subject: drm/i915/gt: Only swap to a random sibling once upon creation The danger in switching at random upon intel_context_pin is that the context may still actually be inflight, as it will not be scheduled out until a context switch after it is complete -- that may be a long time after we do a final intel_context_unpin. Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2118 Fixes: 6d06779e8672 ("drm/i915: Load balancing across a virtual engine") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: # v5.3+ Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200713160549.17344-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 028d698a7f98..e0280a672f1d 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -5432,13 +5432,8 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve) * typically be the first we inspect for submission. 
*/ swp = prandom_u32_max(ve->num_siblings); - if (!swp) - return; - - swap(ve->siblings[swp], ve->siblings[0]); - if (!intel_engine_has_relative_mmio(ve->siblings[0])) - virtual_update_register_offsets(ve->context.lrc_reg_state, - ve->siblings[0]); + if (swp) + swap(ve->siblings[swp], ve->siblings[0]); } static int virtual_context_alloc(struct intel_context *ce) @@ -5451,15 +5446,9 @@ static int virtual_context_alloc(struct intel_context *ce) static int virtual_context_pin(struct intel_context *ce) { struct virtual_engine *ve = container_of(ce, typeof(*ve), context); - int err; /* Note: we must use a real engine class for setting up reg state */ - err = __execlists_context_pin(ce, ve->siblings[0]); - if (err) - return err; - - virtual_engine_initial_hint(ve); - return 0; + return __execlists_context_pin(ce, ve->siblings[0]); } static void virtual_context_enter(struct intel_context *ce) @@ -5806,6 +5795,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings, ve->base.flags |= I915_ENGINE_IS_VIRTUAL; + virtual_engine_initial_hint(ve); return &ve->context; err_put: -- cgit v1.2.3 From 1d9221e9d395c8c80d99d002bea3c9b6dd192d6a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 13 Jul 2020 15:16:36 +0100 Subject: drm/i915: Skip signaling a signaled request Preempt-to-busy introduces various fascinating complications in that the requests may complete as we are unsubmitting them from HW. As they may then signal after unsubmission, we may find ourselves having to cleanup the signaling request from within the signaling callback. This causes us to recurse onto the same i915_request.lock. However, if the request is already signaled (as it will be before we enter the signal callbacks), we know we can skip the signaling of that request during submission, neatly evading the spinlock recursion. unsubmit(ve.rq0) # timeslice expiration or other preemption -> virtual_submit_request(ve.rq0) dma_fence_signal(ve.rq0) # request completed before preemption ack -> submit_notify(ve.rq1) -> virtual_submit_request(ve.rq1) # sees that we have completed ve.rq0 -> __i915_request_submit(ve.rq0) [ 264.210142] BUG: spinlock recursion on CPU#2, sample_multi_tr/2093 [ 264.210150] lock: 0xffff9efd6ac55080, .magic: dead4ead, .owner: sample_multi_tr/2093, .owner_cpu: 2 [ 264.210155] CPU: 2 PID: 2093 Comm: sample_multi_tr Tainted: G U [ 264.210158] Hardware name: Intel Corporation CoffeeLake Client Platform/CoffeeLake S UDIMM RVP, BIOS CNLSFWR1.R00.X212.B01.1909060036 09/06/2019 [ 264.210160] Call Trace: [ 264.210167] dump_stack+0x98/0xda [ 264.210174] spin_dump.cold+0x24/0x3c [ 264.210178] do_raw_spin_lock+0x9a/0xd0 [ 264.210184] _raw_spin_lock_nested+0x6a/0x70 [ 264.210314] __i915_request_submit+0x10a/0x3c0 [i915] [ 264.210415] virtual_submit_request+0x9b/0x380 [i915] [ 264.210516] submit_notify+0xaf/0x14c [i915] [ 264.210602] __i915_sw_fence_complete+0x8a/0x230 [i915] [ 264.210692] i915_sw_fence_complete+0x2d/0x40 [i915] [ 264.210762] __dma_i915_sw_fence_wake+0x19/0x30 [i915] [ 264.210767] dma_fence_signal_locked+0xb1/0x1c0 [ 264.210772] dma_fence_signal+0x29/0x50 [ 264.210871] i915_request_wait+0x5cb/0x830 [i915] [ 264.210876] ? 
dma_resv_get_fences_rcu+0x294/0x5d0 [ 264.210974] i915_gem_object_wait_fence+0x2f/0x40 [i915] [ 264.211084] i915_gem_object_wait+0xce/0x400 [i915] [ 264.211178] i915_gem_wait_ioctl+0xff/0x290 [i915] Fixes: 22b7a426bbe1 ("drm/i915/execlists: Preempt-to-busy") References: 6d06779e8672 ("drm/i915: Load balancing across a virtual engine") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: "Nayana, Venkata Ramana" Cc: # v5.4+ Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20200713141636.29326-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 7 ++++++- drivers/gpu/drm/i915/i915_request.c | 23 +++++++++++++---------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index d907d538176e..91786310c114 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -314,13 +314,18 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq) { lockdep_assert_held(&rq->lock); + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) + return true; + if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) { struct intel_breadcrumbs *b = &rq->engine->breadcrumbs; struct intel_context *ce = rq->context; struct list_head *pos; spin_lock(&b->irq_lock); - GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)); + + if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) + goto unlock; if (!__intel_breadcrumbs_arm_irq(b)) goto unlock; diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 3bb7320249ae..0b2fe55e6194 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -560,22 +560,25 @@ bool __i915_request_submit(struct i915_request *request) engine->serial++; result = true; -xfer: /* We may be recursing from the signal callback of another i915 fence */ - spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); - +xfer: if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) { list_move_tail(&request->sched.link, &engine->active.requests); clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags); - __notify_execute_cb(request); } - GEM_BUG_ON(!llist_empty(&request->execute_cb)); - if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) && - !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) && - !i915_request_enable_breadcrumb(request)) - intel_engine_signal_breadcrumbs(engine); + /* We may be recursing from the signal callback of another i915 fence */ + if (!i915_request_signaled(request)) { + spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); + + __notify_execute_cb(request); + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, + &request->fence.flags) && + !i915_request_enable_breadcrumb(request)) + intel_engine_signal_breadcrumbs(engine); - spin_unlock(&request->lock); + spin_unlock(&request->lock); + GEM_BUG_ON(!llist_empty(&request->execute_cb)); + } return result; } -- cgit v1.2.3 From 1c26b8e09004477c070f0dccb2369438ef9e1fc9 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Mon, 13 Jul 2020 13:07:45 -0400 Subject: drm/probe_helper: Add drm_connector_helper_funcs.mode_valid_ctx This is just an atomic version of mode_valid, which is intended to be used for situations where a driver might need to check the atomic state of objects other than the connector itself. 
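As an illustration of the contract only (the verdict is written through the out parameter, the return value is 0 or a negative error such as -EDEADLK), a driver-side callback might look roughly like the sketch below; every foo_* name is a placeholder, not code from this series:

	static int foo_connector_mode_valid_ctx(struct drm_connector *connector,
						struct drm_display_mode *mode,
						struct drm_modeset_acquire_ctx *ctx,
						enum drm_mode_status *status)
	{
		struct foo_device *foo = to_foo(connector->dev);	/* placeholder */
		int ret;

		/* Additional locks may be taken against the ctx the helpers pass in. */
		ret = drm_modeset_lock(&foo->shared_resource_lock, ctx);
		if (ret)
			return ret;	/* typically -EDEADLK; the probe helpers back off and retry */

		/* On a normal result, write the verdict to *status and return 0. */
		if (mode->clock > foo->max_dotclk_khz)	/* placeholder limit */
			*status = MODE_CLOCK_HIGH;
		else
			*status = MODE_OK;

		return 0;
	}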
One such example is with MST, where the maximum possible bandwidth on a connector can change dynamically irregardless of the display configuration. Changes since v1: * Use new drm logging functions * Make some corrections in the mode_valid_ctx kdoc * Return error codes or 0 from ->mode_valid_ctx() on fail, and store the drm_mode_status in an additional function parameter Changes since v2: * Don't accidentally assign ret to mode->status on success, or we'll squash legitimate mode validation results * Don't forget to assign MODE_OK to status in drm_connector_mode_valid() if we have no callbacks * Drop leftover hunk in drm_modes.h around enum drm_mode_status Changes since v3: * s/return ret/return 0/ in drm_mode_validate_pipeline() * Minor cleanup in drm_connector_mode_valid() Tested-by: Imre Deak Reviewed-by: Imre Deak Cc: Lee Shawn C Signed-off-by: Lyude Paul Link: https://patchwork.freedesktop.org/patch/msgid/20200713170746.254388-2-lyude@redhat.com --- drivers/gpu/drm/drm_crtc_helper_internal.h | 7 ++- drivers/gpu/drm/drm_probe_helper.c | 97 ++++++++++++++++++++---------- include/drm/drm_modeset_helper_vtables.h | 42 +++++++++++++ 3 files changed, 111 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h index f0a66ef47e5a..25ce42e79995 100644 --- a/drivers/gpu/drm/drm_crtc_helper_internal.h +++ b/drivers/gpu/drm/drm_crtc_helper_internal.h @@ -73,8 +73,11 @@ enum drm_mode_status drm_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode); enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder, const struct drm_display_mode *mode); -enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode); +int +drm_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode, + struct drm_modeset_acquire_ctx *ctx, + enum drm_mode_status *status); struct drm_encoder * drm_connector_get_single_encoder(struct drm_connector *connector); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 26e997f1524f..601a4f25bb47 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -86,17 +86,19 @@ drm_mode_validate_flag(const struct drm_display_mode *mode, return MODE_OK; } -static enum drm_mode_status +static int drm_mode_validate_pipeline(struct drm_display_mode *mode, - struct drm_connector *connector) + struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + enum drm_mode_status *status) { struct drm_device *dev = connector->dev; - enum drm_mode_status ret = MODE_OK; struct drm_encoder *encoder; + int ret; /* Step 1: Validate against connector */ - ret = drm_connector_mode_valid(connector, mode); - if (ret != MODE_OK) + ret = drm_connector_mode_valid(connector, mode, ctx, status); + if (ret || *status != MODE_OK) return ret; /* Step 2: Validate against encoders and crtcs */ @@ -104,8 +106,8 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode, struct drm_bridge *bridge; struct drm_crtc *crtc; - ret = drm_encoder_mode_valid(encoder, mode); - if (ret != MODE_OK) { + *status = drm_encoder_mode_valid(encoder, mode); + if (*status != MODE_OK) { /* No point in continuing for crtc check as this encoder * will not accept the mode anyway. 
If all encoders * reject the mode then, at exit, ret will not be @@ -114,8 +116,8 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode, } bridge = drm_bridge_chain_get_first_bridge(encoder); - ret = drm_bridge_chain_mode_valid(bridge, mode); - if (ret != MODE_OK) { + *status = drm_bridge_chain_mode_valid(bridge, mode); + if (*status != MODE_OK) { /* There is also no point in continuing for crtc check * here. */ continue; @@ -125,17 +127,17 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode, if (!drm_encoder_crtc_ok(encoder, crtc)) continue; - ret = drm_crtc_mode_valid(crtc, mode); - if (ret == MODE_OK) { + *status = drm_crtc_mode_valid(crtc, mode); + if (*status == MODE_OK) { /* If we get to this point there is at least * one combination of encoder+crtc that works * for this mode. Lets return now. */ - return ret; + return 0; } } } - return ret; + return 0; } static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) @@ -196,16 +198,27 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder, return encoder_funcs->mode_valid(encoder, mode); } -enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +int +drm_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode, + struct drm_modeset_acquire_ctx *ctx, + enum drm_mode_status *status) { const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; + int ret = 0; + + if (!connector_funcs) + *status = MODE_OK; + else if (connector_funcs->mode_valid_ctx) + ret = connector_funcs->mode_valid_ctx(connector, mode, ctx, + status); + else if (connector_funcs->mode_valid) + *status = connector_funcs->mode_valid(connector, mode); + else + *status = MODE_OK; - if (!connector_funcs || !connector_funcs->mode_valid) - return MODE_OK; - - return connector_funcs->mode_valid(connector, mode); + return ret; } #define DRM_OUTPUT_POLL_PERIOD (10*HZ) @@ -375,8 +388,9 @@ EXPORT_SYMBOL(drm_helper_probe_detect); * (if specified) * - drm_mode_validate_flag() checks the modes against basic connector * capabilities (interlace_allowed,doublescan_allowed,stereo_allowed) - * - the optional &drm_connector_helper_funcs.mode_valid helper can perform - * driver and/or sink specific checks + * - the optional &drm_connector_helper_funcs.mode_valid or + * &drm_connector_helper_funcs.mode_valid_ctx helpers can perform driver + * and/or sink specific checks * - the optional &drm_crtc_helper_funcs.mode_valid, * &drm_bridge_funcs.mode_valid and &drm_encoder_helper_funcs.mode_valid * helpers can perform driver and/or source specific checks which are also @@ -507,22 +521,39 @@ retry: mode_flags |= DRM_MODE_FLAG_3D_MASK; list_for_each_entry(mode, &connector->modes, head) { - if (mode->status == MODE_OK) - mode->status = drm_mode_validate_driver(dev, mode); + if (mode->status != MODE_OK) + continue; + + mode->status = drm_mode_validate_driver(dev, mode); + if (mode->status != MODE_OK) + continue; - if (mode->status == MODE_OK) - mode->status = drm_mode_validate_size(mode, maxX, maxY); + mode->status = drm_mode_validate_size(mode, maxX, maxY); + if (mode->status != MODE_OK) + continue; - if (mode->status == MODE_OK) - mode->status = drm_mode_validate_flag(mode, mode_flags); + mode->status = drm_mode_validate_flag(mode, mode_flags); + if (mode->status != MODE_OK) + continue; - if (mode->status == MODE_OK) - mode->status = drm_mode_validate_pipeline(mode, - connector); + ret = drm_mode_validate_pipeline(mode, connector, &ctx, + 
&mode->status); + if (ret) { + drm_dbg_kms(dev, + "drm_mode_validate_pipeline failed: %d\n", + ret); + + if (drm_WARN_ON_ONCE(dev, ret != -EDEADLK)) { + mode->status = MODE_ERROR; + } else { + drm_modeset_backoff(&ctx); + goto retry; + } + } - if (mode->status == MODE_OK) - mode->status = drm_mode_validate_ycbcr420(mode, - connector); + if (mode->status != MODE_OK) + continue; + mode->status = drm_mode_validate_ycbcr420(mode, connector); } prune: diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 421a30f08463..4efec30f8bad 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -968,6 +968,48 @@ struct drm_connector_helper_funcs { */ enum drm_mode_status (*mode_valid)(struct drm_connector *connector, struct drm_display_mode *mode); + + /** + * @mode_valid_ctx: + * + * Callback to validate a mode for a connector, irrespective of the + * specific display configuration. + * + * This callback is used by the probe helpers to filter the mode list + * (which is usually derived from the EDID data block from the sink). + * See e.g. drm_helper_probe_single_connector_modes(). + * + * This function is optional, and is the atomic version of + * &drm_connector_helper_funcs.mode_valid. + * + * To allow for accessing the atomic state of modesetting objects, the + * helper libraries always call this with ctx set to a valid context, + * and &drm_mode_config.connection_mutex will always be locked with + * the ctx parameter set to @ctx. This allows for taking additional + * locks as required. + * + * Even though additional locks may be acquired, this callback is + * still expected not to take any constraints into account which would + * be influenced by the currently set display state - such constraints + * should be handled in the driver's atomic check. For example, if a + * connector shares display bandwidth with other connectors then it + * would be ok to validate the minimum bandwidth requirement of a mode + * against the maximum possible bandwidth of the connector. But it + * wouldn't be ok to take the current bandwidth usage of other + * connectors into account, as this would change depending on the + * display state. + * + * Returns: + * 0 if &drm_connector_helper_funcs.mode_valid_ctx succeeded and wrote + * the &enum drm_mode_status value to @status, or a negative error + * code otherwise. + * + */ + int (*mode_valid_ctx)(struct drm_connector *connector, + struct drm_display_mode *mode, + struct drm_modeset_acquire_ctx *ctx, + enum drm_mode_status *status); + /** * @best_encoder: * -- cgit v1.2.3 From e398d7c126c10eb40f87edb6d4ce6dea6c123e7f Mon Sep 17 00:00:00 2001 From: Lee Shawn C Date: Mon, 13 Jul 2020 13:07:46 -0400 Subject: drm/i915/mst: filter out the display mode exceed sink's capability So far, max dot clock rate for MST mode rely on physcial bandwidth limitation. It would caused compatibility issue if source display resolution exceed MST hub output ability. For example, source DUT had DP 1.2 output capability. And MST docking just support HDMI 1.4 spec. When a HDMI 2.0 monitor connected. Source would retrieve EDID from external and get max resolution 4k@60fps. DP 1.2 can support 4K@60fps because it did not surpass DP physical bandwidth limitation. Do modeset to 4k@60fps, source output display data but MST docking can't output HDMI properly due to this resolution already over HDMI 1.4 spec. Refer to commit ("drm/dp_mst: Use full_pbn instead of available_pbn for bandwidth checks"). 
Source driver should refer to full_pbn to evaluate sink output capability. And filter out the resolution surpass sink output limitation. Changes since v1: * Using mgr->base.lock to protect full_pbn. Changes since v2: * Add ctx lock. Changes since v3: * s/intel_dp_mst_mode_clock_exceed_pbn_bandwidth/ intel_dp_mst_mode_clock_exceeds_pbn_bw/ * Use the new drm_connector_helper_funcs.mode_valid_ctx to properly pipe down the drm_modeset_acquire_ctx that the probe helpers are using, so we can safely grab &mgr->base.lock without deadlocking Changes since v4: * Move drm_dp_calc_pbn_mode(mode->clock, bpp, false) > port->full_pbn check * Fix the bpp we use in drm_dp_calc_pbn_mode() * Drop leftover (!mgr) check * Don't check for if full_pbn is unset. To be clear - it _can_ be unset, but if it is then it's certainly a bug in DRM or a non-compliant sink as full_pbn should always be populated by the time we call ->mode_valid_ctx. We should workaround non-compliant sinks with full_pbn=0, but that should happen in the DP MST helpers so we can estimate the full_pbn value as best we can. Tested-by: Imre Deak Reviewed-by: Imre Deak Cc: Manasi Navare Cc: Jani Nikula Cc: Ville Syrjala Cc: Cooper Chiou Co-developed-by: Lyude Paul Signed-off-by: Lee Shawn C Signed-off-by: Lyude Paul Link: https://patchwork.freedesktop.org/patch/msgid/20200713170746.254388-3-lyude@redhat.com --- drivers/gpu/drm/i915/display/intel_dp_mst.c | 55 ++++++++++++++++++++--------- 1 file changed, 38 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index bdc19b04b2c1..a2d91a499700 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -639,39 +639,60 @@ static int intel_dp_mst_get_modes(struct drm_connector *connector) return intel_dp_mst_get_ddc_modes(connector); } -static enum drm_mode_status -intel_dp_mst_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static int +intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, + struct drm_display_mode *mode, + struct drm_modeset_acquire_ctx *ctx, + enum drm_mode_status *status) { struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; + struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst_mgr; + struct drm_dp_mst_port *port = intel_connector->port; + const int min_bpp = 18; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; int max_rate, mode_rate, max_lanes, max_link_clock; + int ret; - if (drm_connector_is_unregistered(connector)) - return MODE_ERROR; + if (drm_connector_is_unregistered(connector)) { + *status = MODE_ERROR; + return 0; + } - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) - return MODE_NO_DBLESCAN; + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) { + *status = MODE_NO_DBLESCAN; + return 0; + } max_link_clock = intel_dp_max_link_rate(intel_dp); max_lanes = intel_dp_max_lane_count(intel_dp); max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); - mode_rate = intel_dp_link_required(mode->clock, 18); + mode_rate = intel_dp_link_required(mode->clock, min_bpp); - /* TODO - validate mode against available PBN for link */ - if (mode->clock < 10000) - return MODE_CLOCK_LOW; + ret = drm_modeset_lock(&mgr->base.lock, ctx); + if (ret) + return ret; - if (mode->flags & DRM_MODE_FLAG_DBLCLK) - return MODE_H_ILLEGAL; + if (mode_rate > max_rate || mode->clock > max_dotclk || + 
drm_dp_calc_pbn_mode(mode->clock, min_bpp, false) > port->full_pbn) { + *status = MODE_CLOCK_HIGH; + return 0; + } + + if (mode->clock < 10000) { + *status = MODE_CLOCK_LOW; + return 0; + } - if (mode_rate > max_rate || mode->clock > max_dotclk) - return MODE_CLOCK_HIGH; + if (mode->flags & DRM_MODE_FLAG_DBLCLK) { + *status = MODE_H_ILLEGAL; + return 0; + } - return intel_mode_valid_max_plane_size(dev_priv, mode); + *status = intel_mode_valid_max_plane_size(dev_priv, mode); + return 0; } static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector, @@ -700,7 +721,7 @@ intel_dp_mst_detect(struct drm_connector *connector, static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { .get_modes = intel_dp_mst_get_modes, - .mode_valid = intel_dp_mst_mode_valid, + .mode_valid_ctx = intel_dp_mst_mode_valid_ctx, .atomic_best_encoder = intel_mst_atomic_best_encoder, .atomic_check = intel_dp_mst_atomic_check, .detect_ctx = intel_dp_mst_detect, -- cgit v1.2.3 From a133c6988f7080fb8baf86ecf5161065bec65fb6 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 9 Jul 2020 17:58:45 +0300 Subject: drm/i915: WARN if max vswing/pre-emphasis violates the DP spec MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to the DP spec a DPTX must support vswing/pre-emphasis up to and including level 2. Level 3 is optional (actually DP 1.4a seems to make even level 3 mandatory for HBR2/3, while leaving it optional for RBR/HBR1). WARN if out encoders' .voltage_max()/.preemph_max() return an illegal value. Signed-off-by: Ville Syrjälä Reviewed-by: José Roberto de Souza Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200709145845.18118-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_dp_link_training.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 2493142a70e9..a23ed7290843 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -52,6 +52,7 @@ static u8 dp_voltage_max(u8 preemph) void intel_dp_get_adjust_train(struct intel_dp *intel_dp, const u8 link_status[DP_LINK_STATUS_SIZE]) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 v = 0; u8 p = 0; int lane; @@ -64,12 +65,20 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp, } preemph_max = intel_dp->preemph_max(intel_dp); + drm_WARN_ON_ONCE(&i915->drm, + preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 && + preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3); + if (p >= preemph_max) p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; v = min(v, dp_voltage_max(p)); voltage_max = intel_dp->voltage_max(intel_dp); + drm_WARN_ON_ONCE(&i915->drm, + voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 && + voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3); + if (v >= voltage_max) v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; -- cgit v1.2.3 From 2ffcfd8def00265c2f1f9fc711104fcd656101f9 Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Mon, 13 Jul 2020 11:23:16 -0700 Subject: drm/i915: Add has_master_unit_irq flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add flag to differentiate platforms with and without the master IRQ control bit. 
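Condensed from the hunks in the following patches of this series, the wiring for the new flag ends up looking roughly like this (nothing here beyond what those patches add):

	/* platform definition (i915_pci.c): set only where the bit exists */
	.has_master_unit_irq = 1,

	/* feature test macro (i915_drv.h) */
	#define HAS_MASTER_UNIT_IRQ(dev_priv) \
		(INTEL_INFO(dev_priv)->has_master_unit_irq)

	/* consumed at interrupt setup/handling decision points, e.g. */
	if (HAS_MASTER_UNIT_IRQ(dev_priv))
		dg1_master_intr_enable(uncore->regs);
	else
		gen11_master_intr_enable(uncore->regs);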
Signed-off-by: Stuart Summers Signed-off-by: Lucas De Marchi Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200713182321.12390-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/intel_device_info.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 87973dedf8e7..d357043db44e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1599,6 +1599,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \ (INTEL_INFO(dev_priv)->has_logical_ring_preemption) +#define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq) + #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv) #define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type) diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 242d00862b1a..71edb396b31a 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -122,6 +122,7 @@ enum intel_ppgtt_type { func(has_logical_ring_contexts); \ func(has_logical_ring_elsq); \ func(has_logical_ring_preemption); \ + func(has_master_unit_irq); \ func(has_pooled_eu); \ func(has_rc6); \ func(has_rc6p); \ -- cgit v1.2.3 From 05e265841f7eb87f84308935d78e8c18fa2a76ce Mon Sep 17 00:00:00 2001 From: Abdiel Janulgue Date: Mon, 13 Jul 2020 11:23:17 -0700 Subject: drm/i915/dg1: add initial DG-1 definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bspec: 33617, 33617 v2: s/intel_dg1_info/dg1_info/ as done for other platforms before and try to shut up compiler about ununsed variable that we know shouldn't be used (Lucas) v3: replace explicit attribute with __maybe_unused (Lucas) Cc: José Roberto de Souza Cc: Daniele Ceraolo Spurio Cc: Stuart Summers Cc: Vanshidhar Konda Cc: Lucas De Marchi Cc: Aravind Iddamsetty Cc: Matt Roper Signed-off-by: Abdiel Janulgue Signed-off-by: Lucas De Marchi Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200713182321.12390-2-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 7 +++++++ drivers/gpu/drm/i915/i915_pci.c | 12 ++++++++++++ drivers/gpu/drm/i915/intel_device_info.c | 1 + drivers/gpu/drm/i915/intel_device_info.h | 1 + 4 files changed, 21 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d357043db44e..7f668a49b03d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1431,6 +1431,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE) #define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE) #define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE) +#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, INTEL_DG1) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev_priv) \ @@ -1559,6 +1560,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_RKL_REVID(p, since, until) \ (IS_ROCKETLAKE(p) && IS_REVID(p, since, until)) +#define DG1_REVID_A0 0x0 +#define DG1_REVID_B0 0x1 + +#define IS_DG1_REVID(p, since, until) \ + (IS_DG1(p) && IS_REVID(p, since, until)) + #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) #define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 
9) && IS_LP(dev_priv)) #define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index db916fff3f0d..2338f92ce490 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -897,8 +897,20 @@ static const struct intel_device_info rkl_info = { #define GEN12_DGFX_FEATURES \ GEN12_FEATURES, \ + .memory_regions = REGION_SMEM | REGION_LMEM, \ + .has_master_unit_irq = 1, \ .is_dgfx = 1 +static const struct intel_device_info dg1_info __maybe_unused = { + GEN12_DGFX_FEATURES, + PLATFORM(INTEL_DG1), + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), + .require_force_probe = 1, + .platform_engine_mask = + BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | + BIT(VCS0) | BIT(VCS2), +}; + #undef GEN #undef PLATFORM diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 3f5dc37d2b7c..40c590db3c76 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -64,6 +64,7 @@ static const char * const platform_names[] = { PLATFORM_NAME(ELKHARTLAKE), PLATFORM_NAME(TIGERLAKE), PLATFORM_NAME(ROCKETLAKE), + PLATFORM_NAME(DG1), }; #undef PLATFORM_NAME diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 71edb396b31a..fd2385457ab6 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -82,6 +82,7 @@ enum intel_platform { /* gen12 */ INTEL_TIGERLAKE, INTEL_ROCKETLAKE, + INTEL_DG1, INTEL_MAX_PLATFORMS }; -- cgit v1.2.3 From fd38cdb8110533f7c7150e79f18b716fd2981e8c Mon Sep 17 00:00:00 2001 From: Abdiel Janulgue Date: Mon, 13 Jul 2020 11:23:18 -0700 Subject: drm/i915/dg1: Add DG1 PCI IDs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the PCI ID for DG1, but keep it out of the table we use to register the driver. At this point we can't consider the driver ready to bind to the device since we basically miss support for everything. When more support is merged we can enable it to work partially for example as a display-only driver. v2: remove DG1 from the pci table and reword commit message (Lucas) Bspec: 44463 Cc: Matthew Auld Cc: James Ausmus Cc: Joonas Lahtinen Cc: Matt Roper Signed-off-by: Abdiel Janulgue Signed-off-by: Lucas De Marchi Reviewed-by: José Roberto de Souza # v1 Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20200713182321.12390-3-lucas.demarchi@intel.com --- include/drm/i915_pciids.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index d6cb28992ba0..96e408b4bdc9 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -618,4 +618,8 @@ INTEL_VGA_DEVICE(0x4C90, info), \ INTEL_VGA_DEVICE(0x4C9A, info) +/* DG1 */ +#define INTEL_DG1_IDS(info) \ + INTEL_VGA_DEVICE(0x4905, info) + #endif /* _I915_PCIIDS_H */ -- cgit v1.2.3 From 97b492f5f98357c5238d74b1070d42c5fe4ac75d Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Mon, 13 Jul 2020 11:23:19 -0700 Subject: drm/i915/dg1: add support for the master unit interrupt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DG1 has master unit interrupt register which is used to indicate the correct source of interrupt. 
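In short, the interrupt flow implemented by the hunks below is (comment summary only):

	/*
	 * dg1_irq_handler():
	 *  1. write 0 to DG1_MSTR_UNIT_INTR                - mask further interrupts
	 *  2. read DG1_MSTR_UNIT_INTR, write the value back - sample and ack the
	 *     per-unit indication bits
	 *  3. read/ack GEN11_GFX_MSTR_IRQ, masking out GEN11_MASTER_IRQ which no
	 *     longer exists on DG1
	 *  4. hand the result to the shared gen11 GT/display handling
	 *  5. re-arm by setting DG1_MSTR_IRQ in DG1_MSTR_UNIT_INTR
	 */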
v2: fix coding style on register definition Cc: Radhakrishna Sripada Cc: Daniele Spurio Ceraolo Cc: Matt Roper Signed-off-by: Lucas De Marchi Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200713182321.12390-4-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 4 +++ drivers/gpu/drm/i915/i915_irq.c | 56 +++++++++++++++++++++++++++++++++++-- drivers/gpu/drm/i915/i915_reg.h | 4 +++ 3 files changed, 61 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 78ebede51fb3..784219962193 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -495,6 +495,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data) seq_printf(m, "PCU interrupt enable:\t%08x\n", I915_READ(GEN8_PCU_IER)); } else if (INTEL_GEN(dev_priv) >= 11) { + if (HAS_MASTER_UNIT_IRQ(dev_priv)) + seq_printf(m, "Master Unit Interrupt Control: %08x\n", + I915_READ(DG1_MSTR_UNIT_INTR)); + seq_printf(m, "Master Interrupt Control: %08x\n", I915_READ(GEN11_GFX_MSTR_IRQ)); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 562b43ed077f..4c9d0a4a2476 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2584,6 +2584,46 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) gen11_master_intr_enable); } +static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs) +{ + u32 val; + + /* First disable interrupts */ + raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0); + + /* Get the indication levels and ack the master unit */ + val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR); + if (unlikely(!val)) + return 0; + + raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val); + + /* + * Now with master disabled, get a sample of level indications + * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ + * out as this bit doesn't exist anymore for DG1 + */ + val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ; + if (unlikely(!val)) + return 0; + + raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val); + + return val; +} + +static inline void dg1_master_intr_enable(void __iomem * const regs) +{ + raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ); +} + +static irqreturn_t dg1_irq_handler(int irq, void *arg) +{ + return __gen11_irq_handler(arg, + dg1_master_intr_disable_and_ack, + dg1_master_intr_enable); +} + /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ @@ -2920,7 +2960,10 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - gen11_master_intr_disable(dev_priv->uncore.regs); + if (HAS_MASTER_UNIT_IRQ(dev_priv)) + dg1_master_intr_disable_and_ack(dev_priv->uncore.regs); + else + gen11_master_intr_disable(dev_priv->uncore.regs); gen11_gt_irq_reset(&dev_priv->gt); gen11_display_irq_reset(dev_priv); @@ -3517,8 +3560,13 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); - gen11_master_intr_enable(uncore->regs); - POSTING_READ(GEN11_GFX_MSTR_IRQ); + if (HAS_MASTER_UNIT_IRQ(dev_priv)) { + dg1_master_intr_enable(uncore->regs); + POSTING_READ(DG1_MSTR_UNIT_INTR); + } else { + gen11_master_intr_enable(uncore->regs); + POSTING_READ(GEN11_GFX_MSTR_IRQ); + } } static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) @@ -4043,6 +4091,8 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv) else 
return i8xx_irq_handler; } else { + if (HAS_MASTER_UNIT_IRQ(dev_priv)) + return dg1_irq_handler; if (INTEL_GEN(dev_priv) >= 11) return gen11_irq_handler; else if (INTEL_GEN(dev_priv) >= 8) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 86a23ced051b..4e796ff4d7d0 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7733,6 +7733,10 @@ enum { #define GEN11_GT_DW1_IRQ (1 << 1) #define GEN11_GT_DW0_IRQ (1 << 0) +#define DG1_MSTR_UNIT_INTR _MMIO(0x190008) +#define DG1_MSTR_IRQ REG_BIT(31) +#define DG1_MSTR_UNIT(u) REG_BIT(u) + #define GEN11_DISPLAY_INT_CTL _MMIO(0x44200) #define GEN11_DISPLAY_IRQ_ENABLE (1 << 31) #define GEN11_AUDIO_CODEC_IRQ (1 << 24) -- cgit v1.2.3 From f619e51672e867f86433b63c896b30b39a4ca8a0 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Mon, 13 Jul 2020 11:23:20 -0700 Subject: drm/i915/dg1: Remove SHPD_FILTER_CNT register programming MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bspec asks us to remove the special programming of the SHPD_FILTER_CNT register which we have been doing since CNP+. Bspec: 49305 Cc: Matt Roper Signed-off-by: Anusha Srivatsa Signed-off-by: Lucas De Marchi Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20200713182321.12390-5-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_irq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4c9d0a4a2476..1fa67700d8f4 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3114,7 +3114,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv, hotplug_irqs = sde_ddi_mask | sde_tc_mask; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); - I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); + if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) + I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); -- cgit v1.2.3 From 51e3a64fafd534dfe1da1c53dc6917a51f0ba75c Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Mon, 13 Jul 2020 11:23:21 -0700 Subject: drm/i915/dg1: Add fake PCH DG1 has the south engine display on the same PCI device. Ideally we could use HAS_PCH_SPLIT(), but that macro is misused all across the code base to rather signify a range of gens. So add a fake one for DG1 to be used where needed. 
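Call sites can then key off the usual PCH test macro; a purely hypothetical consumer (this patch itself only adds the definitions and the detection short-circuit) might read:

	/* hypothetical consumer, not part of this patch */
	if (HAS_PCH_DG1(dev_priv))
		return;	/* south display sits on the same PCI device, no ISA bridge to probe */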
Cc: Aditya Swarup Signed-off-by: Lucas De Marchi Reviewed-by: Anusha Srivatsa Link: https://patchwork.freedesktop.org/patch/msgid/20200713182321.12390-6-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/intel_pch.c | 6 ++++++ drivers/gpu/drm/i915/intel_pch.h | 4 ++++ 2 files changed, 10 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index c668e99eb2e4..6c97192e9ca8 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -188,6 +188,12 @@ void intel_detect_pch(struct drm_i915_private *dev_priv) { struct pci_dev *pch = NULL; + /* DG1 has south engine display on the same PCI device */ + if (IS_DG1(dev_priv)) { + dev_priv->pch_type = PCH_DG1; + return; + } + /* * The reason to probe ISA bridge instead of Dev31:Fun0 is to * make graphics device passthrough work easy for VMM, that only diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index 3053d1ce398b..06d2cd50af0b 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -26,6 +26,9 @@ enum intel_pch { PCH_JSP, /* Jasper Lake PCH */ PCH_MCC, /* Mule Creek Canyon PCH */ PCH_TGP, /* Tiger Lake PCH */ + + /* Fake PCHs, functionality handled on the same PCI dev */ + PCH_DG1 = 1024, }; #define INTEL_PCH_DEVICE_ID_MASK 0xff80 @@ -56,6 +59,7 @@ enum intel_pch { #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) #define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id) +#define HAS_PCH_DG1(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_DG1) #define HAS_PCH_JSP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_JSP) #define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC) #define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP) -- cgit v1.2.3 From a581483b1e5466d28fc50ff623fba31cea2cccb6 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 12 Feb 2020 14:54:45 +0100 Subject: drm/i915: Move cec_notifier to intel_hdmi_connector_unregister, v2. This fixes the following KASAN splash on module reload: [ 145.136327] ================================================================== [ 145.136502] BUG: KASAN: use-after-free in intel_hdmi_destroy+0x74/0x80 [i915] [ 145.136514] Read of size 8 at addr ffff888216641830 by task kworker/1:1/134 [ 145.136535] CPU: 1 PID: 134 Comm: kworker/1:1 Tainted: G U T 5.5.0-rc7-valkyria+ #5783 [ 145.136539] Hardware name: GIGABYTE GB-BKi3A-7100/MFLP3AP-00, BIOS F1 07/27/2016 [ 145.136546] Workqueue: events drm_connector_free_work_fn [ 145.136551] Call Trace: [ 145.136560] dump_stack+0xa1/0xe0 [ 145.136571] print_address_description.constprop.0+0x1e/0x210 [ 145.136639] ? intel_hdmi_destroy+0x74/0x80 [i915] [ 145.136703] ? intel_hdmi_destroy+0x74/0x80 [i915] [ 145.136710] __kasan_report.cold+0x1b/0x37 [ 145.136790] ? intel_hdmi_destroy+0x74/0x80 [i915] [ 145.136863] ? intel_hdmi_destroy+0x74/0x80 [i915] [ 145.136870] kasan_report+0x27/0x30 [ 145.136881] __asan_report_load8_noabort+0x1c/0x20 [ 145.136946] intel_hdmi_destroy+0x74/0x80 [i915] [ 145.136954] drm_connector_free_work_fn+0xd1/0x100 [ 145.136967] process_one_work+0x86e/0x1610 [ 145.136987] ? pwq_dec_nr_in_flight+0x2f0/0x2f0 [ 145.137004] ? move_linked_works+0x128/0x2c0 [ 145.137021] worker_thread+0x63e/0xc90 [ 145.137048] kthread+0x2f6/0x3f0 [ 145.137054] ? calculate_sigpending+0x81/0xa0 [ 145.137059] ? process_one_work+0x1610/0x1610 [ 145.137064] ? 
kthread_bind+0x40/0x40 [ 145.137075] ret_from_fork+0x24/0x30 [ 145.137111] Allocated by task 0: [ 145.137119] (stack is not available) [ 145.137137] Freed by task 5053: [ 145.137147] save_stack+0x28/0x90 [ 145.137152] __kasan_slab_free+0x136/0x180 [ 145.137157] kasan_slab_free+0x26/0x30 [ 145.137161] kfree+0xe6/0x350 [ 145.137242] intel_ddi_encoder_destroy+0x60/0x80 [i915] [ 145.137252] drm_mode_config_cleanup+0x11d/0x8f0 [ 145.137329] intel_modeset_driver_remove+0x1f5/0x350 [i915] [ 145.137403] i915_driver_remove+0xc4/0x130 [i915] [ 145.137482] i915_pci_remove+0x3e/0x90 [i915] [ 145.137489] pci_device_remove+0x108/0x2d0 [ 145.137494] device_release_driver_internal+0x1e6/0x4a0 [ 145.137499] driver_detach+0xcb/0x198 [ 145.137503] bus_remove_driver+0xde/0x204 [ 145.137508] driver_unregister+0x6d/0xa0 [ 145.137513] pci_unregister_driver+0x2e/0x230 [ 145.137576] i915_exit+0x1f/0x26 [i915] [ 145.137157] kasan_slab_free+0x26/0x30 [ 145.137161] kfree+0xe6/0x350 [ 145.137242] intel_ddi_encoder_destroy+0x60/0x80 [i915] [ 145.137252] drm_mode_config_cleanup+0x11d/0x8f0 [ 145.137329] intel_modeset_driver_remove+0x1f5/0x350 [i915] [ 145.137403] i915_driver_remove+0xc4/0x130 [i915] [ 145.137482] i915_pci_remove+0x3e/0x90 [i915] [ 145.137489] pci_device_remove+0x108/0x2d0 [ 145.137494] device_release_driver_internal+0x1e6/0x4a0 [ 145.137499] driver_detach+0xcb/0x198 [ 145.137503] bus_remove_driver+0xde/0x204 [ 145.137508] driver_unregister+0x6d/0xa0 [ 145.137513] pci_unregister_driver+0x2e/0x230 [ 145.137576] i915_exit+0x1f/0x26 [i915] [ 145.137581] __x64_sys_delete_module+0x35b/0x470 [ 145.137586] do_syscall_64+0x99/0x4e0 [ 145.137591] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 145.137606] The buggy address belongs to the object at ffff888216640000 which belongs to the cache kmalloc-8k of size 8192 [ 145.137618] The buggy address is located 6192 bytes inside of 8192-byte region [ffff888216640000, ffff888216642000) [ 145.137630] The buggy address belongs to the page: [ 145.137640] page:ffffea0008599000 refcount:1 mapcount:0 mapping:ffff888107c02a80 index:0xffff888216644000 compound_mapcount: 0 [ 145.137647] raw: 0200000000010200 0000000000000000 0000000100000001 ffff888107c02a80 [ 145.137652] raw: ffff888216644000 0000000080020001 00000001ffffffff 0000000000000000 [ 145.137656] page dumped because: kasan: bad access detected [ 145.137668] Memory state around the buggy address: [ 145.137678] ffff888216641700: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 145.137687] ffff888216641780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 145.137697] >ffff888216641800: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 145.137706] ^ [ 145.137715] ffff888216641880: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 145.137724] ffff888216641900: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb [ 145.137733] ================================================================== [ 145.137742] Disabling lock debugging due to kernel taint Changes since v1: - Add fixes tags. - Use early unregister. 
Signed-off-by: Maarten Lankhorst Fixes: 9c229127aee2 ("drm/i915: hdmi: add CEC notifier to intel_hdmi") Cc: # v4.19+ Reviewed-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20200212135445.1469133-1-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/display/intel_hdmi.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 7dc11ea4c6e4..de2ce5632b94 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2883,19 +2883,13 @@ intel_hdmi_connector_register(struct drm_connector *connector) return ret; } -static void intel_hdmi_destroy(struct drm_connector *connector) +static void intel_hdmi_connector_unregister(struct drm_connector *connector) { struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier; cec_notifier_conn_unregister(n); - intel_connector_destroy(connector); -} - -static void intel_hdmi_connector_unregister(struct drm_connector *connector) -{ intel_hdmi_remove_i2c_symlink(connector); - intel_connector_unregister(connector); } @@ -2907,7 +2901,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .atomic_set_property = intel_digital_connector_atomic_set_property, .late_register = intel_hdmi_connector_register, .early_unregister = intel_hdmi_connector_unregister, - .destroy = intel_hdmi_destroy, + .destroy = intel_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, .atomic_duplicate_state = intel_digital_connector_duplicate_state, }; -- cgit v1.2.3 From 0428ab013fdd39dbfb8f4cd8ad2b60af3776c6b9 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Sat, 11 Jul 2020 11:03:36 +0300 Subject: drm/i915: Recalculate FBC w/a stride when needed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we're failing to recalculate the gen9 FBC w/a stride unless something more drastic than just the modifier itself has changed. This often leaves us with FBC enabled with the linear fbdev framebuffer without the w/a stride enabled. That will cause an immediate underrun and FBC will get promptly disabled. Fix the problem by checking if the w/a stride is about to change, and go through the full dance if so. This part of the FBC code is still pretty much a disaster and will need lots more work. But this should at least fix the immediate issue. 
v2: Deactivate FBC when the modifier changes since that will likely require resetting the w/a CFB stride Cc: stable@vger.kernel.org Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20200711080336.13423-1-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_fbc.c | 33 +++++++++++++++++++++++++------- drivers/gpu/drm/i915/i915_drv.h | 1 + 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index ef2eb14f6157..85723fba6002 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -742,6 +742,25 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv) fbc->compressed_fb.size * fbc->threshold; } +static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && + cache->fb.modifier != I915_FORMAT_MOD_X_TILED) + return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; + else + return 0; +} + +static bool intel_fbc_gen9_wa_cfb_stride_changed(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + return fbc->params.gen9_wa_cfb_stride != intel_fbc_gen9_wa_cfb_stride(dev_priv); +} + static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) { struct intel_fbc *fbc = &dev_priv->fbc; @@ -902,6 +921,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane; params->fb.format = cache->fb.format; + params->fb.modifier = cache->fb.modifier; params->fb.stride = cache->fb.stride; params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); @@ -931,6 +951,9 @@ static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) if (params->fb.format != cache->fb.format) return false; + if (params->fb.modifier != cache->fb.modifier) + return false; + if (params->fb.stride != cache->fb.stride) return false; @@ -1218,7 +1241,8 @@ void intel_fbc_enable(struct intel_atomic_state *state, if (fbc->crtc) { if (fbc->crtc != crtc || - !intel_fbc_cfb_size_changed(dev_priv)) + (!intel_fbc_cfb_size_changed(dev_priv) && + !intel_fbc_gen9_wa_cfb_stride_changed(dev_priv))) goto out; __intel_fbc_disable(dev_priv); @@ -1240,12 +1264,7 @@ void intel_fbc_enable(struct intel_atomic_state *state, goto out; } - if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) && - plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED) - cache->gen9_wa_cfb_stride = - DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8; - else - cache->gen9_wa_cfb_stride = 0; + cache->gen9_wa_cfb_stride = intel_fbc_gen9_wa_cfb_stride(dev_priv); drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n", pipe_name(crtc->pipe)); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7f668a49b03d..9ba6cfff9e3f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -442,6 +442,7 @@ struct intel_fbc { struct { const struct drm_format_info *format; unsigned int stride; + u64 modifier; } fb; int cfb_size; -- cgit v1.2.3 From 23ec9f42241a4625b954023aae3591c0baa12ea1 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 14 Jul 2020 17:36:52 +0300 Subject: drm/i915/selftest: Fix an error code in live_noa_gpr() The error code needs to be set on this path. 
It currently returns success. Fixes: ed2690a9ca89 ("drm/i915/selftest: Check that GPR are restored across noa_wait") Reported-by: Lionel Landwerlin Signed-off-by: Dan Carpenter Reviewed-by: Lionel Landwerlin Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200714143652.GA337376@mwanda --- drivers/gpu/drm/i915/selftests/i915_perf.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c index 0aa151501fb3..c2d001d9c0ec 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf.c +++ b/drivers/gpu/drm/i915/selftests/i915_perf.c @@ -357,6 +357,7 @@ static int live_noa_gpr(void *arg) cs = intel_ring_begin(rq, 4); if (IS_ERR(cs)) { + err = PTR_ERR(cs); i915_request_add(rq); goto out_rq; } -- cgit v1.2.3 From 3e6761fd2a3a9a0a13ec16f25897d3dde6414497 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 15 Jul 2020 11:21:04 +0800 Subject: drm/i915: Remove unused inline function drain_delayed_work() It is not used since commit 058179e72e09 ("drm/i915/gt: Replace hangcheck by heartbeats") Signed-off-by: YueHaibing Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20200715032104.32052-1-yuehaibing@huawei.com --- drivers/gpu/drm/i915/i915_utils.h | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index b1c5955a52e1..54773371e6bd 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -266,19 +266,6 @@ static inline int list_is_last_rcu(const struct list_head *list, return READ_ONCE(list->next) == head; } -/* - * Wait until the work is finally complete, even if it tries to postpone - * by requeueing itself. Note, that if the worker never cancels itself, - * we will spin forever. - */ -static inline void drain_delayed_work(struct delayed_work *dw) -{ - do { - while (flush_delayed_work(dw)) - ; - } while (delayed_work_pending(dw)); -} - static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m) { unsigned long j = msecs_to_jiffies(m); -- cgit v1.2.3 From d45171ac186d0d264f6f3b5c345d5f855db8c8aa Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Jul 2020 14:50:02 +0100 Subject: drm/i915/gt: Trace placement of timeline HWSP Track the position of the HWSP for each timeline. 
References: https://gitlab.freedesktop.org/drm/intel/-/issues/2169 Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200714135002.17508-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_timeline.c | 7 +++++++ drivers/gpu/drm/i915/gt/selftest_timeline.c | 13 ++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 4546284fede1..46d20f5f3ddc 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -73,6 +73,8 @@ hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline) return vma; } + GT_TRACE(timeline->gt, "new HWSP allocated\n"); + vma->private = hwsp; hwsp->gt = timeline->gt; hwsp->vma = vma; @@ -327,6 +329,8 @@ int intel_timeline_pin(struct intel_timeline *tl) tl->hwsp_offset = i915_ggtt_offset(tl->hwsp_ggtt) + offset_in_page(tl->hwsp_offset); + GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n", + tl->fence_context, tl->hwsp_offset); cacheline_acquire(tl->hwsp_cacheline); if (atomic_fetch_inc(&tl->pin_count)) { @@ -434,6 +438,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl, int err; might_lock(&tl->gt->ggtt->vm.mutex); + GT_TRACE(tl->gt, "timeline:%llx wrapped\n", tl->fence_context); /* * If there is an outstanding GPU reference to this cacheline, @@ -497,6 +502,8 @@ __intel_timeline_get_seqno(struct intel_timeline *tl, memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES); tl->hwsp_offset += i915_ggtt_offset(vma); + GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n", + tl->fence_context, tl->hwsp_offset); cacheline_acquire(cl); tl->hwsp_cacheline = cl; diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index fcdee951579b..fb5b7d3498a6 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -562,8 +562,9 @@ static int live_hwsp_engine(void *arg) struct intel_timeline *tl = timelines[n]; if (!err && *tl->hwsp_seqno != n) { - pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", - n, *tl->hwsp_seqno); + pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n", + n, tl->hwsp_offset, *tl->hwsp_seqno); + GEM_TRACE_DUMP(); err = -EINVAL; } intel_timeline_put(tl); @@ -633,8 +634,9 @@ out: struct intel_timeline *tl = timelines[n]; if (!err && *tl->hwsp_seqno != n) { - pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", - n, *tl->hwsp_seqno); + pr_err("Invalid seqno stored in timeline %lu @ %x, found 0x%x\n", + n, tl->hwsp_offset, *tl->hwsp_seqno); + GEM_TRACE_DUMP(); err = -EINVAL; } intel_timeline_put(tl); @@ -965,8 +967,9 @@ static int live_hwsp_recycle(void *arg) } if (*tl->hwsp_seqno != count) { - pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n", + pr_err("Invalid seqno stored in timeline %lu @ tl->hwsp_offset, found 0x%x\n", count, *tl->hwsp_seqno); + GEM_TRACE_DUMP(); err = -EINVAL; } -- cgit v1.2.3 From 2a19abb006292d741e670ea6be804c3a575676ce Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 14 Jul 2020 12:44:19 +0100 Subject: drm/i915/gt: Assert the kernel context is using the HWSP We need to ensure that the kernel context is using the permanently pinned HWSP so that we can always submit a pm request from any context. 
By construction, the engine->kernel_context should only be using the engine->status_page.vma so let's assert that is still true when we have to submit a request for parking the engine. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20200714114419.28713-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index d0a1078ef632..8ec3eecf3e39 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -142,6 +142,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) return true; GEM_BUG_ON(!intel_context_is_barrier(ce)); + GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma); /* Already inside the kernel context, safe to power down. */ if (engine->wakeref_serial == engine->serial) -- cgit v1.2.3 From e57bd05ec0d2d82d63725dedf9f5a063f879de25 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 15 Jul 2020 14:18:02 +0300 Subject: drm/i915: Update DRIVER_DATE to 20200715 Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9ba6cfff9e3f..e4f7f6518945 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -108,8 +108,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20200702" -#define DRIVER_TIMESTAMP 1593714328 +#define DRIVER_DATE "20200715" +#define DRIVER_TIMESTAMP 1594811881 struct drm_i915_gem_object; -- cgit v1.2.3