From 4ab4fa1032170b6ec50ffff0767096b3200dde8b Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Tue, 20 Aug 2019 15:33:23 -0700 Subject: drm/i915/psr: Make PSR registers relative to transcoders MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PSR registers are a mess: some have the full address while others just have the additional offset from psr_mmio_base. For BDW+ psr_mmio_base is nothing more than TRANSCODER_EDP_OFFSET + 0x800, and using it makes it more difficult for people coming from BSpec with a PSR register address or PSR register name, as i915 also doesn't match the BSpec names. For HSW psr_mmio_base is _DDI_BUF_CTL_A + 0x800 and PSR registers are only available on DDIA. Another reason to make the registers relative to the transcoder is that since BDW every transcoder has PSR registers, so in theory it should be possible to have PSR enabled on a non-eDP transcoder. So for BDW+ we can use _TRANS2() to get the register offset of any PSR register in any transcoder, while for HSW we have _HSW_PSR_ADJ that calculates the register offset for the single PSR instance. Note that we are already guarded against trying to enable PSR on a port other than DDIA on HSW by the 'if (dig_port->base.port != PORT_A)' check in intel_psr_compute_config(); this check should only be valid for HSW and will be changed in the future. PSR2 registers and PSR_EVENT were added after Haswell, which is why _PSR_ADJ() is not used in some macros. The only registers that cannot be made relative to the transcoder are PSR_IMR and PSR_IIR, which are not relative to anything, so they are kept hardcoded. That changed for TGL but it will be handled in another patch. Also removing BDW_EDP_PSR_BASE from GVT because it is not used, and it is the only PSR register that GVT has. v5: - Macros changed to be more explicit about HSW (Dhinakaran) - Squashed with the patch that added the tran parameter to the macros (Dhinakaran) v6: - Checking for interruption errors after module reload in the transcoder that will be used (Dhinakaran) - Using lowercase for the register offsets v7: - Removing IS_HASWELL() from register macros (Jani) Cc: Dhinakaran Pandiyan Cc: Rodrigo Vivi Cc: Jani Nikula Cc: Ville Syrjälä Cc: Zhi Wang Reviewed-by: Lucas De Marchi Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20190820223325.27490-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_psr.c | 104 ++++++++++++++++++------------- drivers/gpu/drm/i915/gvt/handlers.c | 2 +- drivers/gpu/drm/i915/i915_debugfs.c | 18 +++--- drivers/gpu/drm/i915/i915_drv.h | 5 +- drivers/gpu/drm/i915/i915_reg.h | 57 +++++++++++------ 5 files changed, 113 insertions(+), 73 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 3bfb720560c2..77232f6bca17 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -390,7 +390,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) BUILD_BUG_ON(sizeof(aux_msg) > 20); for (i = 0; i < sizeof(aux_msg); i += 4) - I915_WRITE(EDP_PSR_AUX_DATA(i >> 2), + I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2), intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); @@ -401,7 +401,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) /* Select only valid bits for SRD_AUX_CTL */ aux_ctl &= psr_aux_mask; - I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl); +
I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl); } static void intel_psr_enable_sink(struct intel_dp *intel_dp) @@ -491,8 +491,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) if (INTEL_GEN(dev_priv) >= 8) val |= EDP_PSR_CRC_ENABLE; - val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK; - I915_WRITE(EDP_PSR_CTL, val); + val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & + EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK); + I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val); } static void hsw_activate_psr2(struct intel_dp *intel_dp) @@ -528,9 +529,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is * recommending keep this bit unset while PSR2 is enabled. */ - I915_WRITE(EDP_PSR_CTL, 0); + I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0); - I915_WRITE(EDP_PSR2_CTL, val); + I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); } static bool intel_psr2_config_valid(struct intel_dp *intel_dp, @@ -606,10 +607,9 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, /* * HSW spec explicitly says PSR is tied to port A. - * BDW+ platforms with DDI implementation of PSR have different - * PSR registers per transcoder and we only implement transcoder EDP - * ones. Since by Display design transcoder EDP is tied to port A - * we can safely escape based on the port A. + * BDW+ platforms have a instance of PSR registers per transcoder but + * for now it only supports one instance of PSR, so lets keep it + * hardcoded to PORT_A */ if (dig_port->base.port != PORT_A) { DRM_DEBUG_KMS("PSR condition failed: Port not supported\n"); @@ -649,8 +649,8 @@ static void intel_psr_activate(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); if (INTEL_GEN(dev_priv) >= 9) - WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); - WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); + WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE); + WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE); WARN_ON(dev_priv->psr.active); lockdep_assert_held(&dev_priv->psr.lock); @@ -720,19 +720,37 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, if (INTEL_GEN(dev_priv) < 11) mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; - I915_WRITE(EDP_PSR_DEBUG, mask); + I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask); } static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, const struct intel_crtc_state *crtc_state) { struct intel_dp *intel_dp = dev_priv->psr.dp; + u32 val; WARN_ON(dev_priv->psr.enabled); dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); dev_priv->psr.busy_frontbuffer_bits = 0; dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; + dev_priv->psr.transcoder = crtc_state->cpu_transcoder; + + /* + * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR + * will still keep the error set even after the reset done in the + * irq_preinstall and irq_uninstall hooks. + * And enabling in this situation cause the screen to freeze in the + * first time that PSR HW tries to activate so lets keep PSR disabled + * to avoid any rendering problems. + */ + val = I915_READ(EDP_PSR_IIR); + val &= EDP_PSR_ERROR(edp_psr_shift(dev_priv->psr.transcoder)); + if (val) { + dev_priv->psr.sink_not_reliable = true; + DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n"); + return; + } DRM_DEBUG_KMS("Enabling PSR%s\n", dev_priv->psr.psr2_enabled ? 
"2" : "1"); @@ -782,20 +800,27 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv) u32 val; if (!dev_priv->psr.active) { - if (INTEL_GEN(dev_priv) >= 9) - WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); - WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); + if (INTEL_GEN(dev_priv) >= 9) { + val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); + WARN_ON(val & EDP_PSR2_ENABLE); + } + + val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); + WARN_ON(val & EDP_PSR_ENABLE); + return; } if (dev_priv->psr.psr2_enabled) { - val = I915_READ(EDP_PSR2_CTL); + val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); WARN_ON(!(val & EDP_PSR2_ENABLE)); - I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE); + val &= ~EDP_PSR2_ENABLE; + I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); } else { - val = I915_READ(EDP_PSR_CTL); + val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); WARN_ON(!(val & EDP_PSR_ENABLE)); - I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE); + val &= ~EDP_PSR_ENABLE; + I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val); } dev_priv->psr.active = false; } @@ -817,10 +842,10 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) intel_psr_exit(dev_priv); if (dev_priv->psr.psr2_enabled) { - psr_status = EDP_PSR2_STATUS; + psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder); psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; } else { - psr_status = EDP_PSR_STATUS; + psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder); psr_status_mask = EDP_PSR_STATUS_STATE_MASK; } @@ -963,7 +988,8 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, * defensive enough to cover everything. */ - return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS, + return __intel_wait_for_register(&dev_priv->uncore, + EDP_PSR_STATUS(dev_priv->psr.transcoder), EDP_PSR_STATUS_STATE_MASK, EDP_PSR_STATUS_STATE_IDLE, 2, 50, out_value); @@ -979,10 +1005,10 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) return false; if (dev_priv->psr.psr2_enabled) { - reg = EDP_PSR2_STATUS; + reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder); mask = EDP_PSR2_STATUS_STATE_MASK; } else { - reg = EDP_PSR_STATUS; + reg = EDP_PSR_STATUS(dev_priv->psr.transcoder); mask = EDP_PSR_STATUS_STATE_MASK; } @@ -1208,36 +1234,24 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, */ void intel_psr_init(struct drm_i915_private *dev_priv) { - u32 val; - if (!HAS_PSR(dev_priv)) return; - dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? - HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; - if (!dev_priv->psr.sink_support) return; + if (IS_HASWELL(dev_priv)) + /* + * HSW don't have PSR registers on the same space as transcoder + * so set this to a value that when subtract to the register + * in transcoder space results in the right offset for HSW + */ + dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE; + if (i915_modparams.enable_psr == -1) if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable) i915_modparams.enable_psr = 0; - /* - * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR - * will still keep the error set even after the reset done in the - * irq_preinstall and irq_uninstall hooks. - * And enabling in this situation cause the screen to freeze in the - * first time that PSR HW tries to activate so lets keep PSR disabled - * to avoid any rendering problems. 
- */ - val = I915_READ(EDP_PSR_IIR); - val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP)); - if (val) { - DRM_DEBUG_KMS("PSR interruption error set\n"); - dev_priv->psr.sink_not_reliable = true; - } - /* Set link_standby x link_off defaults */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) /* HSW and BDW require workarounds that we don't implement. */ diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 25f78196b964..45a9124e53b6 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2796,7 +2796,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS); MMIO_D(WM_MISC, D_BDW); - MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW); + MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW); MMIO_D(_MMIO(0x6671c), D_BDW_PLUS); MMIO_D(_MMIO(0x66c00), D_BDW_PLUS); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b0f51591f2e4..e103fcba6435 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2133,7 +2133,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) "BUF_ON", "TG_ON" }; - val = I915_READ(EDP_PSR2_STATUS); + val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder)); status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT; if (status_val < ARRAY_SIZE(live_status)) @@ -2149,7 +2149,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) "SRDOFFACK", "SRDENT_ON", }; - val = I915_READ(EDP_PSR_STATUS); + val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder)); status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> EDP_PSR_STATUS_STATE_SHIFT; if (status_val < ARRAY_SIZE(live_status)) @@ -2192,10 +2192,10 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) goto unlock; if (psr->psr2_enabled) { - val = I915_READ(EDP_PSR2_CTL); + val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); enabled = val & EDP_PSR2_ENABLE; } else { - val = I915_READ(EDP_PSR_CTL); + val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); enabled = val & EDP_PSR_ENABLE; } seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", @@ -2208,7 +2208,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) * SKL+ Perf counter is reset to 0 everytime DC state is entered */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK; + val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder)); + val &= EDP_PSR_PERF_CNT_MASK; seq_printf(m, "Performance counter: %u\n", val); } @@ -2226,8 +2227,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) * Reading all 3 registers before hand to minimize crossing a * frame boundary between register reads */ - for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) - su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame)); + for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { + val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder, + frame)); + su_frames_val[frame / 3] = val; + } seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1d725e0bba40..048896b1348b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -479,6 +479,7 @@ struct i915_psr { bool enabled; struct intel_dp *dp; enum pipe pipe; + enum transcoder transcoder; bool active; struct work_struct work; unsigned busy_frontbuffer_bits; @@ -1330,11 +1331,11 @@ struct 
drm_i915_private { */ u32 gpio_mmio_base; + u32 hsw_psr_mmio_adjust; + /* MMIO base address for MIPI regs */ u32 mipi_mmio_base; - u32 psr_mmio_base; - u32 pps_mmio_base; wait_queue_head_t gmbus_wait_queue; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2abd199093c5..a092b34c269d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4186,10 +4186,17 @@ enum { #define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC) #define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A) -/* HSW+ eDP PSR registers */ -#define HSW_EDP_PSR_BASE 0x64800 -#define BDW_EDP_PSR_BASE 0x6f800 -#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0) +/* + * HSW+ eDP PSR registers + * + * HSW PSR registers are relative to DDIA(_DDI_BUF_CTL_A + 0x800) with just one + * instance of it + */ +#define _HSW_EDP_PSR_BASE 0x64800 +#define _SRD_CTL_A 0x60800 +#define _SRD_CTL_EDP 0x6f800 +#define _PSR_ADJ(tran, reg) (_TRANS2(tran, reg) - dev_priv->hsw_psr_mmio_adjust) +#define EDP_PSR_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_CTL_A)) #define EDP_PSR_ENABLE (1 << 31) #define BDW_PSR_SINGLE_FRAME (1 << 30) #define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */ @@ -4226,16 +4233,22 @@ enum { #define EDP_PSR_TRANSCODER_A_SHIFT 8 #define EDP_PSR_TRANSCODER_EDP_SHIFT 0 -#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10) +#define _SRD_AUX_CTL_A 0x60810 +#define _SRD_AUX_CTL_EDP 0x6f810 +#define EDP_PSR_AUX_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_AUX_CTL_A)) #define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26) #define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK (0x1f << 20) #define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK (0xf << 16) #define EDP_PSR_AUX_CTL_ERROR_INTERRUPT (1 << 11) #define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK (0x7ff) -#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */ +#define _SRD_AUX_DATA_A 0x60814 +#define _SRD_AUX_DATA_EDP 0x6f814 +#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_PSR_ADJ(tran, _SRD_AUX_DATA_A) + (i) + 4) /* 5 registers */ -#define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40) +#define _SRD_STATUS_A 0x60840 +#define _SRD_STATUS_EDP 0x6f840 +#define EDP_PSR_STATUS(tran) _MMIO(_PSR_ADJ(tran, _SRD_STATUS_A)) #define EDP_PSR_STATUS_STATE_MASK (7 << 29) #define EDP_PSR_STATUS_STATE_SHIFT 29 #define EDP_PSR_STATUS_STATE_IDLE (0 << 29) @@ -4260,10 +4273,15 @@ enum { #define EDP_PSR_STATUS_SENDING_TP1 (1 << 4) #define EDP_PSR_STATUS_IDLE_MASK 0xf -#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44) +#define _SRD_PERF_CNT_A 0x60844 +#define _SRD_PERF_CNT_EDP 0x6f844 +#define EDP_PSR_PERF_CNT(tran) _MMIO(_PSR_ADJ(tran, _SRD_PERF_CNT_A)) #define EDP_PSR_PERF_CNT_MASK 0xffffff -#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */ +/* PSR_MASK on SKL+ */ +#define _SRD_DEBUG_A 0x60860 +#define _SRD_DEBUG_EDP 0x6f860 +#define EDP_PSR_DEBUG(tran) _MMIO(_PSR_ADJ(tran, _SRD_DEBUG_A)) #define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28) #define EDP_PSR_DEBUG_MASK_LPSP (1 << 27) #define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26) @@ -4271,7 +4289,9 @@ enum { #define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */ #define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */ -#define EDP_PSR2_CTL _MMIO(0x6f900) +#define _PSR2_CTL_A 0x60900 +#define _PSR2_CTL_EDP 0x6f900 +#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A) #define EDP_PSR2_ENABLE (1 << 31) #define EDP_SU_TRACK_ENABLE (1 << 30) #define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */ @@ 
-4293,8 +4313,8 @@ enum { #define _PSR_EVENT_TRANS_B 0x61848 #define _PSR_EVENT_TRANS_C 0x62848 #define _PSR_EVENT_TRANS_D 0x63848 -#define _PSR_EVENT_TRANS_EDP 0x6F848 -#define PSR_EVENT(trans) _MMIO_TRANS2(trans, _PSR_EVENT_TRANS_A) +#define _PSR_EVENT_TRANS_EDP 0x6f848 +#define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A) #define PSR_EVENT_PSR2_WD_TIMER_EXPIRE (1 << 17) #define PSR_EVENT_PSR2_DISABLED (1 << 16) #define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN (1 << 15) @@ -4312,15 +4332,16 @@ enum { #define PSR_EVENT_LPSP_MODE_EXIT (1 << 1) #define PSR_EVENT_PSR_DISABLE (1 << 0) -#define EDP_PSR2_STATUS _MMIO(0x6f940) +#define _PSR2_STATUS_A 0x60940 +#define _PSR2_STATUS_EDP 0x6f940 +#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A) #define EDP_PSR2_STATUS_STATE_MASK (0xf << 28) #define EDP_PSR2_STATUS_STATE_SHIFT 28 -#define _PSR2_SU_STATUS_0 0x6F914 -#define _PSR2_SU_STATUS_1 0x6F918 -#define _PSR2_SU_STATUS_2 0x6F91C -#define _PSR2_SU_STATUS(index) _MMIO(_PICK_EVEN((index), _PSR2_SU_STATUS_0, _PSR2_SU_STATUS_1)) -#define PSR2_SU_STATUS(frame) (_PSR2_SU_STATUS((frame) / 3)) +#define _PSR2_SU_STATUS_A 0x60914 +#define _PSR2_SU_STATUS_EDP 0x6f914 +#define _PSR2_SU_STATUS(tran, index) _MMIO(_TRANS2(tran, _PSR2_SU_STATUS_A) + (index) * 4) +#define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3)) #define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10) #define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame)) #define PSR2_SU_STATUS_FRAMES 8 -- cgit v1.2.3 From 99fc38b12095cd0ca9fc5b8c8e27e827dff6ed34 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Tue, 20 Aug 2019 15:33:24 -0700 Subject: drm/i915: Add transcoder restriction to PSR2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to PSR2_CTL definition in BSpec there is only one instance of PSR2_CTL. Platforms gen < 12 with EDP transcoder only support PSR2 on TRANSCODER_EDP, while on TGL PSR2 is only supported by TRANSCODER_A. Since BDW, PSR is allowed on any port, but we need to restrict it by transcoder.
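As a rough sketch (not part of the patch) of what this restriction means for the per-transcoder PSR2_CTL instances defined in the previous patch: the helper below mirrors the transcoder_has_psr2() check added here, and the standalone enum plus the 0x1000 per-transcoder stride are illustrative assumptions based on the _PSR2_CTL_A (0x60900) and _PSR2_CTL_EDP (0x6f900) offsets in i915_reg.h.

/*
 * Illustrative only: which PSR2_CTL instance ends up being used, given the
 * transcoder restriction above. Names and the 0x1000 stride are assumptions
 * made for this sketch, not i915 code.
 */
enum xcoder { XCODER_A, XCODER_B, XCODER_C, XCODER_EDP };

static int sketch_has_psr2(int gen, enum xcoder trans)
{
	if (gen >= 12)
		return trans == XCODER_A;	/* TGL: PSR2 only on transcoder A */
	return trans == XCODER_EDP;		/* gen < 12: only on the eDP transcoder */
}

static unsigned int sketch_psr2_ctl_offset(enum xcoder trans)
{
	if (trans == XCODER_EDP)
		return 0x6f900;			/* _PSR2_CTL_EDP */
	return 0x60900 + 0x1000 * trans;	/* _PSR2_CTL_A/B/C */
}

So on gen < 12 PSR2 always lands on the 0x6f900 instance, while on TGL it lands on the transcoder A instance at 0x60900.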
v8: Renamed _psr2_supported_in_trans() to psr2_supported() (Lucas) v9: Renamed psr2_supported() to transcoder_has_psr2() (Ville) BSpec: 7713 BSpec: 20584 Cc: Dhinakaran Pandiyan Cc: Rodrigo Vivi Reviewed-by: Lucas De Marchi Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Anshuman Gupta Link: https://patchwork.freedesktop.org/patch/msgid/20190820223325.27490-2-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_psr.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 77232f6bca17..771d9a40bf12 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -534,6 +534,15 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); } +static bool +transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans) +{ + if (INTEL_GEN(dev_priv) >= 12) + return trans == TRANSCODER_A; + else + return trans == TRANSCODER_EDP; +} + static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { @@ -545,6 +554,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, if (!dev_priv->psr.sink_psr2_support) return false; + if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) { + DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n", + transcoder_name(crtc_state->cpu_transcoder)); + return false; + } + /* * DSC and PSR2 cannot be enabled simultaneously. If a requested * resolution requires DSC to be enabled, priority is given to DSC -- cgit v1.2.3 From df7415bfc06f8ee482a510aea39c12121e409dcc Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Tue, 20 Aug 2019 15:33:25 -0700 Subject: drm/i915: Do not unmask PSR interruption in IRQ postinstall MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No need to unmask the PSR interruption if PSR is not enabled; better to move the call to intel_psr_enable_source().
v2: Renamed intel_psr_irq_control() to psr_irq_control() (Lucas) Cc: Rodrigo Vivi Cc: Dhinakaran Pandiyan Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20190820223325.27490-3-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_psr.c | 6 ++++-- drivers/gpu/drm/i915/display/intel_psr.h | 1 - drivers/gpu/drm/i915/i915_irq.c | 2 -- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 771d9a40bf12..28b62e587204 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -105,7 +105,7 @@ static int edp_psr_shift(enum transcoder cpu_transcoder) } } -void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug) +static void psr_irq_control(struct drm_i915_private *dev_priv, u32 debug) { u32 debug_mask, mask; enum transcoder cpu_transcoder; @@ -736,6 +736,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask); + + psr_irq_control(dev_priv, dev_priv->psr.debug); } static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, @@ -1108,7 +1110,7 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val) old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK; dev_priv->psr.debug = val; - intel_psr_irq_control(dev_priv, dev_priv->psr.debug); + psr_irq_control(dev_priv, dev_priv->psr.debug); mutex_unlock(&dev_priv->psr.lock); diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index dc818826f36d..46e4de8b8cd5 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -30,7 +30,6 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, void intel_psr_init(struct drm_i915_private *dev_priv); void intel_psr_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state); -void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug); void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir); void intel_psr_short_pulse(struct intel_dp *intel_dp); int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 37e3dd3c1a9d..77391d8325bf 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3684,7 +3684,6 @@ static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv) if (IS_HASWELL(dev_priv)) { gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); - intel_psr_irq_control(dev_priv, dev_priv->psr.debug); display_mask |= DE_EDP_PSR_INT_HSW; } @@ -3795,7 +3794,6 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); - intel_psr_irq_control(dev_priv, dev_priv->psr.debug); for_each_pipe(dev_priv, pipe) { dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; -- cgit v1.2.3 From cee508a0bddba1627575de2e41fb8e321fa380a8 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Tue, 20 Aug 2019 15:30:59 -0700 Subject: drm/dp/dsc: Add Support for all BPCs supported by TGL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DSC engine on ICL supports only 8 and 10 BPC as the input BPC. 
But DSC engine in TGL supports 8, 10 and 12 BPC. Add 12 BPC support for DSC while calculating compression configuration. v2: Remove the separate define TGL_DP_DSC_MAX_SUPPORTED_BPC and use the value directly. (More such defines can be removed as part of future patches). (Ville) v3: Use values directly instead of accessing the defines every time for min and max DSC BPC. Cc: Ville Syrjälä Cc: Manasi Navare Signed-off-by: Anusha Srivatsa Reviewed-by: Manasi Navare Signed-off-by: Manasi Navare Link: https://patchwork.freedesktop.org/patch/msgid/20190820223059.18052-1-anusha.srivatsa@intel.com --- drivers/gpu/drm/i915/display/intel_dp.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 921ad0a2f7ba..23908da1cd5d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -70,8 +70,6 @@ /* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */ #define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440 -#define DP_DSC_MIN_SUPPORTED_BPC 8 -#define DP_DSC_MAX_SUPPORTED_BPC 10 /* DP DSC throughput values used for slice count calculations KPixels/s */ #define DP_DSC_PEAK_PIXEL_RATE 2720000 @@ -1915,11 +1913,17 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, if (!intel_dp_supports_dsc(intel_dp, pipe_config)) return -EINVAL; - dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC, - conn_state->max_requested_bpc); + /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ + if (INTEL_GEN(dev_priv) >= 12) + dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); + else + dsc_max_bpc = min_t(u8, 10, + conn_state->max_requested_bpc); pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); - if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) { + + /* Min Input BPC for ICL+ is 8 */ + if (pipe_bpp < 8 * 3) { DRM_DEBUG_KMS("No DSC support for less than 8bpc\n"); return -EINVAL; } -- cgit v1.2.3 From d4c61c4a16decd8ace8660f22c81609a539fccba Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Wed, 21 Aug 2019 14:59:50 -0700 Subject: drm/i915/dp: Fix DSC enable code to use cpu_transcoder instead of encoder->type This patch fixes the intel_configure_pps_for_dsc_encoder() function to use cpu_transcoder instead of encoder->type to select the correct DSC registers, which was wrongly used in the original patch for one DSC register instance.
Fixes: 7182414e2530 ("drm/i915/dp: Configure i915 Picture parameter Set registers during DSC enabling") Cc: Ville Syrjala Cc: Maarten Lankhorst Cc: Jani Nikula Cc: Ville Syrjala Cc: # v5.0+ Signed-off-by: Manasi Navare Reviewed-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20190821215950.24223-1-manasi.d.navare@intel.com --- drivers/gpu/drm/i915/display/intel_vdsc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 598ddb60f9fb..d4fb7f16f9f6 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -547,7 +547,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) | DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances); DRM_INFO("PPS2 = 0x%08x\n", pps_val); - if (encoder->type == INTEL_OUTPUT_EDP) { + if (cpu_transcoder == TRANSCODER_EDP) { I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val); /* * If 2 VDSC instances are needed, configure PPS for second -- cgit v1.2.3 From b3c0692f36a44bd02ee8ccc5bed972a36fbf2e99 Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Fri, 23 Aug 2019 01:20:33 -0700 Subject: drm/i915/tgl: Move GTCR register to cope with GAM MMIO address remap GAM registers located in the 0x4xxx range have been relocated to 0xCxxx; this is to make space for global MOCS registers. v2: Rename register and bitfield to its new name (suggested by Mika) HSD: 399379 Cc: Daniele Ceraolo Spurio Signed-off-by: Michel Thierry Reviewed-by: Lucas De Marchi Reviewed-by: Mika Kuoppala Signed-off-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-2-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h | 3 +++ drivers/gpu/drm/i915/i915_gem_gtt.c | 8 +++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h index edf194d23c6b..1949346e714e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h @@ -83,6 +83,9 @@ #define GEN8_GTCR _MMIO(0x4274) #define GEN8_GTCR_INVALIDATE (1<<0) +#define GEN12_GUC_TLB_INV_CR _MMIO(0xcee8) +#define GEN12_GUC_TLB_INV_CR_INVALIDATE (1 << 0) + #define GUC_ARAT_C6DIS _MMIO(0xA178) #define GUC_SHIM_CONTROL _MMIO(0xc064) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0b81e0b64393..2a425db1cfd8 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -132,9 +132,15 @@ static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) { struct intel_uncore *uncore = ggtt->vm.gt->uncore; + struct drm_i915_private *i915 = ggtt->vm.i915; gen6_ggtt_invalidate(ggtt); - intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); + + if (INTEL_GEN(i915) >= 12) + intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR, + GEN12_GUC_TLB_INV_CR_INVALIDATE); + else + intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); } static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) -- cgit v1.2.3 From 5d86923060fce88a4dda8d8c9c5d5eb32f37c37b Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Fri, 23 Aug 2019 01:20:34 -0700 Subject: drm/i915/tgl: Enable VD HCP/MFX sub-pipe power gating HCP/MFX power gating is disabled by default, turn it on for the vd units available. 
User space will also issue a MI_FORCE_WAKEUP properly to wake up proper subwell. During driver load, init_clock_gating happens after device_info_init_mmio read the vdbox disable fuse register, so only present vd units will have these enabled. BSpec: 14214 HSDES: 1209977827 Signed-off-by: Michel Thierry Reviewed-by: Lucas De Marchi Signed-off-by: Lucas De Marchi Reviewed-by: Tony Ye Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-3-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 4 ++++ drivers/gpu/drm/i915/intel_pm.c | 18 +++++++++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a092b34c269d..02e1ef10c47e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -8615,6 +8615,10 @@ enum { #define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0) #define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1) +#define POWERGATE_ENABLE _MMIO(0xa210) +#define VDN_HCP_POWERGATE_ENABLE(n) BIT(((n) * 2) + 3) +#define VDN_MFX_POWERGATE_ENABLE(n) BIT(((n) * 2) + 4) + #define GTFIFODBG _MMIO(0x120000) #define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20) #define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 75ee027abb80..d3ea193cd093 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -9078,6 +9078,22 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE)); } +static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) +{ + u32 vd_pg_enable = 0; + unsigned int i; + + /* This is not a WA. Enable VD HCP & MFX_ENC powergate */ + for (i = 0; i < I915_MAX_VCS; i++) { + if (HAS_ENGINE(dev_priv, _VCS(i))) + vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) | + VDN_MFX_POWERGATE_ENABLE(i); + } + + I915_WRITE(POWERGATE_ENABLE, + I915_READ(POWERGATE_ENABLE) | vd_pg_enable); +} + static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) { if (!HAS_PCH_CNP(dev_priv)) @@ -9598,7 +9614,7 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv) void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) { if (IS_GEN(dev_priv, 12)) - dev_priv->display.init_clock_gating = nop_init_clock_gating; + dev_priv->display.init_clock_gating = tgl_init_clock_gating; else if (IS_GEN(dev_priv, 11)) dev_priv->display.init_clock_gating = icl_init_clock_gating; else if (IS_CANNONLAKE(dev_priv)) -- cgit v1.2.3 From 4087f873df1fbf9d9590953119ea78dde6a759dd Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 23 Aug 2019 09:02:57 -0700 Subject: drm/i915: Use variable for debugfs device status Use a local variable to find SSEU runtime information in various debugfs functions. 
v2: Remove extra line breaks per feedback from Chris Signed-off-by: Stuart Summers Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-2-stuart.summers@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e103fcba6435..806db87affb2 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -3849,8 +3849,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, sseu->slice_mask |= BIT(s); if (IS_GEN9_BC(dev_priv)) - sseu->subslice_mask[s] = - RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s]; + sseu->subslice_mask[s] = info->sseu.subslice_mask[s]; for (ss = 0; ss < info->sseu.max_subslices; ss++) { unsigned int eu_cnt; @@ -3877,25 +3876,22 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, struct sseu_dev_info *sseu) { + const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); int s; sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK; if (sseu->slice_mask) { - sseu->eu_per_subslice = - RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice; - for (s = 0; s < fls(sseu->slice_mask); s++) { - sseu->subslice_mask[s] = - RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s]; - } + sseu->eu_per_subslice = info->sseu.eu_per_subslice; + for (s = 0; s < fls(sseu->slice_mask); s++) + sseu->subslice_mask[s] = info->sseu.subslice_mask[s]; sseu->eu_total = sseu->eu_per_subslice * intel_sseu_subslice_total(sseu); /* subtract fused off EU(s) from enabled slice(s) */ for (s = 0; s < fls(sseu->slice_mask); s++) { - u8 subslice_7eu = - RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s]; + u8 subslice_7eu = info->sseu.subslice_7eu[s]; sseu->eu_total -= hweight8(subslice_7eu); } @@ -3942,6 +3938,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info, static int i915_sseu_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); struct sseu_dev_info sseu; intel_wakeref_t wakeref; @@ -3949,14 +3946,13 @@ static int i915_sseu_status(struct seq_file *m, void *unused) return -ENODEV; seq_puts(m, "SSEU Device Info\n"); - i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu); + i915_print_sseu_info(m, true, &info->sseu); seq_puts(m, "SSEU Device Status\n"); memset(&sseu, 0, sizeof(sseu)); - sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices; - sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices; - sseu.max_eus_per_subslice = - RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice; + sseu.max_slices = info->sseu.max_slices; + sseu.max_subslices = info->sseu.max_subslices; + sseu.max_eus_per_subslice = info->sseu.max_eus_per_subslice; with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { if (IS_CHERRYVIEW(dev_priv)) -- cgit v1.2.3 From 8b355db99cfbdaea848e05c288d6d80c391f0b13 Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 23 Aug 2019 09:02:58 -0700 Subject: drm/i915: Add function to set SSEU info per platform Add a new function to allow each platform to set maximum slice, subslice, and EU information to reduce code duplication. 
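As a usage illustration (a sketch only, not additional patch content), this is the shape of a converted call site; the values are the Cherryview limits used in the diff below (1 slice, 2 subslices, 8 EUs per subslice), and the simplified struct here is an assumption standing in for struct sseu_dev_info.

/*
 * Sketch of what the helper buys at a call site: the three open-coded
 * assignments collapse into one call. Names are illustrative.
 */
struct sketch_sseu {
	unsigned char max_slices, max_subslices, max_eus_per_subslice;
};

static void sketch_set_info(struct sketch_sseu *sseu, unsigned char slices,
			    unsigned char subslices, unsigned char eus)
{
	/* Mirrors intel_sseu_set_info(): record the per-platform limits once. */
	sseu->max_slices = slices;
	sseu->max_subslices = subslices;
	sseu->max_eus_per_subslice = eus;
}

static void sketch_chv_init(struct sketch_sseu *sseu)
{
	sketch_set_info(sseu, 1, 2, 8);	/* replaces three separate assignments */
}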
Signed-off-by: Stuart Summers Reviewed-by: Mika Kuoppala Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-3-stuart.summers@intel.com --- drivers/gpu/drm/i915/gt/intel_sseu.c | 8 +++++++ drivers/gpu/drm/i915/gt/intel_sseu.h | 3 +++ drivers/gpu/drm/i915/i915_debugfs.c | 6 ++--- drivers/gpu/drm/i915/intel_device_info.c | 39 ++++++++++++-------------------- 4 files changed, 28 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index 6bf2d87da109..6727079eb9b6 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -8,6 +8,14 @@ #include "intel_lrc_reg.h" #include "intel_sseu.h" +void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, + u8 max_subslices, u8 max_eus_per_subslice) +{ + sseu->max_slices = max_slices; + sseu->max_subslices = max_subslices; + sseu->max_eus_per_subslice = max_eus_per_subslice; +} + unsigned int intel_sseu_subslice_total(const struct sseu_dev_info *sseu) { diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index b50d0401a4e2..64e47dad07be 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -63,6 +63,9 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu) return value; } +void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, + u8 max_subslices, u8 max_eus_per_subslice); + unsigned int intel_sseu_subslice_total(const struct sseu_dev_info *sseu); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 806db87affb2..28713c9c98ea 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -3950,9 +3950,9 @@ static int i915_sseu_status(struct seq_file *m, void *unused) seq_puts(m, "SSEU Device Status\n"); memset(&sseu, 0, sizeof(sseu)); - sseu.max_slices = info->sseu.max_slices; - sseu.max_subslices = info->sseu.max_subslices; - sseu.max_eus_per_subslice = info->sseu.max_eus_per_subslice; + intel_sseu_set_info(&sseu, info->sseu.max_slices, + info->sseu.max_subslices, + info->sseu.max_eus_per_subslice); with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { if (IS_CHERRYVIEW(dev_priv)) diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index d0ed44d33484..77d7bbaa49f3 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -191,15 +191,10 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) u8 eu_en; int s; - if (IS_ELKHARTLAKE(dev_priv)) { - sseu->max_slices = 1; - sseu->max_subslices = 4; - sseu->max_eus_per_subslice = 8; - } else { - sseu->max_slices = 1; - sseu->max_subslices = 8; - sseu->max_eus_per_subslice = 8; - } + if (IS_ELKHARTLAKE(dev_priv)) + intel_sseu_set_info(sseu, 1, 4, 8); + else + intel_sseu_set_info(sseu, 1, 8, 8); s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE); @@ -236,11 +231,10 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) const int eu_mask = 0xff; u32 subslice_mask, eu_en; + intel_sseu_set_info(sseu, 6, 4, 8); + sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >> GEN10_F2_S_ENA_SHIFT; - sseu->max_slices = 6; - sseu->max_subslices = 4; - sseu->max_eus_per_subslice = 8; subslice_mask = (1 << 4) - 1; subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> @@ -314,9 +308,7 @@ static void 
cherryview_sseu_info_init(struct drm_i915_private *dev_priv) fuse = I915_READ(CHV_FUSE_GT); sseu->slice_mask = BIT(0); - sseu->max_slices = 1; - sseu->max_subslices = 2; - sseu->max_eus_per_subslice = 8; + intel_sseu_set_info(sseu, 1, 2, 8); if (!(fuse & CHV_FGT_DISABLE_SS0)) { u8 disabled_mask = @@ -372,9 +364,8 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; /* BXT has a single slice and at most 3 subslices. */ - sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3; - sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4; - sseu->max_eus_per_subslice = 8; + intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3, + IS_GEN9_LP(dev_priv) ? 3 : 4, 8); /* * The subslice disable field is global, i.e. it applies @@ -473,9 +464,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) fuse2 = I915_READ(GEN8_FUSE2); sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; - sseu->max_slices = 3; - sseu->max_subslices = 3; - sseu->max_eus_per_subslice = 8; + intel_sseu_set_info(sseu, 3, 3, 8); /* * The subslice disable field is global, i.e. it applies @@ -577,9 +566,6 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) break; } - sseu->max_slices = hweight8(sseu->slice_mask); - sseu->max_subslices = hweight8(sseu->subslice_mask[0]); - fuse1 = I915_READ(HSW_PAVP_FUSE1); switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) { default: @@ -596,7 +582,10 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) sseu->eu_per_subslice = 6; break; } - sseu->max_eus_per_subslice = sseu->eu_per_subslice; + + intel_sseu_set_info(sseu, hweight8(sseu->slice_mask), + hweight8(sseu->subslice_mask[0]), + sseu->eu_per_subslice); for (s = 0; s < sseu->max_slices; s++) { for (ss = 0; ss < sseu->max_subslices; ss++) { -- cgit v1.2.3 From 7a200aad1127e98cbf4da9685f652899598fe357 Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 23 Aug 2019 09:02:59 -0700 Subject: drm/i915: Add subslice stride runtime parameter Add a new parameter, ss_stride, to the runtime info structure. This is used to mirror the userspace concept of subslice stride, which is a range of subslices per slice. This patch simply adds the definition and updates usage in the QUERY_TOPOLOGY_INFO handler. 
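A small worked example of the stride being added (a sketch reusing the GEN_SSEU_STRIDE definition already in intel_sseu.h; the 10-subslice case is hypothetical, only there to show a stride larger than one byte):

#include <stdio.h>

#define BITS_PER_BYTE 8
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)

int main(void)
{
	/* ICL upper bound: 8 subslices -> 1 byte of subslice mask per slice. */
	printf("ss_stride for 8 subslices:  %d\n", GEN_SSEU_STRIDE(8));
	/* Hypothetical 10-subslice part -> 2 bytes of mask per slice. */
	printf("ss_stride for 10 subslices: %d\n", GEN_SSEU_STRIDE(10));
	return 0;
}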
v2: Add GEM_BUG_ON to make sure ss_stride is valid Signed-off-by: Stuart Summers Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-4-stuart.summers@intel.com --- drivers/gpu/drm/i915/gt/intel_sseu.c | 3 +++ drivers/gpu/drm/i915/gt/intel_sseu.h | 3 +++ drivers/gpu/drm/i915/i915_query.c | 5 ++--- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index 6727079eb9b6..edf39ae132c3 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -14,6 +14,9 @@ void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, sseu->max_slices = max_slices; sseu->max_subslices = max_subslices; sseu->max_eus_per_subslice = max_eus_per_subslice; + + sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices); + GEM_BUG_ON(sseu->ss_stride > GEN_MAX_SUBSLICE_STRIDE); } unsigned int diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index 64e47dad07be..8b8b562ff773 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -15,6 +15,7 @@ struct drm_i915_private; #define GEN_MAX_SLICES (6) /* CNL upper bound */ #define GEN_MAX_SUBSLICES (8) /* ICL upper bound */ #define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE) +#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES) struct sseu_dev_info { u8 slice_mask; @@ -33,6 +34,8 @@ struct sseu_dev_info { u8 max_subslices; u8 max_eus_per_subslice; + u8 ss_stride; + /* We don't have more than 8 eus per subslice at the moment and as we * store eus enabled using bits, no need to multiply by eus per * subslice. diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index ad9240a0817a..d8e25dcf5f0b 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -37,7 +37,6 @@ static int query_topology_info(struct drm_i915_private *dev_priv, const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; struct drm_i915_query_topology_info topo; u32 slice_length, subslice_length, eu_length, total_length; - u8 subslice_stride = GEN_SSEU_STRIDE(sseu->max_subslices); u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); int ret; @@ -50,7 +49,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv, BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask)); slice_length = sizeof(sseu->slice_mask); - subslice_length = sseu->max_slices * subslice_stride; + subslice_length = sseu->max_slices * sseu->ss_stride; eu_length = sseu->max_slices * sseu->max_subslices * eu_stride; total_length = sizeof(topo) + slice_length + subslice_length + eu_length; @@ -69,7 +68,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv, topo.max_eus_per_subslice = sseu->max_eus_per_subslice; topo.subslice_offset = slice_length; - topo.subslice_stride = subslice_stride; + topo.subslice_stride = sseu->ss_stride; topo.eu_offset = slice_length + subslice_length; topo.eu_stride = eu_stride; -- cgit v1.2.3 From 49610c377be7f32d948f0970705b4ad015923aa0 Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 23 Aug 2019 09:03:00 -0700 Subject: drm/i915: Add EU stride runtime parameter Add a new SSEU runtime parameter, eu_stride, which is used to mirror the userspace concept of a range of EUs per subslice. This patch simply adds the parameter and updates usage in the QUERY_TOPOLOGY_INFO handler. 
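To show how the two strides drive the QUERY_TOPOLOGY_INFO buffer layout, a minimal sketch of the length computation used in the diff below; the helper name and the ICL-like example limits are illustrative, not part of the patch.

/* Mirrors the length math in query_topology_info(), header excluded. */
static unsigned int sketch_topo_mask_len(unsigned int max_slices,
					 unsigned int max_subslices,
					 unsigned int max_eus_per_subslice)
{
	unsigned int ss_stride = (max_subslices + 7) / 8;	/* GEN_SSEU_STRIDE */
	unsigned int eu_stride = (max_eus_per_subslice + 7) / 8;
	unsigned int slice_length = 1;				/* sizeof(slice_mask) */
	unsigned int subslice_length = max_slices * ss_stride;
	unsigned int eu_length = max_slices * max_subslices * eu_stride;

	return slice_length + subslice_length + eu_length;
}

With ICL-like limits (1 slice, 8 subslices, 8 EUs per subslice) this gives 1 + 1 + 8 = 10 bytes of masks after the struct drm_i915_query_topology_info header.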
v2: Add GEM_BUG_ON to make sure eu_stride is valid Signed-off-by: Stuart Summers Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-5-stuart.summers@intel.com --- drivers/gpu/drm/i915/gt/intel_sseu.c | 2 ++ drivers/gpu/drm/i915/gt/intel_sseu.h | 3 +++ drivers/gpu/drm/i915/i915_query.c | 5 ++--- drivers/gpu/drm/i915/intel_device_info.c | 9 ++++----- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index edf39ae132c3..d52686a1afdc 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -17,6 +17,8 @@ void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices); GEM_BUG_ON(sseu->ss_stride > GEN_MAX_SUBSLICE_STRIDE); + sseu->eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); + GEM_BUG_ON(sseu->eu_stride > GEN_MAX_EU_STRIDE); } unsigned int diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index 8b8b562ff773..7f2355ce963d 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -16,6 +16,8 @@ struct drm_i915_private; #define GEN_MAX_SUBSLICES (8) /* ICL upper bound */ #define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE) #define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES) +#define GEN_MAX_EUS (10) /* HSW upper bound */ +#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS) struct sseu_dev_info { u8 slice_mask; @@ -35,6 +37,7 @@ struct sseu_dev_info { u8 max_eus_per_subslice; u8 ss_stride; + u8 eu_stride; /* We don't have more than 8 eus per subslice at the moment and as we * store eus enabled using bits, no need to multiply by eus per diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index d8e25dcf5f0b..abac5042da2b 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -37,7 +37,6 @@ static int query_topology_info(struct drm_i915_private *dev_priv, const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; struct drm_i915_query_topology_info topo; u32 slice_length, subslice_length, eu_length, total_length; - u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); int ret; if (query_item->flags != 0) @@ -50,7 +49,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv, slice_length = sizeof(sseu->slice_mask); subslice_length = sseu->max_slices * sseu->ss_stride; - eu_length = sseu->max_slices * sseu->max_subslices * eu_stride; + eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride; total_length = sizeof(topo) + slice_length + subslice_length + eu_length; @@ -70,7 +69,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv, topo.subslice_offset = slice_length; topo.subslice_stride = sseu->ss_stride; topo.eu_offset = slice_length + subslice_length; - topo.eu_stride = eu_stride; + topo.eu_stride = sseu->eu_stride; if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr), &topo, sizeof(topo))) diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 77d7bbaa49f3..b1a79ed408eb 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -118,10 +118,9 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info, static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice, int 
subslice) { - int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); - int slice_stride = sseu->max_subslices * subslice_stride; + int slice_stride = sseu->max_subslices * sseu->eu_stride; - return slice * slice_stride + subslice * subslice_stride; + return slice * slice_stride + subslice * sseu->eu_stride; } static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice, @@ -130,7 +129,7 @@ static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice, int i, offset = sseu_eu_idx(sseu, slice, subslice); u16 eu_mask = 0; - for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { + for (i = 0; i < sseu->eu_stride; i++) { eu_mask |= ((u16)sseu->eu_mask[offset + i]) << (i * BITS_PER_BYTE); } @@ -143,7 +142,7 @@ static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice, { int i, offset = sseu_eu_idx(sseu, slice, subslice); - for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { + for (i = 0; i < sseu->eu_stride; i++) { sseu->eu_mask[offset + i] = (eu_mask >> (BITS_PER_BYTE * i)) & 0xff; } -- cgit v1.2.3 From 33ee9e8680963fbfa298fec08eed7b45db0a1ebb Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 23 Aug 2019 09:03:01 -0700 Subject: drm/i915: Use local variables for subslice_mask for device info When setting up subslice_mask, instead of operating on the slice array directly, use a local variable to start bits per slice, then use this to set the per slice array in one step. Signed-off-by: Stuart Summers Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-6-stuart.summers@intel.com --- drivers/gpu/drm/i915/intel_device_info.c | 49 +++++++++++++++++--------------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index b1a79ed408eb..52515efe9f4e 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -235,18 +235,6 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >> GEN10_F2_S_ENA_SHIFT; - subslice_mask = (1 << 4) - 1; - subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> - GEN10_F2_SS_DIS_SHIFT); - - /* - * Slice0 can have up to 3 subslices, but there are only 2 in - * slice1/2. - */ - sseu->subslice_mask[0] = subslice_mask; - for (s = 1; s < sseu->max_slices; s++) - sseu->subslice_mask[s] = subslice_mask & 0x3; - /* Slice0 */ eu_en = ~I915_READ(GEN8_EU_DISABLE0); for (ss = 0; ss < sseu->max_subslices; ss++) @@ -270,14 +258,24 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) eu_en = ~I915_READ(GEN10_EU_DISABLE3); sseu_set_eus(sseu, 5, 1, eu_en & eu_mask); - /* Do a second pass where we mark the subslices disabled if all their - * eus are off. - */ + subslice_mask = (1 << 4) - 1; + subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> + GEN10_F2_SS_DIS_SHIFT); + for (s = 0; s < sseu->max_slices; s++) { + u32 subslice_mask_with_eus = subslice_mask; + for (ss = 0; ss < sseu->max_subslices; ss++) { if (sseu_get_eus(sseu, s, ss) == 0) - sseu->subslice_mask[s] &= ~BIT(ss); + subslice_mask_with_eus &= ~BIT(ss); } + + /* + * Slice0 can have up to 3 subslices, but there are only 2 in + * slice1/2. + */ + sseu->subslice_mask[s] = s == 0 ? 
subslice_mask_with_eus : + subslice_mask_with_eus & 0x3; } sseu->eu_total = compute_eu_total(sseu); @@ -303,6 +301,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 fuse; + u8 subslice_mask = 0; fuse = I915_READ(CHV_FUSE_GT); @@ -316,7 +315,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >> CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4); - sseu->subslice_mask[0] |= BIT(0); + subslice_mask |= BIT(0); sseu_set_eus(sseu, 0, 0, ~disabled_mask); } @@ -327,10 +326,12 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >> CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4); - sseu->subslice_mask[0] |= BIT(1); + subslice_mask |= BIT(1); sseu_set_eus(sseu, 0, 1, ~disabled_mask); } + sseu->subslice_mask[0] = subslice_mask; + sseu->eu_total = compute_eu_total(sseu); /* @@ -540,6 +541,7 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 fuse1; + u8 subslice_mask = 0; int s, ss; /* @@ -552,16 +554,15 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) /* fall through */ case 1: sseu->slice_mask = BIT(0); - sseu->subslice_mask[0] = BIT(0); + subslice_mask = BIT(0); break; case 2: sseu->slice_mask = BIT(0); - sseu->subslice_mask[0] = BIT(0) | BIT(1); + subslice_mask = BIT(0) | BIT(1); break; case 3: sseu->slice_mask = BIT(0) | BIT(1); - sseu->subslice_mask[0] = BIT(0) | BIT(1); - sseu->subslice_mask[1] = BIT(0) | BIT(1); + subslice_mask = BIT(0) | BIT(1); break; } @@ -583,10 +584,12 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) } intel_sseu_set_info(sseu, hweight8(sseu->slice_mask), - hweight8(sseu->subslice_mask[0]), + hweight8(subslice_mask), sseu->eu_per_subslice); for (s = 0; s < sseu->max_slices; s++) { + sseu->subslice_mask[s] = subslice_mask; + for (ss = 0; ss < sseu->max_subslices; ss++) { sseu_set_eus(sseu, s, ss, (1UL << sseu->eu_per_subslice) - 1); -- cgit v1.2.3 From 9e8a135ed5a414ba4333cda22b01ae77291f6b9b Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 23 Aug 2019 09:03:02 -0700 Subject: drm/i915: Add function to set subslices Add a new function to set a set of subslices for a given slice. 
v2: Fix typo in subslice_mask assignment Signed-off-by: Stuart Summers Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-7-stuart.summers@intel.com --- drivers/gpu/drm/i915/gt/intel_sseu.c | 6 ++++++ drivers/gpu/drm/i915/gt/intel_sseu.h | 3 +++ drivers/gpu/drm/i915/intel_device_info.c | 18 +++++++++++------- 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index d52686a1afdc..3a5db0dbac72 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -32,6 +32,12 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu) return total; } +void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, + u8 ss_mask) +{ + sseu->subslice_mask[slice] = ss_mask; +} + unsigned int intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice) { diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index 7f2355ce963d..7f600f50dedb 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -78,6 +78,9 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu); unsigned int intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice); +void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, + u8 ss_mask); + u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, const struct intel_sseu *req_sseu); diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 52515efe9f4e..1a45728ac712 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -206,7 +206,10 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) int ss; sseu->slice_mask |= BIT(s); - sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask; + + intel_sseu_set_subslices(sseu, s, (ss_en >> ss_idx) & + ss_en_mask); + for (ss = 0; ss < sseu->max_subslices; ss++) { if (sseu->subslice_mask[s] & BIT(ss)) sseu_set_eus(sseu, s, ss, eu_en); @@ -274,8 +277,9 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) * Slice0 can have up to 3 subslices, but there are only 2 in * slice1/2. */ - sseu->subslice_mask[s] = s == 0 ? subslice_mask_with_eus : - subslice_mask_with_eus & 0x3; + intel_sseu_set_subslices(sseu, s, s == 0 ? 
+ subslice_mask_with_eus : + subslice_mask_with_eus & 0x3); } sseu->eu_total = compute_eu_total(sseu); @@ -330,7 +334,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) sseu_set_eus(sseu, 0, 1, ~disabled_mask); } - sseu->subslice_mask[0] = subslice_mask; + intel_sseu_set_subslices(sseu, 0, subslice_mask); sseu->eu_total = compute_eu_total(sseu); @@ -384,7 +388,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) /* skip disabled slice */ continue; - sseu->subslice_mask[s] = subslice_mask; + intel_sseu_set_subslices(sseu, s, subslice_mask); eu_disable = I915_READ(GEN9_EU_DISABLE(s)); for (ss = 0; ss < sseu->max_subslices; ss++) { @@ -491,7 +495,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) /* skip disabled slice */ continue; - sseu->subslice_mask[s] = subslice_mask; + intel_sseu_set_subslices(sseu, s, subslice_mask); for (ss = 0; ss < sseu->max_subslices; ss++) { u8 eu_disabled_mask; @@ -588,7 +592,7 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) sseu->eu_per_subslice); for (s = 0; s < sseu->max_slices; s++) { - sseu->subslice_mask[s] = subslice_mask; + intel_sseu_set_subslices(sseu, s, subslice_mask); for (ss = 0; ss < sseu->max_subslices; ss++) { sseu_set_eus(sseu, s, ss, -- cgit v1.2.3 From 6db40ec80f544e289fe836319b20fcc19a454e8d Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 23 Aug 2019 09:03:03 -0700 Subject: drm/i915: Use subslice stride to set subslices for a given slice Add a subslice stride calculation when setting subslices. This aligns more closely with the userspace expectation of the subslice mask structure. v2: Use local variable for subslice_mask on HSW and clean up a few other subslice_mask local variable changes v3: Add GEM_BUG_ON for ss_stride to prevent array overflow (Chris) Split main set function and refactors in intel_device_info.c into separate patches (Chris) v4: Reduce ss_stride size check when setting subslices per slice based on actual expected max stride (Chris) Move that GEM_BUG_ON check for the ss_stride out to the patch which adds the ss_stride v5: Use memcpy instead of looping through each stride index Signed-off-by: Stuart Summers Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-8-stuart.summers@intel.com --- drivers/gpu/drm/i915/gt/intel_sseu.c | 6 ++++-- drivers/gpu/drm/i915/gt/intel_sseu.h | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index 3a5db0dbac72..1505042d7b5d 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -33,9 +33,11 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu) } void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, - u8 ss_mask) + u32 ss_mask) { - sseu->subslice_mask[slice] = ss_mask; + int offset = slice * sseu->ss_stride; + + memcpy(&sseu->subslice_mask[offset], &ss_mask, sseu->ss_stride); } unsigned int diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index 7f600f50dedb..73a9064291a2 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -79,7 +79,7 @@ unsigned int intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice); void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, - u8 ss_mask); + u32 ss_mask); u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, 
const struct intel_sseu *req_sseu); -- cgit v1.2.3 From e1210bbfb1f8dc2865e8ebd6884afa895333b6e4 Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 23 Aug 2019 09:03:04 -0700 Subject: drm/i915: Add function to determine if a slice has a subslice Add a new function to determine whether a particular slice has a given subslice. Signed-off-by: Stuart Summers Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-9-stuart.summers@intel.com --- drivers/gpu/drm/i915/gt/intel_sseu.h | 16 ++++++++++++++++ drivers/gpu/drm/i915/intel_device_info.c | 9 ++++----- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index 73a9064291a2..7703d75f2da3 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -10,6 +10,8 @@ #include #include +#include "i915_gem.h" + struct drm_i915_private; #define GEN_MAX_SLICES (6) /* CNL upper bound */ @@ -69,6 +71,20 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu) return value; } +static inline bool +intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice, + int subslice) +{ + u8 mask; + int ss_idx = subslice / BITS_PER_BYTE; + + GEM_BUG_ON(ss_idx >= sseu->ss_stride); + + mask = sseu->subslice_mask[slice * sseu->ss_stride + ss_idx]; + + return mask & BIT(subslice % BITS_PER_BYTE); +} + void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, u8 max_subslices, u8 max_eus_per_subslice); diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 1a45728ac712..c20f74ee5f22 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -210,10 +210,9 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) intel_sseu_set_subslices(sseu, s, (ss_en >> ss_idx) & ss_en_mask); - for (ss = 0; ss < sseu->max_subslices; ss++) { - if (sseu->subslice_mask[s] & BIT(ss)) + for (ss = 0; ss < sseu->max_subslices; ss++) + if (intel_sseu_has_subslice(sseu, s, ss)) sseu_set_eus(sseu, s, ss, eu_en); - } } } sseu->eu_per_subslice = hweight8(eu_en); @@ -395,7 +394,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) int eu_per_ss; u8 eu_disabled_mask; - if (!(sseu->subslice_mask[s] & BIT(ss))) + if (!intel_sseu_has_subslice(sseu, s, ss)) /* skip disabled subslice */ continue; @@ -501,7 +500,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) u8 eu_disabled_mask; u32 n_disabled; - if (!(sseu->subslice_mask[s] & BIT(ss))) + if (!intel_sseu_has_subslice(sseu, s, ss)) /* skip disabled subslice */ continue; -- cgit v1.2.3 From eaef5b3c411337db73a205d0e009c72f7a39f3a3 Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 23 Aug 2019 09:03:05 -0700 Subject: drm/i915: Refactor instdone loops on new subslice functions Refactor instdone loops to use the new intel_sseu_has_subslice function. 
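Purely as illustration (not part of the patch), callers now hand the sseu info to the iterator and every per-subslice test goes through intel_sseu_has_subslice(); roughly, following the intel_engine_get_instdone() hunk below:

	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
	int slice, subslice;

	for_each_instdone_slice_subslice(i915, sseu, slice, subslice)
		instdone->sampler[slice][subslice] =
			read_subslice_reg(engine, slice, subslice,
					  GEN7_SAMPLER_INSTDONE);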
Signed-off-by: Stuart Summers Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-10-stuart.summers@intel.com --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 3 ++- drivers/gpu/drm/i915/gt/intel_engine_types.h | 30 +++++++++++++--------------- drivers/gpu/drm/i915/gt/intel_hangcheck.c | 3 ++- drivers/gpu/drm/i915/i915_debugfs.c | 5 +++-- drivers/gpu/drm/i915/i915_gpu_error.c | 5 +++-- 5 files changed, 24 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 82630db0394b..17006d50b63f 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -948,6 +948,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, struct intel_instdone *instdone) { struct drm_i915_private *i915 = engine->i915; + const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; struct intel_uncore *uncore = engine->uncore; u32 mmio_base = engine->mmio_base; int slice; @@ -965,7 +966,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, instdone->slice_common = intel_uncore_read(uncore, GEN7_SC_INSTDONE); - for_each_instdone_slice_subslice(i915, slice, subslice) { + for_each_instdone_slice_subslice(i915, sseu, slice, subslice) { instdone->sampler[slice][subslice] = read_subslice_reg(engine, slice, subslice, GEN7_SAMPLER_INSTDONE); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index a82cea95c2f2..15e02cb58a67 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -576,20 +576,18 @@ intel_engine_is_virtual(const struct intel_engine_cs *engine) return engine->flags & I915_ENGINE_IS_VIRTUAL; } -#define instdone_slice_mask(dev_priv__) \ - (IS_GEN(dev_priv__, 7) ? \ - 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask) - -#define instdone_subslice_mask(dev_priv__) \ - (IS_GEN(dev_priv__, 7) ? \ - 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0]) - -#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \ - for ((slice__) = 0, (subslice__) = 0; \ - (slice__) < I915_MAX_SLICES; \ - (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \ - (slice__) += ((subslice__) == 0)) \ - for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \ - (BIT(subslice__) & instdone_subslice_mask(dev_priv__))) - +#define instdone_has_slice(dev_priv___, sseu___, slice___) \ + ((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___)) + +#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \ + (IS_GEN(dev_priv__, 7) ? 
(1 & BIT(subslice__)) : \ + intel_sseu_has_subslice(sseu__, 0, subslice__)) + +#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \ + for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \ + (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \ + (slice_) += ((subslice_) == 0)) \ + for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \ + (instdone_has_subslice(dev_priv_, sseu_, slice_, \ + subslice_))) #endif /* __INTEL_ENGINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c index 05d042cdefe2..40f62f780be5 100644 --- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c @@ -53,6 +53,7 @@ static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone) static bool subunits_stuck(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; + const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; struct intel_instdone instdone; struct intel_instdone *accu_instdone = &engine->hangcheck.instdone; bool stuck; @@ -71,7 +72,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine) stuck &= instdone_unchanged(instdone.slice_common, &accu_instdone->slice_common); - for_each_instdone_slice_subslice(dev_priv, slice, subslice) { + for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) { stuck &= instdone_unchanged(instdone.sampler[slice][subslice], &accu_instdone->sampler[slice][subslice]); stuck &= instdone_unchanged(instdone.row[slice][subslice], diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 28713c9c98ea..44797f8b7c50 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -996,6 +996,7 @@ static void i915_instdone_info(struct drm_i915_private *dev_priv, struct seq_file *m, struct intel_instdone *instdone) { + const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; int slice; int subslice; @@ -1011,11 +1012,11 @@ static void i915_instdone_info(struct drm_i915_private *dev_priv, if (INTEL_GEN(dev_priv) <= 6) return; - for_each_instdone_slice_subslice(dev_priv, slice, subslice) + for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, instdone->sampler[slice][subslice]); - for_each_instdone_slice_subslice(dev_priv, slice, subslice) + for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, instdone->row[slice][subslice]); } diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index e284bd76fa86..4aff342b8944 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -421,6 +421,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) static void error_print_instdone(struct drm_i915_error_state_buf *m, const struct drm_i915_error_engine *ee) { + const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu; int slice; int subslice; @@ -436,12 +437,12 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, if (INTEL_GEN(m->i915) <= 6) return; - for_each_instdone_slice_subslice(m->i915, slice, subslice) + for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, ee->instdone.sampler[slice][subslice]); - for_each_instdone_slice_subslice(m->i915, slice, subslice) 
+ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, ee->instdone.row[slice][subslice]); -- cgit v1.2.3 From 668df17f594d6f57e822ceee680ace3233e97f02 Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 23 Aug 2019 09:03:06 -0700 Subject: drm/i915: Add new function to copy subslices for a slice Add a new function to copy subslices for a specified slice between intel_sseu structures for the purpose of determining power-gate status. Note that currently ss_stride has a max of 1. Signed-off-by: Stuart Summers Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-11-stuart.summers@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 44797f8b7c50..6e8b40299939 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -3726,6 +3726,15 @@ i915_cache_sharing_set(void *data, u64 val) return 0; } +static void +intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice, + u8 *to_mask) +{ + int offset = slice * sseu->ss_stride; + + memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride); +} + DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, i915_cache_sharing_get, i915_cache_sharing_set, "%llu\n"); @@ -3799,7 +3808,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, continue; sseu->slice_mask |= BIT(s); - sseu->subslice_mask[s] = info->sseu.subslice_mask[s]; + intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask); for (ss = 0; ss < info->sseu.max_subslices; ss++) { unsigned int eu_cnt; @@ -3850,7 +3859,8 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, sseu->slice_mask |= BIT(s); if (IS_GEN9_BC(dev_priv)) - sseu->subslice_mask[s] = info->sseu.subslice_mask[s]; + intel_sseu_copy_subslices(&info->sseu, s, + sseu->subslice_mask); for (ss = 0; ss < info->sseu.max_subslices; ss++) { unsigned int eu_cnt; @@ -3886,7 +3896,8 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, if (sseu->slice_mask) { sseu->eu_per_subslice = info->sseu.eu_per_subslice; for (s = 0; s < fls(sseu->slice_mask); s++) - sseu->subslice_mask[s] = info->sseu.subslice_mask[s]; + intel_sseu_copy_subslices(&info->sseu, s, + sseu->subslice_mask); sseu->eu_total = sseu->eu_per_subslice * intel_sseu_subslice_total(sseu); -- cgit v1.2.3 From 100f5f7fbc3e23f58511a11a3751dfacf1ccb5a7 Mon Sep 17 00:00:00 2001 From: Stuart Summers Date: Fri, 23 Aug 2019 09:03:07 -0700 Subject: drm/i915: Expand subslice mask Currently, the subslice_mask runtime parameter is stored as an array of subslices per slice. Expand the subslice mask array to better match what is presented to userspace through the I915_QUERY_TOPOLOGY_INFO ioctl. 
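The index calculation spelled out just below can be made concrete with a short worked example (illustrative only, assuming ss_stride == 1):

	/*
	 * slice 2, subslice 5:
	 *   byte = 2 * ss_stride + 5 / 8 = 2
	 *   bit  = 5 % 8                 = 5
	 * so the subslice is recorded as subslice_mask[2] & BIT(5),
	 * which is exactly what intel_sseu_has_subslice() tests.
	 */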
The index into this array is then calculated: slice * subslice stride + subslice index / 8 v2: Fix 32-bit build v3: Use new helper function in SSEU workaround warning message v4: Use GEM_BUG_ON to force developers to use valid SSEU configurations per platform (Chris) Signed-off-by: Stuart Summers Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190823160307.180813-12-stuart.summers@intel.com --- drivers/gpu/drm/i915/gt/intel_sseu.c | 16 +++++++++++++++- drivers/gpu/drm/i915/gt/intel_sseu.h | 4 +++- drivers/gpu/drm/i915/gt/intel_workarounds.c | 5 ++--- drivers/gpu/drm/i915/i915_debugfs.c | 5 ++++- drivers/gpu/drm/i915/intel_device_info.c | 8 ++++---- 5 files changed, 28 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index 1505042d7b5d..74f793423231 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -32,6 +32,20 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu) return total; } +u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice) +{ + int i, offset = slice * sseu->ss_stride; + u32 mask = 0; + + GEM_BUG_ON(slice >= sseu->max_slices); + + for (i = 0; i < sseu->ss_stride; i++) + mask |= (u32)sseu->subslice_mask[offset + i] << + i * BITS_PER_BYTE; + + return mask; +} + void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, u32 ss_mask) { @@ -43,7 +57,7 @@ void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, unsigned int intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice) { - return hweight8(sseu->subslice_mask[slice]); + return hweight32(intel_sseu_get_subslices(sseu, slice)); } u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index 7703d75f2da3..4070f6ff1db6 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -23,7 +23,7 @@ struct drm_i915_private; struct sseu_dev_info { u8 slice_mask; - u8 subslice_mask[GEN_MAX_SLICES]; + u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE]; u16 eu_total; u8 eu_per_subslice; u8 min_eu_in_pool; @@ -94,6 +94,8 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu); unsigned int intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice); +u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice); + void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, u32 ss_mask); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 126ab3667919..ad2261e0cba8 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -801,11 +801,10 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) } slice = fls(sseu->slice_mask) - 1; - GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask)); - subslice = fls(l3_en & sseu->subslice_mask[slice]); + subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice)); if (!subslice) { DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n", - sseu->subslice_mask[slice], l3_en); + intel_sseu_get_subslices(sseu, slice), l3_en); subslice = fls(l3_en); WARN_ON(!subslice); } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 6e8b40299939..8c1d70425424 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ 
b/drivers/gpu/drm/i915/i915_debugfs.c @@ -3864,13 +3864,16 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, for (ss = 0; ss < info->sseu.max_subslices; ss++) { unsigned int eu_cnt; + u8 ss_idx = s * info->sseu.ss_stride + + ss / BITS_PER_BYTE; if (IS_GEN9_LP(dev_priv)) { if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) /* skip disabled subslice */ continue; - sseu->subslice_mask[s] |= BIT(ss); + sseu->subslice_mask[ss_idx] |= + BIT(ss % BITS_PER_BYTE); } eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index c20f74ee5f22..d9b5baaef5d0 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -93,9 +93,9 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) hweight8(sseu->slice_mask), sseu->slice_mask); drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu)); for (s = 0; s < sseu->max_slices; s++) { - drm_printf(p, "slice%d: %u subslices, mask=%04x\n", + drm_printf(p, "slice%d: %u subslices, mask=%08x\n", s, intel_sseu_subslices_per_slice(sseu, s), - sseu->subslice_mask[s]); + intel_sseu_get_subslices(sseu, s)); } drm_printf(p, "EU total: %u\n", sseu->eu_total); drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice); @@ -159,9 +159,9 @@ void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, } for (s = 0; s < sseu->max_slices; s++) { - drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n", + drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n", s, intel_sseu_subslices_per_slice(sseu, s), - sseu->subslice_mask[s]); + intel_sseu_get_subslices(sseu, s)); for (ss = 0; ss < sseu->max_subslices; ss++) { u16 enabled_eus = sseu_get_eus(sseu, s, ss); -- cgit v1.2.3 From d06a79d33e0f97373a29decb66791758a6d2e968 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 21 Aug 2019 20:30:29 +0300 Subject: drm/i915: Use enum pipe instead of crtc index to track active pipes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We may need to eliminate the crtc->index == pipe assumptions from the code to support arbitrary pipes being fused off. Start that by switching some bitmasks over to using pipe instead of the crtc index. 
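Illustrative only (not part of the patch): the mask is now keyed by the hardware pipe rather than by the crtc index, as in the intel_modeset_checks() hunk below:

	/* was: state->active_crtcs |= 1 << i;   (i is the crtc index) */
	if (new_crtc_state->base.active)
		state->active_pipes |= BIT(crtc->pipe);
	else
		state->active_pipes &= ~BIT(crtc->pipe);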
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190821173033.24123-1-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_cdclk.c | 12 ++++++------ drivers/gpu/drm/i915/display/intel_display.c | 20 ++++++++++---------- drivers/gpu/drm/i915/display/intel_display_types.h | 4 ++-- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/intel_pm.c | 20 ++++++++++---------- 5 files changed, 29 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index d0bc42e5039c..939088c7d814 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2369,7 +2369,7 @@ static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.logical.voltage_level = vlv_calc_voltage_level(dev_priv, cdclk); - if (!state->active_crtcs) { + if (!state->active_pipes) { cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); state->cdclk.actual.cdclk = cdclk; @@ -2400,7 +2400,7 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.logical.voltage_level = bdw_calc_voltage_level(cdclk); - if (!state->active_crtcs) { + if (!state->active_pipes) { cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk); state->cdclk.actual.cdclk = cdclk; @@ -2470,7 +2470,7 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.logical.voltage_level = skl_calc_voltage_level(cdclk); - if (!state->active_crtcs) { + if (!state->active_pipes) { cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco); state->cdclk.actual.vco = vco; @@ -2506,7 +2506,7 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.logical.voltage_level = bxt_calc_voltage_level(cdclk); - if (!state->active_crtcs) { + if (!state->active_pipes) { if (IS_GEMINILAKE(dev_priv)) { cdclk = glk_calc_cdclk(state->cdclk.force_min_cdclk); vco = glk_de_pll_vco(dev_priv, cdclk); @@ -2544,7 +2544,7 @@ static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state) max(cnl_calc_voltage_level(cdclk), cnl_compute_min_voltage_level(state)); - if (!state->active_crtcs) { + if (!state->active_pipes) { cdclk = cnl_calc_cdclk(state->cdclk.force_min_cdclk); vco = cnl_cdclk_pll_vco(dev_priv, cdclk); @@ -2578,7 +2578,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state) max(icl_calc_voltage_level(dev_priv, cdclk), cnl_compute_min_voltage_level(state)); - if (!state->active_crtcs) { + if (!state->active_pipes) { cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref); vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index b51d1ceb8739..7b74df03a1ef 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -7080,7 +7080,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, intel_display_power_put_unchecked(dev_priv, domain); intel_crtc->enabled_power_domains = 0; - dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe); + dev_priv->active_pipes &= ~BIT(intel_crtc->pipe); dev_priv->min_cdclk[intel_crtc->pipe] = 0; dev_priv->min_voltage_level[intel_crtc->pipe] = 0; @@ -13452,7 +13452,7 @@ static int intel_modeset_checks(struct intel_atomic_state *state) state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk; state->modeset = true; - state->active_crtcs = dev_priv->active_crtcs; + 
state->active_pipes = dev_priv->active_pipes; state->cdclk.logical = dev_priv->cdclk.logical; state->cdclk.actual = dev_priv->cdclk.actual; state->cdclk.pipe = INVALID_PIPE; @@ -13460,12 +13460,12 @@ static int intel_modeset_checks(struct intel_atomic_state *state) for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (new_crtc_state->base.active) - state->active_crtcs |= 1 << i; + state->active_pipes |= BIT(crtc->pipe); else - state->active_crtcs &= ~(1 << i); + state->active_pipes &= ~BIT(crtc->pipe); if (old_crtc_state->base.active != new_crtc_state->base.active) - state->active_pipe_changes |= drm_crtc_mask(&crtc->base); + state->active_pipe_changes |= BIT(crtc->pipe); } /* @@ -13494,11 +13494,11 @@ static int intel_modeset_checks(struct intel_atomic_state *state) return ret; } - if (is_power_of_2(state->active_crtcs)) { + if (is_power_of_2(state->active_pipes)) { struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; - pipe = ilog2(state->active_crtcs); + pipe = ilog2(state->active_pipes); crtc = intel_get_crtc_for_pipe(dev_priv, pipe); crtc_state = intel_atomic_get_new_crtc_state(state, crtc); if (crtc_state && needs_modeset(crtc_state)) @@ -14191,7 +14191,7 @@ static int intel_atomic_commit(struct drm_device *dev, sizeof(state->min_cdclk)); memcpy(dev_priv->min_voltage_level, state->min_voltage_level, sizeof(state->min_voltage_level)); - dev_priv->active_crtcs = state->active_crtcs; + dev_priv->active_pipes = state->active_pipes; dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk; intel_cdclk_swap_state(state); @@ -16640,7 +16640,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct drm_connector_list_iter conn_iter; int i; - dev_priv->active_crtcs = 0; + dev_priv->active_pipes = 0; for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = @@ -16657,7 +16657,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc->active = crtc_state->base.active; if (crtc_state->base.active) - dev_priv->active_crtcs |= 1 << crtc->pipe; + dev_priv->active_pipes |= BIT(crtc->pipe); DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", crtc->base.base.id, crtc->base.name, diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 449abaea619f..12523456143f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -481,9 +481,9 @@ struct intel_atomic_state { * but the converse is not necessarily true; simply changing a mode may * not flip the final active status of any CRTC's */ - unsigned int active_pipe_changes; + u8 active_pipe_changes; - unsigned int active_crtcs; + u8 active_pipes; /* minimum acceptable cdclk for each pipe */ int min_cdclk[I915_MAX_PIPES]; /* minimum acceptable voltage level for each pipe */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 048896b1348b..ba3f6d82ef26 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1468,7 +1468,7 @@ struct drm_i915_private { */ struct mutex dpll_lock; - unsigned int active_crtcs; + u8 active_pipes; /* minimum acceptable cdclk for each pipe */ int min_cdclk[I915_MAX_PIPES]; /* minimum acceptable voltage level for each pipe */ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d3ea193cd093..09f29a337313 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3761,18 +3761,18 
@@ bool intel_can_enable_sagv(struct intel_atomic_state *state) /* * If there are no active CRTCs, no additional checks need be performed */ - if (hweight32(state->active_crtcs) == 0) + if (hweight32(state->active_pipes) == 0) return true; /* * SKL+ workaround: bspec recommends we disable SAGV when we have * more then one pipe enabled */ - if (hweight32(state->active_crtcs) > 1) + if (hweight32(state->active_pipes) > 1) return false; /* Since we're now guaranteed to only have one active CRTC... */ - pipe = ffs(state->active_crtcs) - 1; + pipe = ffs(state->active_pipes) - 1; crtc = intel_get_crtc_for_pipe(dev_priv, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); @@ -3867,14 +3867,14 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, if (WARN_ON(!state) || !crtc_state->base.active) { alloc->start = 0; alloc->end = 0; - *num_active = hweight32(dev_priv->active_crtcs); + *num_active = hweight32(dev_priv->active_pipes); return; } if (intel_state->active_pipe_changes) - *num_active = hweight32(intel_state->active_crtcs); + *num_active = hweight32(intel_state->active_pipes); else - *num_active = hweight32(dev_priv->active_crtcs); + *num_active = hweight32(dev_priv->active_pipes); ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate, *num_active, ddb); @@ -5464,7 +5464,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed) * If this transaction isn't actually touching any CRTC's, don't * bother with watermark calculation. Note that if we pass this * test, we're guaranteed to hold at least one CRTC state mutex, - * which means we can safely use values like dev_priv->active_crtcs + * which means we can safely use values like dev_priv->active_pipes * since any racing commits that want to update them would need to * hold _all_ CRTC state mutexes. */ @@ -5489,13 +5489,13 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed) state->active_pipe_changes = ~0; /* - * We usually only initialize state->active_crtcs if we + * We usually only initialize state->active_pipes if we * we're doing a modeset; make sure this field is always * initialized during the sanitization process that happens * on the first commit too. */ if (!state->modeset) - state->active_crtcs = dev_priv->active_crtcs; + state->active_pipes = dev_priv->active_pipes; } /* @@ -5811,7 +5811,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) hw->dirty_pipes |= drm_crtc_mask(&crtc->base); } - if (dev_priv->active_crtcs) { + if (dev_priv->active_pipes) { /* Fully recompute DDB on first atomic commit */ dev_priv->wm.distrust_bios_wm = true; } -- cgit v1.2.3 From e8edae54c593bc9aea949fbf6b2a59fe53cd5d83 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 21 Aug 2019 20:30:30 +0300 Subject: drm/i915: Unconfuse pipe vs. crtc->index in i915_get_crtc_scanoutpos() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The "pipe" argument passed in by the vblank code is in fact the crtc index. Don't assume that is the same as the pipe. 
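A sketch of the fix (illustrative, mirroring the i915_get_crtc_scanoutpos() hunk below): resolve the crtc from the index first and only then take its pipe:

	struct intel_crtc *crtc =
		to_intel_crtc(drm_crtc_from_index(dev, index));
	enum pipe pipe = crtc->pipe;	/* not necessarily equal to index */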
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190821173033.24123-2-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/i915_irq.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 77391d8325bf..8ac6f6849981 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -942,14 +942,14 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) return (position + crtc->scanline_offset) % vtotal; } -bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, +bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index, bool in_vblank_irq, int *vpos, int *hpos, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, - pipe); + struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index)); + enum pipe pipe = crtc->pipe; int position; int vbl_start, vbl_end, hsync_start, htotal, vtotal; unsigned long irqflags; @@ -992,7 +992,7 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, /* No obvious pixelcount register. Only query vertical * scanout position from Display scan line register. */ - position = __intel_get_crtc_scanline(intel_crtc); + position = __intel_get_crtc_scanline(crtc); } else { /* Have access to pixelcount since start of frame. * We can split this into vertical and horizontal -- cgit v1.2.3 From d048a2684a413b33a28be277a7e9cee960ec8bb2 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 21 Aug 2019 20:30:31 +0300 Subject: drm/i915: Use enum pipe consistently MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace all "int pipe"s with "enum pipe pipe"s to make it clear what we're dealing with. 
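The conversion itself is mechanical; for illustration only:

	-	int pipe = crtc->pipe;
	+	enum pipe pipe = crtc->pipe;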
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190821173033.24123-3-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 42 +++++++++++----------- drivers/gpu/drm/i915/display/intel_display_types.h | 2 +- drivers/gpu/drm/i915/display/intel_dvo.c | 2 +- drivers/gpu/drm/i915/display/intel_lvds.c | 2 +- drivers/gpu/drm/i915/display/vlv_dsi.c | 2 +- drivers/gpu/drm/i915/i915_debugfs.c | 2 +- drivers/gpu/drm/i915/i915_irq.c | 11 +++--- drivers/gpu/drm/i915/intel_pm.c | 2 +- 8 files changed, 33 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 7b74df03a1ef..ea2915dde6ab 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -490,7 +490,7 @@ static const struct intel_limit intel_limits_bxt = { /* WA Display #0827: Gen9:all */ static void -skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable) +skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { if (enable) I915_WRITE(CLKGATE_DIS_PSL(pipe), @@ -4421,7 +4421,7 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp; @@ -4464,7 +4464,7 @@ static void ironlake_fdi_link_train(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp, tries; @@ -4565,7 +4565,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp, i, retry; @@ -4698,7 +4698,7 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp, i, j; @@ -4816,7 +4816,7 @@ static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; i915_reg_t reg; u32 temp; @@ -4853,7 +4853,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; i915_reg_t reg; u32 temp; @@ -4884,7 +4884,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; i915_reg_t reg; u32 temp; @@ -5199,7 +5199,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; u32 temp; assert_pch_transcoder_disabled(dev_priv, pipe); @@ -5294,7 +5294,7 @@ static void 
lpt_pch_enable(const struct intel_atomic_state *state, lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); } -static void cpt_verify_modeset(struct drm_device *dev, int pipe) +static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t dslreg = PIPEDSL(pipe); @@ -5633,7 +5633,7 @@ static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; if (crtc_state->pch_pfit.enabled) { /* Force use of hard-coded filter coefficients @@ -5746,7 +5746,7 @@ intel_post_enable_primary(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; /* * Gen2 reports pipe underruns whenever all planes are disabled. @@ -5770,7 +5770,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; /* * Gen2 reports pipe underruns whenever all planes are disabled. @@ -6293,7 +6293,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; if (WARN_ON(intel_crtc->active)) return; @@ -6426,7 +6426,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe, hsw_workaround_pipe; + enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe; enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; bool psl_clkgate_wa; @@ -6552,7 +6552,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; /* * Sometimes spurious CPU pipe underruns happen when the @@ -6839,7 +6839,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; if (WARN_ON(intel_crtc->active)) return; @@ -6971,7 +6971,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; /* * On gen2 planes are double buffered but the pipe isn't, so we must @@ -8543,7 +8543,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = pipe_config->cpu_transcoder; + enum pipe pipe = crtc->pipe; struct dpll clock; u32 mdiv; int refclk = 100000; @@ 
-8653,7 +8653,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = pipe_config->cpu_transcoder; + enum pipe pipe = crtc->pipe; enum dpio_channel port = vlv_pipe_to_channel(pipe); struct dpll clock; u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; @@ -11265,7 +11265,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = pipe_config->cpu_transcoder; + enum pipe pipe = crtc->pipe; u32 dpll = pipe_config->dpll_hw_state.dpll; u32 fp; struct dpll clock; diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 12523456143f..96514dcc7812 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1509,7 +1509,7 @@ intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) drm_wait_one_vblank(&dev_priv->drm, pipe); } static inline void -intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe) +intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe) { const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 93baf366692e..34193d04597a 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -280,7 +280,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; struct intel_dvo *intel_dvo = enc_to_dvo(encoder); - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; u32 dvo_val; i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg; diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index b7c459a8931c..c786abdc3336 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -232,7 +232,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; u32 temp; if (HAS_PCH_SPLIT(dev_priv)) { diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index a71b22bdd95b..50064cde0724 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -749,7 +749,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; enum port port; u32 val; bool glk_cold_boot = false; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 8c1d70425424..5c1a2b1e7d34 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -376,7 +376,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) static void 
gen8_display_interrupt_info(struct seq_file *m) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - int pipe; + enum pipe pipe; for_each_pipe(dev_priv, pipe) { enum intel_display_power_domain power_domain; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 8ac6f6849981..3f1b6ee157ba 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1716,7 +1716,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { - int pipe; + enum pipe pipe; spin_lock(&dev_priv->irq_lock); @@ -1741,6 +1741,7 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, status_mask = PIPE_FIFO_UNDERRUN_STATUS; switch (pipe) { + default: case PIPE_A: iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; break; @@ -2136,7 +2137,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - int pipe; + enum pipe pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); @@ -2222,7 +2223,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - int pipe; + enum pipe pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); @@ -3246,7 +3247,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv) static void gen8_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - int pipe; + enum pipe pipe; gen8_master_intr_disable(dev_priv->uncore.regs); @@ -3271,7 +3272,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) static void gen11_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - int pipe; + enum pipe pipe; gen11_master_intr_disable(dev_priv->uncore.regs); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 09f29a337313..437cd50e5d06 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -8858,7 +8858,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) { - int pipe; + enum pipe pipe; u32 val; /* -- cgit v1.2.3 From c08e91323920ec4b3a4d889af3988e300b1902f1 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 21 Aug 2019 20:30:32 +0300 Subject: drm/i915: s/num_active_crtcs/num_active_pipes/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Set a good example and talk about pipes rather than crtcs. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190821173033.24123-4-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/intel_pm.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 437cd50e5d06..b4b9609db092 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1490,7 +1490,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv, struct g4x_wm_values *wm) { struct intel_crtc *crtc; - int num_active_crtcs = 0; + int num_active_pipes = 0; wm->cxsr = true; wm->hpll_en = true; @@ -1509,10 +1509,10 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv, if (!wm_state->fbc_en) wm->fbc_en = false; - num_active_crtcs++; + num_active_pipes++; } - if (num_active_crtcs != 1) { + if (num_active_pipes != 1) { wm->cxsr = false; wm->hpll_en = false; wm->fbc_en = false; @@ -2098,7 +2098,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv, struct vlv_wm_values *wm) { struct intel_crtc *crtc; - int num_active_crtcs = 0; + int num_active_pipes = 0; wm->level = dev_priv->wm.max_level; wm->cxsr = true; @@ -2112,14 +2112,14 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv, if (!wm_state->cxsr) wm->cxsr = false; - num_active_crtcs++; + num_active_pipes++; wm->level = min_t(int, wm->level, wm_state->num_levels - 1); } - if (num_active_crtcs != 1) + if (num_active_pipes != 1) wm->cxsr = false; - if (num_active_crtcs > 1) + if (num_active_pipes > 1) wm->level = VLV_WM_LEVEL_PM2; for_each_intel_crtc(&dev_priv->drm, crtc) { -- cgit v1.2.3 From 0b14d96820d1fd05ea610a0f5f952b34e5a24f2c Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 21 Aug 2019 20:30:33 +0300 Subject: drm/i915: Use hweight8() for 8bit masks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use hweight8() instead of hweight32() for 8bit masks. Doesn't actually matter for us since the arch code will go for hweight32() anyway, but maybe we stil want to do this for documentation purposes? 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190821173033.24123-5-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/intel_pm.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b4b9609db092..4fa9bc83c8b4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1327,8 +1327,8 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state); struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; - int num_active_planes = hweight32(crtc_state->active_planes & - ~BIT(PLANE_CURSOR)); + int num_active_planes = hweight8(crtc_state->active_planes & + ~BIT(PLANE_CURSOR)); const struct g4x_pipe_wm *raw; const struct intel_plane_state *old_plane_state; const struct intel_plane_state *new_plane_state; @@ -1659,7 +1659,7 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2]; struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); - int num_active_planes = hweight32(active_planes); + int num_active_planes = hweight8(active_planes); const int fifo_size = 511; int fifo_extra, fifo_left = fifo_size; int sprite0_fifo_extra = 0; @@ -1848,8 +1848,8 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; const struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; - int num_active_planes = hweight32(crtc_state->active_planes & - ~BIT(PLANE_CURSOR)); + int num_active_planes = hweight8(crtc_state->active_planes & + ~BIT(PLANE_CURSOR)); bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base); const struct intel_plane_state *old_plane_state; const struct intel_plane_state *new_plane_state; @@ -3761,14 +3761,14 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state) /* * If there are no active CRTCs, no additional checks need be performed */ - if (hweight32(state->active_pipes) == 0) + if (hweight8(state->active_pipes) == 0) return true; /* * SKL+ workaround: bspec recommends we disable SAGV when we have * more then one pipe enabled */ - if (hweight32(state->active_pipes) > 1) + if (hweight8(state->active_pipes) > 1) return false; /* Since we're now guaranteed to only have one active CRTC... */ @@ -3867,14 +3867,14 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, if (WARN_ON(!state) || !crtc_state->base.active) { alloc->start = 0; alloc->end = 0; - *num_active = hweight32(dev_priv->active_pipes); + *num_active = hweight8(dev_priv->active_pipes); return; } if (intel_state->active_pipe_changes) - *num_active = hweight32(intel_state->active_pipes); + *num_active = hweight8(intel_state->active_pipes); else - *num_active = hweight32(dev_priv->active_pipes); + *num_active = hweight8(dev_priv->active_pipes); ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate, *num_active, ddb); -- cgit v1.2.3 From 6dcb85a0ad990455ae7c596e3fc966ad9c1ba9c5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 23 Aug 2019 14:26:46 +0100 Subject: drm/i915: Hold irq-off for the entire fake lock period Sadly lockdep records when the irqs are re-enabled and then marks up the fake lock as being irq-unsafe. 
Our hand is forced and so we must mark up the entire fake lock critical section as irq-off. Hopefully this is the last tweak required! v2: Not quite, we need to mark the timeline spinlock as irqsafe. That was a genuine bug being hidden by the earlier lockdep splat. Fixes: d67739268cf0 ("drm/i915/gt: Mark up the nested engine-pm timeline lock as irqsafe") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190823132700.25286-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 28 ++++++++++++++++++---------- drivers/gpu/drm/i915/gt/intel_reset.c | 9 +++++---- drivers/gpu/drm/i915/gt/intel_timeline.c | 10 ++++++---- drivers/gpu/drm/i915/i915_gem.c | 15 ++++++++------- drivers/gpu/drm/i915/i915_request.c | 9 +++++---- 5 files changed, 42 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index a372d4ea9370..65b5ca74b394 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -39,27 +39,32 @@ static int __engine_unpark(struct intel_wakeref *wf) #if IS_ENABLED(CONFIG_LOCKDEP) -static inline void __timeline_mark_lock(struct intel_context *ce) +static inline unsigned long __timeline_mark_lock(struct intel_context *ce) { unsigned long flags; local_irq_save(flags); mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_); - local_irq_restore(flags); + + return flags; } -static inline void __timeline_mark_unlock(struct intel_context *ce) +static inline void __timeline_mark_unlock(struct intel_context *ce, + unsigned long flags) { mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_); + local_irq_restore(flags); } #else -static inline void __timeline_mark_lock(struct intel_context *ce) +static inline unsigned long __timeline_mark_lock(struct intel_context *ce) { + return 0; } -static inline void __timeline_mark_unlock(struct intel_context *ce) +static inline void __timeline_mark_unlock(struct intel_context *ce, + unsigned long flags) { } @@ -68,6 +73,8 @@ static inline void __timeline_mark_unlock(struct intel_context *ce) static bool switch_to_kernel_context(struct intel_engine_cs *engine) { struct i915_request *rq; + unsigned long flags; + bool result = true; /* Already inside the kernel context, safe to power down. */ if (engine->wakeref_serial == engine->serial) @@ -89,12 +96,12 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) * retiring the last request, thus all rings should be empty and * all timelines idle. */ - __timeline_mark_lock(engine->kernel_context); + flags = __timeline_mark_lock(engine->kernel_context); rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT); if (IS_ERR(rq)) /* Context switch failed, hope for the best! Maybe reset? 
*/ - return true; + goto out_unlock; intel_timeline_enter(rq->timeline); @@ -110,9 +117,10 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) __intel_wakeref_defer_park(&engine->wakeref); __i915_request_queue(rq, NULL); - __timeline_mark_unlock(engine->kernel_context); - - return false; + result = false; +out_unlock: + __timeline_mark_unlock(engine->kernel_context, flags); + return result; } static int __engine_park(struct intel_wakeref *wf) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 077716442c90..b9d84d52e986 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -792,6 +792,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) { struct intel_gt_timelines *timelines = >->timelines; struct intel_timeline *tl; + unsigned long flags; if (!test_bit(I915_WEDGED, >->reset.flags)) return true; @@ -811,7 +812,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) * * No more can be submitted until we reset the wedged bit. */ - spin_lock(&timelines->lock); + spin_lock_irqsave(&timelines->lock, flags); list_for_each_entry(tl, &timelines->active_list, link) { struct i915_request *rq; @@ -819,7 +820,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) if (!rq) continue; - spin_unlock(&timelines->lock); + spin_unlock_irqrestore(&timelines->lock, flags); /* * All internal dependencies (i915_requests) will have @@ -832,10 +833,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) i915_request_put(rq); /* Restart iteration after droping lock */ - spin_lock(&timelines->lock); + spin_lock_irqsave(&timelines->lock, flags); tl = list_entry(&timelines->active_list, typeof(*tl), link); } - spin_unlock(&timelines->lock); + spin_unlock_irqrestore(&timelines->lock, flags); intel_gt_sanitize(gt, false); diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 02fbe11b671b..9cb01d9828f1 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -337,6 +337,7 @@ int intel_timeline_pin(struct intel_timeline *tl) void intel_timeline_enter(struct intel_timeline *tl) { struct intel_gt_timelines *timelines = &tl->gt->timelines; + unsigned long flags; lockdep_assert_held(&tl->mutex); @@ -345,14 +346,15 @@ void intel_timeline_enter(struct intel_timeline *tl) return; GEM_BUG_ON(!tl->active_count); /* overflow? 
*/ - spin_lock(&timelines->lock); + spin_lock_irqsave(&timelines->lock, flags); list_add(&tl->link, &timelines->active_list); - spin_unlock(&timelines->lock); + spin_unlock_irqrestore(&timelines->lock, flags); } void intel_timeline_exit(struct intel_timeline *tl) { struct intel_gt_timelines *timelines = &tl->gt->timelines; + unsigned long flags; lockdep_assert_held(&tl->mutex); @@ -360,9 +362,9 @@ void intel_timeline_exit(struct intel_timeline *tl) if (--tl->active_count) return; - spin_lock(&timelines->lock); + spin_lock_irqsave(&timelines->lock, flags); list_del(&tl->link); - spin_unlock(&timelines->lock); + spin_unlock_irqrestore(&timelines->lock, flags); /* * Since this timeline is idle, all bariers upon which we were waiting diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index eb31b69a316a..ec9a46c276de 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -889,12 +889,13 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915) static long wait_for_timelines(struct drm_i915_private *i915, - unsigned int flags, long timeout) + unsigned int wait, long timeout) { struct intel_gt_timelines *timelines = &i915->gt.timelines; struct intel_timeline *tl; + unsigned long flags; - spin_lock(&timelines->lock); + spin_lock_irqsave(&timelines->lock, flags); list_for_each_entry(tl, &timelines->active_list, link) { struct i915_request *rq; @@ -902,7 +903,7 @@ wait_for_timelines(struct drm_i915_private *i915, if (!rq) continue; - spin_unlock(&timelines->lock); + spin_unlock_irqrestore(&timelines->lock, flags); /* * "Race-to-idle". @@ -913,19 +914,19 @@ wait_for_timelines(struct drm_i915_private *i915, * want to complete as quickly as possible to avoid prolonged * stalls, so allow the gpu to boost to maximum clocks. 
*/ - if (flags & I915_WAIT_FOR_IDLE_BOOST) + if (wait & I915_WAIT_FOR_IDLE_BOOST) gen6_rps_boost(rq); - timeout = i915_request_wait(rq, flags, timeout); + timeout = i915_request_wait(rq, wait, timeout); i915_request_put(rq); if (timeout < 0) return timeout; /* restart after reacquiring the lock */ - spin_lock(&timelines->lock); + spin_lock_irqsave(&timelines->lock, flags); tl = list_entry(&timelines->active_list, typeof(*tl), link); } - spin_unlock(&timelines->lock); + spin_unlock_irqrestore(&timelines->lock, flags); return timeout; } diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index f1a0a57fc6fc..a53777dd371c 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1465,9 +1465,10 @@ bool i915_retire_requests(struct drm_i915_private *i915) { struct intel_gt_timelines *timelines = &i915->gt.timelines; struct intel_timeline *tl, *tn; + unsigned long flags; LIST_HEAD(free); - spin_lock(&timelines->lock); + spin_lock_irqsave(&timelines->lock, flags); list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { if (!mutex_trylock(&tl->mutex)) continue; @@ -1475,11 +1476,11 @@ bool i915_retire_requests(struct drm_i915_private *i915) intel_timeline_get(tl); GEM_BUG_ON(!tl->active_count); tl->active_count++; /* pin the list element */ - spin_unlock(&timelines->lock); + spin_unlock_irqrestore(&timelines->lock, flags); retire_requests(tl); - spin_lock(&timelines->lock); + spin_lock_irqsave(&timelines->lock, flags); /* Resume iteration after dropping lock */ list_safe_reset_next(tl, tn, link); @@ -1494,7 +1495,7 @@ bool i915_retire_requests(struct drm_i915_private *i915) list_add(&tl->link, &free); } } - spin_unlock(&timelines->lock); + spin_unlock_irqrestore(&timelines->lock, flags); list_for_each_entry_safe(tl, tn, &free, link) __intel_timeline_free(&tl->kref); -- cgit v1.2.3 From 191797a892c91aec6cdffc4e05696b722d779fe3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 23 Aug 2019 15:14:21 +0100 Subject: drm/i915/gtt: Preallocate Braswell top-level page directory In order for the Braswell top-level PD to remain the same from the time of request construction to its submission onto HW, as we may be asynchronously rewriting the page tables (thus changing the expected register state after having already stored the old addresses in the request), the top level PD must be preallocated. So wave goodbye to our lazy allocation of those 4x2 pages. v2: A little bit of write-flushing required (presumably it always has been required, but now we are more susceptible and it is showing up!) v3: Put back the forced-PD-reload on every batch, we can't survive without it and explicitly marking the context for PD reload makes Braswell turn nasty. 
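The v2 note above refers to ordering the CPU's page-table writes against the MMIO that publishes them. A minimal sketch of that requirement, with a toy PTE writer standing in for the driver's insert_entries hook (illustrative only, not part of the patch):

#include <asm/barrier.h>
#include <linux/types.h>

static void toy_insert_entries(u64 *ptes, unsigned int count, u64 dma_addr)
{
	unsigned int i;

	/* CPU fills in the page-table entries... */
	for (i = 0; i < count; i++)
		ptes[i] = (dma_addr + i * 4096) | 1; /* toy "valid" bit */

	/*
	 * ...and must flush those stores before any later MMIO write
	 * (e.g. the LRI that loads the PDP registers) lets the GPU walk
	 * the tables, otherwise the GPU may observe stale entries.
	 */
	wmb();
}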
Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190823141421.2398-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 8 +++++++- drivers/gpu/drm/i915/i915_gem_gtt.c | 10 +++++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 1cdfe05514c3..1f735ca9b173 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -1003,12 +1003,18 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data) intel_ring_advance(rq, cs); } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + int err; + + /* Magic required to prevent forcewake errors! */ + err = engine->emit_flush(rq, EMIT_INVALIDATE); + if (err) + return err; cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); if (IS_ERR(cs)) return PTR_ERR(cs); - *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES); + *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; for (i = GEN8_3LVL_PDPES; i--; ) { const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 2a425db1cfd8..e0e9b9b52544 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -168,6 +168,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma, pte_flags |= PTE_READ_ONLY; vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); + wmb(); return 0; } @@ -1426,6 +1427,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) set_pd_entry(pd, idx, pde); atomic_inc(px_used(pde)); /* keep pinned */ } + wmb(); return 0; } @@ -1513,11 +1515,9 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) } if (!i915_vm_is_4lvl(&ppgtt->vm)) { - if (intel_vgpu_active(i915)) { - err = gen8_preallocate_top_level_pdp(ppgtt); - if (err) - goto err_free_pd; - } + err = gen8_preallocate_top_level_pdp(ppgtt); + if (err) + goto err_free_pd; } ppgtt->vm.insert_entries = gen8_ppgtt_insert; -- cgit v1.2.3 From 636e83f2f208555c3d19d8b454ebdd8d8f4652cc Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 23 Aug 2019 16:39:44 +0100 Subject: drm/i915: Flush the existing fence before GGTT read/write Our fence management is lazy, very lazy. If the user marks an object as untiled, we do not immediately flush the fence but merely mark it as dirty. On the next use we have to remember to check and remove the fence, by which time we hope it is idle and we do not have to wait. v2: Throw away the old fence on the next ggtt_pin. 
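The shape of the fix, lifted from the hunk below and spaced out for readability:

	if (vma->fence && !i915_gem_object_is_tiled(obj)) {
		/* The object was untiled earlier; drop the stale fence
		 * before pinning instead of leaving it to a later fault.
		 */
		mutex_lock(&vma->vm->mutex);
		ret = i915_vma_revoke_fence(vma);
		mutex_unlock(&vma->vm->mutex);
		if (ret)
			return ERR_PTR(ret);
	}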
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111468 Fixes: 1f7fd484fff1 ("drm/i915: Replace i915_vma_put_fence()") Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190823153944.20630-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ec9a46c276de..95e7c52cf8ed 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1027,6 +1027,14 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, return ERR_PTR(ret); } + if (vma->fence && !i915_gem_object_is_tiled(obj)) { + mutex_lock(&vma->vm->mutex); + ret = i915_vma_revoke_fence(vma); + mutex_unlock(&vma->vm->mutex); + if (ret) + return ERR_PTR(ret); + } + ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL); if (ret) return ERR_PTR(ret); -- cgit v1.2.3 From 936ad29de8129747bbfeec8a85e38c8a830bd5da Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Mon, 19 Aug 2019 18:23:27 -0700 Subject: drm/i915/uc: define GuC and HuC FWs for EHL First uc firmware release for EHL. Signed-off-by: Daniele Ceraolo Spurio Cc: Matt Roper Cc: Anusha Srivatsa Cc: Michal Wajdeczko Reviewed-by: Stuart Summers Tested-by: Matt Roper Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190820012327.36443-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index bd22bf11adad..296a82603be0 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -39,12 +39,13 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, * Must be ordered based on platform + revid, from newer to older. */ #define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ - fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 8, 4, 3238)) \ - fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ - fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 03, 01, 2893)) \ - fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ - fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 01, 8, 2893)) \ - fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 01, 07, 1398)) + fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \ + fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 8, 4, 3238)) \ + fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ + fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 03, 01, 2893)) \ + fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ + fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 01, 8, 2893)) \ + fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 01, 07, 1398)) #define __MAKE_UC_FW_PATH(prefix_, name_, separator_, major_, minor_, patch_) \ "i915/" \ -- cgit v1.2.3 From 77715906921251bd9f75bcf4825f176df3f91208 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 23 Aug 2019 19:14:55 +0100 Subject: drm/i915: Keep drm_i915_file_private around under RCU Ensure that the drm_i915_file_private continues to exist as we attempt to remove a request from its list, which may race with the destruction of the file. 
<6> [38.380714] [IGT] gem_ctx_create: starting subtest basic-files <0> [42.201329] BUG: spinlock bad magic on CPU#0, kworker/u16:0/7 <4> [42.201356] general protection fault: 0000 [#1] PREEMPT SMP PTI <4> [42.201371] CPU: 0 PID: 7 Comm: kworker/u16:0 Tainted: G U 5.3.0-rc5-CI-Patchwork_14169+ #1 <4> [42.201391] Hardware name: Dell Inc. OptiPlex 745 /0GW726, BIOS 2.3.1 05/21/2007 <4> [42.201594] Workqueue: i915 retire_work_handler [i915] <4> [42.201614] RIP: 0010:spin_dump+0x5a/0x90 <4> [42.201625] Code: 00 48 8d 88 c0 06 00 00 48 c7 c7 00 71 09 82 e8 35 ef 00 00 48 85 db 44 8b 4d 08 41 b8 ff ff ff ff 48 c7 c1 0b cd 0f 82 74 0e <44> 8b 83 e0 04 00 00 48 8d 8b c0 06 00 00 8b 55 04 48 89 ee 48 c7 <4> [42.201660] RSP: 0018:ffffc9000004bd80 EFLAGS: 00010202 <4> [42.201673] RAX: 0000000000000031 RBX: 6b6b6b6b6b6b6b6b RCX: ffffffff820fcd0b <4> [42.201688] RDX: 0000000000000000 RSI: ffff88803de266f8 RDI: 00000000ffffffff <4> [42.201703] RBP: ffff888038381ff8 R08: 00000000ffffffff R09: 000000006b6b6b6b <4> [42.201718] R10: 0000000041cb0b89 R11: 646162206b636f6c R12: ffff88802a618500 <4> [42.201733] R13: ffff88802b32c288 R14: ffff888038381ff8 R15: ffff88802b32c250 <4> [42.201748] FS: 0000000000000000(0000) GS:ffff88803de00000(0000) knlGS:0000000000000000 <4> [42.201765] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 <4> [42.201778] CR2: 00007f2cefc6d180 CR3: 00000000381ee000 CR4: 00000000000006f0 <4> [42.201793] Call Trace: <4> [42.201805] do_raw_spin_lock+0x66/0xb0 <4> [42.201898] i915_request_retire+0x548/0x7c0 [i915] <4> [42.201989] retire_requests+0x4d/0x60 [i915] <4> [42.202078] i915_retire_requests+0x144/0x2e0 [i915] <4> [42.202169] retire_work_handler+0x10/0x40 [i915] Recently, in commit 44c22f3f1a0a ("drm/i915: Serialize insertion into the file->mm.request_list"), we fixed a race on insertion. Now, it appears we also have a race with destruction! 
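A generic sketch of the scheme adopted below, with illustrative names rather than the driver's: the final kfree is deferred with kfree_rcu(), so a reader that claims the back-pointer under rcu_read_lock() can still take the embedded spinlock safely even if the file is being destroyed concurrently.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct toy_client {
	spinlock_t lock;
	struct list_head requests;
	struct rcu_head rcu;
};

static void toy_client_close(struct toy_client *client)
{
	/* Readers still inside rcu_read_lock() keep the memory valid. */
	kfree_rcu(client, rcu);
}

static void toy_remove_request(struct toy_client **slot,
			       struct list_head *link)
{
	struct toy_client *client;

	rcu_read_lock();
	client = xchg(slot, NULL); /* only one party clears the pointer */
	if (client) {
		spin_lock(&client->lock);
		list_del(link);
		spin_unlock(&client->lock);
	}
	rcu_read_unlock();
}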
Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190823181455.31910-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_throttle.c | 4 +--- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 6 +++++- drivers/gpu/drm/i915/i915_request.c | 13 +++++++------ 4 files changed, 14 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c index 1e372420771b..540ef0551789 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c @@ -50,10 +50,8 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data, if (time_after_eq(request->emitted_jiffies, recent_enough)) break; - if (target) { + if (target && xchg(&target->file_priv, NULL)) list_del(&target->client_link); - target->file_priv = NULL; - } target = request; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index b5b2a64753e6..723b9b7202b0 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1730,7 +1730,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) i915_gem_release(dev, file); mutex_unlock(&dev->struct_mutex); - kfree(file_priv); + kfree_rcu(file_priv, rcu); /* Catch up with all the deferred frees from "this" client */ i915_gem_flush_free_objects(to_i915(dev)); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ba3f6d82ef26..f5e39a3bfff6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -185,7 +185,11 @@ struct i915_mmu_object; struct drm_i915_file_private { struct drm_i915_private *dev_priv; - struct drm_file *file; + + union { + struct drm_file *file; + struct rcu_head rcu; + }; struct { spinlock_t lock; diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index a53777dd371c..18865ce04e13 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -169,16 +169,17 @@ remove_from_client(struct i915_request *request) { struct drm_i915_file_private *file_priv; - file_priv = READ_ONCE(request->file_priv); - if (!file_priv) + if (!READ_ONCE(request->file_priv)) return; - spin_lock(&file_priv->mm.lock); - if (request->file_priv) { + rcu_read_lock(); + file_priv = xchg(&request->file_priv, NULL); + if (file_priv) { + spin_lock(&file_priv->mm.lock); list_del(&request->client_link); - request->file_priv = NULL; + spin_unlock(&file_priv->mm.lock); } - spin_unlock(&file_priv->mm.lock); + rcu_read_unlock(); } static void free_capture_list(struct i915_request *request) -- cgit v1.2.3 From 75b974a859e5d9a3ceaa3aa03bbc27d404b32231 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 24 Aug 2019 00:51:41 +0100 Subject: drm/i915/selftests: Teach igt_gpu_fill_dw() to take intel_context Avoid having to pass around (ctx, engine) everywhere by passing the actual intel_context we intend to use. Today we preach this lesson to igt_gpu_fill_dw and its callers' callers. The immediate benefit for the GEM selftests is that we aim to use the GEM context as the control, the source of the engines on which to test the GEM context. 
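The resulting iteration shape in the selftests, condensed from the diff below:

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		/* ce carries both the engine and its address space (ce->vm) */
		err = gpu_fill(ce, obj, dw);
		if (err)
			break;
	}
	i915_gem_context_unlock_engines(ctx);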
Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190823235141.31799-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 103 +++++++++++---------- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 70 ++++++++------ drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 26 +++--- drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h | 13 ++- 4 files changed, 116 insertions(+), 96 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 8de83c6d81f5..c5cea4379216 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -879,9 +879,8 @@ out_object_put: return err; } -static int gpu_write(struct i915_vma *vma, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, +static int gpu_write(struct intel_context *ce, + struct i915_vma *vma, u32 dw, u32 val) { @@ -893,7 +892,7 @@ static int gpu_write(struct i915_vma *vma, if (err) return err; - return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32), + return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32), vma->size >> PAGE_SHIFT, val); } @@ -929,18 +928,16 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) return err; } -static int __igt_write_huge(struct i915_gem_context *ctx, - struct intel_engine_cs *engine, +static int __igt_write_huge(struct intel_context *ce, struct drm_i915_gem_object *obj, u64 size, u64 offset, u32 dword, u32 val) { - struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm; unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; struct i915_vma *vma; int err; - vma = i915_vma_instance(obj, vm, NULL); + vma = i915_vma_instance(obj, ce->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -954,7 +951,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx, * The ggtt may have some pages reserved so * refrain from erroring out. 
*/ - if (err == -ENOSPC && i915_is_ggtt(vm)) + if (err == -ENOSPC && i915_is_ggtt(ce->vm)) err = 0; goto out_vma_close; @@ -964,7 +961,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx, if (err) goto out_vma_unpin; - err = gpu_write(vma, ctx, engine, dword, val); + err = gpu_write(ce, vma, dword, val); if (err) { pr_err("gpu-write failed at offset=%llx\n", offset); goto out_vma_unpin; @@ -987,14 +984,13 @@ out_vma_close: static int igt_write_huge(struct i915_gem_context *ctx, struct drm_i915_gem_object *obj) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; - static struct intel_engine_cs *engines[I915_NUM_ENGINES]; - struct intel_engine_cs *engine; + struct i915_gem_engines *engines; + struct i915_gem_engines_iter it; + struct intel_context *ce; I915_RND_STATE(prng); IGT_TIMEOUT(end_time); unsigned int max_page_size; - unsigned int id; + unsigned int count; u64 max; u64 num; u64 size; @@ -1008,19 +1004,18 @@ static int igt_write_huge(struct i915_gem_context *ctx, if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K) size = round_up(size, I915_GTT_PAGE_SIZE_2M); - max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg); - max = div_u64((vm->total - size), max_page_size); - n = 0; - for_each_engine(engine, i915, id) { - if (!intel_engine_can_store_dword(engine)) { - pr_info("store-dword-imm not supported on engine=%u\n", - id); + count = 0; + max = U64_MAX; + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + count++; + if (!intel_engine_can_store_dword(ce->engine)) continue; - } - engines[n++] = engine; - } + max = min(max, ce->vm->total); + n++; + } + i915_gem_context_unlock_engines(ctx); if (!n) return 0; @@ -1029,23 +1024,30 @@ static int igt_write_huge(struct i915_gem_context *ctx, * randomized order, lets also make feeding to the same engine a few * times in succession a possibility by enlarging the permutation array. */ - order = i915_random_order(n * I915_NUM_ENGINES, &prng); + order = i915_random_order(count * count, &prng); if (!order) return -ENOMEM; + max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg); + max = div_u64(max - size, max_page_size); + /* * Try various offsets in an ascending/descending fashion until we * timeout -- we want to avoid issues hidden by effectively always using * offset = 0. 
*/ i = 0; + engines = i915_gem_context_lock_engines(ctx); for_each_prime_number_from(num, 0, max) { u64 offset_low = num * max_page_size; u64 offset_high = (max - num) * max_page_size; u32 dword = offset_in_page(num) / 4; + struct intel_context *ce; - engine = engines[order[i] % n]; - i = (i + 1) % (n * I915_NUM_ENGINES); + ce = engines->engines[order[i] % engines->num_engines]; + i = (i + 1) % (count * count); + if (!ce || !intel_engine_can_store_dword(ce->engine)) + continue; /* * In order to utilize 64K pages we need to both pad the vma @@ -1057,22 +1059,23 @@ static int igt_write_huge(struct i915_gem_context *ctx, offset_low = round_down(offset_low, I915_GTT_PAGE_SIZE_2M); - err = __igt_write_huge(ctx, engine, obj, size, offset_low, + err = __igt_write_huge(ce, obj, size, offset_low, dword, num + 1); if (err) break; - err = __igt_write_huge(ctx, engine, obj, size, offset_high, + err = __igt_write_huge(ce, obj, size, offset_high, dword, num + 1); if (err) break; if (igt_timeout(end_time, - "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n", - __func__, engine->id, offset_low, offset_high, + "%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n", + __func__, ce->engine->name, offset_low, offset_high, max_page_size)) break; } + i915_gem_context_unlock_engines(ctx); kfree(order); @@ -1316,10 +1319,10 @@ static int igt_ppgtt_pin_update(void *arg) unsigned long supported = INTEL_INFO(dev_priv)->page_sizes; struct i915_address_space *vm = ctx->vm; struct drm_i915_gem_object *obj; + struct i915_gem_engines_iter it; + struct intel_context *ce; struct i915_vma *vma; unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; - struct intel_engine_cs *engine; - enum intel_engine_id id; unsigned int n; int first, last; int err; @@ -1419,14 +1422,18 @@ static int igt_ppgtt_pin_update(void *arg) */ n = 0; - for_each_engine(engine, dev_priv, id) { - if (!intel_engine_can_store_dword(engine)) + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + if (!intel_engine_can_store_dword(ce->engine)) continue; - err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf); + err = gpu_write(ce, vma, n++, 0xdeadbeaf); if (err) - goto out_unpin; + break; } + i915_gem_context_unlock_engines(ctx); + if (err) + goto out_unpin; + while (n--) { err = cpu_check(obj, n, 0xdeadbeaf); if (err) @@ -1507,8 +1514,8 @@ static int igt_shrink_thp(void *arg) struct drm_i915_private *i915 = ctx->i915; struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; struct drm_i915_gem_object *obj; - struct intel_engine_cs *engine; - enum intel_engine_id id; + struct i915_gem_engines_iter it; + struct intel_context *ce; struct i915_vma *vma; unsigned int flags = PIN_USER; unsigned int n; @@ -1548,16 +1555,19 @@ static int igt_shrink_thp(void *arg) goto out_unpin; n = 0; - for_each_engine(engine, i915, id) { - if (!intel_engine_can_store_dword(engine)) + + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + if (!intel_engine_can_store_dword(ce->engine)) continue; - err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf); + err = gpu_write(ce, vma, n++, 0xdeadbeaf); if (err) - goto out_unpin; + break; } - + i915_gem_context_unlock_engines(ctx); i915_vma_unpin(vma); + if (err) + goto out_close; /* * Now that the pages are *unpinned* shrink-all should invoke @@ -1583,10 +1593,9 @@ static int igt_shrink_thp(void *arg) while (n--) { err = cpu_check(obj, n, 0xdeadbeaf); if (err) - goto out_unpin; + break; } - out_unpin: i915_vma_unpin(vma); out_close: diff --git 
a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 3e6f4a65d356..3adb60c2fd1f 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -166,19 +166,17 @@ static unsigned long fake_page_count(struct drm_i915_gem_object *obj) return huge_gem_object_dma_size(obj) >> PAGE_SHIFT; } -static int gpu_fill(struct drm_i915_gem_object *obj, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, +static int gpu_fill(struct intel_context *ce, + struct drm_i915_gem_object *obj, unsigned int dw) { - struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm; struct i915_vma *vma; int err; - GEM_BUG_ON(obj->base.size > vm->total); - GEM_BUG_ON(!intel_engine_can_store_dword(engine)); + GEM_BUG_ON(obj->base.size > ce->vm->total); + GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); - vma = i915_vma_instance(obj, vm, NULL); + vma = i915_vma_instance(obj, ce->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -200,9 +198,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj, * whilst checking that each context provides a unique view * into the object. */ - err = igt_gpu_fill_dw(vma, - ctx, - engine, + err = igt_gpu_fill_dw(ce, vma, (dw * real_page_count(obj)) << PAGE_SHIFT | (dw * sizeof(u32)), real_page_count(obj), @@ -305,22 +301,21 @@ static int file_add_object(struct drm_file *file, } static struct drm_i915_gem_object * -create_test_object(struct i915_gem_context *ctx, +create_test_object(struct i915_address_space *vm, struct drm_file *file, struct list_head *objects) { struct drm_i915_gem_object *obj; - struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm; u64 size; int err; /* Keep in GEM's good graces */ - i915_retire_requests(ctx->i915); + i915_retire_requests(vm->i915); size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE); size = round_down(size, DW_PER_PAGE * PAGE_SIZE); - obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size); + obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size); if (IS_ERR(obj)) return obj; @@ -393,6 +388,7 @@ static int igt_ctx_exec(void *arg) dw = 0; while (!time_after(jiffies, end_time)) { struct i915_gem_context *ctx; + struct intel_context *ce; ctx = live_context(i915, file); if (IS_ERR(ctx)) { @@ -400,15 +396,20 @@ static int igt_ctx_exec(void *arg) goto out_unlock; } + ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); + if (!obj) { - obj = create_test_object(ctx, file, &objects); + obj = create_test_object(ce->vm, file, &objects); if (IS_ERR(obj)) { err = PTR_ERR(obj); + intel_context_put(ce); goto out_unlock; } } - err = gpu_fill(obj, ctx, engine, dw); + err = gpu_fill(ce, obj, dw); + intel_context_put(ce); + if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), @@ -509,6 +510,7 @@ static int igt_shared_ctx_exec(void *arg) ncontexts = 0; while (!time_after(jiffies, end_time)) { struct i915_gem_context *ctx; + struct intel_context *ce; ctx = kernel_context(i915); if (IS_ERR(ctx)) { @@ -518,22 +520,26 @@ static int igt_shared_ctx_exec(void *arg) __assign_ppgtt(ctx, parent->vm); + ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); if (!obj) { - obj = create_test_object(parent, file, &objects); + obj = create_test_object(parent->vm, file, &objects); if (IS_ERR(obj)) { err = PTR_ERR(obj); + intel_context_put(ce); kernel_context_close(ctx); goto out_test; } } - err = gpu_fill(obj, ctx, engine, dw); + err = gpu_fill(ce, obj, dw); + intel_context_put(ce); + kernel_context_close(ctx); + if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), engine->name, ctx->hw_id, yesno(!!ctx->vm), err); - kernel_context_close(ctx); goto out_test; } @@ -544,8 +550,6 @@ static int igt_shared_ctx_exec(void *arg) ndwords++; ncontexts++; - - kernel_context_close(ctx); } pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", ncontexts, engine->name, ndwords); @@ -604,6 +608,8 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma) __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); + intel_gt_chipset_flush(vma->vm->gt); + vma = i915_vma_instance(obj, vma->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); @@ -1082,17 +1088,19 @@ static int igt_ctx_readonly(void *arg) ndwords = 0; dw = 0; while (!time_after(jiffies, end_time)) { - struct intel_engine_cs *engine; - unsigned int id; + struct i915_gem_engines_iter it; + struct intel_context *ce; - for_each_engine(engine, i915, id) { - if (!intel_engine_can_store_dword(engine)) + for_each_gem_engine(ce, + i915_gem_context_lock_engines(ctx), it) { + if (!intel_engine_can_store_dword(ce->engine)) continue; if (!obj) { - obj = create_test_object(ctx, file, &objects); + obj = create_test_object(ce->vm, file, &objects); if (IS_ERR(obj)) { err = PTR_ERR(obj); + i915_gem_context_unlock_engines(ctx); goto out_unlock; } @@ -1100,12 +1108,13 @@ static int igt_ctx_readonly(void *arg) i915_gem_object_set_readonly(obj); } - err = gpu_fill(obj, ctx, engine, dw); + err = gpu_fill(ce, obj, dw); if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), - engine->name, ctx->hw_id, + ce->engine->name, ctx->hw_id, yesno(!!ctx->vm), err); + i915_gem_context_unlock_engines(ctx); goto out_unlock; } @@ -1115,6 +1124,7 @@ static int igt_ctx_readonly(void *arg) } ndwords++; } + i915_gem_context_unlock_engines(ctx); } pr_info("Submitted %lu dwords (across %u engines)\n", ndwords, RUNTIME_INFO(i915)->num_engines); @@ -1197,6 +1207,8 @@ static int write_to_scratch(struct i915_gem_context *ctx, __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); + intel_gt_chipset_flush(engine->gt); + vma = i915_vma_instance(obj, ctx->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); @@ -1296,6 +1308,8 @@ static int read_from_scratch(struct i915_gem_context *ctx, i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); + intel_gt_chipset_flush(engine->gt); + vma = i915_vma_instance(obj, ctx->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c index 57ece53c1075..ee5dc13a30b3 100644 --- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c +++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c @@ -9,6 +9,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" +#include "gt/intel_gt.h" #include "i915_vma.h" #include "i915_drv.h" @@ -84,6 +85,8 @@ igt_emit_store_dw(struct i915_vma *vma, *cmd = MI_BATCH_BUFFER_END; i915_gem_object_unpin_map(obj); + intel_gt_chipset_flush(vma->vm->gt); + vma = i915_vma_instance(obj, vma->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); @@ -101,40 +104,35 @@ err: return ERR_PTR(err); } -int igt_gpu_fill_dw(struct i915_vma *vma, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - u64 offset, - unsigned long count, - u32 val) +int igt_gpu_fill_dw(struct intel_context *ce, + struct i915_vma *vma, u64 offset, + unsigned long count, u32 val) { - struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm; struct i915_request *rq; struct i915_vma *batch; unsigned int flags; int err; - GEM_BUG_ON(vma->size > vm->total); - GEM_BUG_ON(!intel_engine_can_store_dword(engine)); + GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); GEM_BUG_ON(!i915_vma_is_pinned(vma)); batch = igt_emit_store_dw(vma, offset, count, val); if (IS_ERR(batch)) return PTR_ERR(batch); - rq = igt_request_alloc(ctx, engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_batch; } flags = 0; - if (INTEL_GEN(vm->i915) <= 5) + if (INTEL_GEN(ce->vm->i915) <= 5) flags |= I915_DISPATCH_SECURE; - err = engine->emit_bb_start(rq, - batch->node.start, batch->node.size, - flags); + err = rq->engine->emit_bb_start(rq, + batch->node.start, batch->node.size, + flags); if (err) goto err_request; diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h index 361a7ef866b0..4221cf84d175 100644 --- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h +++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h @@ -11,9 +11,11 @@ struct i915_request; struct i915_gem_context; -struct intel_engine_cs; struct i915_vma; +struct intel_context; +struct intel_engine_cs; + struct i915_request * igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine); @@ -23,11 +25,8 @@ igt_emit_store_dw(struct i915_vma *vma, unsigned long count, u32 val); -int igt_gpu_fill_dw(struct i915_vma *vma, - struct i915_gem_context *ctx, - struct intel_engine_cs 
*engine, - u64 offset, - unsigned long count, - u32 val); +int igt_gpu_fill_dw(struct intel_context *ce, + struct i915_vma *vma, u64 offset, + unsigned long count, u32 val); #endif /* __IGT_GEM_UTILS_H__ */ -- cgit v1.2.3 From 52988009843160c5b366b4082ed6df48041c655c Mon Sep 17 00:00:00 2001 From: Xiaolin Zhang Date: Fri, 23 Aug 2019 14:57:31 +0800 Subject: drm/i915: make vgpu ppgtt notification an atomic operation The vgpu ppgtt notification is split into two steps: first update PVINFO's pdp register, then write PVINFO's g2v_notify register with an action code to trigger the ppgtt notification on the GVT side. Currently these steps are not atomic because nothing protects them, so it is easy to end up in a race during MTBF, stress and IGT testing and hang the GPU. The solution is to add a lock that makes the vgpu ppgtt notification an atomic operation. Cc: stable@vger.kernel.org Signed-off-by: Xiaolin Zhang Acked-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/1566543451-13955-1-git-send-email-xiaolin.zhang@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_gem_gtt.c | 12 +++++++----- drivers/gpu/drm/i915/i915_vgpu.c | 1 + 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f5e39a3bfff6..b42651a387d9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -966,6 +966,7 @@ struct i915_frontbuffer_tracking { }; struct i915_virtual_gpu { + struct mutex lock; /* serialises sending of g2v_notify command pkts */ bool active; u32 caps; }; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index e0e9b9b52544..0db82921fb38 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -834,10 +834,9 @@ static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt) ppgtt->pd_dirty_engines = ALL_ENGINES; } -static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) +static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) { - struct i915_address_space *vm = &ppgtt->vm; - struct drm_i915_private *dev_priv = vm->i915; + struct drm_i915_private *dev_priv = ppgtt->vm.i915; enum vgt_g2v_type msg; int i; @@ -846,7 +845,9 @@ static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) else atomic_dec(px_used(ppgtt->pd)); - if (i915_vm_is_4lvl(vm)) { + mutex_lock(&dev_priv->vgpu.lock); + + if (i915_vm_is_4lvl(&ppgtt->vm)) { const u64 daddr = px_dma(ppgtt->pd); I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr)); @@ -866,9 +867,10 @@ static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY); } + /* g2v_notify atomically (via hv trap) consumes the message packet.
*/ I915_WRITE(vgtif_reg(g2v_notify), msg); - return 0; + mutex_unlock(&dev_priv->vgpu.lock); } /* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */ diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 39bebf16edbe..968be26735c5 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -94,6 +94,7 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv) dev_priv->vgpu.caps = readl(shared_area + vgtif_offset(vgt_caps)); dev_priv->vgpu.active = true; + mutex_init(&dev_priv->vgpu.lock); DRM_INFO("Virtual GPU for Intel GVT-g detected.\n"); out: -- cgit v1.2.3 From acd674af95d3f627062007429b9c195c6b32361d Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Fri, 23 Aug 2019 16:52:51 -0400 Subject: drm/i915: Call dma_set_max_seg_size() in i915_driver_hw_probe() Currently, we don't call dma_set_max_seg_size() for i915 because we intentionally do not limit the segment length that the device supports. However, this results in a warning being emitted if we try to map anything larger than SZ_64K on a kernel with CONFIG_DMA_API_DEBUG_SG enabled: [ 7.751926] DMA-API: i915 0000:00:02.0: mapping sg segment longer than device claims to support [len=98304] [max=65536] [ 7.751934] WARNING: CPU: 5 PID: 474 at kernel/dma/debug.c:1220 debug_dma_map_sg+0x20f/0x340 This was originally brought up on https://bugs.freedesktop.org/show_bug.cgi?id=108517 , and the consensus there was it wasn't really useful to set a limit (and that dma-debug isn't really all that useful for i915 in the first place). Unfortunately though, CONFIG_DMA_API_DEBUG_SG is enabled in the debug configs for various distro kernels. Since a WARN_ON() will disable automatic problem reporting (and cause any CI with said option enabled to start complaining), we really should just fix the problem. Note that as me and Chris Wilson discussed, the other solution for this would be to make DMA-API not make such assumptions when a driver hasn't explicitly set a maximum segment size. But, taking a look at the commit which originally introduced this behavior, commit 78c47830a5cb ("dma-debug: check scatterlist segments"), there is an explicit mention of this assumption and how it applies to devices with no segment size: Conversely, devices which are less limited than the rather conservative defaults, or indeed have no limitations at all (e.g. GPUs with their own internal MMU), should be encouraged to set appropriate dma_parms, as they may get more efficient DMA mapping performance out of it. So unless there's any concerns (I'm open to discussion!), let's just follow suite and call dma_set_max_seg_size() with UINT_MAX as our limit to silence any warnings. Changes since v3: * Drop patch for enabling CONFIG_DMA_API_DEBUG_SG in CI. It looks like just turning it on causes the kernel to spit out bogus WARN_ONs() during some igt tests which would otherwise require teaching igt to disable the various DMA-API debugging options causing this. This is too much work to be worth it, since DMA-API debugging is useless for us. So, we'll just settle with this single patch to squelch WARN_ONs() during driver load for users that have CONFIG_DMA_API_DEBUG_SG turned on for some reason. 
* Move dma_set_max_seg_size() call into i915_driver_hw_probe() - Chris Wilson Signed-off-by: Lyude Paul Reviewed-by: Chris Wilson Cc: # v4.18+ Link: https://patchwork.freedesktop.org/patch/msgid/20190823205251.14298-1-lyude@redhat.com --- drivers/gpu/drm/i915/i915_drv.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 723b9b7202b0..9478f704a315 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1279,6 +1279,12 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) pci_set_master(pdev); + /* + * We don't have a max segment size, so set it to the max so sg's + * debugging layer doesn't complain + */ + dma_set_max_seg_size(&pdev->dev, UINT_MAX); + /* overlay on gen2 is broken and can't address above 1G */ if (IS_GEN(dev_priv, 2)) { ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30)); -- cgit v1.2.3 From 1d5b7773314994d23f2fc4acfa4278929b03ffd5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 26 Aug 2019 08:21:22 +0100 Subject: drm/i915/selftests: Add the usual batch vma managements to st_workarounds To properly handle asynchronous migration of batch objects, we need to couple the fences on the incoming batch into the request and should not assume that they always start idle. Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190826072149.9447-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index d06d68ac2a3b..999a98f00494 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -565,6 +565,14 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, goto err_request; } + i915_vma_lock(batch); + err = i915_request_await_object(rq, batch->obj, false); + if (err == 0) + err = i915_vma_move_to_active(batch, rq, 0); + i915_vma_unlock(batch); + if (err) + goto err_request; + err = engine->emit_bb_start(rq, batch->node.start, PAGE_SIZE, 0); @@ -850,6 +858,14 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx, goto err_request; } + i915_vma_lock(batch); + err = i915_request_await_object(rq, batch->obj, false); + if (err == 0) + err = i915_vma_move_to_active(batch, rq, 0); + i915_vma_unlock(batch); + if (err) + goto err_request; + /* Perform the writes from an unprivileged "user" batch */ err = engine->emit_bb_start(rq, batch->node.start, 0, 0); -- cgit v1.2.3 From ebfdf5cd806b3bbf1ff79e69bce6a28df8bbe39d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 26 Aug 2019 14:07:50 +0100 Subject: drm/i915: Use NOEVICT for first pass on attemping to pin a GGTT mmap The intention is that we first try to pin the current vma into the mappable aperture only if it is already in use or it fits in the free space and will not cause contention. The first attempt was meant to be using PIN_NOEVICT to reuse the current vma if possible, following up with different eviction strategies. 
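Condensed, the first pass in the fault handler becomes a strictly non-evicting pin (flags as in the hunk below), with the partial-view and evicting attempts only reached once it fails:

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONBLOCK /* NOWARN */ |
				       PIN_NOEVICT);
	if (IS_ERR(vma)) {
		/* fall back to a partial view and, if need be, eviction */
	}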
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111485 Fixes: 6846895fde05 ("drm/i915: Replace PIN_NONFAULT with calls to PIN_NOEVICT") Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190826130750.17272-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 595539a09e38..261c9bd83f51 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -265,7 +265,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE | PIN_NONBLOCK /* NOWARN */ | - PIN_NOSEARCH); + PIN_NOEVICT); if (IS_ERR(vma)) { /* Use a partial view if it is bigger than available space */ struct i915_ggtt_view view = -- cgit v1.2.3 From 21b0c32bdaba7c2e365d9faca0536fc40dd056d4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 27 Aug 2019 10:49:33 +0100 Subject: drm/i915/selftests: Markup impossible error pointers If we create a new live_context() we should have a mapping for each engine. Document that assumption with an assertion. Reported-by: Dan Carpenter Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190827094933.13778-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 3adb60c2fd1f..37a177e37665 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -397,6 +397,7 @@ static int igt_ctx_exec(void *arg) } ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); + GEM_BUG_ON(IS_ERR(ce)); if (!obj) { obj = create_test_object(ce->vm, file, &objects); @@ -521,6 +522,8 @@ static int igt_shared_ctx_exec(void *arg) __assign_ppgtt(ctx, parent->vm); ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); + GEM_BUG_ON(IS_ERR(ce)); + if (!obj) { obj = create_test_object(parent->vm, file, &objects); if (IS_ERR(obj)) { -- cgit v1.2.3 From f52c6d0df6909a0412c347c8e442acc22ce94747 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 27 Aug 2019 14:26:31 +0100 Subject: drm/i915: Only activate i915_active debugobject once The point of debug_object_activate is to mark the first, and only the first, acquisition. The object then remains active until the last release. However, we marked up all successful first acquires even though we allowed concurrent parties to try and acquire the i915_active simultaneously (serialised by the i915_active.mutex). 
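A minimal sketch of the rule being enforced, using toy names rather than the driver's: with concurrent acquirers serialised by the mutex, only the 0 -> 1 transition activates the debug object and only the last release deactivates it.

#include <linux/atomic.h>
#include <linux/mutex.h>

struct toy_active {
	struct mutex mutex;
	atomic_t count;
	bool debug_active; /* stands in for the debugobjects state */
};

static void toy_active_acquire(struct toy_active *ref)
{
	mutex_lock(&ref->mutex);
	if (!atomic_read(&ref->count)) /* before the first increment */
		ref->debug_active = true; /* debug_object_activate() */
	atomic_inc(&ref->count);
	mutex_unlock(&ref->mutex);
}

static void toy_active_release(struct toy_active *ref)
{
	mutex_lock(&ref->mutex);
	if (atomic_dec_and_test(&ref->count)) /* after the last decrement */
		ref->debug_active = false; /* debug_object_deactivate() */
	mutex_unlock(&ref->mutex);
}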
Testcase: igt/gem_mmap_gtt/fault-concurrent Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190827132631.18627-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_active.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 48e16ad93bbd..6a447f1d0110 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -92,12 +92,16 @@ static void debug_active_init(struct i915_active *ref) static void debug_active_activate(struct i915_active *ref) { - debug_object_activate(ref, &active_debug_desc); + lockdep_assert_held(&ref->mutex); + if (!atomic_read(&ref->count)) /* before the first inc */ + debug_object_activate(ref, &active_debug_desc); } static void debug_active_deactivate(struct i915_active *ref) { - debug_object_deactivate(ref, &active_debug_desc); + lockdep_assert_held(&ref->mutex); + if (!atomic_read(&ref->count)) /* after the last dec */ + debug_object_deactivate(ref, &active_debug_desc); } static void debug_active_fini(struct i915_active *ref) -- cgit v1.2.3 From cccdce1dd061c0b8ae156f026a3ee2c9d58613d3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 27 Aug 2019 14:59:35 +0100 Subject: drm/i915: Make engine's batch pool safe for use with virtual engines A virtual engine itself does not have a batch pool, but we can gleefully use any of its siblings instead. Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190827135935.3831-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 4 ++-- drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 4 ++-- drivers/gpu/drm/i915/gt/intel_engine_pool.c | 12 +++++++++++- drivers/gpu/drm/i915/gt/intel_engine_pool.h | 2 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 12 ++++++++++++ drivers/gpu/drm/i915/gt/intel_lrc.h | 4 ++++ 6 files changed, 32 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index f813fcb8ceb6..5a2238d44423 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -1145,7 +1145,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, u32 *cmd; int err; - pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE); + pool = intel_engine_get_pool(eb->engine, PAGE_SIZE); if (IS_ERR(pool)) return PTR_ERR(pool); @@ -1963,7 +1963,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master) struct i915_vma *vma; int err; - pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len); + pool = intel_engine_get_pool(eb->engine, eb->batch_len); if (IS_ERR(pool)) return ERR_CAST(pool); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c index 6415f9a17e2d..5bd8de124d74 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -32,7 +32,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce, count = div_u64(vma->size, block_size); size = (1 + 8 * count) * sizeof(u32); size = round_up(size, PAGE_SIZE); - pool = intel_engine_pool_get(&ce->engine->pool, size); + pool = intel_engine_get_pool(ce->engine, size); if (IS_ERR(pool)) { err = PTR_ERR(pool); goto out_pm; @@ -216,7 +216,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, 
count = div_u64(dst->size, block_size); size = (1 + 11 * count) * sizeof(u32); size = round_up(size, PAGE_SIZE); - pool = intel_engine_pool_get(&ce->engine->pool, size); + pool = intel_engine_get_pool(ce->engine, size); if (IS_ERR(pool)) { err = PTR_ERR(pool); goto out_pm; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c index 4cd54c569911..97d36cca8ded 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c @@ -107,9 +107,19 @@ node_create(struct intel_engine_pool *pool, size_t sz) return node; } +static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine) +{ + if (intel_engine_is_virtual(engine)) + engine = intel_virtual_engine_get_sibling(engine, 0); + + GEM_BUG_ON(!engine); + return &engine->pool; +} + struct intel_engine_pool_node * -intel_engine_pool_get(struct intel_engine_pool *pool, size_t size) +intel_engine_get_pool(struct intel_engine_cs *engine, size_t size) { + struct intel_engine_pool *pool = lookup_pool(engine); struct intel_engine_pool_node *node; struct list_head *list; unsigned long flags; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h index 8d069efd9457..7e2123b33594 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pool.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h @@ -12,7 +12,7 @@ #include "i915_request.h" struct intel_engine_pool_node * -intel_engine_pool_get(struct intel_engine_pool *pool, size_t size); +intel_engine_get_pool(struct intel_engine_cs *engine, size_t size); static inline int intel_engine_pool_mark_active(struct intel_engine_pool_node *node, diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index d42584439f51..cfbdcca76ff0 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3851,6 +3851,18 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, return 0; } +struct intel_engine_cs * +intel_virtual_engine_get_sibling(struct intel_engine_cs *engine, + unsigned int sibling) +{ + struct virtual_engine *ve = to_virtual_engine(engine); + + if (sibling >= ve->num_siblings) + return NULL; + + return ve->siblings[sibling]; +} + void intel_execlists_show_requests(struct intel_engine_cs *engine, struct drm_printer *m, void (*show_request)(struct drm_printer *m, diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h index c2bba82bcc16..dc0252e0589e 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc.h @@ -131,4 +131,8 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, const struct intel_engine_cs *master, const struct intel_engine_cs *sibling); +struct intel_engine_cs * +intel_virtual_engine_get_sibling(struct intel_engine_cs *engine, + unsigned int sibling); + #endif /* _INTEL_LRC_H_ */ -- cgit v1.2.3 From 6056517ab8c30fb239b08c06ef577677cbcd409c Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Fri, 23 Aug 2019 01:20:37 -0700 Subject: drm/i915/tgl: Guard and warn if more than one eDP panel is present MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On TGL+ it's possible to have PSR1 enabled in other ports besides DDIA. PSR2 is still limited to DDIA. However currently we handle only one instance of PSR struct. Lets guard intel_psr_init_dpcd() against multiple eDP panels and warn about it. 
v2: Reword commit message to be TGL+ only and with the info where PSR1/PSR2 are supported (Lucas) Cc: Dhinakaran Pandiyan Cc: Rodrigo Vivi Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Anshuman Gupta Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-6-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_psr.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 28b62e587204..78e920015a00 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -283,6 +283,11 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = to_i915(dp_to_dig_port(intel_dp)->base.base.dev); + if (dev_priv->psr.dp) { + DRM_WARN("More than one eDP panel found, PSR support should be extended\n"); + return; + } + drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, sizeof(intel_dp->psr_dpcd)); @@ -305,7 +310,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) dev_priv->psr.sink_sync_latency = intel_dp_get_sink_sync_latency(intel_dp); - WARN_ON(dev_priv->psr.dp); dev_priv->psr.dp = intel_dp; if (INTEL_GEN(dev_priv) >= 9 && -- cgit v1.2.3 From 0f81e645eb1ed224af43bb7b347806cc74d189f6 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Sat, 17 Aug 2019 02:38:33 -0700 Subject: drm/i915: Do not read PSR2 register in transcoders without PSR2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fix unclaimed access warnings: [ 245.525788] ------------[ cut here ]------------ [ 245.525884] Unclaimed read from register 0x62900 [ 245.526154] WARNING: CPU: 0 PID: 1234 at drivers/gpu/drm/i915/intel_uncore.c:1100 __unclaimed_reg_debug+0x40/0x50 [i915] [ 245.526160] Modules linked in: i915 x86_pkg_temp_thermal ax88179_178a coretemp usbnet crct10dif_pclmul mii crc32_pclmul ghash_clmulni_intel e1000e [last unloaded: i915] [ 245.526191] CPU: 0 PID: 1234 Comm: kms_fullmodeset Not tainted 5.1.0-rc6+ #915 [ 245.526197] Hardware name: Intel Corporation Tiger Lake Client Platform/TigerLake U DDR4 SODIMM RVP, BIOS TGLSFWR1.D00.2081.A10.1904182155 04/18/2019 [ 245.526273] RIP: 0010:__unclaimed_reg_debug+0x40/0x50 [i915] [ 245.526281] Code: 74 05 5b 5d 41 5c c3 45 84 e4 48 c7 c0 76 97 21 a0 48 c7 c6 6c 97 21 a0 89 ea 48 0f 44 f0 48 c7 c7 7f 97 21 a0 e8 4f 1e fe e0 <0f> 0b 83 2d 6f d9 1c 00 01 5b 5d 41 5c c3 66 90 41 57 41 56 41 55 [ 245.526288] RSP: 0018:ffffc900006bf7d8 EFLAGS: 00010086 [ 245.526297] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000 [ 245.526304] RDX: 0000000000000007 RSI: 0000000000000000 RDI: 00000000ffffffff [ 245.526310] RBP: 0000000000061900 R08: 0000000000000000 R09: 0000000000000001 [ 245.526317] R10: 0000000000000006 R11: 0000000000000000 R12: 0000000000000001 [ 245.526324] R13: 0000000000000000 R14: ffff8882914f0d58 R15: 0000000000000206 [ 245.526332] FS: 00007fed2a3c39c0(0000) GS:ffff8882a8600000(0000) knlGS:0000000000000000 [ 245.526340] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 245.526347] CR2: 00007fed28dff000 CR3: 00000002a086c006 CR4: 0000000000760ef0 [ 245.526354] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 245.526361] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 245.526367] PKRU: 55555554 [ 245.526373] Call Trace: [ 245.526454] gen11_fwtable_read32+0x219/0x250 [i915] [ 245.526576] intel_psr_activate+0x57/0x400 [i915] [ 
245.526697] intel_psr_enable_locked+0x367/0x4b0 [i915] [ 245.526828] intel_psr_enable+0xa4/0xd0 [i915] [ 245.526946] intel_enable_ddi+0x127/0x2f0 [i915] [ 245.527075] intel_encoders_enable.isra.79+0x62/0x90 [i915] [ 245.527202] haswell_crtc_enable+0x2a2/0x850 [i915] [ 245.527337] intel_update_crtc+0x51/0x360 [i915] [ 245.527466] skl_update_crtcs+0x26c/0x300 [i915] [ 245.527603] intel_atomic_commit_tail+0x3e5/0x13c0 [i915] [ 245.527757] intel_atomic_commit+0x24d/0x2d0 [i915] [ 245.527782] drm_atomic_helper_set_config+0x7b/0x90 [ 245.527799] drm_mode_setcrtc+0x1b4/0x6f0 [ 245.527856] ? drm_mode_getcrtc+0x180/0x180 [ 245.527867] drm_ioctl_kernel+0xad/0xf0 [ 245.527886] drm_ioctl+0x2f4/0x3b0 [ 245.527902] ? drm_mode_getcrtc+0x180/0x180 [ 245.527935] ? rcu_read_lock_sched_held+0x6f/0x80 [ 245.527956] do_vfs_ioctl+0xa0/0x6d0 [ 245.527970] ? __task_pid_nr_ns+0xb6/0x200 [ 245.527991] ksys_ioctl+0x35/0x70 [ 245.528009] __x64_sys_ioctl+0x11/0x20 [ 245.528020] do_syscall_64+0x55/0x180 [ 245.528034] entry_SYSCALL_64_after_hwframe+0x49/0xbe [ 245.528042] RIP: 0033:0x7fed2cc7c3c7 [ 245.528050] Code: 00 00 90 48 8b 05 c9 3a 0d 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 99 3a 0d 00 f7 d8 64 89 01 48 [ 245.528057] RSP: 002b:00007ffe36944378 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 [ 245.528067] RAX: ffffffffffffffda RBX: 00007ffe369443b0 RCX: 00007fed2cc7c3c7 [ 245.528074] RDX: 00007ffe369443b0 RSI: 00000000c06864a2 RDI: 0000000000000003 [ 245.528081] RBP: 00007ffe369443b0 R08: 0000000000000000 R09: 0000564c0173ae98 [ 245.528088] R10: 0000564c0173aeb8 R11: 0000000000000246 R12: 00000000c06864a2 [ 245.528095] R13: 0000000000000003 R14: 0000000000000000 R15: 0000000000000000 [ 245.528128] irq event stamp: 140866 [ 245.528138] hardirqs last enabled at (140865): [] _raw_spin_unlock_irqrestore+0x4c/0x60 [ 245.528148] hardirqs last disabled at (140866): [] _raw_spin_lock_irqsave+0xd/0x50 [ 245.528158] softirqs last enabled at (140860): [] __do_softirq+0x38c/0x499 [ 245.528170] softirqs last disabled at (140853): [] irq_exit+0xa9/0xc0 [ 245.528247] WARNING: CPU: 0 PID: 1234 at drivers/gpu/drm/i915/intel_uncore.c:1100 __unclaimed_reg_debug+0x40/0x50 [i915] [ 245.528254] ---[ end trace 366069676e98a410 ]--- Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Imre Deak Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-7-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_psr.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 78e920015a00..dafd3737cc5a 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -541,7 +541,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) static bool transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans) { - if (INTEL_GEN(dev_priv) >= 12) + if (INTEL_GEN(dev_priv) < 9) + return false; + else if (INTEL_GEN(dev_priv) >= 12) return trans == TRANSCODER_A; else return trans == TRANSCODER_EDP; @@ -667,8 +669,9 @@ static void intel_psr_activate(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - if (INTEL_GEN(dev_priv) >= 9) + if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE); + 
WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE); WARN_ON(dev_priv->psr.active); lockdep_assert_held(&dev_priv->psr.lock); @@ -821,7 +824,7 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv) u32 val; if (!dev_priv->psr.active) { - if (INTEL_GEN(dev_priv) >= 9) { + if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) { val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); WARN_ON(val & EDP_PSR2_ENABLE); } -- cgit v1.2.3 From f7b3c22619a44ce7599fc9fa1df5df6281a26217 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Fri, 23 Aug 2019 01:20:41 -0700 Subject: drm/i915/tgl: Add maximum resolution supported by PSR2 HW MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit TGL PSR2 HW supports a bigger resolution, so lets add it BSpec: 50422, 49199 Cc: Dhinakaran Pandiyan Cc: Rodrigo Vivi Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Lucas De Marchi Reviewed-by: Anshuman Gupta Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-10-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_psr.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index dafd3737cc5a..2af3826121fa 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -576,7 +576,10 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 12) { + psr_max_h = 5120; + psr_max_v = 3200; + } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { psr_max_h = 4096; psr_max_v = 2304; } else if (IS_GEN(dev_priv, 9)) { -- cgit v1.2.3 From 0456417ef680aab9a2aa93b950443e25104101b9 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Fri, 23 Aug 2019 01:20:43 -0700 Subject: drm: Add for_each_oldnew_intel_crtc_in_state_reverse() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Same as for_each_oldnew_intel_crtc_in_state() but iterates in reverse order. 
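For reference, a minimal usage sketch of the new iterator (illustrative only; it mirrors the intel_atomic_commit_tail() hunk in the following patch, and the loop body is a placeholder):

	struct intel_crtc *crtc;
	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	/*
	 * Walk CRTCs from the highest to the lowest index so the lowest
	 * pipe (the master for features such as MST on TGL+) is handled
	 * last on the disable path.
	 */
	for_each_oldnew_intel_crtc_in_state_reverse(state, crtc,
						    old_crtc_state,
						    new_crtc_state, i) {
		if (needs_modeset(new_crtc_state) || new_crtc_state->update_pipe) {
			/* disable/update path for this pipe */
		}
	}
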
v2: Fix additional blank line v3: Rebase Cc: Rodrigo Vivi Cc: Ville Syrjälä Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Mika Kahola Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-12-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_display.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index e57e6969051d..03321fb4a703 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -411,6 +411,15 @@ enum phy_fia { (__i)++) \ for_each_if(crtc) +#define for_each_oldnew_intel_crtc_in_state_reverse(__state, crtc, old_crtc_state, new_crtc_state, __i) \ + for ((__i) = (__state)->base.dev->mode_config.num_crtc - 1; \ + (__i) >= 0 && \ + ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \ + (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \ + (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \ + (__i)--) \ + for_each_if(crtc) + void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, -- cgit v1.2.3 From 9c722e17c1b91d7dc5ec77cb4fcaaa6b30653fe3 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Fri, 23 Aug 2019 01:20:44 -0700 Subject: drm/i915: Disable pipes in reverse order MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Disable CRTC/pipes in reverse order because some features (MST in TGL+) requires master and slave relationship between pipes, so it should always pick the lowest pipe as master as it will be enabled first and disable in the reverse order so the master will be the last one to be disabled. Cc: Rodrigo Vivi Cc: Ville Syrjälä Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Mika Kahola Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-13-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index ea2915dde6ab..a9f8be2cd364 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -13908,7 +13908,15 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) if (state->modeset) wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + /* + * Disable CRTC/pipes in reverse order because some features(MST in + * TGL+) requires master and slave relationship between pipes, so it + * should always pick the lowest pipe as master as it will be enabled + * first and disable in the reverse order so the master will be the + * last one to be disabled. 
+ */ + for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state, + new_crtc_state, i) { if (needs_modeset(new_crtc_state) || new_crtc_state->update_pipe) { -- cgit v1.2.3 From 99389390fef50c880d2ca0b6667c12423b3d5260 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Fri, 23 Aug 2019 01:20:47 -0700 Subject: drm/i915/tgl: Implement TGL DisplayPort training sequence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On TGL some registers moved from DDI to transcoder and the DisplayPort training sequence has a separate BSpec page. I started adding 'ifs' to the original intel_ddi_pre_enable_dp() but it was becoming really hard to follow, so a new and cleaner function for TGL was added with comments of all steps. It's similar to ICL, but different enough to deserve a new function. The rest of DisplayPort enable and the whole disable sequences remained the same. v2: FEC and DSC should be enabled on sink side before start link training(Maarten reported and Manasi confirmed the DSC part) v3: Add call to enable FEC on step 7.l(Manasi) BSpec: 49190 Cc: Maarten Lankhorst Cc: Manasi Navare Cc: Ville Syrjälä Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-16-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 140 ++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/display/intel_dp.c | 8 +- 2 files changed, 140 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 8eb2b3ec01ed..3180dacb5be4 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1761,7 +1761,14 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); } -void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) +/* + * Returns the TRANS_DDI_FUNC_CTL value based on CRTC state. + * + * Only intended to be used by intel_ddi_enable_transcoder_func() and + * intel_ddi_config_transcoder_func(). + */ +static u32 +intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc); @@ -1845,6 +1852,34 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) temp |= DDI_PORT_WIDTH(crtc_state->lane_count); } + return temp; +} + +void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 temp; + + temp = intel_ddi_transcoder_func_reg_val_get(crtc_state); + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); +} + +/* + * Same as intel_ddi_enable_transcoder_func(), but it does not set the enable + * bit. 
+ */ +static void +intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 temp; + + temp = intel_ddi_transcoder_func_reg_val_get(crtc_state); + temp &= ~TRANS_DDI_FUNC_ENABLE; I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); } @@ -3160,9 +3195,94 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, POSTING_READ(DP_TP_CTL(port)); } -static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); + int level = intel_ddi_dp_level(intel_dp); + + intel_dp_set_link_params(intel_dp, crtc_state->port_clock, + crtc_state->lane_count, is_mst); + + /* 1.a got on intel_atomic_commit_tail() */ + + /* 2. */ + intel_edp_panel_on(intel_dp); + + /* + * 1.b, 3. and 4. is done before tgl_ddi_pre_enable_dp() by: + * haswell_crtc_enable()->intel_encoders_pre_pll_enable() and + * haswell_crtc_enable()->intel_enable_shared_dpll() + */ + + /* 5. */ + if (!intel_phy_is_tc(dev_priv, phy) || + dig_port->tc_mode != TC_PORT_TBT_ALT) + intel_display_power_get(dev_priv, + dig_port->ddi_io_power_domain); + + /* 6. 
*/ + icl_program_mg_dp_mode(dig_port); + + /* + * 7.a - Steps in this function should only be executed over MST + * master, what will be taken in care by MST hook + * intel_mst_pre_enable_dp() + */ + intel_ddi_enable_pipe_clock(crtc_state); + + /* 7.b */ + intel_ddi_config_transcoder_func(crtc_state); + + /* 7.d */ + icl_disable_phy_clock_gating(dig_port); + + /* 7.e */ + icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, + encoder->type); + + /* 7.f */ + if (intel_phy_is_combo(dev_priv, phy)) { + bool lane_reversal = + dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; + + intel_combo_phy_power_up_lanes(dev_priv, phy, false, + crtc_state->lane_count, + lane_reversal); + } + + /* 7.g */ + intel_ddi_init_dp_buf_reg(encoder); + + if (!is_mst) + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + + intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); + /* + * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit + * in the FEC_CONFIGURATION register to 1 before initiating link + * training + */ + intel_dp_sink_set_fec_ready(intel_dp, crtc_state); + /* 7.c, 7.h, 7.i, 7.j */ + intel_dp_start_link_train(intel_dp); + + /* 7.k */ + intel_dp_stop_link_train(intel_dp); + + /* 7.l */ + intel_ddi_enable_fec(encoder, crtc_state); + intel_dsc_enable(encoder, crtc_state); +} + +static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -3228,6 +3348,18 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_dsc_enable(encoder, crtc_state); } +static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (INTEL_GEN(dev_priv) >= 12) + tgl_ddi_pre_enable_dp(encoder, crtc_state, conn_state); + else + hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state); +} + static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 23908da1cd5d..f74594d8d9df 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3954,13 +3954,13 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) I915_WRITE(DP_TP_CTL(port), val); /* - * On PORT_A we can have only eDP in SST mode. There the only reason - * we need to set idle transmission mode is to work around a HW issue - * where we enable the pipe while not in idle link-training mode. + * Until TGL on PORT_A we can have only eDP in SST mode. There the only + * reason we need to set idle transmission mode is to work around a HW + * issue where we enable the pipe while not in idle link-training mode. * In this case there is requirement to wait for a minimum number of * idle patterns to be sent. 
*/ - if (port == PORT_A) + if (port == PORT_A && INTEL_GEN(dev_priv) < 12) return; if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port), -- cgit v1.2.3 From a8ff5d405e90e7044ab45f315f2dbcbe6aebf5f8 Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Fri, 23 Aug 2019 01:20:48 -0700 Subject: drm/i915/tgl: Do not apply WaIncreaseDefaultTLBEntries from GEN12 onwards Workaround no longer needed (plus L3_LRA_1_GPGPU doesn't exist). Cc: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Signed-off-by: Michel Thierry Signed-off-by: Lucas De Marchi Reviewed-by: Stuart Summers Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-17-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0db82921fb38..c94dfa562247 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2029,7 +2029,7 @@ static void gtt_write_workarounds(struct intel_gt *gt) intel_uncore_write(uncore, GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); - else if (INTEL_GEN(i915) >= 9) + else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11) intel_uncore_write(uncore, GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); -- cgit v1.2.3 From 45e9c829ebeaa13b3b999f76963b9920d2147128 Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Fri, 23 Aug 2019 01:20:50 -0700 Subject: drm/i915/tgl/perf: use the same oa ctx_id format as icl Compared to Icelake, Tigerlake's MAX_CONTEXT_HW_ID is smaller by one, but since we just use the upper 32 bits of the lrc_desc, it's guaranteed OA will use the correct one. Cc: Lionel Landwerlin Signed-off-by: Michel Thierry Signed-off-by: Lucas De Marchi Reviewed-by: Umesh Nerlige Ramappa Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-19-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_perf.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index e42b86827d6b..2c9f46e12622 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1299,7 +1299,8 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) } break; - case 11: { + case 11: + case 12: { stream->specific_ctx_id_mask = ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) | ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) | -- cgit v1.2.3 From 8a9a982767b7c89b2f23290e4f0f21f194b79dfe Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Tue, 27 Aug 2019 11:58:05 -0700 Subject: drm/i915: use a separate context for gpu relocs The CS pre-parser can pre-fetch commands across memory sync points and starting from gen12 it is able to pre-fetch across BB_START and BB_END boundaries as well, so when we emit gpu relocs the pre-parser might fetch the target location of the reloc before the memory write lands. The parser can't pre-fetch across the ctx switch, so we use a separate context to guarantee that the memory is synchronized before the parser can get to it. Note that there is no risk of the CS doing a lite restore from the reloc context to the user context, even if the two have the same hw_id, because since gen11 the CS also checks the LRCA when deciding if it can lite-restore. v2: limit new context to gen12+, release in eb_destroy, add a comment in emit_fini_breadcrumb (Chris). 
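Condensed, the context selection added to reloc_gpu() in the hunk below looks like this (illustrative sketch only, error handling omitted):

	if (!cache->ce) {
		struct intel_context *ce;

		/*
		 * The gen12+ CS pre-parser can fetch past BB_START/BB_END
		 * within the same context, so emit the reloc writes from a
		 * separate context there; the context switch is the one
		 * boundary it cannot prefetch across.
		 */
		if (cache->gen >= 12)
			ce = intel_context_create(eb->context->gem_context,
						  eb->engine);
		else
			ce = intel_context_get(eb->context);

		cache->ce = ce;
	}
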
Suggested-by: Chris Wilson Signed-off-by: Daniele Ceraolo Spurio Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190827185805.21799-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 30 +++++++++++++++++++++++++- drivers/gpu/drm/i915/gt/intel_lrc.c | 18 ++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 5a2238d44423..8fbb454cfd6b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -252,6 +252,7 @@ struct i915_execbuffer { bool has_fence : 1; bool needs_unfenced : 1; + struct intel_context *ce; struct i915_request *rq; u32 *rq_cmd; unsigned int rq_size; @@ -880,6 +881,9 @@ static void eb_destroy(const struct i915_execbuffer *eb) { GEM_BUG_ON(eb->reloc_cache.rq); + if (eb->reloc_cache.ce) + intel_context_put(eb->reloc_cache.ce); + if (eb->lut_size > 0) kfree(eb->buckets); } @@ -903,6 +907,7 @@ static void reloc_cache_init(struct reloc_cache *cache, cache->has_fence = cache->gen < 4; cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment; cache->node.allocated = false; + cache->ce = NULL; cache->rq = NULL; cache->rq_size = 0; } @@ -1168,7 +1173,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, if (err) goto err_unmap; - rq = i915_request_create(eb->context); + rq = intel_context_create_request(cache->ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_unpin; @@ -1239,6 +1244,29 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb, if (!intel_engine_can_store_dword(eb->engine)) return ERR_PTR(-ENODEV); + if (!cache->ce) { + struct intel_context *ce; + + /* + * The CS pre-parser can pre-fetch commands across + * memory sync points and starting gen12 it is able to + * pre-fetch across BB_START and BB_END boundaries + * (within the same context). We therefore use a + * separate context gen12+ to guarantee that the reloc + * writes land before the parser gets to the target + * memory location. + */ + if (cache->gen >= 12) + ce = intel_context_create(eb->context->gem_context, + eb->engine); + else + ce = intel_context_get(eb->context); + if (IS_ERR(ce)) + return ERR_CAST(ce); + + cache->ce = ce; + } + err = __reloc_gpu_alloc(eb, vma, len); if (unlikely(err)) return ERR_PTR(err); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index cfbdcca76ff0..a141e9e37bf7 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2931,6 +2931,24 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) return gen8_emit_fini_breadcrumb_footer(request, cs); } +/* + * Note that the CS instruction pre-parser will not stall on the breadcrumb + * flush and will continue pre-fetching the instructions after it before the + * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at + * BB_START/END instructions, so, even though we might pre-fetch the pre-amble + * of the next request before the memory has been flushed, we're guaranteed that + * we won't access the batch itself too early. + * However, on gen12+ the parser can pre-fetch across the BB_START/END commands, + * so, if the current request is modifying an instruction in the next request on + * the same intel_context, we might pre-fetch and then execute the pre-update + * instruction. 
To avoid this, the users of self-modifying code should either + * disable the parser around the code emitting the memory writes, via a new flag + * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For + * the in-kernel use-cases we've opted to use a separate context, see + * reloc_gpu() as an example. + * All the above applies only to the instructions themselves. Non-inline data + * used by the instructions is not pre-fetched. + */ static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) { -- cgit v1.2.3 From f2085c8e950d536c7982182b0c9c015804011dd2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 27 Aug 2019 17:17:25 +0100 Subject: drm/i915/selftests: Remove accidental serialization between gpu_fill Upon object creation for live_gem_contexts, we fill the object with known scratch and flush it out of the CPU cache. Before performing the GPU fill, we don't need to flush it again and so avoid serialising with previous fills. However, we do need some throttling on the internal interfaces if we do not want to run out of memory! Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190827161726.3640-1-chris@chris-wilson.co.uk --- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 83 +++++++++++++++++++--- 1 file changed, 72 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 37a177e37665..63116c4fa8ba 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -180,12 +180,6 @@ static int gpu_fill(struct intel_context *ce, if (IS_ERR(vma)) return PTR_ERR(vma); - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_gtt_domain(obj, true); - i915_gem_object_unlock(obj); - if (err) - return err; - err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER); if (err) return err; @@ -343,6 +337,45 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj) return npages / DW_PER_PAGE; } +static void throttle_release(struct i915_request **q, int count) +{ + int i; + + for (i = 0; i < count; i++) { + if (IS_ERR_OR_NULL(q[i])) + continue; + + i915_request_put(fetch_and_zero(&q[i])); + } +} + +static int throttle(struct intel_context *ce, + struct i915_request **q, int count) +{ + int i; + + if (!IS_ERR_OR_NULL(q[0])) { + if (i915_request_wait(q[0], + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT) < 0) + return -EINTR; + + i915_request_put(q[0]); + } + + for (i = 0; i < count - 1; i++) + q[i] = q[i + 1]; + + q[i] = intel_context_create_request(ce); + if (IS_ERR(q[i])) + return PTR_ERR(q[i]); + + i915_request_get(q[i]); + i915_request_add(q[i]); + + return 0; +} + static int igt_ctx_exec(void *arg) { struct drm_i915_private *i915 = arg; @@ -362,6 +395,7 @@ static int igt_ctx_exec(void *arg) for_each_engine(engine, i915, id) { struct drm_i915_gem_object *obj = NULL; unsigned long ncontexts, ndwords, dw; + struct i915_request *tq[5] = {}; struct igt_live_test t; struct drm_file *file; IGT_TIMEOUT(end_time); @@ -409,13 +443,18 @@ static int igt_ctx_exec(void *arg) } err = gpu_fill(ce, obj, dw); - intel_context_put(ce); - if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), engine->name, ctx->hw_id, yesno(!!ctx->vm), err); + intel_context_put(ce); + goto out_unlock; + } + + err = throttle(ce, tq, ARRAY_SIZE(tq)); + if (err) { + intel_context_put(ce); goto out_unlock; } @@ -426,6 +465,8 @@ static int igt_ctx_exec(void *arg) ndwords++; ncontexts++; + + intel_context_put(ce); } pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", @@ -444,6 +485,7 @@ static int igt_ctx_exec(void *arg) } out_unlock: + throttle_release(tq, ARRAY_SIZE(tq)); if (igt_live_test_end(&t)) err = -EIO; mutex_unlock(&i915->drm.struct_mutex); @@ -461,6 +503,7 @@ out_unlock: static int igt_shared_ctx_exec(void *arg) { struct drm_i915_private *i915 = arg; + struct i915_request *tq[5] = {}; struct i915_gem_context *parent; struct intel_engine_cs *engine; enum intel_engine_id id; @@ -535,14 +578,20 @@ static int igt_shared_ctx_exec(void *arg) } err = gpu_fill(ce, obj, dw); - intel_context_put(ce); - kernel_context_close(ctx); - if (err) { pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", ndwords, dw, max_dwords(obj), engine->name, ctx->hw_id, yesno(!!ctx->vm), err); + intel_context_put(ce); + kernel_context_close(ctx); + goto out_test; + } + + err = throttle(ce, tq, ARRAY_SIZE(tq)); + if (err) { + intel_context_put(ce); + kernel_context_close(ctx); goto out_test; } @@ -553,6 +602,9 @@ static int igt_shared_ctx_exec(void *arg) ndwords++; ncontexts++; + + intel_context_put(ce); + kernel_context_close(ctx); } pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", ncontexts, engine->name, ndwords); @@ -574,6 +626,7 @@ static int igt_shared_ctx_exec(void *arg) mutex_lock(&i915->drm.struct_mutex); } out_test: + throttle_release(tq, ARRAY_SIZE(tq)); if (igt_live_test_end(&t)) err = -EIO; out_unlock: @@ -1050,6 +1103,7 @@ static int igt_ctx_readonly(void *arg) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj = NULL; + struct i915_request *tq[5] = {}; struct i915_address_space *vm; struct i915_gem_context *ctx; unsigned long idx, ndwords, dw; @@ -1121,6 +1175,12 @@ static int igt_ctx_readonly(void *arg) goto out_unlock; } + err = throttle(ce, tq, ARRAY_SIZE(tq)); + if (err) { + i915_gem_context_unlock_engines(ctx); + goto out_unlock; + } + if (++dw == max_dwords(obj)) { obj = NULL; dw = 0; @@ -1151,6 +1211,7 @@ static int igt_ctx_readonly(void *arg) } out_unlock: + throttle_release(tq, ARRAY_SIZE(tq)); if (igt_live_test_end(&t)) err = -EIO; mutex_unlock(&i915->drm.struct_mutex); -- cgit v1.2.3 From c4e6488120e9ef1ceced8fb0caf134c5242ddaf4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 27 Aug 2019 17:17:26 +0100 Subject: drm/i915/selftests: Try to recycle context allocations igt_ctx_exec allocates a new context for each iteration, keeping them all allocated until the end. Instead, release the local ctx reference at the end of each iteration, allowing ourselves to reap those if under mempressure. 
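The per-iteration shape after this change, roughly (illustrative sketch; the engine context, gpu_fill() and throttle() calls and the error paths are exactly as in the hunk below):

	ctx = kernel_context(i915);	/* local kernel context for this pass */
	/* ... gpu_fill(ce, obj, dw) and throttle(ce, tq, ARRAY_SIZE(tq)) ... */
	intel_context_put(ce);
	kernel_context_close(ctx);	/* drop the local ref so it can be reaped */
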
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190827161726.3640-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 63116c4fa8ba..da54a718c712 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -424,7 +424,7 @@ static int igt_ctx_exec(void *arg) struct i915_gem_context *ctx; struct intel_context *ce; - ctx = live_context(i915, file); + ctx = kernel_context(i915); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out_unlock; @@ -438,6 +438,7 @@ static int igt_ctx_exec(void *arg) if (IS_ERR(obj)) { err = PTR_ERR(obj); intel_context_put(ce); + kernel_context_close(ctx); goto out_unlock; } } @@ -449,12 +450,14 @@ static int igt_ctx_exec(void *arg) engine->name, ctx->hw_id, yesno(!!ctx->vm), err); intel_context_put(ce); + kernel_context_close(ctx); goto out_unlock; } err = throttle(ce, tq, ARRAY_SIZE(tq)); if (err) { intel_context_put(ce); + kernel_context_close(ctx); goto out_unlock; } @@ -467,6 +470,7 @@ static int igt_ctx_exec(void *arg) ncontexts++; intel_context_put(ce); + kernel_context_close(ctx); } pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", -- cgit v1.2.3 From e8f6b4952ec54a9d7e43f908d39dc168b0310599 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 27 Aug 2019 13:06:15 +0100 Subject: drm/i915/execlists: Flush the post-sync breadcrumb write harder Quite rarely we see that the CS completion event fires before the breadcrumb is coherent, which presumably is a result of the CS_STALL not waiting for the post-sync operation. Try throwing in a DC_FLUSH into the following pipecontrol to see if that makes any difference. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Acked-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190827120615.31390-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index a141e9e37bf7..171d5205962c 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2923,8 +2923,10 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) PIPE_CONTROL_DC_FLUSH_ENABLE); /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */ + /* XXX DC_FLUSH for post-sync write? (cf early context-switch bug) */ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_FLUSH_ENABLE | + PIPE_CONTROL_DC_FLUSH_ENABLE | PIPE_CONTROL_CS_STALL, 0); -- cgit v1.2.3 From 592a7c5e082e237c7508f37b2c3494a731acad88 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Fri, 9 Aug 2019 17:56:53 +0300 Subject: drm/i915: Extend non readable mcr range Our current avoidance of non readable mcr range was not inclusive enough. Extend the start and end. 
References: HSDES#1405586840 Cc: Tvrtko Ursulin Signed-off-by: Mika Kuoppala Acked-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190809145653.2279-1-mika.kuoppala@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index ad2261e0cba8..8639fcccdb42 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1453,7 +1453,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset) * which only controls CPU initiated MMIO. Routing does not * work for CS access so we cannot verify them on this path. */ - if (INTEL_GEN(i915) >= 8 && (offset >= 0xb100 && offset <= 0xb3ff)) + if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff)) return true; return false; -- cgit v1.2.3 From 325b916a9e1b11d28c40b02e24734750946a1785 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 26 Aug 2019 14:38:37 +0100 Subject: drm/i915/selftests: Ignore coherency failures on Broadwater We've been ignoring similar coherency issues in IGT for Broadwater, and specifically Broadwater (original gen4) and not, for example, Crestline (same generation as Broadwater, but the mobile variant). Without any means to reproduce locally (I have a 965GM but alas no 965G), fixing will be slow, so tell CI to ignore any failure until we are ready with a fix. Signed-off-by: Chris Wilson Cc: Joonas Lahtinen Acked-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190826133837.6784-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 17006d50b63f..a8014c59b388 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1119,6 +1119,8 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine) case 3: /* maybe only uses physical not virtual addresses */ return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915)); + case 4: + return !IS_I965G(engine->i915); /* who knows! */ case 6: return engine->class != VIDEO_DECODE_CLASS; /* b0rked */ default: -- cgit v1.2.3 From 8a84bacba19c154bdcb278657d0b7594ce2d939c Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Fri, 23 Aug 2019 13:07:11 +0300 Subject: drm/i915: Align power domain names with port names MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a difference in BSpec's and the driver's designation of DDI ports. BSpec uses the following names: - before GEN11: BSpec/driver: port A/B/C/D etc - GEN11: BSpec/driver: port A-F - GEN12: BSpec: port A/B/C for combo PHY ports port TC1-6 for Type C PHY ports driver: port A-I. The driver's port D name matches BSpec's TC1 port name. So far power domains were named according to the BSpec designation, to make it easier to match the code against the specification. That however can be confusing when a power domain needs to be matched to a port on GEN12+. To resolve that use the driver's port A-I designation for power domain names too and rename the corresponding power wells so that they reflect the mapping from the driver's to BSpec's port name. 
Cc: Lucas De Marchi Cc: Ville Syrjälä Signed-off-by: Imre Deak Reviewed-by: Stanislav Lisovskiy Link: https://patchwork.freedesktop.org/patch/msgid/20190823100711.27833-1-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 10 +- drivers/gpu/drm/i915/display/intel_display_power.c | 361 ++++++++++----------- drivers/gpu/drm/i915/display/intel_display_power.h | 40 +-- drivers/gpu/drm/i915/i915_debugfs.c | 3 +- 4 files changed, 198 insertions(+), 216 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index a9f8be2cd364..4773832b0f49 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6737,16 +6737,16 @@ intel_aux_power_domain(struct intel_digital_port *dig_port) dig_port->tc_mode == TC_PORT_TBT_ALT) { switch (dig_port->aux_ch) { case AUX_CH_C: - return POWER_DOMAIN_AUX_TBT1; + return POWER_DOMAIN_AUX_C_TBT; case AUX_CH_D: - return POWER_DOMAIN_AUX_TBT2; + return POWER_DOMAIN_AUX_D_TBT; case AUX_CH_E: - return POWER_DOMAIN_AUX_TBT3; + return POWER_DOMAIN_AUX_E_TBT; case AUX_CH_F: - return POWER_DOMAIN_AUX_TBT4; + return POWER_DOMAIN_AUX_F_TBT; default: MISSING_CASE(dig_port->aux_ch); - return POWER_DOMAIN_AUX_TBT1; + return POWER_DOMAIN_AUX_C_TBT; } } diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 12099760d99e..ce88a27229ef 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -24,11 +24,8 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); const char * -intel_display_power_domain_str(struct drm_i915_private *i915, - enum intel_display_power_domain domain) +intel_display_power_domain_str(enum intel_display_power_domain domain) { - bool ddi_tc_ports = IS_GEN(i915, 12); - switch (domain) { case POWER_DOMAIN_DISPLAY_CORE: return "DISPLAY_CORE"; @@ -71,23 +68,17 @@ intel_display_power_domain_str(struct drm_i915_private *i915, case POWER_DOMAIN_PORT_DDI_C_LANES: return "PORT_DDI_C_LANES"; case POWER_DOMAIN_PORT_DDI_D_LANES: - BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_LANES != - POWER_DOMAIN_PORT_DDI_TC1_LANES); - return ddi_tc_ports ? "PORT_DDI_TC1_LANES" : "PORT_DDI_D_LANES"; + return "PORT_DDI_D_LANES"; case POWER_DOMAIN_PORT_DDI_E_LANES: - BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_LANES != - POWER_DOMAIN_PORT_DDI_TC2_LANES); - return ddi_tc_ports ? "PORT_DDI_TC2_LANES" : "PORT_DDI_E_LANES"; + return "PORT_DDI_E_LANES"; case POWER_DOMAIN_PORT_DDI_F_LANES: - BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_LANES != - POWER_DOMAIN_PORT_DDI_TC3_LANES); - return ddi_tc_ports ? 
"PORT_DDI_TC3_LANES" : "PORT_DDI_F_LANES"; - case POWER_DOMAIN_PORT_DDI_TC4_LANES: - return "PORT_DDI_TC4_LANES"; - case POWER_DOMAIN_PORT_DDI_TC5_LANES: - return "PORT_DDI_TC5_LANES"; - case POWER_DOMAIN_PORT_DDI_TC6_LANES: - return "PORT_DDI_TC6_LANES"; + return "PORT_DDI_F_LANES"; + case POWER_DOMAIN_PORT_DDI_G_LANES: + return "PORT_DDI_G_LANES"; + case POWER_DOMAIN_PORT_DDI_H_LANES: + return "PORT_DDI_H_LANES"; + case POWER_DOMAIN_PORT_DDI_I_LANES: + return "PORT_DDI_I_LANES"; case POWER_DOMAIN_PORT_DDI_A_IO: return "PORT_DDI_A_IO"; case POWER_DOMAIN_PORT_DDI_B_IO: @@ -95,23 +86,17 @@ intel_display_power_domain_str(struct drm_i915_private *i915, case POWER_DOMAIN_PORT_DDI_C_IO: return "PORT_DDI_C_IO"; case POWER_DOMAIN_PORT_DDI_D_IO: - BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_IO != - POWER_DOMAIN_PORT_DDI_TC1_IO); - return ddi_tc_ports ? "PORT_DDI_TC1_IO" : "PORT_DDI_D_IO"; + return "PORT_DDI_D_IO"; case POWER_DOMAIN_PORT_DDI_E_IO: - BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_IO != - POWER_DOMAIN_PORT_DDI_TC2_IO); - return ddi_tc_ports ? "PORT_DDI_TC2_IO" : "PORT_DDI_E_IO"; + return "PORT_DDI_E_IO"; case POWER_DOMAIN_PORT_DDI_F_IO: - BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_IO != - POWER_DOMAIN_PORT_DDI_TC3_IO); - return ddi_tc_ports ? "PORT_DDI_TC3_IO" : "PORT_DDI_F_IO"; - case POWER_DOMAIN_PORT_DDI_TC4_IO: - return "PORT_DDI_TC4_IO"; - case POWER_DOMAIN_PORT_DDI_TC5_IO: - return "PORT_DDI_TC5_IO"; - case POWER_DOMAIN_PORT_DDI_TC6_IO: - return "PORT_DDI_TC6_IO"; + return "PORT_DDI_F_IO"; + case POWER_DOMAIN_PORT_DDI_G_IO: + return "PORT_DDI_G_IO"; + case POWER_DOMAIN_PORT_DDI_H_IO: + return "PORT_DDI_H_IO"; + case POWER_DOMAIN_PORT_DDI_I_IO: + return "PORT_DDI_I_IO"; case POWER_DOMAIN_PORT_DSI: return "PORT_DSI"; case POWER_DOMAIN_PORT_CRT: @@ -129,34 +114,33 @@ intel_display_power_domain_str(struct drm_i915_private *i915, case POWER_DOMAIN_AUX_C: return "AUX_C"; case POWER_DOMAIN_AUX_D: - BUILD_BUG_ON(POWER_DOMAIN_AUX_D != POWER_DOMAIN_AUX_TC1); - return ddi_tc_ports ? "AUX_TC1" : "AUX_D"; + return "AUX_D"; case POWER_DOMAIN_AUX_E: - BUILD_BUG_ON(POWER_DOMAIN_AUX_E != POWER_DOMAIN_AUX_TC2); - return ddi_tc_ports ? "AUX_TC2" : "AUX_E"; + return "AUX_E"; case POWER_DOMAIN_AUX_F: - BUILD_BUG_ON(POWER_DOMAIN_AUX_F != POWER_DOMAIN_AUX_TC3); - return ddi_tc_ports ? 
"AUX_TC3" : "AUX_F"; - case POWER_DOMAIN_AUX_TC4: - return "AUX_TC4"; - case POWER_DOMAIN_AUX_TC5: - return "AUX_TC5"; - case POWER_DOMAIN_AUX_TC6: - return "AUX_TC6"; + return "AUX_F"; + case POWER_DOMAIN_AUX_G: + return "AUX_G"; + case POWER_DOMAIN_AUX_H: + return "AUX_H"; + case POWER_DOMAIN_AUX_I: + return "AUX_I"; case POWER_DOMAIN_AUX_IO_A: return "AUX_IO_A"; - case POWER_DOMAIN_AUX_TBT1: - return "AUX_TBT1"; - case POWER_DOMAIN_AUX_TBT2: - return "AUX_TBT2"; - case POWER_DOMAIN_AUX_TBT3: - return "AUX_TBT3"; - case POWER_DOMAIN_AUX_TBT4: - return "AUX_TBT4"; - case POWER_DOMAIN_AUX_TBT5: - return "AUX_TBT5"; - case POWER_DOMAIN_AUX_TBT6: - return "AUX_TBT6"; + case POWER_DOMAIN_AUX_C_TBT: + return "AUX_C_TBT"; + case POWER_DOMAIN_AUX_D_TBT: + return "AUX_D_TBT"; + case POWER_DOMAIN_AUX_E_TBT: + return "AUX_E_TBT"; + case POWER_DOMAIN_AUX_F_TBT: + return "AUX_F_TBT"; + case POWER_DOMAIN_AUX_G_TBT: + return "AUX_G_TBT"; + case POWER_DOMAIN_AUX_H_TBT: + return "AUX_H_TBT"; + case POWER_DOMAIN_AUX_I_TBT: + return "AUX_I_TBT"; case POWER_DOMAIN_GMBUS: return "GMBUS"; case POWER_DOMAIN_INIT: @@ -1718,15 +1702,12 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains) static void print_power_domains(struct i915_power_domains *power_domains, const char *prefix, u64 mask) { - struct drm_i915_private *i915 = - container_of(power_domains, struct drm_i915_private, - power_domains); enum intel_display_power_domain domain; DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask)); for_each_power_domain(domain, mask) DRM_DEBUG_DRIVER("%s use_count %d\n", - intel_display_power_domain_str(i915, domain), + intel_display_power_domain_str(domain), power_domains->domain_use_count[domain]); } @@ -1896,7 +1877,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv, { struct i915_power_domains *power_domains; struct i915_power_well *power_well; - const char *name = intel_display_power_domain_str(dev_priv, domain); + const char *name = intel_display_power_domain_str(domain); power_domains = &dev_priv->power_domains; @@ -2487,10 +2468,10 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_AUX_D) | \ BIT_ULL(POWER_DOMAIN_AUX_E) | \ BIT_ULL(POWER_DOMAIN_AUX_F) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ + BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ BIT_ULL(POWER_DOMAIN_VGA) | \ BIT_ULL(POWER_DOMAIN_AUDIO) | \ BIT_ULL(POWER_DOMAIN_INIT)) @@ -2530,22 +2511,22 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_AUX_A)) #define ICL_AUX_B_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_B)) -#define ICL_AUX_C_IO_POWER_DOMAINS ( \ +#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_C)) -#define ICL_AUX_D_IO_POWER_DOMAINS ( \ +#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_D)) -#define ICL_AUX_E_IO_POWER_DOMAINS ( \ +#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_E)) -#define ICL_AUX_F_IO_POWER_DOMAINS ( \ +#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_F)) -#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1)) -#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2)) -#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3)) -#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \ - 
BIT_ULL(POWER_DOMAIN_AUX_TBT4)) +#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_C_TBT)) +#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) +#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) +#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) #define TGL_PW_5_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_D) | \ @@ -2565,24 +2546,24 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_PIPE_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_TC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TC2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TC3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TC4) | \ - BIT_ULL(POWER_DOMAIN_AUX_TC5) | \ - BIT_ULL(POWER_DOMAIN_AUX_TC6) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_AUX_E) | \ + BIT_ULL(POWER_DOMAIN_AUX_F) | \ + BIT_ULL(POWER_DOMAIN_AUX_G) | \ + BIT_ULL(POWER_DOMAIN_AUX_H) | \ + BIT_ULL(POWER_DOMAIN_AUX_I) | \ + BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \ BIT_ULL(POWER_DOMAIN_VGA) | \ BIT_ULL(POWER_DOMAIN_AUDIO) | \ BIT_ULL(POWER_DOMAIN_INIT)) @@ -2598,35 +2579,50 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_AUX_A) | \ BIT_ULL(POWER_DOMAIN_INIT)) -#define TGL_DDI_IO_TC1_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO)) -#define TGL_DDI_IO_TC2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO)) -#define TGL_DDI_IO_TC3_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO)) -#define TGL_DDI_IO_TC4_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO)) -#define TGL_DDI_IO_TC5_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO)) -#define TGL_DDI_IO_TC6_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO)) - -#define TGL_AUX_TC1_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TC1)) -#define TGL_AUX_TC2_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TC2)) -#define TGL_AUX_TC3_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TC3)) -#define TGL_AUX_TC4_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TC4)) -#define TGL_AUX_TC5_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TC5)) -#define TGL_AUX_TC6_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TC6)) -#define TGL_AUX_TBT5_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT5)) -#define TGL_AUX_TBT6_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT6)) +#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) +#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \ + 
BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) +#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) +#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO)) +#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO)) +#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO)) + +#define TGL_AUX_A_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_A)) +#define TGL_AUX_B_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_B)) +#define TGL_AUX_C_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_C)) +#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_D)) +#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_E)) +#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_F)) +#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_G)) +#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_H)) +#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_I)) +#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) +#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) +#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) +#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_G_TBT)) +#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_H_TBT)) +#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_I_TBT)) static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, @@ -3484,8 +3480,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX C", - .domains = ICL_AUX_C_IO_POWER_DOMAINS, + .name = "AUX C TC1", + .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3495,8 +3491,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX D", - .domains = ICL_AUX_D_IO_POWER_DOMAINS, + .name = "AUX D TC2", + .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3506,8 +3502,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX E", - .domains = ICL_AUX_E_IO_POWER_DOMAINS, + .name = "AUX E TC3", + .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3517,8 +3513,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX F", - .domains = ICL_AUX_F_IO_POWER_DOMAINS, + .name = "AUX F TC4", + .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3528,8 +3524,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX TBT1", - .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, + .name = "AUX C TBT1", + .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3539,8 +3535,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX TBT2", - .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, + .name = "AUX D TBT2", + .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3550,8 +3546,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX TBT3", - .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, + 
.name = "AUX E TBT3", + .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3561,8 +3557,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX TBT4", - .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, + .name = "AUX F TBT4", + .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3667,8 +3663,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { } }, { - .name = "DDI TC1 IO", - .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, + .name = "DDI D TC1 IO", + .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3677,8 +3673,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI TC2 IO", - .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, + .name = "DDI E TC2 IO", + .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3687,8 +3683,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI TC3 IO", - .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, + .name = "DDI F TC3 IO", + .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3697,8 +3693,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI TC4 IO", - .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, + .name = "DDI G TC4 IO", + .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3707,8 +3703,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI TC5 IO", - .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, + .name = "DDI H TC5 IO", + .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3717,8 +3713,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI TC6 IO", - .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, + .name = "DDI I TC6 IO", + .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3728,7 +3724,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, { .name = "AUX A", - .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .domains = TGL_AUX_A_IO_POWER_DOMAINS, .ops = &icl_combo_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3738,7 +3734,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, { .name = "AUX B", - .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .domains = TGL_AUX_B_IO_POWER_DOMAINS, .ops = &icl_combo_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3748,7 +3744,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, { .name = "AUX C", - .domains = ICL_AUX_C_IO_POWER_DOMAINS, + .domains = TGL_AUX_C_IO_POWER_DOMAINS, .ops = &icl_combo_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3757,8 +3753,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TC1", - .domains = TGL_AUX_TC1_IO_POWER_DOMAINS, + .name = "AUX D TC1", + .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3768,8 +3764,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TC2", - .domains = TGL_AUX_TC2_IO_POWER_DOMAINS, + .name = "AUX E TC2", + .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3779,8 +3775,8 @@ static const struct i915_power_well_desc tgl_power_wells[] 
= { }, }, { - .name = "AUX TC3", - .domains = TGL_AUX_TC3_IO_POWER_DOMAINS, + .name = "AUX F TC3", + .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3790,8 +3786,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TC4", - .domains = TGL_AUX_TC4_IO_POWER_DOMAINS, + .name = "AUX G TC4", + .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3801,8 +3797,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TC5", - .domains = TGL_AUX_TC5_IO_POWER_DOMAINS, + .name = "AUX H TC5", + .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3812,8 +3808,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TC6", - .domains = TGL_AUX_TC6_IO_POWER_DOMAINS, + .name = "AUX I TC6", + .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3823,8 +3819,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TBT1", - .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, + .name = "AUX D TBT1", + .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3834,8 +3830,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TBT2", - .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, + .name = "AUX E TBT2", + .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3845,8 +3841,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TBT3", - .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, + .name = "AUX F TBT3", + .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3856,8 +3852,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TBT4", - .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, + .name = "AUX G TBT4", + .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3867,8 +3863,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TBT5", - .domains = TGL_AUX_TBT5_IO_POWER_DOMAINS, + .name = "AUX H TBT5", + .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3878,8 +3874,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TBT6", - .domains = TGL_AUX_TBT6_IO_POWER_DOMAINS, + .name = "AUX I TBT6", + .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -5104,8 +5100,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915) for_each_power_domain(domain, power_well->desc->domains) DRM_DEBUG_DRIVER(" %-23s %d\n", - intel_display_power_domain_str(i915, - domain), + intel_display_power_domain_str(domain), power_domains->domain_use_count[domain]); } } diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index a50605b8b1ad..737b5def7fc6 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -36,29 +36,20 @@ enum intel_display_power_domain { POWER_DOMAIN_PORT_DDI_B_LANES, POWER_DOMAIN_PORT_DDI_C_LANES, POWER_DOMAIN_PORT_DDI_D_LANES, - POWER_DOMAIN_PORT_DDI_TC1_LANES = 
POWER_DOMAIN_PORT_DDI_D_LANES, POWER_DOMAIN_PORT_DDI_E_LANES, - POWER_DOMAIN_PORT_DDI_TC2_LANES = POWER_DOMAIN_PORT_DDI_E_LANES, POWER_DOMAIN_PORT_DDI_F_LANES, - POWER_DOMAIN_PORT_DDI_TC3_LANES = POWER_DOMAIN_PORT_DDI_F_LANES, - POWER_DOMAIN_PORT_DDI_TC4_LANES, - POWER_DOMAIN_PORT_DDI_TC5_LANES, - POWER_DOMAIN_PORT_DDI_TC6_LANES, + POWER_DOMAIN_PORT_DDI_G_LANES, + POWER_DOMAIN_PORT_DDI_H_LANES, + POWER_DOMAIN_PORT_DDI_I_LANES, POWER_DOMAIN_PORT_DDI_A_IO, POWER_DOMAIN_PORT_DDI_B_IO, POWER_DOMAIN_PORT_DDI_C_IO, POWER_DOMAIN_PORT_DDI_D_IO, - POWER_DOMAIN_PORT_DDI_TC1_IO = POWER_DOMAIN_PORT_DDI_D_IO, POWER_DOMAIN_PORT_DDI_E_IO, - POWER_DOMAIN_PORT_DDI_TC2_IO = POWER_DOMAIN_PORT_DDI_E_IO, POWER_DOMAIN_PORT_DDI_F_IO, - POWER_DOMAIN_PORT_DDI_TC3_IO = POWER_DOMAIN_PORT_DDI_F_IO, POWER_DOMAIN_PORT_DDI_G_IO, - POWER_DOMAIN_PORT_DDI_TC4_IO = POWER_DOMAIN_PORT_DDI_G_IO, POWER_DOMAIN_PORT_DDI_H_IO, - POWER_DOMAIN_PORT_DDI_TC5_IO = POWER_DOMAIN_PORT_DDI_H_IO, POWER_DOMAIN_PORT_DDI_I_IO, - POWER_DOMAIN_PORT_DDI_TC6_IO = POWER_DOMAIN_PORT_DDI_I_IO, POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_PORT_CRT, POWER_DOMAIN_PORT_OTHER, @@ -68,21 +59,19 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C, POWER_DOMAIN_AUX_D, - POWER_DOMAIN_AUX_TC1 = POWER_DOMAIN_AUX_D, POWER_DOMAIN_AUX_E, - POWER_DOMAIN_AUX_TC2 = POWER_DOMAIN_AUX_E, POWER_DOMAIN_AUX_F, - POWER_DOMAIN_AUX_TC3 = POWER_DOMAIN_AUX_F, - POWER_DOMAIN_AUX_TC4, - POWER_DOMAIN_AUX_TC5, - POWER_DOMAIN_AUX_TC6, + POWER_DOMAIN_AUX_G, + POWER_DOMAIN_AUX_H, + POWER_DOMAIN_AUX_I, POWER_DOMAIN_AUX_IO_A, - POWER_DOMAIN_AUX_TBT1, - POWER_DOMAIN_AUX_TBT2, - POWER_DOMAIN_AUX_TBT3, - POWER_DOMAIN_AUX_TBT4, - POWER_DOMAIN_AUX_TBT5, - POWER_DOMAIN_AUX_TBT6, + POWER_DOMAIN_AUX_C_TBT, + POWER_DOMAIN_AUX_D_TBT, + POWER_DOMAIN_AUX_E_TBT, + POWER_DOMAIN_AUX_F_TBT, + POWER_DOMAIN_AUX_G_TBT, + POWER_DOMAIN_AUX_H_TBT, + POWER_DOMAIN_AUX_I_TBT, POWER_DOMAIN_GMBUS, POWER_DOMAIN_MODESET, POWER_DOMAIN_GT_IRQ, @@ -269,8 +258,7 @@ void intel_display_power_suspend(struct drm_i915_private *i915); void intel_display_power_resume(struct drm_i915_private *i915); const char * -intel_display_power_domain_str(struct drm_i915_private *i915, - enum intel_display_power_domain domain); +intel_display_power_domain_str(enum intel_display_power_domain domain); bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 5c1a2b1e7d34..5e81c4fc13ae 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -2365,8 +2365,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) for_each_power_domain(power_domain, power_well->desc->domains) seq_printf(m, " %-23s %d\n", - intel_display_power_domain_str(dev_priv, - power_domain), + intel_display_power_domain_str(power_domain), power_domains->domain_use_count[power_domain]); } -- cgit v1.2.3 From 0f7dc62068bb2b929e213e759dba1eb038448a5c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 26 Aug 2019 08:21:27 +0100 Subject: drm/i915: Protect our local workers against I915_FENCE_TIMEOUT Trust our own workers to not cause unnecessary delays and disable the automatic timeout on their asynchronous fence waits. (Along the same lines that we trust our own requests to complete eventually, if necessary by force.) 
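In short, the await path keeps the safety timeout only for foreign fences; a sketch of the changed call as in the hunk below, where a zero fence->context identifies the driver's own worker fences:

	ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
					    fence->context ? I915_FENCE_TIMEOUT : 0,
					    I915_FENCE_GFP);
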
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190826072149.9447-6-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 18865ce04e13..754a78364a63 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -924,7 +924,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) ret = i915_request_await_request(rq, to_request(fence)); else ret = i915_sw_fence_await_dma_fence(&rq->submit, fence, - I915_FENCE_TIMEOUT, + fence->context ? I915_FENCE_TIMEOUT : 0, I915_FENCE_GFP); if (ret < 0) return ret; -- cgit v1.2.3 From 9770f22077981cac2bd2f4dd6caa6269892cd3f7 Mon Sep 17 00:00:00 2001 From: Madhumitha Tolakanahalli Pradeep Date: Thu, 22 Aug 2019 17:46:55 -0700 Subject: drm/i915/tgl: Enabling DSC on Pipe A for TGL DSC was not supported on Pipe A for previous platforms. Tigerlake onwards, all the pipes support DSC. Hence, the DSC and FEC restriction on Pipe A needs to be removed. v2: Changes in the logic around removing the restriction around Pipe A (Manasi, Lucas) Cc: Manasi Navare Signed-off-by: Madhumitha Tolakanahalli Pradeep Reviewed-by: Manasi Navare Reviewed-by: Lucas De Marchi Signed-off-by: Manasi Navare Link: https://patchwork.freedesktop.org/patch/msgid/20190823004655.28905-1-madhumitha.tolakanahalli.pradeep@intel.com --- drivers/gpu/drm/i915/display/intel_dp.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index f74594d8d9df..c9986c7a3f5b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -1737,8 +1737,14 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - return INTEL_GEN(dev_priv) >= 11 && - pipe_config->cpu_transcoder != TRANSCODER_A; + /* On TGL, FEC is supported on all Pipes */ + if (INTEL_GEN(dev_priv) >= 12) + return true; + + if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A) + return true; + + return false; } static bool intel_dp_supports_fec(struct intel_dp *intel_dp, @@ -1753,8 +1759,15 @@ static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - return INTEL_GEN(dev_priv) >= 10 && - pipe_config->cpu_transcoder != TRANSCODER_A; + /* On TGL, DSC is supported on all Pipes */ + if (INTEL_GEN(dev_priv) >= 12) + return true; + + if (INTEL_GEN(dev_priv) >= 10 && + pipe_config->cpu_transcoder != TRANSCODER_A) + return true; + + return false; } static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, -- cgit v1.2.3 From 074c77e3ec636fa5e2664e3c79aba397ec569761 Mon Sep 17 00:00:00 2001 From: Dhinakaran Pandiyan Date: Tue, 27 Aug 2019 01:45:16 -0700 Subject: drm/i915/tgl: Gen-12 display loses Yf tiling and legacy CCS support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Yf tiling was removed in gen-12, so do not expose Yf modifiers to user space. Gen-12 display also is incompatible with pre-gen12 Y-tiled CCS, so do not expose I915_FORMAT_MOD_Y_TILED_CCS. v2: Rebase to carry forward recently added gen11 formats. 
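For contrast with the pre-gen12 tables, a small user-space sketch (not part of the patch) of which modifiers remain exposable on gen-12 under this change; it only mirrors the gen12_plane_format_mod_supported() hunk below and assumes libdrm's uapi drm_fourcc.h header is available for the modifier macros.

#include <drm/drm_fourcc.h>	/* libdrm uapi header providing the modifier macros */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Yf tiling and the pre-gen12 Y-tiled CCS modifier are deliberately absent. */
static bool gen12_modifier_exposed(uint64_t modifier)
{
	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("Y tiled:     %d\n", gen12_modifier_exposed(I915_FORMAT_MOD_Y_TILED));
	printf("Yf tiled:    %d\n", gen12_modifier_exposed(I915_FORMAT_MOD_Yf_TILED));
	printf("Y tiled CCS: %d\n", gen12_modifier_exposed(I915_FORMAT_MOD_Y_TILED_CCS));
	return 0;
}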
Cc: Ville Syrjälä Cc: Stanislav Lisovskiy Signed-off-by: Dhinakaran Pandiyan Reviewed-by: Ville Syrjälä Signed-off-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20190827084516.6748-1-dhinakaran.pandiyan@intel.com --- drivers/gpu/drm/i915/display/intel_sprite.c | 86 +++++++++++++++++++++++++++-- 1 file changed, 80 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index dea63be1964f..8ca2fbc2af1d 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -2157,6 +2157,13 @@ static const u64 skl_plane_format_modifiers_ccs[] = { DRM_FORMAT_MOD_INVALID }; +static const u64 gen12_plane_format_modifiers_noccs[] = { + I915_FORMAT_MOD_Y_TILED, + I915_FORMAT_MOD_X_TILED, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { @@ -2305,6 +2312,55 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, } } +static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) +{ + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + break; + default: + return false; + } + + switch (format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_C8: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: + if (modifier == DRM_FORMAT_MOD_LINEAR || + modifier == I915_FORMAT_MOD_X_TILED || + modifier == I915_FORMAT_MOD_Y_TILED) + return true; + /* fall through */ + default: + return false; + } +} + static const struct drm_plane_funcs g4x_sprite_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -2341,6 +2397,15 @@ static const struct drm_plane_funcs skl_plane_funcs = { .format_mod_supported = skl_plane_format_mod_supported, }; +static const struct drm_plane_funcs gen12_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = intel_plane_destroy, + .atomic_duplicate_state = intel_plane_duplicate_state, + .atomic_destroy_state = intel_plane_destroy_state, + .format_mod_supported = gen12_plane_format_mod_supported, +}; + static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { @@ -2429,6 +2494,7 @@ struct intel_plane * skl_universal_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { + static const struct drm_plane_funcs *plane_funcs; struct intel_plane *plane; enum drm_plane_type plane_type; unsigned int supported_rotations; @@ -2471,11 +2537,19 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, formats = skl_get_plane_formats(dev_priv, pipe, plane_id, &num_formats); - plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, 
plane_id); - if (plane->has_ccs) - modifiers = skl_plane_format_modifiers_ccs; - else - modifiers = skl_plane_format_modifiers_noccs; + if (INTEL_GEN(dev_priv) >= 12) { + /* TODO: Implement support for gen-12 CCS modifiers */ + plane->has_ccs = false; + modifiers = gen12_plane_format_modifiers_noccs; + plane_funcs = &gen12_plane_funcs; + } else { + plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); + if (plane->has_ccs) + modifiers = skl_plane_format_modifiers_ccs; + else + modifiers = skl_plane_format_modifiers_noccs; + plane_funcs = &skl_plane_funcs; + } if (plane_id == PLANE_PRIMARY) plane_type = DRM_PLANE_TYPE_PRIMARY; @@ -2485,7 +2559,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, possible_crtcs = BIT(pipe); ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, - possible_crtcs, &skl_plane_funcs, + possible_crtcs, plane_funcs, formats, num_formats, modifiers, plane_type, "plane %d%c", plane_id + 1, -- cgit v1.2.3 From 99d7a74110ef4c97f921b90fa8d897c6d71610a9 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Fri, 23 Aug 2019 01:20:39 -0700 Subject: drm/i915/tgl: PSR link standby is not supported anymore MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to BSpc if link standby is set on TGL+, PSR will not be enabled. Vendors should not use panels that requires link standby and even if they do, panel should assert a PSR error that will cause PSR to be disabled. BSpec: 50434 Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Anshuman Gupta Link: https://patchwork.freedesktop.org/patch/msgid/20190823082055.5992-8-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_psr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 2af3826121fa..629b8b98a97f 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -1283,8 +1283,8 @@ void intel_psr_init(struct drm_i915_private *dev_priv) if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) /* HSW and BDW require workarounds that we don't implement. */ dev_priv->psr.link_standby = false; - else - /* For new platforms let's respect VBT back again */ + else if (INTEL_GEN(dev_priv) < 12) + /* For new platforms up to TGL let's respect VBT back again */ dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; INIT_WORK(&dev_priv->psr.work, intel_psr_work); -- cgit v1.2.3 From e7b6affd0baef7b3c5e2acd94236093dcf437105 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 29 Aug 2019 18:08:48 +0100 Subject: drm/i915/selftests: cond_resched() within the longer buddy tests Let the scheduler have a breather in between passes of the longer buddy tests. Important if we are running under kasan etc and this takes far longer than usual! 
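The pattern is simply to yield inside the long-running loop. A kernel-style sketch of the shape of the fix, not taken from the patch; do_one_pass() is a hypothetical stand-in for one pass of a buddy selftest.

#include <linux/sched.h>

/* Hypothetical unit of work standing in for one pass of a buddy selftest. */
static int do_one_pass(unsigned int pass)
{
	return 0;
}

static int run_long_selftest(unsigned int passes)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < passes; i++) {
		err = do_one_pass(i);
		if (err)
			break;

		/* Each pass can be slow under KASAN; let other tasks run. */
		cond_resched();
	}

	return err;
}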
Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190829170848.969-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_buddy.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c index 23f784eae1e7..1b856bae67b5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_buddy.c +++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c @@ -375,6 +375,8 @@ retry: if (err) break; + + cond_resched(); } if (err == -ENOMEM) @@ -687,6 +689,8 @@ static int igt_buddy_alloc_range(void *arg) rem -= size; if (!rem) break; + + cond_resched(); } if (err == -ENOMEM) -- cgit v1.2.3 From 9be02fde93e5b564663f10a48f19159fba718e3d Mon Sep 17 00:00:00 2001 From: Fernando Pacheco Date: Thu, 29 Aug 2019 10:41:53 -0700 Subject: drm/i915/uc: Extract common code from GuC stop/disable comm During normal driver unload we attempt to disable GuC communication while it is currently stopped. This results in a nop'd call to intel_guc_ct_disable within guc_disable_communication because stop/disable rely on the same flag to prevent further comms with CT. We can avoid the call to disable and still leave communication in a satisfactory state by extracting a set of shared steps from stop/disable. This set can include guc_disable_interrupts as we do not require the single caller of guc_stop_communication to be atomic: "drm/i915/selftests: Fixup atomic reset checking". This situation (stop -> disable) only occurs during intel_uc_fini_hw, so during fini, call guc_disable_communication only if currently enabled. The symmetric calls to enable/disable remain unmodified for all other scenarios. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110943 Signed-off-by: Fernando Pacheco Cc: Chris Wilson Cc: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190829174154.14675-1-fernando.pacheco@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 71ee7ab035cc..29a9eec60d2e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -224,17 +224,7 @@ static int guc_enable_communication(struct intel_guc *guc) return 0; } -static void guc_stop_communication(struct intel_guc *guc) -{ - intel_guc_ct_stop(&guc->ct); - - guc->send = intel_guc_send_nop; - guc->handler = intel_guc_to_host_event_handler_nop; - - guc_clear_mmio_msg(guc); -} - -static void guc_disable_communication(struct intel_guc *guc) +static void __guc_stop_communication(struct intel_guc *guc) { /* * Events generated during or after CT disable are logged by guc in @@ -247,6 +237,20 @@ static void guc_disable_communication(struct intel_guc *guc) guc->send = intel_guc_send_nop; guc->handler = intel_guc_to_host_event_handler_nop; +} + +static void guc_stop_communication(struct intel_guc *guc) +{ + intel_guc_ct_stop(&guc->ct); + + __guc_stop_communication(guc); + + DRM_INFO("GuC communication stopped\n"); +} + +static void guc_disable_communication(struct intel_guc *guc) +{ + __guc_stop_communication(guc); intel_guc_ct_disable(&guc->ct); @@ -537,7 +541,9 @@ void intel_uc_fini_hw(struct intel_uc *uc) if (intel_uc_supports_guc_submission(uc)) intel_guc_submission_disable(guc); - 
guc_disable_communication(guc); + if (guc_communication_enabled(guc)) + guc_disable_communication(guc); + __uc_sanitize(uc); } -- cgit v1.2.3 From 31444afb460ee627cb7800acee46548449c52368 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Thu, 29 Aug 2019 21:19:19 +0100 Subject: drm/i915: s/for_each_sgt_dma/for_each_sgt_daddr/ The sg_table for our backing store might contain addresses from stolen-memory or in the future local-memory, at which point this is no longer a dma-iterator. As a consequence we should now break on NULL iter.sgp, instead of dmap == 0 which is considered an invalid dma address. As a bonus, gcc much prefers this construct, Function old new delta gen8_ggtt_insert_entries 211 192 -19 gen6_ggtt_insert_entries 292 262 -30 i915_error_object_create 996 954 -42 Signed-off-by: Matthew Auld Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190829201919.21493-1-matthew.auld@intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 4 ++-- drivers/gpu/drm/i915/i915_gem_gtt.h | 4 ++-- drivers/gpu/drm/i915/i915_gpu_error.c | 2 +- drivers/gpu/drm/i915/i915_scatterlist.h | 8 ++++---- drivers/gpu/drm/i915/selftests/i915_vma.c | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index c94dfa562247..3c50cea76dc5 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2208,7 +2208,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE; - for_each_sgt_dma(addr, sgt_iter, vma->pages) + for_each_sgt_daddr(addr, sgt_iter, vma->pages) gen8_set_pte(gtt_entries++, pte_encode | addr); /* @@ -2249,7 +2249,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE; struct sgt_iter iter; dma_addr_t addr; - for_each_sgt_dma(addr, iter, vma->pages) + for_each_sgt_daddr(addr, iter, vma->pages) iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); /* diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index b97a47fc7a68..576f6fb95d13 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -148,8 +148,8 @@ typedef u64 gen8_pte_t; #define GEN8_PDE_IPS_64K BIT(11) #define GEN8_PDE_PS_2M BIT(7) -#define for_each_sgt_dma(__dmap, __iter, __sgt) \ - __for_each_sgt_dma(__dmap, __iter, __sgt, I915_GTT_PAGE_SIZE) +#define for_each_sgt_daddr(__dp, __iter, __sgt) \ + __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE) struct intel_remapped_plane_info { /* in gtt pages */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 4aff342b8944..3ccf7fd9307f 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -990,7 +990,7 @@ i915_error_object_create(struct drm_i915_private *i915, dst->unused = 0; ret = -EINVAL; - for_each_sgt_dma(dma, iter, vma->pages) { + for_each_sgt_daddr(dma, iter, vma->pages) { void __iomem *s; ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index 6617963df9ed..b7b59328cb76 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -67,15 +67,15 @@ static inline struct scatterlist 
*__sg_next(struct scatterlist *sg) } /** - * __for_each_sgt_dma - iterate over the DMA addresses of the given sg_table - * @__dmap: DMA address (output) + * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table + * @__dp: Device address (output) * @__iter: 'struct sgt_iter' (iterator state, internal) * @__sgt: sg_table to iterate over (input) * @__step: step size */ -#define __for_each_sgt_dma(__dmap, __iter, __sgt, __step) \ +#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step) \ for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ - ((__dmap) = (__iter).dma + (__iter).curr); \ + ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp; \ (((__iter).curr += (__step)) >= (__iter).max) ? \ (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0) diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index a5bec0a4cdcc..97752deecccb 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -623,7 +623,7 @@ static bool assert_partial(struct drm_i915_gem_object *obj, struct sgt_iter sgt; dma_addr_t dma; - for_each_sgt_dma(dma, sgt, vma->pages) { + for_each_sgt_daddr(dma, sgt, vma->pages) { dma_addr_t src; if (!size) { -- cgit v1.2.3 From 0c84127102ee240e3039d6e78a3dba0afff2f810 Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Tue, 27 Aug 2019 15:17:34 -0700 Subject: drm/i915/display: Rename update_crtcs() to commit_modeset_enables() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch has no functional changes. This just renames the update_crtcs() hooks to commit_modeset_enables() to match the drm_atomic helper naming conventions. v2: * Rebase on drm-tip Suggested-by: Daniel Vetter Cc: Ville Syrjälä Cc: Maarten Lankhorst Cc: Matt Roper Cc: Jani Nikula Reviewed-by: Maarten Lankhorst Signed-off-by: Manasi Navare Link: https://patchwork.freedesktop.org/patch/msgid/20190827221735.29351-2-manasi.d.navare@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 10 +++++----- drivers/gpu/drm/i915/i915_drv.h | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 4773832b0f49..85183ef80c97 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -13743,7 +13743,7 @@ static void intel_update_crtc(struct intel_crtc *crtc, intel_finish_crtc_commit(state, crtc); } -static void intel_update_crtcs(struct intel_atomic_state *state) +static void intel_commit_modeset_enables(struct intel_atomic_state *state) { struct intel_crtc *crtc; struct intel_crtc_state *old_crtc_state, *new_crtc_state; @@ -13758,7 +13758,7 @@ static void intel_update_crtcs(struct intel_atomic_state *state) } } -static void skl_update_crtcs(struct intel_atomic_state *state) +static void skl_commit_modeset_enables(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; @@ -13999,7 +13999,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_encoders_update_prepare(state); /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ - dev_priv->display.update_crtcs(state); + dev_priv->display.commit_modeset_enables(state); if (state->modeset) { intel_encoders_update_complete(state); @@ -15906,9 +15906,9 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) } if (INTEL_GEN(dev_priv) >= 9) - dev_priv->display.update_crtcs = skl_update_crtcs; + dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables; else - dev_priv->display.update_crtcs = intel_update_crtcs; + dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables; } static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b42651a387d9..75a42e8df67e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -288,7 +288,7 @@ struct drm_i915_display_funcs { struct intel_atomic_state *old_state); void (*crtc_disable)(struct intel_crtc_state *old_crtc_state, struct intel_atomic_state *old_state); - void (*update_crtcs)(struct intel_atomic_state *state); + void (*commit_modeset_enables)(struct intel_atomic_state *state); void (*audio_codec_enable)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); -- cgit v1.2.3 From 66d9cec8a6c9f3eb1d81507d5f045e28272359fb Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Wed, 28 Aug 2019 15:47:01 -0700 Subject: drm/i915/display: Move the commit_tail() disable sequence to separate function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Create a new function intel_commit_modeset_disables() consistent with the naming in drm atomic helpers and similar to the enable function. This helps better organize the disable sequence in atomic_commit_tail() No functional change v4: * Do not create a function pointer, just a function (Maarten) v3: * Rebase (Manasi) v2: * Create a helper for old_crtc_state disables (Lucas) Suggested-by: Daniel Vetter Cc: Ville Syrjälä Cc: Maarten Lankhorst Cc: Matt Roper Cc: Jani Nikula Signed-off-by: Manasi Navare Reviewed-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20190828224701.422-1-manasi.d.navare@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 92 ++++++++++++++++++---------- drivers/gpu/drm/i915/i915_drv.h | 1 + 2 files changed, 59 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 85183ef80c97..b38d842ff6ec 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -13743,6 +13743,61 @@ static void intel_update_crtc(struct intel_crtc *crtc, intel_finish_crtc_commit(state, crtc); } +static void intel_old_crtc_state_disables(struct intel_atomic_state *state, + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + + intel_crtc_disable_planes(state, crtc); + + /* + * We need to disable pipe CRC before disabling the pipe, + * or we race against vblank off. + */ + intel_crtc_disable_pipe_crc(crtc); + + dev_priv->display.crtc_disable(old_crtc_state, state); + crtc->active = false; + intel_fbc_disable(crtc); + intel_disable_shared_dpll(old_crtc_state); + + /* + * Underruns don't always raise interrupts, + * so check manually. 
+ */ + intel_check_cpu_fifo_underruns(dev_priv); + intel_check_pch_fifo_underruns(dev_priv); + + /* FIXME unify this for all platforms */ + if (!new_crtc_state->base.active && + !HAS_GMCH(dev_priv) && + dev_priv->display.initial_watermarks) + dev_priv->display.initial_watermarks(state, + new_crtc_state); +} + +static void intel_commit_modeset_disables(struct intel_atomic_state *state) +{ + struct intel_crtc_state *new_crtc_state, *old_crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (!needs_modeset(new_crtc_state)) + continue; + + intel_pre_plane_update(old_crtc_state, new_crtc_state); + + if (old_crtc_state->base.active) + intel_old_crtc_state_disables(state, + old_crtc_state, + new_crtc_state, + crtc); + } +} + static void intel_commit_modeset_enables(struct intel_atomic_state *state) { struct intel_crtc *crtc; @@ -13923,42 +13978,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) put_domains[crtc->pipe] = modeset_get_crtc_power_domains(new_crtc_state); } - - if (!needs_modeset(new_crtc_state)) - continue; - - intel_pre_plane_update(old_crtc_state, new_crtc_state); - - if (old_crtc_state->base.active) { - intel_crtc_disable_planes(state, crtc); - - /* - * We need to disable pipe CRC before disabling the pipe, - * or we race against vblank off. - */ - intel_crtc_disable_pipe_crc(crtc); - - dev_priv->display.crtc_disable(old_crtc_state, state); - crtc->active = false; - intel_fbc_disable(crtc); - intel_disable_shared_dpll(old_crtc_state); - - /* - * Underruns don't always raise - * interrupts, so check manually. - */ - intel_check_cpu_fifo_underruns(dev_priv); - intel_check_pch_fifo_underruns(dev_priv); - - /* FIXME unify this for all platforms */ - if (!new_crtc_state->base.active && - !HAS_GMCH(dev_priv) && - dev_priv->display.initial_watermarks) - dev_priv->display.initial_watermarks(state, - new_crtc_state); - } } + intel_commit_modeset_disables(state); + /* FIXME: Eventually get rid of our crtc->config pointer */ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) crtc->config = new_crtc_state; @@ -15909,6 +15932,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables; else dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables; + } static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 75a42e8df67e..db7480831e52 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -289,6 +289,7 @@ struct drm_i915_display_funcs { void (*crtc_disable)(struct intel_crtc_state *old_crtc_state, struct intel_atomic_state *old_state); void (*commit_modeset_enables)(struct intel_atomic_state *state); + void (*commit_modeset_disables)(struct intel_atomic_state *state); void (*audio_codec_enable)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); -- cgit v1.2.3 From 11988e393813f520974f69bf0e55c367a1cbac48 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 29 Aug 2019 09:11:15 +0100 Subject: drm/i915/execlists: Try rearranging breadcrumb flush The addition of the DC_FLUSH failed to ensure sanctity of the post-sync write as CI immediately got a completion CS-event before the breadcrumb was coherent. So let's try the other idea of moving the post-sync write into the CS_STALL. 
References: https://bugs.freedesktop.org/show_bug.cgi?id=111514 References: e8f6b4952ec5 ("drm/i915/execlists: Flush the post-sync breadcrumb write harder") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Acked-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190829081150.10271-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 171d5205962c..7d460b1842dd 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2915,20 +2915,18 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) { + cs = gen8_emit_pipe_control(cs, + PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_DC_FLUSH_ENABLE, + 0); + + /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */ cs = gen8_emit_ggtt_write_rcs(cs, request->fence.seqno, request->timeline->hwsp_offset, - PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | - PIPE_CONTROL_DEPTH_CACHE_FLUSH | - PIPE_CONTROL_DC_FLUSH_ENABLE); - - /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */ - /* XXX DC_FLUSH for post-sync write? (cf early context-switch bug) */ - cs = gen8_emit_pipe_control(cs, - PIPE_CONTROL_FLUSH_ENABLE | - PIPE_CONTROL_DC_FLUSH_ENABLE | - PIPE_CONTROL_CS_STALL, - 0); + PIPE_CONTROL_FLUSH_ENABLE | + PIPE_CONTROL_CS_STALL); return gen8_emit_fini_breadcrumb_footer(request, cs); } -- cgit v1.2.3 From 0dcceb35a13de07d8c28305b69a33b339c7eba0a Mon Sep 17 00:00:00 2001 From: Ramalingam C Date: Wed, 28 Aug 2019 22:12:11 +0530 Subject: drm/i915: mei_hdcp: I915 sends ddi index as per ME FW I915 converts it's port value into ddi index defiend by ME FW and pass it as a member of hdcp_port_data structure. Hence expose the enum mei_fw_ddi to I915 through i915_mei_interface.h. v2: Copyright years are bumped [Tomas] Signed-off-by: Ramalingam C Acked-by: Jani Nikula Reviewed-by: Shashank Sharma Acked-by: Tomas Winkler Signed-off-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20190828164216.405-2-ramalingam.c@intel.com --- drivers/gpu/drm/i915/display/intel_hdcp.c | 17 +++++++++++++++- drivers/misc/mei/hdcp/mei_hdcp.c | 34 ++++++++++--------------------- drivers/misc/mei/hdcp/mei_hdcp.h | 12 ----------- include/drm/i915_mei_hdcp_interface.h | 18 +++++++++++++--- 4 files changed, 42 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 6ec5ceeab601..e8b04cc8fcb1 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -1,9 +1,11 @@ /* SPDX-License-Identifier: MIT */ /* * Copyright (C) 2017 Google, Inc. + * Copyright _ 2017-2019, Intel Corporation. * * Authors: * Sean Paul + * Ramalingam C */ #include @@ -1749,13 +1751,26 @@ static const struct component_ops i915_hdcp_component_ops = { .unbind = i915_hdcp_component_unbind, }; +static inline +enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port) +{ + switch (port) { + case PORT_A: + return MEI_DDI_A; + case PORT_B ... 
PORT_F: + return (enum mei_fw_ddi)port; + default: + return MEI_DDI_INVALID_PORT; + } +} + static inline int initialize_hdcp_port_data(struct intel_connector *connector, const struct intel_hdcp_shim *shim) { struct intel_hdcp *hdcp = &connector->hdcp; struct hdcp_port_data *data = &hdcp->port_data; - data->port = connector->encoder->port; + data->fw_ddi = intel_get_mei_fw_ddi_index(connector->encoder->port); data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; data->protocol = (u8)shim->protocol; diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c index c681f6fab342..3638c77eba26 100644 --- a/drivers/misc/mei/hdcp/mei_hdcp.c +++ b/drivers/misc/mei/hdcp/mei_hdcp.c @@ -27,18 +27,6 @@ #include "mei_hdcp.h" -static inline u8 mei_get_ddi_index(enum port port) -{ - switch (port) { - case PORT_A: - return MEI_DDI_A; - case PORT_B ... PORT_F: - return (u8)port; - default: - return MEI_DDI_INVALID_PORT; - } -} - /** * mei_hdcp_initiate_session() - Initiate a Wired HDCP2.2 Tx Session in ME FW * @dev: device corresponding to the mei_cl_device @@ -69,7 +57,7 @@ mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data, WIRED_CMD_BUF_LEN_INITIATE_HDCP2_SESSION_IN; session_init_in.port.integrated_port_type = data->port_type; - session_init_in.port.physical_port = mei_get_ddi_index(data->port); + session_init_in.port.physical_port = (u8)data->fw_ddi; session_init_in.protocol = data->protocol; byte = mei_cldev_send(cldev, (u8 *)&session_init_in, @@ -138,7 +126,7 @@ mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev, WIRED_CMD_BUF_LEN_VERIFY_RECEIVER_CERT_IN; verify_rxcert_in.port.integrated_port_type = data->port_type; - verify_rxcert_in.port.physical_port = mei_get_ddi_index(data->port); + verify_rxcert_in.port.physical_port = (u8)data->fw_ddi; verify_rxcert_in.cert_rx = rx_cert->cert_rx; memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN); @@ -208,7 +196,7 @@ mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data, send_hprime_in.header.buffer_len = WIRED_CMD_BUF_LEN_AKE_SEND_HPRIME_IN; send_hprime_in.port.integrated_port_type = data->port_type; - send_hprime_in.port.physical_port = mei_get_ddi_index(data->port); + send_hprime_in.port.physical_port = (u8)data->fw_ddi; memcpy(send_hprime_in.h_prime, rx_hprime->h_prime, HDCP_2_2_H_PRIME_LEN); @@ -265,7 +253,7 @@ mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data, WIRED_CMD_BUF_LEN_SEND_PAIRING_INFO_IN; pairing_info_in.port.integrated_port_type = data->port_type; - pairing_info_in.port.physical_port = mei_get_ddi_index(data->port); + pairing_info_in.port.physical_port = (u8)data->fw_ddi; memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km, HDCP_2_2_E_KH_KM_LEN); @@ -323,7 +311,7 @@ mei_hdcp_initiate_locality_check(struct device *dev, lc_init_in.header.buffer_len = WIRED_CMD_BUF_LEN_INIT_LOCALITY_CHECK_IN; lc_init_in.port.integrated_port_type = data->port_type; - lc_init_in.port.physical_port = mei_get_ddi_index(data->port); + lc_init_in.port.physical_port = (u8)data->fw_ddi; byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in)); if (byte < 0) { @@ -378,7 +366,7 @@ mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data, WIRED_CMD_BUF_LEN_VALIDATE_LOCALITY_IN; verify_lprime_in.port.integrated_port_type = data->port_type; - verify_lprime_in.port.physical_port = mei_get_ddi_index(data->port); + verify_lprime_in.port.physical_port = (u8)data->fw_ddi; memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime, HDCP_2_2_L_PRIME_LEN); @@ 
-435,7 +423,7 @@ static int mei_hdcp_get_session_key(struct device *dev, get_skey_in.header.buffer_len = WIRED_CMD_BUF_LEN_GET_SESSION_KEY_IN; get_skey_in.port.integrated_port_type = data->port_type; - get_skey_in.port.physical_port = mei_get_ddi_index(data->port); + get_skey_in.port.physical_port = (u8)data->fw_ddi; byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in)); if (byte < 0) { @@ -499,7 +487,7 @@ mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev, WIRED_CMD_BUF_LEN_VERIFY_REPEATER_IN; verify_repeater_in.port.integrated_port_type = data->port_type; - verify_repeater_in.port.physical_port = mei_get_ddi_index(data->port); + verify_repeater_in.port.physical_port = (u8)data->fw_ddi; memcpy(verify_repeater_in.rx_info, rep_topology->rx_info, HDCP_2_2_RXINFO_LEN); @@ -569,7 +557,7 @@ static int mei_hdcp_verify_mprime(struct device *dev, WIRED_CMD_BUF_LEN_REPEATER_AUTH_STREAM_REQ_MIN_IN; verify_mprime_in.port.integrated_port_type = data->port_type; - verify_mprime_in.port.physical_port = mei_get_ddi_index(data->port); + verify_mprime_in.port.physical_port = (u8)data->fw_ddi; memcpy(verify_mprime_in.m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN); @@ -630,7 +618,7 @@ static int mei_hdcp_enable_authentication(struct device *dev, enable_auth_in.header.buffer_len = WIRED_CMD_BUF_LEN_ENABLE_AUTH_IN; enable_auth_in.port.integrated_port_type = data->port_type; - enable_auth_in.port.physical_port = mei_get_ddi_index(data->port); + enable_auth_in.port.physical_port = (u8)data->fw_ddi; enable_auth_in.stream_type = data->streams[0].stream_type; byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in, @@ -684,7 +672,7 @@ mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data) WIRED_CMD_BUF_LEN_CLOSE_SESSION_IN; session_close_in.port.integrated_port_type = data->port_type; - session_close_in.port.physical_port = mei_get_ddi_index(data->port); + session_close_in.port.physical_port = (u8)data->fw_ddi; byte = mei_cldev_send(cldev, (u8 *)&session_close_in, sizeof(session_close_in)); diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h index e4b1cd54c853..e60282eb2d48 100644 --- a/drivers/misc/mei/hdcp/mei_hdcp.h +++ b/drivers/misc/mei/hdcp/mei_hdcp.h @@ -362,16 +362,4 @@ struct wired_cmd_repeater_auth_stream_req_out { struct hdcp_cmd_header header; struct hdcp_port_id port; } __packed; - -enum mei_fw_ddi { - MEI_DDI_INVALID_PORT = 0x0, - - MEI_DDI_B = 1, - MEI_DDI_C, - MEI_DDI_D, - MEI_DDI_E, - MEI_DDI_F, - MEI_DDI_A = 7, - MEI_DDI_RANGE_END = MEI_DDI_A, -}; #endif /* __MEI_HDCP_H__ */ diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h index 8c344255146a..08670aa650d4 100644 --- a/include/drm/i915_mei_hdcp_interface.h +++ b/include/drm/i915_mei_hdcp_interface.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: (GPL-2.0+) */ /* - * Copyright © 2017-2018 Intel Corporation + * Copyright © 2017-2019 Intel Corporation * * Authors: * Ramalingam C @@ -42,9 +42,21 @@ enum hdcp_wired_protocol { HDCP_PROTOCOL_DP }; +enum mei_fw_ddi { + MEI_DDI_INVALID_PORT = 0x0, + + MEI_DDI_B = 1, + MEI_DDI_C, + MEI_DDI_D, + MEI_DDI_E, + MEI_DDI_F, + MEI_DDI_A = 7, + MEI_DDI_RANGE_END = MEI_DDI_A, +}; + /** * struct hdcp_port_data - intel specific HDCP port data - * @port: port index as per I915 + * @fw_ddi: ddi index as per ME FW * @port_type: HDCP port type as per ME FW classification * @protocol: HDCP adaptation as per ME FW * @k: No of streams transmitted on a port. 
Only on DP MST this is != 1 @@ -56,7 +68,7 @@ enum hdcp_wired_protocol { * streams */ struct hdcp_port_data { - enum port port; + enum mei_fw_ddi fw_ddi; u8 port_type; u8 protocol; u16 k; -- cgit v1.2.3 From 5b6030da28cd6d6f5d129d7039ea0db83bcee6cf Mon Sep 17 00:00:00 2001 From: Ramalingam C Date: Wed, 28 Aug 2019 22:12:12 +0530 Subject: drm: Move port definition back to i915 header We dont need the definition of the enum port outside I915, anymore. Hence move enum port definition into I915 driver itself. v2: intel_display.h is included in intel_hdcp.h v3: enum port is declared in headers. v4: commit msg is rephrased. v5: copyright year is updated [Tomas] Signed-off-by: Ramalingam C Reviewed-by: Jani Nikula Reviewed-by: Shashank Sharma Reviewed-by: Tomas Winkler Signed-off-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20190828164216.405-3-ramalingam.c@intel.com --- drivers/gpu/drm/i915/display/intel_bios.h | 3 ++- drivers/gpu/drm/i915/display/intel_display.h | 20 +++++++++++++++++++- drivers/gpu/drm/i915/display/intel_dp.h | 1 + drivers/gpu/drm/i915/display/intel_hdcp.h | 1 + drivers/gpu/drm/i915/display/intel_hdmi.h | 1 + drivers/gpu/drm/i915/display/intel_hotplug.h | 1 + drivers/gpu/drm/i915/display/intel_sdvo.h | 1 + include/drm/i915_drm.h | 18 ------------------ 8 files changed, 26 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index 4969189e620f..98f064828a57 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -1,5 +1,5 @@ /* - * Copyright © 2016 Intel Corporation + * Copyright © 2016-2019 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -35,6 +35,7 @@ #include struct drm_i915_private; +enum port; enum intel_backlight_type { INTEL_BACKLIGHT_PMIC, diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 03321fb4a703..33fd523c4622 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -1,5 +1,5 @@ /* - * Copyright © 2006-2017 Intel Corporation + * Copyright © 2006-2019 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -182,6 +182,24 @@ enum plane_id { for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \ for_each_if((__crtc)->plane_ids_mask & BIT(__p)) +enum port { + PORT_NONE = -1, + + PORT_A = 0, + PORT_B, + PORT_C, + PORT_D, + PORT_E, + PORT_F, + PORT_G, + PORT_H, + PORT_I, + + I915_MAX_PORTS +}; + +#define port_name(p) ((p) + 'A') + /* * Ports identifier referenced from other drivers. 
* Expected to remain stable over time diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 657bbb1f5ed0..e01d1f89409d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -13,6 +13,7 @@ #include "i915_reg.h" enum pipe; +enum port; struct drm_connector_state; struct drm_encoder; struct drm_i915_private; diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h index 13555b054930..59a2b40405cc 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp.h @@ -15,6 +15,7 @@ struct drm_connector_state; struct drm_i915_private; struct intel_connector; struct intel_hdcp_shim; +enum port; void intel_hdcp_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index 106c2e0bc3c9..cf1ea5427639 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -23,6 +23,7 @@ struct intel_crtc_state; struct intel_hdmi; struct drm_connector_state; union hdmi_infoframe; +enum port; void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port); diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h index b0cd447b7fbc..087b5f57b321 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug.h @@ -13,6 +13,7 @@ struct drm_i915_private; struct intel_connector; struct intel_encoder; +enum port; void intel_hpd_poll_init(struct drm_i915_private *dev_priv); enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h index c9e05bcdd141..a66f224aa17d 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.h +++ b/drivers/gpu/drm/i915/display/intel_sdvo.h @@ -14,6 +14,7 @@ struct drm_i915_private; enum pipe; +enum port; bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t sdvo_reg, enum pipe *pipe); diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 23274cf92712..6722005884db 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -100,22 +100,4 @@ extern struct resource intel_graphics_stolen_res; #define INTEL_GEN11_BSM_DW1 0xc4 #define INTEL_BSM_MASK (-(1u << 20)) -enum port { - PORT_NONE = -1, - - PORT_A = 0, - PORT_B, - PORT_C, - PORT_D, - PORT_E, - PORT_F, - PORT_G, - PORT_H, - PORT_I, - - I915_MAX_PORTS -}; - -#define port_name(p) ((p) + 'A') - #endif /* _I915_DRM_H_ */ -- cgit v1.2.3 From 807c71d59a1063badc8bf60ee71470c2ff4be1cc Mon Sep 17 00:00:00 2001 From: Ramalingam C Date: Wed, 28 Aug 2019 22:12:13 +0530 Subject: drm: Extend I915 mei interface for transcoder info I915 needs to send the index of the transcoder as per ME FW. To support this, define enum mei_fw_tc and add as a member into the struct hdcp_port_data. 
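To make the firmware numbering concrete, here is a small self-contained sketch (not part of the patch) that maps an i915 display transcoder to the ME FW index using the same "| 0x10" relationship the i915 side adopts later in this series; it assumes TRANSCODER_A..TRANSCODER_D have the values 0..3, as in i915's enum transcoder, and uses a stand-in enum rather than the driver's own.

#include <stdio.h>

enum fake_transcoder { TRANSCODER_A = 0, TRANSCODER_B, TRANSCODER_C, TRANSCODER_D };

enum mei_fw_tc {
	MEI_INVALID_TRANSCODER = 0x00,
	MEI_TRANSCODER_EDP,
	MEI_TRANSCODER_DSI0,
	MEI_TRANSCODER_DSI1,
	MEI_TRANSCODER_A = 0x10,
	MEI_TRANSCODER_B,
	MEI_TRANSCODER_C,
	MEI_TRANSCODER_D
};

/* Only transcoders A..D are HDCP capable; everything else maps to invalid. */
static enum mei_fw_tc to_mei_fw_tc(enum fake_transcoder t)
{
	if (t <= TRANSCODER_D)
		return (enum mei_fw_tc)(t | 0x10);
	return MEI_INVALID_TRANSCODER;
}

int main(void)
{
	enum fake_transcoder t;

	for (t = TRANSCODER_A; t <= TRANSCODER_D; t++)
		printf("transcoder %c -> mei_fw_tc 0x%02x\n",
		       'A' + t, (unsigned)to_mei_fw_tc(t));
	return 0;
}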
v2: Typo in commit msg is fixed [Shashank] v3: kdoc is added for mei_fw_tc [Tomas] s/MEI_TC_x/MEI_TRANSCODER_x Signed-off-by: Ramalingam C Acked-by: Jani Nikula Acked-by: Tomas Winkler Reviewed-by: Shashank Sharma Signed-off-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20190828164216.405-4-ramalingam.c@intel.com --- include/drm/i915_mei_hdcp_interface.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h index 08670aa650d4..4d48de8890ca 100644 --- a/include/drm/i915_mei_hdcp_interface.h +++ b/include/drm/i915_mei_hdcp_interface.h @@ -54,9 +54,32 @@ enum mei_fw_ddi { MEI_DDI_RANGE_END = MEI_DDI_A, }; +/** + * enum mei_fw_tc - ME Firmware defined index for transcoders + * @MEI_INVALID_TRANSCODER: Index for Invalid transcoder + * @MEI_TRANSCODER_EDP: Index for EDP Transcoder + * @MEI_TRANSCODER_DSI0: Index for DSI0 Transcoder + * @MEI_TRANSCODER_DSI1: Index for DSI1 Transcoder + * @MEI_TRANSCODER_A: Index for Transcoder A + * @MEI_TRANSCODER_B: Index for Transcoder B + * @MEI_TRANSCODER_C: Index for Transcoder C + * @MEI_TRANSCODER_D: Index for Transcoder D + */ +enum mei_fw_tc { + MEI_INVALID_TRANSCODER = 0x00, + MEI_TRANSCODER_EDP, + MEI_TRANSCODER_DSI0, + MEI_TRANSCODER_DSI1, + MEI_TRANSCODER_A = 0x10, + MEI_TRANSCODER_B, + MEI_TRANSCODER_C, + MEI_TRANSCODER_D +}; + /** * struct hdcp_port_data - intel specific HDCP port data * @fw_ddi: ddi index as per ME FW + * @fw_tc: transcoder index as per ME FW * @port_type: HDCP port type as per ME FW classification * @protocol: HDCP adaptation as per ME FW * @k: No of streams transmitted on a port. Only on DP MST this is != 1 @@ -69,6 +92,7 @@ enum mei_fw_ddi { */ struct hdcp_port_data { enum mei_fw_ddi fw_ddi; + enum mei_fw_tc fw_tc; u8 port_type; u8 protocol; u16 k; -- cgit v1.2.3 From 2d15cf1b9ae4c623b1a05838317f4c18fd534c9a Mon Sep 17 00:00:00 2001 From: Ramalingam C Date: Wed, 28 Aug 2019 22:12:14 +0530 Subject: misc/mei/hdcp: Fill transcoder index in port info For gen12+ platform we need to pass the transcoder info as part of the port info into ME FW. This change fills the payload for ME FW from hdcp_port_data. 
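A compact user-space sketch (not from the patch; the helper name and the port_data stand-in are hypothetical) of how each ME FW message ends up carrying both identifiers after this change: on Gen11.5+ only attached_transcoder is meaningful and physical_port is sent as zero, while earlier platforms do the opposite.

#include <stdint.h>
#include <stdio.h>

struct wire_hdcp_port_id {		/* mirrors struct hdcp_port_id in mei_hdcp.h */
	uint8_t integrated_port_type;
	uint8_t physical_port;		/* used until Gen11.5 */
	uint8_t attached_transcoder;	/* used on Gen11.5+ */
	uint8_t reserved;
} __attribute__((packed));

struct fake_port_data {			/* stand-in for hdcp_port_data */
	uint8_t port_type;
	uint8_t fw_ddi;
	uint8_t fw_tc;
};

/* Copy the i915-provided indices into the wire format sent to ME FW. */
static void fill_port_id(struct wire_hdcp_port_id *id,
			 const struct fake_port_data *data)
{
	id->integrated_port_type = data->port_type;
	id->physical_port = data->fw_ddi;	/* left 0 (invalid) on Gen12+ */
	id->attached_transcoder = data->fw_tc;	/* left 0 (invalid) before Gen12 */
	id->reserved = 0;
}

int main(void)
{
	struct fake_port_data gen12 = { .port_type = 1, .fw_ddi = 0, .fw_tc = 0x10 };
	struct wire_hdcp_port_id id;

	fill_port_id(&id, &gen12);
	printf("port_type=%u ddi=%u transcoder=0x%02x\n",
	       (unsigned)id.integrated_port_type, (unsigned)id.physical_port,
	       (unsigned)id.attached_transcoder);
	return 0;
}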
v2: Doc is enhanced for physical_port and attached_transcoder [Tomas] Signed-off-by: Ramalingam C Acked-by: Jani Nikula Reviewed-by: Shashank Sharma Acked-by: Tomas Winkler Signed-off-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20190828164216.405-5-ramalingam.c@intel.com --- drivers/misc/mei/hdcp/mei_hdcp.c | 11 +++++++++++ drivers/misc/mei/hdcp/mei_hdcp.h | 5 ++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c index 3638c77eba26..93027fd96c71 100644 --- a/drivers/misc/mei/hdcp/mei_hdcp.c +++ b/drivers/misc/mei/hdcp/mei_hdcp.c @@ -58,6 +58,7 @@ mei_hdcp_initiate_session(struct device *dev, struct hdcp_port_data *data, session_init_in.port.integrated_port_type = data->port_type; session_init_in.port.physical_port = (u8)data->fw_ddi; + session_init_in.port.attached_transcoder = (u8)data->fw_tc; session_init_in.protocol = data->protocol; byte = mei_cldev_send(cldev, (u8 *)&session_init_in, @@ -127,6 +128,7 @@ mei_hdcp_verify_receiver_cert_prepare_km(struct device *dev, verify_rxcert_in.port.integrated_port_type = data->port_type; verify_rxcert_in.port.physical_port = (u8)data->fw_ddi; + verify_rxcert_in.port.attached_transcoder = (u8)data->fw_tc; verify_rxcert_in.cert_rx = rx_cert->cert_rx; memcpy(verify_rxcert_in.r_rx, &rx_cert->r_rx, HDCP_2_2_RRX_LEN); @@ -197,6 +199,7 @@ mei_hdcp_verify_hprime(struct device *dev, struct hdcp_port_data *data, send_hprime_in.port.integrated_port_type = data->port_type; send_hprime_in.port.physical_port = (u8)data->fw_ddi; + send_hprime_in.port.attached_transcoder = (u8)data->fw_tc; memcpy(send_hprime_in.h_prime, rx_hprime->h_prime, HDCP_2_2_H_PRIME_LEN); @@ -254,6 +257,7 @@ mei_hdcp_store_pairing_info(struct device *dev, struct hdcp_port_data *data, pairing_info_in.port.integrated_port_type = data->port_type; pairing_info_in.port.physical_port = (u8)data->fw_ddi; + pairing_info_in.port.attached_transcoder = (u8)data->fw_tc; memcpy(pairing_info_in.e_kh_km, pairing_info->e_kh_km, HDCP_2_2_E_KH_KM_LEN); @@ -312,6 +316,7 @@ mei_hdcp_initiate_locality_check(struct device *dev, lc_init_in.port.integrated_port_type = data->port_type; lc_init_in.port.physical_port = (u8)data->fw_ddi; + lc_init_in.port.attached_transcoder = (u8)data->fw_tc; byte = mei_cldev_send(cldev, (u8 *)&lc_init_in, sizeof(lc_init_in)); if (byte < 0) { @@ -367,6 +372,7 @@ mei_hdcp_verify_lprime(struct device *dev, struct hdcp_port_data *data, verify_lprime_in.port.integrated_port_type = data->port_type; verify_lprime_in.port.physical_port = (u8)data->fw_ddi; + verify_lprime_in.port.attached_transcoder = (u8)data->fw_tc; memcpy(verify_lprime_in.l_prime, rx_lprime->l_prime, HDCP_2_2_L_PRIME_LEN); @@ -424,6 +430,7 @@ static int mei_hdcp_get_session_key(struct device *dev, get_skey_in.port.integrated_port_type = data->port_type; get_skey_in.port.physical_port = (u8)data->fw_ddi; + get_skey_in.port.attached_transcoder = (u8)data->fw_tc; byte = mei_cldev_send(cldev, (u8 *)&get_skey_in, sizeof(get_skey_in)); if (byte < 0) { @@ -488,6 +495,7 @@ mei_hdcp_repeater_check_flow_prepare_ack(struct device *dev, verify_repeater_in.port.integrated_port_type = data->port_type; verify_repeater_in.port.physical_port = (u8)data->fw_ddi; + verify_repeater_in.port.attached_transcoder = (u8)data->fw_tc; memcpy(verify_repeater_in.rx_info, rep_topology->rx_info, HDCP_2_2_RXINFO_LEN); @@ -558,6 +566,7 @@ static int mei_hdcp_verify_mprime(struct device *dev, verify_mprime_in.port.integrated_port_type = 
data->port_type; verify_mprime_in.port.physical_port = (u8)data->fw_ddi; + verify_mprime_in.port.attached_transcoder = (u8)data->fw_tc; memcpy(verify_mprime_in.m_prime, stream_ready->m_prime, HDCP_2_2_MPRIME_LEN); @@ -619,6 +628,7 @@ static int mei_hdcp_enable_authentication(struct device *dev, enable_auth_in.port.integrated_port_type = data->port_type; enable_auth_in.port.physical_port = (u8)data->fw_ddi; + enable_auth_in.port.attached_transcoder = (u8)data->fw_tc; enable_auth_in.stream_type = data->streams[0].stream_type; byte = mei_cldev_send(cldev, (u8 *)&enable_auth_in, @@ -673,6 +683,7 @@ mei_hdcp_close_session(struct device *dev, struct hdcp_port_data *data) session_close_in.port.integrated_port_type = data->port_type; session_close_in.port.physical_port = (u8)data->fw_ddi; + session_close_in.port.attached_transcoder = (u8)data->fw_tc; byte = mei_cldev_send(cldev, (u8 *)&session_close_in, sizeof(session_close_in)); diff --git a/drivers/misc/mei/hdcp/mei_hdcp.h b/drivers/misc/mei/hdcp/mei_hdcp.h index e60282eb2d48..18ffc773fa18 100644 --- a/drivers/misc/mei/hdcp/mei_hdcp.h +++ b/drivers/misc/mei/hdcp/mei_hdcp.h @@ -184,8 +184,11 @@ struct hdcp_cmd_no_data { /* Uniquely identifies the hdcp port being addressed for a given command. */ struct hdcp_port_id { u8 integrated_port_type; + /* physical_port is used until Gen11.5. Must be zero for Gen11.5+ */ u8 physical_port; - u16 reserved; + /* attached_transcoder is for Gen11.5+. Set to zero for <Gen11.5 */ + u8 attached_transcoder; + u8 reserved; } __packed; -- cgit v1.2.3 From: Ramalingam C Date: Wed, 28 Aug 2019 22:12:15 +0530 Subject: drm/i915/hdcp: update current transcoder into intel_hdcp On gen12+ platforms, HDCP HW is associated to the transcoder. Hence on every modeset, update the associated transcoder into the intel_hdcp of the port. v2: s/trans/cpu_transcoder [Jani] v3: comment is added for fw_ddi init for gen12+ [Shashank] only hdcp capable transcoder is translated into fw_tc [Shashank] v4: fw_tc initialization is kept for modeset. [Tomas] few extra doc is added at port_data init [Tomas] v5: Few comments are improvised [Tomas] Signed-off-by: Ramalingam C Acked-by: Jani Nikula Reviewed-by: Shashank Sharma Signed-off-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20190828164216.405-6-ramalingam.c@intel.com --- drivers/gpu/drm/i915/display/intel_display_types.h | 7 ++++ drivers/gpu/drm/i915/display/intel_dp.c | 3 ++ drivers/gpu/drm/i915/display/intel_hdcp.c | 47 +++++++++++++++++++++- drivers/gpu/drm/i915/display/intel_hdcp.h | 3 ++ drivers/gpu/drm/i915/display/intel_hdmi.c | 3 ++ 5 files changed, 62 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 96514dcc7812..61277a87dbe7 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -388,6 +388,13 @@ struct intel_hdcp { wait_queue_head_t cp_irq_queue; atomic_t cp_irq_count; int cp_irq_count_cached; + + /* + * HDCP register access for gen12+ need the transcoder associated. + * Transcoder attached to the connector could be changed at modeset. + * Hence caching the transcoder here.
+ */ + enum transcoder cpu_transcoder; }; struct intel_connector { diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index c9986c7a3f5b..aaa90992a7f0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2261,6 +2261,9 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_psr_compute_config(intel_dp, pipe_config); + intel_hdcp_transcoder_config(intel_connector, + pipe_config->cpu_transcoder); + return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index e8b04cc8fcb1..edcec64a2c11 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -1764,13 +1764,58 @@ enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port) } } +static inline +enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder) +{ + switch (cpu_transcoder) { + case TRANSCODER_A ... TRANSCODER_D: + return (enum mei_fw_tc)(cpu_transcoder | 0x10); + default: /* eDP, DSI TRANSCODERS are non HDCP capable */ + return MEI_INVALID_TRANSCODER; + } +} + +void intel_hdcp_transcoder_config(struct intel_connector *connector, + enum transcoder cpu_transcoder) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_hdcp *hdcp = &connector->hdcp; + + if (!hdcp->shim) + return; + + if (INTEL_GEN(dev_priv) >= 12) { + mutex_lock(&hdcp->mutex); + hdcp->cpu_transcoder = cpu_transcoder; + hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder); + mutex_unlock(&hdcp->mutex); + } +} + static inline int initialize_hdcp_port_data(struct intel_connector *connector, const struct intel_hdcp_shim *shim) { + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; struct hdcp_port_data *data = &hdcp->port_data; - data->fw_ddi = intel_get_mei_fw_ddi_index(connector->encoder->port); + if (INTEL_GEN(dev_priv) < 12) + data->fw_ddi = + intel_get_mei_fw_ddi_index(connector->encoder->port); + else + /* + * As per ME FW API expectation, for GEN 12+, fw_ddi is filled + * with zero(INVALID PORT index). + */ + data->fw_ddi = MEI_DDI_INVALID_PORT; + + /* + * As associated transcoder is set and modified at modeset, here fw_tc + * is initialized to zero (invalid transcoder index). 
This will be + * retained for <Gen12 forever. + */ + data->fw_tc = MEI_INVALID_TRANSCODER; + data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; data->protocol = (u8)shim->protocol; diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h index 59a2b40405cc..41c1053d9e38 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp.h @@ -16,10 +16,13 @@ struct drm_i915_private; struct intel_connector; struct intel_hdcp_shim; enum port; +enum transcoder; void intel_hdcp_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state); +void intel_hdcp_transcoder_config(struct intel_connector *connector, + enum transcoder cpu_transcoder); int intel_hdcp_init(struct intel_connector *connector, const struct intel_hdcp_shim *hdcp_shim); int intel_hdcp_enable(struct intel_connector *connector, u8 content_type); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index e02f0faecf02..6e9bb6bd1ee2 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2431,6 +2431,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, return -EINVAL; } + intel_hdcp_transcoder_config(intel_hdmi->attached_connector, + pipe_config->cpu_transcoder); + return 0; } -- cgit v1.2.3 From 692059318c0fc6c3584b861adc67abbb0c1598f0 Mon Sep 17 00:00:00 2001 From: Ramalingam C Date: Wed, 28 Aug 2019 22:12:16 +0530 Subject: drm/i915/hdcp: Enable HDCP 1.4 and 2.2 on Gen12+ From Gen12 onwards, the HDCP HW block is implemented within transcoders. Till Gen11 the HDCP HW block was part of DDI. Hence the required changes in HW programming are handled here. As ME FW needs the transcoder detail on which HDCP is enabled on Gen12+ platforms, we are populating the detail in hdcp_port_data. v2: _MMIO_TRANS is used [Lucas and Daniel] platform check is moved into the caller [Lucas] v3: platform check is moved into a macro [Shashank] v4: Few optimizations in the coding [Shashank] v5: Fixed alignment in macro definition in i915_reg.h [Shashank] unused variable "reg" is removed. v6: Configuring the transcoder at compute_config. transcoder is used instead of pipe in macros. Rebased. v7: transcoder is cached at intel_hdcp hdcp_port_data is configured with transcoder index as per ME FW. v8: s/trans/cpu_transcoder s/tc/cpu_transcoder v9: rep_ctl is prepared for TCD too.
return moved into deault of rep_ctl prepare function [Shashank] Signed-off-by: Ramalingam C Reviewed-by: Shashank Sharma Acked-by: Jani Nikula Signed-off-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20190828164216.405-7-ramalingam.c@intel.com --- drivers/gpu/drm/i915/display/intel_hdcp.c | 152 +++++++++++++++++++----------- drivers/gpu/drm/i915/display/intel_hdmi.c | 10 +- drivers/gpu/drm/i915/i915_reg.h | 124 ++++++++++++++++++++++-- 3 files changed, 221 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index edcec64a2c11..e69fa34528df 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -20,6 +20,7 @@ #include "intel_display_types.h" #include "intel_hdcp.h" #include "intel_sideband.h" +#include "intel_connector.h" #define KEY_LOAD_TRIES 5 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50 @@ -107,24 +108,20 @@ bool intel_hdcp2_capable(struct intel_connector *connector) return capable; } -static inline bool intel_hdcp_in_use(struct intel_connector *connector) +static inline +bool intel_hdcp_in_use(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder, enum port port) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - enum port port = connector->encoder->port; - u32 reg; - - reg = I915_READ(PORT_HDCP_STATUS(port)); - return reg & HDCP_STATUS_ENC; + return I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) & + HDCP_STATUS_ENC; } -static inline bool intel_hdcp2_in_use(struct intel_connector *connector) +static inline +bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder, enum port port) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - enum port port = connector->encoder->port; - u32 reg; - - reg = I915_READ(HDCP2_STATUS_DDI(port)); - return reg & LINK_ENCRYPTION_STATUS; + return I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + LINK_ENCRYPTION_STATUS; } static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port, @@ -255,9 +252,29 @@ static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text) } static -u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port) +u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder, enum port port) { - enum port port = intel_dig_port->base.port; + if (INTEL_GEN(dev_priv) >= 12) { + switch (cpu_transcoder) { + case TRANSCODER_A: + return HDCP_TRANSA_REP_PRESENT | + HDCP_TRANSA_SHA1_M0; + case TRANSCODER_B: + return HDCP_TRANSB_REP_PRESENT | + HDCP_TRANSB_SHA1_M0; + case TRANSCODER_C: + return HDCP_TRANSC_REP_PRESENT | + HDCP_TRANSC_SHA1_M0; + case TRANSCODER_D: + return HDCP_TRANSD_REP_PRESENT | + HDCP_TRANSD_SHA1_M0; + default: + DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder); + return -EINVAL; + } + } + switch (port) { case PORT_A: return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0; @@ -270,18 +287,20 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port) case PORT_E: return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; default: - break; + DRM_ERROR("Unknown port %d\n", port); + return -EINVAL; } - DRM_ERROR("Unknown port %d\n", port); - return -EINVAL; } static -int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, +int intel_hdcp_validate_v_prime(struct intel_connector *connector, const struct intel_hdcp_shim *shim, u8 *ksv_fifo, u8 num_downstream, u8 
*bstatus) { + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); struct drm_i915_private *dev_priv; + enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; + enum port port = intel_dig_port->base.port; u32 vprime, sha_text, sha_leftovers, rep_ctl; int ret, i, j, sha_idx; @@ -308,7 +327,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, sha_idx = 0; sha_text = 0; sha_leftovers = 0; - rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port); + rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port); I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); for (i = 0; i < num_downstream; i++) { unsigned int sha_empty; @@ -550,7 +569,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) * V prime atleast twice. */ for (i = 0; i < tries; i++) { - ret = intel_hdcp_validate_v_prime(intel_dig_port, shim, + ret = intel_hdcp_validate_v_prime(connector, shim, ksv_fifo, num_downstream, bstatus); if (!ret) @@ -578,6 +597,7 @@ static int intel_hdcp_auth(struct intel_connector *connector) struct drm_device *dev = connector->base.dev; const struct intel_hdcp_shim *shim = hdcp->shim; struct drm_i915_private *dev_priv; + enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; enum port port; unsigned long r0_prime_gen_start; int ret, i, tries = 2; @@ -617,18 +637,21 @@ static int intel_hdcp_auth(struct intel_connector *connector) /* Initialize An with 2 random values and acquire it */ for (i = 0; i < 2; i++) - I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32()); - I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN); + I915_WRITE(HDCP_ANINIT(dev_priv, cpu_transcoder, port), + get_random_u32()); + I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), + HDCP_CONF_CAPTURE_AN); /* Wait for An to be acquired */ - if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port), + if (intel_de_wait_for_set(dev_priv, + HDCP_STATUS(dev_priv, cpu_transcoder, port), HDCP_STATUS_AN_READY, 1)) { DRM_ERROR("Timed out waiting for An\n"); return -ETIMEDOUT; } - an.reg[0] = I915_READ(PORT_HDCP_ANLO(port)); - an.reg[1] = I915_READ(PORT_HDCP_ANHI(port)); + an.reg[0] = I915_READ(HDCP_ANLO(dev_priv, cpu_transcoder, port)); + an.reg[1] = I915_READ(HDCP_ANHI(dev_priv, cpu_transcoder, port)); ret = shim->write_an_aksv(intel_dig_port, an.shim); if (ret) return ret; @@ -646,24 +669,26 @@ static int intel_hdcp_auth(struct intel_connector *connector) return -EPERM; } - I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]); - I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]); + I915_WRITE(HDCP_BKSVLO(dev_priv, cpu_transcoder, port), bksv.reg[0]); + I915_WRITE(HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]); ret = shim->repeater_present(intel_dig_port, &repeater_present); if (ret) return ret; if (repeater_present) I915_WRITE(HDCP_REP_CTL, - intel_hdcp_get_repeater_ctl(intel_dig_port)); + intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, + port)); ret = shim->toggle_signalling(intel_dig_port, true); if (ret) return ret; - I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC); + I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), + HDCP_CONF_AUTH_AND_ENC); /* Wait for R0 ready */ - if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) & + if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) & (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { DRM_ERROR("Timed out waiting for R0 ready\n"); return -ETIMEDOUT; @@ -691,22 +716,25 @@ static int intel_hdcp_auth(struct intel_connector *connector) ret = 
shim->read_ri_prime(intel_dig_port, ri.shim); if (ret) return ret; - I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg); + I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg); /* Wait for Ri prime match */ - if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) & + if (!wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, + port)) & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) break; } if (i == tries) { DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n", - I915_READ(PORT_HDCP_STATUS(port))); + I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, + port))); return -ETIMEDOUT; } /* Wait for encryption confirmation */ - if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port), + if (intel_de_wait_for_set(dev_priv, + HDCP_STATUS(dev_priv, cpu_transcoder, port), HDCP_STATUS_ENC, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { DRM_ERROR("Timed out waiting for encryption\n"); @@ -731,15 +759,17 @@ static int _intel_hdcp_disable(struct intel_connector *connector) struct drm_i915_private *dev_priv = connector->base.dev->dev_private; struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); enum port port = intel_dig_port->base.port; + enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n", connector->base.name, connector->base.base.id); hdcp->hdcp_encrypted = false; - I915_WRITE(PORT_HDCP_CONF(port), 0); - if (intel_de_wait_for_clear(dev_priv, PORT_HDCP_STATUS(port), ~0, - ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { + I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), 0); + if (intel_de_wait_for_clear(dev_priv, + HDCP_STATUS(dev_priv, cpu_transcoder, port), + ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { DRM_ERROR("Failed to disable HDCP, timeout clearing status\n"); return -ETIMEDOUT; } @@ -810,9 +840,11 @@ static int intel_hdcp_check_link(struct intel_connector *connector) struct drm_i915_private *dev_priv = connector->base.dev->dev_private; struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); enum port port = intel_dig_port->base.port; + enum transcoder cpu_transcoder; int ret = 0; mutex_lock(&hdcp->mutex); + cpu_transcoder = hdcp->cpu_transcoder; /* Check_link valid only when HDCP1.4 is enabled */ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || @@ -821,10 +853,11 @@ static int intel_hdcp_check_link(struct intel_connector *connector) goto out; } - if (WARN_ON(!intel_hdcp_in_use(connector))) { + if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) { DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n", connector->base.name, connector->base.base.id, - I915_READ(PORT_HDCP_STATUS(port))); + I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, + port))); ret = -ENXIO; hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; schedule_work(&hdcp->prop_work); @@ -1495,10 +1528,11 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = connector->encoder->port; + enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; - WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS); - + WARN_ON(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + LINK_ENCRYPTION_STATUS); if (hdcp->shim->toggle_signalling) { ret = hdcp->shim->toggle_signalling(intel_dig_port, true); if (ret) { @@ -1508,14 +1542,18 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) } } - if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) { + if 
(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + LINK_AUTH_STATUS) { /* Link is Authenticated. Now set for Encryption */ - I915_WRITE(HDCP2_CTL_DDI(port), - I915_READ(HDCP2_CTL_DDI(port)) | + I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port), + I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, + port)) | CTL_LINK_ENCRYPTION_REQ); } - ret = intel_de_wait_for_set(dev_priv, HDCP2_STATUS_DDI(port), + ret = intel_de_wait_for_set(dev_priv, + HDCP2_STATUS(dev_priv, cpu_transcoder, + port), LINK_ENCRYPTION_STATUS, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); @@ -1528,14 +1566,19 @@ static int hdcp2_disable_encryption(struct intel_connector *connector) struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = connector->encoder->port; + enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; - WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS)); + WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + LINK_ENCRYPTION_STATUS)); - I915_WRITE(HDCP2_CTL_DDI(port), - I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ); + I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port), + I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, port)) & + ~CTL_LINK_ENCRYPTION_REQ); - ret = intel_de_wait_for_clear(dev_priv, HDCP2_STATUS_DDI(port), + ret = intel_de_wait_for_clear(dev_priv, + HDCP2_STATUS(dev_priv, cpu_transcoder, + port), LINK_ENCRYPTION_STATUS, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); if (ret == -ETIMEDOUT) @@ -1634,9 +1677,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = connector->encoder->port; + enum transcoder cpu_transcoder; int ret = 0; mutex_lock(&hdcp->mutex); + cpu_transcoder = hdcp->cpu_transcoder; /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || @@ -1645,9 +1690,10 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) goto out; } - if (WARN_ON(!intel_hdcp2_in_use(connector))) { + if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) { DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n", - I915_READ(HDCP2_STATUS_DDI(port))); + I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, + port))); ret = -ENXIO; hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; schedule_work(&hdcp->prop_work); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 6e9bb6bd1ee2..f6bb77fd0930 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -1491,7 +1491,10 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) { struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private; + struct intel_connector *connector = + intel_dig_port->hdmi.attached_connector; enum port port = intel_dig_port->base.port; + enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; int ret; union { u32 reg; @@ -1502,13 +1505,14 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) if (ret) return false; - I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg); + I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg); /* Wait for Ri prime match */ - if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) & + if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) { 
DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n", - I915_READ(PORT_HDCP_STATUS(port))); + I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, + port))); return false; } return true; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 02e1ef10c47e..3cfdab18c0cf 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9281,12 +9281,20 @@ enum skl_power_gate { /* HDCP Repeater Registers */ #define HDCP_REP_CTL _MMIO(0x66d00) +#define HDCP_TRANSA_REP_PRESENT BIT(31) +#define HDCP_TRANSB_REP_PRESENT BIT(30) +#define HDCP_TRANSC_REP_PRESENT BIT(29) +#define HDCP_TRANSD_REP_PRESENT BIT(28) #define HDCP_DDIB_REP_PRESENT BIT(30) #define HDCP_DDIA_REP_PRESENT BIT(29) #define HDCP_DDIC_REP_PRESENT BIT(28) #define HDCP_DDID_REP_PRESENT BIT(27) #define HDCP_DDIF_REP_PRESENT BIT(26) #define HDCP_DDIE_REP_PRESENT BIT(25) +#define HDCP_TRANSA_SHA1_M0 (1 << 20) +#define HDCP_TRANSB_SHA1_M0 (2 << 20) +#define HDCP_TRANSC_SHA1_M0 (3 << 20) +#define HDCP_TRANSD_SHA1_M0 (4 << 20) #define HDCP_DDIB_SHA1_M0 (1 << 20) #define HDCP_DDIA_SHA1_M0 (2 << 20) #define HDCP_DDIC_SHA1_M0 (3 << 20) @@ -9326,15 +9334,92 @@ enum skl_power_gate { _PORTE_HDCP_AUTHENC, \ _PORTF_HDCP_AUTHENC) + (x)) #define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0) +#define _TRANSA_HDCP_CONF 0x66400 +#define _TRANSB_HDCP_CONF 0x66500 +#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \ + _TRANSB_HDCP_CONF) +#define HDCP_CONF(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP_CONF(trans) : \ + PORT_HDCP_CONF(port)) + #define HDCP_CONF_CAPTURE_AN BIT(0) #define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0)) #define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4) +#define _TRANSA_HDCP_ANINIT 0x66404 +#define _TRANSB_HDCP_ANINIT 0x66504 +#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_ANINIT, \ + _TRANSB_HDCP_ANINIT) +#define HDCP_ANINIT(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP_ANINIT(trans) : \ + PORT_HDCP_ANINIT(port)) + #define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8) +#define _TRANSA_HDCP_ANLO 0x66408 +#define _TRANSB_HDCP_ANLO 0x66508 +#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \ + _TRANSB_HDCP_ANLO) +#define HDCP_ANLO(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP_ANLO(trans) : \ + PORT_HDCP_ANLO(port)) + #define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC) +#define _TRANSA_HDCP_ANHI 0x6640C +#define _TRANSB_HDCP_ANHI 0x6650C +#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \ + _TRANSB_HDCP_ANHI) +#define HDCP_ANHI(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP_ANHI(trans) : \ + PORT_HDCP_ANHI(port)) + #define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10) +#define _TRANSA_HDCP_BKSVLO 0x66410 +#define _TRANSB_HDCP_BKSVLO 0x66510 +#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_BKSVLO, \ + _TRANSB_HDCP_BKSVLO) +#define HDCP_BKSVLO(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP_BKSVLO(trans) : \ + PORT_HDCP_BKSVLO(port)) + #define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14) +#define _TRANSA_HDCP_BKSVHI 0x66414 +#define _TRANSB_HDCP_BKSVHI 0x66514 +#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_BKSVHI, \ + _TRANSB_HDCP_BKSVHI) +#define HDCP_BKSVHI(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? 
\ + TRANS_HDCP_BKSVHI(trans) : \ + PORT_HDCP_BKSVHI(port)) + #define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18) +#define _TRANSA_HDCP_RPRIME 0x66418 +#define _TRANSB_HDCP_RPRIME 0x66518 +#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_RPRIME, \ + _TRANSB_HDCP_RPRIME) +#define HDCP_RPRIME(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP_RPRIME(trans) : \ + PORT_HDCP_RPRIME(port)) + #define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C) +#define _TRANSA_HDCP_STATUS 0x6641C +#define _TRANSB_HDCP_STATUS 0x6651C +#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_STATUS, \ + _TRANSB_HDCP_STATUS) +#define HDCP_STATUS(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP_STATUS(trans) : \ + PORT_HDCP_STATUS(port)) + #define HDCP_STATUS_STREAM_A_ENC BIT(31) #define HDCP_STATUS_STREAM_B_ENC BIT(30) #define HDCP_STATUS_STREAM_C_ENC BIT(29) @@ -9361,23 +9446,44 @@ enum skl_power_gate { _PORTD_HDCP2_BASE, \ _PORTE_HDCP2_BASE, \ _PORTF_HDCP2_BASE) + (x)) - -#define HDCP2_AUTH_DDI(port) _PORT_HDCP2_BASE(port, 0x98) +#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98) +#define _TRANSA_HDCP2_AUTH 0x66498 +#define _TRANSB_HDCP2_AUTH 0x66598 +#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \ + _TRANSB_HDCP2_AUTH) #define AUTH_LINK_AUTHENTICATED BIT(31) #define AUTH_LINK_TYPE BIT(30) #define AUTH_FORCE_CLR_INPUTCTR BIT(19) #define AUTH_CLR_KEYS BIT(18) - -#define HDCP2_CTL_DDI(port) _PORT_HDCP2_BASE(port, 0xB0) +#define HDCP2_AUTH(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP2_AUTH(trans) : \ + PORT_HDCP2_AUTH(port)) + +#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0) +#define _TRANSA_HDCP2_CTL 0x664B0 +#define _TRANSB_HDCP2_CTL 0x665B0 +#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \ + _TRANSB_HDCP2_CTL) #define CTL_LINK_ENCRYPTION_REQ BIT(31) - -#define HDCP2_STATUS_DDI(port) _PORT_HDCP2_BASE(port, 0xB4) -#define STREAM_ENCRYPTION_STATUS_A BIT(31) -#define STREAM_ENCRYPTION_STATUS_B BIT(30) -#define STREAM_ENCRYPTION_STATUS_C BIT(29) +#define HDCP2_CTL(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP2_CTL(trans) : \ + PORT_HDCP2_CTL(port)) + +#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4) +#define _TRANSA_HDCP2_STATUS 0x664B4 +#define _TRANSB_HDCP2_STATUS 0x665B4 +#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP2_STATUS, \ + _TRANSB_HDCP2_STATUS) #define LINK_TYPE_STATUS BIT(22) #define LINK_AUTH_STATUS BIT(21) #define LINK_ENCRYPTION_STATUS BIT(20) +#define HDCP2_STATUS(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP2_STATUS(trans) : \ + PORT_HDCP2_STATUS(port)) /* Per-pipe DDI Function Control */ #define _TRANS_DDI_FUNC_CTL_A 0x60400 -- cgit v1.2.3 From b047463c852272ef9956ad3a4c706f78f8b06c17 Mon Sep 17 00:00:00 2001 From: Joonas Lahtinen Date: Fri, 30 Aug 2019 11:58:48 +0300 Subject: drm/i915: Remove link to missing "Batchbuffer Pools" documentation The referenced documentation section has been removed. Remove the link to avoid warning when building the documentation. 
Signed-off-by: Joonas Lahtinen Cc: Chris Wilson Cc: Matthew Auld Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190830085849.12519-1-joonas.lahtinen@linux.intel.com --- Documentation/gpu/i915.rst | 9 --------- 1 file changed, 9 deletions(-) diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst index 3415255ad3dc..7d1f65612856 100644 --- a/Documentation/gpu/i915.rst +++ b/Documentation/gpu/i915.rst @@ -358,15 +358,6 @@ Batchbuffer Parsing .. kernel-doc:: drivers/gpu/drm/i915/i915_cmd_parser.c :internal: -Batchbuffer Pools ------------------ - -.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_batch_pool.c - :doc: batch pool - -.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_batch_pool.c - :internal: - User Batchbuffer Execution -------------------------- -- cgit v1.2.3 From 4072761b981c630acbc6195391e5a22bae114e39 Mon Sep 17 00:00:00 2001 From: Joonas Lahtinen Date: Fri, 30 Aug 2019 11:58:49 +0300 Subject: drm/i915: Indent GuC/WOPCM documentation sections Indent GuC/WOPCM documentation correctly to reside under "Memory Management and Command Submission" section to avoid it escaping to the upper level navigation. Signed-off-by: Joonas Lahtinen Cc: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Acked-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190830085849.12519-2-joonas.lahtinen@linux.intel.com --- Documentation/gpu/i915.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst index 7d1f65612856..e249ea7b0ec7 100644 --- a/Documentation/gpu/i915.rst +++ b/Documentation/gpu/i915.rst @@ -407,31 +407,31 @@ Object Tiling IOCTLs :doc: buffer object tiling WOPCM -===== +----- WOPCM Layout ------------- +~~~~~~~~~~~~ .. kernel-doc:: drivers/gpu/drm/i915/intel_wopcm.c :doc: WOPCM Layout GuC -=== +--- Firmware Layout -------------------- +~~~~~~~~~~~~~~~ .. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h :doc: Firmware Layout GuC-specific firmware loader ----------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c :internal: GuC-based command submission ----------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c :doc: GuC-based command submission @@ -440,7 +440,7 @@ GuC-based command submission :internal: GuC Address Space ------------------ +~~~~~~~~~~~~~~~~~ .. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc.c :doc: GuC Address Space -- cgit v1.2.3 From ed3126fa0d393825f2f134668b31363581035825 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 29 Aug 2019 14:15:23 -0700 Subject: drm/i915: parameterize south hpd macros MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit South, follow the north. Instead of defining separate macros for each port, make them take port as parameter as done for TC ports and for north engine. This will allow us to easily extend this as needed. tgp_ddi_port_hotplug_long_detect() is also removed as after the EHL introduction the tgp variant is an exact copy of icp. 
Signed-off-by: Lucas De Marchi Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190829211526.30525-1-jose.souza@intel.com Signed-off-by: José Roberto de Souza --- drivers/gpu/drm/i915/i915_irq.c | 22 ++++------------------ drivers/gpu/drm/i915/i915_reg.h | 36 ++++++++++++------------------------ 2 files changed, 16 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 3f1b6ee157ba..084e322ec15b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1401,11 +1401,11 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_A: - return val & ICP_DDIA_HPD_LONG_DETECT; + return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A); case HPD_PORT_B: - return val & ICP_DDIB_HPD_LONG_DETECT; + return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B); case HPD_PORT_C: - return val & TGP_DDIC_HPD_LONG_DETECT; + return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C); default: return false; } @@ -1427,20 +1427,6 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) } } -static bool tgp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_A: - return val & ICP_DDIA_HPD_LONG_DETECT; - case HPD_PORT_B: - return val & ICP_DDIB_HPD_LONG_DETECT; - case HPD_PORT_C: - return val & TGP_DDIC_HPD_LONG_DETECT; - default: - return false; - } -} - static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { @@ -2318,7 +2304,7 @@ static void tgp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, ddi_hotplug_trigger, dig_hotplug_reg, hpd_tgp, - tgp_ddi_port_hotplug_long_detect); + icp_ddi_port_hotplug_long_detect); } if (tc_hotplug_trigger) { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3cfdab18c0cf..946beb9e295e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7928,26 +7928,13 @@ enum { * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC. 
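For reference, the resulting if ladder boils down to: TGP uses SDE_DDI_MASK_TGP / SDE_TC_MASK_TGP with tgp_tc_port_hotplug_long_detect and hpd_tgp; MCC uses SDE_DDI_MASK_TGP with no TC trigger and hpd_mcc; everything else (ICP) uses SDE_DDI_MASK_ICP / SDE_TC_MASK_ICP with icp_tc_port_hotplug_long_detect and hpd_icp. Leaving tc_port_hotplug_long_detect unset in the MCC branch is safe because tc_hotplug_trigger is 0 there, so the pointer is only ever used under if (tc_hotplug_trigger).
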
*/ -#define SHOTPLUG_CTL_DDI _MMIO(0xc4030) -#define TGP_DDIC_HPD_ENABLE (1 << 11) -#define TGP_DDIC_HPD_STATUS_MASK (3 << 8) -#define TGP_DDIC_HPD_NO_DETECT (0 << 8) -#define TGP_DDIC_HPD_SHORT_DETECT (1 << 8) -#define TGP_DDIC_HPD_LONG_DETECT (2 << 8) -#define TGP_DDIC_HPD_SHORT_LONG_DETECT (3 << 8) -#define ICP_DDIB_HPD_ENABLE (1 << 7) -#define ICP_DDIB_HPD_STATUS_MASK (3 << 4) -#define ICP_DDIB_HPD_NO_DETECT (0 << 4) -#define ICP_DDIB_HPD_SHORT_DETECT (1 << 4) -#define ICP_DDIB_HPD_LONG_DETECT (2 << 4) -#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4) -#define ICP_DDIA_HPD_ENABLE (1 << 3) -#define ICP_DDIA_HPD_OP_DRIVE_1 (1 << 2) -#define ICP_DDIA_HPD_STATUS_MASK (3 << 0) -#define ICP_DDIA_HPD_NO_DETECT (0 << 0) -#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0) -#define ICP_DDIA_HPD_LONG_DETECT (2 << 0) -#define ICP_DDIA_HPD_SHORT_LONG_DETECT (3 << 0) +#define SHOTPLUG_CTL_DDI _MMIO(0xc4030) +#define SHOTPLUG_CTL_DDI_HPD_ENABLE(port) (0x8 << (4 * (port))) +#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(port) (0x3 << (4 * (port))) +#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(port) (0x0 << (4 * (port))) +#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(port) (0x1 << (4 * (port))) +#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(port) (0x2 << (4 * (port))) +#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(port) (0x3 << (4 * (port))) #define SHOTPLUG_CTL_TC _MMIO(0xc4034) #define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4) @@ -8058,14 +8045,15 @@ enum { #define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4) #define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4) -#define ICP_DDI_HPD_ENABLE_MASK (ICP_DDIB_HPD_ENABLE | \ - ICP_DDIA_HPD_ENABLE) +#define ICP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \ + SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A)) #define ICP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC4) | \ ICP_TC_HPD_ENABLE(PORT_TC3) | \ ICP_TC_HPD_ENABLE(PORT_TC2) | \ ICP_TC_HPD_ENABLE(PORT_TC1)) -#define TGP_DDI_HPD_ENABLE_MASK (TGP_DDIC_HPD_ENABLE | \ - ICP_DDI_HPD_ENABLE_MASK) +#define TGP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_C) | \ + SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \ + SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A)) #define TGP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC6) | \ ICP_TC_HPD_ENABLE(PORT_TC5) | \ ICP_TC_HPD_ENABLE_MASK) -- cgit v1.2.3 From 58676af69c2eb1e0202215fcfc1d421f186226ed Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 29 Aug 2019 14:15:24 -0700 Subject: drm/i915: unify icp, tgp and mcc irq handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The differences are only on the pins, trigger and long_detect function. The MCC handling is already partially merged, so merge TGP as well. Remove the pins argument from icp_irq_handler() so we have all the differences between the 3 set in a common if ladder. 
Signed-off-by: Lucas De Marchi Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190829211526.30525-2-jose.souza@intel.com Signed-off-by: José Roberto de Souza --- drivers/gpu/drm/i915/i915_irq.c | 65 ++++++++++------------------------------- 1 file changed, 16 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 084e322ec15b..5f590987dcd5 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2243,19 +2243,27 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) cpt_serr_int_handler(dev_priv); } -static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir, - const u32 *pins) +static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - u32 ddi_hotplug_trigger; - u32 tc_hotplug_trigger; + u32 ddi_hotplug_trigger, tc_hotplug_trigger; u32 pin_mask = 0, long_mask = 0; + bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val); + const u32 *pins; - if (HAS_PCH_MCC(dev_priv)) { + if (HAS_PCH_TGP(dev_priv)) { + ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; + tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP; + tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect; + pins = hpd_tgp; + } else if (HAS_PCH_MCC(dev_priv)) { ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; tc_hotplug_trigger = 0; + pins = hpd_mcc; } else { ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; + tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect; + pins = hpd_icp; } if (ddi_hotplug_trigger) { @@ -2279,44 +2287,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir, intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, tc_hotplug_trigger, dig_hotplug_reg, pins, - icp_tc_port_hotplug_long_detect); - } - - if (pin_mask) - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); - - if (pch_iir & SDE_GMBUS_ICP) - gmbus_irq_handler(dev_priv); -} - -static void tgp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) -{ - u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; - u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP; - u32 pin_mask = 0, long_mask = 0; - - if (ddi_hotplug_trigger) { - u32 dig_hotplug_reg; - - dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); - I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); - - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, - ddi_hotplug_trigger, - dig_hotplug_reg, hpd_tgp, - icp_ddi_port_hotplug_long_detect); - } - - if (tc_hotplug_trigger) { - u32 dig_hotplug_reg; - - dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); - I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); - - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, - tc_hotplug_trigger, - dig_hotplug_reg, hpd_tgp, - tgp_tc_port_hotplug_long_detect); + tc_port_hotplug_long_detect); } if (pin_mask) @@ -2767,12 +2738,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(SDEIIR, iir); ret = IRQ_HANDLED; - if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP) - tgp_irq_handler(dev_priv, iir); - else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC) - icp_irq_handler(dev_priv, iir, hpd_mcc); - else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_irq_handler(dev_priv, iir, hpd_icp); + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) + icp_irq_handler(dev_priv, iir); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) spt_irq_handler(dev_priv, iir); else -- cgit v1.2.3 From b32821c036310c49cdc45639b1d451b21fc3f17b Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: 
Thu, 29 Aug 2019 14:15:25 -0700 Subject: drm/i915: parameterize SDE hotplug registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ice Lake, Tiger Lake and Elkhart Lake all have different port configurations and all of them can be parameterized the same way to form the SDE hotplug bitmask. Avoid making them a special case an just use the parameterized macros. Signed-off-by: Lucas De Marchi Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190829211526.30525-3-jose.souza@intel.com Signed-off-by: José Roberto de Souza --- drivers/gpu/drm/i915/i915_irq.c | 36 ++++++++++++++++++------------------ drivers/gpu/drm/i915/i915_reg.h | 35 +++++++++++++++-------------------- 2 files changed, 33 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5f590987dcd5..541382832126 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -149,30 +149,30 @@ static const u32 hpd_gen12[HPD_NUM_PINS] = { }; static const u32 hpd_icp[HPD_NUM_PINS] = { - [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, - [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, - [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP, - [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP, - [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP, - [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP + [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A), + [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B), + [HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1), + [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2), + [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3), + [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4), }; static const u32 hpd_mcc[HPD_NUM_PINS] = { - [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, - [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, - [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP + [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A), + [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B), + [HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1), }; static const u32 hpd_tgp[HPD_NUM_PINS] = { - [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, - [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, - [HPD_PORT_C] = SDE_DDIC_HOTPLUG_TGP, - [HPD_PORT_D] = SDE_TC1_HOTPLUG_ICP, - [HPD_PORT_E] = SDE_TC2_HOTPLUG_ICP, - [HPD_PORT_F] = SDE_TC3_HOTPLUG_ICP, - [HPD_PORT_G] = SDE_TC4_HOTPLUG_ICP, - [HPD_PORT_H] = SDE_TC5_HOTPLUG_TGP, - [HPD_PORT_I] = SDE_TC6_HOTPLUG_TGP, + [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A), + [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B), + [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C), + [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1), + [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2), + [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3), + [HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4), + [HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5), + [HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6), }; void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 946beb9e295e..53735433fe20 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7839,29 +7839,24 @@ enum { SDE_FDI_RXA_CPT) /* south display engine interrupt: ICP/TGP */ -#define SDE_TC6_HOTPLUG_TGP (1 << 29) -#define SDE_TC5_HOTPLUG_TGP (1 << 28) -#define SDE_TC4_HOTPLUG_ICP (1 << 27) -#define SDE_TC3_HOTPLUG_ICP (1 << 26) -#define SDE_TC2_HOTPLUG_ICP (1 << 25) -#define SDE_TC1_HOTPLUG_ICP (1 << 24) #define SDE_GMBUS_ICP (1 << 23) -#define SDE_DDIC_HOTPLUG_TGP (1 << 18) -#define SDE_DDIB_HOTPLUG_ICP (1 << 17) -#define SDE_DDIA_HOTPLUG_ICP (1 << 16) #define SDE_TC_HOTPLUG_ICP(tc_port) (1 << ((tc_port) + 24)) #define 
SDE_DDI_HOTPLUG_ICP(port) (1 << ((port) + 16)) -#define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \ - SDE_DDIA_HOTPLUG_ICP) -#define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \ - SDE_TC3_HOTPLUG_ICP | \ - SDE_TC2_HOTPLUG_ICP | \ - SDE_TC1_HOTPLUG_ICP) -#define SDE_DDI_MASK_TGP (SDE_DDIC_HOTPLUG_TGP | \ - SDE_DDI_MASK_ICP) -#define SDE_TC_MASK_TGP (SDE_TC6_HOTPLUG_TGP | \ - SDE_TC5_HOTPLUG_TGP | \ - SDE_TC_MASK_ICP) +#define SDE_DDI_MASK_ICP (SDE_DDI_HOTPLUG_ICP(PORT_B) | \ + SDE_DDI_HOTPLUG_ICP(PORT_A)) +#define SDE_TC_MASK_ICP (SDE_TC_HOTPLUG_ICP(PORT_TC4) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC3) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC2) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC1)) +#define SDE_DDI_MASK_TGP (SDE_DDI_HOTPLUG_ICP(PORT_C) | \ + SDE_DDI_HOTPLUG_ICP(PORT_B) | \ + SDE_DDI_HOTPLUG_ICP(PORT_A)) +#define SDE_TC_MASK_TGP (SDE_TC_HOTPLUG_ICP(PORT_TC6) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC5) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC4) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC3) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC2) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC1)) #define SDEISR _MMIO(0xc4000) #define SDEIMR _MMIO(0xc4004) -- cgit v1.2.3 From 40e98130c32889e4ed128d129b824107b90501fe Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 29 Aug 2019 14:15:26 -0700 Subject: drm/i915: unify icp, tgp and mcc irq setup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use a single function to setup the SDE irq and make MCC, ICP and TGP use it, just like was done for the irq handler. Signed-off-by: Lucas De Marchi Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190829211526.30525-4-jose.souza@intel.com Signed-off-by: José Roberto de Souza --- drivers/gpu/drm/i915/i915_irq.c | 50 +++++++++++++++++------------------------ 1 file changed, 21 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 541382832126..135c9ee55e07 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3385,42 +3385,31 @@ static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv, } } -static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv, + u32 sde_ddi_mask, u32 sde_tc_mask, + u32 ddi_enable_mask, u32 tc_enable_mask, + const u32 *pins) { u32 hotplug_irqs, enabled_irqs; - hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); + hotplug_irqs = sde_ddi_mask | sde_tc_mask; + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, - ICP_TC_HPD_ENABLE_MASK); + icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask); } +/* + * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the + * equivalent of SDE. 
+ */ static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv) { - u32 hotplug_irqs, enabled_irqs; - - hotplug_irqs = SDE_DDI_MASK_TGP; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_mcc); - - ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - - icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0); -} - -static void tgp_hpd_irq_setup(struct drm_i915_private *dev_priv) -{ - u32 hotplug_irqs, enabled_irqs; - - hotplug_irqs = SDE_DDI_MASK_TGP | SDE_TC_MASK_TGP; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_tgp); - - ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - - icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, - TGP_TC_HPD_ENABLE_MASK); + icp_hpd_irq_setup(dev_priv, + SDE_DDI_MASK_TGP, 0, + TGP_DDI_HPD_ENABLE_MASK, 0, + hpd_mcc); } static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) @@ -3460,9 +3449,13 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) gen11_hpd_detection_setup(dev_priv); if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP) - tgp_hpd_irq_setup(dev_priv); + icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP, + TGP_DDI_HPD_ENABLE_MASK, + TGP_TC_HPD_ENABLE_MASK, hpd_tgp); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_hpd_irq_setup(dev_priv); + icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP, + ICP_DDI_HPD_ENABLE_MASK, + ICP_TC_HPD_ENABLE_MASK, hpd_icp); } static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) @@ -4340,7 +4333,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; } else { if (HAS_PCH_MCC(dev_priv)) - /* EHL doesn't need most of gen11_hpd_irq_setup */ dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup; else if (INTEL_GEN(dev_priv) >= 11) dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; -- cgit v1.2.3 From 3dc007fe9b2b256d2779bb9e9a0d372eb1d90643 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 30 Aug 2019 18:59:58 +0100 Subject: drm/i915/gtt: Downgrade gen7 (ivb, byt, hsw) back to aliasing-ppgtt With the upcoming change in timing (dramatically reducing the latency between manipulating the ppGTT and execution), no amount of tweaking could save Baytrail, it would always fail to invalidate its TLB. Ville was right, Baytrail is beyond hope. v2: Rollback on all gen7; same timing instability on TLB invalidation. 
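For context (a gloss, not stated in the patch itself): INTEL_PPGTT_FULL gives each GEM context its own per-process address space, while INTEL_PPGTT_ALIASING falls back to a single device-wide ppGTT that aliases the global GTT and is shared by every context, which is what these gen7 platforms are moved back to here.
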
Signed-off-by: Chris Wilson Acked-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190830180000.24608-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 63 +++++++++--------------------- drivers/gpu/drm/i915/i915_pci.c | 4 +- 2 files changed, 20 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index 601c16239fdf..ac55a0d054bd 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1741,46 +1741,22 @@ static int remap_l3(struct i915_request *rq) static int switch_context(struct i915_request *rq) { - struct intel_engine_cs *engine = rq->engine; - struct i915_address_space *vm = vm_alias(rq->hw_context); - unsigned int unwind_mm = 0; - u32 hw_flags = 0; + struct intel_context *ce = rq->hw_context; + struct i915_address_space *vm = vm_alias(ce); int ret; GEM_BUG_ON(HAS_EXECLISTS(rq->i915)); if (vm) { - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - int loops; - - /* - * Baytail takes a little more convincing that it really needs - * to reload the PD between contexts. It is not just a little - * longer, as adding more stalls after the load_pd_dir (i.e. - * adding a long loop around flush_pd_dir) is not as effective - * as reloading the PD umpteen times. 32 is derived from - * experimentation (gem_exec_parallel/fds) and has no good - * explanation. - */ - loops = 1; - if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915)) - loops = 32; - - do { - ret = load_pd_dir(rq, ppgtt); - if (ret) - goto err; - } while (--loops); - - if (ppgtt->pd_dirty_engines & engine->mask) { - unwind_mm = engine->mask; - ppgtt->pd_dirty_engines &= ~unwind_mm; - hw_flags = MI_FORCE_RESTORE; - } + ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm)); + if (ret) + return ret; } - if (rq->hw_context->state) { - GEM_BUG_ON(engine->id != RCS0); + if (ce->state) { + u32 hw_flags; + + GEM_BUG_ON(rq->engine->id != RCS0); /* * The kernel context(s) is treated as pure scratch and is not @@ -1789,22 +1765,25 @@ static int switch_context(struct i915_request *rq) * as nothing actually executes using the kernel context; it * is purely used for flushing user contexts. 
*/ + hw_flags = 0; if (i915_gem_context_is_kernel(rq->gem_context)) hw_flags = MI_RESTORE_INHIBIT; ret = mi_set_context(rq, hw_flags); if (ret) - goto err_mm; + return ret; } if (vm) { + struct intel_engine_cs *engine = rq->engine; + ret = engine->emit_flush(rq, EMIT_INVALIDATE); if (ret) - goto err_mm; + return ret; ret = flush_pd_dir(rq); if (ret) - goto err_mm; + return ret; /* * Not only do we need a full barrier (post-sync write) after @@ -1816,24 +1795,18 @@ static int switch_context(struct i915_request *rq) */ ret = engine->emit_flush(rq, EMIT_INVALIDATE); if (ret) - goto err_mm; + return ret; ret = engine->emit_flush(rq, EMIT_FLUSH); if (ret) - goto err_mm; + return ret; } ret = remap_l3(rq); if (ret) - goto err_mm; + return ret; return 0; - -err_mm: - if (unwind_mm) - i915_vm_to_ppgtt(vm)->pd_dirty_engines |= unwind_mm; -err: - return ret; } static int ring_request_alloc(struct i915_request *request) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 1974e4c78a43..18212c39a9ba 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -420,7 +420,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { .has_rc6 = 1, \ .has_rc6p = 1, \ .has_rps = true, \ - .ppgtt_type = INTEL_PPGTT_FULL, \ + .ppgtt_type = INTEL_PPGTT_ALIASING, \ .ppgtt_size = 31, \ IVB_PIPE_OFFSETS, \ IVB_CURSOR_OFFSETS, \ @@ -476,7 +476,7 @@ static const struct intel_device_info intel_valleyview_info = { .has_rps = true, .display.has_gmch = 1, .display.has_hotplug = 1, - .ppgtt_type = INTEL_PPGTT_FULL, + .ppgtt_type = INTEL_PPGTT_ALIASING, .ppgtt_size = 31, .has_snoop = true, .has_coherent_ggtt = false, -- cgit v1.2.3 From 0b718ba1e884f64dce27c19311dd2859b87e56b9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 30 Aug 2019 18:59:59 +0100 Subject: drm/i915/gtt: Downgrade Cherryview back to aliasing-ppgtt With the upcoming change in timing (dramatically reducing the latency between manipulating the ppGTT and execution), no amount of tweaking could save Cherryview, it would always fail to invalidate its TLB. Signed-off-by: Chris Wilson Acked-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190830180000.24608-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 69 ++++++------------------------------- drivers/gpu/drm/i915/i915_pci.c | 2 +- 2 files changed, 11 insertions(+), 60 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 7d460b1842dd..0e28263c7b87 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1871,60 +1871,6 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq) return 0; } -static int emit_pdps(struct i915_request *rq) -{ - const struct intel_engine_cs * const engine = rq->engine; - struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->hw_context->vm); - int err, i; - u32 *cs; - - GEM_BUG_ON(intel_vgpu_active(rq->i915)); - - /* - * Beware ye of the dragons, this sequence is magic! - * - * Small changes to this sequence can cause anything from - * GPU hangs to forcewake errors and machine lockups! - */ - - /* Flush any residual operations from the context load */ - err = engine->emit_flush(rq, EMIT_FLUSH); - if (err) - return err; - - /* Magic required to prevent forcewake errors! 
*/ - err = engine->emit_flush(rq, EMIT_INVALIDATE); - if (err) - return err; - - cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - /* Ensure the LRI have landed before we invalidate & continue */ - *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; - for (i = GEN8_3LVL_PDPES; i--; ) { - const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); - u32 base = engine->mmio_base; - - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); - *cs++ = upper_32_bits(pd_daddr); - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); - *cs++ = lower_32_bits(pd_daddr); - } - *cs++ = MI_NOOP; - - intel_ring_advance(rq, cs); - - /* Be doubly sure the LRI have landed before proceeding */ - err = engine->emit_flush(rq, EMIT_FLUSH); - if (err) - return err; - - /* Re-invalidate the TLB for luck */ - return engine->emit_flush(rq, EMIT_INVALIDATE); -} - static int execlists_request_alloc(struct i915_request *request) { int ret; @@ -1947,10 +1893,7 @@ static int execlists_request_alloc(struct i915_request *request) */ /* Unconditionally invalidate GPU caches and TLBs. */ - if (i915_vm_is_4lvl(request->hw_context->vm)) - ret = request->engine->emit_flush(request, EMIT_INVALIDATE); - else - ret = emit_pdps(request); + ret = request->engine->emit_flush(request, EMIT_INVALIDATE); if (ret) return ret; @@ -3167,12 +3110,20 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) return indirect_ctx_offset; } +static struct i915_ppgtt *vm_alias(struct i915_address_space *vm) +{ + if (i915_is_ggtt(vm)) + return i915_vm_to_ggtt(vm)->alias; + else + return i915_vm_to_ppgtt(vm); +} + static void execlists_init_reg_state(u32 *regs, struct intel_context *ce, struct intel_engine_cs *engine, struct intel_ring *ring) { - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm); + struct i915_ppgtt *ppgtt = vm_alias(ce->vm); bool rcs = engine->class == RENDER_CLASS; u32 base = engine->mmio_base; diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 18212c39a9ba..fbe98a2db88e 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -570,7 +570,7 @@ static const struct intel_device_info intel_cherryview_info = { .has_rps = true, .has_logical_ring_contexts = 1, .display.has_gmch = 1, - .ppgtt_type = INTEL_PPGTT_FULL, + .ppgtt_type = INTEL_PPGTT_ALIASING, .ppgtt_size = 32, .has_reset_engine = 1, .has_snoop = true, -- cgit v1.2.3 From c1d143dd2ac8d481500fec4c0d715ff301d2766f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 30 Aug 2019 19:00:00 +0100 Subject: drm/i915: Remove ppgtt->dirty_engines This is no longer used anywhere and so can be removed. However, tracking the dirty status on the ppgtt doesn't work very well if the ppgtt is shared, so perhaps for the best that it is no longer required. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190830180000.24608-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 16 +--------------- drivers/gpu/drm/i915/i915_gem_gtt.h | 1 - 2 files changed, 1 insertion(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 3c50cea76dc5..ee51fd1a6207 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -823,17 +823,6 @@ release_pd_entry(struct i915_page_directory * const pd, return free; } -/* - * PDE TLBs are a pain to invalidate on GEN8+. 
When we modify - * the page table structures, we mark them dirty so that - * context switching/execlist queuing code takes extra steps - * to ensure that tlbs are flushed. - */ -static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt) -{ - ppgtt->pd_dirty_engines = ALL_ENGINES; -} - static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) { struct drm_i915_private *dev_priv = ppgtt->vm.i915; @@ -1735,10 +1724,8 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, } spin_unlock(&pd->lock); - if (flush) { - mark_tlbs_dirty(&ppgtt->base); + if (flush) gen6_ggtt_invalidate(vm->gt->ggtt); - } goto out; @@ -1833,7 +1820,6 @@ static int pd_vma_bind(struct i915_vma *vma, gen6_for_all_pdes(pt, ppgtt->base.pd, pde) gen6_write_pde(ppgtt, pde, pt); - mark_tlbs_dirty(&ppgtt->base); gen6_ggtt_invalidate(ggtt); return 0; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 576f6fb95d13..07c85c134d4c 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -422,7 +422,6 @@ struct i915_ggtt { struct i915_ppgtt { struct i915_address_space vm; - intel_engine_mask_t pd_dirty_engines; struct i915_page_directory *pd; }; -- cgit v1.2.3 From aabbe344dc3ca5f7d8263a02608ba6179e8a4499 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 30 Aug 2019 19:03:25 +0100 Subject: drm/i915: Use RCU for unlocked vm_idr lookup Since i915_address_space is now RCU protected, we can do the vm_idr lookup without taking the vm_idr_mutex, just with the rcu_read_lock() instead. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190830180325.7755-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 1f735ca9b173..b8969605f4e8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -1057,14 +1057,11 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, if (upper_32_bits(args->value)) return -ENOENT; - err = mutex_lock_interruptible(&file_priv->vm_idr_lock); - if (err) - return err; - + rcu_read_lock(); vm = idr_find(&file_priv->vm_idr, args->value); - if (vm) - i915_vm_get(vm); - mutex_unlock(&file_priv->vm_idr_lock); + if (vm && !kref_get_unless_zero(&vm->ref)) + vm = NULL; + rcu_read_unlock(); if (!vm) return -ENOENT; -- cgit v1.2.3 From 7bff9779d76917adfa7c67712aa6ae3b7e5098ba Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 30 Aug 2019 12:16:44 +0200 Subject: drm/i915: Fix regression with crtc disable ordering MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When we moved the code to disable crtc's to a separate patch, we forgot to ensure that for_each_oldnew_intel_crtc_in_state_reverse() was moved as well. 
Signed-off-by: Maarten Lankhorst Fixes: 66d9cec8a6c9 ("drm/i915/display: Move the commit_tail() disable sequence to separate function") Cc: Ville Syrjälä Cc: Manasi Navare Cc: José Roberto de Souza Reviewed-by: Manasi Navare Reviewed-by: José Roberto de Souza Signed-off-by: Manasi Navare Link: https://patchwork.freedesktop.org/patch/msgid/20190830101644.8740-1-maarten.lankhorst@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index b38d842ff6ec..e661e2099118 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -13784,7 +13784,15 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) struct intel_crtc *crtc; int i; - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + /* + * Disable CRTC/pipes in reverse order because some features(MST in + * TGL+) requires master and slave relationship between pipes, so it + * should always pick the lowest pipe as master as it will be enabled + * first and disable in the reverse order so the master will be the + * last one to be disabled. + */ + for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state, + new_crtc_state, i) { if (!needs_modeset(new_crtc_state)) continue; @@ -13963,15 +13971,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) if (state->modeset) wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); - /* - * Disable CRTC/pipes in reverse order because some features(MST in - * TGL+) requires master and slave relationship between pipes, so it - * should always pick the lowest pipe as master as it will be enabled - * first and disable in the reverse order so the master will be the - * last one to be disabled. - */ - for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state, - new_crtc_state, i) { + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { if (needs_modeset(new_crtc_state) || new_crtc_state->update_pipe) { -- cgit v1.2.3 From 385ba629aa1cdca5373c1fdbf6693ade5062ad27 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Thu, 29 Aug 2019 17:48:28 -0700 Subject: drm/i915: Allow /2 CD2X divider on gen11+ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The bspec has just recently been updated with new cdclk values that require the use of a /2 CD2X divider rather than a /1 divider. Once we add the divider selection logic to ICL+ cdclk programming, we have pretty much the same logic we were already using on CNL, so it's simpler to drop icl_set_cdclk() completely and reuse cnl_set_cdclk() on gen11+ platforms as well. v2: - Using ICL_CDCLK_CD2X_PIPE_NONE + BXT_CDCLK_CD2X_PIPE(pipe) for TGL is correct, but looks really confusing. Add some TGL_ macros that alias these to avoid confusion. (Ville) - Use DIV_ROUND_CLOSEST rather than / when applying the divider. 
(Ville) Cc: José Roberto de Souza Cc: Lucas De Marchi Cc: Ville Syrjälä Signed-off-by: Matt Roper Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190830004828.19359-1-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 90 ++++++++++++------------------ drivers/gpu/drm/i915/i915_reg.h | 3 + 2 files changed, 38 insertions(+), 55 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 939088c7d814..58ba42dcf23f 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1659,10 +1659,23 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv, cnl_cdclk_pll_enable(dev_priv, vco); val = divider | skl_cdclk_decimal(cdclk); - if (pipe == INVALID_PIPE) - val |= BXT_CDCLK_CD2X_PIPE_NONE; - else - val |= BXT_CDCLK_CD2X_PIPE(pipe); + + if (INTEL_GEN(dev_priv) >= 12) { + if (pipe == INVALID_PIPE) + val |= TGL_CDCLK_CD2X_PIPE_NONE; + else + val |= TGL_CDCLK_CD2X_PIPE(pipe); + } else if (INTEL_GEN(dev_priv) >= 11) { + if (pipe == INVALID_PIPE) + val |= ICL_CDCLK_CD2X_PIPE_NONE; + else + val |= ICL_CDCLK_CD2X_PIPE(pipe); + } else { + if (pipe == INVALID_PIPE) + val |= BXT_CDCLK_CD2X_PIPE_NONE; + else + val |= BXT_CDCLK_CD2X_PIPE(pipe); + } I915_WRITE(CDCLK_CTL, val); if (pipe != INVALID_PIPE) @@ -1813,51 +1826,6 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) return dev_priv->cdclk.hw.ref * ratio; } -static void icl_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state, - enum pipe pipe) -{ - unsigned int cdclk = cdclk_state->cdclk; - unsigned int vco = cdclk_state->vco; - int ret; - - ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, - SKL_CDCLK_PREPARE_FOR_CHANGE, - SKL_CDCLK_READY_FOR_CHANGE, - SKL_CDCLK_READY_FOR_CHANGE, 3); - if (ret) { - DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", - ret); - return; - } - - if (dev_priv->cdclk.hw.vco != 0 && - dev_priv->cdclk.hw.vco != vco) - cnl_cdclk_pll_disable(dev_priv); - - if (dev_priv->cdclk.hw.vco != vco) - cnl_cdclk_pll_enable(dev_priv, vco); - - /* - * On ICL CD2X_DIV can only be 1, so we'll never end up changing the - * divider here synchronized to a pipe while CDCLK is on, nor will we - * need the corresponding vblank wait. - */ - I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE | - skl_cdclk_decimal(cdclk)); - - sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, - cdclk_state->voltage_level); - - intel_update_cdclk(dev_priv); - - /* - * Can't read out the voltage level :( - * Let's just assume everything is as expected. 
- */ - dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level; -} - static u8 icl_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk) { if (IS_ELKHARTLAKE(dev_priv)) { @@ -1881,6 +1849,7 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_state *cdclk_state) { u32 val; + int div; cdclk_state->bypass = 50000; @@ -1914,10 +1883,21 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv, cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref; - val = I915_READ(CDCLK_CTL); - WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0); + val = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; + switch (val) { + case BXT_CDCLK_CD2X_DIV_SEL_1: + div = 2; + break; + case BXT_CDCLK_CD2X_DIV_SEL_2: + div = 4; + break; + default: + MISSING_CASE(val); + div = 2; + break; + } - cdclk_state->cdclk = cdclk_state->vco / 2; + cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div); out: /* @@ -1963,7 +1943,7 @@ sanitize: icl_calc_voltage_level(dev_priv, sanitized_state.cdclk); - icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE); + cnl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE); } static void icl_uninit_cdclk(struct drm_i915_private *dev_priv) @@ -1975,7 +1955,7 @@ static void icl_uninit_cdclk(struct drm_i915_private *dev_priv) cdclk_state.voltage_level = icl_calc_voltage_level(dev_priv, cdclk_state.cdclk); - icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); + cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } static void cnl_init_cdclk(struct drm_i915_private *dev_priv) @@ -2810,7 +2790,7 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv) void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { if (INTEL_GEN(dev_priv) >= 11) { - dev_priv->display.set_cdclk = icl_set_cdclk; + dev_priv->display.set_cdclk = cnl_set_cdclk; dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk; } else if (IS_CANNONLAKE(dev_priv)) { dev_priv->display.set_cdclk = cnl_set_cdclk; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 53735433fe20..6c43b8c583bb 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9759,7 +9759,10 @@ enum skl_power_gate { #define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20) #define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19) #define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) +#define ICL_CDCLK_CD2X_PIPE(pipe) (_PICK(pipe, 0, 2, 6) << 19) #define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19) +#define TGL_CDCLK_CD2X_PIPE(pipe) BXT_CDCLK_CD2X_PIPE(pipe) +#define TGL_CDCLK_CD2X_PIPE_NONE ICL_CDCLK_CD2X_PIPE_NONE #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16) #define CDCLK_FREQ_DECIMAL_MASK (0x7ff) -- cgit v1.2.3 From 3d1da92baffe69cf847699ceccf4297356da58fa Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 26 Aug 2019 15:55:40 -0700 Subject: drm/i915: Add 324mhz and 326.4mhz cdclks for gen11+ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The bspec was recently updated with these new cdclk values for ICL, EHL, and TGL. 
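(Illustrative note, not part of the patch: the new values fall out of the existing PLL ratios once the /2 CD2X divider from the previous change is allowed. example_cdclk_khz() below is a stand-alone sketch, not a driver function; it assumes linux/kernel.h for DIV_ROUND_CLOSEST.)

	/* CDCLK = PLL VCO / CD2X divider, rounded as cnl_set_cdclk() now does */
	static int example_cdclk_khz(int vco_khz, int cd2x_div)
	{
		return DIV_ROUND_CLOSEST(vco_khz, cd2x_div);
	}

	/* 24   MHz ref, ratio 27 -> VCO 648000 kHz, /2 -> 324000 kHz */
	/* 19.2 MHz ref, ratio 34 -> VCO 652800 kHz, /2 -> 326400 kHz */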
Bspec: 20598 Bspec: 49201 Cc: José Roberto de Souza Cc: Lucas De Marchi Signed-off-by: Matt Roper Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190826225540.11987-3-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 58ba42dcf23f..76f11d465e91 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1761,8 +1761,10 @@ sanitize: static int icl_calc_cdclk(int min_cdclk, unsigned int ref) { - static const int ranges_24[] = { 180000, 192000, 312000, 552000, 648000 }; - static const int ranges_19_38[] = { 172800, 192000, 307200, 556800, 652800 }; + static const int ranges_24[] = { 180000, 192000, 312000, 324000, + 552000, 648000 }; + static const int ranges_19_38[] = { 172800, 192000, 307200, 326400, + 556800, 652800 }; const int *ranges; int len, i; @@ -1803,6 +1805,7 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) /* fall through */ case 172800: case 307200: + case 326400: case 556800: case 652800: WARN_ON(dev_priv->cdclk.hw.ref != 19200 && @@ -1810,6 +1813,7 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) break; case 180000: case 312000: + case 324000: case 552000: case 648000: WARN_ON(dev_priv->cdclk.hw.ref != 24000); -- cgit v1.2.3 From dffa8feb308455f9b3ce0eeb55a4eac3afc0786b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 30 Aug 2019 19:19:29 +0100 Subject: drm/i915/perf: Assert locking for i915_init_oa_perf_state() We use the context->pin_mutex to serialise updates to the OA config and the registers values written into each new context. Document this relationship and assert we do hold the context->pin_mutex as used by gen8_configure_all_contexts() to serialise updates to the OA config itself. v2: Add a white-lie for when we call intel_gt_resume() from init. v3: Lie while we have the context pinned inside atomic reset. 
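(Sketch of the "white lie" referred to in v2/v3, for readers unfamiliar with the lockdep trick; the real call sites are in the diff below. A pinned context image is already owned exclusively, so the code only tells lockdep that ce->pin_mutex is held instead of actually taking it:)

	GEM_BUG_ON(!intel_context_is_pinned(ce));
	mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_);
	/* touch the context image; lockdep_assert_held(&ce->pin_mutex) now passes */
	mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);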
Signed-off-by: Chris Wilson Cc: Lionel Landwerlin Reviewed-by: Lionel Landwerlin #v1 Link: https://patchwork.freedesktop.org/patch/msgid/20190830181929.18663-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 7 ++++++- drivers/gpu/drm/i915/gt/intel_lrc.c | 10 ++++++++++ drivers/gpu/drm/i915/i915_perf.c | 3 +++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 1363e069ec83..aa6cf0152ce7 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "i915_params.h" +#include "intel_context.h" #include "intel_engine_pm.h" #include "intel_gt.h" #include "intel_gt_pm.h" @@ -142,8 +143,12 @@ int intel_gt_resume(struct intel_gt *gt) intel_engine_pm_get(engine); ce = engine->kernel_context; - if (ce) + if (ce) { + GEM_BUG_ON(!intel_context_is_pinned(ce)); + mutex_acquire(&ce->pin_mutex.dep_map, 0, 0, _THIS_IP_); ce->ops->reset(ce); + mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_); + } engine->serial++; /* kernel context lost */ err = engine->resume(engine); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 0e28263c7b87..87b7473a6dfb 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2383,6 +2383,11 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) ce = rq->hw_context; GEM_BUG_ON(i915_active_is_idle(&ce->active)); GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); + + /* Proclaim we have exclusive access to the context image! */ + GEM_BUG_ON(!intel_context_is_pinned(ce)); + mutex_acquire(&ce->pin_mutex.dep_map, 2, 0, _THIS_IP_); + rq = active_request(rq); if (!rq) { ce->ring->head = ce->ring->tail; @@ -2442,6 +2447,7 @@ out_replay: engine->name, ce->ring->head, ce->ring->tail); intel_ring_update_space(ce->ring); __execlists_update_reg_state(ce, engine); + mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_); unwind: /* Push back any incomplete requests for replay after the reset. */ @@ -3920,6 +3926,9 @@ void intel_lr_context_reset(struct intel_engine_cs *engine, u32 head, bool scrub) { + GEM_BUG_ON(!intel_context_is_pinned(ce)); + mutex_acquire(&ce->pin_mutex.dep_map, 2, 0, _THIS_IP_); + /* * We want a simple context + ring to execute the breadcrumb update. 
* We cannot rely on the context being intact across the GPU hang, @@ -3944,6 +3953,7 @@ void intel_lr_context_reset(struct intel_engine_cs *engine, intel_ring_update_space(ce->ring); __execlists_update_reg_state(ce, engine); + mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 2c9f46e12622..c1b764233761 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -2305,6 +2305,9 @@ void i915_oa_init_reg_state(struct intel_engine_cs *engine, { struct i915_perf_stream *stream; + /* perf.exclusive_stream serialised by gen8_configure_all_contexts() */ + lockdep_assert_held(&ce->pin_mutex); + if (engine->class != RENDER_CLASS) return; -- cgit v1.2.3 From 75427b2a2bffc083d51dec389c235722a9c69b05 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 28 Aug 2019 13:20:59 +0300 Subject: drm/i915: Limit MST to <= 8bpc once again MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit My attempt at allowing MST to use the higher color depths has regressed some configurations. Apparently people have setups where all MST streams will fit into the DP link with 8bpc but won't fit with higher color depths. What we really should be doing is reducing the bpc for all the streams on the same link until they start to fit. But that requires a bit more work, so in the meantime let's revert back closer to the old behavior and limit MST to at most 8bpc. Cc: stable@vger.kernel.org Cc: Lyude Paul Tested-by: Geoffrey Bennett Fixes: f1477219869c ("drm/i915: Remove the 8bpc shackles from DP MST") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111505 Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190828102059.2512-1-ville.syrjala@linux.intel.com Reviewed-by: Lyude Paul --- drivers/gpu/drm/i915/display/intel_dp_mst.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 2c5ac3dd647f..6df240a01b8c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -128,7 +128,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, limits.max_lane_count = intel_dp_max_lane_count(intel_dp); limits.min_bpp = intel_dp_min_bpp(pipe_config); - limits.max_bpp = pipe_config->pipe_bpp; + /* + * FIXME: If all the streams can't fit into the link with + * their current pipe_bpp we should reduce pipe_bpp across + * the board until things start to fit. Until then we + * limit to <= 8bpc since that's what was hardcoded for all + * MST streams previously. This hack should be removed once + * we have the proper retry logic in place. + */ + limits.max_bpp = min(pipe_config->pipe_bpp, 24); intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); -- cgit v1.2.3 From 66a990dd0c49b534b2396934c1659b3fef34233d Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 30 Aug 2019 21:27:19 +0300 Subject: drm/i915: Prefer encoder->name over port_name() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit enum port is a mess now because it no longer matches the spec at all. Let's start to dig ourselves out of this hole by reducing our reliance on port_name(). This should at least make a bunch of debug messages a bit more sensible while we think how to fill the the hole properly. 
Based on the following cocci script with a lot of manual cleanup (all the format strings etc.): @@ expression E; @@ ( - port_name(E->port) + E->base.base.id, E->base.name | - port_name(E.port) + E.base.base.id, E.base.name ) @@ enum port P; expression E; @@ P = E->port <... - port_name(P) + E->base.base.id, E->base.name ...> @@ enum port P; expression E; @@ P = E.port <... - port_name(P) + E.base.base.id, E.base.name ...> @@ expression E; @@ { - enum port P = E; ... when != P } Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190830182719.32608-1-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/display/intel_audio.c | 10 ++- drivers/gpu/drm/i915/display/intel_ddi.c | 19 ++-- drivers/gpu/drm/i915/display/intel_display.c | 4 +- drivers/gpu/drm/i915/display/intel_dp.c | 122 +++++++++++++++----------- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 4 +- drivers/gpu/drm/i915/display/intel_hdmi.c | 9 +- drivers/gpu/drm/i915/display/intel_hotplug.c | 3 +- drivers/gpu/drm/i915/i915_debugfs.c | 5 +- 8 files changed, 102 insertions(+), 74 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index ddcccf4408c3..aac089c79ceb 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -560,8 +560,9 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder, u32 tmp, eldv; i915_reg_t aud_config, aud_cntrl_st2; - DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n", - port_name(port), pipe_name(pipe)); + DRM_DEBUG_KMS("Disable audio codec on [ENCODER:%d:%s], pipe %c\n", + encoder->base.base.id, encoder->base.name, + pipe_name(pipe)); if (WARN_ON(port == PORT_A)) return; @@ -609,8 +610,9 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder, int len, i; i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2; - DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n", - port_name(port), pipe_name(pipe), drm_eld_size(eld)); + DRM_DEBUG_KMS("Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n", + encoder->base.base.id, encoder->base.name, + pipe_name(pipe), drm_eld_size(eld)); if (WARN_ON(port == PORT_A)) return; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 3180dacb5be4..1fe0bf01e580 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2080,18 +2080,20 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, } if (!*pipe_mask) - DRM_DEBUG_KMS("No pipe for ddi port %c found\n", - port_name(port)); + DRM_DEBUG_KMS("No pipe for [ENCODER:%d:%s] found\n", + encoder->base.base.id, encoder->base.name); if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) { - DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n", - port_name(port), *pipe_mask); + DRM_DEBUG_KMS("Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n", + encoder->base.base.id, encoder->base.name, + *pipe_mask); *pipe_mask = BIT(ffs(*pipe_mask) - 1); } if (mst_pipe_mask && mst_pipe_mask != *pipe_mask) - DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n", - port_name(port), *pipe_mask, mst_pipe_mask); + DRM_DEBUG_KMS("Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n", + encoder->base.base.id, encoder->base.name, + *pipe_mask, mst_pipe_mask); else *is_dp_mst = mst_pipe_mask; @@ -2101,8 
+2103,9 @@ out: if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) - DRM_ERROR("Port %c enabled but PHY powered down? " - "(PHY_CTL %08x)\n", port_name(port), tmp); + DRM_ERROR("[ENCODER:%d:%s] enabled but PHY powered down? " + "(PHY_CTL %08x)\n", encoder->base.base.id, + encoder->base.name, tmp); } intel_display_power_put(dev_priv, encoder->power_domain, wakeref); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index e661e2099118..a03dc6eb61f8 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1612,8 +1612,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, if (intel_de_wait_for_register(dev_priv, dpll_reg, port_mask, expected_mask, 1000)) - WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n", - port_name(dport->base.port), + WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n", + dport->base.base.base.id, dport->base.base.name, I915_READ(dpll_reg) & port_mask, expected_mask); } diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index aaa90992a7f0..4e3e696e7fc8 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -639,12 +639,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) u32 DP; if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN, - "skipping pipe %c power sequencer kick due to port %c being active\n", - pipe_name(pipe), port_name(intel_dig_port->base.port))) + "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n", + pipe_name(pipe), intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name)) return; - DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n", - pipe_name(pipe), port_name(intel_dig_port->base.port)); + DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n", + pipe_name(pipe), intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); /* Preserve the BIOS-computed detected bit. This is * supposed to be read-only. @@ -762,9 +764,10 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) vlv_steal_power_sequencer(dev_priv, pipe); intel_dp->pps_pipe = pipe; - DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n", + DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n", pipe_name(intel_dp->pps_pipe), - port_name(intel_dig_port->base.port)); + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); /* init power sequencer on this pipe and port */ intel_dp_init_panel_power_sequencer(intel_dp); @@ -872,13 +875,16 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) /* didn't find one? 
just let vlv_power_sequencer_pipe() pick one when needed */ if (intel_dp->pps_pipe == INVALID_PIPE) { - DRM_DEBUG_KMS("no initial power sequencer for port %c\n", - port_name(port)); + DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); return; } - DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n", - port_name(port), pipe_name(intel_dp->pps_pipe)); + DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name, + pipe_name(intel_dp->pps_pipe)); intel_dp_init_panel_power_sequencer(intel_dp); intel_dp_init_panel_power_sequencer_registers(intel_dp, false); @@ -2492,8 +2498,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) intel_display_power_get(dev_priv, intel_aux_power_domain(intel_dig_port)); - DRM_DEBUG_KMS("Turning eDP port %c VDD on\n", - port_name(intel_dig_port->base.port)); + DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); if (!edp_have_panel_power(intel_dp)) wait_panel_power_cycle(intel_dp); @@ -2512,8 +2519,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) * If the panel wasn't on, delay before accessing aux channel */ if (!edp_have_panel_power(intel_dp)) { - DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n", - port_name(intel_dig_port->base.port)); + DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); msleep(intel_dp->panel_power_up_delay); } @@ -2538,8 +2546,9 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) vdd = false; with_pps_lock(intel_dp, wakeref) vdd = edp_panel_vdd_on(intel_dp); - I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n", - port_name(dp_to_dig_port(intel_dp)->base.port)); + I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n", + dp_to_dig_port(intel_dp)->base.base.base.id, + dp_to_dig_port(intel_dp)->base.base.name); } static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) @@ -2557,8 +2566,9 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) if (!edp_have_panel_vdd(intel_dp)) return; - DRM_DEBUG_KMS("Turning eDP port %c VDD off\n", - port_name(intel_dig_port->base.port)); + DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); pp = ironlake_get_pp_control(intel_dp); pp &= ~EDP_FORCE_VDD; @@ -2620,8 +2630,9 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) if (!intel_dp_is_edp(intel_dp)) return; - I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on", - port_name(dp_to_dig_port(intel_dp)->base.port)); + I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on", + dp_to_dig_port(intel_dp)->base.base.base.id, + dp_to_dig_port(intel_dp)->base.base.name); intel_dp->want_panel_vdd = false; @@ -2642,12 +2653,14 @@ static void edp_panel_on(struct intel_dp *intel_dp) if (!intel_dp_is_edp(intel_dp)) return; - DRM_DEBUG_KMS("Turn eDP port %c panel power on\n", - port_name(dp_to_dig_port(intel_dp)->base.port)); + DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n", + dp_to_dig_port(intel_dp)->base.base.base.id, + dp_to_dig_port(intel_dp)->base.base.name); if (WARN(edp_have_panel_power(intel_dp), - "eDP port %c panel power already on\n", - port_name(dp_to_dig_port(intel_dp)->base.port))) + "[ENCODER:%d:%s] panel power already 
on\n", + dp_to_dig_port(intel_dp)->base.base.base.id, + dp_to_dig_port(intel_dp)->base.base.name)) return; wait_panel_power_cycle(intel_dp); @@ -2702,11 +2715,11 @@ static void edp_panel_off(struct intel_dp *intel_dp) if (!intel_dp_is_edp(intel_dp)) return; - DRM_DEBUG_KMS("Turn eDP port %c panel power off\n", - port_name(dig_port->base.port)); + DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n", + dig_port->base.base.base.id, dig_port->base.base.name); - WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n", - port_name(dig_port->base.port)); + WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n", + dig_port->base.base.base.id, dig_port->base.base.name); pp = ironlake_get_pp_control(intel_dp); /* We need to switch off panel power _and_ force vdd, for otherwise some @@ -2851,8 +2864,8 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state) bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN; I915_STATE_WARN(cur_state != state, - "DP port %c state assertion failure (expected %s, current %s)\n", - port_name(dig_port->base.port), + "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", + dig_port->base.base.base.id, dig_port->base.base.name, onoff(state), onoff(cur_state)); } #define assert_dp_port_disabled(d) assert_dp_port((d), false) @@ -3430,8 +3443,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) * port select always when logically disconnecting a power sequencer * from a port. */ - DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n", - pipe_name(pipe), port_name(intel_dig_port->base.port)); + DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n", + pipe_name(pipe), intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); I915_WRITE(pp_on_reg, 0); POSTING_READ(pp_on_reg); @@ -3447,17 +3461,18 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, for_each_intel_dp(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - enum port port = encoder->port; WARN(intel_dp->active_pipe == pipe, - "stealing pipe %c power sequencer from active (e)DP port %c\n", - pipe_name(pipe), port_name(port)); + "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n", + pipe_name(pipe), encoder->base.base.id, + encoder->base.name); if (intel_dp->pps_pipe != pipe) continue; - DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n", - pipe_name(pipe), port_name(port)); + DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n", + pipe_name(pipe), encoder->base.base.id, + encoder->base.name); /* make sure vdd is off before we steal it */ vlv_detach_power_sequencer(intel_dp); @@ -3499,8 +3514,9 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, /* now it's all ours */ intel_dp->pps_pipe = crtc->pipe; - DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n", - pipe_name(intel_dp->pps_pipe), port_name(encoder->port)); + DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n", + pipe_name(intel_dp->pps_pipe), encoder->base.base.id, + encoder->base.name); /* init power sequencer on this pipe and port */ intel_dp_init_panel_power_sequencer(intel_dp); @@ -4321,9 +4337,10 @@ intel_dp_configure_mst(struct intel_dp *intel_dp) &dp_to_dig_port(intel_dp)->base; bool sink_can_mst = intel_dp_sink_can_mst(intel_dp); - DRM_DEBUG_KMS("MST support? 
port %c: %s, sink: %s, modparam: %s\n", - port_name(encoder->port), yesno(intel_dp->can_mst), - yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst)); + DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support? port: %s, sink: %s, modparam: %s\n", + encoder->base.base.id, encoder->base.name, + yesno(intel_dp->can_mst), yesno(sink_can_mst), + yesno(i915_modparams.enable_dp_mst)); if (!intel_dp->can_mst) return; @@ -6284,13 +6301,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) * would end up in an endless cycle of * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..." */ - DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n", - port_name(intel_dig_port->base.port)); + DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); return IRQ_HANDLED; } - DRM_DEBUG_KMS("got hpd irq on port %c - %s\n", - port_name(intel_dig_port->base.port), + DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name, long_hpd ? "long" : "short"); if (long_hpd) { @@ -7154,8 +7173,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_dp_modeset_retry_work_fn); if (WARN(intel_dig_port->max_lanes < 1, - "Not enough lanes (%d) for DP on port %c\n", - intel_dig_port->max_lanes, port_name(port))) + "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", + intel_dig_port->max_lanes, intel_encoder->base.base.id, + intel_encoder->base.name)) return false; intel_dp_set_source_rates(intel_dp); @@ -7196,9 +7216,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, port != PORT_B && port != PORT_C)) return false; - DRM_DEBUG_KMS("Adding %s connector on port %c\n", - type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", - port_name(port)); + DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n", + type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", + intel_encoder->base.base.id, intel_encoder->base.name); drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index b8148f838354..98288edf88f0 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2910,8 +2910,8 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, has_dpll4 ? 
DPLL_ID_EHL_DPLL4 : DPLL_ID_ICL_DPLL1); if (!port_dpll->pll) { - DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n", - port_name(encoder->port)); + DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n", + encoder->base.base.id, encoder->base.name); return false; } diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index f6bb77fd0930..d1e26a00c642 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -3075,12 +3075,13 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_encoder->port; - DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", - port_name(port)); + DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n", + intel_encoder->base.base.id, intel_encoder->base.name); if (WARN(intel_dig_port->max_lanes < 4, - "Not enough lanes (%d) for HDMI on port %c\n", - intel_dig_port->max_lanes, port_name(port))) + "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n", + intel_dig_port->max_lanes, intel_encoder->base.base.id, + intel_encoder->base.name)) return; drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 56be20f6f47e..fc29046d48ea 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -481,7 +481,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, long_hpd = long_mask & BIT(pin); - DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), + DRM_DEBUG_DRIVER("digital hpd on [ENCODER:%d:%s] - %s\n", + encoder->base.base.id, encoder->base.name, long_hpd ? "long" : "short"); queue_dig = true; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 5e81c4fc13ae..974936917329 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -3114,8 +3114,9 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused) if (!intel_dig_port->dp.can_mst) continue; - seq_printf(m, "MST Source Port %c\n", - port_name(intel_dig_port->base.port)); + seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); } drm_connector_list_iter_end(&conn_iter); -- cgit v1.2.3 From 9e362992ff34cab695e669ef1e3f4f65463087e5 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 28 Aug 2019 21:34:24 +0300 Subject: drm/i915: Clean up HDMI deep color handling a bit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reogranize the HDMI deep color state computation to just loop over possible bpc values. Avoids having to maintain so many variants of the clock etc. The current code also looks confused w.r.t. port_clock vs. bw_constrained. It would happily update port_clock for deep color but then not actually enable deep color due to bw_constrained being set. The new logic handles that case correctly. 
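(Worked example, numbers mine: the single scaling rule port_clock = pixel_clock * bpc / 8 reproduces the 1.25x and 1.5x factors the old code tracked in separate clock_10bpc/clock_12bpc variables, e.g. for a 148500 kHz mode:

	148500 *  8 / 8 = 148500 kHz  (8bpc)
	148500 * 10 / 8 = 185625 kHz  (10bpc, old 1.25x)
	148500 * 12 / 8 = 222750 kHz  (12bpc, old 1.5x)
)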
v2: Pull stuff into separate funcs (Jani) Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190828183424.7856-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_hdmi.c | 134 +++++++++++++++++------------- 1 file changed, 77 insertions(+), 57 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index d1e26a00c642..640254feef27 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2265,9 +2265,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, static bool intel_hdmi_ycbcr420_config(struct drm_connector *connector, - struct intel_crtc_state *config, - int *clock_12bpc, int *clock_10bpc, - int *clock_8bpc) + struct intel_crtc_state *config) { struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc); @@ -2276,11 +2274,6 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector, return false; } - /* YCBCR420 TMDS rate requirement is half the pixel clock */ - config->port_clock /= 2; - *clock_12bpc /= 2; - *clock_10bpc /= 2; - *clock_8bpc /= 2; config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; /* YCBCR 420 output conversion needs a scaler */ @@ -2295,6 +2288,76 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector, return true; } +static int intel_hdmi_port_clock(int clock, int bpc) +{ + /* + * Need to adjust the port link by: + * 1.5x for 12bpc + * 1.25x for 10bpc + */ + return clock * bpc / 8; +} + +static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + int clock, bool force_dvi) +{ + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + int bpc; + + for (bpc = 12; bpc >= 10; bpc -= 2) { + if (hdmi_deep_color_possible(crtc_state, bpc) && + hdmi_port_clock_valid(intel_hdmi, + intel_hdmi_port_clock(clock, bpc), + true, force_dvi) == MODE_OK) + return bpc; + } + + return 8; +} + +static int intel_hdmi_compute_clock(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + bool force_dvi) +{ + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + int bpc, clock = adjusted_mode->crtc_clock; + + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) + clock *= 2; + + /* YCBCR420 TMDS rate requirement is half the pixel clock */ + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) + clock /= 2; + + bpc = intel_hdmi_compute_bpc(encoder, crtc_state, + clock, force_dvi); + + crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc); + + /* + * pipe_bpp could already be below 8bpc due to + * FDI bandwidth constraints. We shouldn't bump it + * back up to 8bpc in that case. 
+ */ + if (crtc_state->pipe_bpp > bpc * 3) + crtc_state->pipe_bpp = bpc * 3; + + DRM_DEBUG_KMS("picking %d bpc for HDMI output (pipe bpp: %d)\n", + bpc, crtc_state->pipe_bpp); + + if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock, + false, force_dvi) != MODE_OK) { + DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n", + crtc_state->port_clock); + return -EINVAL; + } + + return 0; +} + int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -2306,11 +2369,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, struct drm_scdc *scdc = &connector->display_info.hdmi.scdc; struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); - int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock; - int clock_10bpc = clock_8bpc * 5 / 4; - int clock_12bpc = clock_8bpc * 3 / 2; - int desired_bpp; bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI; + int ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; @@ -2332,17 +2392,11 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED; } - if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) { + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) pipe_config->pixel_multiplier = 2; - clock_8bpc *= 2; - clock_10bpc *= 2; - clock_12bpc *= 2; - } if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) { - if (!intel_hdmi_ycbcr420_config(connector, pipe_config, - &clock_12bpc, &clock_10bpc, - &clock_8bpc)) { + if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) { DRM_ERROR("Can't support YCBCR420 output\n"); return -EINVAL; } @@ -2359,43 +2413,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, intel_conn_state->force_audio == HDMI_AUDIO_ON; } - /* - * Note that g4x/vlv don't support 12bpc hdmi outputs. We also need - * to check that the higher clock still fits within limits. - */ - if (hdmi_deep_color_possible(pipe_config, 12) && - hdmi_port_clock_valid(intel_hdmi, clock_12bpc, - true, force_dvi) == MODE_OK) { - DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); - desired_bpp = 12*3; - - /* Need to adjust the port link by 1.5x for 12bpc. */ - pipe_config->port_clock = clock_12bpc; - } else if (hdmi_deep_color_possible(pipe_config, 10) && - hdmi_port_clock_valid(intel_hdmi, clock_10bpc, - true, force_dvi) == MODE_OK) { - DRM_DEBUG_KMS("picking bpc to 10 for HDMI output\n"); - desired_bpp = 10 * 3; - - /* Need to adjust the port link by 1.25x for 10bpc. 
*/ - pipe_config->port_clock = clock_10bpc; - } else { - DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n"); - desired_bpp = 8*3; - - pipe_config->port_clock = clock_8bpc; - } - - if (!pipe_config->bw_constrained) { - DRM_DEBUG_KMS("forcing pipe bpp to %i for HDMI\n", desired_bpp); - pipe_config->pipe_bpp = desired_bpp; - } - - if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock, - false, force_dvi) != MODE_OK) { - DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n"); - return -EINVAL; - } + ret = intel_hdmi_compute_clock(encoder, pipe_config, force_dvi); + if (ret) + return ret; /* Set user selected PAR to incoming mode's member */ adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio; -- cgit v1.2.3 From 8f5e2b306b4e26db0ec65e0a27b232b179c9122b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 2 Sep 2019 05:02:43 +0100 Subject: drm/i915: Restrict the aliasing-ppgtt to the size of the ggtt The aliasing-ppgtt is not allowed to be smaller than the ggtt, nor should we advertise it as being any bigger, or else we may get sued for false advertisement. Testcase: igt/gem_exec_big Fixes: 0b718ba1e884 ("drm/i915/gtt: Downgrade Cherryview back to aliasing-ppgtt") Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190902040303.14195-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index ee51fd1a6207..906dc6fff383 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2597,6 +2597,8 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma); ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; + ppgtt->vm.total = ggtt->vm.total; + return 0; err_ppgtt: -- cgit v1.2.3 From 4f36ef2ee1876b2e145327f03f675a0577f258cb Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 2 Sep 2019 05:02:44 +0100 Subject: drm/i915: Report aliasing ppgtt size as ggtt size The aliasing-ppgtt is constrained to be the same size as the Global GTT since it aliases the same address space. Simplifying gtt size reporting in this case. 
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190902040303.14195-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index b8969605f4e8..f1c0e5d958f3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -2231,8 +2231,6 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, args->size = 0; if (ctx->vm) args->value = ctx->vm->total; - else if (to_i915(dev)->ggtt.alias) - args->value = to_i915(dev)->ggtt.alias->vm.total; else args->value = to_i915(dev)->ggtt.vm.total; break; -- cgit v1.2.3 From 5a90606df7cb73eceb46897a87154e94b31af93a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 2 Sep 2019 05:02:47 +0100 Subject: drm/i915: Replace obj->pin_global with obj->frontbuffer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit obj->pin_global was originally used as a means to keep the shrinker off the active scanout, but we use the vma->pin_count itself for that and the obj->frontbuffer to delay shrinking active framebuffers. The other role that obj->pin_global gained was for spotting display objects inside GEM and working harder to keep those coherent; for which we can again simply inspect obj->frontbuffer directly. Coming up next, we will want to manipulate the pin_global counter outside of the principle locks, so would need to make pin_global atomic. However, since obj->frontbuffer is already managed atomically, it makes sense to use that the primary key for display objects instead of having pin_global. Ville pointed out the principle difference is that obj->frontbuffer is set for as long as an intel_framebuffer is attached to an object, but obj->pin_global was only raised for as long as the object was active. In practice, this means that we consider the object as being on the scanout for longer than is strictly required, causing us to be more proactive in flushing -- though it should be true that we would have flushed eventually when the back became the front, except that on the flip path that flush is async but when hit from another ioctl it will be synchronous. 
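(For context: i915_gem_object_is_framebuffer(), used throughout the diff below, is essentially a NULL test on obj->frontbuffer. A minimal sketch follows; the exact helper in i915_gem_object.h may differ in detail:)

	static inline bool
	i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
	{
		return READ_ONCE(obj->frontbuffer);
	}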
v2: i915_gem_object_is_framebuffer() Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Ville Syrjälä Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190902040303.14195-5-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/display/intel_display.c | 2 ++ drivers/gpu/drm/i915/display/intel_frontbuffer.c | 13 ++++++--- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 34 +++++++----------------- drivers/gpu/drm/i915/gem/i915_gem_object.h | 3 ++- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 2 -- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 15 +++-------- drivers/gpu/drm/i915/i915_debugfs.c | 12 +++------ 7 files changed, 31 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index a03dc6eb61f8..12a94c6e3d51 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -2080,6 +2080,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, u32 alignment; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + if (WARN_ON(!i915_gem_object_is_framebuffer(obj))) + return ERR_PTR(-EINVAL); alignment = intel_surf_alignment(fb, 0); diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 719379774fa5..fc40dc1fdbcc 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -220,11 +220,18 @@ static void frontbuffer_release(struct kref *ref) { struct intel_frontbuffer *front = container_of(ref, typeof(*front), ref); + struct drm_i915_gem_object *obj = front->obj; + struct i915_vma *vma; - front->obj->frontbuffer = NULL; - spin_unlock(&to_i915(front->obj->base.dev)->fb_tracking.lock); + spin_lock(&obj->vma.lock); + for_each_ggtt_vma(vma, obj) + vma->display_alignment = I915_GTT_MIN_ALIGNMENT; + spin_unlock(&obj->vma.lock); - i915_gem_object_put(front->obj); + obj->frontbuffer = NULL; + spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock); + + i915_gem_object_put(obj); kfree(front); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 9c58e8fac1d9..6af740a5e3db 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -27,7 +27,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) { - if (!READ_ONCE(obj->pin_global)) + if (!i915_gem_object_is_framebuffer(obj)) return; i915_gem_object_lock(obj); @@ -422,12 +422,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, assert_object_held(obj); - /* Mark the global pin early so that we account for the - * display coherency whilst setting up the cache domains. - */ - obj->pin_global++; - - /* The display engine is not coherent with the LLC cache on gen6. As + /* + * The display engine is not coherent with the LLC cache on gen6. As * a result, we make sure that the pinning that is about to occur is * done with uncached PTEs. This is lowest common denominator for all * chipsets. @@ -439,12 +435,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, ret = i915_gem_object_set_cache_level(obj, HAS_WT(to_i915(obj->base.dev)) ? 
I915_CACHE_WT : I915_CACHE_NONE); - if (ret) { - vma = ERR_PTR(ret); - goto err_unpin_global; - } + if (ret) + return ERR_PTR(ret); - /* As the user may map the buffer once pinned in the display plane + /* + * As the user may map the buffer once pinned in the display plane * (e.g. libkms for the bootup splash), we have to ensure that we * always use map_and_fenceable for all scanout buffers. However, * it may simply be too big to fit into mappable, in which case @@ -461,22 +456,19 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, if (IS_ERR(vma)) vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); if (IS_ERR(vma)) - goto err_unpin_global; + return vma; vma->display_alignment = max_t(u64, vma->display_alignment, alignment); __i915_gem_object_flush_for_display(obj); - /* It should now be out of any other write domains, and we can update + /* + * It should now be out of any other write domains, and we can update * the domain values for our changes. */ obj->read_domains |= I915_GEM_DOMAIN_GTT; return vma; - -err_unpin_global: - obj->pin_global--; - return vma; } static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) @@ -514,12 +506,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) assert_object_held(obj); - if (WARN_ON(obj->pin_global == 0)) - return; - - if (--obj->pin_global == 0) - vma->display_alignment = I915_GTT_MIN_ALIGNMENT; - /* Bump the LRU to try and avoid premature eviction whilst flipping */ i915_gem_object_bump_inactive_ggtt(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 5efb9936e05b..29b9eddc4c7f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -406,7 +406,8 @@ static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) return true; - return obj->pin_global; /* currently in use by HW, keep flushed */ + /* Currently in use by HW (display engine)? Keep flushed. */ + return i915_gem_object_is_framebuffer(obj); } static inline void __start_cpu_write(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index ede0eb4218a8..13b9dc0e1a89 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -152,8 +152,6 @@ struct drm_i915_gem_object { /** Count of VMA actually bound by this object */ atomic_t bind_count; - /** Count of how many global VMA are currently pinned for use by HW */ - unsigned int pin_global; struct { struct mutex lock; /* protects the pages and their use */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index edd21d14e64f..4e55cfc2b0dc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -61,7 +61,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) if (!i915_gem_object_is_shrinkable(obj)) return false; - /* Only report true if by unbinding the object and putting its pages + /* + * Only report true if by unbinding the object and putting its pages * we can actually make forward progress towards freeing physical * pages. 
* @@ -72,16 +73,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count)) return false; - /* If any vma are "permanently" pinned, it will prevent us from - * reclaiming the obj->mm.pages. We only allow scanout objects to claim - * a permanent pin, along with a few others like the context objects. - * To simplify the scan, and to avoid walking the list of vma under the - * object, we just check the count of its permanently pinned. - */ - if (READ_ONCE(obj->pin_global)) - return false; - - /* We can only return physical pages to the system if we can either + /* + * We can only return physical pages to the system if we can either * discard the contents (because the user has marked them as being * purgeable) or if we can move their contents out to swap. */ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 974936917329..9798f27a697a 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -77,11 +77,6 @@ static int i915_capabilities(struct seq_file *m, void *data) return 0; } -static char get_pin_flag(struct drm_i915_gem_object *obj) -{ - return obj->pin_global ? 'p' : ' '; -} - static char get_tiling_flag(struct drm_i915_gem_object *obj) { switch (i915_gem_object_get_tiling(obj)) { @@ -140,9 +135,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) struct i915_vma *vma; int pin_count = 0; - seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s", + seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s", &obj->base, - get_pin_flag(obj), get_tiling_flag(obj), get_global_flag(obj), get_pin_mapped_flag(obj), @@ -221,8 +215,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (pinned x %d)", pin_count); if (obj->stolen) seq_printf(m, " (stolen: %08llx)", obj->stolen->start); - if (obj->pin_global) - seq_printf(m, " (global)"); + if (i915_gem_object_is_framebuffer(obj)) + seq_printf(m, " (fb)"); engine = i915_gem_object_last_write_engine(obj); if (engine) -- cgit v1.2.3 From f2690074462bdb99530179f41ae73b45e765b761 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 1 Sep 2019 12:04:31 +0100 Subject: drm/i915/selftests: Remove unused __engines_name() This function was never used and probably will never be used, so remove it. 
Signed-off-by: Chris Wilson Reviewed-by: Joonas Lahtinen Link: https://patchwork.freedesktop.org/patch/msgid/20190901110431.12393-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index da54a718c712..dc25bcc3e372 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -1546,21 +1546,6 @@ out_unlock: return err; } -static __maybe_unused const char * -__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines) -{ - struct intel_engine_cs *engine; - intel_engine_mask_t tmp; - - if (engines == ALL_ENGINES) - return "all"; - - for_each_engine_masked(engine, i915, engines, tmp) - return engine->name; - - return "none"; -} - static bool skip_unused_engines(struct intel_context *ce, void *data) { return !ce->state; -- cgit v1.2.3 From 8f9fb61caed13e282e1e3387e64905b90cc65abd Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 2 Sep 2019 05:02:46 +0100 Subject: drm/i915: Refresh the errno to vmf_fault translations It's been a long time since we accidentally reported -EIO upon wedging, it can now only be generated by failure to swap in a page. Signed-off-by: Chris Wilson Cc: Abdiel Janulgue Reviewed-by: Abdiel Janulgue Link: https://patchwork.freedesktop.org/patch/msgid/20190902040303.14195-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 39 ++++++++++++-------------------- 1 file changed, 15 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 261c9bd83f51..82db2b783123 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -287,6 +287,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) view.type = I915_GGTT_VIEW_PARTIAL; vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); } + + /* The entire mappable GGTT is pinned? Unexpected! */ + GEM_BUG_ON(vma == ERR_PTR(-ENOSPC)); } if (IS_ERR(vma)) { ret = PTR_ERR(vma); @@ -333,23 +336,19 @@ err_rpm: i915_gem_object_unpin_pages(obj); err: switch (ret) { - case -EIO: - /* - * We eat errors when the gpu is terminally wedged to avoid - * userspace unduly crashing (gl has no provisions for mmaps to - * fail). But any other -EIO isn't ours (e.g. swap in failure) - * and so needs to be reported. - */ - if (!intel_gt_is_wedged(ggtt->vm.gt)) - return VM_FAULT_SIGBUS; - /* else, fall through */ - case -EAGAIN: - /* - * EAGAIN means the gpu is hung and we'll wait for the error - * handler to reset everything when re-faulting in - * i915_mutex_lock_interruptible. - */ + default: + WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret); + /* fallthrough */ + case -EIO: /* shmemfs failure from swap device */ + case -EFAULT: /* purged object */ + return VM_FAULT_SIGBUS; + + case -ENOSPC: /* shmemfs allocation failure */ + case -ENOMEM: /* our allocation failure */ + return VM_FAULT_OOM; + case 0: + case -EAGAIN: case -ERESTARTSYS: case -EINTR: case -EBUSY: @@ -358,14 +357,6 @@ err: * already did the job. 
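(Worked example, not part of the patch: the precision returned here later feeds err = 0xffff >> bit_precision in intel_color_lut_equal(), i.e. the tolerance allowed when comparing a 16-bit software LUT entry against the hardware readout:

	0xffff >> 8  = 0x00ff   (8-bit legacy gamma)
	0xffff >> 10 = 0x003f   (10-bit gamma)
	0xffff >> 16 = 0x0000   (16-bit, exact match required)
)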
*/ return VM_FAULT_NOPAGE; - case -ENOMEM: - return VM_FAULT_OOM; - case -ENOSPC: - case -EFAULT: - return VM_FAULT_SIGBUS; - default: - WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret); - return VM_FAULT_SIGBUS; } } -- cgit v1.2.3 From b1a4383d1e6e6003fceba9aae14bda77bfe03dc3 Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Wed, 4 Sep 2019 00:52:52 +0530 Subject: drm/i915/display: Add debug log for color parameters Add debug log for color related parameters like gamma_mode, gamma_enable, csc_enable, etc inside intel_dump_pipe_config(). v6: -Added debug log for color para in intel_dump_pipe_config [Jani] v7: -Split patch 3 into 4 patches v8: -Corrected alignment [Uma] Signed-off-by: Swati Sharma Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1567538578-4489-3-git-send-email-swati2.sharma@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 12a94c6e3d51..540dffa0a601 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -12140,6 +12140,15 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); + if (IS_CHERRYVIEW(dev_priv)) + DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", + pipe_config->cgm_mode, pipe_config->gamma_mode, + pipe_config->gamma_enable, pipe_config->csc_enable); + else + DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", + pipe_config->csc_mode, pipe_config->gamma_mode, + pipe_config->gamma_enable, pipe_config->csc_enable); + dump_planes: if (!state) return; -- cgit v1.2.3 From 145450f6a42df99523967a0da42bdb00dacb413b Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Wed, 4 Sep 2019 00:52:51 +0530 Subject: drm/i915/display: Add func to get gamma bit precision Each platform supports different gamma modes and each gamma mode has different bit precision. Here bit precision corresponds to number of bits the hw LUT supports. Add func per platform to return bit precision corresponding to gamma mode which will be later used as a parameter in lut comparison function intel_color_lut_equal(). This is done for legacy, ilk, glk and their variant platforms. 
v6: -Added func intel_color_get_bit_precision() to get bit precision for gamma and degamma lut readout depending upon platform and corresponding to load_luts() [Ankit] -Made patch11 as patch3 [Jani] v7: -Renamed func intel_color_get_bit_precision() to intel_color_get_gamma_bit_precision() -Added separate function/platform for gamma bit precision [Ville] -Corrected checkpatch warnings v8: -Split patch 3 into 4 separate patches v9: -Changed commit message, gave more info [Uma] -Added precision func for icl+ platform v10: -Removed precision func for chv and icl+ platforms [Jani] -Added gamma_enable check once [Jani] Signed-off-by: Swati Sharma Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1567538578-4489-2-git-send-email-swati2.sharma@intel.com --- drivers/gpu/drm/i915/display/intel_color.c | 60 ++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_color.h | 1 + 2 files changed, 61 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 71a0201437a9..b5c0c6583568 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1371,6 +1371,66 @@ static int icl_color_check(struct intel_crtc_state *crtc_state) return 0; } +static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state) +{ + switch (crtc_state->gamma_mode) { + case GAMMA_MODE_MODE_8BIT: + return 8; + case GAMMA_MODE_MODE_10BIT: + return 16; + default: + MISSING_CASE(crtc_state->gamma_mode); + return 0; + } +} + +static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state) +{ + if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0) + return 0; + + switch (crtc_state->gamma_mode) { + case GAMMA_MODE_MODE_8BIT: + return 8; + case GAMMA_MODE_MODE_10BIT: + return 10; + default: + MISSING_CASE(crtc_state->gamma_mode); + return 0; + } +} + +static int glk_gamma_precision(const struct intel_crtc_state *crtc_state) +{ + switch (crtc_state->gamma_mode) { + case GAMMA_MODE_MODE_8BIT: + return 8; + case GAMMA_MODE_MODE_10BIT: + return 10; + default: + MISSING_CASE(crtc_state->gamma_mode); + return 0; + } +} + +int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + if (!crtc_state->gamma_enable) + return 0; + + if (HAS_GMCH(dev_priv) && !IS_CHERRYVIEW(dev_priv)) + return i9xx_gamma_precision(crtc_state); + else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + return glk_gamma_precision(crtc_state); + else if (IS_IRONLAKE(dev_priv)) + return ilk_gamma_precision(crtc_state); + + return 0; +} + void intel_color_init(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h index 057e8ac63555..0226d3a784b6 100644 --- a/drivers/gpu/drm/i915/display/intel_color.h +++ b/drivers/gpu/drm/i915/display/intel_color.h @@ -14,5 +14,6 @@ int intel_color_check(struct intel_crtc_state *crtc_state); void intel_color_commit(const struct intel_crtc_state *crtc_state); void intel_color_load_luts(const struct intel_crtc_state *crtc_state); void intel_color_get_config(struct intel_crtc_state *crtc_state); +int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state); #endif /* __INTEL_COLOR_H__ */ -- cgit v1.2.3 From 
e9c8f591445d7b99883889caab3c5d1aafcee360 Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Wed, 4 Sep 2019 00:52:53 +0530 Subject: drm/i915/display: Add func to compare hw/sw gamma lut Add func intel_color_lut_equal() to compare hw/sw gamma lut values. Since hw/sw gamma lut sizes and lut entries comparison will be different for different gamma modes, add gamma mode dependent checks. v3: -Rebase v4: -Renamed intel_compare_color_lut() to intel_color_lut_equal() [Jani] -Added the default label above the correct label [Jani] -Corrected smatch warn "variable dereferenced before check" [Dan Carpenter] v5: -Added condition (!blob1 && !blob2) return true [Jani] v6: -Made patch11 as patch3 [Jani] v8: -Split patch 3 into 4 patches -Optimized blob check condition [Ville] v9: -Exclude spilt gamma mode (bdw and ivb platforms) as there is exception in way gamma values are written in hardware [Ville] -Added exception made in commit [Uma] -Dropped else, character limit and indentation [Uma] -Added multi segmented gama mode for icl+ platforms [Uma] v10: -Dropped multi segmented mode for icl+ platforms [Jani] -Removed references of sw and hw state in compare code [Jani] -Dropped inline from func [Jani] Signed-off-by: Swati Sharma Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1567538578-4489-4-git-send-email-swati2.sharma@intel.com --- drivers/gpu/drm/i915/display/intel_color.c | 72 ++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_color.h | 6 +++ 2 files changed, 78 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index b5c0c6583568..1ab561d79706 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1431,6 +1431,78 @@ int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_stat return 0; } +static bool err_check(struct drm_color_lut *lut1, + struct drm_color_lut *lut2, u32 err) +{ + return ((abs((long)lut2->red - lut1->red)) <= err) && + ((abs((long)lut2->blue - lut1->blue)) <= err) && + ((abs((long)lut2->green - lut1->green)) <= err); +} + +static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1, + struct drm_color_lut *lut2, + int lut_size, u32 err) +{ + int i; + + for (i = 0; i < lut_size; i++) { + if (!err_check(&lut1[i], &lut2[i], err)) + return false; + } + + return true; +} + +bool intel_color_lut_equal(struct drm_property_blob *blob1, + struct drm_property_blob *blob2, + u32 gamma_mode, u32 bit_precision) +{ + struct drm_color_lut *lut1, *lut2; + int lut_size1, lut_size2; + u32 err; + + if (!blob1 != !blob2) + return false; + + if (!blob1) + return true; + + lut_size1 = drm_color_lut_size(blob1); + lut_size2 = drm_color_lut_size(blob2); + + /* check sw and hw lut size */ + switch (gamma_mode) { + case GAMMA_MODE_MODE_8BIT: + case GAMMA_MODE_MODE_10BIT: + if (lut_size1 != lut_size2) + return false; + break; + default: + MISSING_CASE(gamma_mode); + return false; + } + + lut1 = blob1->data; + lut2 = blob2->data; + + err = 0xffff >> bit_precision; + + /* check sw and hw lut entry to be equal */ + switch (gamma_mode) { + case GAMMA_MODE_MODE_8BIT: + case GAMMA_MODE_MODE_10BIT: + if (!intel_color_lut_entry_equal(lut1, lut2, + lut_size2, err)) + return false; + break; + default: + MISSING_CASE(gamma_mode); + return false; + } + + return true; +} + void intel_color_init(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); diff --git 
a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h index 0226d3a784b6..173727aaa24d 100644 --- a/drivers/gpu/drm/i915/display/intel_color.h +++ b/drivers/gpu/drm/i915/display/intel_color.h @@ -6,8 +6,11 @@ #ifndef __INTEL_COLOR_H__ #define __INTEL_COLOR_H__ +#include + struct intel_crtc_state; struct intel_crtc; +struct drm_property_blob; void intel_color_init(struct intel_crtc *crtc); int intel_color_check(struct intel_crtc_state *crtc_state); @@ -15,5 +18,8 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state); void intel_color_load_luts(const struct intel_crtc_state *crtc_state); void intel_color_get_config(struct intel_crtc_state *crtc_state); int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state); +bool intel_color_lut_equal(struct drm_property_blob *blob1, + struct drm_property_blob *blob2, + u32 gamma_mode, u32 bit_precision); #endif /* __INTEL_COLOR_H__ */ -- cgit v1.2.3 From 7e764059cf708f262aa3c406af91f9f8f79e7f52 Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Wed, 4 Sep 2019 00:52:54 +0530 Subject: drm/i915/display: Add macro to compare gamma hw/sw lut Add macro to compare hw/sw gamma lut values. First need to check whether hw/sw gamma mode matches or not. If not no need to compare lut values, if matches then only compare lut entries. v5: -Called PIPE_CONF_CHECK_COLOR_LUT inside if (!adjust) [Jani] -Added #undef PIPE_CONF_CHECK_COLOR_LUT [Jani] v8: -Added check for gamma mode before gamma lut entry comparison [Jani] -Split patch 3 into 4 patches Signed-off-by: Swati Sharma Reviewed-by: Uma Shankar Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1567538578-4489-5-git-send-email-swati2.sharma@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 540dffa0a601..06cf2171474d 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -12531,6 +12531,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, { struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev); bool ret = true; + u32 bp_gamma = 0; bool fixup_inherited = fastset && (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) && !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED); @@ -12682,6 +12683,24 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, } \ } while (0) +#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \ + if (current_config->name1 != pipe_config->name1) { \ + pipe_config_mismatch(fastset, __stringify(name1), \ + "(expected %i, found %i, won't compare lut values)\n", \ + current_config->name1, \ + pipe_config->name1); \ + ret = false;\ + } else { \ + if (!intel_color_lut_equal(current_config->name2, \ + pipe_config->name2, pipe_config->name1, \ + bit_precision)) { \ + pipe_config_mismatch(fastset, __stringify(name2), \ + "hw_state doesn't match sw_state\n"); \ + ret = false; \ + } \ + } \ +} while (0) + #define PIPE_CONF_QUIRK(quirk) \ ((current_config->quirks | pipe_config->quirks) & (quirk)) @@ -12777,6 +12796,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_X(csc_mode); PIPE_CONF_CHECK_BOOL(gamma_enable); PIPE_CONF_CHECK_BOOL(csc_enable); + + bp_gamma = 
intel_color_get_gamma_bit_precision(pipe_config); + if (bp_gamma) + PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, base.gamma_lut, bp_gamma); + } PIPE_CONF_CHECK_BOOL(double_wide); @@ -12839,6 +12863,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #undef PIPE_CONF_CHECK_P #undef PIPE_CONF_CHECK_FLAGS #undef PIPE_CONF_CHECK_CLOCK_FUZZY +#undef PIPE_CONF_CHECK_COLOR_LUT #undef PIPE_CONF_QUIRK return ret; -- cgit v1.2.3 From 1af22383829864299102ca0c2eab458f755a9971 Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Wed, 4 Sep 2019 00:52:55 +0530 Subject: drm/i915/display: Extract i9xx_read_luts() For the legacy(gen < 4) gamma, add hw read out to create hw blob of gamma lut values. Also, add function intel_color_lut_pack to convert hw value with given bit precision to lut property val. v4: -No need to initialize *blob [Jani] -Removed right shifts [Jani] -Dropped dev local var [Jani] v5: -Returned blob instead of assigning it internally within the function [Ville] -Renamed function i9xx_get_color_config() to i9xx_read_luts() -Renamed i9xx_get_config_internal() to i9xx_read_lut_8() [Ville] v9: -Change in commit message [Jani, Uma] -Wrap commit within 75 characters [Uma] -Use macro for 256 [Uma] -Made read func para as const [Ville, Uma] v10: -Made i9xx_read_luts() static [Jani] Signed-off-by: Swati Sharma Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1567538578-4489-6-git-send-email-swati2.sharma@intel.com --- drivers/gpu/drm/i915/display/intel_color.c | 54 ++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 3 ++ 2 files changed, 57 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 1ab561d79706..55076def6a98 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1503,6 +1503,59 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1, return true; } +/* convert hw value with given bit_precision to lut property val */ +static u32 intel_color_lut_pack(u32 val, u32 bit_precision) +{ + u32 max = 0xffff >> (16 - bit_precision); + + val = clamp_val(val, 0, max); + + if (bit_precision < 16) + val <<= 16 - bit_precision; + + return val; +} + +static struct drm_property_blob * +i9xx_read_lut_8(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + struct drm_property_blob *blob; + struct drm_color_lut *blob_data; + u32 i, val; + + blob = drm_property_create_blob(&dev_priv->drm, + sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH, + NULL); + if (IS_ERR(blob)) + return NULL; + + blob_data = blob->data; + + for (i = 0; i < LEGACY_LUT_LENGTH; i++) { + if (HAS_GMCH(dev_priv)) + val = I915_READ(PALETTE(pipe, i)); + else + val = I915_READ(LGC_PALETTE(pipe, i)); + + blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( + LGC_PALETTE_RED_MASK, val), 8); + blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( + LGC_PALETTE_GREEN_MASK, val), 8); + blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( + LGC_PALETTE_BLUE_MASK, val), 8); + } + + return blob; +} + +static void i9xx_read_luts(struct intel_crtc_state *crtc_state) +{ + crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); +} + void intel_color_init(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1523,6 +1576,7 @@ void 
intel_color_init(struct intel_crtc *crtc) dev_priv->display.color_check = i9xx_color_check; dev_priv->display.color_commit = i9xx_color_commit; dev_priv->display.load_luts = i9xx_load_luts; + dev_priv->display.read_luts = i9xx_read_luts; } } else { if (INTEL_GEN(dev_priv) >= 11) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6c43b8c583bb..aac717fa4e25 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7192,6 +7192,9 @@ enum { /* legacy palette */ #define _LGC_PALETTE_A 0x4a000 #define _LGC_PALETTE_B 0x4a800 +#define LGC_PALETTE_RED_MASK REG_GENMASK(23, 16) +#define LGC_PALETTE_GREEN_MASK REG_GENMASK(15, 8) +#define LGC_PALETTE_BLUE_MASK REG_GENMASK(7, 0) #define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4) /* ilk/snb precision palette */ -- cgit v1.2.3 From 6b97b118d4d542c7bc25b725c6de3947fffb921b Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Wed, 4 Sep 2019 00:52:56 +0530 Subject: drm/i915/display: Extract ilk_read_luts() For ilk, add hw read out to create hw blob of gamma lut values. v4: -No need to initialize *blob [Jani] -Removed right shifts [Jani] -Dropped dev local var [Jani] v5: -Returned blob instead of assigning it internally within the function [Ville] -Renamed ilk_get_color_config() to ilk_read_luts() [Ville] v9: -80 character limit [Uma] -Made read func para as const [Ville, Uma] -Renamed ilk_read_gamma_lut() to ilk_read_lut_10() [Uma, Ville] v10: -Made ilk_read_luts() static [Jani] -ilk_load_lut_10 has lut_size, not (lut_size - 1) [Jani] Signed-off-by: Swati Sharma Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1567538578-4489-7-git-send-email-swati2.sharma@intel.com --- drivers/gpu/drm/i915/display/intel_color.c | 45 +++++++++++++++++++++++++++++- drivers/gpu/drm/i915/i915_reg.h | 3 ++ 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 55076def6a98..80f82b2bd284 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1556,6 +1556,47 @@ static void i9xx_read_luts(struct intel_crtc_state *crtc_state) crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); } +static struct drm_property_blob * +ilk_read_lut_10(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + enum pipe pipe = crtc->pipe; + struct drm_property_blob *blob; + struct drm_color_lut *blob_data; + u32 i, val; + + blob = drm_property_create_blob(&dev_priv->drm, + sizeof(struct drm_color_lut) * lut_size, + NULL); + if (IS_ERR(blob)) + return NULL; + + blob_data = blob->data; + + for (i = 0; i < lut_size; i++) { + val = I915_READ(PREC_PALETTE(pipe, i)); + + blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( + PREC_PALETTE_RED_MASK, val), 10); + blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( + PREC_PALETTE_GREEN_MASK, val), 10); + blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( + PREC_PALETTE_BLUE_MASK, val), 10); + } + + return blob; +} + +static void ilk_read_luts(struct intel_crtc_state *crtc_state) +{ + if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) + crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); + else + crtc_state->base.gamma_lut = ilk_read_lut_10(crtc_state); +} + 
void intel_color_init(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1603,8 +1644,10 @@ void intel_color_init(struct intel_crtc *crtc) dev_priv->display.load_luts = bdw_load_luts; else if (INTEL_GEN(dev_priv) >= 7) dev_priv->display.load_luts = ivb_load_luts; - else + else { dev_priv->display.load_luts = ilk_load_luts; + dev_priv->display.read_luts = ilk_read_luts; + } } drm_crtc_enable_color_mgmt(&crtc->base, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index aac717fa4e25..6ebb76a430e4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7200,6 +7200,9 @@ enum { /* ilk/snb precision palette */ #define _PREC_PALETTE_A 0x4b000 #define _PREC_PALETTE_B 0x4c000 +#define PREC_PALETTE_RED_MASK REG_GENMASK(29, 20) +#define PREC_PALETTE_GREEN_MASK REG_GENMASK(19, 10) +#define PREC_PALETTE_BLUE_MASK REG_GENMASK(9, 0) #define PREC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _PREC_PALETTE_A, _PREC_PALETTE_B) + (i) * 4) #define _PREC_PIPEAGCMAX 0x4d000 -- cgit v1.2.3 From 4bb6a9d5d9a8289673c4cb0786d44be8a63c21db Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Wed, 4 Sep 2019 00:52:57 +0530 Subject: drm/i915/display: Extract glk_read_luts() For glk, add hw read out to create hw blob of gamma lut values. v4: -No need to initialize *blob [Jani] -Removed right shifts [Jani] -Dropped dev local var [Jani] v5: -Returned blob instead of assigning it internally within the function [Ville] -Renamed glk_get_color_config() to glk_read_luts() [Ville] -Added degamma validation [Ville] v9: -80 character limit [Uma] -Made read func para as const [Ville, Uma] Signed-off-by: Swati Sharma Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1567538578-4489-8-git-send-email-swati2.sharma@intel.com --- drivers/gpu/drm/i915/display/intel_color.c | 51 ++++++++++++++++++++++++++++-- drivers/gpu/drm/i915/i915_reg.h | 3 ++ 2 files changed, 52 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 80f82b2bd284..6d641e159899 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1597,6 +1597,52 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state) crtc_state->base.gamma_lut = ilk_read_lut_10(crtc_state); } +static struct drm_property_blob * +glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + int hw_lut_size = ivb_lut_10_size(prec_index); + enum pipe pipe = crtc->pipe; + struct drm_property_blob *blob; + struct drm_color_lut *blob_data; + u32 i, val; + + I915_WRITE(PREC_PAL_INDEX(pipe), prec_index | + PAL_PREC_AUTO_INCREMENT); + + blob = drm_property_create_blob(&dev_priv->drm, + sizeof(struct drm_color_lut) * hw_lut_size, + NULL); + if (IS_ERR(blob)) + return NULL; + + blob_data = blob->data; + + for (i = 0; i < hw_lut_size; i++) { + val = I915_READ(PREC_PAL_DATA(pipe)); + + blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( + PREC_PAL_DATA_RED_MASK, val), 10); + blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( + PREC_PAL_DATA_GREEN_MASK, val), 10); + blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( + PREC_PAL_DATA_BLUE_MASK, val), 10); + } + + I915_WRITE(PREC_PAL_INDEX(pipe), 0); + + return blob; +} + +static void glk_read_luts(struct intel_crtc_state 
*crtc_state) +{ + if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) + crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); + else + crtc_state->base.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0)); +} + void intel_color_init(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1638,9 +1684,10 @@ void intel_color_init(struct intel_crtc *crtc) if (INTEL_GEN(dev_priv) >= 11) dev_priv->display.load_luts = icl_load_luts; - else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) { dev_priv->display.load_luts = glk_load_luts; - else if (INTEL_GEN(dev_priv) >= 8) + dev_priv->display.read_luts = glk_read_luts; + } else if (INTEL_GEN(dev_priv) >= 8) dev_priv->display.load_luts = bdw_load_luts; else if (INTEL_GEN(dev_priv) >= 7) dev_priv->display.load_luts = ivb_load_luts; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6ebb76a430e4..45ed96d7c599 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -10351,6 +10351,9 @@ enum skl_power_gate { #define _PAL_PREC_GC_MAX_A 0x4A410 #define _PAL_PREC_GC_MAX_B 0x4AC10 #define _PAL_PREC_GC_MAX_C 0x4B410 +#define PREC_PAL_DATA_RED_MASK REG_GENMASK(29, 20) +#define PREC_PAL_DATA_GREEN_MASK REG_GENMASK(19, 10) +#define PREC_PAL_DATA_BLUE_MASK REG_GENMASK(9, 0) #define _PAL_PREC_EXT_GC_MAX_A 0x4A420 #define _PAL_PREC_EXT_GC_MAX_B 0x4AC20 #define _PAL_PREC_EXT_GC_MAX_C 0x4B420 -- cgit v1.2.3 From 9d7b01e93526efe79dbf75b69cc5972b5a4f7b37 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 4 Sep 2019 11:07:07 +0100 Subject: drm/i915: Restore relaxed padding (OCL_OOB_SUPPRES_ENABLE) for skl+ This bit was fliped on for "syncing dependencies between camera and graphics". BSpec has no recollection why, and it is causing unrecoverable GPU hangs with Vulkan compute workloads. From BSpec, setting bit5 to 0 enables relaxed padding requirements for buffers, 1D and 2D non-array, non-MSAA, non-mip-mapped linear surfaces; and *must* be set to 0h on skl+ to ensure "Out of Bounds" case is suppressed. 
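One aside worth spelling out, since it is an assumption behind the fix rather than something visible in the diff: HALF_SLICE_CHICKEN3 is a masked register (the write-enable mask lives in the upper 16 bits), so dropping the WA_SET_BIT_MASKED() call below means the driver simply stops touching bit5 and leaves it at its hardware default instead of actively clearing it. If a platform ever needed the bit forced off, the companion WA_CLR_BIT_MASKED() helper from the same file would be the idiomatic way, roughly:

    /* hypothetical sketch, not part of this patch */
    WA_CLR_BIT_MASKED(HALF_SLICE_CHICKEN3,
                      GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);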
Reported-by: Jason Ekstrand Suggested-by: Jason Ekstrand Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=110998 Fixes: 8424171e135c ("drm/i915/gen9: h/w w/a: syncing dependencies between camera and graphics") Signed-off-by: Chris Wilson Tested-by: denys.kostin@globallogic.com Cc: Jason Ekstrand Cc: Mika Kuoppala Cc: # v4.1+ Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190904100707.7377-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 8639fcccdb42..243d3f77be13 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -297,11 +297,6 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine, FLOW_CONTROL_ENABLE | PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); - /* Syncing dependencies between camera and graphics:skl,bxt,kbl */ - if (!IS_COFFEELAKE(i915)) - WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, - GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); - /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */ /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, -- cgit v1.2.3 From e838bfa8e170415fa3cc8e83ecb171e809c0c422 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 3 Sep 2019 18:40:18 +0300 Subject: Revert "drm/i915: Fix DP-MST crtc_mask" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 4eaceea3a00f8e936a7f48dcd0c975a57f88930f. Several userspace clients (modesetting ddx and mutter+wayland at least) handle encoder.possible_crtcs incorrectly. What they essentially do is the following: possible_crtcs = ~0; for_each_possible_encoder(connector) possible_crtcs &= encoder->possible_crtcs; Ie. they calculate the intersection of the possible_crtcs for the connector when they really should be calculating the union instead. In our case each MST encoder now has just one unique bit set, and so the intersection is always zero. The end result is that MST connectors can't be lit up because no crtc can be found to drive them. I've submitted a fix for the modesetting ddx [1], and complained on #wayland about mutter, so hopefully the situation will improve in the future. In the meantime we have regression, and so must go back to the old way of misconfiguring possible_crtcs in the kernel. 
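For contrast, a sketch of the union-based computation the clients should be doing instead, written in the same informal pseudocode as above (for_each_possible_encoder() is shorthand here, not a literal API):

    possible_crtcs = 0;
    for_each_possible_encoder(connector)
            possible_crtcs |= encoder->possible_crtcs;

i.e. a crtc is usable for the connector if any of its encoders can drive it.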
[1] https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277 Cc: Jonas Ådahl Cc: Stanislav Lisovskiy Cc: Lionel Landwerlin Cc: Dhinakaran Pandiyan Cc: Lucas De Marchi Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111507 Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190903154018.26357-1-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_dp_mst.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 6df240a01b8c..37366f81255b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -615,7 +615,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum intel_encoder->type = INTEL_OUTPUT_DP_MST; intel_encoder->power_domain = intel_dig_port->base.power_domain; intel_encoder->port = intel_dig_port->base.port; - intel_encoder->crtc_mask = BIT(pipe); + intel_encoder->crtc_mask = 0x7; intel_encoder->cloneable = 0; intel_encoder->compute_config = intel_dp_mst_compute_config; -- cgit v1.2.3 From ab016914984e2a7405756dfc8a8e4cb54a4c1b48 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 20 Aug 2019 21:54:51 +0200 Subject: drm/i915: disable set/get_tiling ioctl on gen12+ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The cpu (de)tiler hw is gone, this stopped being useful. Plus it never supported any of the fancy new tiling formats, which means userspace also stopped using the magic side-channel this provides. This would totally break a lot of the igts, but they're already broken for the same reasons as userspace on gen12 would be. v2: Look at ggtt->num_fences instead, that also avoids the need for a comment (Chris). This also means that gen12 support really needs to make sure num_fences is set to 0. There is a patch for that, but it checks for HAS_MAPPABLE_APERTURE, which I'm not sure is the right thing really. Adding relevant people. 
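To make the userspace-visible effect concrete, a minimal sketch of how a client could probe for the new behaviour, assuming the -EOPNOTSUPP return added in the hunks below and libdrm's drmIoctl() wrapper (header paths and error handling simplified):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    static bool try_set_x_tiling(int fd, uint32_t handle, uint32_t stride)
    {
            struct drm_i915_gem_set_tiling arg = {
                    .handle = handle,
                    .tiling_mode = I915_TILING_X,
                    .stride = stride,
            };

            if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg) == 0)
                    return true;

            /* gen12+ with ggtt.num_fences == 0: the tiling side-channel is gone */
            if (errno == EOPNOTSUPP)
                    return false;

            return false;   /* any other error: treat the bo as linear in this sketch */
    }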
Cc: Daniele Ceraolo Spurio Cc: Stuart Summers Cc: Matthew Auld Cc: Kenneth Graunke Cc: Jason Ekstrand Cc: Chris Wilson Cc: Lucas De Marchi Signed-off-by: Daniel Vetter Signed-off-by: Daniel Vetter Signed-off-by: José Roberto de Souza Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190820195451.15671-1-daniel.vetter@ffwll.ch --- drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index ca0c2f451742..e5d1ae8d4dba 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -313,10 +313,14 @@ int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_set_tiling *args = data; struct drm_i915_gem_object *obj; int err; + if (!dev_priv->ggtt.num_fences) + return -EOPNOTSUPP; + obj = i915_gem_object_lookup(file, args->handle); if (!obj) return -ENOENT; @@ -402,6 +406,9 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj; int err = -ENOENT; + if (!dev_priv->ggtt.num_fences) + return -EOPNOTSUPP; + rcu_read_lock(); obj = i915_gem_object_lookup_rcu(file, args->handle); if (obj) { -- cgit v1.2.3 From 2f3b87124b9f08518b43abf2266035dd22fdbd3c Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Wed, 4 Sep 2019 14:34:14 -0700 Subject: drm/i915/psr: Only handle interruptions of the transcoder in use MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It was enabling and checking PSR interruptions in every transcoder while it should keep the interruptions on the non-used transcoders masked. While doing this it gives us trouble on Tiger Lake if we are reading/writing to registers of disabled transcoders since from gen12 onwards the registers are relative to the transcoder. Instead of forcing them ON to access those registers, just avoid the accesses as they are not needed. v2 (Lucas): - Explain why we can't keep accessing all transcoders - Remove TODO about extending the irq handling to multiple instances: when/if implementing multiple instances it's pretty clear by the singleton psr that it needs to be extended - Fix intel_psr_debug_set() calling psr_irq_control() with psr.transcoder not set yet (from Imre). Now we only set the debug register right away if psr is already enabled. Otherwise we just record the value to be set when enabling the source. - Do not depend on the value of TRANSCODER_A. 
Just be relative to it (from Imre) - handle psr error last so we don't schedule the work before handling the other flags v3: - Adding a warning about setting reserverd bits on EDP_PSR_IMR Cc: Imre Deak Cc: Dhinakaran Pandiyan Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190904213419.27547-2-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_psr.c | 142 ++++++++++++------------------- drivers/gpu/drm/i915/i915_reg.h | 13 ++- 2 files changed, 60 insertions(+), 95 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 629b8b98a97f..1d99ffeaa36a 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -88,48 +88,20 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, } } -static int edp_psr_shift(enum transcoder cpu_transcoder) +static void psr_irq_control(struct drm_i915_private *dev_priv) { - switch (cpu_transcoder) { - case TRANSCODER_A: - return EDP_PSR_TRANSCODER_A_SHIFT; - case TRANSCODER_B: - return EDP_PSR_TRANSCODER_B_SHIFT; - case TRANSCODER_C: - return EDP_PSR_TRANSCODER_C_SHIFT; - default: - MISSING_CASE(cpu_transcoder); - /* fallthrough */ - case TRANSCODER_EDP: - return EDP_PSR_TRANSCODER_EDP_SHIFT; - } -} - -static void psr_irq_control(struct drm_i915_private *dev_priv, u32 debug) -{ - u32 debug_mask, mask; - enum transcoder cpu_transcoder; - u32 transcoders = BIT(TRANSCODER_EDP); - - if (INTEL_GEN(dev_priv) >= 8) - transcoders |= BIT(TRANSCODER_A) | - BIT(TRANSCODER_B) | - BIT(TRANSCODER_C); - - debug_mask = 0; - mask = 0; - for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { - int shift = edp_psr_shift(cpu_transcoder); - - mask |= EDP_PSR_ERROR(shift); - debug_mask |= EDP_PSR_POST_EXIT(shift) | - EDP_PSR_PRE_ENTRY(shift); - } - - if (debug & I915_PSR_DEBUG_IRQ) - mask |= debug_mask; - - I915_WRITE(EDP_PSR_IMR, ~mask); + enum transcoder trans = dev_priv->psr.transcoder; + u32 val, mask; + + mask = EDP_PSR_ERROR(trans); + if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ) + mask |= EDP_PSR_POST_EXIT(trans) | EDP_PSR_PRE_ENTRY(trans); + + /* Warning: it is masking/setting reserved bits too */ + val = I915_READ(EDP_PSR_IMR); + val &= ~EDP_PSR_TRANS_MASK(trans); + val |= ~mask; + I915_WRITE(EDP_PSR_IMR, val); } static void psr_event_print(u32 val, bool psr2_enabled) @@ -171,60 +143,48 @@ static void psr_event_print(u32 val, bool psr2_enabled) void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) { - u32 transcoders = BIT(TRANSCODER_EDP); - enum transcoder cpu_transcoder; + enum transcoder cpu_transcoder = dev_priv->psr.transcoder; ktime_t time_ns = ktime_get(); - u32 mask = 0; - - if (INTEL_GEN(dev_priv) >= 8) - transcoders |= BIT(TRANSCODER_A) | - BIT(TRANSCODER_B) | - BIT(TRANSCODER_C); - - for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { - int shift = edp_psr_shift(cpu_transcoder); - if (psr_iir & EDP_PSR_ERROR(shift)) { - DRM_WARN("[transcoder %s] PSR aux error\n", - transcoder_name(cpu_transcoder)); + if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) { + dev_priv->psr.last_entry_attempt = time_ns; + DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n", + transcoder_name(cpu_transcoder)); + } - dev_priv->psr.irq_aux_error = true; + if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) { + dev_priv->psr.last_exit = time_ns; + DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n", + 
transcoder_name(cpu_transcoder)); - /* - * If this interruption is not masked it will keep - * interrupting so fast that it prevents the scheduled - * work to run. - * Also after a PSR error, we don't want to arm PSR - * again so we don't care about unmask the interruption - * or unset irq_aux_error. - */ - mask |= EDP_PSR_ERROR(shift); - } + if (INTEL_GEN(dev_priv) >= 9) { + u32 val = I915_READ(PSR_EVENT(cpu_transcoder)); + bool psr2_enabled = dev_priv->psr.psr2_enabled; - if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) { - dev_priv->psr.last_entry_attempt = time_ns; - DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n", - transcoder_name(cpu_transcoder)); + I915_WRITE(PSR_EVENT(cpu_transcoder), val); + psr_event_print(val, psr2_enabled); } + } - if (psr_iir & EDP_PSR_POST_EXIT(shift)) { - dev_priv->psr.last_exit = time_ns; - DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n", - transcoder_name(cpu_transcoder)); + if (psr_iir & EDP_PSR_ERROR(cpu_transcoder)) { + u32 val; - if (INTEL_GEN(dev_priv) >= 9) { - u32 val = I915_READ(PSR_EVENT(cpu_transcoder)); - bool psr2_enabled = dev_priv->psr.psr2_enabled; + DRM_WARN("[transcoder %s] PSR aux error\n", + transcoder_name(cpu_transcoder)); - I915_WRITE(PSR_EVENT(cpu_transcoder), val); - psr_event_print(val, psr2_enabled); - } - } - } + dev_priv->psr.irq_aux_error = true; - if (mask) { - mask |= I915_READ(EDP_PSR_IMR); - I915_WRITE(EDP_PSR_IMR, mask); + /* + * If this interruption is not masked it will keep + * interrupting so fast that it prevents the scheduled + * work to run. + * Also after a PSR error, we don't want to arm PSR + * again so we don't care about unmask the interruption + * or unset irq_aux_error. + */ + val = I915_READ(EDP_PSR_IMR); + val |= EDP_PSR_ERROR(cpu_transcoder); + I915_WRITE(EDP_PSR_IMR, val); schedule_work(&dev_priv->psr.work); } @@ -747,7 +707,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask); - psr_irq_control(dev_priv, dev_priv->psr.debug); + psr_irq_control(dev_priv); } static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, @@ -772,7 +732,7 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, * to avoid any rendering problems. */ val = I915_READ(EDP_PSR_IIR); - val &= EDP_PSR_ERROR(edp_psr_shift(dev_priv->psr.transcoder)); + val &= EDP_PSR_ERROR(dev_priv->psr.transcoder); if (val) { dev_priv->psr.sink_not_reliable = true; DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n"); @@ -1120,7 +1080,13 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val) old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK; dev_priv->psr.debug = val; - psr_irq_control(dev_priv, dev_priv->psr.debug); + + /* + * Do it right away if it's already enabled, otherwise it will be done + * when enabling the source. 
+ */ + if (dev_priv->psr.enabled) + psr_irq_control(dev_priv); mutex_unlock(&dev_priv->psr.lock); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 45ed96d7c599..81f68455d492 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4225,13 +4225,12 @@ enum { /* Bspec claims those aren't shifted but stay at 0x64800 */ #define EDP_PSR_IMR _MMIO(0x64834) #define EDP_PSR_IIR _MMIO(0x64838) -#define EDP_PSR_ERROR(shift) (1 << ((shift) + 2)) -#define EDP_PSR_POST_EXIT(shift) (1 << ((shift) + 1)) -#define EDP_PSR_PRE_ENTRY(shift) (1 << (shift)) -#define EDP_PSR_TRANSCODER_C_SHIFT 24 -#define EDP_PSR_TRANSCODER_B_SHIFT 16 -#define EDP_PSR_TRANSCODER_A_SHIFT 8 -#define EDP_PSR_TRANSCODER_EDP_SHIFT 0 +#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \ + 0 : ((trans) - TRANSCODER_A + 1) * 8) +#define EDP_PSR_TRANS_MASK(trans) (0x7 << _EDP_PSR_TRANS_SHIFT(trans)) +#define EDP_PSR_ERROR(trans) (0x4 << _EDP_PSR_TRANS_SHIFT(trans)) +#define EDP_PSR_POST_EXIT(trans) (0x2 << _EDP_PSR_TRANS_SHIFT(trans)) +#define EDP_PSR_PRE_ENTRY(trans) (0x1 << _EDP_PSR_TRANS_SHIFT(trans)) #define _SRD_AUX_CTL_A 0x60810 #define _SRD_AUX_CTL_EDP 0x6f810 -- cgit v1.2.3 From 8241cfbe67f4082eee5fc72e5a8025c5b58c2ddf Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Wed, 4 Sep 2019 14:34:15 -0700 Subject: drm/i915/tgl: Access the right register when handling PSR interruptions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For older gens PSR IIR and IMR have fixed addresses. From TGL onwards those registers moved to each transcoder offset. The bits for the registers are defined without an offset per transcoder as right now we have one register per transcoder. So add a fake "trans_shift" when calculating the bits offsets: it will be 0 for gen12+ and psr.transcoder otherwise. 
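To make the shift arithmetic concrete, here is how the _EDP_PSR_TRANS_SHIFT()/EDP_PSR_*() macros introduced just above lay out the single pre-gen12 IMR/IIR, worked out by hand (illustration only, not code added by this patch):

    /*
     * Single EDP_PSR_IMR/IIR at 0x648xx, bits per transcoder:
     *   TRANSCODER_EDP -> shift 0  -> PRE_ENTRY/POST_EXIT/ERROR = bits 0/1/2
     *   TRANSCODER_A   -> shift 8  -> bits 8/9/10
     *   TRANSCODER_B   -> shift 16 -> bits 16/17/18
     *   TRANSCODER_C   -> shift 24 -> bits 24/25/26
     * EDP_PSR_TRANS_MASK() is simply 0x7 at the same shift.
     */

On gen12+ there is one TRANS_PSR_IMR/IIR per transcoder and, per the i915_reg.h comment added below, the bits are defined "as if using no shift (i.e. as if it was for TRANSCODER_EDP)", which is what the fake trans_shift stands in for.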
v2 (Lucas): change the implementation to use trans_shift instead of getting each bit value with a different macro Cc: Imre Deak Cc: Dhinakaran Pandiyan Cc: Rodrigo Vivi Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190904213419.27547-3-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_psr.c | 60 ++++++++++++++++++++++++-------- drivers/gpu/drm/i915/i915_irq.c | 51 +++++++++++++++++++++++---- drivers/gpu/drm/i915/i915_reg.h | 10 +++++- 3 files changed, 99 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 1d99ffeaa36a..b3c7eef53bf3 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -90,18 +90,33 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, static void psr_irq_control(struct drm_i915_private *dev_priv) { - enum transcoder trans = dev_priv->psr.transcoder; - u32 val, mask; + enum transcoder trans_shift; + u32 mask, val; + i915_reg_t imr_reg; - mask = EDP_PSR_ERROR(trans); + /* + * gen12+ has registers relative to transcoder and one per transcoder + * using the same bit definition: handle it as TRANSCODER_EDP to force + * 0 shift in bit definition + */ + if (INTEL_GEN(dev_priv) >= 12) { + trans_shift = 0; + imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder); + } else { + trans_shift = dev_priv->psr.transcoder; + imr_reg = EDP_PSR_IMR; + } + + mask = EDP_PSR_ERROR(trans_shift); if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ) - mask |= EDP_PSR_POST_EXIT(trans) | EDP_PSR_PRE_ENTRY(trans); + mask |= EDP_PSR_POST_EXIT(trans_shift) | + EDP_PSR_PRE_ENTRY(trans_shift); /* Warning: it is masking/setting reserved bits too */ - val = I915_READ(EDP_PSR_IMR); - val &= ~EDP_PSR_TRANS_MASK(trans); + val = I915_READ(imr_reg); + val &= ~EDP_PSR_TRANS_MASK(trans_shift); val |= ~mask; - I915_WRITE(EDP_PSR_IMR, val); + I915_WRITE(imr_reg, val); } static void psr_event_print(u32 val, bool psr2_enabled) @@ -144,15 +159,25 @@ static void psr_event_print(u32 val, bool psr2_enabled) void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) { enum transcoder cpu_transcoder = dev_priv->psr.transcoder; + enum transcoder trans_shift; + i915_reg_t imr_reg; ktime_t time_ns = ktime_get(); - if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) { + if (INTEL_GEN(dev_priv) >= 12) { + trans_shift = 0; + imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder); + } else { + trans_shift = dev_priv->psr.transcoder; + imr_reg = EDP_PSR_IMR; + } + + if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) { dev_priv->psr.last_entry_attempt = time_ns; DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n", transcoder_name(cpu_transcoder)); } - if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) { + if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) { dev_priv->psr.last_exit = time_ns; DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n", transcoder_name(cpu_transcoder)); @@ -166,7 +191,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) } } - if (psr_iir & EDP_PSR_ERROR(cpu_transcoder)) { + if (psr_iir & EDP_PSR_ERROR(trans_shift)) { u32 val; DRM_WARN("[transcoder %s] PSR aux error\n", @@ -182,9 +207,9 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) * again so we don't care about unmask the interruption * or unset irq_aux_error. 
*/ - val = I915_READ(EDP_PSR_IMR); - val |= EDP_PSR_ERROR(cpu_transcoder); - I915_WRITE(EDP_PSR_IMR, val); + val = I915_READ(imr_reg); + val |= EDP_PSR_ERROR(trans_shift); + I915_WRITE(imr_reg, val); schedule_work(&dev_priv->psr.work); } @@ -731,8 +756,13 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, * first time that PSR HW tries to activate so lets keep PSR disabled * to avoid any rendering problems. */ - val = I915_READ(EDP_PSR_IIR); - val &= EDP_PSR_ERROR(dev_priv->psr.transcoder); + if (INTEL_GEN(dev_priv) >= 12) { + val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder)); + val &= EDP_PSR_ERROR(0); + } else { + val = I915_READ(EDP_PSR_IIR); + val &= EDP_PSR_ERROR(dev_priv->psr.transcoder); + } if (val) { dev_priv->psr.sink_not_reliable = true; DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n"); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 135c9ee55e07..ae7228032d2c 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2613,11 +2613,21 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) } if (iir & GEN8_DE_EDP_PSR) { - u32 psr_iir = I915_READ(EDP_PSR_IIR); + u32 psr_iir; + i915_reg_t iir_reg; + + if (INTEL_GEN(dev_priv) >= 12) + iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder); + else + iir_reg = EDP_PSR_IIR; + + psr_iir = I915_READ(iir_reg); + I915_WRITE(iir_reg, psr_iir); + + if (psr_iir) + found = true; intel_psr_irq_handler(dev_priv, psr_iir); - I915_WRITE(EDP_PSR_IIR, psr_iir); - found = true; } if (!found) @@ -3233,8 +3243,23 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); - intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); - intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); + if (INTEL_GEN(dev_priv) >= 12) { + enum transcoder trans; + + for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) { + enum intel_display_power_domain domain; + + domain = POWER_DOMAIN_TRANSCODER(trans); + if (!intel_display_power_is_enabled(dev_priv, domain)) + continue; + + intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff); + intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff); + } + } else { + intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); + } for_each_pipe(dev_priv, pipe) if (intel_display_power_is_enabled(dev_priv, @@ -3740,7 +3765,21 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) else if (IS_BROADWELL(dev_priv)) de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; - gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); + if (INTEL_GEN(dev_priv) >= 12) { + enum transcoder trans; + + for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) { + enum intel_display_power_domain domain; + + domain = POWER_DOMAIN_TRANSCODER(trans); + if (!intel_display_power_is_enabled(dev_priv, domain)) + continue; + + gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans)); + } + } else { + gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); + } for_each_pipe(dev_priv, pipe) { dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 81f68455d492..de9e679e90bb 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -4222,9 +4222,17 @@ enum { #define EDP_PSR_TP1_TIME_0us (3 << 4) #define EDP_PSR_IDLE_FRAME_SHIFT 0 -/* Bspec claims those aren't shifted but stay at 0x64800 */ +/* + * Until TGL, IMR/IIR are fixed at 0x648xx. 
On TGL+ those registers are relative + * to transcoder and bits defined for each one as if using no shift (i.e. as if + * it was for TRANSCODER_EDP) + */ #define EDP_PSR_IMR _MMIO(0x64834) #define EDP_PSR_IIR _MMIO(0x64838) +#define _PSR_IMR_A 0x60814 +#define _PSR_IIR_A 0x60818 +#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(tran, _PSR_IMR_A) +#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A) #define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \ 0 : ((trans) - TRANSCODER_A + 1) * 8) #define EDP_PSR_TRANS_MASK(trans) (0x7 << _EDP_PSR_TRANS_SHIFT(trans)) -- cgit v1.2.3 From e468ff06157a3bb1b70ac8ea807429446b8bf4b2 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Wed, 4 Sep 2019 14:34:16 -0700 Subject: drm/i915: protect access to DP_TP_* on non-dp MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DP_TP_{CTL,STATUS} should only be programmed when the encoder is intel_dp. Checking its current usages intel_disable_ddi_buf() is the only offender, with other places being protected by checks like pipe_config->fec_enable that is only set by intel_dp. v3 (José): - Using intel_crtc_has_dp_encoder() instead of intel_encoder_is_dp() (Ville) Cc: Matt Roper Cc: Ville Syrjälä Signed-off-by: Lucas De Marchi Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190904213419.27547-4-jose.souza@intel.com Signed-off-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_ddi.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 1fe0bf01e580..ec132cd6add8 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3465,10 +3465,12 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder, wait = true; } - val = I915_READ(DP_TP_CTL(port)); - val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); - val |= DP_TP_CTL_LINK_TRAIN_PAT1; - I915_WRITE(DP_TP_CTL(port), val); + if (intel_crtc_has_dp_encoder(crtc_state)) { + val = I915_READ(DP_TP_CTL(port)); + val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); + val |= DP_TP_CTL_LINK_TRAIN_PAT1; + I915_WRITE(DP_TP_CTL(port), val); + } /* Disable FEC in DP Sink */ intel_ddi_disable_fec_state(encoder, crtc_state); -- cgit v1.2.3 From 4444df6e205bc1dc16ba112588815defe3c5a724 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Wed, 4 Sep 2019 14:34:17 -0700 Subject: drm/i915/tgl: move DP_TP_* to transcoder MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Gen 12 onwards moves the DP_TP_* registers to be transcoder-based rather than port-based. This adds the new register addresses and changes all the callers to use the register saved in intel_dp->regs.*. This is filled out when preparing to enable the port so we take into account if we should use the transcoder or the port. v2: reimplement by stashing the registers we want to access under intel_dp->reg. Now they are initialized when enabling the port. Ville suggested to store the transcoder to be used exclusively by TGL+. After implementing I thought just storing the register directly made it cleaner. 
Cc: Matt Roper Cc: Ville Syrjälä Signed-off-by: Lucas De Marchi Reviewed-by: Matt Roper Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190904213419.27547-5-jose.souza@intel.com Signed-off-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_ddi.c | 43 ++++++++++++++-------- drivers/gpu/drm/i915/display/intel_display_types.h | 9 +++++ drivers/gpu/drm/i915/display/intel_dp.c | 13 ++++--- drivers/gpu/drm/i915/display/intel_dp_mst.c | 8 ++-- drivers/gpu/drm/i915/i915_reg.h | 4 ++ 5 files changed, 51 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index ec132cd6add8..3e6394139964 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3167,17 +3167,18 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; + struct intel_dp *intel_dp; u32 val; if (!crtc_state->fec_enable) return; - val = I915_READ(DP_TP_CTL(port)); + intel_dp = enc_to_intel_dp(&encoder->base); + val = I915_READ(intel_dp->regs.dp_tp_ctl); val |= DP_TP_CTL_FEC_ENABLE; - I915_WRITE(DP_TP_CTL(port), val); + I915_WRITE(intel_dp->regs.dp_tp_ctl, val); - if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port), + if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, DP_TP_STATUS_FEC_ENABLE_LIVE, 1)) DRM_ERROR("Timed out waiting for FEC Enable Status\n"); } @@ -3186,16 +3187,17 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; + struct intel_dp *intel_dp; u32 val; if (!crtc_state->fec_enable) return; - val = I915_READ(DP_TP_CTL(port)); + intel_dp = enc_to_intel_dp(&encoder->base); + val = I915_READ(intel_dp->regs.dp_tp_ctl); val &= ~DP_TP_CTL_FEC_ENABLE; - I915_WRITE(DP_TP_CTL(port), val); - POSTING_READ(DP_TP_CTL(port)); + I915_WRITE(intel_dp->regs.dp_tp_ctl, val); + POSTING_READ(intel_dp->regs.dp_tp_ctl); } static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, @@ -3208,10 +3210,14 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); int level = intel_ddi_dp_level(intel_dp); + enum transcoder transcoder = crtc_state->cpu_transcoder; intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count, is_mst); + intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder); + intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder); + /* 1.a got on intel_atomic_commit_tail() */ /* 2. 
*/ @@ -3300,6 +3306,9 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count, is_mst); + intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port); + intel_dp->regs.dp_tp_status = DP_TP_STATUS(port); + intel_edp_panel_on(intel_dp); intel_ddi_clk_select(encoder, crtc_state); @@ -3466,10 +3475,12 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder, } if (intel_crtc_has_dp_encoder(crtc_state)) { - val = I915_READ(DP_TP_CTL(port)); + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + val = I915_READ(intel_dp->regs.dp_tp_ctl); val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); val |= DP_TP_CTL_LINK_TRAIN_PAT1; - I915_WRITE(DP_TP_CTL(port), val); + I915_WRITE(intel_dp->regs.dp_tp_ctl, val); } /* Disable FEC in DP Sink */ @@ -3898,7 +3909,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) u32 val; bool wait = false; - if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) { + if (I915_READ(intel_dp->regs.dp_tp_ctl) & DP_TP_CTL_ENABLE) { val = I915_READ(DDI_BUF_CTL(port)); if (val & DDI_BUF_CTL_ENABLE) { val &= ~DDI_BUF_CTL_ENABLE; @@ -3906,11 +3917,11 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) wait = true; } - val = I915_READ(DP_TP_CTL(port)); + val = I915_READ(intel_dp->regs.dp_tp_ctl); val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); val |= DP_TP_CTL_LINK_TRAIN_PAT1; - I915_WRITE(DP_TP_CTL(port), val); - POSTING_READ(DP_TP_CTL(port)); + I915_WRITE(intel_dp->regs.dp_tp_ctl, val); + POSTING_READ(intel_dp->regs.dp_tp_ctl); if (wait) intel_wait_ddi_buf_idle(dev_priv, port); @@ -3925,8 +3936,8 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; } - I915_WRITE(DP_TP_CTL(port), val); - POSTING_READ(DP_TP_CTL(port)); + I915_WRITE(intel_dp->regs.dp_tp_ctl, val); + POSTING_READ(intel_dp->regs.dp_tp_ctl); intel_dp->DP |= DDI_BUF_CTL_ENABLE; I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 61277a87dbe7..d5cc4b810d9e 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1218,6 +1218,15 @@ struct intel_dp { bool can_mst; /* this port supports mst */ bool is_mst; int active_mst_links; + + /* + * DP_TP_* registers may be either on port or transcoder register space. 
+ */ + struct { + i915_reg_t dp_tp_ctl; + i915_reg_t dp_tp_status; + } regs; + /* connector directly attached - won't be use for modeset in mst world */ struct intel_connector *attached_connector; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 4e3e696e7fc8..f06fbca5cb3e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2297,6 +2297,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder, intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)); + intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port); + intel_dp->regs.dp_tp_status = DP_TP_STATUS(port); + /* * There are four kinds of DP registers: * @@ -3253,7 +3256,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, dp_train_pat & train_pat_mask); if (HAS_DDI(dev_priv)) { - u32 temp = I915_READ(DP_TP_CTL(port)); + u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl); if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) temp |= DP_TP_CTL_SCRAMBLE_DISABLE; @@ -3279,7 +3282,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, temp |= DP_TP_CTL_LINK_TRAIN_PAT4; break; } - I915_WRITE(DP_TP_CTL(port), temp); + I915_WRITE(intel_dp->regs.dp_tp_ctl, temp); } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { @@ -3980,10 +3983,10 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) if (!HAS_DDI(dev_priv)) return; - val = I915_READ(DP_TP_CTL(port)); + val = I915_READ(intel_dp->regs.dp_tp_ctl); val &= ~DP_TP_CTL_LINK_TRAIN_MASK; val |= DP_TP_CTL_LINK_TRAIN_IDLE; - I915_WRITE(DP_TP_CTL(port), val); + I915_WRITE(intel_dp->regs.dp_tp_ctl, val); /* * Until TGL on PORT_A we can have only eDP in SST mode. There the only @@ -3995,7 +3998,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) if (port == PORT_A && INTEL_GEN(dev_priv) < 12) return; - if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port), + if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, DP_TP_STATUS_IDLE_DONE, 1)) DRM_ERROR("Timed out waiting for DP idle patterns\n"); } diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 37366f81255b..3f9cdbaab95e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -295,7 +295,6 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = intel_dig_port->base.port; struct intel_connector *connector = to_intel_connector(conn_state->connector); int ret; @@ -326,8 +325,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, DRM_ERROR("failed to allocate vcpi\n"); intel_dp->active_mst_links++; - temp = I915_READ(DP_TP_STATUS(port)); - I915_WRITE(DP_TP_STATUS(port), temp); + temp = I915_READ(intel_dp->regs.dp_tp_status); + I915_WRITE(intel_dp->regs.dp_tp_status, temp); ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); @@ -342,11 +341,10 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = intel_dig_port->base.port; DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); - if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port), + if 
(intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, DP_TP_STATUS_ACT_SENT, 1)) DRM_ERROR("Timed out waiting for ACT sent\n"); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index de9e679e90bb..006cffd56be2 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9546,7 +9546,9 @@ enum skl_power_gate { /* DisplayPort Transport Control */ #define _DP_TP_CTL_A 0x64040 #define _DP_TP_CTL_B 0x64140 +#define _TGL_DP_TP_CTL_A 0x60540 #define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B) +#define TGL_DP_TP_CTL(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_CTL_A) #define DP_TP_CTL_ENABLE (1 << 31) #define DP_TP_CTL_FEC_ENABLE (1 << 30) #define DP_TP_CTL_MODE_SST (0 << 27) @@ -9566,7 +9568,9 @@ enum skl_power_gate { /* DisplayPort Transport Status */ #define _DP_TP_STATUS_A 0x64044 #define _DP_TP_STATUS_B 0x64144 +#define _TGL_DP_TP_STATUS_A 0x60544 #define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B) +#define TGL_DP_TP_STATUS(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_STATUS_A) #define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28) #define DP_TP_STATUS_IDLE_DONE (1 << 25) #define DP_TP_STATUS_ACT_SENT (1 << 24) -- cgit v1.2.3 From 8ffa4392a32e27b3945b8c0585550d3dd831c68b Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Wed, 4 Sep 2019 14:34:18 -0700 Subject: drm/i915/tgl: disable SAGV temporarily MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SAGV is not currently working for Tiger Lake. We better disable it until the implementation is stabilized and we can enable it. HSDES: 1409542895 2208191909 Signed-off-by: Lucas De Marchi Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190904213419.27547-6-jose.souza@intel.com Signed-off-by: José Roberto de Souza --- drivers/gpu/drm/i915/intel_pm.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 4fa9bc83c8b4..7294fcf05323 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3654,6 +3654,10 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv) static bool intel_has_sagv(struct drm_i915_private *dev_priv) { + /* HACK! */ + if (IS_GEN(dev_priv, 12)) + return false; + return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) && dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED; } -- cgit v1.2.3 From 5b548ae63d584c76d41926e9efd63b0d2d56127f Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Wed, 4 Sep 2019 14:34:19 -0700 Subject: drm/i915/tgl: add gen12 to stolen initialization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add case for gen == 12 and add MISSING_CASE() for future gens. We were already handling gen12 as the default, so this doesn't change the current behavior. 
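For context on why the default label now warns: MISSING_CASE() is the i915_utils.h helper that turns an unhandled switch value into a loud complaint, roughly (paraphrased for illustration, not part of this patch):

    #define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
                                 __stringify(x), (long)(x))

so a hypothetical gen13 part would print the warning and then fall through to the gen11/12 icl_get_stolen_reserved() path instead of being silently mishandled.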
BSpec: 19481 and 44980 Cc: CQ Tang Signed-off-by: Lucas De Marchi Reviewed-by: José Roberto de Souza Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190904213419.27547-7-jose.souza@intel.com Signed-off-by: José Roberto de Souza --- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index aa533b4ab5f5..7ce5259d73d6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -425,8 +425,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) bdw_get_stolen_reserved(dev_priv, &reserved_base, &reserved_size); break; - case 11: default: + MISSING_CASE(INTEL_GEN(dev_priv)); + /* fall-through */ + case 11: + case 12: icl_get_stolen_reserved(dev_priv, &reserved_base, &reserved_size); break; -- cgit v1.2.3 From d10e0cb7591ef88fa8edfb164327a30d323b9d3d Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Wed, 4 Sep 2019 16:02:40 -0700 Subject: drm/i915: Apply FBC WA for TGL too MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit WA 1409120013 is also valid for TGL, so lets check for ">= 11". BSpec: 52890 Cc: Matt Roper Cc: Clinton Taylor Signed-off-by: José Roberto de Souza Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190904230241.20638-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_fbc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 16ed44bfd734..dc34b23e2320 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -343,8 +343,8 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) HSW_FBCQ_DIS); } - if (IS_GEN(dev_priv, 11)) - /* Wa_1409120013:icl,ehl */ + if (INTEL_GEN(dev_priv) >= 11) + /* Wa_1409120013:icl,ehl,tgl */ I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); -- cgit v1.2.3 From aaef851083ed577a4a6094dec3caf7f00f1fd0db Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Wed, 4 Sep 2019 16:02:41 -0700 Subject: drm/i915/mst: Do not hardcoded the crtcs that encoder can connect MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tiger Lake has up to 4 pipes so the mask would need to be 0xf instead of 0x7. Do not hardcode the mask so it allows the fake MST encoders to connect to all pipes no matter how many the platform has. Iterating over all pipes to keep consistent with intel_ddi_init(). Initialy this patch was replaced by commit 4eaceea3a00f ("drm/i915: Fix DP-MST crtc_mask") but userspace it not correctly using encoder.possible_crtcs and it was reverted by commit e838bfa8e170 ("Revert "drm/i915: Fix DP-MST crtc_mask"") Userspace should be fixed but it might take a while, so bringing this patch back for now. 
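As a plain-C illustration of why the loop generalizes the old constant (only the bitmask arithmetic, not driver code; the pipe count is an assumed example):

	unsigned int crtc_mask = 0;
	unsigned int pipe, num_pipes = 4;	/* e.g. Tiger Lake */

	for (pipe = 0; pipe < num_pipes; pipe++)
		crtc_mask |= 1u << pipe;	/* 3 pipes -> 0x7, 4 pipes -> 0xf */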
Cc: Lucas De Marchi Cc: Ville Syrjälä Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190904230241.20638-2-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_dp_mst.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 3f9cdbaab95e..eeeb3f933aa4 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -597,6 +597,8 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum struct intel_dp_mst_encoder *intel_mst; struct intel_encoder *intel_encoder; struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum pipe pipe_iter; intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL); @@ -613,8 +615,9 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum intel_encoder->type = INTEL_OUTPUT_DP_MST; intel_encoder->power_domain = intel_dig_port->base.power_domain; intel_encoder->port = intel_dig_port->base.port; - intel_encoder->crtc_mask = 0x7; intel_encoder->cloneable = 0; + for_each_pipe(dev_priv, pipe_iter) + intel_encoder->crtc_mask |= BIT(pipe_iter); intel_encoder->compute_config = intel_dp_mst_compute_config; intel_encoder->disable = intel_mst_disable_dp; -- cgit v1.2.3 From a8c15954d64ac7177559c9f5eeb6593e5883fd69 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 3 Sep 2019 07:21:33 +0100 Subject: drm/i915: Protect debugfs per_file_stats with RCU lock If we make sure we grab a strong reference to each object as we dump it, we can reduce the locks outside of our iterators to an rcu_read_lock. This should prevent errors like: [ 2138.371911] BUG: KASAN: use-after-free in per_file_stats+0x43/0x380 [i915] [ 2138.371924] Read of size 8 at addr ffff888223651000 by task cat/8293 [ 2138.371947] CPU: 0 PID: 8293 Comm: cat Not tainted 5.3.0-rc6-CI-Custom_4352+ #1 [ 2138.371953] Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./J4205-ITX, BIOS P1.40 07/14/2017 [ 2138.371959] Call Trace: [ 2138.371974] dump_stack+0x7c/0xbb [ 2138.372099] ? per_file_stats+0x43/0x380 [i915] [ 2138.372108] print_address_description+0x73/0x3a0 [ 2138.372231] ? per_file_stats+0x43/0x380 [i915] [ 2138.372352] ? per_file_stats+0x43/0x380 [i915] [ 2138.372362] __kasan_report+0x14e/0x192 [ 2138.372489] ? per_file_stats+0x43/0x380 [i915] [ 2138.372502] kasan_report+0xe/0x20 [ 2138.372625] per_file_stats+0x43/0x380 [i915] [ 2138.372751] ? i915_panel_show+0x110/0x110 [i915] [ 2138.372761] idr_for_each+0xa7/0x160 [ 2138.372773] ? idr_get_next_ul+0x110/0x110 [ 2138.372782] ? 
do_raw_spin_lock+0x10a/0x1d0 [ 2138.372923] print_context_stats+0x264/0x510 [i915] Signed-off-by: Chris Wilson Tested-by: David Weinehall Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190903062133.27360-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 9798f27a697a..708855e051b5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -237,6 +237,9 @@ static int per_file_stats(int id, void *ptr, void *data) struct file_stats *stats = data; struct i915_vma *vma; + if (!kref_get_unless_zero(&obj->base.refcount)) + return 0; + stats->count++; stats->total += obj->base.size; if (!atomic_read(&obj->bind_count)) @@ -284,6 +287,7 @@ static int per_file_stats(int id, void *ptr, void *data) } spin_unlock(&obj->vma.lock); + i915_gem_object_put(obj); return 0; } @@ -313,10 +317,12 @@ static void print_context_stats(struct seq_file *m, i915_gem_context_lock_engines(ctx), it) { intel_context_lock_pinned(ce); if (intel_context_is_pinned(ce)) { + rcu_read_lock(); if (ce->state) per_file_stats(0, ce->state->obj, &kstats); per_file_stats(0, ce->ring->vma->obj, &kstats); + rcu_read_unlock(); } intel_context_unlock_pinned(ce); } @@ -328,9 +334,9 @@ static void print_context_stats(struct seq_file *m, struct task_struct *task; char name[80]; - spin_lock(&file->table_lock); + rcu_read_lock(); idr_for_each(&file->object_idr, per_file_stats, &stats); - spin_unlock(&file->table_lock); + rcu_read_unlock(); rcu_read_lock(); task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID); -- cgit v1.2.3 From ca9cab183449787058f700fd0a74a8c91b277268 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Sat, 13 Apr 2019 11:13:27 +0000 Subject: drm/i915: add immutable zpos plane properties MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds basic immutable support for the zpos property. The zpos increases from bottom to top: primary, sprites, cursor. 
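The per-plane assignments in the hunks below reduce to the following ordering (sketch only, reusing the helpers already present in those files):

	/* primary plane sits at the bottom */
	drm_plane_create_zpos_immutable_property(&plane->base, 0);

	/* sprite N (0-based) stacks above it */
	drm_plane_create_zpos_immutable_property(&plane->base, sprite + 1);

	/* cursor is always on top */
	drm_plane_create_zpos_immutable_property(&cursor->base,
						 RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1);

	/* gen9+ universal planes simply use their plane_id */
	drm_plane_create_zpos_immutable_property(&plane->base, plane_id);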
Signed-off-by: Ville Syrjälä [contact@emersion.fr: adapted for latest drm-tip] Signed-off-by: Simon Ser Link: https://patchwork.freedesktop.org/patch/msgid/YSH9PasoADJJdNJCSdI4m55ankIBsCaoSgkw-NQ5dlruCAxc8J-SQwVl5n3ddSAMDLTdbdyQvkONmtbjkUU-TQk5VIu1p-aZRO1OjjuSxjY=@emersion.fr Reviewed-by: Maarten Lankhorst --- drivers/gpu/drm/i915/display/intel_display.c | 10 ++++++++-- drivers/gpu/drm/i915/display/intel_sprite.c | 7 ++++++- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 06cf2171474d..3b5275ab66cf 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -14887,7 +14887,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) const u64 *modifiers; const u32 *formats; int num_formats; - int ret; + int ret, zpos; if (INTEL_GEN(dev_priv) >= 9) return skl_universal_plane_create(dev_priv, pipe, @@ -14976,6 +14976,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) DRM_MODE_ROTATE_0, supported_rotations); + zpos = 0; + drm_plane_create_zpos_immutable_property(&plane->base, zpos); + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); return plane; @@ -14992,7 +14995,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, { unsigned int possible_crtcs; struct intel_plane *cursor; - int ret; + int ret, zpos; cursor = intel_plane_alloc(); if (IS_ERR(cursor)) @@ -15041,6 +15044,9 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180); + zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; + drm_plane_create_zpos_immutable_property(&cursor->base, zpos); + drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); return cursor; diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 8ca2fbc2af1d..e415b0ad4a42 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -2592,6 +2592,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, BIT(DRM_MODE_BLEND_PREMULTI) | BIT(DRM_MODE_BLEND_COVERAGE)); + drm_plane_create_zpos_immutable_property(&plane->base, plane_id); + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); return plane; @@ -2613,7 +2615,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, const u64 *modifiers; const u32 *formats; int num_formats; - int ret; + int ret, zpos; if (INTEL_GEN(dev_priv) >= 9) return skl_universal_plane_create(dev_priv, pipe, @@ -2703,6 +2705,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); + zpos = sprite + 1; + drm_plane_create_zpos_immutable_property(&plane->base, zpos); + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); return plane; -- cgit v1.2.3 From 71cd86cfaa12645ca39e5bbeceb2039af74fba2e Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Thu, 5 Sep 2019 11:13:37 -0700 Subject: drm/i915/tgl: Use refclk/2 as bypass frequency MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Unlike gen11, which always ran at 50MHz when the cdclk PLL was disabled, TGL runs at refclk/2. The 50MHz croclk/2 is only used by hardware during some power state transitions. 
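In code the change is small (values are in kHz, matching the rest of intel_cdclk.c; the 38.4 MHz reference below is only an example):

	if (INTEL_GEN(dev_priv) >= 12)
		cdclk_state->bypass = cdclk_state->ref / 2;	/* e.g. 38400 -> 19200 */
	else
		cdclk_state->bypass = 50000;			/* 50 MHz, as before */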
Bspec: 49201 Cc: José Roberto de Souza Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190905181337.23727-1-matthew.d.roper@intel.com Reviewed-by: Ville Syrjälä --- drivers/gpu/drm/i915/display/intel_cdclk.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 76f11d465e91..d3e56628af70 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1855,8 +1855,6 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv, u32 val; int div; - cdclk_state->bypass = 50000; - val = I915_READ(SKL_DSSM); switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) { default: @@ -1873,6 +1871,11 @@ static void icl_get_cdclk(struct drm_i915_private *dev_priv, break; } + if (INTEL_GEN(dev_priv) >= 12) + cdclk_state->bypass = cdclk_state->ref / 2; + else + cdclk_state->bypass = 50000; + val = I915_READ(BXT_DE_PLL_ENABLE); if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 || (val & BXT_DE_PLL_LOCK) == 0) { -- cgit v1.2.3 From cdb736fa8b8b65740030ba8f9dc87f0fc3313858 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Fri, 6 Sep 2019 16:49:57 +0300 Subject: drm/i915: Use engine relative LRIs on context setup Daniele pointed out that relative mmio works differently in on context restore. Instead of adding the engine mmio base to offset, it masks out the base and adds bits [12:2] to current engine base. This should allow us to construct context register state to be applicable to all instances, including virtual. And avoid the trouble of updating the registers on virtual instances when submitting work. v2: only enable for gen12 for now (Mika) v3: make enabling readable (Chris) Bspec: 20206 Cc: Chris Wilson Cc: Tvrtko Ursulin Cc: Michal Wajdeczko Cc: Joonas Lahtinen Cc: Lucas De Marchi Cc: John Harrison Suggested-by: Daniele Ceraolo Spurio Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190906134957.25909-1-mika.kuoppala@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_engine_types.h | 7 ++++++ drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 2 ++ drivers/gpu/drm/i915/gt/intel_lrc.c | 32 ++++++++++++++++++++-------- 3 files changed, 32 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 15e02cb58a67..943f0663837e 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -481,6 +481,7 @@ struct intel_engine_cs { #define I915_ENGINE_HAS_SEMAPHORES BIT(3) #define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4) #define I915_ENGINE_IS_VIRTUAL BIT(5) +#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6) unsigned int flags; /* @@ -576,6 +577,12 @@ intel_engine_is_virtual(const struct intel_engine_cs *engine) return engine->flags & I915_ENGINE_IS_VIRTUAL; } +static inline bool +intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine) +{ + return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO; +} + #define instdone_has_slice(dev_priv___, sseu___, slice___) \ ((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___)) diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index 86e00a2db8a4..fbad403ab7ac 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -132,6 +132,8 @@ * address/value pairs. 
Don't overdue it, though, x <= 2^4 must hold! */ #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1) +/* Gen11+. addr = base + (ctx_restore ? offset & GENMASK(12,2) : offset) */ +#define MI_LRI_CS_MMIO (1<<19) #define MI_LRI_FORCE_POSTED (1<<12) #define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1) #define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 87b7473a6dfb..de86ca6add09 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1214,7 +1214,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine) unsigned int n; GEM_BUG_ON(READ_ONCE(ve->context.inflight)); - virtual_update_register_offsets(regs, engine); + + if (!intel_engine_has_relative_mmio(engine)) + virtual_update_register_offsets(regs, + engine); if (!list_empty(&ve->context.signals)) virtual_xfer_breadcrumbs(ve, engine); @@ -2939,6 +2942,9 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) engine->flags |= I915_ENGINE_HAS_PREEMPTION; } + + if (engine->class != COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) >= 12) + engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO; } static void execlists_destroy(struct intel_engine_cs *engine) @@ -3130,8 +3136,10 @@ static void execlists_init_reg_state(u32 *regs, struct intel_ring *ring) { struct i915_ppgtt *ppgtt = vm_alias(ce->vm); - bool rcs = engine->class == RENDER_CLASS; - u32 base = engine->mmio_base; + const bool rcs = engine->class == RENDER_CLASS; + const u32 base = engine->mmio_base; + const u32 lri_base = + intel_engine_has_relative_mmio(engine) ? MI_LRI_CS_MMIO : 0; /* * A context is actually a big batch buffer with several @@ -3143,8 +3151,10 @@ static void execlists_init_reg_state(u32 *regs, * * Must keep consistent with virtual_update_register_offsets(). */ - regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) | - MI_LRI_FORCE_POSTED; + regs[CTX_LRI_HEADER_0] = + MI_LOAD_REGISTER_IMM(rcs ? 
14 : 11) | + MI_LRI_FORCE_POSTED | + lri_base; CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base), _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) | @@ -3191,7 +3201,10 @@ static void execlists_init_reg_state(u32 *regs, } } - regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED; + regs[CTX_LRI_HEADER_1] = + MI_LOAD_REGISTER_IMM(9) | + MI_LRI_FORCE_POSTED | + lri_base; CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0); /* PDP values well be assigned later if needed */ @@ -3218,7 +3231,7 @@ static void execlists_init_reg_state(u32 *regs, } if (rcs) { - regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); + regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1) | lri_base; CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0); } @@ -3411,8 +3424,9 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve) return; swap(ve->siblings[swp], ve->siblings[0]); - virtual_update_register_offsets(ve->context.lrc_reg_state, - ve->siblings[0]); + if (!intel_engine_has_relative_mmio(ve->siblings[0])) + virtual_update_register_offsets(ve->context.lrc_reg_state, + ve->siblings[0]); } static int virtual_context_pin(struct intel_context *ce) -- cgit v1.2.3 From 5bf05dc58d65b215437df3013163a7eea78d5d4c Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Fri, 6 Sep 2019 15:23:14 +0300 Subject: drm/i915/tgl: Register state context definition for Gen12 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Gen12 has subtle changes in the reg state context offsets (some fields are gone, some are in a different location), compared to previous Gens. The simplest approach seems to be keeping Gen12 (and future platform) changes apart from the previous gens, while keeping the registers that are contiguous in functions we can reuse. v2: alias, virtual engine, rpcs, prune unused regs v3: use engine base (Daniele), take ctx_bb for all Bspec: 46255 Cc: Michal Wajdeczko Cc: Daniele Ceraolo Spurio Cc: Chris Wilson Cc: Joonas Lahtinen Cc: José Roberto de Souza Signed-off-by: Michel Thierry Signed-off-by: Lucas De Marchi Signed-off-by: Mika Kuoppala Reviewed-by: Daniele Ceraolo Spurio Tested-by: Daniele Ceraolo Spurio [ickle: Tweaked the GEM_WARN_ON after settling on a compromise with Daniele] Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190906122314.2146-2-mika.kuoppala@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_lrc.c | 200 +++++++++++++++++++++++--------- drivers/gpu/drm/i915/gt/intel_lrc_reg.h | 6 +- 2 files changed, 147 insertions(+), 59 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index de86ca6add09..0ddfbebbcbbc 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -808,8 +808,12 @@ static void virtual_update_register_offsets(u32 *regs, { u32 base = engine->mmio_base; + /* Refactor so that we only have one place that knows all the offsets! */ + GEM_WARN_ON(INTEL_GEN(engine->i915) >= 12); + /* Must match execlists_init_reg_state()! 
*/ + /* Common part */ regs[CTX_CONTEXT_CONTROL] = i915_mmio_reg_offset(RING_CONTEXT_CONTROL(base)); regs[CTX_RING_HEAD] = i915_mmio_reg_offset(RING_HEAD(base)); @@ -820,13 +824,16 @@ static void virtual_update_register_offsets(u32 *regs, regs[CTX_BB_HEAD_U] = i915_mmio_reg_offset(RING_BBADDR_UDW(base)); regs[CTX_BB_HEAD_L] = i915_mmio_reg_offset(RING_BBADDR(base)); regs[CTX_BB_STATE] = i915_mmio_reg_offset(RING_BBSTATE(base)); + regs[CTX_SECOND_BB_HEAD_U] = i915_mmio_reg_offset(RING_SBBADDR_UDW(base)); regs[CTX_SECOND_BB_HEAD_L] = i915_mmio_reg_offset(RING_SBBADDR(base)); regs[CTX_SECOND_BB_STATE] = i915_mmio_reg_offset(RING_SBBSTATE(base)); + /* PPGTT part */ regs[CTX_CTX_TIMESTAMP] = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(base)); + regs[CTX_PDP3_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 3)); regs[CTX_PDP3_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 3)); regs[CTX_PDP2_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 2)); @@ -3122,39 +3129,13 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) return indirect_ctx_offset; } -static struct i915_ppgtt *vm_alias(struct i915_address_space *vm) -{ - if (i915_is_ggtt(vm)) - return i915_vm_to_ggtt(vm)->alias; - else - return i915_vm_to_ppgtt(vm); -} -static void execlists_init_reg_state(u32 *regs, - struct intel_context *ce, - struct intel_engine_cs *engine, - struct intel_ring *ring) +static void init_common_reg_state(u32 * const regs, + struct i915_ppgtt * const ppgtt, + struct intel_engine_cs *engine, + struct intel_ring *ring) { - struct i915_ppgtt *ppgtt = vm_alias(ce->vm); - const bool rcs = engine->class == RENDER_CLASS; const u32 base = engine->mmio_base; - const u32 lri_base = - intel_engine_has_relative_mmio(engine) ? MI_LRI_CS_MMIO : 0; - - /* - * A context is actually a big batch buffer with several - * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The - * values we are setting here are only for the first context restore: - * on a subsequent save, the GPU will recreate this batchbuffer with new - * values (including all the missing MI_LOAD_REGISTER_IMM commands that - * we are not initializing here). - * - * Must keep consistent with virtual_update_register_offsets(). - */ - regs[CTX_LRI_HEADER_0] = - MI_LOAD_REGISTER_IMM(rcs ? 
14 : 11) | - MI_LRI_FORCE_POSTED | - lri_base; CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base), _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) | @@ -3172,41 +3153,43 @@ static void execlists_init_reg_state(u32 *regs, CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0); CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0); CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT); - CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0); - CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0); - CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0); - if (rcs) { - struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; - - CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0); - CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET, - RING_INDIRECT_CTX_OFFSET(base), 0); - if (wa_ctx->indirect_ctx.size) { - u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); +} - regs[CTX_RCS_INDIRECT_CTX + 1] = - (ggtt_offset + wa_ctx->indirect_ctx.offset) | - (wa_ctx->indirect_ctx.size / CACHELINE_BYTES); +static void init_wa_bb_reg_state(u32 * const regs, + struct intel_engine_cs *engine, + u32 pos_bb_per_ctx) +{ + struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx; + const u32 base = engine->mmio_base; + const u32 pos_indirect_ctx = pos_bb_per_ctx + 2; + const u32 pos_indirect_ctx_offset = pos_indirect_ctx + 2; - regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] = - intel_lr_indirect_ctx_offset(engine) << 6; - } + CTX_REG(regs, pos_indirect_ctx, RING_INDIRECT_CTX(base), 0); + CTX_REG(regs, pos_indirect_ctx_offset, + RING_INDIRECT_CTX_OFFSET(base), 0); + if (wa_ctx->indirect_ctx.size) { + const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); - CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0); - if (wa_ctx->per_ctx.size) { - u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); + regs[pos_indirect_ctx + 1] = + (ggtt_offset + wa_ctx->indirect_ctx.offset) | + (wa_ctx->indirect_ctx.size / CACHELINE_BYTES); - regs[CTX_BB_PER_CTX_PTR + 1] = - (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; - } + regs[pos_indirect_ctx_offset + 1] = + intel_lr_indirect_ctx_offset(engine) << 6; } - regs[CTX_LRI_HEADER_1] = - MI_LOAD_REGISTER_IMM(9) | - MI_LRI_FORCE_POSTED | - lri_base; + CTX_REG(regs, pos_bb_per_ctx, RING_BB_PER_CTX_PTR(base), 0); + if (wa_ctx->per_ctx.size) { + const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); - CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0); + regs[pos_bb_per_ctx + 1] = + (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; + } +} + +static void init_ppgtt_reg_state(u32 *regs, u32 base, + struct i915_ppgtt *ppgtt) +{ /* PDP values well be assigned later if needed */ CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0); CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0); @@ -3229,6 +3212,47 @@ static void execlists_init_reg_state(u32 *regs, ASSIGN_CTX_PDP(ppgtt, regs, 1); ASSIGN_CTX_PDP(ppgtt, regs, 0); } +} + +static struct i915_ppgtt *vm_alias(struct i915_address_space *vm) +{ + if (i915_is_ggtt(vm)) + return i915_vm_to_ggtt(vm)->alias; + else + return i915_vm_to_ppgtt(vm); +} + +static void gen8_init_reg_state(u32 * const regs, + struct intel_context *ce, + struct intel_engine_cs *engine, + struct intel_ring *ring) +{ + struct i915_ppgtt * const ppgtt = vm_alias(ce->vm); + const bool rcs = engine->class == RENDER_CLASS; + const u32 base = engine->mmio_base; + const u32 lri_base = + intel_engine_has_relative_mmio(engine) ? MI_LRI_CS_MMIO : 0; + + regs[CTX_LRI_HEADER_0] = + MI_LOAD_REGISTER_IMM(rcs ? 
14 : 11) | + MI_LRI_FORCE_POSTED | + lri_base; + + init_common_reg_state(regs, ppgtt, engine, ring); + CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0); + CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0); + CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0); + if (rcs) + init_wa_bb_reg_state(regs, engine, CTX_BB_PER_CTX_PTR); + + regs[CTX_LRI_HEADER_1] = + MI_LOAD_REGISTER_IMM(9) | + MI_LRI_FORCE_POSTED | + lri_base; + + CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0); + + init_ppgtt_reg_state(regs, base, ppgtt); if (rcs) { regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1) | lri_base; @@ -3240,6 +3264,66 @@ static void execlists_init_reg_state(u32 *regs, regs[CTX_END] |= BIT(0); } +static void gen12_init_reg_state(u32 * const regs, + struct intel_context *ce, + struct intel_engine_cs *engine, + struct intel_ring *ring) +{ + struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(ce->vm); + const bool rcs = engine->class == RENDER_CLASS; + const u32 base = engine->mmio_base; + const u32 lri_base = + intel_engine_has_relative_mmio(engine) ? MI_LRI_CS_MMIO : 0; + + regs[CTX_LRI_HEADER_0] = + MI_LOAD_REGISTER_IMM(rcs ? 11 : 9) | + MI_LRI_FORCE_POSTED | + lri_base; + + init_common_reg_state(regs, ppgtt, engine, ring); + + /* We want ctx_ptr for all engines to be set */ + init_wa_bb_reg_state(regs, engine, GEN12_CTX_BB_PER_CTX_PTR); + + regs[CTX_LRI_HEADER_1] = + MI_LOAD_REGISTER_IMM(9) | + MI_LRI_FORCE_POSTED | + lri_base; + + CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0); + + init_ppgtt_reg_state(regs, base, ppgtt); + + if (rcs) { + regs[GEN12_CTX_LRI_HEADER_3] = + MI_LOAD_REGISTER_IMM(1) | lri_base; + CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0); + + /* TODO: oa_init_reg_state ? */ + } +} + +static void execlists_init_reg_state(u32 *regs, + struct intel_context *ce, + struct intel_engine_cs *engine, + struct intel_ring *ring) +{ + /* + * A context is actually a big batch buffer with several + * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The + * values we are setting here are only for the first context restore: + * on a subsequent save, the GPU will recreate this batchbuffer with new + * values (including all the missing MI_LOAD_REGISTER_IMM commands that + * we are not initializing here). + * + * Must keep consistent with virtual_update_register_offsets(). 
+ */ + if (INTEL_GEN(engine->i915) >= 12) + gen12_init_reg_state(regs, ce, engine, ring); + else + gen8_init_reg_state(regs, ce, engine, ring); +} + static int populate_lr_context(struct intel_context *ce, struct drm_i915_gem_object *ctx_obj, diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h index b8f20ad71169..68caf8541866 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h @@ -9,7 +9,7 @@ #include -/* GEN8+ Reg State Context */ +/* GEN8 to GEN11 Reg State Context */ #define CTX_LRI_HEADER_0 0x01 #define CTX_CONTEXT_CONTROL 0x02 #define CTX_RING_HEAD 0x04 @@ -39,6 +39,10 @@ #define CTX_R_PWR_CLK_STATE 0x42 #define CTX_END 0x44 +/* GEN12+ Reg State Context */ +#define GEN12_CTX_BB_PER_CTX_PTR 0x12 +#define GEN12_CTX_LRI_HEADER_3 0x41 + #define CTX_REG(reg_state, pos, reg, val) do { \ u32 *reg_state__ = (reg_state); \ const u32 pos__ = (pos); \ -- cgit v1.2.3 From 42014f69bb235f1123a6f8befb58b3e350a6f180 Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Thu, 5 Sep 2019 14:14:03 +0300 Subject: drm/i915: Hook up GT power management Refactor the GT power management interface to work through the GT now that it is under the control of gt/ Based on a patch by Chris Wilson. Signed-off-by: Andi Shyti Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190905111403.10071-1-andi.shyti@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 1 + drivers/gpu/drm/i915/gt/intel_gt.c | 45 ++++++++++++++++++++++-- drivers/gpu/drm/i915/gt/intel_gt.h | 9 +++-- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 36 +++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_gt_pm.h | 2 ++ drivers/gpu/drm/i915/i915_drv.c | 21 +++-------- drivers/gpu/drm/i915/i915_gem.c | 30 +++------------- drivers/gpu/drm/i915/intel_pm.c | 4 ++- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 2 +- 9 files changed, 100 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 92e53c25424c..b3993d24b83d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -137,6 +137,7 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt) bool i915_gem_load_power_context(struct drm_i915_private *i915) { + intel_gt_pm_enable(&i915->gt); return switch_to_kernel_context_sync(&i915->gt); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index d48ec9a76ed1..e2cc697d27fb 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -7,6 +7,7 @@ #include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_uncore.h" +#include "intel_pm.h" void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) { @@ -27,6 +28,9 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) void intel_gt_init_hw(struct drm_i915_private *i915) { i915->gt.ggtt = &i915->ggtt; + + /* BIOS often leaves RC6 enabled, but disable it for hw init */ + intel_gt_pm_disable(&i915->gt); } static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set) @@ -222,7 +226,13 @@ void intel_gt_chipset_flush(struct intel_gt *gt) intel_gtt_chipset_flush(); } -int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) +void intel_gt_driver_register(struct intel_gt *gt) +{ + if (IS_GEN(gt->i915, 5)) + intel_gpu_ips_init(gt->i915); +} + +static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int 
size) { struct drm_i915_private *i915 = gt->i915; struct drm_i915_gem_object *obj; @@ -256,11 +266,42 @@ err_unref: return ret; } -void intel_gt_fini_scratch(struct intel_gt *gt) +static void intel_gt_fini_scratch(struct intel_gt *gt) { i915_vma_unpin_and_release(>->scratch, 0); } +int intel_gt_init(struct intel_gt *gt) +{ + int err; + + err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K); + if (err) + return err; + + return 0; +} + +void intel_gt_driver_remove(struct intel_gt *gt) +{ + GEM_BUG_ON(gt->awake); + intel_gt_pm_disable(gt); +} + +void intel_gt_driver_unregister(struct intel_gt *gt) +{ + intel_gpu_ips_teardown(); +} + +void intel_gt_driver_release(struct intel_gt *gt) +{ + /* Paranoia: make sure we have disabled everything before we exit. */ + intel_gt_pm_disable(gt); + + intel_cleanup_gt_powersave(gt->i915); + intel_gt_fini_scratch(gt); +} + void intel_gt_driver_late_release(struct intel_gt *gt) { intel_uc_driver_late_release(>->uc); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 4920cb351f10..17af21cb7ed3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -29,6 +29,12 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc) void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); void intel_gt_init_hw(struct drm_i915_private *i915); +int intel_gt_init(struct intel_gt *gt); +void intel_gt_driver_register(struct intel_gt *gt); + +void intel_gt_driver_unregister(struct intel_gt *gt); +void intel_gt_driver_remove(struct intel_gt *gt); +void intel_gt_driver_release(struct intel_gt *gt); void intel_gt_driver_late_release(struct intel_gt *gt); @@ -41,9 +47,6 @@ void intel_gt_chipset_flush(struct intel_gt *gt); void intel_gt_init_hangcheck(struct intel_gt *gt); -int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size); -void intel_gt_fini_scratch(struct intel_gt *gt); - static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt, enum intel_gt_scratch_field field) { diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index aa6cf0152ce7..6ba0d2069f87 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -124,6 +124,42 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force) __intel_engine_reset(engine, false); } +static bool is_mock_device(const struct intel_gt *gt) +{ + return I915_SELFTEST_ONLY(gt->awake == -1); +} + +void intel_gt_pm_enable(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* Powersaving is controlled by the host when inside a VM */ + if (intel_vgpu_active(gt->i915)) + return; + + if (is_mock_device(gt)) + return; + + intel_gt_pm_get(gt); + + for_each_engine(engine, gt->i915, id) { + intel_engine_pm_get(engine); + engine->serial++; /* force kernel context reload */ + intel_engine_pm_put(engine); + } + + intel_gt_pm_put(gt); +} + +void intel_gt_pm_disable(struct intel_gt *gt) +{ + if (is_mock_device(gt)) + return; + + intel_sanitize_gt_powersave(gt->i915); +} + int intel_gt_resume(struct intel_gt *gt) { struct intel_engine_cs *engine; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index fb39d99cd6ee..d1f3e2e23937 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -43,6 +43,8 @@ static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt) } void intel_gt_pm_init_early(struct intel_gt *gt); +void 
intel_gt_pm_enable(struct intel_gt *gt); +void intel_gt_pm_disable(struct intel_gt *gt); void intel_gt_sanitize(struct intel_gt *gt, bool force); int intel_gt_resume(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9478f704a315..63e52e153414 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1316,9 +1316,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); - /* BIOS often leaves RC6 enabled, but disable it for hw init */ - intel_sanitize_gt_powersave(dev_priv); - intel_gt_init_workarounds(dev_priv); /* On the 945G/GM, the chipset reports the MSI capability on the @@ -1424,8 +1421,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) acpi_video_register(); } - if (IS_GEN(dev_priv, 5)) - intel_gpu_ips_init(dev_priv); + intel_gt_driver_register(&dev_priv->gt); intel_audio_init(dev_priv); @@ -1468,7 +1464,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) */ drm_kms_helper_poll_fini(&dev_priv->drm); - intel_gpu_ips_teardown(); + intel_gt_driver_unregister(&dev_priv->gt); acpi_video_unregister(); intel_opregion_unregister(dev_priv); @@ -1612,9 +1608,6 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) out_cleanup_hw: i915_driver_hw_remove(dev_priv); i915_ggtt_driver_release(dev_priv); - - /* Paranoia: make sure we have disabled everything before we exit. */ - intel_sanitize_gt_powersave(dev_priv); out_cleanup_mmio: i915_driver_mmio_release(dev_priv); out_runtime_pm_put: @@ -1685,9 +1678,6 @@ static void i915_driver_release(struct drm_device *dev) i915_ggtt_driver_release(dev_priv); - /* Paranoia: make sure we have disabled everything before we exit. */ - intel_sanitize_gt_powersave(dev_priv); - i915_driver_mmio_release(dev_priv); enable_rpm_wakeref_asserts(rpm); @@ -1916,7 +1906,7 @@ static int i915_drm_resume(struct drm_device *dev) int ret; disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - intel_sanitize_gt_powersave(dev_priv); + intel_gt_pm_disable(&dev_priv->gt); i915_gem_sanitize(dev_priv); @@ -2044,7 +2034,7 @@ static int i915_drm_resume_early(struct drm_device *dev) intel_display_power_resume_early(dev_priv); - intel_sanitize_gt_powersave(dev_priv); + intel_gt_pm_disable(&dev_priv->gt); intel_power_domains_resume(dev_priv); @@ -2588,9 +2578,6 @@ static int intel_runtime_suspend(struct device *kdev) struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; int ret = 0; - if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv)))) - return -ENODEV; - if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) return -ENODEV; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 95e7c52cf8ed..141024c66f36 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1375,17 +1375,6 @@ out: return err; } -static int -i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size) -{ - return intel_gt_init_scratch(&i915->gt, size); -} - -static void i915_gem_fini_scratch(struct drm_i915_private *i915) -{ - intel_gt_fini_scratch(&i915->gt); -} - static int intel_engines_verify_workarounds(struct drm_i915_private *i915) { struct intel_engine_cs *engine; @@ -1436,12 +1425,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) goto err_unlock; } - ret = i915_gem_init_scratch(dev_priv, - IS_GEN(dev_priv, 2) ? 
SZ_256K : PAGE_SIZE); - if (ret) { - GEM_BUG_ON(ret == -EIO); - goto err_ggtt; - } + intel_gt_init(&dev_priv->gt); ret = intel_engines_setup(dev_priv); if (ret) { @@ -1527,15 +1511,13 @@ err_init_hw: err_uc_init: if (ret != -EIO) { intel_uc_fini(&dev_priv->gt.uc); - intel_cleanup_gt_powersave(dev_priv); intel_engines_cleanup(dev_priv); } err_context: if (ret != -EIO) i915_gem_contexts_fini(dev_priv); err_scratch: - i915_gem_fini_scratch(dev_priv); -err_ggtt: + intel_gt_driver_release(&dev_priv->gt); err_unlock: intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); mutex_unlock(&dev_priv->drm.struct_mutex); @@ -1587,12 +1569,10 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915) void i915_gem_driver_remove(struct drm_i915_private *dev_priv) { - GEM_BUG_ON(dev_priv->gt.awake); - intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref); i915_gem_suspend_late(dev_priv); - intel_disable_gt_powersave(dev_priv); + intel_gt_driver_remove(&dev_priv->gt); /* Flush any outstanding unpin_work. */ i915_gem_drain_workqueue(dev_priv); @@ -1610,13 +1590,11 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->drm.struct_mutex); intel_engines_cleanup(dev_priv); i915_gem_contexts_fini(dev_priv); - i915_gem_fini_scratch(dev_priv); + intel_gt_driver_release(&dev_priv->gt); mutex_unlock(&dev_priv->drm.struct_mutex); intel_wa_list_free(&dev_priv->gt_wa_list); - intel_cleanup_gt_powersave(dev_priv); - intel_uc_cleanup_firmwares(&dev_priv->gt.uc); i915_gem_cleanup_userptr(dev_priv); intel_timelines_fini(dev_priv); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 7294fcf05323..528e90ed5de4 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -8671,7 +8671,9 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) { mutex_lock(&dev_priv->gt_pm.rps.lock); - intel_disable_rc6(dev_priv); + if (HAS_RC6(dev_priv)) + intel_disable_rc6(dev_priv); + intel_disable_rps(dev_priv); if (HAS_LLC(dev_priv)) intel_disable_llc_pstate(dev_priv); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 01a89c071bf5..91f15fa728cd 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -192,7 +192,7 @@ struct drm_i915_private *mock_gem_device(void) INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler); INIT_WORK(&i915->gem.idle_work, mock_idle_work_handler); - i915->gt.awake = true; + i915->gt.awake = -1; intel_timelines_init(i915); -- cgit v1.2.3 From de10ac47597e7a3596b27631d0d5ce5f48d2c099 Mon Sep 17 00:00:00 2001 From: Remi Pommarel Date: Sun, 1 Sep 2019 12:54:10 +0200 Subject: iio: adc: meson_saradc: Fix memory allocation order meson_saradc's irq handler uses priv->regmap so make sure that it is allocated before the irq get enabled. This also fixes crash when CONFIG_DEBUG_SHIRQ is enabled, as device managed resources are freed in the inverted order they had been allocated, priv->regmap was freed before the spurious fake irq that CONFIG_DEBUG_SHIRQ adds called the handler. 
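The underlying rule: devm-managed resources are released in the reverse order of allocation, so anything an interrupt handler dereferences has to be set up before the interrupt is requested. A minimal sketch of the safe ordering (the handler name, IRQ flags and dev_id below are illustrative, not taken from the driver):

	priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
					     priv->param->regmap_config);
	if (IS_ERR(priv->regmap))
		return PTR_ERR(priv->regmap);

	/* only now is it safe for a spurious or shared IRQ to fire */
	ret = devm_request_irq(&pdev->dev, irq, adc_irq_handler, IRQF_SHARED,
			       dev_name(&pdev->dev), indio_dev);
	if (ret)
		return ret;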
Fixes: 3af109131b7eb8 ("iio: adc: meson-saradc: switch from polling to interrupt mode") Reported-by: Elie Roudninski Signed-off-by: Remi Pommarel Reviewed-by: Martin Blumenstingl Tested-by: Elie ROUDNINSKI Reviewed-by: Kevin Hilman Signed-off-by: Jonathan Cameron --- drivers/iio/adc/meson_saradc.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c index 7b28d045d271..7b27306330a3 100644 --- a/drivers/iio/adc/meson_saradc.c +++ b/drivers/iio/adc/meson_saradc.c @@ -1219,6 +1219,11 @@ static int meson_sar_adc_probe(struct platform_device *pdev) if (IS_ERR(base)) return PTR_ERR(base); + priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, + priv->param->regmap_config); + if (IS_ERR(priv->regmap)) + return PTR_ERR(priv->regmap); + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); if (!irq) return -EINVAL; @@ -1228,11 +1233,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev) if (ret) return ret; - priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, - priv->param->regmap_config); - if (IS_ERR(priv->regmap)) - return PTR_ERR(priv->regmap); - priv->clkin = devm_clk_get(&pdev->dev, "clkin"); if (IS_ERR(priv->clkin)) { dev_err(&pdev->dev, "failed to get clkin\n"); -- cgit v1.2.3 From 85ae3aeedeccb6febb0c6f9d5346d9c6419ad925 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Sat, 31 Aug 2019 13:18:22 +0200 Subject: iio: imu: st_lsm6dsx: forbid 0 sensor sensitivity Do not allow configuring null sensor gain since it will force to 0 device outputs Fixes: c8d4066c7246 ("iio: imu: st_lsm6dsx: remove invalid gain value for LSM9DS1") Signed-off-by: Lorenzo Bianconi Signed-off-by: Jonathan Cameron --- drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h | 2 ++ drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c | 28 +++++++++++++++++++--------- drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c | 11 ++++------- 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h index 80e42c7dbcbe..0fe6999b8257 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h @@ -99,7 +99,9 @@ struct st_lsm6dsx_fs { #define ST_LSM6DSX_FS_LIST_SIZE 4 struct st_lsm6dsx_fs_table_entry { struct st_lsm6dsx_reg reg; + struct st_lsm6dsx_fs fs_avl[ST_LSM6DSX_FS_LIST_SIZE]; + int fs_len; }; /** diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c index 2d3495560136..fd5ebe1e1594 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c @@ -145,6 +145,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 }, .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 }, .fs_avl[3] = { IIO_G_TO_M_S_2(732), 0x1 }, + .fs_len = 4, }, [ST_LSM6DSX_ID_GYRO] = { .reg = { @@ -154,6 +155,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[0] = { IIO_DEGREE_TO_RAD(245), 0x0 }, .fs_avl[1] = { IIO_DEGREE_TO_RAD(500), 0x1 }, .fs_avl[2] = { IIO_DEGREE_TO_RAD(2000), 0x3 }, + .fs_len = 3, }, }, }, @@ -215,6 +217,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 }, .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 }, .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 }, + .fs_len = 4, }, [ST_LSM6DSX_ID_GYRO] = { .reg = { @@ -225,6 +228,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { 
.fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 }, .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 }, .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 }, + .fs_len = 4, }, }, .decimator = { @@ -327,6 +331,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 }, .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 }, .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 }, + .fs_len = 4, }, [ST_LSM6DSX_ID_GYRO] = { .reg = { @@ -337,6 +342,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 }, .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 }, .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 }, + .fs_len = 4, }, }, .decimator = { @@ -448,6 +454,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 }, .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 }, .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 }, + .fs_len = 4, }, [ST_LSM6DSX_ID_GYRO] = { .reg = { @@ -458,6 +465,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 }, .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 }, .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 }, + .fs_len = 4, }, }, .decimator = { @@ -563,6 +571,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 }, .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 }, .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 }, + .fs_len = 4, }, [ST_LSM6DSX_ID_GYRO] = { .reg = { @@ -573,6 +582,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 }, .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 }, .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 }, + .fs_len = 4, }, }, .batch = { @@ -693,6 +703,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 }, .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 }, .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 }, + .fs_len = 4, }, [ST_LSM6DSX_ID_GYRO] = { .reg = { @@ -703,6 +714,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 }, .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 }, .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 }, + .fs_len = 4, }, }, .batch = { @@ -800,6 +812,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 }, .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 }, .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 }, + .fs_len = 4, }, [ST_LSM6DSX_ID_GYRO] = { .reg = { @@ -810,6 +823,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 }, .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 }, .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 }, + .fs_len = 4, }, }, .batch = { @@ -933,11 +947,12 @@ static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor, int i, err; fs_table = &sensor->hw->settings->fs_table[sensor->id]; - for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) + for (i = 0; i < fs_table->fs_len; i++) { if (fs_table->fs_avl[i].gain == gain) break; + } - if (i == ST_LSM6DSX_FS_LIST_SIZE) + if (i == fs_table->fs_len) return -EINVAL; data = ST_LSM6DSX_SHIFT_VAL(fs_table->fs_avl[i].val, @@ -1196,18 +1211,13 @@ static ssize_t st_lsm6dsx_sysfs_scale_avail(struct device *dev, { struct st_lsm6dsx_sensor *sensor = iio_priv(dev_get_drvdata(dev)); const struct st_lsm6dsx_fs_table_entry *fs_table; - enum 
st_lsm6dsx_sensor_id id = sensor->id; struct st_lsm6dsx_hw *hw = sensor->hw; int i, len = 0; - fs_table = &hw->settings->fs_table[id]; - for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) { - if (!fs_table->fs_avl[i].gain) - break; - + fs_table = &hw->settings->fs_table[sensor->id]; + for (i = 0; i < fs_table->fs_len; i++) len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ", fs_table->fs_avl[i].gain); - } buf[len - 1] = '\n'; return len; diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c index 66fbcd94642d..f5fca2171954 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c @@ -61,6 +61,7 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = { .gain = 1500, .val = 0x0, }, /* 1500 uG/LSB */ + .fs_len = 1, }, .temp_comp = { .addr = 0x60, @@ -555,13 +556,9 @@ static ssize_t st_lsm6dsx_shub_scale_avail(struct device *dev, int i, len = 0; settings = sensor->ext_info.settings; - for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) { - u16 val = settings->fs_table.fs_avl[i].gain; - - if (val > 0) - len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ", - val); - } + for (i = 0; i < settings->fs_table.fs_len; i++) + len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ", + settings->fs_table.fs_avl[i].gain); buf[len - 1] = '\n'; return len; -- cgit v1.2.3 From 6c59a962e081df6d8fe43325bbfabec57e0d4751 Mon Sep 17 00:00:00 2001 From: Pascal Bouwmann Date: Thu, 29 Aug 2019 07:29:41 +0200 Subject: iio: fix center temperature of bmc150-accel-core The center temperature of the supported devices stored in the constant BMC150_ACCEL_TEMP_CENTER_VAL is not 24 degrees but 23 degrees. It seems that some datasheets were inconsistent on this value leading to the error. For most usecases will only make minor difference so not queued for stable. Signed-off-by: Pascal Bouwmann Signed-off-by: Jonathan Cameron --- drivers/iio/accel/bmc150-accel-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index cf6c0e3a83d3..121b4e89f038 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -117,7 +117,7 @@ #define BMC150_ACCEL_SLEEP_1_SEC 0x0F #define BMC150_ACCEL_REG_TEMP 0x08 -#define BMC150_ACCEL_TEMP_CENTER_VAL 24 +#define BMC150_ACCEL_TEMP_CENTER_VAL 23 #define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2)) #define BMC150_AUTO_SUSPEND_DELAY_MS 2000 -- cgit v1.2.3 From 5d7f965e5675ae0cbec5fe56af79da9039f723ed Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Thu, 5 Sep 2019 10:29:21 +0300 Subject: drm/i915/buddy: add missing call to i915_global_register We are meant to register the kmem cache at init, such the supplied exit and shrink hooks can be called. 
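Put differently, allocating the slab is not enough: the global also has to be registered so the globals exit/shrink machinery can reach it. Sketch of the resulting init (the slab flags are illustrative; the function name matches the hunk below):

	int __init i915_global_buddy_init(void)
	{
		global.slab_blocks = KMEM_CACHE(i915_buddy_block, 0);
		if (!global.slab_blocks)
			return -ENOMEM;

		i915_global_register(&global.base);	/* the missing call */
		return 0;
	}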
Signed-off-by: Matthew Auld Reviewed-by: Mika Kuoppala Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190905072921.7979-1-matthew.auld@intel.com --- drivers/gpu/drm/i915/i915_buddy.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c index fe1871d7c126..e9d4200ce3bc 100644 --- a/drivers/gpu/drm/i915/i915_buddy.c +++ b/drivers/gpu/drm/i915/i915_buddy.c @@ -38,6 +38,7 @@ int __init i915_global_buddy_init(void) if (!global.slab_blocks) return -ENOMEM; + i915_global_register(&global.base); return 0; } -- cgit v1.2.3 From d810583fc2fcf139cc766eb2303500b2d9cf064d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 7 Sep 2019 11:50:46 +0100 Subject: drm/i915/execlists: Remove incorrect BUG_ON for schedule-out As we may unwind incomplete requests (for preemption) prior to processing the CSB and the schedule-out events, we may update rq->engine (resetting it to point back to the parent virtual engine) prior to calling execlists_schedule_out(), invalidating the assertion that the request still points to the inflight engine. (The likelihood of this is increased if the CSB interrupt processing is pushed to the ksoftirqd for being too slow and direct submission overtakes it.) Tvrtko summarised it as: "So unwind from direct submission resets rq->engine and races with process_csb from the tasklet which notices request has actually completed." Reported-by: Vinay Belgaumkar Fixes: df403069029d ("drm/i915/execlists: Lift process_csb() out of the irq-off spinlock") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Tvrtko Ursulin Cc: Vinay Belgaumkar Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190907105046.19934-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 0ddfbebbcbbc..f073aea6a1fe 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -631,7 +631,6 @@ execlists_schedule_out(struct i915_request *rq) struct intel_engine_cs *cur, *old; trace_i915_request_out(rq); - GEM_BUG_ON(intel_context_inflight(ce) != rq->engine); old = READ_ONCE(ce->inflight); do -- cgit v1.2.3 From 9c426b770bd088f18899f836093d810a83b59b98 Mon Sep 17 00:00:00 2001 From: Talel Shenhar Date: Mon, 9 Sep 2019 11:39:18 +0300 Subject: irqchip/al-fic: Add support for irq retrigger Introduce interrupts retrigger support for Amazon's Annapurna Labs Fabric Interrupt Controller. 
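Condensed from the diff below, the mechanism is: write the interrupt's bit to the new AL_FIC_SET_CAUSE register so the hardware raises the line again, and return 1 so the irq core treats the resend as done in hardware rather than falling back to a software resend.

	/* in the retrigger callback */
	writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE);
	return 1;

	/* wired up next to the other generic-chip callbacks */
	gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger;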
Signed-off-by: Talel Shenhar Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/1568018358-18985-1-git-send-email-talel@amazon.com --- drivers/irqchip/irq-al-fic.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/irqchip/irq-al-fic.c b/drivers/irqchip/irq-al-fic.c index 1a57cee3efab..0b0a73739756 100644 --- a/drivers/irqchip/irq-al-fic.c +++ b/drivers/irqchip/irq-al-fic.c @@ -15,6 +15,7 @@ /* FIC Registers */ #define AL_FIC_CAUSE 0x00 +#define AL_FIC_SET_CAUSE 0x08 #define AL_FIC_MASK 0x10 #define AL_FIC_CONTROL 0x28 @@ -126,6 +127,16 @@ static void al_fic_irq_handler(struct irq_desc *desc) chained_irq_exit(irqchip, desc); } +static int al_fic_irq_retrigger(struct irq_data *data) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + struct al_fic *fic = gc->private; + + writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE); + + return 1; +} + static int al_fic_register(struct device_node *node, struct al_fic *fic) { @@ -159,6 +170,7 @@ static int al_fic_register(struct device_node *node, gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit; gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit; gc->chip_types->chip.irq_set_type = al_fic_irq_set_type; + gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger; gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE; gc->private = fic; -- cgit v1.2.3 From 212fbf2c9e84ceb267cadd8342156b69b54b8135 Mon Sep 17 00:00:00 2001 From: Sandeep Sheriker Mallikarjun Date: Mon, 9 Sep 2019 14:00:35 +0300 Subject: irqchip/atmel-aic5: Add support for sam9x60 irqchip Add support for SAM9X60 irqchip. Signed-off-by: Sandeep Sheriker Mallikarjun Signed-off-by: Claudiu Beznea Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/1568026835-6646-1-git-send-email-claudiu.beznea@microchip.com [claudiu.beznea@microchip.com: update aic5_irq_fixups[], update documentation] --- .../devicetree/bindings/interrupt-controller/atmel,aic.txt | 7 +++++-- drivers/irqchip/irq-atmel-aic5.c | 10 ++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt b/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt index f4c5d34c4111..7079d44bf3ba 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt @@ -1,8 +1,11 @@ * Advanced Interrupt Controller (AIC) Required properties: -- compatible: Should be "atmel,-aic" - can be "at91rm9200", "sama5d2", "sama5d3" or "sama5d4" +- compatible: Should be: + - "atmel,-aic" where can be "at91rm9200", "sama5d2", + "sama5d3" or "sama5d4" + - "microchip,-aic" where can be "sam9x60" + - interrupt-controller: Identifies the node as an interrupt controller. - #interrupt-cells: The number of cells to define the interrupts. It should be 3. The first cell is the IRQ number (aka "Peripheral IDentifier" on datasheet). 
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c index 6acad2ea0fb3..29333497ba10 100644 --- a/drivers/irqchip/irq-atmel-aic5.c +++ b/drivers/irqchip/irq-atmel-aic5.c @@ -313,6 +313,7 @@ static void __init sama5d3_aic_irq_fixup(void) static const struct of_device_id aic5_irq_fixups[] __initconst = { { .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup }, { .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup }, + { .compatible = "microchip,sam9x60", .data = sama5d3_aic_irq_fixup }, { /* sentinel */ }, }; @@ -390,3 +391,12 @@ static int __init sama5d4_aic5_of_init(struct device_node *node, return aic5_of_init(node, parent, NR_SAMA5D4_IRQS); } IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init); + +#define NR_SAM9X60_IRQS 50 + +static int __init sam9x60_aic5_of_init(struct device_node *node, + struct device_node *parent) +{ + return aic5_of_init(node, parent, NR_SAM9X60_IRQS); +} +IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init); -- cgit v1.2.3 From b6749e20d5710c955fc6d4322f8fd98c915b2573 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 1 Sep 2019 22:12:35 +0100 Subject: arm64: KVM: Drop hyp_alternate_select for checking for ARM64_WORKAROUND_834220 There is no reason for using hyp_alternate_select when checking for ARM64_WORKAROUND_834220, as each of the capabilities is also backed by a static key. Just replace the KVM-specific construct with cpus_have_const_cap(ARM64_WORKAROUND_834220). Signed-off-by: Marc Zyngier Reviewed-by: Christoffer Dall Reviewed-by: Andrew Jones --- arch/arm64/kvm/hyp/switch.c | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index adaf266d8de8..a15baca9aca0 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -229,20 +229,6 @@ static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu) } } -static bool __hyp_text __true_value(void) -{ - return true; -} - -static bool __hyp_text __false_value(void) -{ - return false; -} - -static hyp_alternate_select(__check_arm_834220, - __false_value, __true_value, - ARM64_WORKAROUND_834220); - static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar) { u64 par, tmp; @@ -298,7 +284,8 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) * resolve the IPA using the AT instruction. */ if (!(esr & ESR_ELx_S1PTW) && - (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) { + (cpus_have_const_cap(ARM64_WORKAROUND_834220) || + (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) { if (!__translate_far_to_hpfar(far, &hpfar)) return false; } else { -- cgit v1.2.3 From aa979fa899c5cf592e63ce7b34bc767224225c6a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 1 Sep 2019 22:12:36 +0100 Subject: arm64: KVM: Replace hyp_alternate_select with has_vhe() Given that the TLB invalidation path is pretty rarely used, there was never any advantage to using hyp_alternate_select() here. has_vhe(), being a glorified static key, is the right tool for the job. Off you go. 
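For readers unfamiliar with the mechanism: has_vhe() is backed by a static key, so the plain if/else it enables is patched into a fixed branch at boot instead of going through a run-time selected function pointer. A generic static-key sketch (the key, fast_path(), slow_path() and detect_feature() are made-up names for illustration, not the arm64 capability code):

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_feature);

static void fast_path(void) { /* feature-specific path */ }
static void slow_path(void) { /* fallback path */ }

static void do_work(void)
{
	if (static_branch_unlikely(&example_feature))
		fast_path();	/* patched to a direct branch once the key is enabled */
	else
		slow_path();
}

static void detect_feature(bool present)
{
	if (present)		/* e.g. the result of a CPU capability probe */
		static_branch_enable(&example_feature);
}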
Signed-off-by: Marc Zyngier Reviewed-by: Christoffer Dall Reviewed-by: Andrew Jones --- arch/arm64/kvm/hyp/tlb.c | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index c466060b76d6..eb0efc5557f3 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -67,10 +67,14 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm, isb(); } -static hyp_alternate_select(__tlb_switch_to_guest, - __tlb_switch_to_guest_nvhe, - __tlb_switch_to_guest_vhe, - ARM64_HAS_VIRT_HOST_EXTN); +static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm, + struct tlb_inv_context *cxt) +{ + if (has_vhe()) + __tlb_switch_to_guest_vhe(kvm, cxt); + else + __tlb_switch_to_guest_nvhe(kvm, cxt); +} static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm, struct tlb_inv_context *cxt) @@ -98,10 +102,14 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm, write_sysreg(0, vttbr_el2); } -static hyp_alternate_select(__tlb_switch_to_host, - __tlb_switch_to_host_nvhe, - __tlb_switch_to_host_vhe, - ARM64_HAS_VIRT_HOST_EXTN); +static void __hyp_text __tlb_switch_to_host(struct kvm *kvm, + struct tlb_inv_context *cxt) +{ + if (has_vhe()) + __tlb_switch_to_host_vhe(kvm, cxt); + else + __tlb_switch_to_host_nvhe(kvm, cxt); +} void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) { @@ -111,7 +119,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) /* Switch to requested VMID */ kvm = kern_hyp_va(kvm); - __tlb_switch_to_guest()(kvm, &cxt); + __tlb_switch_to_guest(kvm, &cxt); /* * We could do so much better if we had the VA as well. @@ -154,7 +162,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) if (!has_vhe() && icache_is_vpipt()) __flush_icache_all(); - __tlb_switch_to_host()(kvm, &cxt); + __tlb_switch_to_host(kvm, &cxt); } void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) @@ -165,13 +173,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) /* Switch to requested VMID */ kvm = kern_hyp_va(kvm); - __tlb_switch_to_guest()(kvm, &cxt); + __tlb_switch_to_guest(kvm, &cxt); __tlbi(vmalls12e1is); dsb(ish); isb(); - __tlb_switch_to_host()(kvm, &cxt); + __tlb_switch_to_host(kvm, &cxt); } void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) @@ -180,13 +188,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) struct tlb_inv_context cxt; /* Switch to requested VMID */ - __tlb_switch_to_guest()(kvm, &cxt); + __tlb_switch_to_guest(kvm, &cxt); __tlbi(vmalle1); dsb(nsh); isb(); - __tlb_switch_to_host()(kvm, &cxt); + __tlb_switch_to_host(kvm, &cxt); } void __hyp_text __kvm_flush_vm_context(void) -- cgit v1.2.3 From 084b5a80e87258997632dbd975ec3e5fda6bb943 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 1 Sep 2019 22:12:37 +0100 Subject: arm64: KVM: Kill hyp_alternate_select() hyp_alternate_select() is now completely unused. Goodbye. 
Signed-off-by: Marc Zyngier Reviewed-by: Christoffer Dall Reviewed-by: Andrew Jones --- arch/arm64/include/asm/kvm_hyp.h | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 86825aa20852..97f21cc66657 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -47,30 +47,6 @@ #define read_sysreg_el2(r) read_sysreg_elx(r, _EL2, _EL1) #define write_sysreg_el2(v,r) write_sysreg_elx(v, r, _EL2, _EL1) -/** - * hyp_alternate_select - Generates patchable code sequences that are - * used to switch between two implementations of a function, depending - * on the availability of a feature. - * - * @fname: a symbol name that will be defined as a function returning a - * function pointer whose type will match @orig and @alt - * @orig: A pointer to the default function, as returned by @fname when - * @cond doesn't hold - * @alt: A pointer to the alternate function, as returned by @fname - * when @cond holds - * @cond: a CPU feature (as described in asm/cpufeature.h) - */ -#define hyp_alternate_select(fname, orig, alt, cond) \ -typeof(orig) * __hyp_text fname(void) \ -{ \ - typeof(alt) *val = orig; \ - asm volatile(ALTERNATIVE("nop \n", \ - "mov %0, %1 \n", \ - cond) \ - : "+r" (val) : "r" (alt)); \ - return val; \ -} - int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu); void __vgic_v3_save_state(struct kvm_vcpu *vcpu); -- cgit v1.2.3 From 1e0a96e508827ad88229bff32ea2641c83fc52a8 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 9 Sep 2019 13:40:50 +0100 Subject: drm/i915: export color_differs Export color_differs so that we can use it elsewhere. Signed-off-by: Matthew Auld Cc: Chris Wilson Cc: Joonas Lahtinen Cc: Rodrigo Vivi Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190909124052.22900-1-matthew.auld@intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +- drivers/gpu/drm/i915/i915_vma.c | 11 ++++------- drivers/gpu/drm/i915/i915_vma.h | 6 ++++++ 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 906dc6fff383..095f5e358a58 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2552,7 +2552,7 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node, u64 *start, u64 *end) { - if (node->allocated && node->color != color) + if (i915_node_color_differs(node, color)) *start += I915_GTT_PAGE_SIZE; /* Also leave a space between the unallocated reserved node after the diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index e0e677b2a3a9..a90bd2678353 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -477,11 +477,6 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) vma->flags &= ~I915_VMA_CAN_FENCE; } -static bool color_differs(struct drm_mm_node *node, unsigned long color) -{ - return node->allocated && node->color != color; -} - bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level) { struct drm_mm_node *node = &vma->node; @@ -502,11 +497,13 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level) GEM_BUG_ON(list_empty(&node->node_list)); other = list_prev_entry(node, node_list); - if (color_differs(other, cache_level) && !drm_mm_hole_follows(other)) + if (i915_node_color_differs(other, cache_level) && + !drm_mm_hole_follows(other)) return false; 
other = list_next_entry(node, node_list); - if (color_differs(other, cache_level) && !drm_mm_hole_follows(node)) + if (i915_node_color_differs(other, cache_level) && + !drm_mm_hole_follows(node)) return false; return true; diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 889fc7cb910a..5b1e0cf7669d 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -373,6 +373,12 @@ static inline bool i915_vma_is_bound(const struct i915_vma *vma, return vma->flags & where; } +static inline bool i915_node_color_differs(const struct drm_mm_node *node, + unsigned long color) +{ + return node->allocated && node->color != color; +} + /** * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture * @vma: VMA to iomap -- cgit v1.2.3 From e9ceb751ad4ee69591bcd33b2a220738855395ea Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 9 Sep 2019 13:40:51 +0100 Subject: drm/i915: s/i915_gtt_color_adjust/i915_ggtt_color_adjust Make it clear that the color adjust callback applies to the ggtt. Signed-off-by: Matthew Auld Cc: Chris Wilson Cc: Joonas Lahtinen Cc: Rodrigo Vivi Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190909124052.22900-2-matthew.auld@intel.com --- drivers/gpu/drm/i915/i915_gem_gtt.c | 10 +++++----- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 095f5e358a58..48688d683e95 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2547,10 +2547,10 @@ static int ggtt_set_pages(struct i915_vma *vma) return 0; } -static void i915_gtt_color_adjust(const struct drm_mm_node *node, - unsigned long color, - u64 *start, - u64 *end) +static void i915_ggtt_color_adjust(const struct drm_mm_node *node, + unsigned long color, + u64 *start, + u64 *end) { if (i915_node_color_differs(node, color)) *start += I915_GTT_PAGE_SIZE; @@ -3206,7 +3206,7 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt) ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); if (!HAS_LLC(i915) && !HAS_PPGTT(i915)) - ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; + ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; if (!io_mapping_init_wc(&ggtt->iomap, ggtt->gmadr.start, diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index cb30c669b1b7..fca38167bdce 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -276,7 +276,7 @@ static int igt_evict_for_cache_color(void *arg) /* Currently the use of color_adjust is limited to cache domains within * the ggtt, and so the presence of mm.color_adjust is assumed to be - * i915_gtt_color_adjust throughout our driver, so using a mock color + * i915_ggtt_color_adjust throughout our driver, so using a mock color * adjust will work just fine for our purposes. */ ggtt->vm.mm.color_adjust = mock_color_adjust; -- cgit v1.2.3 From 33dd8899231372a438bf7d96afa1dbe13a5e17c7 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 9 Sep 2019 13:40:52 +0100 Subject: drm/i915: cleanup cache-coloring Try to tidy up the cache-coloring such that we rid the code of any mm.color_adjust assumptions, this should hopefully make it more obvious in the code when we need to actually use the cache-level as the color, and as a bonus should make adding a different color-scheme simpler. 
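As background for the helper introduced here, the drm_mm colouring contract works like this: the colour passed at insertion time is compared against neighbouring nodes through mm->color_adjust, which may shrink the candidate hole so that differently coloured neighbours keep a guard page between them. A generic sketch of that contract (a simplified version of the ggtt callback shown above, not an i915 call site; PAGE_SIZE stands in for the real guard size):

#include <drm/drm_mm.h>

static void example_color_adjust(const struct drm_mm_node *node,
				 unsigned long color,
				 u64 *start, u64 *end)
{
	/* Keep a guard page between differently coloured neighbours. */
	if (node->allocated && node->color != color)
		*start += PAGE_SIZE;
}

static int example_insert(struct drm_mm *mm, struct drm_mm_node *node,
			  u64 size, unsigned long color)
{
	mm->color_adjust = example_color_adjust; /* normally set once at init */

	return drm_mm_insert_node_in_range(mm, node, size, 0 /* alignment */,
					   color, 0, U64_MAX,
					   DRM_MM_INSERT_BEST);
}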
Signed-off-by: Matthew Auld Cc: Chris Wilson Cc: Joonas Lahtinen Cc: Rodrigo Vivi Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190909124052.22900-3-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 6 ++++-- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem_evict.c | 12 +++++------- drivers/gpu/drm/i915/i915_gem_gtt.h | 6 ++++++ drivers/gpu/drm/i915/i915_vma.c | 22 +++++++++++----------- drivers/gpu/drm/i915/i915_vma.h | 2 +- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 10 ++++++---- 7 files changed, 34 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 6af740a5e3db..da3e7cf12aa1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -294,8 +294,10 @@ restart: } } - list_for_each_entry(vma, &obj->vma.list, obj_link) - vma->node.color = cache_level; + list_for_each_entry(vma, &obj->vma.list, obj_link) { + if (i915_vm_has_cache_coloring(vma->vm)) + vma->node.color = cache_level; + } i915_gem_object_set_cache_coherency(obj, cache_level); obj->cache_dirty = true; /* Always invalidate stale cachelines */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index db7480831e52..e289b4ffd34b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2364,7 +2364,7 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct i915_address_space *vm, u64 min_size, u64 alignment, - unsigned cache_level, + unsigned long color, u64 start, u64 end, unsigned flags); int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 52c86c6e0673..e76c9da9992d 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -70,7 +70,7 @@ mark_free(struct drm_mm_scan *scan, * @vm: address space to evict from * @min_size: size of the desired free space * @alignment: alignment constraint of the desired free space - * @cache_level: cache_level for the desired space + * @color: color for the desired space * @start: start (inclusive) of the range from which to evict objects * @end: end (exclusive) of the range from which to evict objects * @flags: additional flags to control the eviction algorithm @@ -91,7 +91,7 @@ mark_free(struct drm_mm_scan *scan, int i915_gem_evict_something(struct i915_address_space *vm, u64 min_size, u64 alignment, - unsigned cache_level, + unsigned long color, u64 start, u64 end, unsigned flags) { @@ -124,7 +124,7 @@ i915_gem_evict_something(struct i915_address_space *vm, if (flags & PIN_MAPPABLE) mode = DRM_MM_INSERT_LOW; drm_mm_scan_init_with_range(&scan, &vm->mm, - min_size, alignment, cache_level, + min_size, alignment, color, start, end, mode); /* @@ -266,7 +266,6 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, u64 start = target->start; u64 end = start + target->size; struct i915_vma *vma, *next; - bool check_color; int ret = 0; lockdep_assert_held(&vm->i915->drm.struct_mutex); @@ -283,8 +282,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, if (!(flags & PIN_NONBLOCK)) i915_retire_requests(vm->i915); - check_color = vm->mm.color_adjust; - if (check_color) { + if (i915_vm_has_cache_coloring(vm)) { /* Expand search to cover neighbouring guard 
pages (or lack!) */ if (start) start -= I915_GTT_PAGE_SIZE; @@ -310,7 +308,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, * abutt and conflict. If they are in conflict, then we evict * those as well to make room for our guard pages. */ - if (check_color) { + if (i915_vm_has_cache_coloring(vm)) { if (node->start + node->size == target->start) { if (node->color == target->color) continue; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 07c85c134d4c..201788126a89 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -376,6 +376,12 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm) return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K); } +static inline bool +i915_vm_has_cache_coloring(struct i915_address_space *vm) +{ + return i915_is_ggtt(vm) && vm->mm.color_adjust; +} + /* The Graphics Translation Table is the way in which GEN hardware translates a * Graphics Virtual Address into a Physical Address. In addition to the normal * collateral associated with any va->pa translations GEN hardware also has a diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index a90bd2678353..68d9f9b4d050 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -477,7 +477,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) vma->flags &= ~I915_VMA_CAN_FENCE; } -bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level) +bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color) { struct drm_mm_node *node = &vma->node; struct drm_mm_node *other; @@ -489,7 +489,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level) * these constraints apply and set the drm_mm.color_adjust * appropriately. 
*/ - if (vma->vm->mm.color_adjust == NULL) + if (!i915_vm_has_cache_coloring(vma->vm)) return true; /* Only valid to be called on an already inserted vma */ @@ -497,12 +497,12 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level) GEM_BUG_ON(list_empty(&node->node_list)); other = list_prev_entry(node, node_list); - if (i915_node_color_differs(other, cache_level) && + if (i915_node_color_differs(other, color) && !drm_mm_hole_follows(other)) return false; other = list_next_entry(node, node_list); - if (i915_node_color_differs(other, cache_level) && + if (i915_node_color_differs(other, color) && !drm_mm_hole_follows(node)) return false; @@ -539,7 +539,7 @@ static int i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) { struct drm_i915_private *dev_priv = vma->vm->i915; - unsigned int cache_level; + unsigned long color; u64 start, end; int ret; @@ -580,14 +580,14 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) return -ENOSPC; } + color = 0; if (vma->obj) { ret = i915_gem_object_pin_pages(vma->obj); if (ret) return ret; - cache_level = vma->obj->cache_level; - } else { - cache_level = 0; + if (i915_vm_has_cache_coloring(vma->vm)) + color = vma->obj->cache_level; } GEM_BUG_ON(vma->pages); @@ -605,7 +605,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) } ret = i915_gem_gtt_reserve(vma->vm, &vma->node, - size, offset, cache_level, + size, offset, color, flags); if (ret) goto err_clear; @@ -644,7 +644,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) } ret = i915_gem_gtt_insert(vma->vm, &vma->node, - size, alignment, cache_level, + size, alignment, color, start, end, flags); if (ret) goto err_clear; @@ -653,7 +653,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) GEM_BUG_ON(vma->node.start + vma->node.size > end); } GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level)); + GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color)); mutex_lock(&vma->vm->mutex); list_move_tail(&vma->vm_link, &vma->vm->bound_list); diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 5b1e0cf7669d..425bda4db2c2 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -295,7 +295,7 @@ i915_vma_compare(struct i915_vma *vma, int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags); -bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level); +bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color); bool i915_vma_misplaced(const struct i915_vma *vma, u64 size, u64 alignment, u64 flags); void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index fca38167bdce..2905fb21d866 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -274,12 +274,14 @@ static int igt_evict_for_cache_color(void *arg) LIST_HEAD(objects); int err; - /* Currently the use of color_adjust is limited to cache domains within - * the ggtt, and so the presence of mm.color_adjust is assumed to be - * i915_ggtt_color_adjust throughout our driver, so using a mock color - * adjust will work just fine for our purposes. 
+ /* + * Currently the use of color_adjust for the GGTT is limited to cache + * coloring and guard pages, and so the presence of mm.color_adjust for + * the GGTT is assumed to be i915_ggtt_color_adjust, hence using a mock + * color adjust will work just fine for our purposes. */ ggtt->vm.mm.color_adjust = mock_color_adjust; + GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm)); obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) { -- cgit v1.2.3 From fd521d3b0ed2eb8b8c67c701795ecb08b9c3f544 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Mon, 9 Sep 2019 18:16:46 +0100 Subject: drm/i915: include GTT page-size info in error state It might prove useful in the future to know if the vma is utilising huge-GTT-pages. Related to this is the GTT cache, where there is some HW "quirkiness" where it must be disabled if using 2M pages, so include that for good measure. Suggested-by: Chris Wilson Signed-off-by: Matthew Auld Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190909171646.22090-1-matthew.auld@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 1 - drivers/gpu/drm/i915/i915_gpu_error.c | 10 ++++++++++ drivers/gpu/drm/i915/i915_gpu_error.h | 2 ++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 13b9dc0e1a89..a558edf15ec8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -160,7 +160,6 @@ struct drm_i915_gem_object { struct sg_table *pages; void *mapping; - /* TODO: whack some of this into the error state */ struct i915_page_sizes { /** * The sg mask of the pages sg_table. 
i.e the mask of diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 3ccf7fd9307f..6384a06aa5bf 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -575,6 +575,9 @@ static void print_error_obj(struct drm_i915_error_state_buf *m, lower_32_bits(obj->gtt_offset)); } + if (obj->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K) + err_printf(m, "gtt_page_sizes = 0x%08x\n", obj->gtt_page_sizes); + err_compression_marker(m); for (page = 0; page < obj->page_count; page++) { int i, len; @@ -735,6 +738,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, if (IS_GEN(m->i915, 7)) err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); + if (IS_GEN_RANGE(m->i915, 8, 11)) + err_printf(m, "GTT_CACHE_EN: 0x%08x\n", error->gtt_cache); + for (ee = error->engine; ee; ee = ee->next) error_print_engine(m, ee, error->epoch); @@ -985,6 +991,7 @@ i915_error_object_create(struct drm_i915_private *i915, dst->gtt_offset = vma->node.start; dst->gtt_size = vma->node.size; + dst->gtt_page_sizes = vma->page_sizes.gtt; dst->num_pages = num_pages; dst->page_count = 0; dst->unused = 0; @@ -1554,6 +1561,9 @@ static void capture_reg_state(struct i915_gpu_state *error) error->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS); } + if (IS_GEN_RANGE(i915, 8, 11)) + error->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN); + /* 4: Everything else */ if (INTEL_GEN(i915) >= 11) { error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index df9f57766626..63cf387411e0 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -74,6 +74,7 @@ struct i915_gpu_state { u32 gam_ecochk; u32 gab_ctl; u32 gfx_mode; + u32 gtt_cache; u32 nfence; u64 fence[I915_MAX_NUM_FENCES]; @@ -127,6 +128,7 @@ struct i915_gpu_state { struct drm_i915_error_object { u64 gtt_offset; u64 gtt_size; + u32 gtt_page_sizes; int num_pages; int page_count; int unused; -- cgit v1.2.3 From fa9a09f15065650d97e3b1336d11e4ad9672b759 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 10 Sep 2019 09:02:08 +0100 Subject: drm/i915/execlists: Clear STOP_RING bit on reset During reset, we try to ensure no forward progress of the CS prior to the reset by setting the STOP_RING bit in RING_MI_MODE. Since gen9, this register is context saved and do we end up in the odd situation where we save the STOP_RING bit and so try to stop the engine again immediately upon resume. This is quite unexpected and causes us to complain about an early CS completion event! 
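The fix relies on the masked-register write convention used by RING_MI_MODE and other i915 masked registers: bits 31:16 of the written dword act as a per-bit write-enable mask for bits 15:0, and the saved context image simply stores the dword that will be written back on restore. A short sketch of the idiom (i915 carries equivalent helpers, _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE(), in i915_reg.h):

#define EXAMPLE_MASKED_BIT_ENABLE(bit)	(((bit) << 16) | (bit))	/* set bit on write */
#define EXAMPLE_MASKED_BIT_DISABLE(bit)	((bit) << 16)		/* clear bit on write */

So clearing STOP_RING in the saved image means clearing the value bit while keeping its mask bit set, which is exactly what the reset hook below does to the GEN9_CTX_RING_MI_MODE dword.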
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111514 Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190910080208.4223-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 12 ++++++++++++ drivers/gpu/drm/i915/gt/intel_lrc_reg.h | 2 ++ 2 files changed, 14 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index f073aea6a1fe..64170ce0c91b 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2368,6 +2368,17 @@ static struct i915_request *active_request(struct i915_request *rq) return active; } +static void __execlists_reset_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine) +{ + u32 *regs = ce->lrc_reg_state; + + if (INTEL_GEN(engine->i915) >= 9) { + regs[GEN9_CTX_RING_MI_MODE + 1] &= ~STOP_RING; + regs[GEN9_CTX_RING_MI_MODE + 1] |= STOP_RING << 16; + } +} + static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -2455,6 +2466,7 @@ out_replay: GEM_TRACE("%s replay {head:%04x, tail:%04x\n", engine->name, ce->ring->head, ce->ring->tail); intel_ring_update_space(ce->ring); + __execlists_reset_reg_state(ce, engine); __execlists_update_reg_state(ce, engine); mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h index 68caf8541866..7e773e74a3fe 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h @@ -39,6 +39,8 @@ #define CTX_R_PWR_CLK_STATE 0x42 #define CTX_END 0x44 +#define GEN9_CTX_RING_MI_MODE 0x54 + /* GEN12+ Reg State Context */ #define GEN12_CTX_BB_PER_CTX_PTR 0x12 #define GEN12_CTX_LRI_HEADER_3 0x41 -- cgit v1.2.3 From b0a7c754140bee6a1124b5bfad17205149942fd7 Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Mon, 9 Sep 2019 17:31:41 +0530 Subject: drm/i915/display: Add gamma precision function for CHV intel_color_get_gamma_bit_precision() is extended for cherryview by adding chv_gamma_precision(), i965 will use existing i9xx_gamma_precision() func only. 
Signed-off-by: Swati Sharma Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1568030503-26747-2-git-send-email-swati2.sharma@intel.com --- drivers/gpu/drm/i915/display/intel_color.c | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 6d641e159899..4d9a5686a4d7 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1400,6 +1400,14 @@ static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state) } } +static int chv_gamma_precision(const struct intel_crtc_state *crtc_state) +{ + if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA) + return 10; + else + return i9xx_gamma_precision(crtc_state); +} + static int glk_gamma_precision(const struct intel_crtc_state *crtc_state) { switch (crtc_state->gamma_mode) { @@ -1421,12 +1429,17 @@ int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_stat if (!crtc_state->gamma_enable) return 0; - if (HAS_GMCH(dev_priv) && !IS_CHERRYVIEW(dev_priv)) - return i9xx_gamma_precision(crtc_state); - else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) - return glk_gamma_precision(crtc_state); - else if (IS_IRONLAKE(dev_priv)) - return ilk_gamma_precision(crtc_state); + if (HAS_GMCH(dev_priv)) { + if (IS_CHERRYVIEW(dev_priv)) + return chv_gamma_precision(crtc_state); + else + return i9xx_gamma_precision(crtc_state); + } else { + if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + return glk_gamma_precision(crtc_state); + else if (IS_IRONLAKE(dev_priv)) + return ilk_gamma_precision(crtc_state); + } return 0; } -- cgit v1.2.3 From 8efd06989df45fc81eb675e18e05b579bf064e8a Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Mon, 9 Sep 2019 17:31:42 +0530 Subject: drm/i915/display: Extract i965_read_luts() For i965, add hw read out to create hw blob of gamma lut values. 
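In the i965 "10.6" layout read back below, each 16-bit channel value of a LUT entry is split across two consecutive PALETTE dwords: the even dword holds the low byte of the channel and the odd dword the high byte, with the final entry coming from PIPEGCMAX. A tiny sketch of the recombination (the helper name and the 0xab/0xcd values are made up):

static u16 recombine_10p6(u8 high, u8 low)
{
	/* e.g. high = 0xab (from PALETTE 2*i + 1), low = 0xcd (from PALETTE 2*i + 0) */
	return ((u16)high << 8) | low;	/* -> 0xabcd */
}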
Review comments from old series: https://patchwork.freedesktop.org/series/58039/ v4: -No need to initialize *blob [Jani] -Removed right shifts [Jani] -Dropped dev local var [Jani] v5: -Returned blob instead of assigning it internally within the function [Ville] -Renamed i965_get_color_config() to i965_read_lut() [Ville] -Renamed i965_get_gamma_config_10p6() to i965_read_gamma_lut_10p6() [Ville] v9: -Typo and 80 character limit [Uma] -Made read func para as const [Ville, Uma] -Renamed i965_read_gamma_lut_10p6() to i965_read_lut_10p6() [Ville, Uma] v10: -Swapped ldw and udw while creating hw blob [Jani] -Added last index rgb lut value from PIPEGCMAX to h/w blob [Jani] Signed-off-by: Swati Sharma Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1568030503-26747-3-git-send-email-swati2.sharma@intel.com --- drivers/gpu/drm/i915/display/intel_color.c | 50 ++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 4 +++ 2 files changed, 54 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 4d9a5686a4d7..765f858f66d7 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1569,6 +1569,55 @@ static void i9xx_read_luts(struct intel_crtc_state *crtc_state) crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); } +static struct drm_property_blob * +i965_read_lut_10p6(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + enum pipe pipe = crtc->pipe; + struct drm_property_blob *blob; + struct drm_color_lut *blob_data; + u32 i, val1, val2; + + blob = drm_property_create_blob(&dev_priv->drm, + sizeof(struct drm_color_lut) * lut_size, + NULL); + if (IS_ERR(blob)) + return NULL; + + blob_data = blob->data; + + for (i = 0; i < lut_size - 1; i++) { + val1 = I915_READ(PALETTE(pipe, 2 * i + 0)); + val2 = I915_READ(PALETTE(pipe, 2 * i + 1)); + + blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 | + REG_FIELD_GET(PALETTE_RED_MASK, val1); + blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 | + REG_FIELD_GET(PALETTE_GREEN_MASK, val1); + blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 | + REG_FIELD_GET(PALETTE_BLUE_MASK, val1); + } + + blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK, + I915_READ(PIPEGCMAX(pipe, 0))); + blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK, + I915_READ(PIPEGCMAX(pipe, 1))); + blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK, + I915_READ(PIPEGCMAX(pipe, 2))); + + return blob; +} + +static void i965_read_luts(struct intel_crtc_state *crtc_state) +{ + if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) + crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); + else + crtc_state->base.gamma_lut = i965_read_lut_10p6(crtc_state); +} + static struct drm_property_blob * ilk_read_lut_10(const struct intel_crtc_state *crtc_state) { @@ -1672,6 +1721,7 @@ void intel_color_init(struct intel_crtc *crtc) dev_priv->display.color_check = i9xx_color_check; dev_priv->display.color_commit = i9xx_color_commit; dev_priv->display.load_luts = i965_load_luts; + dev_priv->display.read_luts = i965_read_luts; } else { dev_priv->display.color_check = i9xx_color_check; dev_priv->display.color_commit = i9xx_color_commit; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 
006cffd56be2..1dc5487593f1 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3558,6 +3558,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define _PALETTE_A 0xa000 #define _PALETTE_B 0xa800 #define _CHV_PALETTE_C 0xc000 +#define PALETTE_RED_MASK REG_GENMASK(23, 16) +#define PALETTE_GREEN_MASK REG_GENMASK(15, 8) +#define PALETTE_BLUE_MASK REG_GENMASK(7, 0) #define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \ _PICK((pipe), _PALETTE_A, \ _PALETTE_B, _CHV_PALETTE_C) + \ @@ -5767,6 +5770,7 @@ enum { #define _PIPEAGCMAX 0x70010 #define _PIPEBGCMAX 0x71010 +#define PIPEGCMAX_RGB_MASK REG_GENMASK(15, 0) #define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4) #define _PIPE_MISC_A 0x70030 -- cgit v1.2.3 From 4d154d33941dad07b9bb49c9dee356342a98e00a Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Mon, 9 Sep 2019 17:31:43 +0530 Subject: drm/i915/display: Extract chv_read_luts() For cherryview, add hw read out to create hw blob of gamma lut values. Review comments from previous series: https://patchwork.freedesktop.org/patch/328252 v4: -No need to initialize *blob [Jani] -Removed right shifts [Jani] -Dropped dev local var [Jani] v5: -Returned blob instead of assigning it internally within the function [Ville] -Renamed function cherryview_get_color_config() to chv_read_luts() -Renamed cherryview_get_gamma_config() to chv_read_cgm_gamma_lut() [Ville] v9: -80 character limit [Uma] -Made read func para as const [Ville, Uma] -Renamed chv_read_cgm_gamma_lut() to chv_read_cgm_gamma_lut() [Ville, Uma] Signed-off-by: Swati Sharma Reviewed-by: Jani Nikula Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1568030503-26747-4-git-send-email-swati2.sharma@intel.com --- drivers/gpu/drm/i915/display/intel_color.c | 43 ++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 3 +++ 2 files changed, 46 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 765f858f66d7..318308dc136c 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1618,6 +1618,48 @@ static void i965_read_luts(struct intel_crtc_state *crtc_state) crtc_state->base.gamma_lut = i965_read_lut_10p6(crtc_state); } +static struct drm_property_blob * +chv_read_cgm_lut(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + enum pipe pipe = crtc->pipe; + struct drm_property_blob *blob; + struct drm_color_lut *blob_data; + u32 i, val; + + blob = drm_property_create_blob(&dev_priv->drm, + sizeof(struct drm_color_lut) * lut_size, + NULL); + if (IS_ERR(blob)) + return NULL; + + blob_data = blob->data; + + for (i = 0; i < lut_size; i++) { + val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 0)); + blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( + CGM_PIPE_GAMMA_GREEN_MASK, val), 10); + blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( + CGM_PIPE_GAMMA_BLUE_MASK, val), 10); + + val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 1)); + blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( + CGM_PIPE_GAMMA_RED_MASK, val), 10); + } + + return blob; +} + +static void chv_read_luts(struct intel_crtc_state *crtc_state) +{ + if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) + crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); + else + crtc_state->base.gamma_lut = 
chv_read_cgm_lut(crtc_state); +} + static struct drm_property_blob * ilk_read_lut_10(const struct intel_crtc_state *crtc_state) { @@ -1717,6 +1759,7 @@ void intel_color_init(struct intel_crtc *crtc) dev_priv->display.color_check = chv_color_check; dev_priv->display.color_commit = i9xx_color_commit; dev_priv->display.load_luts = chv_load_luts; + dev_priv->display.read_luts = chv_read_luts; } else if (INTEL_GEN(dev_priv) >= 4) { dev_priv->display.color_check = i9xx_color_check; dev_priv->display.color_commit = i9xx_color_commit; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1dc5487593f1..bf37ecebc82f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -10421,6 +10421,9 @@ enum skl_power_gate { #define CGM_PIPE_MODE_GAMMA (1 << 2) #define CGM_PIPE_MODE_CSC (1 << 1) #define CGM_PIPE_MODE_DEGAMMA (1 << 0) +#define CGM_PIPE_GAMMA_RED_MASK REG_GENMASK(9, 0) +#define CGM_PIPE_GAMMA_GREEN_MASK REG_GENMASK(25, 16) +#define CGM_PIPE_GAMMA_BLUE_MASK REG_GENMASK(9, 0) #define _CGM_PIPE_B_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x69900) #define _CGM_PIPE_B_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x69904) -- cgit v1.2.3 From 198d2533669bbe162e52eee2c35ddd8594967556 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 7 Sep 2019 09:43:34 +0100 Subject: drm/i915/execlists: Ignore lost completion events Icelake hit an issue where it missed reporting a completion event and instead jumped straight to a idle->active event (skipping over the active->idle and not even hitting the lite-restore preemption). 661497511us : process_csb: rcs0 cs-irq head=11, tail=0 661497512us : process_csb: rcs0 csb[0]: status=0x10008002:0x00000020 [lite-restore] 661497512us : trace_ports: rcs0: preempted { 28cc8:11052, 0:0 } 661497513us : trace_ports: rcs0: promote { 28cc8:11054, 0:0 } 661497514us : __i915_request_submit: rcs0 fence 28cc8:11056, current 11052 661497514us : __execlists_submission_tasklet: rcs0: queue_priority_hint:-2147483648, submit:yes 661497515us : trace_ports: rcs0: submit { 28cc8:11056, 0:0 } 661497530us : process_csb: rcs0 cs-irq head=0, tail=1 661497530us : process_csb: rcs0 csb[1]: status=0x10008002:0x00000020 [lite-restore] 661497531us : trace_ports: rcs0: preempted { 28cc8:11054!, 0:0 } 661497535us : trace_ports: rcs0: promote { 28cc8:11056, 0:0 } 661497540us : __i915_request_submit: rcs0 fence 28cc8:11058, current 11054 661497544us : __execlists_submission_tasklet: rcs0: queue_priority_hint:-2147483648, submit:yes 661497545us : trace_ports: rcs0: submit { 28cc8:11058, 0:0 } 661497553us : process_csb: rcs0 cs-irq head=1, tail=2 661497553us : process_csb: rcs0 csb[2]: status=0x10000001:0x00000000 [idle->active] 661497574us : process_csb: process_csb:1538 GEM_BUG_ON(*execlists->active) Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190907084334.28952-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 63 +++++++++++-------------------------- 1 file changed, 18 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 64170ce0c91b..dcdf7cf66e7e 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -692,6 +692,9 @@ trace_ports(const struct intel_engine_execlists *execlists, const struct intel_engine_cs *engine = container_of(execlists, typeof(*engine), execlists); + if (!ports[0]) + return; + GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n", engine->name, 
msg, ports[0]->fence.context, @@ -1380,13 +1383,6 @@ reset_in_progress(const struct intel_engine_execlists *execlists) return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); } -enum csb_step { - CSB_NOP, - CSB_PROMOTE, - CSB_PREEMPT, - CSB_COMPLETE, -}; - /* * Starting with Gen12, the status has a new format: * @@ -1413,7 +1409,7 @@ enum csb_step { * bits 47-57: sw context id of the lrc the GT switched away from * bits 58-63: sw counter of the lrc the GT switched away from */ -static inline enum csb_step +static inline bool gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) { u32 lower_dw = csb[0]; @@ -1423,7 +1419,7 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE; if (!ctx_away_valid && ctx_to_valid) - return CSB_PROMOTE; + return true; /* * The context switch detail is not guaranteed to be 5 when a preemption @@ -1433,7 +1429,7 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) * would require some extra handling, but we don't support that. */ if (new_queue && ctx_away_valid) - return CSB_PREEMPT; + return true; /* * switch detail = 5 is covered by the case above and we do not expect a @@ -1442,29 +1438,13 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) */ GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw)); - if (*execlists->active) { - GEM_BUG_ON(!ctx_away_valid); - return CSB_COMPLETE; - } - - return CSB_NOP; + return false; } -static inline enum csb_step +static inline bool gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) { - unsigned int status = *csb; - - if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) - return CSB_PROMOTE; - - if (status & GEN8_CTX_STATUS_PREEMPTED) - return CSB_PREEMPT; - - if (*execlists->active) - return CSB_COMPLETE; - - return CSB_NOP; + return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED); } static void process_csb(struct intel_engine_cs *engine) @@ -1503,7 +1483,7 @@ static void process_csb(struct intel_engine_cs *engine) rmb(); do { - enum csb_step csb_step; + bool promote; if (++head == num_entries) head = 0; @@ -1531,20 +1511,16 @@ static void process_csb(struct intel_engine_cs *engine) buf[2 * head + 0], buf[2 * head + 1]); if (INTEL_GEN(engine->i915) >= 12) - csb_step = gen12_csb_parse(execlists, buf + 2 * head); + promote = gen12_csb_parse(execlists, buf + 2 * head); else - csb_step = gen8_csb_parse(execlists, buf + 2 * head); - - switch (csb_step) { - case CSB_PREEMPT: /* cancel old inflight, prepare for switch */ + promote = gen8_csb_parse(execlists, buf + 2 * head); + if (promote) { + /* cancel old inflight, prepare for switch */ trace_ports(execlists, "preempted", execlists->active); - while (*execlists->active) execlists_schedule_out(*execlists->active++); - /* fallthrough */ - case CSB_PROMOTE: /* switch pending to inflight */ - GEM_BUG_ON(*execlists->active); + /* switch pending to inflight */ GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); execlists->active = memcpy(execlists->inflight, @@ -1559,9 +1535,10 @@ static void process_csb(struct intel_engine_cs *engine) ring_set_paused(engine, 0); WRITE_ONCE(execlists->pending[0], NULL); - break; + } else { + GEM_BUG_ON(!*execlists->active); - case CSB_COMPLETE: /* port0 completed, advanced to port1 */ + /* port0 completed, advanced to port1 */ trace_ports(execlists, "completed", execlists->active); /* @@ -1576,10 +1553,6 @@ static void 
process_csb(struct intel_engine_cs *engine) GEM_BUG_ON(execlists->active - execlists->inflight > execlists_num_ports(execlists)); - break; - - case CSB_NOP: - break; } } while (head != tail); -- cgit v1.2.3 From 0efa99dd58754d23e884b9ba41cd601f01b58c3d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 9 Sep 2019 12:30:18 +0100 Subject: drm/i915/ringbuffer: Flush writes before RING_TAIL update Be paranoid and make sure we flush any and all writes out of the WCB before performing the UC mmio to update the RING_TAIL. (An UC write should itself be enough to do the flush, hence the paranoia here.) Quite infrequently, we see problems where the GPU seems to overshoot the RING_TAIL and so executes garbage, hence the speculation. References: https://bugs.freedesktop.org/show_bug.cgi?id=111598 References: https://bugs.freedesktop.org/show_bug.cgi?id=111417 References: https://bugs.freedesktop.org/show_bug.cgi?id=111034 Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190909113018.13300-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index ac55a0d054bd..a73296e6b13d 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -930,6 +930,7 @@ static void cancel_requests(struct intel_engine_cs *engine) static void i9xx_submit_request(struct i915_request *request) { i915_request_submit(request); + wmb(); /* paranoid flush writes out of the WCB before mmio */ ENGINE_WRITE(request->engine, RING_TAIL, intel_ring_set_tail(request->ring, request->tail)); -- cgit v1.2.3 From cec5ca08e36fd18d2939b98055346b3b06f56c6c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 9 Sep 2019 12:00:08 +0100 Subject: drm/i915: Perform GGTT restore much earlier during resume As soon as we re-enable the various functions within the HW, they may go off and read data via a GGTT offset. Hence, if we have not yet restored the GGTT PTE before then, they may read and even *write* random locations in memory. Detected by DMAR faults during resume. 
Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Martin Peres Cc: Joonas Lahtinen Cc: stable@vger.kernel.org Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190909110011.8958-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 3 --- drivers/gpu/drm/i915/i915_drv.c | 5 +++++ drivers/gpu/drm/i915/selftests/i915_gem.c | 6 ++++++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index b3993d24b83d..9b1129aaacfe 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -242,9 +242,6 @@ void i915_gem_resume(struct drm_i915_private *i915) mutex_lock(&i915->drm.struct_mutex); intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); - i915_gem_restore_gtt_mappings(i915); - i915_gem_restore_fences(i915); - if (i915_gem_init_hw(i915)) goto err_wedged; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 63e52e153414..0aadff9b8c88 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1914,6 +1914,11 @@ static int i915_drm_resume(struct drm_device *dev) if (ret) DRM_ERROR("failed to re-enable GGTT\n"); + mutex_lock(&dev_priv->drm.struct_mutex); + i915_gem_restore_gtt_mappings(dev_priv); + i915_gem_restore_fences(dev_priv); + mutex_unlock(&dev_priv->drm.struct_mutex); + intel_csr_ucode_resume(dev_priv); i915_restore_state(dev_priv); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index bb6dd54a6ff3..37593831b539 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -118,6 +118,12 @@ static void pm_resume(struct drm_i915_private *i915) with_intel_runtime_pm(&i915->runtime_pm, wakeref) { intel_gt_sanitize(&i915->gt, false); i915_gem_sanitize(i915); + + mutex_lock(&i915->drm.struct_mutex); + i915_gem_restore_gtt_mappings(i915); + i915_gem_restore_fences(i915); + mutex_unlock(&i915->drm.struct_mutex); + i915_gem_resume(i915); } } -- cgit v1.2.3 From 7c465310fefc6a600eb69521b8420564f0f37b2d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 9 Sep 2019 12:00:06 +0100 Subject: drm/i915/selftests: Take runtime wakeref for igt_ggtt_lowlevel Being a "low-level" test, we opt to bypass the normal bind/unbind hooks for the lower level insert_entries/clear_range. For ggtt, the bind/unbind hooks provide the runtime wakeref and so we must also handle this in exercising the low level hooks. 
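The with_intel_runtime_pm() helper used in the fix is a scoped guard built on a for loop: it takes the wakeref before the body and drops it afterwards, roughly along the lines of the following (an approximation for illustration, not the exact i915 definition):

#define with_example_runtime_pm(rpm, wf) \
	for ((wf) = intel_runtime_pm_get(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

so the body runs once with the wakeref held, matching the reference the bind/unbind hooks would otherwise have taken on the test's behalf.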
<4> [538.151672] RPM raw-wakeref not held <4> [538.151825] WARNING: CPU: 0 PID: 11 at ./drivers/gpu/drm/i915/intel_runtime_pm.h:107 fwtable_read32+0x1be/0x300 [i915] <4> [538.151830] Modules linked in: i915(+) amdgpu gpu_sched ttm vgem snd_hda_codec_hdmi snd_hda_codec_realtek snd_hda_codec_generic mei_hdcp btusb btrtl btbcm x86_pkg_temp_thermal coretemp btintel crct10dif_pclmul bluetooth crc32_pclmul snd_intel_nhlt snd_hda_codec ecdh_generic ghash_clmulni_intel ecc snd_hwdep snd_hda_core lpc_ich r8169 realtek snd_pcm mei_me mei prime_numbers pinctrl_broxton pinctrl_intel [last unloaded: i915] <4> [538.151861] CPU: 0 PID: 11 Comm: migration/0 Tainted: G U 5.3.0-rc7-CI-Trybot_4938+ #1 <4> [538.151864] Hardware name: Intel corporation NUC6CAYS/NUC6CAYB, BIOS AYAPLCEL.86A.0056.2018.0926.1100 09/26/2018 <4> [538.151960] RIP: 0010:fwtable_read32+0x1be/0x300 [i915] <4> [538.151965] Code: e8 e7 f9 5f e0 e9 0b ff ff ff 80 3d d5 8d 26 00 00 0f 85 81 fe ff ff 48 c7 c7 ef 01 bd a0 c6 05 c1 8d 26 00 01 e8 b2 e4 6a e0 <0f> 0b e9 67 fe ff ff 80 3d ad 8d 26 00 00 0f 85 65 fe ff ff 48 c7 <4> [538.151969] RSP: 0018:ffffc9000007be10 EFLAGS: 00010086 <4> [538.151972] RAX: 0000000000000000 RBX: ffff88826be10d50 RCX: 0000000000000002 <4> [538.151975] RDX: 0000000080000002 RSI: 0000000000000000 RDI: 00000000ffffffff <4> [538.151978] RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000 <4> [538.151981] R10: 0000000000000000 R11: ffffc9000007bcb0 R12: 0000000000101008 <4> [538.151984] R13: 0000000000000000 R14: ffffc9000036f638 R15: 0000000000000002 <4> [538.151987] FS: 0000000000000000(0000) GS:ffff888277a00000(0000) knlGS:0000000000000000 <4> [538.151990] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 <4> [538.151993] CR2: 00007fd48e7052f8 CR3: 0000000005210000 CR4: 00000000003406f0 <4> [538.151995] Call Trace: <4> [538.152106] bxt_vtd_ggtt_clear_range__cb+0x38/0x40 [i915] Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190909110011.8958-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 31a51ca1ddcb..598c18d10640 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -293,18 +293,20 @@ static int lowlevel_hole(struct drm_i915_private *i915, mock_vma.node.size = BIT_ULL(size); mock_vma.node.start = addr; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + with_intel_runtime_pm(&i915->runtime_pm, wakeref) + vm->insert_entries(vm, &mock_vma, + I915_CACHE_NONE, 0); } count = n; i915_random_reorder(order, count, &prng); for (n = 0; n < count; n++) { u64 addr = hole_start + order[n] * BIT_ULL(size); + intel_wakeref_t wakeref; GEM_BUG_ON(addr + BIT_ULL(size) > vm->total); - vm->clear_range(vm, addr, BIT_ULL(size)); + with_intel_runtime_pm(&i915->runtime_pm, wakeref) + vm->clear_range(vm, addr, BIT_ULL(size)); } i915_gem_object_unpin_pages(obj); -- cgit v1.2.3 From 07e98eb0a17482557198ae12a07a6e3a89de33a3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 10 Sep 2019 13:10:09 +0100 Subject: drm/i915/selftests: Tighten the timeout testing for partial mmaps Currently, if there is time remaining before the start of the loop, we do one full iteration over many possible different 
chunks within the object. A full loop may take 50+s (depending on speed of indirect GTT mmapings) and we try separately with LINEAR, X and Y -- at which point igt times out. If we check more frequently, we will interrupt the loop upon our timeout -- it is hard to argue for as this significantly reduces the test coverage as we dramatically reduce the runtime. In practical terms, the coverage we should prioritise is in using different fence setups, forcing verification of the tile row computations over the current preference of checking extracting chunks. Though the exhaustive search is great given an infinite timeout, to improve our current coverage, we also add a randomised smoketest of partial mmaps. So let's do both, add a randomised smoketest of partial tiling chunks and the exhaustive (though time limited) search for failures. Even in adding another subtest, we should shave 100s off BAT! (With, hopefully, no loss in coverage, at least over multiple runs.) Signed-off-by: Chris Wilson Cc: Matthew Auld Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190910121009.13431-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 255 ++++++++++++++++++--- 1 file changed, 223 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 1d27babff0ce..aefe557527f8 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -10,6 +10,7 @@ #include "gt/intel_gt_pm.h" #include "huge_gem_object.h" #include "i915_selftest.h" +#include "selftests/i915_random.h" #include "selftests/igt_flush_test.h" struct tile { @@ -76,18 +77,103 @@ static u64 tiled_offset(const struct tile *tile, u64 v) static int check_partial_mapping(struct drm_i915_gem_object *obj, const struct tile *tile, - unsigned long end_time) + struct rnd_state *prng) { - const unsigned int nreal = obj->scratch / PAGE_SIZE; const unsigned long npages = obj->base.size / PAGE_SIZE; + struct i915_ggtt_view view; struct i915_vma *vma; unsigned long page; + u32 __iomem *io; + struct page *p; + unsigned int n; + u64 offset; + u32 *cpu; int err; - if (igt_timeout(end_time, - "%s: timed out before tiling=%d stride=%d\n", - __func__, tile->tiling, tile->stride)) - return -EINTR; + err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride); + if (err) { + pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n", + tile->tiling, tile->stride, err); + return err; + } + + GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling); + GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride); + + i915_gem_object_lock(obj); + err = i915_gem_object_set_to_gtt_domain(obj, true); + i915_gem_object_unlock(obj); + if (err) { + pr_err("Failed to flush to GTT write domain; err=%d\n", err); + return err; + } + + page = i915_prandom_u32_max_state(npages, prng); + view = compute_partial_view(obj, page, MIN_CHUNK_PAGES); + + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); + if (IS_ERR(vma)) { + pr_err("Failed to pin partial view: offset=%lu; err=%d\n", + page, (int)PTR_ERR(vma)); + return PTR_ERR(vma); + } + + n = page - view.partial.offset; + GEM_BUG_ON(n >= view.partial.size); + + io = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + if (IS_ERR(io)) { + pr_err("Failed to iomap partial view: offset=%lu; err=%d\n", + page, (int)PTR_ERR(io)); + err = PTR_ERR(io); + goto out; + } + + iowrite32(page, io + n * PAGE_SIZE / 
sizeof(*io)); + i915_vma_unpin_iomap(vma); + + offset = tiled_offset(tile, page << PAGE_SHIFT); + if (offset >= obj->base.size) + goto out; + + intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt); + + p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); + cpu = kmap(p) + offset_in_page(offset); + drm_clflush_virt_range(cpu, sizeof(*cpu)); + if (*cpu != (u32)page) { + pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n", + page, n, + view.partial.offset, + view.partial.size, + vma->size >> PAGE_SHIFT, + tile->tiling ? tile_row_pages(obj) : 0, + vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride, + offset >> PAGE_SHIFT, + (unsigned int)offset_in_page(offset), + offset, + (u32)page, *cpu); + err = -EINVAL; + } + *cpu = 0; + drm_clflush_virt_range(cpu, sizeof(*cpu)); + kunmap(p); + +out: + i915_vma_destroy(vma); + return err; +} + +static int check_partial_mappings(struct drm_i915_gem_object *obj, + const struct tile *tile, + unsigned long end_time) +{ + const unsigned int nreal = obj->scratch / PAGE_SIZE; + const unsigned long npages = obj->base.size / PAGE_SIZE; + struct i915_vma *vma; + unsigned long page; + int err; err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride); if (err) { @@ -170,11 +256,42 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, return err; i915_vma_destroy(vma); + + if (igt_timeout(end_time, + "%s: timed out after tiling=%d stride=%d\n", + __func__, tile->tiling, tile->stride)) + return -EINTR; } return 0; } +static unsigned int +setup_tile_size(struct tile *tile, struct drm_i915_private *i915) +{ + if (INTEL_GEN(i915) <= 2) { + tile->height = 16; + tile->width = 128; + tile->size = 11; + } else if (tile->tiling == I915_TILING_Y && + HAS_128_BYTE_Y_TILING(i915)) { + tile->height = 32; + tile->width = 128; + tile->size = 12; + } else { + tile->height = 8; + tile->width = 512; + tile->size = 12; + } + + if (INTEL_GEN(i915) < 4) + return 8192 / tile->width; + else if (INTEL_GEN(i915) < 7) + return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width; + else + return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width; +} + static int igt_partial_tiling(void *arg) { const unsigned int nreal = 1 << 12; /* largest tile row x2 */ @@ -219,7 +336,7 @@ static int igt_partial_tiling(void *arg) tile.swizzle = I915_BIT_6_SWIZZLE_NONE; tile.tiling = I915_TILING_NONE; - err = check_partial_mapping(obj, &tile, end); + err = check_partial_mappings(obj, &tile, end); if (err && err != -EINTR) goto out_unlock; } @@ -253,31 +370,11 @@ static int igt_partial_tiling(void *arg) tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17) continue; - if (INTEL_GEN(i915) <= 2) { - tile.height = 16; - tile.width = 128; - tile.size = 11; - } else if (tile.tiling == I915_TILING_Y && - HAS_128_BYTE_Y_TILING(i915)) { - tile.height = 32; - tile.width = 128; - tile.size = 12; - } else { - tile.height = 8; - tile.width = 512; - tile.size = 12; - } - - if (INTEL_GEN(i915) < 4) - max_pitch = 8192 / tile.width; - else if (INTEL_GEN(i915) < 7) - max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width; - else - max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width; + max_pitch = setup_tile_size(&tile, i915); for (pitch = max_pitch; pitch; pitch >>= 1) { tile.stride = tile.width * pitch; - err = check_partial_mapping(obj, &tile, end); + err = check_partial_mappings(obj, &tile, end); if (err == -EINTR) goto next_tiling; if (err) @@ -285,7 +382,7 @@ static 
int igt_partial_tiling(void *arg) if (pitch > 2 && INTEL_GEN(i915) >= 4) { tile.stride = tile.width * (pitch - 1); - err = check_partial_mapping(obj, &tile, end); + err = check_partial_mappings(obj, &tile, end); if (err == -EINTR) goto next_tiling; if (err) @@ -294,7 +391,7 @@ static int igt_partial_tiling(void *arg) if (pitch < max_pitch && INTEL_GEN(i915) >= 4) { tile.stride = tile.width * (pitch + 1); - err = check_partial_mapping(obj, &tile, end); + err = check_partial_mappings(obj, &tile, end); if (err == -EINTR) goto next_tiling; if (err) @@ -305,7 +402,7 @@ static int igt_partial_tiling(void *arg) if (INTEL_GEN(i915) >= 4) { for_each_prime_number(pitch, max_pitch) { tile.stride = tile.width * pitch; - err = check_partial_mapping(obj, &tile, end); + err = check_partial_mappings(obj, &tile, end); if (err == -EINTR) goto next_tiling; if (err) @@ -325,6 +422,99 @@ out: return err; } +static int igt_smoke_tiling(void *arg) +{ + const unsigned int nreal = 1 << 12; /* largest tile row x2 */ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; + I915_RND_STATE(prng); + unsigned long count; + IGT_TIMEOUT(end); + int err; + + /* + * igt_partial_tiling() does an exhastive check of partial tiling + * chunking, but will undoubtably run out of time. Here, we do a + * randomised search and hope over many runs of 1s with different + * seeds we will do a thorough check. + * + * Remember to look at the st_seed if we see a flip-flop in BAT! + */ + + if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) + return 0; + + obj = huge_gem_object(i915, + nreal << PAGE_SHIFT, + (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = i915_gem_object_pin_pages(obj); + if (err) { + pr_err("Failed to allocate %u pages (%lu total), err=%d\n", + nreal, obj->base.size / PAGE_SIZE, err); + goto out; + } + + mutex_lock(&i915->drm.struct_mutex); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + count = 0; + do { + struct tile tile; + + tile.tiling = + i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng); + switch (tile.tiling) { + case I915_TILING_NONE: + tile.height = 1; + tile.width = 1; + tile.size = 0; + tile.stride = 0; + tile.swizzle = I915_BIT_6_SWIZZLE_NONE; + break; + + case I915_TILING_X: + tile.swizzle = i915->mm.bit_6_swizzle_x; + break; + case I915_TILING_Y: + tile.swizzle = i915->mm.bit_6_swizzle_y; + break; + } + + if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 || + tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17) + continue; + + if (tile.tiling != I915_TILING_NONE) { + unsigned int max_pitch = setup_tile_size(&tile, i915); + + tile.stride = + i915_prandom_u32_max_state(max_pitch, &prng); + tile.stride = (1 + tile.stride) * tile.width; + if (INTEL_GEN(i915) < 4) + tile.stride = rounddown_pow_of_two(tile.stride); + } + + err = check_partial_mapping(obj, &tile, &prng); + if (err) + break; + + count++; + } while (!__igt_timeout(end, NULL)); + + pr_info("%s: Completed %lu trials\n", __func__, count); + + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + mutex_unlock(&i915->drm.struct_mutex); + i915_gem_object_unpin_pages(obj); +out: + i915_gem_object_put(obj); + return err; +} + static int make_obj_busy(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); @@ -515,6 +705,7 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_partial_tiling), + SUBTEST(igt_smoke_tiling), 
SUBTEST(igt_mmap_offset_exhaustion), }; -- cgit v1.2.3 From ab37c4d712c8b35c0c1e3237e04fe49f717055a4 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 10 Sep 2019 17:16:57 +0100 Subject: drm/i915/tgl: Disable rc6 for debugging Empirical evidence from CI tells us that our rc6 setup for Tigerlake is off. Disable rc6 on tgl temporary so that we gain CI coverage as we prepare a fix. It also appears that the BIOS on our tgl leaves rc6 enabled, so we have to explicitly disable it on init. References: https://bugs.freedesktop.org/show_bug.cgi?id=111593 Signed-off-by: Chris Wilson Cc: Mika Kuoppala Acked-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190910161657.23037-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_pci.c | 1 + drivers/gpu/drm/i915/intel_pm.c | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index fbe98a2db88e..b3cc8560696b 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -797,6 +797,7 @@ static const struct intel_device_info intel_tigerlake_12_info = { .display.has_modular_fia = 1, .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), + .has_rc6 = false, /* XXX disabled for debugging */ }; #undef GEN diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 528e90ed5de4..54a8b7705bc6 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -8671,8 +8671,7 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) { mutex_lock(&dev_priv->gt_pm.rps.lock); - if (HAS_RC6(dev_priv)) - intel_disable_rc6(dev_priv); + intel_disable_rc6(dev_priv); intel_disable_rps(dev_priv); if (HAS_LLC(dev_priv)) -- cgit v1.2.3 From 71dc367e2bc37e1c235029003fbbc56a07fb6d58 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 10 Sep 2019 09:05:20 -0700 Subject: drm/i915: Consolidate bxt/cnl/icl cdclk readout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Aside from a few minor register changes and some different clock values, cdclk design hasn't changed much since gen9lp. Let's consolidate the handlers for bxt, cnl, and icl to keep the codeflow consistent. Also, while we're at it, s/bxt_de_pll_update/bxt_de_pll_readout/ since "update" makes me think we should be writing to hardware rather than reading from it. v2: - Fix icl_calc_voltage_level() limits. (Ville) - Use CNL_CDCLK_PLL_RATIO_MASK rather than BXT_DE_PLL_RATIO_MASK on gen10+ to avoid confusion. (Ville) v3: - Also fix ehl_calc_voltage_level() limits. 
(Ville) Cc: Ville Syrjälä Suggested-by: Ville Syrjälä Signed-off-by: Matt Roper Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190910160520.6587-1-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 325 ++++++++++++----------------- 1 file changed, 138 insertions(+), 187 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index d3e56628af70..dfcb1cc58951 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1190,6 +1190,36 @@ static u8 bxt_calc_voltage_level(int cdclk) return DIV_ROUND_UP(cdclk, 25000); } +static u8 cnl_calc_voltage_level(int cdclk) +{ + if (cdclk > 336000) + return 2; + else if (cdclk > 168000) + return 1; + else + return 0; +} + +static u8 icl_calc_voltage_level(int cdclk) +{ + if (cdclk > 556800) + return 2; + else if (cdclk > 312000) + return 1; + else + return 0; +} + +static u8 ehl_calc_voltage_level(int cdclk) +{ + if (cdclk > 312000) + return 2; + else if (cdclk > 180000) + return 1; + else + return 0; +} + static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk) { int ratio; @@ -1236,23 +1266,69 @@ static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk) return dev_priv->cdclk.hw.ref * ratio; } -static void bxt_de_pll_update(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) +static void cnl_readout_refclk(struct drm_i915_private *dev_priv, + struct intel_cdclk_state *cdclk_state) { - u32 val; + if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz) + cdclk_state->ref = 24000; + else + cdclk_state->ref = 19200; +} - cdclk_state->ref = 19200; - cdclk_state->vco = 0; +static void icl_readout_refclk(struct drm_i915_private *dev_priv, + struct intel_cdclk_state *cdclk_state) +{ + u32 dssm = I915_READ(SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK; + + switch (dssm) { + default: + MISSING_CASE(dssm); + /* fall through */ + case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz: + cdclk_state->ref = 24000; + break; + case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz: + cdclk_state->ref = 19200; + break; + case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz: + cdclk_state->ref = 38400; + break; + } +} + +static void bxt_de_pll_readout(struct drm_i915_private *dev_priv, + struct intel_cdclk_state *cdclk_state) +{ + u32 val, ratio; + + if (INTEL_GEN(dev_priv) >= 11) + icl_readout_refclk(dev_priv, cdclk_state); + else if (IS_CANNONLAKE(dev_priv)) + cnl_readout_refclk(dev_priv, cdclk_state); + else + cdclk_state->ref = 19200; val = I915_READ(BXT_DE_PLL_ENABLE); - if ((val & BXT_DE_PLL_PLL_ENABLE) == 0) + if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 || + (val & BXT_DE_PLL_LOCK) == 0) { + /* + * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but + * setting it to zero is a way to signal that. + */ + cdclk_state->vco = 0; return; + } - if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0)) - return; + /* + * CNL+ have the ratio directly in the PLL enable register, gen9lp had + * it in a separate PLL control register. 
+ */ + if (INTEL_GEN(dev_priv) >= 10) + ratio = val & CNL_CDCLK_PLL_RATIO_MASK; + else + ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; - val = I915_READ(BXT_DE_PLL_CTL); - cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref; + cdclk_state->vco = ratio * cdclk_state->ref; } static void bxt_get_cdclk(struct drm_i915_private *dev_priv, @@ -1261,12 +1337,18 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, u32 divider; int div; - bxt_de_pll_update(dev_priv, cdclk_state); - - cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref; + if (INTEL_GEN(dev_priv) >= 12) + cdclk_state->bypass = cdclk_state->ref / 2; + else if (INTEL_GEN(dev_priv) >= 11) + cdclk_state->bypass = 50000; + else + cdclk_state->bypass = cdclk_state->ref; - if (cdclk_state->vco == 0) + bxt_de_pll_readout(dev_priv, cdclk_state); + if (cdclk_state->vco == 0) { + cdclk_state->cdclk = cdclk_state->bypass; goto out; + } divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; @@ -1275,13 +1357,15 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, div = 2; break; case BXT_CDCLK_CD2X_DIV_SEL_1_5: - WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n"); + WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10, + "Unsupported divider\n"); div = 3; break; case BXT_CDCLK_CD2X_DIV_SEL_2: div = 4; break; case BXT_CDCLK_CD2X_DIV_SEL_4: + WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n"); div = 8; break; default: @@ -1296,8 +1380,18 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, * Can't read this out :( Let's assume it's * at least what the CDCLK frequency requires. */ - cdclk_state->voltage_level = - bxt_calc_voltage_level(cdclk_state->cdclk); + if (IS_ELKHARTLAKE(dev_priv)) + cdclk_state->voltage_level = + ehl_calc_voltage_level(cdclk_state->cdclk); + else if (INTEL_GEN(dev_priv) >= 11) + cdclk_state->voltage_level = + icl_calc_voltage_level(cdclk_state->cdclk); + else if (INTEL_GEN(dev_priv) >= 10) + cdclk_state->voltage_level = + cnl_calc_voltage_level(cdclk_state->cdclk); + else + cdclk_state->voltage_level = + bxt_calc_voltage_level(cdclk_state->cdclk); } static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) @@ -1515,76 +1609,6 @@ static int cnl_calc_cdclk(int min_cdclk) return 168000; } -static u8 cnl_calc_voltage_level(int cdclk) -{ - if (cdclk > 336000) - return 2; - else if (cdclk > 168000) - return 1; - else - return 0; -} - -static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) -{ - u32 val; - - if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz) - cdclk_state->ref = 24000; - else - cdclk_state->ref = 19200; - - cdclk_state->vco = 0; - - val = I915_READ(BXT_DE_PLL_ENABLE); - if ((val & BXT_DE_PLL_PLL_ENABLE) == 0) - return; - - if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0)) - return; - - cdclk_state->vco = (val & CNL_CDCLK_PLL_RATIO_MASK) * cdclk_state->ref; -} - -static void cnl_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) -{ - u32 divider; - int div; - - cnl_cdclk_pll_update(dev_priv, cdclk_state); - - cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref; - - if (cdclk_state->vco == 0) - goto out; - - divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; - - switch (divider) { - case BXT_CDCLK_CD2X_DIV_SEL_1: - div = 2; - break; - case BXT_CDCLK_CD2X_DIV_SEL_2: - div = 4; - break; - default: - MISSING_CASE(divider); - return; - } - - cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div); - - out: - /* - 
* Can't read this out :( Let's assume it's - * at least what the CDCLK frequency requires. - */ - cdclk_state->voltage_level = - cnl_calc_voltage_level(cdclk_state->cdclk); -} - static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv) { u32 val; @@ -1830,91 +1854,6 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) return dev_priv->cdclk.hw.ref * ratio; } -static u8 icl_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk) -{ - if (IS_ELKHARTLAKE(dev_priv)) { - if (cdclk > 312000) - return 2; - else if (cdclk > 180000) - return 1; - else - return 0; - } else { - if (cdclk > 556800) - return 2; - else if (cdclk > 312000) - return 1; - else - return 0; - } -} - -static void icl_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) -{ - u32 val; - int div; - - val = I915_READ(SKL_DSSM); - switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) { - default: - MISSING_CASE(val); - /* fall through */ - case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz: - cdclk_state->ref = 24000; - break; - case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz: - cdclk_state->ref = 19200; - break; - case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz: - cdclk_state->ref = 38400; - break; - } - - if (INTEL_GEN(dev_priv) >= 12) - cdclk_state->bypass = cdclk_state->ref / 2; - else - cdclk_state->bypass = 50000; - - val = I915_READ(BXT_DE_PLL_ENABLE); - if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 || - (val & BXT_DE_PLL_LOCK) == 0) { - /* - * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but - * setting it to zero is a way to signal that. - */ - cdclk_state->vco = 0; - cdclk_state->cdclk = cdclk_state->bypass; - goto out; - } - - cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref; - - val = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; - switch (val) { - case BXT_CDCLK_CD2X_DIV_SEL_1: - div = 2; - break; - case BXT_CDCLK_CD2X_DIV_SEL_2: - div = 4; - break; - default: - MISSING_CASE(val); - div = 2; - break; - } - - cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div); - -out: - /* - * Can't read this out :( Let's assume it's - * at least what the CDCLK frequency requires. 
- */ - cdclk_state->voltage_level = - icl_calc_voltage_level(dev_priv, cdclk_state->cdclk); -} - static void icl_init_cdclk(struct drm_i915_private *dev_priv) { struct intel_cdclk_state sanitized_state; @@ -1946,9 +1885,12 @@ sanitize: sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref); sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv, sanitized_state.cdclk); - sanitized_state.voltage_level = - icl_calc_voltage_level(dev_priv, - sanitized_state.cdclk); + if (IS_ELKHARTLAKE(dev_priv)) + sanitized_state.voltage_level = + ehl_calc_voltage_level(sanitized_state.cdclk); + else + sanitized_state.voltage_level = + icl_calc_voltage_level(sanitized_state.cdclk); cnl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE); } @@ -1959,8 +1901,12 @@ static void icl_uninit_cdclk(struct drm_i915_private *dev_priv) cdclk_state.cdclk = cdclk_state.bypass; cdclk_state.vco = 0; - cdclk_state.voltage_level = icl_calc_voltage_level(dev_priv, - cdclk_state.cdclk); + if (IS_ELKHARTLAKE(dev_priv)) + cdclk_state.voltage_level = + ehl_calc_voltage_level(cdclk_state.cdclk); + else + cdclk_state.voltage_level = + icl_calc_voltage_level(cdclk_state.cdclk); cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } @@ -2561,9 +2507,14 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.logical.vco = vco; state->cdclk.logical.cdclk = cdclk; - state->cdclk.logical.voltage_level = - max(icl_calc_voltage_level(dev_priv, cdclk), - cnl_compute_min_voltage_level(state)); + if (IS_ELKHARTLAKE(dev_priv)) + state->cdclk.logical.voltage_level = + max(ehl_calc_voltage_level(cdclk), + cnl_compute_min_voltage_level(state)); + else + state->cdclk.logical.voltage_level = + max(icl_calc_voltage_level(cdclk), + cnl_compute_min_voltage_level(state)); if (!state->active_pipes) { cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref); @@ -2571,8 +2522,12 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.actual.vco = vco; state->cdclk.actual.cdclk = cdclk; - state->cdclk.actual.voltage_level = - icl_calc_voltage_level(dev_priv, cdclk); + if (IS_ELKHARTLAKE(dev_priv)) + state->cdclk.actual.voltage_level = + ehl_calc_voltage_level(cdclk); + else + state->cdclk.actual.voltage_level = + icl_calc_voltage_level(cdclk); } else { state->cdclk.actual = state->cdclk.logical; } @@ -2819,11 +2774,7 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk; } - if (INTEL_GEN(dev_priv) >= 11) - dev_priv->display.get_cdclk = icl_get_cdclk; - else if (IS_CANNONLAKE(dev_priv)) - dev_priv->display.get_cdclk = cnl_get_cdclk; - else if (IS_GEN9_LP(dev_priv)) + if (INTEL_GEN(dev_priv) >= 10 || IS_GEN9_LP(dev_priv)) dev_priv->display.get_cdclk = bxt_get_cdclk; else if (IS_GEN9_BC(dev_priv)) dev_priv->display.get_cdclk = skl_get_cdclk; -- cgit v1.2.3 From 736da8112fee663deed6efc53333c37dc0b04696 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 10 Sep 2019 09:15:06 -0700 Subject: drm/i915: Use literal representation of cdclk tables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The bspec lays out legal cdclk frequencies, PLL ratios, and CD2X dividers in an easy-to-read table for most recent platforms. We've been translating the data from that table into platform-specific code logic, but it's easy to overlook an area we need to update when adding new cdclk values or enabling new platforms. 
Let's just add a form of the bspec table to the code and then adjust our functions to pull what they need directly out of the table. v2: Fix comparison when finding best cdclk. v3: Another logic fix for calc_cdclk. v4: - Use named initializers for cdclk tables. (Ville) - Include refclk as a field in the table instead of adding all three ratios for each entry. (Ville) - Terminate tables with an empty entry to avoid needing to store the table size. (Ville) - Don't try so hard to return reasonable values from our lookup functions if we get impossible inputs; just WARN and return 0. (Ville) - Keep a bxt_ prefix on the lookup functions since they're still only used on bxt+ for now. We can rename them later if we extend this table-based approach back to older platforms. (Ville) v5: - Fix cnl table's ratios for 24mhz refclk. (Ville) - Don't miss the named initializers on the cnl table. (Ville) - Represent refclk in table as u16 rather than u32. (Ville) Cc: Ville Syrjälä Signed-off-by: Matt Roper Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190910161506.7158-1-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 303 ++++++++++------------------- drivers/gpu/drm/i915/display/intel_cdclk.h | 7 + drivers/gpu/drm/i915/i915_drv.h | 3 + 3 files changed, 110 insertions(+), 203 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index dfcb1cc58951..f3431b530966 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1161,28 +1161,88 @@ static void skl_uninit_cdclk(struct drm_i915_private *dev_priv) skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } -static int bxt_calc_cdclk(int min_cdclk) -{ - if (min_cdclk > 576000) - return 624000; - else if (min_cdclk > 384000) - return 576000; - else if (min_cdclk > 288000) - return 384000; - else if (min_cdclk > 144000) - return 288000; - else - return 144000; +static const struct intel_cdclk_vals bxt_cdclk_table[] = { + { .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 }, + { .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 }, + { .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 }, + { .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 }, + { .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 }, + {} +}; + +static const struct intel_cdclk_vals glk_cdclk_table[] = { + { .refclk = 19200, .cdclk = 79200, .divider = 8, .ratio = 33 }, + { .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 }, + { .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 }, + {} +}; + +static const struct intel_cdclk_vals cnl_cdclk_table[] = { + { .refclk = 19200, .cdclk = 168000, .divider = 4, .ratio = 35 }, + { .refclk = 19200, .cdclk = 336000, .divider = 2, .ratio = 35 }, + { .refclk = 19200, .cdclk = 528000, .divider = 2, .ratio = 55 }, + + { .refclk = 24000, .cdclk = 168000, .divider = 4, .ratio = 28 }, + { .refclk = 24000, .cdclk = 336000, .divider = 2, .ratio = 28 }, + { .refclk = 24000, .cdclk = 528000, .divider = 2, .ratio = 44 }, + {} +}; + +static const struct intel_cdclk_vals icl_cdclk_table[] = { + { .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 }, + { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 }, + { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 }, + { .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 }, + { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 }, + { .refclk = 
19200, .cdclk = 652800, .divider = 2, .ratio = 68 }, + + { .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 }, + { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 }, + { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 }, + { .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 }, + { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 }, + { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 }, + + { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 }, + { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 }, + { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 }, + { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 }, + { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 }, + { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 }, + {} +}; + +static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) +{ + const struct intel_cdclk_vals *table = dev_priv->cdclk.table; + int i; + + for (i = 0; table[i].refclk; i++) + if (table[i].refclk == dev_priv->cdclk.hw.ref && + table[i].cdclk >= min_cdclk) + return table[i].cdclk; + + WARN(1, "Cannot satisfy minimum cdclk %d with refclk %u\n", + min_cdclk, dev_priv->cdclk.hw.ref); + return 0; } -static int glk_calc_cdclk(int min_cdclk) +static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) { - if (min_cdclk > 158400) - return 316800; - else if (min_cdclk > 79200) - return 158400; - else - return 79200; + const struct intel_cdclk_vals *table = dev_priv->cdclk.table; + int i; + + if (cdclk == dev_priv->cdclk.hw.bypass) + return 0; + + for (i = 0; table[i].refclk; i++) + if (table[i].refclk == dev_priv->cdclk.hw.ref && + table[i].cdclk == cdclk) + return dev_priv->cdclk.hw.ref * table[i].ratio; + + WARN(1, "cdclk %d not valid for refclk %u\n", + cdclk, dev_priv->cdclk.hw.ref); + return 0; } static u8 bxt_calc_voltage_level(int cdclk) @@ -1220,52 +1280,6 @@ static u8 ehl_calc_voltage_level(int cdclk) return 0; } -static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk) -{ - int ratio; - - if (cdclk == dev_priv->cdclk.hw.bypass) - return 0; - - switch (cdclk) { - default: - MISSING_CASE(cdclk); - /* fall through */ - case 144000: - case 288000: - case 384000: - case 576000: - ratio = 60; - break; - case 624000: - ratio = 65; - break; - } - - return dev_priv->cdclk.hw.ref * ratio; -} - -static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk) -{ - int ratio; - - if (cdclk == dev_priv->cdclk.hw.bypass) - return 0; - - switch (cdclk) { - default: - MISSING_CASE(cdclk); - /* fall through */ - case 79200: - case 158400: - case 316800: - ratio = 33; - break; - } - - return dev_priv->cdclk.hw.ref * ratio; -} - static void cnl_readout_refclk(struct drm_i915_private *dev_priv, struct intel_cdclk_state *cdclk_state) { @@ -1576,13 +1590,8 @@ static void bxt_init_cdclk(struct drm_i915_private *dev_priv) * - The initial CDCLK needs to be read from VBT. * Need to make this change after VBT has changes for BXT. 
*/ - if (IS_GEMINILAKE(dev_priv)) { - cdclk_state.cdclk = glk_calc_cdclk(0); - cdclk_state.vco = glk_de_pll_vco(dev_priv, cdclk_state.cdclk); - } else { - cdclk_state.cdclk = bxt_calc_cdclk(0); - cdclk_state.vco = bxt_de_pll_vco(dev_priv, cdclk_state.cdclk); - } + cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0); + cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk); cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk); bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); @@ -1599,16 +1608,6 @@ static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } -static int cnl_calc_cdclk(int min_cdclk) -{ - if (min_cdclk > 336000) - return 528000; - else if (min_cdclk > 168000) - return 336000; - else - return 168000; -} - static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv) { u32 val; @@ -1718,29 +1717,6 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv, dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level; } -static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) -{ - int ratio; - - if (cdclk == dev_priv->cdclk.hw.bypass) - return 0; - - switch (cdclk) { - default: - MISSING_CASE(cdclk); - /* fall through */ - case 168000: - case 336000: - ratio = dev_priv->cdclk.hw.ref == 19200 ? 35 : 28; - break; - case 528000: - ratio = dev_priv->cdclk.hw.ref == 19200 ? 55 : 44; - break; - } - - return dev_priv->cdclk.hw.ref * ratio; -} - static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv) { u32 cdctl, expected; @@ -1783,77 +1759,6 @@ sanitize: dev_priv->cdclk.hw.vco = -1; } -static int icl_calc_cdclk(int min_cdclk, unsigned int ref) -{ - static const int ranges_24[] = { 180000, 192000, 312000, 324000, - 552000, 648000 }; - static const int ranges_19_38[] = { 172800, 192000, 307200, 326400, - 556800, 652800 }; - const int *ranges; - int len, i; - - switch (ref) { - default: - MISSING_CASE(ref); - /* fall through */ - case 24000: - ranges = ranges_24; - len = ARRAY_SIZE(ranges_24); - break; - case 19200: - case 38400: - ranges = ranges_19_38; - len = ARRAY_SIZE(ranges_19_38); - break; - } - - for (i = 0; i < len; i++) { - if (min_cdclk <= ranges[i]) - return ranges[i]; - } - - WARN_ON(min_cdclk > ranges[len - 1]); - return ranges[len - 1]; -} - -static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) -{ - int ratio; - - if (cdclk == dev_priv->cdclk.hw.bypass) - return 0; - - switch (cdclk) { - default: - MISSING_CASE(cdclk); - /* fall through */ - case 172800: - case 307200: - case 326400: - case 556800: - case 652800: - WARN_ON(dev_priv->cdclk.hw.ref != 19200 && - dev_priv->cdclk.hw.ref != 38400); - break; - case 180000: - case 312000: - case 324000: - case 552000: - case 648000: - WARN_ON(dev_priv->cdclk.hw.ref != 24000); - break; - case 192000: - WARN_ON(dev_priv->cdclk.hw.ref != 19200 && - dev_priv->cdclk.hw.ref != 38400 && - dev_priv->cdclk.hw.ref != 24000); - break; - } - - ratio = cdclk / (dev_priv->cdclk.hw.ref / 2); - - return dev_priv->cdclk.hw.ref * ratio; -} - static void icl_init_cdclk(struct drm_i915_private *dev_priv) { struct intel_cdclk_state sanitized_state; @@ -1882,8 +1787,8 @@ sanitize: DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); sanitized_state.ref = dev_priv->cdclk.hw.ref; - sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref); - sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv, + sanitized_state.cdclk = bxt_calc_cdclk(dev_priv, 0); + sanitized_state.vco = 
bxt_calc_cdclk_pll_vco(dev_priv, sanitized_state.cdclk); if (IS_ELKHARTLAKE(dev_priv)) sanitized_state.voltage_level = @@ -1923,8 +1828,8 @@ static void cnl_init_cdclk(struct drm_i915_private *dev_priv) cdclk_state = dev_priv->cdclk.hw; - cdclk_state.cdclk = cnl_calc_cdclk(0); - cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk); + cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0); + cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk); cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk); cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); @@ -2426,13 +2331,8 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) if (min_cdclk < 0) return min_cdclk; - if (IS_GEMINILAKE(dev_priv)) { - cdclk = glk_calc_cdclk(min_cdclk); - vco = glk_de_pll_vco(dev_priv, cdclk); - } else { - cdclk = bxt_calc_cdclk(min_cdclk); - vco = bxt_de_pll_vco(dev_priv, cdclk); - } + cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); + vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); state->cdclk.logical.vco = vco; state->cdclk.logical.cdclk = cdclk; @@ -2440,13 +2340,8 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) bxt_calc_voltage_level(cdclk); if (!state->active_pipes) { - if (IS_GEMINILAKE(dev_priv)) { - cdclk = glk_calc_cdclk(state->cdclk.force_min_cdclk); - vco = glk_de_pll_vco(dev_priv, cdclk); - } else { - cdclk = bxt_calc_cdclk(state->cdclk.force_min_cdclk); - vco = bxt_de_pll_vco(dev_priv, cdclk); - } + cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); + vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); state->cdclk.actual.vco = vco; state->cdclk.actual.cdclk = cdclk; @@ -2468,8 +2363,8 @@ static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state) if (min_cdclk < 0) return min_cdclk; - cdclk = cnl_calc_cdclk(min_cdclk); - vco = cnl_cdclk_pll_vco(dev_priv, cdclk); + cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); + vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); state->cdclk.logical.vco = vco; state->cdclk.logical.cdclk = cdclk; @@ -2478,8 +2373,8 @@ static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state) cnl_compute_min_voltage_level(state)); if (!state->active_pipes) { - cdclk = cnl_calc_cdclk(state->cdclk.force_min_cdclk); - vco = cnl_cdclk_pll_vco(dev_priv, cdclk); + cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); + vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); state->cdclk.actual.vco = vco; state->cdclk.actual.cdclk = cdclk; @@ -2495,15 +2390,14 @@ static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state) static int icl_modeset_calc_cdclk(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - unsigned int ref = state->cdclk.logical.ref; int min_cdclk, cdclk, vco; min_cdclk = intel_compute_min_cdclk(state); if (min_cdclk < 0) return min_cdclk; - cdclk = icl_calc_cdclk(min_cdclk, ref); - vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk); + cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); + vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); state->cdclk.logical.vco = vco; state->cdclk.logical.cdclk = cdclk; @@ -2517,8 +2411,8 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state) cnl_compute_min_voltage_level(state)); if (!state->active_pipes) { - cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref); - vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk); + cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); + vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); state->cdclk.actual.vco = vco; state->cdclk.actual.cdclk = cdclk; 
@@ -2754,12 +2648,15 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) if (INTEL_GEN(dev_priv) >= 11) { dev_priv->display.set_cdclk = cnl_set_cdclk; dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk; + dev_priv->cdclk.table = icl_cdclk_table; } else if (IS_CANNONLAKE(dev_priv)) { dev_priv->display.set_cdclk = cnl_set_cdclk; dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk; + dev_priv->cdclk.table = cnl_cdclk_table; } else if (IS_GEN9_LP(dev_priv)) { dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; + dev_priv->cdclk.table = bxt_cdclk_table; } else if (IS_GEN9_BC(dev_priv)) { dev_priv->display.set_cdclk = skl_set_cdclk; dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index 4d6f7f5f8930..1afa84ab6018 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -15,6 +15,13 @@ struct intel_atomic_state; struct intel_cdclk_state; struct intel_crtc_state; +struct intel_cdclk_vals { + u16 refclk; + u32 cdclk; + u8 divider; /* CD2X divider * 2 */ + u8 ratio; +}; + int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state); void intel_cdclk_init(struct drm_i915_private *i915); void intel_cdclk_uninit(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e289b4ffd34b..ff6aff2a4866 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1420,6 +1420,9 @@ struct drm_i915_private { /* The current hardware cdclk state */ struct intel_cdclk_state hw; + /* cdclk, divider, and ratio table from bspec */ + const struct intel_cdclk_vals *table; + int force_min_cdclk; } cdclk; -- cgit v1.2.3 From 1cbcd3b4b168a08620153d2c5d52461a75338349 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 10 Sep 2019 08:42:47 -0700 Subject: drm/i915: Combine bxt_set_cdclk and cnl_set_cdclk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We'd previously combined ICL/TGL logic into the cnl_set_cdclk function, but BXT is pretty similar as well. Roll the cnl/icl/tgl logic back into the bxt function; the only things we really need to handle separately are punit notification and calling different functions to enable/disable the cdclk PLL. 
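To make the resulting shape easier to see, here is a rough standalone model of the merged flow (illustrative only, not part of the patch: set_cdclk_model() and the *_program() stubs stand in for the real bxt_set_cdclk() and the bxt/cnl PLL enable/disable helpers). The body stays common; only the pcode mailbox and the PLL helpers sit behind a gen check.

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the real MMIO/pcode helpers. */
static void bxt_de_pll_program(int vco)
{
	printf("program BXT_DE_PLL_CTL ratio for vco %d kHz\n", vco);
}

static void cnl_cdclk_pll_program(int vco)
{
	printf("program ratio in BXT_DE_PLL_ENABLE for vco %d kHz\n", vco);
}

static void set_cdclk_model(bool gen10_plus, int cdclk, int vco)
{
	/* 1. pre-change handshake with the power controller:
	 *    SKL_PCODE_CDCLK_CONTROL on gen10+, the experimentally tuned
	 *    HSW_PCODE_DE_WRITE_FREQ_REQ write on gen9lp. */
	printf("pre-change pcode notify (%s)\n",
	       gen10_plus ? "gen10+" : "gen9lp");

	/* 2. only the PLL helpers differ between the two families. */
	if (gen10_plus)
		cnl_cdclk_pll_program(vco);
	else
		bxt_de_pll_program(vco);

	/* 3. shared tail: CDCLK_CTL (divider, decimal frequency, pipe
	 *    select) followed by the post-change voltage-level report. */
	printf("CDCLK_CTL <- cdclk %d kHz, then post-change pcode write\n",
	       cdclk);
}

int main(void)
{
	set_cdclk_model(true, 652800, 1305600);
	return 0;
}
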
Cc: Ville Syrjälä Signed-off-by: Matt Roper Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190910154252.30503-4-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 267 +++++++++++++---------------- 1 file changed, 119 insertions(+), 148 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index f3431b530966..1dcb8a113b4f 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1440,6 +1440,39 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) dev_priv->cdclk.hw.vco = vco; } +static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv) +{ + u32 val; + + val = I915_READ(BXT_DE_PLL_ENABLE); + val &= ~BXT_DE_PLL_PLL_ENABLE; + I915_WRITE(BXT_DE_PLL_ENABLE, val); + + /* Timeout 200us */ + if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1)) + DRM_ERROR("timeout waiting for CDCLK PLL unlock\n"); + + dev_priv->cdclk.hw.vco = 0; +} + +static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco) +{ + int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref); + u32 val; + + val = CNL_CDCLK_PLL_RATIO(ratio); + I915_WRITE(BXT_DE_PLL_ENABLE, val); + + val |= BXT_DE_PLL_PLL_ENABLE; + I915_WRITE(BXT_DE_PLL_ENABLE, val); + + /* Timeout 200us */ + if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1)) + DRM_ERROR("timeout waiting for CDCLK PLL lock\n"); + + dev_priv->cdclk.hw.vco = vco; +} + static void bxt_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_state *cdclk_state, enum pipe pipe) @@ -1449,6 +1482,27 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, u32 val, divider; int ret; + /* Inform power controller of upcoming frequency change. */ + if (INTEL_GEN(dev_priv) >= 10) + ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, + SKL_CDCLK_PREPARE_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, 3); + else + /* + * BSpec requires us to wait up to 150usec, but that leads to + * timeouts; the 2ms used here is based on experiment. + */ + ret = sandybridge_pcode_write_timeout(dev_priv, + HSW_PCODE_DE_WRITE_FREQ_REQ, + 0x80000000, 150, 2); + + if (ret) { + DRM_ERROR("Failed to inform PCU about cdclk change (err %d, freq %d)\n", + ret, cdclk); + return; + } + /* cdclk = vco / 2 / div{1,1.5,2,4} */ switch (DIV_ROUND_CLOSEST(vco, cdclk)) { default: @@ -1459,63 +1513,82 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, divider = BXT_CDCLK_CD2X_DIV_SEL_1; break; case 3: - WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n"); + WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10, + "Unsupported divider\n"); divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; break; case 4: divider = BXT_CDCLK_CD2X_DIV_SEL_2; break; case 8: + WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n"); divider = BXT_CDCLK_CD2X_DIV_SEL_4; break; } - /* - * Inform power controller of upcoming frequency change. BSpec - * requires us to wait up to 150usec, but that leads to timeouts; - * the 2ms used here is based on experiment. 
- */ - ret = sandybridge_pcode_write_timeout(dev_priv, - HSW_PCODE_DE_WRITE_FREQ_REQ, - 0x80000000, 150, 2); - if (ret) { - DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", - ret, cdclk); - return; - } + if (INTEL_GEN(dev_priv) >= 10) { + if (dev_priv->cdclk.hw.vco != 0 && + dev_priv->cdclk.hw.vco != vco) + cnl_cdclk_pll_disable(dev_priv); - if (dev_priv->cdclk.hw.vco != 0 && - dev_priv->cdclk.hw.vco != vco) - bxt_de_pll_disable(dev_priv); + if (dev_priv->cdclk.hw.vco != vco) + cnl_cdclk_pll_enable(dev_priv, vco); - if (dev_priv->cdclk.hw.vco != vco) - bxt_de_pll_enable(dev_priv, vco); + } else { + if (dev_priv->cdclk.hw.vco != 0 && + dev_priv->cdclk.hw.vco != vco) + bxt_de_pll_disable(dev_priv); + + if (dev_priv->cdclk.hw.vco != vco) + bxt_de_pll_enable(dev_priv, vco); + } val = divider | skl_cdclk_decimal(cdclk); - if (pipe == INVALID_PIPE) - val |= BXT_CDCLK_CD2X_PIPE_NONE; - else - val |= BXT_CDCLK_CD2X_PIPE(pipe); + + if (INTEL_GEN(dev_priv) >= 12) { + if (pipe == INVALID_PIPE) + val |= TGL_CDCLK_CD2X_PIPE_NONE; + else + val |= TGL_CDCLK_CD2X_PIPE(pipe); + } else if (INTEL_GEN(dev_priv) >= 11) { + if (pipe == INVALID_PIPE) + val |= ICL_CDCLK_CD2X_PIPE_NONE; + else + val |= ICL_CDCLK_CD2X_PIPE(pipe); + } else { + if (pipe == INVALID_PIPE) + val |= BXT_CDCLK_CD2X_PIPE_NONE; + else + val |= BXT_CDCLK_CD2X_PIPE(pipe); + } + /* * Disable SSA Precharge when CD clock frequency < 500 MHz, * enable otherwise. */ - if (cdclk >= 500000) + if (IS_GEN9_LP(dev_priv) && cdclk >= 500000) val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; I915_WRITE(CDCLK_CTL, val); if (pipe != INVALID_PIPE) intel_wait_for_vblank(dev_priv, pipe); - /* - * The timeout isn't specified, the 2ms used here is based on - * experiment. - * FIXME: Waiting for the request completion could be delayed until - * the next PCODE request based on BSpec. - */ - ret = sandybridge_pcode_write_timeout(dev_priv, - HSW_PCODE_DE_WRITE_FREQ_REQ, - cdclk_state->voltage_level, 150, 2); + if (INTEL_GEN(dev_priv) >= 10) { + ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, + cdclk_state->voltage_level); + } else { + /* + * The timeout isn't specified, the 2ms used here is based on + * experiment. + * FIXME: Waiting for the request completion could be delayed + * until the next PCODE request based on BSpec. + */ + ret = sandybridge_pcode_write_timeout(dev_priv, + HSW_PCODE_DE_WRITE_FREQ_REQ, + cdclk_state->voltage_level, + 150, 2); + } + if (ret) { DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", ret, cdclk); @@ -1523,6 +1596,13 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, } intel_update_cdclk(dev_priv); + + if (INTEL_GEN(dev_priv) >= 10) + /* + * Can't read out the voltage level :( + * Let's just assume everything is as expected. 
+ */ + dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level; } static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) @@ -1608,115 +1688,6 @@ static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } -static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv) -{ - u32 val; - - val = I915_READ(BXT_DE_PLL_ENABLE); - val &= ~BXT_DE_PLL_PLL_ENABLE; - I915_WRITE(BXT_DE_PLL_ENABLE, val); - - /* Timeout 200us */ - if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1)) - DRM_ERROR("timeout waiting for CDCLK PLL unlock\n"); - - dev_priv->cdclk.hw.vco = 0; -} - -static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco) -{ - int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref); - u32 val; - - val = CNL_CDCLK_PLL_RATIO(ratio); - I915_WRITE(BXT_DE_PLL_ENABLE, val); - - val |= BXT_DE_PLL_PLL_ENABLE; - I915_WRITE(BXT_DE_PLL_ENABLE, val); - - /* Timeout 200us */ - if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1)) - DRM_ERROR("timeout waiting for CDCLK PLL lock\n"); - - dev_priv->cdclk.hw.vco = vco; -} - -static void cnl_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state, - enum pipe pipe) -{ - int cdclk = cdclk_state->cdclk; - int vco = cdclk_state->vco; - u32 val, divider; - int ret; - - ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, - SKL_CDCLK_PREPARE_FOR_CHANGE, - SKL_CDCLK_READY_FOR_CHANGE, - SKL_CDCLK_READY_FOR_CHANGE, 3); - if (ret) { - DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", - ret); - return; - } - - /* cdclk = vco / 2 / div{1,2} */ - switch (DIV_ROUND_CLOSEST(vco, cdclk)) { - default: - WARN_ON(cdclk != dev_priv->cdclk.hw.bypass); - WARN_ON(vco != 0); - /* fall through */ - case 2: - divider = BXT_CDCLK_CD2X_DIV_SEL_1; - break; - case 4: - divider = BXT_CDCLK_CD2X_DIV_SEL_2; - break; - } - - if (dev_priv->cdclk.hw.vco != 0 && - dev_priv->cdclk.hw.vco != vco) - cnl_cdclk_pll_disable(dev_priv); - - if (dev_priv->cdclk.hw.vco != vco) - cnl_cdclk_pll_enable(dev_priv, vco); - - val = divider | skl_cdclk_decimal(cdclk); - - if (INTEL_GEN(dev_priv) >= 12) { - if (pipe == INVALID_PIPE) - val |= TGL_CDCLK_CD2X_PIPE_NONE; - else - val |= TGL_CDCLK_CD2X_PIPE(pipe); - } else if (INTEL_GEN(dev_priv) >= 11) { - if (pipe == INVALID_PIPE) - val |= ICL_CDCLK_CD2X_PIPE_NONE; - else - val |= ICL_CDCLK_CD2X_PIPE(pipe); - } else { - if (pipe == INVALID_PIPE) - val |= BXT_CDCLK_CD2X_PIPE_NONE; - else - val |= BXT_CDCLK_CD2X_PIPE(pipe); - } - I915_WRITE(CDCLK_CTL, val); - - if (pipe != INVALID_PIPE) - intel_wait_for_vblank(dev_priv, pipe); - - /* inform PCU of the change */ - sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, - cdclk_state->voltage_level); - - intel_update_cdclk(dev_priv); - - /* - * Can't read out the voltage level :( - * Let's just assume everything is as expected. 
- */ - dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level; -} - static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv) { u32 cdctl, expected; @@ -1797,7 +1768,7 @@ sanitize: sanitized_state.voltage_level = icl_calc_voltage_level(sanitized_state.cdclk); - cnl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE); + bxt_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE); } static void icl_uninit_cdclk(struct drm_i915_private *dev_priv) @@ -1813,7 +1784,7 @@ static void icl_uninit_cdclk(struct drm_i915_private *dev_priv) cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk); - cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); + bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } static void cnl_init_cdclk(struct drm_i915_private *dev_priv) @@ -1832,7 +1803,7 @@ static void cnl_init_cdclk(struct drm_i915_private *dev_priv) cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk); cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk); - cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); + bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv) @@ -1843,7 +1814,7 @@ static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv) cdclk_state.vco = 0; cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk); - cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); + bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } /** @@ -2646,11 +2617,11 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv) void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { if (INTEL_GEN(dev_priv) >= 11) { - dev_priv->display.set_cdclk = cnl_set_cdclk; + dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk; dev_priv->cdclk.table = icl_cdclk_table; } else if (IS_CANNONLAKE(dev_priv)) { - dev_priv->display.set_cdclk = cnl_set_cdclk; + dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk; dev_priv->cdclk.table = cnl_cdclk_table; } else if (IS_GEN9_LP(dev_priv)) { -- cgit v1.2.3 From 5dac256bf76793d4e4c554a3c6784e39ccf06664 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 10 Sep 2019 08:42:48 -0700 Subject: drm/i915: Kill cnl_sanitize_cdclk() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The CNL variant of this function is identical to the BXT variant aside from not needing to handle SSA precharge. Cc: Ville Syrjälä Signed-off-by: Matt Roper Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190910154252.30503-5-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 46 ++---------------------------- 1 file changed, 2 insertions(+), 44 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 1dcb8a113b4f..ebed87a64c98 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1636,7 +1636,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) * Disable SSA Precharge when CD clock frequency < 500 MHz, * enable otherwise. 
*/ - if (dev_priv->cdclk.hw.cdclk >= 500000) + if (IS_GEN9_LP(dev_priv) && dev_priv->cdclk.hw.cdclk >= 500000) expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; if (cdctl == expected) @@ -1688,48 +1688,6 @@ static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } -static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv) -{ - u32 cdctl, expected; - - intel_update_cdclk(dev_priv); - intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); - - if (dev_priv->cdclk.hw.vco == 0 || - dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass) - goto sanitize; - - /* DPLL okay; verify the cdclock - * - * Some BIOS versions leave an incorrect decimal frequency value and - * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4, - * so sanitize this register. - */ - cdctl = I915_READ(CDCLK_CTL); - /* - * Let's ignore the pipe field, since BIOS could have configured the - * dividers both synching to an active pipe, or asynchronously - * (PIPE_NONE). - */ - cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE; - - expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) | - skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk); - - if (cdctl == expected) - /* All well; nothing to sanitize */ - return; - -sanitize: - DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); - - /* force cdclk programming */ - dev_priv->cdclk.hw.cdclk = 0; - - /* force full PLL disable + enable */ - dev_priv->cdclk.hw.vco = -1; -} - static void icl_init_cdclk(struct drm_i915_private *dev_priv) { struct intel_cdclk_state sanitized_state; @@ -1791,7 +1749,7 @@ static void cnl_init_cdclk(struct drm_i915_private *dev_priv) { struct intel_cdclk_state cdclk_state; - cnl_sanitize_cdclk(dev_priv); + bxt_sanitize_cdclk(dev_priv); if (dev_priv->cdclk.hw.cdclk != 0 && dev_priv->cdclk.hw.vco != 0) -- cgit v1.2.3 From 751a93a15cde277ada4e1be14d93eb13a0dc3edd Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 10 Sep 2019 08:42:49 -0700 Subject: drm/i915: Consolidate {bxt,cnl,icl}_uninit_cdclk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The uninitialize flow is the same on all of these platforms, aside from calculating a different frequency level. v2: Reverse platform conditional order for consistency. 
(Ville) Cc: Ville Syrjälä Signed-off-by: Matt Roper Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190910154252.30503-6-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 48 +++++++++--------------------- 1 file changed, 14 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index ebed87a64c98..679aeebfbca5 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1683,7 +1683,18 @@ static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) cdclk_state.cdclk = cdclk_state.bypass; cdclk_state.vco = 0; - cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk); + if (IS_ELKHARTLAKE(dev_priv)) + cdclk_state.voltage_level = + ehl_calc_voltage_level(cdclk_state.cdclk); + else if (INTEL_GEN(dev_priv) >= 11) + cdclk_state.voltage_level = + icl_calc_voltage_level(cdclk_state.cdclk); + else if (INTEL_GEN(dev_priv) >= 10) + cdclk_state.voltage_level = + cnl_calc_voltage_level(cdclk_state.cdclk); + else + cdclk_state.voltage_level = + bxt_calc_voltage_level(cdclk_state.cdclk); bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } @@ -1729,22 +1740,6 @@ sanitize: bxt_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE); } -static void icl_uninit_cdclk(struct drm_i915_private *dev_priv) -{ - struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw; - - cdclk_state.cdclk = cdclk_state.bypass; - cdclk_state.vco = 0; - if (IS_ELKHARTLAKE(dev_priv)) - cdclk_state.voltage_level = - ehl_calc_voltage_level(cdclk_state.cdclk); - else - cdclk_state.voltage_level = - icl_calc_voltage_level(cdclk_state.cdclk); - - bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); -} - static void cnl_init_cdclk(struct drm_i915_private *dev_priv) { struct intel_cdclk_state cdclk_state; @@ -1764,17 +1759,6 @@ static void cnl_init_cdclk(struct drm_i915_private *dev_priv) bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } -static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv) -{ - struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw; - - cdclk_state.cdclk = cdclk_state.bypass; - cdclk_state.vco = 0; - cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk); - - bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); -} - /** * intel_cdclk_init - Initialize CDCLK * @i915: i915 device @@ -1805,14 +1789,10 @@ void intel_cdclk_init(struct drm_i915_private *i915) */ void intel_cdclk_uninit(struct drm_i915_private *i915) { - if (INTEL_GEN(i915) >= 11) - icl_uninit_cdclk(i915); - else if (IS_CANNONLAKE(i915)) - cnl_uninit_cdclk(i915); + if (INTEL_GEN(i915) >= 10 || IS_GEN9_LP(i915)) + bxt_uninit_cdclk(i915); else if (IS_GEN9_BC(i915)) skl_uninit_cdclk(i915); - else if (IS_GEN9_LP(i915)) - bxt_uninit_cdclk(i915); } /** -- cgit v1.2.3 From d2f429ebb97725e9bec597aad2f0d2c7d203842f Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 10 Sep 2019 08:42:50 -0700 Subject: drm/i915: Add calc_voltage_level display vfunc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With all of the cdclk function consolidation, we can cut down on a lot of platform if/else logic by creating a vfunc that's initialized at startup. 
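A minimal standalone sketch of the pattern (struct display_funcs here is a simplified stand-in for drm_i915_display_funcs; the threshold values are the ones from the icl/ehl helpers added earlier in the series): the helper is picked once at init time, and every later caller goes through the function pointer instead of repeating platform checks.

#include <stdio.h>

typedef unsigned char u8;

static u8 icl_calc_voltage_level(int cdclk)
{
	if (cdclk > 556800)
		return 2;
	else if (cdclk > 312000)
		return 1;
	else
		return 0;
}

static u8 ehl_calc_voltage_level(int cdclk)
{
	if (cdclk > 312000)
		return 2;
	else if (cdclk > 180000)
		return 1;
	else
		return 0;
}

struct display_funcs {
	u8 (*calc_voltage_level)(int cdclk);
};

int main(void)
{
	struct display_funcs display;
	int is_elkhartlake = 0;		/* pretend platform detection */

	/* chosen once, at "driver init" */
	display.calc_voltage_level = is_elkhartlake ?
		ehl_calc_voltage_level : icl_calc_voltage_level;

	/* callers no longer carry platform if/else ladders */
	printf("cdclk 652800 -> voltage level %u\n",
	       display.calc_voltage_level(652800));
	return 0;
}
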
Cc: Ville Syrjälä Signed-off-by: Matt Roper Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190910154252.30503-7-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 75 +++++++++++------------------- drivers/gpu/drm/i915/i915_drv.h | 1 + 2 files changed, 27 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 679aeebfbca5..e1a4ac9bd4f0 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1394,18 +1394,8 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, * Can't read this out :( Let's assume it's * at least what the CDCLK frequency requires. */ - if (IS_ELKHARTLAKE(dev_priv)) - cdclk_state->voltage_level = - ehl_calc_voltage_level(cdclk_state->cdclk); - else if (INTEL_GEN(dev_priv) >= 11) - cdclk_state->voltage_level = - icl_calc_voltage_level(cdclk_state->cdclk); - else if (INTEL_GEN(dev_priv) >= 10) - cdclk_state->voltage_level = - cnl_calc_voltage_level(cdclk_state->cdclk); - else - cdclk_state->voltage_level = - bxt_calc_voltage_level(cdclk_state->cdclk); + cdclk_state->voltage_level = + dev_priv->display.calc_voltage_level(cdclk_state->cdclk); } static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) @@ -1672,7 +1662,8 @@ static void bxt_init_cdclk(struct drm_i915_private *dev_priv) */ cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0); cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk); - cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk); + cdclk_state.voltage_level = + dev_priv->display.calc_voltage_level(cdclk_state.cdclk); bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } @@ -1683,18 +1674,8 @@ static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) cdclk_state.cdclk = cdclk_state.bypass; cdclk_state.vco = 0; - if (IS_ELKHARTLAKE(dev_priv)) - cdclk_state.voltage_level = - ehl_calc_voltage_level(cdclk_state.cdclk); - else if (INTEL_GEN(dev_priv) >= 11) - cdclk_state.voltage_level = - icl_calc_voltage_level(cdclk_state.cdclk); - else if (INTEL_GEN(dev_priv) >= 10) - cdclk_state.voltage_level = - cnl_calc_voltage_level(cdclk_state.cdclk); - else - cdclk_state.voltage_level = - bxt_calc_voltage_level(cdclk_state.cdclk); + cdclk_state.voltage_level = + dev_priv->display.calc_voltage_level(cdclk_state.cdclk); bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } @@ -1730,12 +1711,8 @@ sanitize: sanitized_state.cdclk = bxt_calc_cdclk(dev_priv, 0); sanitized_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, sanitized_state.cdclk); - if (IS_ELKHARTLAKE(dev_priv)) - sanitized_state.voltage_level = - ehl_calc_voltage_level(sanitized_state.cdclk); - else - sanitized_state.voltage_level = - icl_calc_voltage_level(sanitized_state.cdclk); + sanitized_state.voltage_level = + dev_priv->display.calc_voltage_level(sanitized_state.cdclk); bxt_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE); } @@ -1754,7 +1731,8 @@ static void cnl_init_cdclk(struct drm_i915_private *dev_priv) cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0); cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk); - cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk); + cdclk_state.voltage_level = + dev_priv->display.calc_voltage_level(cdclk_state.cdclk); bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } @@ -2246,7 +2224,7 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.logical.vco = vco; 
state->cdclk.logical.cdclk = cdclk; state->cdclk.logical.voltage_level = - bxt_calc_voltage_level(cdclk); + dev_priv->display.calc_voltage_level(cdclk); if (!state->active_pipes) { cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); @@ -2255,7 +2233,7 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.actual.vco = vco; state->cdclk.actual.cdclk = cdclk; state->cdclk.actual.voltage_level = - bxt_calc_voltage_level(cdclk); + dev_priv->display.calc_voltage_level(cdclk); } else { state->cdclk.actual = state->cdclk.logical; } @@ -2310,14 +2288,9 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.logical.vco = vco; state->cdclk.logical.cdclk = cdclk; - if (IS_ELKHARTLAKE(dev_priv)) - state->cdclk.logical.voltage_level = - max(ehl_calc_voltage_level(cdclk), - cnl_compute_min_voltage_level(state)); - else - state->cdclk.logical.voltage_level = - max(icl_calc_voltage_level(cdclk), - cnl_compute_min_voltage_level(state)); + state->cdclk.logical.voltage_level = + max(dev_priv->display.calc_voltage_level(cdclk), + cnl_compute_min_voltage_level(state)); if (!state->active_pipes) { cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); @@ -2325,12 +2298,8 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.actual.vco = vco; state->cdclk.actual.cdclk = cdclk; - if (IS_ELKHARTLAKE(dev_priv)) - state->cdclk.actual.voltage_level = - ehl_calc_voltage_level(cdclk); - else - state->cdclk.actual.voltage_level = - icl_calc_voltage_level(cdclk); + state->cdclk.actual.voltage_level = + dev_priv->display.calc_voltage_level(cdclk); } else { state->cdclk.actual = state->cdclk.logical; } @@ -2554,17 +2523,25 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv) */ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { - if (INTEL_GEN(dev_priv) >= 11) { + if (IS_ELKHARTLAKE(dev_priv)) { + dev_priv->display.set_cdclk = bxt_set_cdclk; + dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk; + dev_priv->display.calc_voltage_level = ehl_calc_voltage_level; + dev_priv->cdclk.table = icl_cdclk_table; + } else if (INTEL_GEN(dev_priv) >= 11) { dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk; + dev_priv->display.calc_voltage_level = icl_calc_voltage_level; dev_priv->cdclk.table = icl_cdclk_table; } else if (IS_CANNONLAKE(dev_priv)) { dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk; + dev_priv->display.calc_voltage_level = cnl_calc_voltage_level; dev_priv->cdclk.table = cnl_cdclk_table; } else if (IS_GEN9_LP(dev_priv)) { dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; + dev_priv->display.calc_voltage_level = bxt_calc_voltage_level; dev_priv->cdclk.table = bxt_cdclk_table; } else if (IS_GEN9_BC(dev_priv)) { dev_priv->display.set_cdclk = skl_set_cdclk; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ff6aff2a4866..dc4d40b44d74 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -276,6 +276,7 @@ struct drm_i915_display_funcs { int (*compute_global_watermarks)(struct intel_atomic_state *state); void (*update_wm)(struct intel_crtc *crtc); int (*modeset_calc_cdclk)(struct intel_atomic_state *state); + u8 (*calc_voltage_level)(int cdclk); /* Returns the active state of the crtc, and if the crtc is active, * fills out the pipe-config with the hw 
state. */ bool (*get_pipe_config)(struct intel_crtc *, -- cgit v1.2.3 From 8f9f717d6c44649353e153cfcd32ad99618956a9 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 10 Sep 2019 08:42:51 -0700 Subject: drm/i915: Enhance cdclk sanitization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When reading out the BIOS-programmed cdclk state, let's make sure that the cdclk value is on the valid list for the platform, ensure that the VCO matches the cdclk, and ensure that the CD2X divider was set properly. Cc: Ville Syrjälä Signed-off-by: Matt Roper Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190910154252.30503-8-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 34 ++++++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index e1a4ac9bd4f0..988d0849ce09 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1598,6 +1598,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) { u32 cdctl, expected; + int cdclk, vco; intel_update_cdclk(dev_priv); intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); @@ -1620,8 +1621,37 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) */ cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE; - expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) | - skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk); + /* Make sure this is a legal cdclk value for the platform */ + cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk); + if (cdclk != dev_priv->cdclk.hw.cdclk) + goto sanitize; + + /* Make sure the VCO is correct for the cdclk */ + vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); + if (vco != dev_priv->cdclk.hw.vco) + goto sanitize; + + expected = skl_cdclk_decimal(cdclk); + + /* Figure out what CD2X divider we should be using for this cdclk */ + switch (DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.vco, + dev_priv->cdclk.hw.cdclk)) { + case 2: + expected |= BXT_CDCLK_CD2X_DIV_SEL_1; + break; + case 3: + expected |= BXT_CDCLK_CD2X_DIV_SEL_1_5; + break; + case 4: + expected |= BXT_CDCLK_CD2X_DIV_SEL_2; + break; + case 8: + expected |= BXT_CDCLK_CD2X_DIV_SEL_4; + break; + default: + goto sanitize; + } + /* * Disable SSA Precharge when CD clock frequency < 500 MHz, * enable otherwise. -- cgit v1.2.3 From 0c1279b58fc7de230d47bb0dc21b7a08417dee96 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Tue, 10 Sep 2019 08:42:52 -0700 Subject: drm/i915: Consolidate {bxt,cnl,icl}_init_cdclk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The BXT and CNL functions were already basically identical, whereas ICL's function tried to do its own sanitization rather than calling bxt_sanitize_cdclk. This should actually fix a bug in our ICL initialization where it would consider the /2 CD2X divider invalid and force an unnecessary sanitization (we now have valid clock frequencies that use this divider). 
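For context, the divider check added above is just a mapping from the
VCO:cdclk ratio to the CD2X field (the /2 CD2X divider mentioned above
corresponds to a VCO:cdclk ratio of 4). A stand-alone sketch of that mapping,
with illustrative enum values rather than the real register encoding:

    /* Illustrative sketch; the enum values are not the hardware encoding. */
    enum cd2x_div { CD2X_DIV_1, CD2X_DIV_1_5, CD2X_DIV_2, CD2X_DIV_4, CD2X_DIV_BAD };

    /* DIV_ROUND_CLOSEST() for positive integers. */
    static int div_round_closest(int a, int b)
    {
            return (a + b / 2) / b;
    }

    static enum cd2x_div cd2x_div_for(int vco, int cdclk)
    {
            switch (div_round_closest(vco, cdclk)) {
            case 2: return CD2X_DIV_1;      /* cdclk = vco / 2 */
            case 3: return CD2X_DIV_1_5;
            case 4: return CD2X_DIV_2;      /* the "/2" divider mentioned above */
            case 8: return CD2X_DIV_4;
            default: return CD2X_DIV_BAD;   /* unknown ratio -> re-program cdclk */
            }
    }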
Cc: Ville Syrjälä Signed-off-by: Matt Roper Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190910154252.30503-9-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 65 +----------------------------- 1 file changed, 2 insertions(+), 63 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 988d0849ce09..618a93bad0a8 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1710,63 +1710,6 @@ static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } -static void icl_init_cdclk(struct drm_i915_private *dev_priv) -{ - struct intel_cdclk_state sanitized_state; - u32 val; - - /* This sets dev_priv->cdclk.hw. */ - intel_update_cdclk(dev_priv); - intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); - - /* This means CDCLK disabled. */ - if (dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass) - goto sanitize; - - val = I915_READ(CDCLK_CTL); - - if ((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0) - goto sanitize; - - if ((val & CDCLK_FREQ_DECIMAL_MASK) != - skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk)) - goto sanitize; - - return; - -sanitize: - DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); - - sanitized_state.ref = dev_priv->cdclk.hw.ref; - sanitized_state.cdclk = bxt_calc_cdclk(dev_priv, 0); - sanitized_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, - sanitized_state.cdclk); - sanitized_state.voltage_level = - dev_priv->display.calc_voltage_level(sanitized_state.cdclk); - - bxt_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE); -} - -static void cnl_init_cdclk(struct drm_i915_private *dev_priv) -{ - struct intel_cdclk_state cdclk_state; - - bxt_sanitize_cdclk(dev_priv); - - if (dev_priv->cdclk.hw.cdclk != 0 && - dev_priv->cdclk.hw.vco != 0) - return; - - cdclk_state = dev_priv->cdclk.hw; - - cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0); - cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk); - cdclk_state.voltage_level = - dev_priv->display.calc_voltage_level(cdclk_state.cdclk); - - bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); -} - /** * intel_cdclk_init - Initialize CDCLK * @i915: i915 device @@ -1778,14 +1721,10 @@ static void cnl_init_cdclk(struct drm_i915_private *dev_priv) */ void intel_cdclk_init(struct drm_i915_private *i915) { - if (INTEL_GEN(i915) >= 11) - icl_init_cdclk(i915); - else if (IS_CANNONLAKE(i915)) - cnl_init_cdclk(i915); + if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10) + bxt_init_cdclk(i915); else if (IS_GEN9_BC(i915)) skl_init_cdclk(i915); - else if (IS_GEN9_LP(i915)) - bxt_init_cdclk(i915); } /** -- cgit v1.2.3 From 61fa60ff6e6a0e16e9bcc15f632ce5437afa6494 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 10 Sep 2019 15:38:20 +0100 Subject: drm/i915: Move GT init to intel_gt.c Code in i915_gem_init_hw is all about GT init so move it to intel_gt.c renaming to intel_gt_init_hw. Existing intel_gt_init_hw is renamed to intel_gt_init_hw_early since it is currently called from driver probe. 
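Roughly, the split leaves two entry points with different jobs; a simplified
outline of how they end up being used (error handling and the steps in
between omitted, the wrapper name is illustrative only, not a real driver
function):

    /* Simplified, illustrative outline of the call order after this change. */
    static int driver_probe_outline(struct drm_i915_private *i915)
    {
            /* probe time: wire up gt->ggtt and make sure the GT starts powered down */
            intel_gt_init_hw_early(i915);

            /* ... GGTT enable, firmware load, kernel context creation ... */

            /* full hardware bring-up; also reached on resume and after reset */
            return intel_gt_init_hw(&i915->gt);
    }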
Signed-off-by: Tvrtko Ursulin Cc: Andi Shyti Cc: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190910143823.10686-2-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 2 +- drivers/gpu/drm/i915/gt/intel_gt.c | 92 ++++++++++++++++++++++++++++++- drivers/gpu/drm/i915/gt/intel_gt.h | 3 +- drivers/gpu/drm/i915/gt/intel_reset.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 1 - drivers/gpu/drm/i915/i915_gem.c | 91 +----------------------------- drivers/gpu/drm/i915/selftests/mock_gtt.c | 2 +- 8 files changed, 98 insertions(+), 97 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 9b1129aaacfe..3bd764104d41 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -242,7 +242,7 @@ void i915_gem_resume(struct drm_i915_private *i915) mutex_lock(&i915->drm.struct_mutex); intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); - if (i915_gem_init_hw(i915)) + if (intel_gt_init_hw(&i915->gt)) goto err_wedged; /* diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index e2cc697d27fb..eef9bdae8ebb 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "intel_gt.h" #include "intel_gt_pm.h" +#include "intel_mocs.h" #include "intel_uncore.h" #include "intel_pm.h" @@ -25,7 +26,7 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) intel_uc_init_early(>->uc); } -void intel_gt_init_hw(struct drm_i915_private *i915) +void intel_gt_init_hw_early(struct drm_i915_private *i915) { i915->gt.ggtt = &i915->ggtt; @@ -33,6 +34,95 @@ void intel_gt_init_hw(struct drm_i915_private *i915) intel_gt_pm_disable(&i915->gt); } +static void init_unused_ring(struct intel_gt *gt, u32 base) +{ + struct intel_uncore *uncore = gt->uncore; + + intel_uncore_write(uncore, RING_CTL(base), 0); + intel_uncore_write(uncore, RING_HEAD(base), 0); + intel_uncore_write(uncore, RING_TAIL(base), 0); + intel_uncore_write(uncore, RING_START(base), 0); +} + +static void init_unused_rings(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + + if (IS_I830(i915)) { + init_unused_ring(gt, PRB1_BASE); + init_unused_ring(gt, SRB0_BASE); + init_unused_ring(gt, SRB1_BASE); + init_unused_ring(gt, SRB2_BASE); + init_unused_ring(gt, SRB3_BASE); + } else if (IS_GEN(i915, 2)) { + init_unused_ring(gt, SRB0_BASE); + init_unused_ring(gt, SRB1_BASE); + } else if (IS_GEN(i915, 3)) { + init_unused_ring(gt, PRB1_BASE); + init_unused_ring(gt, PRB2_BASE); + } +} + +int intel_gt_init_hw(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + int ret; + + BUG_ON(!i915->kernel_context); + ret = intel_gt_terminally_wedged(gt); + if (ret) + return ret; + + gt->last_init_time = ktime_get(); + + /* Double layer security blanket, see i915_gem_init() */ + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9) + intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf)); + + if (IS_HASWELL(i915)) + intel_uncore_write(uncore, + MI_PREDICATE_RESULT_2, + IS_HSW_GT3(i915) ? + LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); + + /* Apply the GT workarounds... */ + intel_gt_apply_workarounds(gt); + /* ...and determine whether they are sticking. 
*/ + intel_gt_verify_workarounds(gt, "init"); + + intel_gt_init_swizzling(gt); + + /* + * At least 830 can leave some of the unused rings + * "active" (ie. head != tail) after resume which + * will prevent c3 entry. Makes sure all unused rings + * are totally idle. + */ + init_unused_rings(gt); + + ret = i915_ppgtt_init_hw(gt); + if (ret) { + DRM_ERROR("Enabling PPGTT failed (%d)\n", ret); + goto out; + } + + /* We can't enable contexts until all firmware is loaded */ + ret = intel_uc_init_hw(>->uc); + if (ret) { + i915_probe_error(i915, "Enabling uc failed (%d)\n", ret); + goto out; + } + + intel_mocs_init(gt); + +out: + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + return ret; +} + static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set) { intel_uncore_rmw(uncore, reg, 0, set); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 17af21cb7ed3..e6ab0bff0efb 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -28,7 +28,8 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc) } void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); -void intel_gt_init_hw(struct drm_i915_private *i915); +void intel_gt_init_hw_early(struct drm_i915_private *i915); +int __must_check intel_gt_init_hw(struct intel_gt *gt); int intel_gt_init(struct intel_gt *gt); void intel_gt_driver_register(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index b9d84d52e986..296bbc7745fb 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -972,7 +972,7 @@ void intel_gt_reset(struct intel_gt *gt, * was running at the time of the reset (i.e. we weren't VT * switched away). 
*/ - ret = i915_gem_init_hw(gt->i915); + ret = intel_gt_init_hw(gt); if (ret) { DRM_ERROR("Failed to initialise HW following reset (%d)\n", ret); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0aadff9b8c88..8966672b0294 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1269,7 +1269,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (ret) goto err_ggtt; - intel_gt_init_hw(dev_priv); + intel_gt_init_hw_early(dev_priv); ret = i915_ggtt_enable_hw(dev_priv); if (ret) { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index dc4d40b44d74..51d575cca2ac 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2322,7 +2322,6 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); -int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); void i915_gem_driver_register(struct drm_i915_private *i915); void i915_gem_driver_unregister(struct drm_i915_private *i915); void i915_gem_driver_remove(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 141024c66f36..db2681514a0b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1148,95 +1148,6 @@ void i915_gem_sanitize(struct drm_i915_private *i915) intel_runtime_pm_put(&i915->runtime_pm, wakeref); } -static void init_unused_ring(struct intel_gt *gt, u32 base) -{ - struct intel_uncore *uncore = gt->uncore; - - intel_uncore_write(uncore, RING_CTL(base), 0); - intel_uncore_write(uncore, RING_HEAD(base), 0); - intel_uncore_write(uncore, RING_TAIL(base), 0); - intel_uncore_write(uncore, RING_START(base), 0); -} - -static void init_unused_rings(struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - - if (IS_I830(i915)) { - init_unused_ring(gt, PRB1_BASE); - init_unused_ring(gt, SRB0_BASE); - init_unused_ring(gt, SRB1_BASE); - init_unused_ring(gt, SRB2_BASE); - init_unused_ring(gt, SRB3_BASE); - } else if (IS_GEN(i915, 2)) { - init_unused_ring(gt, SRB0_BASE); - init_unused_ring(gt, SRB1_BASE); - } else if (IS_GEN(i915, 3)) { - init_unused_ring(gt, PRB1_BASE); - init_unused_ring(gt, PRB2_BASE); - } -} - -int i915_gem_init_hw(struct drm_i915_private *i915) -{ - struct intel_uncore *uncore = &i915->uncore; - struct intel_gt *gt = &i915->gt; - int ret; - - BUG_ON(!i915->kernel_context); - ret = intel_gt_terminally_wedged(gt); - if (ret) - return ret; - - gt->last_init_time = ktime_get(); - - /* Double layer security blanket, see i915_gem_init() */ - intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - - if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9) - intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf)); - - if (IS_HASWELL(i915)) - intel_uncore_write(uncore, - MI_PREDICATE_RESULT_2, - IS_HSW_GT3(i915) ? - LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); - - /* Apply the GT workarounds... */ - intel_gt_apply_workarounds(gt); - /* ...and determine whether they are sticking. */ - intel_gt_verify_workarounds(gt, "init"); - - intel_gt_init_swizzling(gt); - - /* - * At least 830 can leave some of the unused rings - * "active" (ie. head != tail) after resume which - * will prevent c3 entry. Makes sure all unused rings - * are totally idle. 
- */ - init_unused_rings(gt); - - ret = i915_ppgtt_init_hw(gt); - if (ret) { - DRM_ERROR("Enabling PPGTT failed (%d)\n", ret); - goto out; - } - - /* We can't enable contexts until all firmware is loaded */ - ret = intel_uc_init_hw(>->uc); - if (ret) { - i915_probe_error(i915, "Enabling uc failed (%d)\n", ret); - goto out; - } - - intel_mocs_init(gt); - -out: - intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); - return ret; -} - static int __intel_engines_record_defaults(struct drm_i915_private *i915) { struct i915_request *requests[I915_NUM_ENGINES] = {}; @@ -1449,7 +1360,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) intel_uc_init(&dev_priv->gt.uc); - ret = i915_gem_init_hw(dev_priv); + ret = intel_gt_init_hw(&dev_priv->gt); if (ret) goto err_uc_init; diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index e62a67e0f79c..7d5fb60b43bb 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -118,7 +118,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); - intel_gt_init_hw(i915); + intel_gt_init_hw_early(i915); } void mock_fini_ggtt(struct i915_ggtt *ggtt) -- cgit v1.2.3 From dab3588a151e5edb0cb0d56476265d2e5b3b1c79 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 10 Sep 2019 15:38:21 +0100 Subject: drm/i915: Make wait_for_timelines take struct intel_gt Timelines live in struct intel_gt so make wait_for_timelines take exactly what it needs. Signed-off-by: Tvrtko Ursulin Cc: Andi Shyti Cc: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190910143823.10686-3-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_gem.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index db2681514a0b..2da9544fa9a4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -888,10 +888,9 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915) } static long -wait_for_timelines(struct drm_i915_private *i915, - unsigned int wait, long timeout) +wait_for_timelines(struct intel_gt *gt, unsigned int wait, long timeout) { - struct intel_gt_timelines *timelines = &i915->gt.timelines; + struct intel_gt_timelines *timelines = >->timelines; struct intel_timeline *tl; unsigned long flags; @@ -934,15 +933,17 @@ wait_for_timelines(struct drm_i915_private *i915, int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags, long timeout) { + struct intel_gt *gt = &i915->gt; + /* If the device is asleep, we have no requests outstanding */ - if (!intel_gt_pm_is_awake(&i915->gt)) + if (!intel_gt_pm_is_awake(gt)) return 0; GEM_TRACE("flags=%x (%s), timeout=%ld%s\n", flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked", timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : ""); - timeout = wait_for_timelines(i915, flags, timeout); + timeout = wait_for_timelines(gt, flags, timeout); if (timeout < 0) return timeout; -- cgit v1.2.3 From ee236af8d51439ce2d4396ecaf84062b34b15f85 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 10 Sep 2019 15:38:22 +0100 Subject: drm/i915: Avoid round-trip via i915 in intel_gt_park Both in the container_of and getting to gt->awake there is no need to go via i915 since both the wakeref and awake are members of gt. 
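The container_of() step this leans on is easy to show in isolation: given a
pointer to an embedded member, it recovers the structure that embeds it, so
the park callback can land directly on the intel_gt. A self-contained toy
version (plain user-space C with toy types, not the driver's):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct wakeref { int count; };
    struct gt { int awake; struct wakeref wakeref; };

    /* The callback only gets the wakeref but can step back to the embedding gt. */
    static void gt_park(struct wakeref *wf)
    {
            struct gt *gt = container_of(wf, struct gt, wakeref);

            gt->awake = 0;
    }

    int main(void)
    {
            struct gt gt = { .awake = 1 };

            gt_park(&gt.wakeref);
            printf("awake = %d\n", gt.awake);   /* prints: awake = 0 */
            return 0;
    }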
Signed-off-by: Tvrtko Ursulin Cc: Andi Shyti Cc: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190910143823.10686-4-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 6ba0d2069f87..a2e29bcc9671 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -56,9 +56,9 @@ static int __gt_unpark(struct intel_wakeref *wf) static int __gt_park(struct intel_wakeref *wf) { - struct drm_i915_private *i915 = - container_of(wf, typeof(*i915), gt.wakeref); - intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake); + struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref); + intel_wakeref_t wakeref = fetch_and_zero(>->awake); + struct drm_i915_private *i915 = gt->i915; GEM_TRACE("\n"); -- cgit v1.2.3 From 85dd14c2918d452fa875a4175bffb76e01c5af6e Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 10 Sep 2019 15:38:23 +0100 Subject: drm/i915: Make pm_notify take intel_gt These notifications operate on intel_gt so make the code take what it needs. Signed-off-by: Tvrtko Ursulin Cc: Andi Shyti Cc: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190910143823.10686-5-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index a2e29bcc9671..2ccf8cacaa0a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -13,9 +13,9 @@ #include "intel_pm.h" #include "intel_wakeref.h" -static void pm_notify(struct drm_i915_private *i915, int state) +static void pm_notify(struct intel_gt *gt, int state) { - blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915); + blocking_notifier_call_chain(>->pm_notifications, state, gt->i915); } static int __gt_unpark(struct intel_wakeref *wf) @@ -49,7 +49,7 @@ static int __gt_unpark(struct intel_wakeref *wf) intel_gt_queue_hangcheck(gt); - pm_notify(i915, INTEL_GT_UNPARK); + pm_notify(gt, INTEL_GT_UNPARK); return 0; } @@ -62,7 +62,7 @@ static int __gt_park(struct intel_wakeref *wf) GEM_TRACE("\n"); - pm_notify(i915, INTEL_GT_PARK); + pm_notify(gt, INTEL_GT_PARK); i915_pmu_gt_parked(i915); if (INTEL_GEN(i915) >= 6) -- cgit v1.2.3 From 99013b10100c4b552eec845ee2ca5604c8332e92 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 10 Sep 2019 22:22:04 +0100 Subject: drm/i915: Make shrink/unshrink be atomic Add an atomic counter and always take the spinlock around the pin/unpin events, so that we can perform the list manipulation concurrently. 
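The pattern is a fast path on the atomic counter with a slow path under the
object-list lock: the 0 -> 1 pin transition pulls the object off the shrink
list and the final unpin puts it back, while every other pin/unpin only
touches the counter. A condensed user-space sketch of that idea with toy
stand-ins (a boolean instead of the real list, a pthread spinlock instead of
the driver's obj_lock):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    /* Toy stand-ins; initialise obj_lock with pthread_spin_init() before use. */
    struct obj {
            atomic_int shrink_pin;
            bool on_shrink_list;
    };

    static pthread_spinlock_t obj_lock;

    /* Add @a to @v unless it currently equals @u; return true if the add happened. */
    static bool add_unless(atomic_int *v, int a, int u)
    {
            int c = atomic_load(v);

            while (c != u)
                    if (atomic_compare_exchange_weak(v, &c, c + a))
                            return true;
            return false;
    }

    static void make_unshrinkable(struct obj *obj)
    {
            if (add_unless(&obj->shrink_pin, 1, 0))
                    return;                         /* fast path: already pinned */

            pthread_spin_lock(&obj_lock);
            if (atomic_fetch_add(&obj->shrink_pin, 1) == 0 && obj->on_shrink_list)
                    obj->on_shrink_list = false;    /* list_del_init() in the driver */
            pthread_spin_unlock(&obj_lock);
    }

    static void make_shrinkable(struct obj *obj)
    {
            if (add_unless(&obj->shrink_pin, -1, 1))
                    return;                         /* fast path: not the last unpin */

            pthread_spin_lock(&obj_lock);
            if (atomic_fetch_sub(&obj->shrink_pin, 1) == 1 && !obj->on_shrink_list)
                    obj->on_shrink_list = true;     /* list_add_tail() in the driver */
            pthread_spin_unlock(&obj_lock);
    }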
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190910212204.17190-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_domain.c | 3 +- drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 1 + drivers/gpu/drm/i915/gem/i915_gem_pages.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 36 ++++++++++++++---------- drivers/gpu/drm/i915/gt/intel_context.c | 2 +- 5 files changed, 26 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index da3e7cf12aa1..55c3ab59e3aa 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -494,7 +494,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) spin_lock_irqsave(&i915->mm.obj_lock, flags); - if (obj->mm.madv == I915_MADV_WILLNEED) + if (obj->mm.madv == I915_MADV_WILLNEED && + !atomic_read(&obj->mm.shrink_pin)) list_move_tail(&obj->mm.link, &i915->mm.shrink_list); spin_unlock_irqrestore(&i915->mm.obj_lock, flags); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index a558edf15ec8..d695f187b790 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -156,6 +156,7 @@ struct drm_i915_gem_object { struct { struct mutex lock; /* protects the pages and their use */ atomic_t pages_pin_count; + atomic_t shrink_pin; struct sg_table *pages; void *mapping; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 18f0ce0135c1..2e941f093a20 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -71,6 +71,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, list = &i915->mm.shrink_list; list_add_tail(&obj->mm.link, list); + atomic_set(&obj->mm.shrink_pin, 0); spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 4e55cfc2b0dc..d2c05d752909 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -516,46 +516,52 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj) { + struct drm_i915_private *i915 = obj_to_i915(obj); + unsigned long flags; + /* * We can only be called while the pages are pinned or when * the pages are released. If pinned, we should only be called * from a single caller under controlled conditions; and on release * only one caller may release us. Neither the two may cross. 
*/ - if (!list_empty(&obj->mm.link)) { /* pinned by caller */ - struct drm_i915_private *i915 = obj_to_i915(obj); - unsigned long flags; - - spin_lock_irqsave(&i915->mm.obj_lock, flags); - GEM_BUG_ON(list_empty(&obj->mm.link)); + if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0)) + return; + spin_lock_irqsave(&i915->mm.obj_lock, flags); + if (!atomic_fetch_inc(&obj->mm.shrink_pin) && + !list_empty(&obj->mm.link)) { list_del_init(&obj->mm.link); i915->mm.shrink_count--; i915->mm.shrink_memory -= obj->base.size; - - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj, struct list_head *head) { + struct drm_i915_private *i915 = obj_to_i915(obj); + unsigned long flags; + GEM_BUG_ON(!i915_gem_object_has_pages(obj)); - GEM_BUG_ON(!list_empty(&obj->mm.link)); + if (!i915_gem_object_is_shrinkable(obj)) + return; - if (i915_gem_object_is_shrinkable(obj)) { - struct drm_i915_private *i915 = obj_to_i915(obj); - unsigned long flags; + if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1)) + return; - spin_lock_irqsave(&i915->mm.obj_lock, flags); - GEM_BUG_ON(!kref_read(&obj->base.refcount)); + spin_lock_irqsave(&i915->mm.obj_lock, flags); + GEM_BUG_ON(!kref_read(&obj->base.refcount)); + if (atomic_dec_and_test(&obj->mm.shrink_pin)) { + GEM_BUG_ON(!list_empty(&obj->mm.link)); list_add_tail(&obj->mm.link, head); i915->mm.shrink_count++; i915->mm.shrink_memory += obj->base.size; - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index f55691d151ae..c0495811f493 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -134,8 +134,8 @@ static int __context_pin_state(struct i915_vma *vma) static void __context_unpin_state(struct i915_vma *vma) { - __i915_vma_unpin(vma); i915_vma_make_shrinkable(vma); + __i915_vma_unpin(vma); } static void __intel_context_retire(struct i915_active *active) -- cgit v1.2.3 From 0606259e3b3a1220a0f04a92a1654a3f674f47ee Mon Sep 17 00:00:00 2001 From: Kenneth Graunke Date: Tue, 10 Sep 2019 18:48:01 -0700 Subject: drm/i915: Whitelist COMMON_SLICE_CHICKEN2 This allows userspace to use "legacy" mode for push constants, where they are committed at 3DPRIMITIVE or flush time, rather than being committed at 3DSTATE_BINDING_TABLE_POINTERS_XS time. Gen6-8 and Gen11 both use the "legacy" behavior - only Gen9 works in the "new" way. Conflating push constants with binding tables is painful for userspace, we would like to be able to avoid doing so. 
Signed-off-by: Kenneth Graunke Cc: stable@vger.kernel.org Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190911014801.26821-1-kenneth@whitecape.org --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 243d3f77be13..41d0f786e06d 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1062,6 +1062,9 @@ static void gen9_whitelist_build(struct i915_wa_list *w) /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */ whitelist_reg(w, GEN8_HDC_CHICKEN1); + + /* WaSendPushConstantsFromMMIO:skl,bxt */ + whitelist_reg(w, COMMON_SLICE_CHICKEN2); } static void skl_whitelist_build(struct intel_engine_cs *engine) -- cgit v1.2.3 From 4dd2fbbfb532d0981b0ecd218c0597ac0047ca55 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 11 Sep 2019 10:02:43 +0100 Subject: drm/i915: Make i915_vma.flags atomic_t for mutex reduction In preparation for reducing struct_mutex stranglehold around the vm, make the vma.flags atomic so that we can acquire a pin on the vma atomically before deciding if we need to take the mutex. Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190911090243.16786-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_object.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 2 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 14 +++---- drivers/gpu/drm/i915/i915_vma.c | 29 +++++++------- drivers/gpu/drm/i915/i915_vma.h | 62 ++++++++++++++++++------------ drivers/gpu/drm/i915/selftests/mock_gtt.c | 4 +- 6 files changed, 64 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index d7855dc5a5c5..0ef60dae23a7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -163,7 +163,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) { GEM_BUG_ON(i915_vma_is_active(vma)); - vma->flags &= ~I915_VMA_PIN_MASK; + atomic_and(~I915_VMA_PIN_MASK, &vma->flags); i915_vma_destroy(vma); } GEM_BUG_ON(!list_empty(&obj->vma.list)); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 7ce5259d73d6..bfbc3e3daf92 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -688,7 +688,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); vma->pages = obj->mm.pages; - vma->flags |= I915_VMA_GLOBAL_BIND; + set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma)); __i915_vma_set_map_and_fenceable(vma); mutex_lock(&ggtt->vm.mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 48688d683e95..e4f66bfe74c2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -155,7 +155,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma, u32 pte_flags; int err; - if (!(vma->flags & I915_VMA_LOCAL_BIND)) { + if (!i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND)) { err = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->size); if (err) @@ -1877,7 +1877,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) vma->size = size; 
vma->fence_size = size; - vma->flags = I915_VMA_GGTT; + atomic_set(&vma->flags, I915_VMA_GGTT); vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ INIT_LIST_HEAD(&vma->obj_link); @@ -2440,7 +2440,7 @@ static int ggtt_bind_vma(struct i915_vma *vma, * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally * upgrade to both bound if we bind either to avoid double-binding. */ - vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; + atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags); return 0; } @@ -2470,7 +2470,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, if (flags & I915_VMA_LOCAL_BIND) { struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias; - if (!(vma->flags & I915_VMA_LOCAL_BIND)) { + if (!i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND)) { ret = alias->vm.allocate_va_range(&alias->vm, vma->node.start, vma->size); @@ -2498,7 +2498,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma) { struct drm_i915_private *i915 = vma->vm->i915; - if (vma->flags & I915_VMA_GLOBAL_BIND) { + if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) { struct i915_address_space *vm = vma->vm; intel_wakeref_t wakeref; @@ -2506,7 +2506,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma) vm->clear_range(vm, vma->node.start, vma->size); } - if (vma->flags & I915_VMA_LOCAL_BIND) { + if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND)) { struct i915_address_space *vm = &i915_vm_to_ggtt(vma->vm)->alias->vm; @@ -3308,7 +3308,7 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt) list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; - if (!(vma->flags & I915_VMA_GLOBAL_BIND)) + if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) continue; mutex_unlock(&ggtt->vm.mutex); diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 68d9f9b4d050..411047d6a909 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -171,7 +171,7 @@ vma_create(struct drm_i915_gem_object *obj, i915_gem_object_get_stride(obj)); GEM_BUG_ON(!is_power_of_2(vma->fence_alignment)); - vma->flags |= I915_VMA_GGTT; + __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); } spin_lock(&obj->vma.lock); @@ -325,7 +325,8 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, if (flags & PIN_USER) bind_flags |= I915_VMA_LOCAL_BIND; - vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); + vma_flags = atomic_read(&vma->flags); + vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; if (flags & PIN_UPDATE) bind_flags |= vma_flags; else @@ -340,7 +341,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, if (ret) return ret; - vma->flags |= bind_flags; + atomic_or(bind_flags, &vma->flags); return 0; } @@ -359,7 +360,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) } GEM_BUG_ON(!i915_vma_is_ggtt(vma)); - GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0); + GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)); ptr = vma->iomap; if (ptr == NULL) { @@ -472,9 +473,9 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end; if (mappable && fenceable) - vma->flags |= I915_VMA_CAN_FENCE; + set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); else - vma->flags &= ~I915_VMA_CAN_FENCE; + clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); } bool i915_gem_valid_gtt_space(struct i915_vma *vma, 
unsigned long color) @@ -544,7 +545,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) int ret; GEM_BUG_ON(i915_vma_is_closed(vma)); - GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); + GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); size = max(size, vma->size); @@ -678,7 +679,7 @@ static void i915_vma_remove(struct i915_vma *vma) { GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); + GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); vma->ops->clear_pages(vma); @@ -709,7 +710,7 @@ i915_vma_remove(struct i915_vma *vma) int __i915_vma_do_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) { - const unsigned int bound = vma->flags; + const unsigned int bound = atomic_read(&vma->flags); int ret; lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); @@ -732,9 +733,9 @@ int __i915_vma_do_pin(struct i915_vma *vma, if (ret) goto err_remove; - GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0); + GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)); - if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) + if ((bound ^ atomic_read(&vma->flags)) & I915_VMA_GLOBAL_BIND) __i915_vma_set_map_and_fenceable(vma); GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); @@ -744,7 +745,7 @@ err_remove: if ((bound & I915_VMA_BIND_MASK) == 0) { i915_vma_remove(vma); GEM_BUG_ON(vma->pages); - GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK); + GEM_BUG_ON(atomic_read(&vma->flags) & I915_VMA_BIND_MASK); } err_unpin: __i915_vma_unpin(vma); @@ -991,7 +992,7 @@ int i915_vma_unbind(struct i915_vma *vma) mutex_unlock(&vma->vm->mutex); __i915_vma_iounmap(vma); - vma->flags &= ~I915_VMA_CAN_FENCE; + clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); } GEM_BUG_ON(vma->fence); GEM_BUG_ON(i915_vma_has_userfault(vma)); @@ -1000,7 +1001,7 @@ int i915_vma_unbind(struct i915_vma *vma) trace_i915_vma_unbind(vma); vma->ops->unbind_vma(vma); } - vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); + atomic_and(~I915_VMA_BIND_MASK, &vma->flags); i915_vma_remove(vma); diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 425bda4db2c2..8bcb5812c446 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -72,7 +72,7 @@ struct i915_vma { * that exist in the ctx->handle_vmas LUT for this vma. */ atomic_t open_count; - unsigned long flags; + atomic_t flags; /** * How many users have pinned this object in GTT space. * @@ -97,18 +97,29 @@ struct i915_vma { * users. 
*/ #define I915_VMA_PIN_MASK 0xff -#define I915_VMA_PIN_OVERFLOW BIT(8) +#define I915_VMA_PIN_OVERFLOW_BIT 8 +#define I915_VMA_PIN_OVERFLOW ((int)BIT(I915_VMA_PIN_OVERFLOW_BIT)) /** Flags and address space this VMA is bound to */ -#define I915_VMA_GLOBAL_BIND BIT(9) -#define I915_VMA_LOCAL_BIND BIT(10) -#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW) +#define I915_VMA_GLOBAL_BIND_BIT 9 +#define I915_VMA_LOCAL_BIND_BIT 10 -#define I915_VMA_GGTT BIT(11) -#define I915_VMA_CAN_FENCE BIT(12) +#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT)) +#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT)) + +#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | \ + I915_VMA_LOCAL_BIND | \ + I915_VMA_PIN_OVERFLOW) + +#define I915_VMA_GGTT_BIT 11 +#define I915_VMA_CAN_FENCE_BIT 12 #define I915_VMA_USERFAULT_BIT 13 -#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT) -#define I915_VMA_GGTT_WRITE BIT(14) +#define I915_VMA_GGTT_WRITE_BIT 14 + +#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT)) +#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT)) +#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT)) +#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT)) struct i915_active active; @@ -162,48 +173,51 @@ int __must_check i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq, unsigned int flags); +#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter) + static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) { - return vma->flags & I915_VMA_GGTT; + return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); } static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma) { - return vma->flags & I915_VMA_GGTT_WRITE; + return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma)); } static inline void i915_vma_set_ggtt_write(struct i915_vma *vma) { GEM_BUG_ON(!i915_vma_is_ggtt(vma)); - vma->flags |= I915_VMA_GGTT_WRITE; + set_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma)); } -static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma) +static inline bool i915_vma_unset_ggtt_write(struct i915_vma *vma) { - vma->flags &= ~I915_VMA_GGTT_WRITE; + return test_and_clear_bit(I915_VMA_GGTT_WRITE_BIT, + __i915_vma_flags(vma)); } void i915_vma_flush_writes(struct i915_vma *vma); static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma) { - return vma->flags & I915_VMA_CAN_FENCE; + return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); } static inline bool i915_vma_set_userfault(struct i915_vma *vma) { GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); - return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags); + return test_and_set_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma)); } static inline void i915_vma_unset_userfault(struct i915_vma *vma) { - return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags); + return clear_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma)); } static inline bool i915_vma_has_userfault(const struct i915_vma *vma) { - return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags); + return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma)); } static inline bool i915_vma_is_closed(const struct i915_vma *vma) @@ -330,7 +344,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) /* Pin early to prevent the shrinker/eviction logic from destroying * our vma as we insert and bind. 
*/ - if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) { + if (likely(((atomic_inc_return(&vma->flags) ^ flags) & I915_VMA_BIND_MASK) == 0)) { GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); return 0; @@ -341,7 +355,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) static inline int i915_vma_pin_count(const struct i915_vma *vma) { - return vma->flags & I915_VMA_PIN_MASK; + return atomic_read(&vma->flags) & I915_VMA_PIN_MASK; } static inline bool i915_vma_is_pinned(const struct i915_vma *vma) @@ -351,13 +365,13 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma) static inline void __i915_vma_pin(struct i915_vma *vma) { - vma->flags++; - GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW); + atomic_inc(&vma->flags); + GEM_BUG_ON(atomic_read(&vma->flags) & I915_VMA_PIN_OVERFLOW); } static inline void __i915_vma_unpin(struct i915_vma *vma) { - vma->flags--; + atomic_dec(&vma->flags); } static inline void i915_vma_unpin(struct i915_vma *vma) @@ -370,7 +384,7 @@ static inline void i915_vma_unpin(struct i915_vma *vma) static inline bool i915_vma_is_bound(const struct i915_vma *vma, unsigned int where) { - return vma->flags & where; + return atomic_read(&vma->flags) & where; } static inline bool i915_node_color_differs(const struct drm_mm_node *node, diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index 7d5fb60b43bb..173f2d4dbd14 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -43,7 +43,7 @@ static int mock_bind_ppgtt(struct i915_vma *vma, u32 flags) { GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND); - vma->flags |= I915_VMA_LOCAL_BIND; + set_bit(I915_VMA_LOCAL_BIND_BIT, __i915_vma_flags(vma)); return 0; } @@ -86,7 +86,7 @@ static int mock_bind_ggtt(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { - vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; + atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags); return 0; } -- cgit v1.2.3 From 43ed22753cf18c4bf60de81d567beee908e5e9f3 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 11 Sep 2019 08:47:27 +0100 Subject: drm/i915/display: Add glk_cdclk_table MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 736da8112fee ("drm/i915: Use literal representation of cdclk tables") pushed the cdclk logic into tables, adding glk_cdclk_table but not using yet: drivers/gpu/drm/i915/display/intel_cdclk.c:1173:38: error: ‘glk_cdclk_table’ defined but not used [-Werror=unused-const-variable=] Fixes: 736da8112fee ("drm/i915: Use literal representation of cdclk tables") Signed-off-by: Chris Wilson Cc: Ville Syrjälä Cc: Matt Roper Cc: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190911074727.32585-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/display/intel_cdclk.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 618a93bad0a8..13779b6029f5 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2511,7 +2511,10 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; dev_priv->display.calc_voltage_level = bxt_calc_voltage_level; - 
dev_priv->cdclk.table = bxt_cdclk_table; + if (IS_GEMINILAKE(dev_priv)) + dev_priv->cdclk.table = glk_cdclk_table; + else + dev_priv->cdclk.table = bxt_cdclk_table; } else if (IS_GEN9_BC(dev_priv)) { dev_priv->display.set_cdclk = skl_set_cdclk; dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk; -- cgit v1.2.3 From c26a058680dcf3140b6c4e918a16074a5e739b95 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 10 Sep 2019 15:13:47 +0300 Subject: drm/i915: Use a high priority wq for nonblocking plane updates MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit system_unbound_wq can't keep up sometimes and we get dropped frames. Switch to a high priority variant. Reported-by: Heinrich Fink Tested-by: Heinrich Fink Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190910121347.22958-1-ville.syrjala@linux.intel.com Reviewed-by: Chris Wilson --- drivers/gpu/drm/i915/display/intel_display.c | 6 +++++- drivers/gpu/drm/i915/i915_drv.h | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 3b5275ab66cf..2668744007b3 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -14272,7 +14272,7 @@ static int intel_atomic_commit(struct drm_device *dev, if (nonblock && state->modeset) { queue_work(dev_priv->modeset_wq, &state->base.commit_work); } else if (nonblock) { - queue_work(system_unbound_wq, &state->base.commit_work); + queue_work(dev_priv->flip_wq, &state->base.commit_work); } else { if (state->modeset) flush_workqueue(dev_priv->modeset_wq); @@ -16181,6 +16181,8 @@ int intel_modeset_init(struct drm_device *dev) int ret; dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); + dev_priv->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | + WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); drm_mode_config_init(dev); @@ -17139,6 +17141,7 @@ void intel_modeset_driver_remove(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); + flush_workqueue(dev_priv->flip_wq); flush_workqueue(dev_priv->modeset_wq); flush_work(&dev_priv->atomic_helper.free_work); @@ -17175,6 +17178,7 @@ void intel_modeset_driver_remove(struct drm_device *dev) intel_gmbus_teardown(dev_priv); + destroy_workqueue(dev_priv->flip_wq); destroy_workqueue(dev_priv->modeset_wq); intel_fbc_cleanup_cfb(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 51d575cca2ac..2ea11123e933 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1438,6 +1438,8 @@ struct drm_i915_private { /* ordered wq for modesets */ struct workqueue_struct *modeset_wq; + /* unbound hipri wq for page flips/plane updates */ + struct workqueue_struct *flip_wq; /* Display functions */ struct drm_i915_display_funcs display; -- cgit v1.2.3 From cfcbfdd8fe140af699b538a42cf0dd4933885fe5 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 8 Jul 2019 15:53:09 +0300 Subject: drm/i915: Remove pointless planes_changed=true assignment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit i915 doesn't use the crtc_state->plane_changed flag for anything, so setting it is pointless. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190708125325.16576-4-ville.syrjala@linux.intel.com Reviewed-by: Stanislav Lisovskiy --- drivers/gpu/drm/i915/display/intel_atomic.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index d3fb75bb9eb1..698802da07b7 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -378,13 +378,6 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, plane->base.id); return PTR_ERR(state); } - - /* - * the plane is added after plane checks are run, - * but since this plane is unchanged just do the - * minimum required validation. - */ - crtc_state->base.planes_changed = true; } intel_plane = to_intel_plane(plane); -- cgit v1.2.3 From 7cb8468bbede8cac0d9ed69b2549788ecaa6b558 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 11 Sep 2019 13:57:17 +0100 Subject: drm/i915/tgl: Disable read-only ppgtt support The same read-only affliction as befell Icelake is affecting Tigerlake. Disable the read-only support as clearly it was not fixed. Testcase: igt/i915_selftests/live_gem_context References: 3936867dbc1e ("drm/i915: Disable read only ppgtt support for gen11") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190911125717.28997-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index e4f66bfe74c2..a09a9b62afbe 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1486,8 +1486,10 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) * * Gen11 has HSDES#:1807136187 unresolved. Disable ro support * for now. + * + * Gen12 has inherited the same read-only fault issue from gen11. */ - ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11; + ppgtt->vm.has_read_only = !IS_GEN_RANGE(i915, 11, 12); /* There are only few exceptions for gen >=6. chv and bxt. * And we are not sure about the latter so play safe for now. -- cgit v1.2.3 From aac60f1a867773de9eb164013d89c99f3ea1f009 Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Wed, 11 Sep 2019 02:33:35 +0000 Subject: KVM: arm/arm64: vgic: Use the appropriate TRACE_INCLUDE_PATH Commit 49dfe94fe5ad ("KVM: arm/arm64: Fix TRACE_INCLUDE_PATH") fixes TRACE_INCLUDE_PATH to the correct relative path to the define_trace.h and explains why did the old one work. The same fix should be applied to virt/kvm/arm/vgic/trace.h. 
Reviewed-by: Masahiro Yamada Signed-off-by: Zenghui Yu Signed-off-by: Marc Zyngier --- virt/kvm/arm/vgic/trace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/virt/kvm/arm/vgic/trace.h b/virt/kvm/arm/vgic/trace.h index 55fed77a9f73..4fd4f6db181b 100644 --- a/virt/kvm/arm/vgic/trace.h +++ b/virt/kvm/arm/vgic/trace.h @@ -30,7 +30,7 @@ TRACE_EVENT(vgic_update_irq_pending, #endif /* _TRACE_VGIC_H */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm/vgic +#define TRACE_INCLUDE_PATH ../../virt/kvm/arm/vgic #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE trace -- cgit v1.2.3 From 023a125d1dded8e1c21bd9974373b697544ba18d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 11 Sep 2019 12:46:55 +0100 Subject: drm/i915: Squeeze iommu status into debugfs/i915_capabilities There's no easy way of checking whether iommu is enabled for the GPU (you can grep dmesg if you know the device, or you can grep i915_gpu_info if that's available). We do have a central i915_capabilities with the intent of listing such pertinent information, so add the iommu status. Suggested-by: Martin Peres Signed-off-by: Chris Wilson Cc: Martin Peres Cc: Tomi Sarvela Cc: Mika Kuoppala Acked-by: Martin Peres Link: https://patchwork.freedesktop.org/patch/msgid/20190911114655.9254-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_debugfs.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 708855e051b5..e5835337f022 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -61,11 +61,18 @@ static int i915_capabilities(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = node_to_i915(m->private); const struct intel_device_info *info = INTEL_INFO(dev_priv); struct drm_printer p = drm_seq_file_printer(m); + const char *msg; seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv)); seq_printf(m, "platform: %s\n", intel_platform_name(info->platform)); seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv)); + msg = "n/a"; +#ifdef CONFIG_INTEL_IOMMU + msg = enableddisabled(intel_iommu_gfx_mapped); +#endif + seq_printf(m, "iommu: %s\n", msg); + intel_device_info_dump_flags(info, &p); intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p); intel_driver_caps_print(&dev_priv->caps, &p); -- cgit v1.2.3 From 249778704c01384d76984d83e6d6377ac96b2cc4 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 11 Sep 2019 12:26:08 +0300 Subject: drm/i915: add INTEL_NUM_PIPES() and use it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Abstract away direct access to ->num_pipes to allow further refactoring. No functional changes. 
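Concretely, the accessor and the iterators rebuilt on top of it (taken from
the diff below) are:

    #define INTEL_NUM_PIPES(dev_priv) (INTEL_INFO(dev_priv)->num_pipes)

    #define HAS_DISPLAY(dev_priv) (INTEL_NUM_PIPES(dev_priv) > 0)

    #define for_each_pipe(__dev_priv, __p) \
            for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++)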
Cc: Chris Wilson Cc: José Roberto de Souza Cc: Ville Syrjälä Reviewed-by: José Roberto de Souza Acked-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190911092608.13009-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 12 ++++++------ drivers/gpu/drm/i915/display/intel_display.h | 4 ++-- drivers/gpu/drm/i915/display/intel_lpe_audio.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 4 +++- drivers/gpu/drm/i915/intel_pm.c | 6 +++--- 6 files changed, 16 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 2668744007b3..a19f8c73f2e0 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -7190,7 +7190,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, } } - if (INTEL_INFO(dev_priv)->num_pipes == 2) + if (INTEL_NUM_PIPES(dev_priv) == 2) return 0; /* Ivybridge 3 pipe is really complicated */ @@ -9574,7 +9574,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc, * clear if it''s a win or loss power wise. No point in doing * this on ILK at all since it has a fixed DPLL<->pipe mapping. */ - if (INTEL_INFO(dev_priv)->num_pipes == 3 && + if (INTEL_NUM_PIPES(dev_priv) == 3 && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) dpll |= DPLL_SDVO_HIGH_SPEED; @@ -13899,7 +13899,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, entries, - INTEL_INFO(dev_priv)->num_pipes, i)) + INTEL_NUM_PIPES(dev_priv), i)) continue; updated |= cmask; @@ -16258,8 +16258,8 @@ int intel_modeset_init(struct drm_device *dev) } DRM_DEBUG_KMS("%d display pipe%s available.\n", - INTEL_INFO(dev_priv)->num_pipes, - INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : ""); + INTEL_NUM_PIPES(dev_priv), + INTEL_NUM_PIPES(dev_priv) > 1 ? 
"s" : ""); for_each_pipe(dev_priv, pipe) { ret = intel_crtc_init(dev_priv, pipe); @@ -17352,7 +17352,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, if (!error) return; - err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes); + err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv)); if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) err_printf(m, "PWR_WELL_CTL2: %08x\n", error->power_well_driver); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 33fd523c4622..f4ddde171655 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -307,10 +307,10 @@ enum phy_fia { }; #define for_each_pipe(__dev_priv, __p) \ - for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) + for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) #define for_each_pipe_masked(__dev_priv, __p, __mask) \ - for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \ + for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \ for_each_if((__mask) & BIT(__p)) #define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \ diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c index b19800b58442..0b67f7887cd0 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c @@ -114,7 +114,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) pinfo.size_data = sizeof(*pdata); pinfo.dma_mask = DMA_BIT_MASK(32); - pdata->num_pipes = INTEL_INFO(dev_priv)->num_pipes; + pdata->num_pipes = INTEL_NUM_PIPES(dev_priv); pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 3 : 2; /* B,C,D or B,C */ pdata->port[0].pipe = -1; pdata->port[1].pipe = -1; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 8966672b0294..c52ef2223580 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -340,7 +340,7 @@ static int i915_driver_modeset_probe(struct drm_device *dev) if (HAS_DISPLAY(dev_priv)) { ret = drm_vblank_init(&dev_priv->drm, - INTEL_INFO(dev_priv)->num_pipes); + INTEL_NUM_PIPES(dev_priv)); if (ret) goto out; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2ea11123e933..84fb0245cf62 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2188,7 +2188,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define GT_FREQUENCY_MULTIPLIER 50 #define GEN9_FREQ_SCALER 3 -#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0) +#define INTEL_NUM_PIPES(dev_priv) (INTEL_INFO(dev_priv)->num_pipes) + +#define HAS_DISPLAY(dev_priv) (INTEL_NUM_PIPES(dev_priv) > 0) static inline bool intel_vtd_active(void) { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 54a8b7705bc6..d0ceb272551f 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1909,7 +1909,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) for (level = 0; level < wm_state->num_levels; level++) { const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; - const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1; + const int sr_fifo_size = INTEL_NUM_PIPES(dev_priv) * 512 - 1; if (!vlv_raw_crtc_wm_is_valid(crtc_state, level)) break; @@ -2648,7 +2648,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, /* HSW allows LP1+ 
watermarks even with multiple pipes */ if (level == 0 || config->num_pipes_active > 1) { - fifo_size /= INTEL_INFO(dev_priv)->num_pipes; + fifo_size /= INTEL_NUM_PIPES(dev_priv); /* * For some reason the non self refresh @@ -9733,7 +9733,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv) dev_priv->display.update_wm = i9xx_update_wm; dev_priv->display.get_fifo_size = i9xx_get_fifo_size; } else if (IS_GEN(dev_priv, 2)) { - if (INTEL_INFO(dev_priv)->num_pipes == 1) { + if (INTEL_NUM_PIPES(dev_priv) == 1) { dev_priv->display.update_wm = i845_update_wm; dev_priv->display.get_fifo_size = i845_get_fifo_size; } else { -- cgit v1.2.3 From 74689ddfb7574e2bf08c7206a4ab0dff04978b05 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 11 Sep 2019 16:31:26 +0300 Subject: drm/i915: Fix cdclk bypass freq readout for tgl/bxt/glk MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On tgl/bxt/glk the cdclk bypass frequency depends on the PLL reference clock. So let's read out the ref clock before we try to compute the bypass clock. Cc: Matt Roper Fixes: 71dc367e2bc3 ("drm/i915: Consolidate bxt/cnl/icl cdclk readout") Signed-off-by: Ville Syrjälä Reviewed-by: Matt Roper Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190911133129.27466-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 13779b6029f5..6fc8e3c0cfba 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1351,6 +1351,8 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, u32 divider; int div; + bxt_de_pll_readout(dev_priv, cdclk_state); + if (INTEL_GEN(dev_priv) >= 12) cdclk_state->bypass = cdclk_state->ref / 2; else if (INTEL_GEN(dev_priv) >= 11) @@ -1358,7 +1360,6 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, else cdclk_state->bypass = cdclk_state->ref; - bxt_de_pll_readout(dev_priv, cdclk_state); if (cdclk_state->vco == 0) { cdclk_state->cdclk = cdclk_state->bypass; goto out; -- cgit v1.2.3 From 0a12e437049786b9c33542d16b6a03f46fab46f8 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 11 Sep 2019 16:31:27 +0300 Subject: drm/i915: Fix CD2X pipe select masking during cdclk sanitation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We're forgetting to mask off all three pipe select bits from the CDCLK_CTL value on icl+ which may lead to the extra bit being left in. That will cause us to consider the current hardware cdclk state as invalid, and we proceed to sanitize it even though the hardware may have active pipes and whatnot. Fix up the mask so we get rid of all three pipe select bits and thus hopefully no longer sanitize cdclk when it's already correctly programmed. 
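To illustrate why the narrower mask misbehaves, here is a standalone sketch; the field layout and macro names are made up for the example (not the real CDCLK_CTL bits), and only the principle matches the fix: clear the entire pipe-select field, not just one platform's "none" encoding.

#include <stdint.h>
#include <stdio.h>

#define CD2X_PIPE_SELECT_MASK   (0x7u << 19)   /* whole select field (hypothetical) */
#define OLD_CD2X_PIPE_NONE      (0x3u << 20)   /* older "none" encoding, bit 19 not covered */

int main(void)
{
        uint32_t cdctl = (1u << 19) | 0x123;                 /* pipe-select residue left by firmware */
        uint32_t wrong = cdctl & ~OLD_CD2X_PIPE_NONE;        /* bit 19 survives the clear */
        uint32_t right = cdctl & ~CD2X_PIPE_SELECT_MASK;     /* full field cleared */

        printf("wrong=%#x right=%#x\n", (unsigned)wrong, (unsigned)right);
        return 0;
}

With the stale bit left in, the sanitized value never matches the expected programming and the cdclk state is needlessly reprogrammed.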
Cc: Matt Roper Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111641 Fixes: 0c1279b58fc7 ("drm/i915: Consolidate {bxt,cnl,icl}_init_cdclk") Signed-off-by: Ville Syrjälä Reviewed-by: Matt Roper Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190911133129.27466-2-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 42 ++++++++++++++++-------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 6fc8e3c0cfba..d838c34d9cac 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1464,6 +1464,26 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco) dev_priv->cdclk.hw.vco = vco; } +static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + if (INTEL_GEN(dev_priv) >= 12) { + if (pipe == INVALID_PIPE) + return TGL_CDCLK_CD2X_PIPE_NONE; + else + return TGL_CDCLK_CD2X_PIPE(pipe); + } else if (INTEL_GEN(dev_priv) >= 11) { + if (pipe == INVALID_PIPE) + return ICL_CDCLK_CD2X_PIPE_NONE; + else + return ICL_CDCLK_CD2X_PIPE(pipe); + } else { + if (pipe == INVALID_PIPE) + return BXT_CDCLK_CD2X_PIPE_NONE; + else + return BXT_CDCLK_CD2X_PIPE(pipe); + } +} + static void bxt_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_state *cdclk_state, enum pipe pipe) @@ -1534,24 +1554,8 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, bxt_de_pll_enable(dev_priv, vco); } - val = divider | skl_cdclk_decimal(cdclk); - - if (INTEL_GEN(dev_priv) >= 12) { - if (pipe == INVALID_PIPE) - val |= TGL_CDCLK_CD2X_PIPE_NONE; - else - val |= TGL_CDCLK_CD2X_PIPE(pipe); - } else if (INTEL_GEN(dev_priv) >= 11) { - if (pipe == INVALID_PIPE) - val |= ICL_CDCLK_CD2X_PIPE_NONE; - else - val |= ICL_CDCLK_CD2X_PIPE(pipe); - } else { - if (pipe == INVALID_PIPE) - val |= BXT_CDCLK_CD2X_PIPE_NONE; - else - val |= BXT_CDCLK_CD2X_PIPE(pipe); - } + val = divider | skl_cdclk_decimal(cdclk) | + bxt_cdclk_cd2x_pipe(dev_priv, pipe); /* * Disable SSA Precharge when CD clock frequency < 500 MHz, @@ -1620,7 +1624,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) * dividers both synching to an active pipe, or asynchronously * (PIPE_NONE). */ - cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE; + cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE); /* Make sure this is a legal cdclk value for the platform */ cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk); -- cgit v1.2.3 From 502d1c04f53c787dfd6268578393d340cf614cde Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 11 Sep 2019 16:31:28 +0300 Subject: drm/i915: Reuse cnl_modeset_calc_cdclk() on icl+ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The cnl and icl .modeset_calc_cdclk() functions are identical. Drop one copy. 
Signed-off-by: Ville Syrjälä Reviewed-by: Matt Roper Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190911133129.27466-3-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 37 ++---------------------------- 1 file changed, 2 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index d838c34d9cac..3bc70472fb5f 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2227,39 +2227,6 @@ static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state) cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); - state->cdclk.logical.vco = vco; - state->cdclk.logical.cdclk = cdclk; - state->cdclk.logical.voltage_level = - max(cnl_calc_voltage_level(cdclk), - cnl_compute_min_voltage_level(state)); - - if (!state->active_pipes) { - cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); - vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); - - state->cdclk.actual.vco = vco; - state->cdclk.actual.cdclk = cdclk; - state->cdclk.actual.voltage_level = - cnl_calc_voltage_level(cdclk); - } else { - state->cdclk.actual = state->cdclk.logical; - } - - return 0; -} - -static int icl_modeset_calc_cdclk(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - int min_cdclk, cdclk, vco; - - min_cdclk = intel_compute_min_cdclk(state); - if (min_cdclk < 0) - return min_cdclk; - - cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); - vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); - state->cdclk.logical.vco = vco; state->cdclk.logical.cdclk = cdclk; state->cdclk.logical.voltage_level = @@ -2499,12 +2466,12 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { if (IS_ELKHARTLAKE(dev_priv)) { dev_priv->display.set_cdclk = bxt_set_cdclk; - dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk; + dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk; dev_priv->display.calc_voltage_level = ehl_calc_voltage_level; dev_priv->cdclk.table = icl_cdclk_table; } else if (INTEL_GEN(dev_priv) >= 11) { dev_priv->display.set_cdclk = bxt_set_cdclk; - dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk; + dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk; dev_priv->display.calc_voltage_level = icl_calc_voltage_level; dev_priv->cdclk.table = icl_cdclk_table; } else if (IS_CANNONLAKE(dev_priv)) { -- cgit v1.2.3 From 933122cc7cd26e67dfc7975f821033e89f766963 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 11 Sep 2019 16:31:29 +0300 Subject: drm/i915: Remove duplicated bxt/cnl/icl .modeset_calc_cdclk() funcs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reuse the same .modeset_calc_cdclk() function for all bxt+. The only difference in between the cnl/icl and the bxt variants is the call to cnl_compute_min_voltage_level(). We can do that call just fine on older platforms since they leave min_voltage_level[] zeroed. Let's rename the function to bxt_compute_min_voltage_level() just so it stays consistent with the rest of the naming scheme. 
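A rough standalone sketch of why the shared helper is harmless on platforms that never set a minimum level: with min_voltage_level[] left zero-initialized, the max() degenerates to the plain calculation. All names below are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define MAX_PIPES 4

struct cdclk_state {
        uint8_t min_voltage_level[MAX_PIPES];   /* stays zero on older platforms */
};

static uint8_t compute_min_voltage_level(const struct cdclk_state *s)
{
        uint8_t min = 0;
        for (int i = 0; i < MAX_PIPES; i++)
                if (s->min_voltage_level[i] > min)
                        min = s->min_voltage_level[i];
        return min;
}

static uint8_t calc_voltage_level(int cdclk)
{
        return cdclk > 300000 ? 1 : 0;          /* made-up threshold */
}

int main(void)
{
        struct cdclk_state s = {0};
        uint8_t level = calc_voltage_level(200000);
        uint8_t min = compute_min_voltage_level(&s);

        /* max(calc, min) == calc whenever min is 0 */
        printf("level=%u\n", level > min ? level : min);
        return 0;
}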
Signed-off-by: Ville Syrjälä Reviewed-by: Matt Roper Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190911133129.27466-4-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 46 ++++++------------------------ 1 file changed, 9 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 3bc70472fb5f..ea3f75c72fe8 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2017,6 +2017,10 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) } /* + * Account for port clock min voltage level requirements. + * This only really does something on CNL+ but can be + * called on earlier platforms as well. + * * Note that this functions assumes that 0 is * the lowest voltage value, and higher values * correspond to increasingly higher voltages. @@ -2025,7 +2029,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) * future platforms this code will need to be * adjusted. */ -static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state) +static u8 bxt_compute_min_voltage_level(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; @@ -2195,43 +2199,11 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); - state->cdclk.logical.vco = vco; - state->cdclk.logical.cdclk = cdclk; - state->cdclk.logical.voltage_level = - dev_priv->display.calc_voltage_level(cdclk); - - if (!state->active_pipes) { - cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); - vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); - - state->cdclk.actual.vco = vco; - state->cdclk.actual.cdclk = cdclk; - state->cdclk.actual.voltage_level = - dev_priv->display.calc_voltage_level(cdclk); - } else { - state->cdclk.actual = state->cdclk.logical; - } - - return 0; -} - -static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - int min_cdclk, cdclk, vco; - - min_cdclk = intel_compute_min_cdclk(state); - if (min_cdclk < 0) - return min_cdclk; - - cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); - vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); - state->cdclk.logical.vco = vco; state->cdclk.logical.cdclk = cdclk; state->cdclk.logical.voltage_level = max(dev_priv->display.calc_voltage_level(cdclk), - cnl_compute_min_voltage_level(state)); + bxt_compute_min_voltage_level(state)); if (!state->active_pipes) { cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); @@ -2466,17 +2438,17 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { if (IS_ELKHARTLAKE(dev_priv)) { dev_priv->display.set_cdclk = bxt_set_cdclk; - dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk; + dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; dev_priv->display.calc_voltage_level = ehl_calc_voltage_level; dev_priv->cdclk.table = icl_cdclk_table; } else if (INTEL_GEN(dev_priv) >= 11) { dev_priv->display.set_cdclk = bxt_set_cdclk; - dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk; + dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; dev_priv->display.calc_voltage_level = icl_calc_voltage_level; dev_priv->cdclk.table = icl_cdclk_table; } else if (IS_CANNONLAKE(dev_priv)) { dev_priv->display.set_cdclk = bxt_set_cdclk; - 
dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk; + dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; dev_priv->display.calc_voltage_level = cnl_calc_voltage_level; dev_priv->cdclk.table = cnl_cdclk_table; } else if (IS_GEN9_LP(dev_priv)) { -- cgit v1.2.3 From e3cb653d5cc42c2073ad174ae1130dc9e39266e9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 11 Sep 2019 18:59:26 +0100 Subject: drm/i915: Disable FBC if BIOS reserved memory (stolen) is unavailable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The FBC requires a couple of contiguous buffers, which we allocate from stolen memory. If stolen memory is unavailable, we cannot allocate those buffers and so cannot support FBC. Mark it so. Signed-off-by: Chris Wilson Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190911175926.31365-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/display/intel_fbc.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index dc34b23e2320..3111ecaeabd0 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1320,6 +1320,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) fbc->enabled = false; fbc->active = false; + if (!drm_mm_initialized(&dev_priv->mm.stolen)) + mkwrite_device_info(dev_priv)->display.has_fbc = false; + if (need_fbc_vtd_wa(dev_priv)) mkwrite_device_info(dev_priv)->display.has_fbc = false; -- cgit v1.2.3 From 582a6f90aa0d9103ab834b3a48a5bb7e4d07cac6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Sep 2019 10:29:32 +0100 Subject: drm/i915/execlists: Add a paranoid flush of the CSB pointers upon reset After a GPU reset, we need to drain all the CS events so that we have an accurate picture of the execlists state at the time of the reset. Be paranoid and force a read of the CSB write pointer from memory. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190912092933.4729-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index dcdf7cf66e7e..dbc90da2341a 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2359,6 +2359,10 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) struct i915_request *rq; u32 *regs; + mb(); /* paranoia: read the CSB pointers from after the reset */ + clflush(execlists->csb_write); + mb(); + process_csb(engine); /* drain preemption events */ /* Following the reset, we need to reload the CSB read/write pointers */ -- cgit v1.2.3 From a17592effdc16f3a51ef9c1dda30fe5c6d668263 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Sep 2019 10:29:33 +0100 Subject: drm/i915/execlists: Ensure the context is reloaded after a GPU reset After we manipulate the context to allow replay after a GPU reset, force that context to be reloaded. This should be a layer of paranoia, for if the GPU was reset, the context will no longer be resident! 
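In rough outline, the replay path amounts to the pattern below; the structure and flag names are hypothetical stand-ins, not the i915 ones.

#include <stdint.h>

#define CTX_FORCE_RESTORE   (1u << 2)   /* hypothetical descriptor flag */

struct ctx {
        uint64_t desc;
        uint32_t ring_head;
};

static void reset_context_for_replay(struct ctx *ce, uint32_t replay_head)
{
        /* Rewind the ring so the inflight request is replayed... */
        ce->ring_head = replay_head;

        /* ...and tell the hardware to reload the context image from memory,
         * since whatever it had cached was lost in the reset. */
        ce->desc |= CTX_FORCE_RESTORE;
}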
Signed-off-by: Chris Wilson Cc: Mika Kuoppala Acked-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190912092933.4729-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index dbc90da2341a..47d766ccea71 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2445,6 +2445,7 @@ out_replay: intel_ring_update_space(ce->ring); __execlists_reset_reg_state(ce, engine); __execlists_update_reg_state(ce, engine); + ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */ mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_); unwind: -- cgit v1.2.3 From 54fc577d90d01a28dbfccd277aa3de4d4abaf1ff Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Wed, 11 Sep 2019 17:07:30 +0100 Subject: drm/i915/pmu: Skip busyness sampling when and where not needed Since d0aa694b9239 ("drm/i915/pmu: Always sample an active ringbuffer") the cost of sampling the engine state on execlists platforms became a little bit higher when both engine busyness and one of the wait states are being monitored. (Previously the busyness sampling on legacy platforms was done via seqno comparison so there was no cost of mmio read.) We can avoid that by skipping busyness sampling when engine supports software busy stats and so avoid the cost of potential mmio read and sample accumulation. Signed-off-by: Tvrtko Ursulin Cc: Chris Wilson Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190911160730.22687-1-tvrtko.ursulin@linux.intel.com --- drivers/gpu/drm/i915/i915_pmu.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 8e251e719390..623ad32303a1 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -194,6 +194,10 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns) if (val & RING_WAIT_SEMAPHORE) add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns); + /* No need to sample when busy stats are supported. */ + if (intel_engine_supports_stats(engine)) + goto skip; + /* * While waiting on a semaphore or event, MI_MODE reports the * ring as idle. However, previously using the seqno, and with -- cgit v1.2.3 From 8d8b00318593e2852aef19b92f99851d50a203fc Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 11 Sep 2019 23:29:08 +0300 Subject: drm/i915: convert device info num_pipes to pipe_mask MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace device info number of pipes with a bit mask of available pipes. This will prove handy in the future. There's still a bunch of future work to do to actually allow a non-consecutive mask of pipes, but it's a start. No functional changes. 
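The idea in a standalone sketch (illustrative names, with a compiler builtin standing in for the kernel's hweight8): a mask keeps working when a middle pipe is fused off, and the old pipe count falls out of a population count.

#include <stdint.h>
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C, PIPE_D };

#define BIT(n) (1u << (n))

static unsigned int num_pipes(uint8_t pipe_mask)
{
        return (unsigned int)__builtin_popcount(pipe_mask);
}

int main(void)
{
        uint8_t pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);

        pipe_mask &= ~BIT(PIPE_C);              /* e.g. PIPE_C fused off */

        for (int p = PIPE_A; p <= PIPE_D; p++)  /* iterate only the present pipes */
                if (pipe_mask & BIT(p))
                        printf("pipe %c present\n", 'A' + p);

        printf("num pipes: %u\n", num_pipes(pipe_mask));
        return 0;
}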
Cc: Chris Wilson Cc: José Roberto de Souza Cc: Ville Syrjälä Reviewed-by: José Roberto de Souza Acked-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190911202908.19631-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- drivers/gpu/drm/i915/i915_pci.c | 24 ++++++++++++------------ drivers/gpu/drm/i915/intel_device_info.c | 10 +++++----- drivers/gpu/drm/i915/intel_device_info.h | 2 +- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 84fb0245cf62..bf600888b3f1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2188,9 +2188,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define GT_FREQUENCY_MULTIPLIER 50 #define GEN9_FREQ_SCALER 3 -#define INTEL_NUM_PIPES(dev_priv) (INTEL_INFO(dev_priv)->num_pipes) +#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask)) -#define HAS_DISPLAY(dev_priv) (INTEL_NUM_PIPES(dev_priv) > 0) +#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0) static inline bool intel_vtd_active(void) { diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index b3cc8560696b..698116276441 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -147,7 +147,7 @@ #define I830_FEATURES \ GEN(2), \ .is_mobile = 1, \ - .num_pipes = 2, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .display.has_overlay = 1, \ .display.cursor_needs_physical = 1, \ .display.overlay_needs_physical = 1, \ @@ -165,7 +165,7 @@ #define I845_FEATURES \ GEN(2), \ - .num_pipes = 1, \ + .pipe_mask = BIT(PIPE_A), \ .display.has_overlay = 1, \ .display.overlay_needs_physical = 1, \ .display.has_gmch = 1, \ @@ -203,7 +203,7 @@ static const struct intel_device_info intel_i865g_info = { #define GEN3_FEATURES \ GEN(3), \ - .num_pipes = 2, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ .engine_mask = BIT(RCS0), \ @@ -287,7 +287,7 @@ static const struct intel_device_info intel_pineview_m_info = { #define GEN4_FEATURES \ GEN(4), \ - .num_pipes = 2, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .display.has_hotplug = 1, \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ @@ -337,7 +337,7 @@ static const struct intel_device_info intel_gm45_info = { #define GEN5_FEATURES \ GEN(5), \ - .num_pipes = 2, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .display.has_hotplug = 1, \ .engine_mask = BIT(RCS0) | BIT(VCS0), \ .has_snoop = true, \ @@ -363,7 +363,7 @@ static const struct intel_device_info intel_ironlake_m_info = { #define GEN6_FEATURES \ GEN(6), \ - .num_pipes = 2, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ @@ -411,7 +411,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { #define GEN7_FEATURES \ GEN(7), \ - .num_pipes = 3, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ @@ -462,7 +462,7 @@ static const struct intel_device_info intel_ivybridge_q_info = { GEN7_FEATURES, PLATFORM(INTEL_IVYBRIDGE), .gt = 2, - .num_pipes = 0, /* legal, last one wins */ + .pipe_mask = 0, /* legal, last one wins */ .has_l3_dpf = 1, }; @@ -470,7 +470,7 @@ static const struct intel_device_info intel_valleyview_info = { PLATFORM(INTEL_VALLEYVIEW), 
GEN(7), .is_lp = 1, - .num_pipes = 2, + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), .has_runtime_pm = 1, .has_rc6 = 1, .has_rps = true, @@ -560,7 +560,7 @@ static const struct intel_device_info intel_broadwell_gt3_info = { static const struct intel_device_info intel_cherryview_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), - .num_pipes = 3, + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .display.has_hotplug = 1, .is_lp = 1, .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), @@ -631,7 +631,7 @@ static const struct intel_device_info intel_skylake_gt4_info = { .is_lp = 1, \ .display.has_hotplug = 1, \ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ - .num_pipes = 3, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ .has_64bit_reloc = 1, \ .display.has_ddi = 1, \ .has_fpga_dbg = 1, \ @@ -792,7 +792,7 @@ static const struct intel_device_info intel_elkhartlake_info = { static const struct intel_device_info intel_tigerlake_12_info = { GEN12_FEATURES, PLATFORM(INTEL_TIGERLAKE), - .num_pipes = 4, + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .require_force_probe = 1, .display.has_modular_fia = 1, .engine_mask = diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index d9b5baaef5d0..50b05a5de53b 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -896,7 +896,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) if (i915_modparams.disable_display) { DRM_INFO("Display disabled (module parameter)\n"); - info->num_pipes = 0; + info->pipe_mask = 0; } else if (HAS_DISPLAY(dev_priv) && (IS_GEN_RANGE(dev_priv, 7, 8)) && HAS_PCH_SPLIT(dev_priv)) { @@ -917,14 +917,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) (HAS_PCH_CPT(dev_priv) && !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { DRM_INFO("Display fused off, disabling\n"); - info->num_pipes = 0; + info->pipe_mask = 0; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { DRM_INFO("PipeC fused off\n"); - info->num_pipes -= 1; + info->pipe_mask &= ~BIT(PIPE_C); } } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) { u32 dfsm = I915_READ(SKL_DFSM); - u8 enabled_mask = BIT(info->num_pipes) - 1; + u8 enabled_mask = info->pipe_mask; if (dfsm & SKL_DFSM_PIPE_A_DISABLE) enabled_mask &= ~BIT(PIPE_A); @@ -945,7 +945,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n", enabled_mask); else - info->num_pipes = hweight8(enabled_mask); + info->pipe_mask = enabled_mask; } /* Initialize slice/subslice/EU info */ diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 92e0c2e0954c..d4c288860aed 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -161,7 +161,7 @@ struct intel_device_info { u32 display_mmio_offset; - u8 num_pipes; + u8 pipe_mask; #define DEFINE_FLAG(name) u8 name:1 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); -- cgit v1.2.3 From 16ffe73c186b7897a1a6888ffb4949a8b4026624 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Sep 2019 13:48:13 +0100 Subject: drm/i915/pmu: Use GT parked for estimating RC6 while asleep As we track when we put the GT device to sleep upon idling, we can use that callback to sample the current rc6 counters and record the timestamp for estimating samples after that point while asleep. 
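Condensed into a userspace sketch (not the driver code; it assumes the device sits fully in RC6 for the whole time it is parked): record the last real reading and a timestamp at park, and answer later reads with "last real value plus time asleep" instead of waking the hardware.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct rc6_sampler {
        uint64_t last_real_ns;   /* last value read from hardware */
        uint64_t sleep_since_ns; /* when the GT parked */
};

static uint64_t now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Called from the park path, while the hardware is still awake. */
static void rc6_park(struct rc6_sampler *s, uint64_t hw_rc6_ns)
{
        s->last_real_ns = hw_rc6_ns;
        s->sleep_since_ns = now_ns();
}

/* Called when a PMU read arrives while parked: estimate instead of waking. */
static uint64_t rc6_read_parked(const struct rc6_sampler *s)
{
        return s->last_real_ns + (now_ns() - s->sleep_since_ns);
}

int main(void)
{
        struct rc6_sampler s;

        rc6_park(&s, 123456789ull);              /* last real hardware reading, at park */
        printf("estimated rc6: %llu ns\n", (unsigned long long)rc6_read_parked(&s));
        return 0;
}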
v2: Stick to using ktime_t v3: Track user_wakerefs that interfere with the new intel_gt_pm_wait_for_idle v4: No need for parked/unparked estimation if !CONFIG_PM v5: Keep timer park/unpark logic as was v6: Refactor duplicated estimate/update rc6 logic v7: Pull intel_get_pm_get_if_awake() out from the pmu->lock. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105010 Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190912124813.19225-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_pm.c | 22 +++ drivers/gpu/drm/i915/gt/intel_gt_types.h | 1 + drivers/gpu/drm/i915/i915_debugfs.c | 22 ++- drivers/gpu/drm/i915/i915_pmu.c | 242 +++++++++++++++++-------------- drivers/gpu/drm/i915/i915_pmu.h | 4 +- 5 files changed, 170 insertions(+), 121 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index 3bd764104d41..a11ad4d914ca 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -141,6 +141,24 @@ bool i915_gem_load_power_context(struct drm_i915_private *i915) return switch_to_kernel_context_sync(&i915->gt); } +static void user_forcewake(struct intel_gt *gt, bool suspend) +{ + int count = atomic_read(>->user_wakeref); + + /* Inside suspend/resume so single threaded, no races to worry about. */ + if (likely(!count)) + return; + + intel_gt_pm_get(gt); + if (suspend) { + GEM_BUG_ON(count > atomic_read(>->wakeref.count)); + atomic_sub(count, >->wakeref.count); + } else { + atomic_add(count, >->wakeref.count); + } + intel_gt_pm_put(gt); +} + void i915_gem_suspend(struct drm_i915_private *i915) { GEM_TRACE("\n"); @@ -148,6 +166,8 @@ void i915_gem_suspend(struct drm_i915_private *i915) intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0); flush_workqueue(i915->wq); + user_forcewake(&i915->gt, true); + mutex_lock(&i915->drm.struct_mutex); /* @@ -259,6 +279,8 @@ void i915_gem_resume(struct drm_i915_private *i915) if (!i915_gem_load_power_context(i915)) goto err_wedged; + user_forcewake(&i915->gt, false); + out_unlock: intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); mutex_unlock(&i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index dc295c196d11..3039cef64b11 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -50,6 +50,7 @@ struct intel_gt { } timelines; struct intel_wakeref wakeref; + atomic_t user_wakeref; struct list_head closed_vma; spinlock_t closed_lock; /* guards the list of closed_vma */ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e5835337f022..f3ae525b77c0 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -3995,13 +3995,12 @@ static int i915_sseu_status(struct seq_file *m, void *unused) static int i915_forcewake_open(struct inode *inode, struct file *file) { struct drm_i915_private *i915 = inode->i_private; + struct intel_gt *gt = &i915->gt; - if (INTEL_GEN(i915) < 6) - return 0; - - file->private_data = - (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm); - intel_uncore_forcewake_user_get(&i915->uncore); + atomic_inc(>->user_wakeref); + intel_gt_pm_get(gt); + if (INTEL_GEN(i915) >= 6) + intel_uncore_forcewake_user_get(gt->uncore); return 0; } @@ -4009,13 +4008,12 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) static int 
i915_forcewake_release(struct inode *inode, struct file *file) { struct drm_i915_private *i915 = inode->i_private; + struct intel_gt *gt = &i915->gt; - if (INTEL_GEN(i915) < 6) - return 0; - - intel_uncore_forcewake_user_put(&i915->uncore); - intel_runtime_pm_put(&i915->runtime_pm, - (intel_wakeref_t)(uintptr_t)file->private_data); + if (INTEL_GEN(i915) >= 6) + intel_uncore_forcewake_user_put(&i915->uncore); + intel_gt_pm_put(gt); + atomic_dec(>->user_wakeref); return 0; } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 623ad32303a1..3310353890fb 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -116,22 +116,124 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active) return enable; } -void i915_pmu_gt_parked(struct drm_i915_private *i915) +static u64 __get_rc6(const struct intel_gt *gt) { - struct i915_pmu *pmu = &i915->pmu; + struct drm_i915_private *i915 = gt->i915; + u64 val; - if (!pmu->base.event_init) - return; + val = intel_rc6_residency_ns(i915, + IS_VALLEYVIEW(i915) ? + VLV_GT_RENDER_RC6 : + GEN6_GT_GFX_RC6); + + if (HAS_RC6p(i915)) + val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p); + + if (HAS_RC6pp(i915)) + val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp); + + return val; +} + +#if IS_ENABLED(CONFIG_PM) + +static inline s64 ktime_since(const ktime_t kt) +{ + return ktime_to_ns(ktime_sub(ktime_get(), kt)); +} + +static u64 __pmu_estimate_rc6(struct i915_pmu *pmu) +{ + u64 val; - spin_lock_irq(&pmu->lock); /* - * Signal sampling timer to stop if only engine events are enabled and - * GPU went idle. + * We think we are runtime suspended. + * + * Report the delta from when the device was suspended to now, + * on top of the last known real value, as the approximated RC6 + * counter value. */ - pmu->timer_enabled = pmu_needs_timer(pmu, false); - spin_unlock_irq(&pmu->lock); + val = ktime_since(pmu->sleep_last); + val += pmu->sample[__I915_SAMPLE_RC6].cur; + + pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; + + return val; +} + +static u64 __pmu_update_rc6(struct i915_pmu *pmu, u64 val) +{ + /* + * If we are coming back from being runtime suspended we must + * be careful not to report a larger value than returned + * previously. 
+ */ + if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { + pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; + pmu->sample[__I915_SAMPLE_RC6].cur = val; + } else { + val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur; + } + + return val; +} + +static u64 get_rc6(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct i915_pmu *pmu = &i915->pmu; + unsigned long flags; + u64 val; + + val = 0; + if (intel_gt_pm_get_if_awake(gt)) { + val = __get_rc6(gt); + intel_gt_pm_put(gt); + } + + spin_lock_irqsave(&pmu->lock, flags); + + if (val) + val = __pmu_update_rc6(pmu, val); + else + val = __pmu_estimate_rc6(pmu); + + spin_unlock_irqrestore(&pmu->lock, flags); + + return val; +} + +static void park_rc6(struct drm_i915_private *i915) +{ + struct i915_pmu *pmu = &i915->pmu; + + if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY)) + __pmu_update_rc6(pmu, __get_rc6(&i915->gt)); + + pmu->sleep_last = ktime_get(); +} + +static void unpark_rc6(struct drm_i915_private *i915) +{ + struct i915_pmu *pmu = &i915->pmu; + + /* Estimate how long we slept and accumulate that into rc6 counters */ + if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY)) + __pmu_estimate_rc6(pmu); +} + +#else + +static u64 get_rc6(struct intel_gt *gt) +{ + return __get_rc6(gt); } +static void park_rc6(struct drm_i915_private *i915) {} +static void unpark_rc6(struct drm_i915_private *i915) {} + +#endif + static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu) { if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) { @@ -143,6 +245,26 @@ static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu) } } +void i915_pmu_gt_parked(struct drm_i915_private *i915) +{ + struct i915_pmu *pmu = &i915->pmu; + + if (!pmu->base.event_init) + return; + + spin_lock_irq(&pmu->lock); + + park_rc6(i915); + + /* + * Signal sampling timer to stop if only engine events are enabled and + * GPU went idle. + */ + pmu->timer_enabled = pmu_needs_timer(pmu, false); + + spin_unlock_irq(&pmu->lock); +} + void i915_pmu_gt_unparked(struct drm_i915_private *i915) { struct i915_pmu *pmu = &i915->pmu; @@ -151,10 +273,14 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915) return; spin_lock_irq(&pmu->lock); + /* * Re-enable sampling timer when GPU goes active. */ __i915_pmu_maybe_start_timer(pmu); + + unpark_rc6(i915); + spin_unlock_irq(&pmu->lock); } @@ -430,104 +556,6 @@ static int i915_pmu_event_init(struct perf_event *event) return 0; } -static u64 __get_rc6(struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - u64 val; - - val = intel_rc6_residency_ns(i915, - IS_VALLEYVIEW(i915) ? - VLV_GT_RENDER_RC6 : - GEN6_GT_GFX_RC6); - - if (HAS_RC6p(i915)) - val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p); - - if (HAS_RC6pp(i915)) - val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp); - - return val; -} - -static u64 get_rc6(struct intel_gt *gt) -{ -#if IS_ENABLED(CONFIG_PM) - struct drm_i915_private *i915 = gt->i915; - struct intel_runtime_pm *rpm = &i915->runtime_pm; - struct i915_pmu *pmu = &i915->pmu; - intel_wakeref_t wakeref; - unsigned long flags; - u64 val; - - wakeref = intel_runtime_pm_get_if_in_use(rpm); - if (wakeref) { - val = __get_rc6(gt); - intel_runtime_pm_put(rpm, wakeref); - - /* - * If we are coming back from being runtime suspended we must - * be careful not to report a larger value than returned - * previously. 
- */ - - spin_lock_irqsave(&pmu->lock, flags); - - if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { - pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; - pmu->sample[__I915_SAMPLE_RC6].cur = val; - } else { - val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur; - } - - spin_unlock_irqrestore(&pmu->lock, flags); - } else { - struct device *kdev = rpm->kdev; - - /* - * We are runtime suspended. - * - * Report the delta from when the device was suspended to now, - * on top of the last known real value, as the approximated RC6 - * counter value. - */ - spin_lock_irqsave(&pmu->lock, flags); - - /* - * After the above branch intel_runtime_pm_get_if_in_use failed - * to get the runtime PM reference we cannot assume we are in - * runtime suspend since we can either: a) race with coming out - * of it before we took the power.lock, or b) there are other - * states than suspended which can bring us here. - * - * We need to double-check that we are indeed currently runtime - * suspended and if not we cannot do better than report the last - * known RC6 value. - */ - if (pm_runtime_status_suspended(kdev)) { - val = pm_runtime_suspended_time(kdev); - - if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) - pmu->suspended_time_last = val; - - val -= pmu->suspended_time_last; - val += pmu->sample[__I915_SAMPLE_RC6].cur; - - pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; - } else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { - val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur; - } else { - val = pmu->sample[__I915_SAMPLE_RC6].cur; - } - - spin_unlock_irqrestore(&pmu->lock, flags); - } - - return val; -#else - return __get_rc6(gt); -#endif -} - static u64 __i915_pmu_event_read(struct perf_event *event) { struct drm_i915_private *i915 = diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 4fc4f2478301..067dbbf3bdff 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -97,9 +97,9 @@ struct i915_pmu { */ struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS]; /** - * @suspended_time_last: Cached suspend time from PM core. + * @sleep_last: Last time GT parked for RC6 estimation. */ - u64 suspended_time_last; + ktime_t sleep_last; /** * @i915_attr: Memory block holding device attributes. */ -- cgit v1.2.3 From ee73e2795b416b829f0e00e7c43154922dff495b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Sep 2019 14:23:13 +0100 Subject: drm/i915/tgl: Disable preemption while being debugged We see failures where the context continues executing past a preemption event, eventually leading to situations where a request has executed before we have event submitted it to HW! It seems like tgl is ignoring our RING_TAIL updates, but more likely is that there is a missing update required for our semaphore waits around preemption. 
v2: And disable internal semaphore usage Signed-off-by: Chris Wilson Cc: Mika Kuoppala Acked-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190912132313.12751-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 3 +++ drivers/gpu/drm/i915/i915_pci.c | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 47d766ccea71..a3f0e4999744 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2939,6 +2939,9 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) engine->flags |= I915_ENGINE_HAS_PREEMPTION; } + if (INTEL_GEN(engine->i915) >= 12) /* XXX disabled for debugging */ + engine->flags &= ~I915_ENGINE_HAS_SEMAPHORES; + if (engine->class != COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) >= 12) engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO; } diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 698116276441..9236fccb3a83 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -798,6 +798,7 @@ static const struct intel_device_info intel_tigerlake_12_info = { .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), .has_rc6 = false, /* XXX disabled for debugging */ + .has_logical_ring_preemption = false, /* XXX disabled for debugging */ }; #undef GEN -- cgit v1.2.3 From 0c653722e6e669fae969c178f0d4a55d9c2dc922 Mon Sep 17 00:00:00 2001 From: Arkadiusz Hiler Date: Thu, 12 Sep 2019 15:54:18 +0300 Subject: drm/i915: Get the correct wakeref for reading HOTPLUG_EN et al. Without it we get: Unclaimed read from register 0x1e1110 WARNING: CPU: 2 PID: 1029 at drivers/gpu/drm/i915/intel_uncore.c:1101 __unclaimed_reg_debug+0x40/0x50 [i915] Call Trace: fwtable_read32+0x233/0x300 [i915] i915_interrupt_info+0xa73/0xd60 [i915] seq_read+0xdb/0x3c0 full_proxy_read+0x51/0x80 vfs_read+0x9e/0x160 ksys_read+0x8f/0xe0 do_syscall_64+0x55/0x1c0 entry_SYSCALL_64_after_hwframe+0x49/0xbe Cc: Chris Wilson Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=109824 Signed-off-by: Arkadiusz Hiler Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190912125418.23115-2-arkadiusz.hiler@intel.com --- drivers/gpu/drm/i915/i915_debugfs.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index f3ae525b77c0..43db50095257 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -534,6 +534,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data) gen8_display_interrupt_info(m); } else if (IS_VALLEYVIEW(dev_priv)) { + intel_wakeref_t pref; + seq_printf(m, "Display IER:\t%08x\n", I915_READ(VLV_IER)); seq_printf(m, "Display IIR:\t%08x\n", @@ -544,7 +546,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data) I915_READ(VLV_IMR)); for_each_pipe(dev_priv, pipe) { enum intel_display_power_domain power_domain; - intel_wakeref_t pref; power_domain = POWER_DOMAIN_PIPE(pipe); pref = intel_display_power_get_if_enabled(dev_priv, @@ -578,12 +579,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data) seq_printf(m, "PM IMR:\t\t%08x\n", I915_READ(GEN6_PMIMR)); + pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); seq_printf(m, "Port hotplug:\t%08x\n", I915_READ(PORT_HOTPLUG_EN)); seq_printf(m, "DPFLIPSTAT:\t%08x\n", I915_READ(VLV_DPFLIPSTAT)); seq_printf(m, 
"DPINVGTT:\t%08x\n", I915_READ(DPINVGTT)); + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref); } else if (!HAS_PCH_SPLIT(dev_priv)) { seq_printf(m, "Interrupt enable: %08x\n", -- cgit v1.2.3 From 0b8d6273db0635acd0dae983a39600b1b96fe77d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Sep 2019 13:26:39 +0100 Subject: drm/i915/selftests: Keep the engine awake while we keep for preemption Keep the engine awake to ensure that we don't inject any pm-idle requests. References: https://bugs.freedesktop.org/show_bug.cgi?id=111108 Signed-off-by: Chris Wilson Cc: Mika Kuoppala Acked-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190912122639.25224-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_lrc.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index d791158988d6..26d05bd1bdc8 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -897,6 +897,10 @@ static int live_suppress_self_preempt(void *arg) if (!intel_engine_has_preemption(engine)) continue; + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + goto err_wedged; + + intel_engine_pm_get(engine); engine->execlists.preempt_hang.count = 0; rq_a = spinner_create_request(&a.spin, @@ -904,12 +908,14 @@ static int live_suppress_self_preempt(void *arg) MI_NOOP); if (IS_ERR(rq_a)) { err = PTR_ERR(rq_a); + intel_engine_pm_put(engine); goto err_client_b; } i915_request_add(rq_a); if (!igt_wait_for_spinner(&a.spin, rq_a)) { pr_err("First client failed to start\n"); + intel_engine_pm_put(engine); goto err_wedged; } @@ -921,6 +927,7 @@ static int live_suppress_self_preempt(void *arg) MI_NOOP); if (IS_ERR(rq_b)) { err = PTR_ERR(rq_b); + intel_engine_pm_put(engine); goto err_client_b; } i915_request_add(rq_b); @@ -931,6 +938,7 @@ static int live_suppress_self_preempt(void *arg) if (!igt_wait_for_spinner(&b.spin, rq_b)) { pr_err("Second client failed to start\n"); + intel_engine_pm_put(engine); goto err_wedged; } @@ -944,10 +952,12 @@ static int live_suppress_self_preempt(void *arg) engine->name, engine->execlists.preempt_hang.count, depth); + intel_engine_pm_put(engine); err = -EINVAL; goto err_client_b; } + intel_engine_pm_put(engine); if (igt_flush_test(i915, I915_WAIT_LOCKED)) goto err_wedged; } -- cgit v1.2.3 From c8185520aed6faab58b310f573790badfca27981 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 13 Sep 2019 07:42:00 +0100 Subject: drm/i915/gtt: Make sure the gen6 ppgtt is bound before first use As we remove the struct_mutex protection from around the vma pinning, counters need to be atomic and aware that there may be multiple threads simultaneously active. 
Signed-off-by: Chris Wilson Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190913064200.24297-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_gem_gtt.c | 40 ++++++++++++++++++++----------------- drivers/gpu/drm/i915/i915_gem_gtt.h | 4 +++- 2 files changed, 25 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index a09a9b62afbe..e62e9d1a1307 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1790,6 +1790,8 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) gen6_ppgtt_free_pd(ppgtt); free_scratch(vm); + + mutex_destroy(&ppgtt->pin_mutex); kfree(ppgtt->base.pd); } @@ -1895,7 +1897,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) int gen6_ppgtt_pin(struct i915_ppgtt *base) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); - int err; + int err = 0; GEM_BUG_ON(ppgtt->base.vm.closed); @@ -1905,24 +1907,26 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base) * (When vma->pin_count becomes atomic, I expect we will naturally * need a larger, unpacked, type and kill this redundancy.) */ - if (ppgtt->pin_count++) + if (atomic_add_unless(&ppgtt->pin_count, 1, 0)) return 0; + if (mutex_lock_interruptible(&ppgtt->pin_mutex)) + return -EINTR; + /* * PPGTT PDEs reside in the GGTT and consists of 512 entries. The * allocator works in address space sizes, so it's multiplied by page * size. We allocate at the top of the GTT to avoid fragmentation. */ - err = i915_vma_pin(ppgtt->vma, - 0, GEN6_PD_ALIGN, - PIN_GLOBAL | PIN_HIGH); - if (err) - goto unpin; - - return 0; + if (!atomic_read(&ppgtt->pin_count)) { + err = i915_vma_pin(ppgtt->vma, + 0, GEN6_PD_ALIGN, + PIN_GLOBAL | PIN_HIGH); + } + if (!err) + atomic_inc(&ppgtt->pin_count); + mutex_unlock(&ppgtt->pin_mutex); -unpin: - ppgtt->pin_count = 0; return err; } @@ -1930,22 +1934,20 @@ void gen6_ppgtt_unpin(struct i915_ppgtt *base) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); - GEM_BUG_ON(!ppgtt->pin_count); - if (--ppgtt->pin_count) - return; - - i915_vma_unpin(ppgtt->vma); + GEM_BUG_ON(!atomic_read(&ppgtt->pin_count)); + if (atomic_dec_and_test(&ppgtt->pin_count)) + i915_vma_unpin(ppgtt->vma); } void gen6_ppgtt_unpin_all(struct i915_ppgtt *base) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); - if (!ppgtt->pin_count) + if (!atomic_read(&ppgtt->pin_count)) return; - ppgtt->pin_count = 0; i915_vma_unpin(ppgtt->vma); + atomic_set(&ppgtt->pin_count, 0); } static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) @@ -1958,6 +1960,8 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) if (!ppgtt) return ERR_PTR(-ENOMEM); + mutex_init(&ppgtt->pin_mutex); + ppgtt_init(&ppgtt->base, &i915->gt); ppgtt->base.vm.top = 1; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 201788126a89..8fd2234ba0bf 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -437,7 +437,9 @@ struct gen6_ppgtt { struct i915_vma *vma; gen6_pte_t __iomem *pd_addr; - unsigned int pin_count; + atomic_t pin_count; + struct mutex pin_mutex; + bool scan_for_unused_pt; }; -- cgit v1.2.3 From 12a97df00e2aa6fb2200a8935e78f3e00fab4eb2 Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Fri, 12 Jul 2019 13:22:13 -0700 Subject: drm/i915/display/icl: Bump up the hdisplay and vdisplay as per transcoder limits MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 
8bit On ICL+, the vertical limits for the transcoders are increased to 8192 and horizontal limits are bumped to 16K so bump up limits in intel_mode_valid() v4: * Increase the hdisplay to 16K (Ville) v3: * Supported starting ICL (Ville) * Use the higher limits from TRANS_VTOTAL register (Ville) v2: * Checkpatch warning (Manasi) Cc: Maarten Lankhorst Cc: Ville Syrjälä Signed-off-by: Manasi Navare Reviewed-by: Ville Syrjälä Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20190712202214.3906-1-manasi.d.navare@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index a19f8c73f2e0..d77fd6ddac49 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -15839,8 +15839,13 @@ intel_mode_valid(struct drm_device *dev, DRM_MODE_FLAG_CLKDIV2)) return MODE_BAD; - if (INTEL_GEN(dev_priv) >= 9 || - IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 11) { + hdisplay_max = 16384; + vdisplay_max = 8192; + htotal_max = 16384; + vtotal_max = 8192; + } else if (INTEL_GEN(dev_priv) >= 9 || + IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */ vdisplay_max = 4096; htotal_max = 8192; -- cgit v1.2.3 From e91c8a29b452671d9af7259cd30980e6a2d78005 Mon Sep 17 00:00:00 2001 From: Manasi Navare Date: Fri, 12 Jul 2019 13:38:08 -0700 Subject: drm/i915/display/icl: Bump up the plane/fb height MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On ICL+, the max supported plane height is 4320, so bump it up To support 4320, we need to increase the number of bits used to read plane_height to 13 as opposed to older 12 bits. 
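A sketch of the readout side, assuming a hypothetical PLANE_SIZE packing with height minus one in [31:16] and width minus one in [15:0]; because the extra high bits are MBZ on parts with the smaller limits, decoding with the wider mask reads back the same values everywhere.

#include <stdint.h>
#include <stdio.h>

static void decode_plane_size(uint32_t val, int *w, int *h)
{
        *h = ((val >> 16) & 0xffff) + 1;  /* MBZ bits read as zero on older parts */
        *w = ((val >>  0) & 0xffff) + 1;
}

int main(void)
{
        int w, h;

        decode_plane_size(((4320u - 1) << 16) | (7680u - 1), &w, &h);
        printf("%dx%d\n", w, h);
        return 0;
}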
v4: * Adjust the width mask also since extra bits are mbz (Ville) v3: * Use 0xffff for mask as extra bits are mbz (Ville) v2: * ICL plane height supported is 4320 (Ville) * Add a new line between max width and max height (Jose) Cc: Maarten Lankhorst Cc: Ville Syrjälä Signed-off-by: Manasi Navare Reviewed-by: Ville Syrjälä Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20190712203808.4126-1-manasi.d.navare@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index d77fd6ddac49..4e001113e828 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3333,6 +3333,16 @@ static int icl_max_plane_width(const struct drm_framebuffer *fb, return 5120; } +static int skl_max_plane_height(void) +{ + return 4096; +} + +static int icl_max_plane_height(void) +{ + return 4320; +} + static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, int main_x, int main_y, u32 main_offset) { @@ -3381,7 +3391,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) int w = drm_rect_width(&plane_state->base.src) >> 16; int h = drm_rect_height(&plane_state->base.src) >> 16; int max_width; - int max_height = 4096; + int max_height; u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset; if (INTEL_GEN(dev_priv) >= 11) @@ -3391,6 +3401,11 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) else max_width = skl_max_plane_width(fb, 0, rotation); + if (INTEL_GEN(dev_priv) >= 11) + max_height = icl_max_plane_height(); + else + max_height = skl_max_plane_height(); + if (w > max_width || h > max_height) { DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n", w, h, max_width, max_height); @@ -9873,8 +9888,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, offset = I915_READ(PLANE_OFFSET(pipe, plane_id)); val = I915_READ(PLANE_SIZE(pipe, plane_id)); - fb->height = ((val >> 16) & 0xfff) + 1; - fb->width = ((val >> 0) & 0x1fff) + 1; + fb->height = ((val >> 16) & 0xffff) + 1; + fb->width = ((val >> 0) & 0xffff) + 1; val = I915_READ(PLANE_STRIDE(pipe, plane_id)); stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); -- cgit v1.2.3 From eebab60f224fcfd560957715d08c31564d8672ed Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Sep 2019 17:08:34 +0100 Subject: drm/i915: Don't mix srcu tag and negative error codes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While srcu may use an integer tag, it does not exclude potential error codes and so may overlap with our own use of -EINTR. Use a separate outparam to store the tag, and report the error code separately. 
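The API shape in miniature (illustrative names only, with 'cookie' standing in for the srcu read-side tag): return the status, and hand the cookie back through an out-parameter so a negative cookie can never be mistaken for an error.

#include <errno.h>
#include <stdio.h>

static int reset_in_progress;           /* placeholder for the real "backoff" check */

static int acquire_cookie(void) { return -4; /* any integer is a legal cookie */ }

static int reset_trylock(int *cookie)
{
        if (reset_in_progress)
                return -EINTR;           /* caller sees a clean error code */

        *cookie = acquire_cookie();      /* cookie travels via the out-parameter */
        return 0;
}

int main(void)
{
        int cookie;
        int err = reset_trylock(&cookie);

        printf("err=%d cookie=%d\n", err, cookie);
        return err;
}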
Fixes: 2caffbf11762 ("drm/i915: Revoke mmaps and prevent access to fence registers across reset") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Ville Syrjälä Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190912160834.30601-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 6 ++---- drivers/gpu/drm/i915/gt/intel_reset.c | 8 +++----- drivers/gpu/drm/i915/gt/intel_reset.h | 2 +- 3 files changed, 6 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 82db2b783123..1748e63156a2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -245,11 +245,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) wakeref = intel_runtime_pm_get(rpm); - srcu = intel_gt_reset_trylock(ggtt->vm.gt); - if (srcu < 0) { - ret = srcu; + ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu); + if (ret) goto err_rpm; - } ret = i915_mutex_lock_interruptible(dev); if (ret) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 296bbc7745fb..8327220ac558 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1214,10 +1214,8 @@ out: intel_runtime_pm_put(>->i915->runtime_pm, wakeref); } -int intel_gt_reset_trylock(struct intel_gt *gt) +int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu) { - int srcu; - might_lock(>->reset.backoff_srcu); might_sleep(); @@ -1232,10 +1230,10 @@ int intel_gt_reset_trylock(struct intel_gt *gt) rcu_read_lock(); } - srcu = srcu_read_lock(>->reset.backoff_srcu); + *srcu = srcu_read_lock(>->reset.backoff_srcu); rcu_read_unlock(); - return srcu; + return 0; } void intel_gt_reset_unlock(struct intel_gt *gt, int tag) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h index 37a987b17108..52c00199e069 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.h +++ b/drivers/gpu/drm/i915/gt/intel_reset.h @@ -38,7 +38,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, void __i915_request_reset(struct i915_request *rq, bool guilty); -int __must_check intel_gt_reset_trylock(struct intel_gt *gt); +int __must_check intel_gt_reset_trylock(struct intel_gt *gt, int *srcu); void intel_gt_reset_unlock(struct intel_gt *gt, int tag); void intel_gt_set_wedged(struct intel_gt *gt); -- cgit v1.2.3 From 6da301e5291717861fb25025b782922d3a36c7af Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 13 Sep 2019 15:55:56 +0100 Subject: drm/i915/tgl: Limit ourselves to just rcs0 More pruning away of features until we have a stable system and a basis for debugging what's missing. 
v2: Fixup vdbox/vebox fusing Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190913145556.23912-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_pci.c | 1 + drivers/gpu/drm/i915/intel_device_info.c | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 9236fccb3a83..ee9a7959204c 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -799,6 +799,7 @@ static const struct intel_device_info intel_tigerlake_12_info = { BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), .has_rc6 = false, /* XXX disabled for debugging */ .has_logical_ring_preemption = false, /* XXX disabled for debugging */ + .engine_mask = BIT(RCS0), /* XXX reduced for debugging */ }; #undef GEN diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 50b05a5de53b..727089dcd280 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -1004,8 +1004,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) GEN11_GT_VEBOX_DISABLE_SHIFT; for (i = 0; i < I915_MAX_VCS; i++) { - if (!HAS_ENGINE(dev_priv, _VCS(i))) + if (!HAS_ENGINE(dev_priv, _VCS(i))) { + vdbox_mask &= ~BIT(i); continue; + } if (!(BIT(i) & vdbox_mask)) { info->engine_mask &= ~BIT(_VCS(i)); @@ -1026,8 +1028,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv)); for (i = 0; i < I915_MAX_VECS; i++) { - if (!HAS_ENGINE(dev_priv, _VECS(i))) + if (!HAS_ENGINE(dev_priv, _VECS(i))) { + vebox_mask &= ~BIT(i); continue; + } if (!(BIT(i) & vebox_mask)) { info->engine_mask &= ~BIT(_VECS(i)); -- cgit v1.2.3 From c9f8d18710bee1c5b87d54007c24d8bf79029fb6 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Fri, 13 Sep 2019 17:16:50 +0300 Subject: drm/i915: Update Gen11 forcewake ranges Daniele noticed new render ranges in Gen11 fw table. Bspec: 18331 Cc: Daniele Ceraolo Spurio Signed-off-by: Mika Kuoppala Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190913141652.27958-1-mika.kuoppala@linux.intel.com --- drivers/gpu/drm/i915/intel_uncore.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 9e583f13a9e4..732082a72022 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -805,9 +805,6 @@ void assert_forcewakes_active(struct intel_uncore *uncore, /* We give fast paths for the really cool registers */ #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000) -#define GEN11_NEEDS_FORCE_WAKE(reg) \ - ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000)) - #define __gen6_reg_read_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd; \ @@ -903,12 +900,7 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = { }) #define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \ -({ \ - enum forcewake_domains __fwd = 0; \ - if (GEN11_NEEDS_FORCE_WAKE((offset))) \ - __fwd = find_fw_domain(uncore, offset); \ - __fwd; \ -}) + find_fw_domain(uncore, offset) /* *Must* be sorted by offset! See intel_shadow_table_check(). 
*/ static const i915_reg_t gen8_shadowed_regs[] = { @@ -1005,8 +997,9 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = { #define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd = 0; \ - if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \ - __fwd = find_fw_domain(uncore, offset); \ + const u32 __offset = (offset); \ + if (!is_gen11_shadowed(__offset)) \ + __fwd = find_fw_domain(uncore, __offset); \ __fwd; \ }) @@ -1065,9 +1058,11 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = { GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL), GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), - GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER), - GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x1a000, 0x243ff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0x40000, 0x1bffff, 0), -- cgit v1.2.3 From cf82d9ddd3b5203dea5561dfabd1216b8159af37 Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Fri, 13 Sep 2019 17:16:51 +0300 Subject: drm/i915/tgl: Introduce gen12 forcewake ranges The media ranges extend beyond what gen11 gives so we can't piggypack on gen11 ranges, even on read side. Introduce a table for gen12 and accessors for it. v2: correctly implement gen12_fwtable_write/read (Daniele) v3: update with ranges from bspec. v4: avoid GEN11_NEEDS_FORCEWAKE (Mika) v5: bspec ref (Daniele) BSpec: 52078 Cc: Tvrtko Ursulin Cc: Daniele Ceraolo Spurio Signed-off-by: Michel Thierry Signed-off-by: Mika Kuoppala Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190913141652.27958-2-mika.kuoppala@linux.intel.com --- drivers/gpu/drm/i915/intel_uncore.c | 75 ++++++++++++++++++++++++++- drivers/gpu/drm/i915/selftests/intel_uncore.c | 2 + 2 files changed, 76 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 732082a72022..94a97bf8c021 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -902,6 +902,9 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = { #define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \ find_fw_domain(uncore, offset) +#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \ + find_fw_domain(uncore, offset) + /* *Must* be sorted by offset! See intel_shadow_table_check(). 
*/ static const i915_reg_t gen8_shadowed_regs[] = { RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ @@ -927,6 +930,20 @@ static const i915_reg_t gen11_shadowed_regs[] = { /* TODO: Other registers are not yet used */ }; +static const i915_reg_t gen12_shadowed_regs[] = { + RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ + GEN6_RPNSWREQ, /* 0xA008 */ + GEN6_RC_VIDEO_FREQ, /* 0xA00C */ + RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ + RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */ + RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */ + RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */ + RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */ + RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */ + RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */ + /* TODO: Other registers are not yet used */ +}; + static int mmio_reg_cmp(u32 key, const i915_reg_t *reg) { u32 offset = i915_mmio_reg_offset(*reg); @@ -949,6 +966,7 @@ static bool is_gen##x##_shadowed(u32 offset) \ __is_genX_shadowed(8) __is_genX_shadowed(11) +__is_genX_shadowed(12) static enum forcewake_domains gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) @@ -1003,6 +1021,15 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = { __fwd; \ }) +#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \ +({ \ + enum forcewake_domains __fwd = 0; \ + const u32 __offset = (offset); \ + if (!is_gen12_shadowed(__offset)) \ + __fwd = find_fw_domain(uncore, __offset); \ + __fwd; \ +}) + /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */ static const struct intel_forcewake_range __gen9_fw_ranges[] = { GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), @@ -1075,6 +1102,46 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = { GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1) }; +/* *Must* be sorted by offset ranges! See intel_fw_table_check(). 
*/ +static const struct intel_forcewake_range __gen12_fw_ranges[] = { + GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ + GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL), + GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), + GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x40000, 0x1bffff, 0), + GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), + GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), + GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), + GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), + GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), + GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1) +}; + static void ilk_dummy_write(struct intel_uncore *uncore) { @@ -1223,6 +1290,7 @@ __gen_read(func, 16) \ __gen_read(func, 32) \ __gen_read(func, 64) +__gen_reg_read_funcs(gen12_fwtable); __gen_reg_read_funcs(gen11_fwtable); __gen_reg_read_funcs(fwtable); __gen_reg_read_funcs(gen6); @@ -1314,6 +1382,7 @@ __gen_write(func, 8) \ __gen_write(func, 16) \ __gen_write(func, 32) +__gen_reg_write_funcs(gen12_fwtable); __gen_reg_write_funcs(gen11_fwtable); __gen_reg_write_funcs(fwtable); __gen_reg_write_funcs(gen8); @@ -1685,10 +1754,14 @@ static int uncore_forcewake_init(struct intel_uncore *uncore) ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges); ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); - } else { + } else if (IS_GEN(i915, 11)) { ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges); ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable); ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable); + } else { + ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable); + ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable); } uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier; diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 86815c6072a1..0ffb141eb988 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -67,6 +67,7 @@ static int intel_shadow_table_check(void) } reg_lists[] = { { gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) }, { gen11_shadowed_regs, 
ARRAY_SIZE(gen11_shadowed_regs) }, + { gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) }, }; const i915_reg_t *reg; unsigned int i, j; @@ -101,6 +102,7 @@ int intel_uncore_mock_selftests(void) { __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false }, { __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true }, { __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true }, + { __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true }, }; int err, i; -- cgit v1.2.3 From 2f7155629c9dd4d4ed0c2ec95a8982c84882b9e2 Mon Sep 17 00:00:00 2001 From: Mika Kuoppala Date: Fri, 13 Sep 2019 21:06:38 +0100 Subject: drm/i915/tgl: Re-enable rc6 We think that we got rc6 problems sorted out. Flip the switch and let CI expose our tendency to naive optimism. Signed-off-by: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190913200638.31939-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_pci.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index ee9a7959204c..e4a26bbd8788 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -797,7 +797,6 @@ static const struct intel_device_info intel_tigerlake_12_info = { .display.has_modular_fia = 1, .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), - .has_rc6 = false, /* XXX disabled for debugging */ .has_logical_ring_preemption = false, /* XXX disabled for debugging */ .engine_mask = BIT(RCS0), /* XXX reduced for debugging */ }; -- cgit v1.2.3 From a2b69ea4d26d1077b00924ff34e46b28d997aae2 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 13 Sep 2019 13:04:07 +0300 Subject: drm/i915: introduce INTEL_DISPLAY_ENABLED() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prepare for making a distinction between not having display and having disabled display. Add INTEL_DISPLAY_ENABLED() and use it where HAS_DISPLAY() is used after intel_device_info_runtime_init(). This is initially duplication, as disabling display still leads to ->pipe_mask = 0 and HAS_DISPLAY() being false. Note that ever since i915.display_disable was introduced, it has not affected PCH detection even if it uses HAS_DISPLAY(), as display disable happens after that. Since INTEL_DISPLAY_ENABLED() will not make sense unless HAS_DISPLAY() is true, include a warning for catching misuses making decisions on INTEL_DISPLAY_ENABLED() when HAS_DISPLAY() is false. 
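The guard boils down to a comma-expression macro: warn when the precondition does not hold, then report the module-parameter state. A rough, self-contained sketch of that pattern (the struct, flag and WARN_ON stand-ins below are invented for illustration, not the driver's definitions):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the device info and the module parameter. */
struct dev_info { bool has_display_hw; };
static bool disable_display;

#define WARN_ON(x) ((x) ? (fprintf(stderr, "WARN_ON(%s)\n", #x), 1) : 0)

#define HAS_DISPLAY(d)     ((d)->has_display_hw)
/* Only meaningful when HAS_DISPLAY() is true; warn loudly (but still
 * evaluate) when a caller asks anyway. */
#define DISPLAY_ENABLED(d) (WARN_ON(!HAS_DISPLAY(d)), !disable_display)

int main(void)
{
	struct dev_info gpu = { .has_display_hw = true };
	struct dev_info headless = { .has_display_hw = false };

	disable_display = false;
	printf("hw present, param off: %d\n", HAS_DISPLAY(&gpu) && DISPLAY_ENABLED(&gpu));

	disable_display = true;
	printf("hw present, param on:  %d\n", HAS_DISPLAY(&gpu) && DISPLAY_ENABLED(&gpu));

	/* Misuse: no display hardware at all, so the WARN_ON fires. */
	printf("no hw: %d\n", DISPLAY_ENABLED(&headless));
	return 0;
}

The comma operator keeps the macro usable as a plain expression inside an if () while still flagging callers that consult the enable bit on hardware with no display at all.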
v2: Remove INTEL_DISPLAY_ENABLED() check from intel_detect_pch() (Chris) Cc: Chris Wilson Cc: José Roberto de Souza Cc: Ville Syrjälä Reviewed-by: José Roberto de Souza Acked-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190913100407.30991-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 2 +- drivers/gpu/drm/i915/display/intel_display.c | 4 ++-- drivers/gpu/drm/i915/display/intel_fbdev.c | 2 +- drivers/gpu/drm/i915/display/intel_gmbus.c | 2 +- drivers/gpu/drm/i915/i915_drv.c | 8 ++++---- drivers/gpu/drm/i915/i915_drv.h | 3 +++ 6 files changed, 12 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index efb39f350b19..1def550c68c8 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1833,7 +1833,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv) const struct bdb_header *bdb; u8 __iomem *bios = NULL; - if (!HAS_DISPLAY(dev_priv)) { + if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) { DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n"); return; } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 4e001113e828..e75945a53e06 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -15381,7 +15381,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_pps_init(dev_priv); - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) return; if (INTEL_GEN(dev_priv) >= 12) { @@ -17293,7 +17293,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder)); - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) return NULL; error = kzalloc(sizeof(*error), GFP_ATOMIC); diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index d59eee5c5d9c..68338669f054 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -444,7 +444,7 @@ int intel_fbdev_init(struct drm_device *dev) struct intel_fbdev *ifbdev; int ret; - if (WARN_ON(!HAS_DISPLAY(dev_priv))) + if (WARN_ON(!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))) return -ENODEV; ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index d6775a005726..3d4d19ac1d14 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -836,7 +836,7 @@ int intel_gmbus_setup(struct drm_i915_private *dev_priv) unsigned int pin; int ret; - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) return 0; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c52ef2223580..c95986a984c8 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -338,7 +338,7 @@ static int i915_driver_modeset_probe(struct drm_device *dev) if (i915_inject_probe_failure(dev_priv)) return -ENODEV; - if (HAS_DISPLAY(dev_priv)) { + if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) { ret = drm_vblank_init(&dev_priv->drm, INTEL_NUM_PIPES(dev_priv)); if 
(ret) @@ -389,7 +389,7 @@ static int i915_driver_modeset_probe(struct drm_device *dev) intel_overlay_setup(dev_priv); - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) return 0; ret = intel_fbdev_init(dev); @@ -1415,7 +1415,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) } else DRM_ERROR("Failed to register driver for userspace access!\n"); - if (HAS_DISPLAY(dev_priv)) { + if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) { /* Must be done after probing outputs */ intel_opregion_register(dev_priv); acpi_video_register(); @@ -1438,7 +1438,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) * We need to coordinate the hotplugs with the asynchronous fbdev * configuration, for which we use the fbdev->async_cookie. */ - if (HAS_DISPLAY(dev_priv)) + if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) drm_kms_helper_poll_init(dev); intel_power_domains_enable(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bf600888b3f1..4faec2f94e19 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2192,6 +2192,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0) +/* Only valid when HAS_DISPLAY() is true */ +#define INTEL_DISPLAY_ENABLED(dev_priv) (WARN_ON(!HAS_DISPLAY(dev_priv)), !i915_modparams.disable_display) + static inline bool intel_vtd_active(void) { #ifdef CONFIG_INTEL_IOMMU -- cgit v1.2.3 From d1d23d7f4be6a619af4ab729a12856e9cbded985 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 13 Sep 2019 22:31:54 +0300 Subject: drm/i915: Replace is_planar_yuv_format() with drm_format_info_is_yuv_semiplanar() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There's a helper in drm_fourcc.h these days to check if we're dealing with a two plane YUV format. Make use of it. Also s/plane/color_plane/ in skl_plane_relative_data_rate() to reduce the confusion.
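The two-plane ("semiplanar") test the helper expresses amounts to: the format is YUV and carries exactly two planes, luma plus interleaved chroma. A minimal standalone sketch with a made-up stand-in for the format descriptor (not the real drm_format_info):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the format descriptor; only the fields the check needs. */
struct fmt_info {
	unsigned int fourcc;
	unsigned int num_planes;
	bool is_yuv;
};

/* Semiplanar YUV: a YUV format laid out as exactly two planes. */
static bool is_yuv_semiplanar(const struct fmt_info *f)
{
	return f->is_yuv && f->num_planes == 2;
}

int main(void)
{
	struct fmt_info nv12   = { .fourcc = 0x3231564e /* NV12 */, .num_planes = 2, .is_yuv = true };
	struct fmt_info xrgb   = { .fourcc = 0x34325258 /* XR24 */, .num_planes = 1, .is_yuv = false };
	struct fmt_info yuv420 = { .fourcc = 0x32315559 /* YU12 */, .num_planes = 3, .is_yuv = true };

	printf("NV12: %d  XR24: %d  YU12: %d\n",
	       is_yuv_semiplanar(&nv12), is_yuv_semiplanar(&xrgb), is_yuv_semiplanar(&yuv420));
	return 0;
}

Keying the test off the format descriptor also means the old fourcc switch (NV12/P010/P012/P016) no longer needs a new case every time another two-plane variant appears.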
Reviewed-by: Maarten Lankhorst Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190913193157.9556-2-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_atomic_plane.c | 5 +++-- drivers/gpu/drm/i915/display/intel_display.c | 10 ++++----- drivers/gpu/drm/i915/display/intel_display.h | 3 ++- drivers/gpu/drm/i915/display/intel_sprite.c | 20 ++++------------- drivers/gpu/drm/i915/display/intel_sprite.h | 1 - drivers/gpu/drm/i915/intel_pm.c | 27 +++++++++++------------ 6 files changed, 27 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index d1fcdf206da4..476ef0906ba0 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -144,6 +144,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ struct intel_plane_state *new_plane_state) { struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane); + const struct drm_framebuffer *fb = new_plane_state->base.fb; int ret; new_crtc_state->active_planes &= ~BIT(plane->id); @@ -164,11 +165,11 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ new_crtc_state->active_planes |= BIT(plane->id); if (new_plane_state->base.visible && - is_planar_yuv_format(new_plane_state->base.fb->format->format)) + drm_format_info_is_yuv_semiplanar(fb->format)) new_crtc_state->nv12_planes |= BIT(plane->id); if (new_plane_state->base.visible && - new_plane_state->base.fb->format->format == DRM_FORMAT_C8) + fb->format->format == DRM_FORMAT_C8) new_crtc_state->c8_planes |= BIT(plane->id); if (new_plane_state->base.visible || old_plane_state->base.visible) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index e75945a53e06..276adc303564 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3545,7 +3545,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) * Handle the AUX surface first since * the main surface setup depends on it. */ - if (is_planar_yuv_format(fb->format->format)) { + if (drm_format_info_is_yuv_semiplanar(fb->format)) { ret = skl_check_nv12_aux_surface(plane_state); if (ret) return ret; @@ -5463,7 +5463,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, return 0; } - if (format && is_planar_yuv_format(format->format) && + if (format && drm_format_info_is_yuv_semiplanar(format) && (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n"); return -EINVAL; @@ -5540,7 +5540,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, /* Pre-gen11 and SDR planes always need a scaler for planar formats. 
*/ if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && - fb && is_planar_yuv_format(fb->format->format)) + fb && drm_format_info_is_yuv_semiplanar(fb->format)) need_scaler = true; ret = skl_update_scaler(crtc_state, force_detach, @@ -14552,7 +14552,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane, int skl_max_scale(const struct intel_crtc_state *crtc_state, - u32 pixel_format) + const struct drm_format_info *format) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -14577,7 +14577,7 @@ skl_max_scale(const struct intel_crtc_state *crtc_state, * or * cdclk/crtc_clock */ - mult = is_planar_yuv_format(pixel_format) ? 2 : 3; + mult = drm_format_info_is_yuv_semiplanar(format) ? 2 : 3; tmpclk1 = (1 << 16) * mult - 1; tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock); max_scale = min(tmpclk1, tmpclk2); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index f4ddde171655..66330fcb10d4 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -34,6 +34,7 @@ struct drm_connector; struct drm_device; struct drm_encoder; struct drm_file; +struct drm_format_info; struct drm_framebuffer; struct drm_i915_error_state_buf; struct drm_i915_gem_object; @@ -548,7 +549,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center); int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); int skl_max_scale(const struct intel_crtc_state *crtc_state, - u32 pixel_format); + const struct drm_format_info *format); u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index e415b0ad4a42..7a7078d0ba23 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -48,19 +48,6 @@ #include "intel_psr.h" #include "intel_sprite.h" -bool is_planar_yuv_format(u32 pixelformat) -{ - switch (pixelformat) { - case DRM_FORMAT_NV12: - case DRM_FORMAT_P010: - case DRM_FORMAT_P012: - case DRM_FORMAT_P016: - return true; - default: - return false; - } -} - int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs) { @@ -361,6 +348,7 @@ skl_program_scaler(struct intel_plane *plane, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->base.fb; enum pipe pipe = plane->pipe; int scaler_id = plane_state->scaler_id; const struct intel_scaler *scaler = @@ -381,7 +369,7 @@ skl_program_scaler(struct intel_plane *plane, 0, INT_MAX); /* TODO: handle sub-pixel coordinates */ - if (is_planar_yuv_format(plane_state->base.fb->format->format) && + if (drm_format_info_is_yuv_semiplanar(fb->format) && !icl_is_hdr_plane(dev_priv, plane->id)) { y_hphase = skl_scaler_calc_phase(1, hscale, false); y_vphase = skl_scaler_calc_phase(1, vscale, false); @@ -1790,7 +1778,7 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s int src_w = drm_rect_width(&plane_state->base.src) >> 16; /* Display WA #1106 */ - if (is_planar_yuv_format(fb->format->format) && src_w & 3 && + if (drm_format_info_is_yuv_semiplanar(fb->format) && src_w & 3 && (rotation == 
DRM_MODE_ROTATE_270 || rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) { DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n"); @@ -1817,7 +1805,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, /* use scaler when colorkey is not required */ if (!plane_state->ckey.flags && intel_fb_scalable(fb)) { min_scale = 1; - max_scale = skl_max_scale(crtc_state, fb->format->format); + max_scale = skl_max_scale(crtc_state, fb->format); } ret = drm_atomic_helper_check_plane_state(&plane_state->base, diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h index 093a2d156f1e..229336214f68 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.h +++ b/drivers/gpu/drm/i915/display/intel_sprite.h @@ -17,7 +17,6 @@ struct drm_i915_private; struct intel_crtc_state; struct intel_plane_state; -bool is_planar_yuv_format(u32 pixelformat); int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs); struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d0ceb272551f..6aa40f546226 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4009,7 +4009,8 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); - if (is_planar_yuv_format(fourcc)) + if (fourcc && + drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc))) swap(val, val2); skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); @@ -4197,25 +4198,23 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, static u64 skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, - const int plane) + int color_plane) { - struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + const struct drm_framebuffer *fb = plane_state->base.fb; u32 data_rate; u32 width = 0, height = 0; - struct drm_framebuffer *fb; - u32 format; uint_fixed_16_16_t down_scale_amount; u64 rate; if (!plane_state->base.visible) return 0; - fb = plane_state->base.fb; - format = fb->format->format; - - if (intel_plane->id == PLANE_CURSOR) + if (plane->id == PLANE_CURSOR) return 0; - if (plane == 1 && !is_planar_yuv_format(format)) + + if (color_plane == 1 && + !drm_format_info_is_yuv_semiplanar(fb->format)) return 0; /* @@ -4227,7 +4226,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, height = drm_rect_height(&plane_state->base.src) >> 16; /* UV plane does 1/2 pixel sub-sampling */ - if (plane == 1 && is_planar_yuv_format(format)) { + if (color_plane == 1) { width /= 2; height /= 2; } @@ -4238,7 +4237,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount); - rate *= fb->format->cpp[plane]; + rate *= fb->format->cpp[color_plane]; return rate; } @@ -4643,7 +4642,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, u32 interm_pbpl; /* only planar format has two planes */ - if (color_plane == 1 && !is_planar_yuv_format(format->format)) { + if (color_plane == 1 && !drm_format_info_is_yuv_semiplanar(format)) { DRM_DEBUG_KMS("Non planar format have single plane\n"); return -EINVAL; } @@ -4655,7 +4654,7 @@ skl_compute_wm_params(const struct intel_crtc_state 
*crtc_state, wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED; wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS || modifier == I915_FORMAT_MOD_Yf_TILED_CCS; - wp->is_planar = is_planar_yuv_format(format->format); + wp->is_planar = drm_format_info_is_yuv_semiplanar(format); wp->width = width; if (color_plane == 1 && wp->is_planar) -- cgit v1.2.3 From 76c36a4391641aa37ce7adede32d511a07450d5a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 13 Sep 2019 22:31:55 +0300 Subject: drm/i915: Allow downscale factor of <3.0 on glk+ for all formats MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bspec says that glk+ max downscale factor is <3.0 for all pixel formats. Older platforms had a max of <2.0 for NV12. Update the code to deal with this. Reviewed-by: Maarten Lankhorst Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190913193157.9556-3-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 276adc303564..1a72706ebee2 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -14556,7 +14556,7 @@ skl_max_scale(const struct intel_crtc_state *crtc_state, { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int max_scale, mult; + int max_scale; int crtc_clock, max_dotclk, tmpclk1, tmpclk2; if (!crtc_state->base.enable) @@ -14577,8 +14577,11 @@ skl_max_scale(const struct intel_crtc_state *crtc_state, * or * cdclk/crtc_clock */ - mult = drm_format_info_is_yuv_semiplanar(format) ? 2 : 3; - tmpclk1 = (1 << 16) * mult - 1; + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) || + !drm_format_info_is_yuv_semiplanar(format)) + tmpclk1 = 0x30000 - 1; + else + tmpclk1 = 0x20000 - 1; tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock); max_scale = min(tmpclk1, tmpclk2); -- cgit v1.2.3 From fe4709a8d033975a328a665a627acd35e628117a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 13 Sep 2019 22:31:56 +0300 Subject: drm/i915: Extract intel_modeset_calc_cdclk() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Exfiltrate the cdclk code from intel_modeset_checks() into intel_modeset_calc_cdclk(). Reviewed-by: Maarten Lankhorst Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190913193157.9556-4-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 135 ++++++++++++++++++++++++++- drivers/gpu/drm/i915/display/intel_cdclk.h | 6 +- drivers/gpu/drm/i915/display/intel_display.c | 123 +----------------------- 3 files changed, 135 insertions(+), 129 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index ea3f75c72fe8..43564295b864 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -21,6 +21,7 @@ * DEALINGS IN THE SOFTWARE. */ +#include "intel_atomic.h" #include "intel_cdclk.h" #include "intel_display_types.h" #include "intel_sideband.h" @@ -1772,9 +1773,9 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a, * Returns: * True if the CDCLK states require just a cd2x divider update, false if not. 
*/ -bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *a, - const struct intel_cdclk_state *b) +static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv, + const struct intel_cdclk_state *a, + const struct intel_cdclk_state *b) { /* Older hw doesn't have the capability */ if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv)) @@ -1793,8 +1794,8 @@ bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv, * Returns: * True if the CDCLK states don't match, false if they do. */ -bool intel_cdclk_changed(const struct intel_cdclk_state *a, - const struct intel_cdclk_state *b) +static bool intel_cdclk_changed(const struct intel_cdclk_state *a, + const struct intel_cdclk_state *b) { return intel_cdclk_needs_modeset(a, b) || a->voltage_level != b->voltage_level; @@ -2220,6 +2221,130 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) return 0; } +static int intel_lock_all_pipes(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; + + /* Add all pipes to the state */ + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state; + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } + + return 0; +} + +static int intel_modeset_all_pipes(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; + + /* + * Add all pipes to the state, and force + * a modeset on all the active ones. + */ + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state; + int ret; + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (!crtc_state->base.active || + drm_atomic_crtc_needs_modeset(&crtc_state->base)) + continue; + + crtc_state->base.mode_changed = true; + + ret = drm_atomic_add_affected_connectors(&state->base, + &crtc->base); + if (ret) + return ret; + + ret = drm_atomic_add_affected_planes(&state->base, + &crtc->base); + if (ret) + return ret; + + crtc_state->update_planes |= crtc_state->active_planes; + } + + return 0; +} + +int intel_modeset_calc_cdclk(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + enum pipe pipe; + int ret; + + if (!dev_priv->display.modeset_calc_cdclk) + return 0; + + ret = dev_priv->display.modeset_calc_cdclk(state); + if (ret) + return ret; + + /* + * Writes to dev_priv->cdclk.logical must protected by + * holding all the crtc locks, even if we don't end up + * touching the hardware + */ + if (intel_cdclk_changed(&dev_priv->cdclk.logical, + &state->cdclk.logical)) { + ret = intel_lock_all_pipes(state); + if (ret < 0) + return ret; + } + + if (is_power_of_2(state->active_pipes)) { + struct intel_crtc *crtc; + struct intel_crtc_state *crtc_state; + + pipe = ilog2(state->active_pipes); + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + if (crtc_state && + drm_atomic_crtc_needs_modeset(&crtc_state->base)) + pipe = INVALID_PIPE; + } else { + pipe = INVALID_PIPE; + } + + /* All pipes must be switched off while we change the cdclk. 
*/ + if (pipe != INVALID_PIPE && + intel_cdclk_needs_cd2x_update(dev_priv, + &dev_priv->cdclk.actual, + &state->cdclk.actual)) { + ret = intel_lock_all_pipes(state); + if (ret) + return ret; + + state->cdclk.pipe = pipe; + } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual, + &state->cdclk.actual)) { + ret = intel_modeset_all_pipes(state); + if (ret) + return ret; + + state->cdclk.pipe = INVALID_PIPE; + } + + DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n", + state->cdclk.logical.cdclk, + state->cdclk.actual.cdclk); + DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n", + state->cdclk.logical.voltage_level, + state->cdclk.actual.voltage_level); + + return 0; +} + static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) { int max_cdclk_freq = dev_priv->max_cdclk_freq; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index 1afa84ab6018..cf71394cc79c 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -29,13 +29,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv); void intel_update_max_cdclk(struct drm_i915_private *dev_priv); void intel_update_cdclk(struct drm_i915_private *dev_priv); void intel_update_rawclk(struct drm_i915_private *dev_priv); -bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *a, - const struct intel_cdclk_state *b); bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a, const struct intel_cdclk_state *b); -bool intel_cdclk_changed(const struct intel_cdclk_state *a, - const struct intel_cdclk_state *b); void intel_cdclk_swap_state(struct intel_atomic_state *state); void intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv, @@ -49,5 +44,6 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv, enum pipe pipe); void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state, const char *context); +int intel_modeset_calc_cdclk(struct intel_atomic_state *state); #endif /* __INTEL_CDCLK_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 1a72706ebee2..befebfd5db66 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -13433,65 +13433,12 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) return 0; } -static int intel_lock_all_pipes(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc *crtc; - - /* Add all pipes to the state */ - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state; - - crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - } - - return 0; -} - -static int intel_modeset_all_pipes(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc *crtc; - - /* - * Add all pipes to the state, and force - * a modeset on all the active ones. 
- */ - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state; - int ret; - - crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - - if (!crtc_state->base.active || needs_modeset(crtc_state)) - continue; - - crtc_state->base.mode_changed = true; - - ret = drm_atomic_add_affected_connectors(&state->base, - &crtc->base); - if (ret) - return ret; - - ret = drm_atomic_add_affected_planes(&state->base, - &crtc->base); - if (ret) - return ret; - } - - return 0; -} - static int intel_modeset_checks(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *old_crtc_state, *new_crtc_state; struct intel_crtc *crtc; - int ret = 0, i; + int ret, i; if (!check_digital_port_conflicts(state)) { DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); @@ -13519,71 +13466,9 @@ static int intel_modeset_checks(struct intel_atomic_state *state) state->active_pipe_changes |= BIT(crtc->pipe); } - /* - * See if the config requires any additional preparation, e.g. - * to adjust global state with pipes off. We need to do this - * here so we can get the modeset_pipe updated config for the new - * mode set on this crtc. For other crtcs we need to use the - * adjusted_mode bits in the crtc directly. - */ - if (dev_priv->display.modeset_calc_cdclk) { - enum pipe pipe; - - ret = dev_priv->display.modeset_calc_cdclk(state); - if (ret < 0) - return ret; - - /* - * Writes to dev_priv->cdclk.logical must protected by - * holding all the crtc locks, even if we don't end up - * touching the hardware - */ - if (intel_cdclk_changed(&dev_priv->cdclk.logical, - &state->cdclk.logical)) { - ret = intel_lock_all_pipes(state); - if (ret < 0) - return ret; - } - - if (is_power_of_2(state->active_pipes)) { - struct intel_crtc *crtc; - struct intel_crtc_state *crtc_state; - - pipe = ilog2(state->active_pipes); - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - if (crtc_state && needs_modeset(crtc_state)) - pipe = INVALID_PIPE; - } else { - pipe = INVALID_PIPE; - } - - /* All pipes must be switched off while we change the cdclk. */ - if (pipe != INVALID_PIPE && - intel_cdclk_needs_cd2x_update(dev_priv, - &dev_priv->cdclk.actual, - &state->cdclk.actual)) { - ret = intel_lock_all_pipes(state); - if (ret < 0) - return ret; - - state->cdclk.pipe = pipe; - } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual, - &state->cdclk.actual)) { - ret = intel_modeset_all_pipes(state); - if (ret < 0) - return ret; - - state->cdclk.pipe = INVALID_PIPE; - } - - DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n", - state->cdclk.logical.cdclk, - state->cdclk.actual.cdclk); - DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n", - state->cdclk.logical.voltage_level, - state->cdclk.actual.voltage_level); - } + ret = intel_modeset_calc_cdclk(state); + if (ret) + return ret; intel_modeset_clear_plls(state); -- cgit v1.2.3 From 2e7f76c1e4b6480226cee3f6e358172891012882 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 13 Sep 2019 22:31:57 +0300 Subject: drm/i915: s/pipe_config/crtc_state/ in intel_crtc_atomic_check() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Clean up the mess with the drm vs. intel types in intel_crtc_atomic_check() and rename varibles accordingly. 
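The rename goes hand in hand with the usual underscore-prefix trick: keep the drm prototype, convert to the intel wrapper type once at the top, and use the intel names from there on. A tiny standalone illustration of that container_of-style conversion (types and fields are simplified stand-ins, not the real state structs):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* A "base" state embedded inside the driver-specific state. */
struct drm_crtc_state   { int active; };
struct intel_crtc_state { struct drm_crtc_state base; int update_pipe; };

static struct intel_crtc_state *to_intel_crtc_state(struct drm_crtc_state *s)
{
	return container_of(s, struct intel_crtc_state, base);
}

/* The callback keeps the drm prototype ("_crtc_state") but converts once,
 * up front, and then works purely with the intel type ("crtc_state"). */
static int atomic_check(struct drm_crtc_state *_crtc_state)
{
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(_crtc_state);

	return crtc_state->base.active && crtc_state->update_pipe;
}

int main(void)
{
	struct intel_crtc_state s = { .base = { .active = 1 }, .update_pipe = 1 };

	printf("check: %d\n", atomic_check(&s.base));
	return 0;
}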
Reviewed-by: Maarten Lankhorst Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190913193157.9556-5-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 54 ++++++++++++++-------------- 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index befebfd5db66..12bb8f951edf 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -11760,25 +11760,24 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; } -static int intel_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *crtc_state) +static int intel_crtc_atomic_check(struct drm_crtc *_crtc, + struct drm_crtc_state *_crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *pipe_config = - to_intel_crtc_state(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(_crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(_crtc_state); int ret; - bool mode_changed = needs_modeset(pipe_config); + bool mode_changed = needs_modeset(crtc_state); if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && - mode_changed && !crtc_state->active) - pipe_config->update_wm_post = true; + mode_changed && !crtc_state->base.active) + crtc_state->update_wm_post = true; - if (mode_changed && crtc_state->enable && + if (mode_changed && crtc_state->base.enable && dev_priv->display.crtc_compute_clock && - !WARN_ON(pipe_config->shared_dpll)) { - ret = dev_priv->display.crtc_compute_clock(intel_crtc, - pipe_config); + !WARN_ON(crtc_state->shared_dpll)) { + ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state); if (ret) return ret; } @@ -11787,19 +11786,19 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, * May need to update pipe gamma enable bits * when C8 planes are getting enabled/disabled. */ - if (c8_planes_changed(pipe_config)) - crtc_state->color_mgmt_changed = true; + if (c8_planes_changed(crtc_state)) + crtc_state->base.color_mgmt_changed = true; - if (mode_changed || pipe_config->update_pipe || - crtc_state->color_mgmt_changed) { - ret = intel_color_check(pipe_config); + if (mode_changed || crtc_state->update_pipe || + crtc_state->base.color_mgmt_changed) { + ret = intel_color_check(crtc_state); if (ret) return ret; } ret = 0; if (dev_priv->display.compute_pipe_wm) { - ret = dev_priv->display.compute_pipe_wm(pipe_config); + ret = dev_priv->display.compute_pipe_wm(crtc_state); if (ret) { DRM_DEBUG_KMS("Target pipe watermarks are invalid\n"); return ret; @@ -11815,7 +11814,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, * old state and the new state. We can program these * immediately. 
*/ - ret = dev_priv->display.compute_intermediate_wm(pipe_config); + ret = dev_priv->display.compute_intermediate_wm(crtc_state); if (ret) { DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); return ret; @@ -11823,21 +11822,20 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, } if (INTEL_GEN(dev_priv) >= 9) { - if (mode_changed || pipe_config->update_pipe) - ret = skl_update_scaler_crtc(pipe_config); + if (mode_changed || crtc_state->update_pipe) + ret = skl_update_scaler_crtc(crtc_state); if (!ret) - ret = icl_check_nv12_planes(pipe_config); + ret = icl_check_nv12_planes(crtc_state); if (!ret) - ret = skl_check_pipe_max_pixel_rate(intel_crtc, - pipe_config); + ret = skl_check_pipe_max_pixel_rate(crtc, crtc_state); if (!ret) - ret = intel_atomic_setup_scalers(dev_priv, intel_crtc, - pipe_config); + ret = intel_atomic_setup_scalers(dev_priv, crtc, + crtc_state); } if (HAS_IPS(dev_priv)) - pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config); + crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state); return ret; } -- cgit v1.2.3 From ef404bc6592075925997d3626e968e4bcd59721f Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 16 Sep 2019 12:29:01 +0300 Subject: drm/i915: stop conflating HAS_DISPLAY() and disabled display MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stop setting ->pipe_mask to zero when display is disabled, allowing us to have different code paths for not actually having display hardware, and having display hardware disabled. This lets us develop those two avenues independently. There are no functional changes for when there is no display. However, all uses of for_each_pipe() and for_each_pipe_masked() will start running for the disabled display case. Put one of the more significant ones behind checks for INTEL_DISPLAY_ENABLED(), otherwise the cases should not be hit with disabled display, or they seem benign. Fingers crossed. All in all, this might not be the ideal solution. In fact we may have had something along the lines of this in the past, but we ended up conflating the two cases. Possibly even by recommendation by yours truly; I did not dare dig up that part of the history. But the perfect is the enemy of the good, this is a straightforward change, and lets us get actual work done in both fronts without interfering with each other. Cc: Chris Wilson Cc: José Roberto de Souza Cc: Ville Syrjälä Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190916092901.31440-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 12 +++++++----- drivers/gpu/drm/i915/intel_device_info.c | 8 ++------ 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 12bb8f951edf..1cc74844d3ea 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -16167,11 +16167,13 @@ int intel_modeset_init(struct drm_device *dev) INTEL_NUM_PIPES(dev_priv), INTEL_NUM_PIPES(dev_priv) > 1 ? 
"s" : ""); - for_each_pipe(dev_priv, pipe) { - ret = intel_crtc_init(dev_priv, pipe); - if (ret) { - drm_mode_config_cleanup(dev); - return ret; + if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) { + for_each_pipe(dev_priv, pipe) { + ret = intel_crtc_init(dev_priv, pipe); + if (ret) { + drm_mode_config_cleanup(dev); + return ret; + } } } diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 727089dcd280..728c881718a2 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -894,12 +894,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) runtime->num_sprites[pipe] = 1; } - if (i915_modparams.disable_display) { - DRM_INFO("Display disabled (module parameter)\n"); - info->pipe_mask = 0; - } else if (HAS_DISPLAY(dev_priv) && - (IS_GEN_RANGE(dev_priv, 7, 8)) && - HAS_PCH_SPLIT(dev_priv)) { + if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) && + HAS_PCH_SPLIT(dev_priv)) { u32 fuse_strap = I915_READ(FUSE_STRAP); u32 sfuse_strap = I915_READ(SFUSE_STRAP); -- cgit v1.2.3 From bb120e1171a966429287865134265811887daa6c Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sun, 15 Sep 2019 21:37:00 +0100 Subject: drm/i915: Show the logical context ring state on dumping Include the active context register state when dumping the engine. Suggested-by: Mika Kuoppala Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Stuart Summers Link: https://patchwork.freedesktop.org/patch/msgid/20190915203701.29163-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index a8014c59b388..3c176b0f4b45 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1404,6 +1404,11 @@ void intel_engine_dump(struct intel_engine_cs *engine, rq->timeline->hwsp_offset); print_request_ring(m, rq); + + if (rq->hw_context->lrc_reg_state) { + drm_printf(m, "Logical Ring Context:\n"); + hexdump(m, rq->hw_context->lrc_reg_state, PAGE_SIZE); + } } spin_unlock_irqrestore(&engine->active.lock, flags); -- cgit v1.2.3 From 80fa64d62067ea7390cd27d924dcb7c028e455e9 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Sep 2019 09:00:29 +0100 Subject: drm/i915: Only apply a rmw mmio update if the value changes If we try to clear, or even set, a bit in the register that doesn't change the register state; skip the write. There's a slight danger in that the register acts as a latch-on-write, but I do not think we use a rmw cycle with any such latch registers. 
Suggested-by: Daniele Ceraolo Spurio Signed-off-by: Chris Wilson Cc: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190917080029.27632-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/intel_uncore.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 414fc2cb0459..dcfa243892c6 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -378,23 +378,23 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore, static inline void intel_uncore_rmw(struct intel_uncore *uncore, i915_reg_t reg, u32 clear, u32 set) { - u32 val; + u32 old, val; - val = intel_uncore_read(uncore, reg); - val &= ~clear; - val |= set; - intel_uncore_write(uncore, reg, val); + old = intel_uncore_read(uncore, reg); + val = (old & ~clear) | set; + if (val != old) + intel_uncore_write(uncore, reg, val); } static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clear, u32 set) { - u32 val; + u32 old, val; - val = intel_uncore_read_fw(uncore, reg); - val &= ~clear; - val |= set; - intel_uncore_write_fw(uncore, reg, val); + old = intel_uncore_read_fw(uncore, reg); + val = (old & ~clear) | set; + if (val != old) + intel_uncore_write_fw(uncore, reg, val); } static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore, -- cgit v1.2.3 From c210e85b8f3371168ce78c8da00b913839a84ec7 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Sep 2019 13:30:55 +0100 Subject: drm/i915/tgl: Extend MI_SEMAPHORE_WAIT On Tigerlake, MI_SEMAPHORE_WAIT grew an extra dword, so be sure to update the length field and emit that extra parameter and any padding noop as required. v2: Define the token shift while we are adding the updated MI_SEMAPHORE_WAIT v3: Use int instead of bool in the addition so that readers are not left wondering about the intricacies of the C spec. Now they just have to worry what the integer value of a boolean operation is... 
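In command-stream terms the change is small: the header's length field grows by one, an extra dword carries the token, and a trailing NOOP keeps the emitted block an even number of dwords. A simplified, self-contained sketch (the encodings are abbreviated stand-ins; see the MI_SEMAPHORE_WAIT/MI_SEMAPHORE_WAIT_TOKEN definitions in the diff below for the real ones):

#include <stdint.h>
#include <stdio.h>

#define MI_INSTR(op, flags)  (((uint32_t)(op) << 23) | (flags))
#define SEM_WAIT             MI_INSTR(0x1c, 2)  /* 4 dwords total */
#define SEM_WAIT_TOKEN       MI_INSTR(0x1c, 3)  /* gen12: one dword longer */
#define MI_NOOP              0

/* Emit a semaphore wait; with the token the command is one dword longer,
 * and a NOOP pads the emission to an even number of dwords. */
static int emit_sem_wait(uint32_t *cs, int has_token, uint32_t value, uint32_t addr)
{
	uint32_t *start = cs;

	*cs++ = has_token ? SEM_WAIT_TOKEN : SEM_WAIT;
	*cs++ = value;           /* semaphore data to compare against */
	*cs++ = addr;            /* semaphore address, low dword */
	*cs++ = 0;               /* address, high dword */
	if (has_token) {
		*cs++ = 0;       /* token dword */
		*cs++ = MI_NOOP; /* padding */
	}
	return (int)(cs - start);
}

int main(void)
{
	uint32_t buf[8];

	printf("pre-gen12 length: %d dwords\n", emit_sem_wait(buf, 0, 1, 0x1000));
	printf("gen12 length:     %d dwords\n", emit_sem_wait(buf, 1, 1, 0x1000));
	return 0;
}

The emitter in the patch bumps the length field by adding has_token (an int holding 0 or 1) straight into the header instead of selecting a second opcode, which is what the v3 note about bool versus int alludes to.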
Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Daniele Ceraolo Spurio Cc: Michal Winiarski Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190917123055.28965-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 3 ++ drivers/gpu/drm/i915/gt/intel_lrc.c | 71 +++++++++++++++++++++++++--- drivers/gpu/drm/i915/i915_pci.c | 1 - drivers/gpu/drm/i915/i915_request.c | 21 ++++++-- 4 files changed, 84 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index fbad403ab7ac..f78b13d74e17 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -112,6 +112,7 @@ #define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */ #define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) #define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ +#define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */ #define MI_SEMAPHORE_POLL (1 << 15) #define MI_SEMAPHORE_SAD_GT_SDD (0 << 12) #define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12) @@ -119,6 +120,8 @@ #define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12) #define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12) #define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12) +#define MI_SEMAPHORE_TOKEN_MASK REG_GENMASK(9, 5) +#define MI_SEMAPHORE_TOKEN_SHIFT 5 #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) #define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2) #define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index a3f0e4999744..a99166a2d2eb 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2879,6 +2879,22 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) return gen8_emit_fini_breadcrumb_footer(request, cs); } +static u32 * +gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) +{ + cs = gen8_emit_ggtt_write_rcs(cs, + request->fence.seqno, + request->timeline->hwsp_offset, + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_TILE_CACHE_FLUSH | + PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_DC_FLUSH_ENABLE | + PIPE_CONTROL_FLUSH_ENABLE); + + return gen8_emit_fini_breadcrumb_footer(request, cs); +} + /* * Note that the CS instruction pre-parser will not stall on the breadcrumb * flush and will continue pre-fetching the instructions after it before the @@ -2897,8 +2913,49 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) * All the above applies only to the instructions themselves. Non-inline data * used by the instructions is not pre-fetched. 
*/ -static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, - u32 *cs) + +static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs) +{ + *cs++ = MI_SEMAPHORE_WAIT_TOKEN | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = intel_hws_preempt_address(request->engine); + *cs++ = 0; + *cs++ = 0; + *cs++ = MI_NOOP; + + return cs; +} + +static __always_inline u32* +gen12_emit_fini_breadcrumb_footer(struct i915_request *request, u32 *cs) +{ + *cs++ = MI_USER_INTERRUPT; + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + if (intel_engine_has_semaphores(request->engine)) + cs = gen12_emit_preempt_busywait(request, cs); + + request->tail = intel_ring_offset(request, cs); + assert_ring_tail_valid(request->ring, request->tail); + + return gen8_emit_wa_tail(request, cs); +} + +static u32 *gen12_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) +{ + cs = gen8_emit_ggtt_write(cs, + request->fence.seqno, + request->timeline->hwsp_offset, + 0); + + return gen12_emit_fini_breadcrumb_footer(request, cs); +} + +static u32 * +gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) { cs = gen8_emit_ggtt_write_rcs(cs, request->fence.seqno, @@ -2910,7 +2967,7 @@ static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, PIPE_CONTROL_DC_FLUSH_ENABLE | PIPE_CONTROL_FLUSH_ENABLE); - return gen8_emit_fini_breadcrumb_footer(request, cs); + return gen12_emit_fini_breadcrumb_footer(request, cs); } static void execlists_park(struct intel_engine_cs *engine) @@ -2939,9 +2996,6 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) engine->flags |= I915_ENGINE_HAS_PREEMPTION; } - if (INTEL_GEN(engine->i915) >= 12) /* XXX disabled for debugging */ - engine->flags &= ~I915_ENGINE_HAS_SEMAPHORES; - if (engine->class != COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) >= 12) engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO; } @@ -2971,6 +3025,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->emit_flush = gen8_emit_flush; engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb; + if (INTEL_GEN(engine->i915) >= 12) + engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb; engine->set_default_submission = intel_execlists_set_default_submission; @@ -3016,6 +3072,9 @@ static void rcs_submission_override(struct intel_engine_cs *engine) { switch (INTEL_GEN(engine->i915)) { case 12: + engine->emit_flush = gen11_emit_flush_render; + engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs; + break; case 11: engine->emit_flush = gen11_emit_flush_render; engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs; diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index e4a26bbd8788..fe6941c8fc99 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -797,7 +797,6 @@ static const struct intel_device_info intel_tigerlake_12_info = { .display.has_modular_fia = 1, .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), - .has_logical_ring_preemption = false, /* XXX disabled for debugging */ .engine_mask = BIT(RCS0), /* XXX reduced for debugging */ }; diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 754a78364a63..3ecf92aa5fc1 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -783,7 +783,9 @@ emit_semaphore_wait(struct i915_request *to, struct i915_request 
*from, gfp_t gfp) { + const int has_token = INTEL_GEN(to->i915) >= 12; u32 hwsp_offset; + int len; u32 *cs; int err; @@ -810,7 +812,11 @@ emit_semaphore_wait(struct i915_request *to, if (err) return err; - cs = intel_ring_begin(to, 4); + len = 4; + if (has_token) + len += 2; + + cs = intel_ring_begin(to, len); if (IS_ERR(cs)) return PTR_ERR(cs); @@ -822,13 +828,18 @@ emit_semaphore_wait(struct i915_request *to, * (post-wrap) values than they were expecting (and so wait * forever). */ - *cs++ = MI_SEMAPHORE_WAIT | - MI_SEMAPHORE_GLOBAL_GTT | - MI_SEMAPHORE_POLL | - MI_SEMAPHORE_SAD_GTE_SDD; + *cs++ = (MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_GTE_SDD) + + has_token; *cs++ = from->fence.seqno; *cs++ = hwsp_offset; *cs++ = 0; + if (has_token) { + *cs++ = 0; + *cs++ = MI_NOOP; + } intel_ring_advance(to, cs); to->sched.semaphores |= from->engine->mask; -- cgit v1.2.3 From 8698ba53cd7173c32320ebbef4d389d41ebb5780 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 16 Sep 2019 16:32:51 -0700 Subject: drm/i915/cml: Add second PCH ID for CMP The CMP PCH ID we have in the driver is correct for the CML-U machines we have in our CI system, but the CML-S and CML-H CI machines appear to use a different PCH ID, leading our driver to detect no PCH for them. Cc: Rodrigo Vivi Cc: Anusha Srivatsa References: 729ae330a0f2e2 ("drm/i915/cml: Introduce Comet Lake PCH") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111461 Signed-off-by: Matt Roper Acked-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190916233251.387-1-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/intel_pch.c | 1 + drivers/gpu/drm/i915/intel_pch.h | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index fa864d8f2b73..15f8bff141f9 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -69,6 +69,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); return PCH_CNP; case INTEL_PCH_CMP_DEVICE_ID_TYPE: + case INTEL_PCH_CMP2_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n"); WARN_ON(!IS_COFFEELAKE(dev_priv)); /* CometPoint is CNP Compatible */ diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index e6a2d65f19c6..c29c81ec7971 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -41,6 +41,7 @@ enum intel_pch { #define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80 #define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280 +#define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 #define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00 #define INTEL_PCH_MCC2_DEVICE_ID_TYPE 0x3880 -- cgit v1.2.3 From c107d613f9204ff9c7624c229938153d7492c56e Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Wed, 18 Sep 2019 06:57:30 +0000 Subject: irqchip/gic-v3: Fix GIC_LINE_NR accessor As per GIC spec, ITLinesNumber indicates the maximum SPI INTID that the GIC implementation supports. And the maximum SPI INTID an implementation might support is 1019 (field value 11111). max(GICD_TYPER_SPIS(...), 1020) is not what we actually want for GIC_LINE_NR. Fix it to min(GICD_TYPER_SPIS(...), 1020). 
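For reference, a user-space sketch of why min() is the right clamp. EXAMPLE_SPIS and the LINE_NR_* helpers below are made-up names that assume the 32 * (ITLinesNumber + 1) encoding from the GICv3 architecture; they are not the driver's GICD_TYPER_SPIS()/GIC_LINE_NR macros.

#include <assert.h>

#define EXAMPLE_SPIS(itlines)	(32U * ((itlines) + 1U))
/* old definition, max(): can only ever grow the result */
#define LINE_NR_OLD(itlines)	(EXAMPLE_SPIS(itlines) > 1020U ? \
				 EXAMPLE_SPIS(itlines) : 1020U)
/* fixed definition, min(): clamps to the architectural ceiling */
#define LINE_NR_FIXED(itlines)	(EXAMPLE_SPIS(itlines) < 1020U ? \
				 EXAMPLE_SPIS(itlines) : 1020U)

int main(void)
{
	/* Small implementation: ITLinesNumber = 1, i.e. 64 SPI lines. */
	assert(LINE_NR_OLD(1) == 1020);		/* wrong: claims lines that do not exist */
	assert(LINE_NR_FIXED(1) == 64);

	/* Fully populated: ITLinesNumber = 31 gives 1024, but INTIDs 1020..1023 are special. */
	assert(LINE_NR_OLD(31) == 1024);	/* wrong: exceeds the 1020 limit */
	assert(LINE_NR_FIXED(31) == 1020);

	return 0;
}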
Signed-off-by: Zenghui Yu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/1568789850-14080-1-git-send-email-yuzenghui@huawei.com --- drivers/irqchip/irq-gic-v3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 422664ac5f53..1edc99335a94 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -59,7 +59,7 @@ static struct gic_chip_data gic_data __read_mostly; static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); #define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) -#define GIC_LINE_NR max(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U) +#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U) #define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer) /* -- cgit v1.2.3 From 56c05de6bd773b96deca379370965c49042b5fbf Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 17 Sep 2019 20:47:46 +0100 Subject: drm/i915: Extend Haswell GT1 PSMI workaround to all A few times in CI, we have detected a GPU hang on our Haswell GT2 systems with the characteristic IPEHR of 0x780c0000. When the PSMI w/a was first introduced, it was applied to all Haswell, but later on we found an erratum that supposedly restricted the issue to GT1 and so constrained it to be applied only on GT1. That may have been a mistake... Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111692 Fixes: 167bc759e823 ("drm/i915: Restrict PSMI context load w/a to Haswell GT1") References: 2c550183476d ("drm/i915: Disable PSMI sleep messages on all rings around context switches") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Acked-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190917194746.26710-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index a73296e6b13d..a25b84b12ef1 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -1574,7 +1574,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) struct intel_engine_cs *engine = rq->engine; enum intel_engine_id id; const int num_engines = - IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0; + IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0; bool force_restore = false; int len; u32 *cs; -- cgit v1.2.3 From bb0fed1c60cccbe4063b455a7228818395dac86e Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 15 Sep 2019 15:17:45 +0100 Subject: irqchip/sifive-plic: Switch to fasteoi flow The SiFive PLIC interrupt controller seems to have all the HW features to support the fasteoi flow, but the driver seems to be stuck in a distant past. Bring it into the 21st century.
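For context, a rough sketch of what the fasteoi contract looks like for a claim/complete style controller. The example_* names are hypothetical and the snippet is illustrative only; the real conversion is in the hunks below. The point is that the completion write moves out of the top-level dispatch loop into .irq_eoi, which handle_fasteoi_irq() calls once per handled interrupt, while mask/unmask become plain on-demand operations.

static void example_mask(struct irq_data *d)
{
	/* disable routing of d->hwirq at the controller */
}

static void example_unmask(struct irq_data *d)
{
	/* enable routing of d->hwirq at the controller */
}

static void example_eoi(struct irq_data *d)
{
	/* completion write; for a PLIC-like controller this is the claim/complete register */
	writel(d->hwirq, example_base + EXAMPLE_CLAIM_COMPLETE);
}

static struct irq_chip example_chip = {
	.name		= "example-fasteoi",
	.irq_mask	= example_mask,
	.irq_unmask	= example_unmask,
	.irq_eoi	= example_eoi,
};

static int example_irqdomain_map(struct irq_domain *d, unsigned int irq,
				 irq_hw_number_t hwirq)
{
	/* fasteoi flow instead of handle_simple_irq() */
	irq_set_chip_and_handler(irq, &example_chip, handle_fasteoi_irq);
	return 0;
}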
Signed-off-by: Marc Zyngier Tested-by: Palmer Dabbelt (QEMU Boot) Tested-by: Darius Rad (on 2 HW PLIC implementations) Tested-by: Paul Walmsley (HiFive Unleashed) Reviewed-by: Palmer Dabbelt Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/8636gxskmj.wl-maz@kernel.org --- drivers/irqchip/irq-sifive-plic.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index cf755964f2f8..3e51deedbcc8 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -97,7 +97,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask, } } -static void plic_irq_enable(struct irq_data *d) +static void plic_irq_unmask(struct irq_data *d) { unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d), cpu_online_mask); @@ -106,7 +106,7 @@ static void plic_irq_enable(struct irq_data *d) plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1); } -static void plic_irq_disable(struct irq_data *d) +static void plic_irq_mask(struct irq_data *d) { plic_irq_toggle(cpu_possible_mask, d->hwirq, 0); } @@ -125,10 +125,8 @@ static int plic_set_affinity(struct irq_data *d, if (cpu >= nr_cpu_ids) return -EINVAL; - if (!irqd_irq_disabled(d)) { - plic_irq_toggle(cpu_possible_mask, d->hwirq, 0); - plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1); - } + plic_irq_toggle(cpu_possible_mask, d->hwirq, 0); + plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1); irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -136,14 +134,18 @@ static int plic_set_affinity(struct irq_data *d, } #endif +static void plic_irq_eoi(struct irq_data *d) +{ + struct plic_handler *handler = this_cpu_ptr(&plic_handlers); + + writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); +} + static struct irq_chip plic_chip = { .name = "SiFive PLIC", - /* - * There is no need to mask/unmask PLIC interrupts. They are "masked" - * by reading claim and "unmasked" when writing it back. - */ - .irq_enable = plic_irq_enable, - .irq_disable = plic_irq_disable, + .irq_mask = plic_irq_mask, + .irq_unmask = plic_irq_unmask, + .irq_eoi = plic_irq_eoi, #ifdef CONFIG_SMP .irq_set_affinity = plic_set_affinity, #endif @@ -152,7 +154,7 @@ static struct irq_chip plic_chip = { static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { - irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq); + irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq); irq_set_chip_data(irq, NULL); irq_set_noprobe(irq); return 0; @@ -188,7 +190,6 @@ static void plic_handle_irq(struct pt_regs *regs) hwirq); else generic_handle_irq(irq); - writel(hwirq, claim); } csr_set(sie, SIE_SEIE); } -- cgit v1.2.3 From 0d333ac7eb17f0f6c86db66afba70135f11da4a9 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 18 Sep 2019 18:53:30 -0700 Subject: drm/i915: fix SFC reset flow Our assumption that the we can ask the HW to lock the SFC even if not currently in use does not match the HW commitment. The expectation from the HW is that SW will not try to lock the SFC if the engine is not using it and if we do that the behavior is undefined; on ICL the HW ends up to returning the ack and ignoring our lock request, but this is not guaranteed and we shouldn't expect it going forward. Also, failing to get the ack while the SFC is in use means that we can't cleanly reset it, so fail the engine reset in that scenario. v2: drop rmw change, keep the log as debug and handle failure (Chris), improve comments (Tvrtko). 
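In pseudocode, the reworked sequence reads roughly as follows; this is only a condensed restatement of the gen11_lock_sfc()/gen11_reset_engines() changes in the diff below, not additional driver code.

/*
 *	if (!(READ(SFC_USAGE) & in_use_bit))
 *		return 0;		// engine not using the SFC: leave the lock alone
 *
 *	SET(SFC_FORCED_LOCK);
 *	ret = wait_for(SFC_FORCED_LOCK_ACK);
 *
 *	if (!(READ(SFC_USAGE) & in_use_bit))
 *		return 0;		// SFC released while we were waiting
 *	if (ret)
 *		return ret;		// in use but no ack: fail the engine reset
 *
 *	*hw_mask |= sfc_reset_bit;	// reset the SFC along with the engine
 *
 * After gen6_hw_domain_reset(), the unlock is driven by the current lock
 * status rather than by the return value of the lock attempt, so a lock
 * that landed after the timeout still gets cleaned up.
 */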
Reported-by: Owen Zhang Signed-off-by: Daniele Ceraolo Spurio Cc: Tvrtko Ursulin Cc: Chris Wilson Reviewed-by: Tvrtko Ursulin Acked-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190919015330.15435-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/intel_reset.c | 50 +++++++++++++++++++++++------------ 1 file changed, 33 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 8327220ac558..89048ca8924c 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -309,7 +309,7 @@ static int gen6_reset_engines(struct intel_gt *gt, return gen6_hw_domain_reset(gt, hw_mask); } -static u32 gen11_lock_sfc(struct intel_engine_cs *engine) +static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask) { struct intel_uncore *uncore = engine->uncore; u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access; @@ -318,6 +318,7 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine) i915_reg_t sfc_usage; u32 sfc_usage_bit; u32 sfc_reset_bit; + int ret; switch (engine->class) { case VIDEO_DECODE_CLASS: @@ -352,27 +353,33 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine) } /* - * Tell the engine that a software reset is going to happen. The engine - * will then try to force lock the SFC (if currently locked, it will - * remain so until we tell the engine it is safe to unlock; if currently - * unlocked, it will ignore this and all new lock requests). If SFC - * ends up being locked to the engine we want to reset, we have to reset - * it as well (we will unlock it once the reset sequence is completed). + * If the engine is using a SFC, tell the engine that a software reset + * is going to happen. The engine will then try to force lock the SFC. + * If SFC ends up being locked to the engine we want to reset, we have + * to reset it as well (we will unlock it once the reset sequence is + * completed). */ + if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)) + return 0; + rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit); - if (__intel_wait_for_register_fw(uncore, - sfc_forced_lock_ack, - sfc_forced_lock_ack_bit, - sfc_forced_lock_ack_bit, - 1000, 0, NULL)) { - DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n"); + ret = __intel_wait_for_register_fw(uncore, + sfc_forced_lock_ack, + sfc_forced_lock_ack_bit, + sfc_forced_lock_ack_bit, + 1000, 0, NULL); + + /* Was the SFC released while we were trying to lock it? */ + if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)) return 0; - } - if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit) - return sfc_reset_bit; + if (ret) { + DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n"); + return ret; + } + *hw_mask |= sfc_reset_bit; return 0; } @@ -430,12 +437,21 @@ static int gen11_reset_engines(struct intel_gt *gt, for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); hw_mask |= hw_engine_mask[engine->id]; - hw_mask |= gen11_lock_sfc(engine); + ret = gen11_lock_sfc(engine, &hw_mask); + if (ret) + goto sfc_unlock; } } ret = gen6_hw_domain_reset(gt, hw_mask); +sfc_unlock: + /* + * We unlock the SFC based on the lock status and not the result of + * gen11_lock_sfc to make sure that we clean properly if something + * wrong happened during the lock (e.g. lock acquired after timeout + * expiration). 
+ */ if (engine_mask != ALL_ENGINES) for_each_engine_masked(engine, gt->i915, engine_mask, tmp) gen11_unlock_sfc(engine); -- cgit v1.2.3 From 37fa0de3c137d5f54f7e64f53495c9d501d42a4d Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 18 Sep 2019 15:54:50 +0100 Subject: drm/i915: Verify the engine after acquiring the active.lock When using virtual engines, the rq->engine is not stable until we hold the engine->active.lock (as the virtual engine may be exchanged with the sibling). Since commit 22b7a426bbe1 ("drm/i915/execlists: Preempt-to-busy") we may retire a request concurrently with resubmitting it to HW, we need to be extra careful to verify we are holding the correct lock for the request's active list. This is similar to the issue we saw with rescheduling the virtual requests, see sched_lock_engine(). Or else: <4> [876.736126] list_add corruption. prev->next should be next (ffff8883f931a1f8), but was dead000000000100. (prev=ffff888361ffa610). <4> [876.736136] WARNING: CPU: 2 PID: 21 at lib/list_debug.c:28 __list_add_valid+0x4d/0x70 <4> [876.736137] Modules linked in: i915(+) amdgpu gpu_sched ttm vgem snd_hda_codec_hdmi snd_hda_codec_realtek snd_hda_codec_generic mei_hdcp x86_pkg_temp_thermal coretemp crct10dif_pclmul crc32_pclmul snd_intel_nhlt snd_hda_codec snd_hwdep snd_hda_core ghash_clmulni_intel e1000e cdc_ether usbnet mii snd_pcm ptp pps_core mei_me mei prime_numbers btusb btrtl btbcm btintel bluetooth ecdh_generic ecc [last unloaded: i915] <4> [876.736154] CPU: 2 PID: 21 Comm: ksoftirqd/2 Tainted: G U 5.3.0-CI-CI_DRM_6898+ #1 <4> [876.736156] Hardware name: Intel Corporation Ice Lake Client Platform/IceLake U DDR4 SODIMM PD RVP TLC, BIOS ICLSFWR1.R00.3183.A00.1905020411 05/02/2019 <4> [876.736157] RIP: 0010:__list_add_valid+0x4d/0x70 <4> [876.736159] Code: c3 48 89 d1 48 c7 c7 20 33 0e 82 48 89 c2 e8 4a 4a bc ff 0f 0b 31 c0 c3 48 89 c1 4c 89 c6 48 c7 c7 70 33 0e 82 e8 33 4a bc ff <0f> 0b 31 c0 c3 48 89 f2 4c 89 c1 48 89 fe 48 c7 c7 c0 33 0e 82 e8 <4> [876.736160] RSP: 0018:ffffc9000018bd30 EFLAGS: 00010082 <4> [876.736162] RAX: 0000000000000000 RBX: ffff888361ffc840 RCX: 0000000000000104 <4> [876.736163] RDX: 0000000080000104 RSI: 0000000000000000 RDI: 00000000ffffffff <4> [876.736164] RBP: ffffc9000018bd68 R08: 0000000000000000 R09: 0000000000000001 <4> [876.736165] R10: 00000000aed95de3 R11: 000000007fe927eb R12: ffff888361ffca10 <4> [876.736166] R13: ffff888361ffa610 R14: ffff888361ffc880 R15: ffff8883f931a1f8 <4> [876.736168] FS: 0000000000000000(0000) GS:ffff88849fd00000(0000) knlGS:0000000000000000 <4> [876.736169] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 <4> [876.736170] CR2: 00007f093a9173c0 CR3: 00000003bba08005 CR4: 0000000000760ee0 <4> [876.736171] PKRU: 55555554 <4> [876.736172] Call Trace: <4> [876.736226] __i915_request_submit+0x152/0x370 [i915] <4> [876.736263] __execlists_submission_tasklet+0x6da/0x1f50 [i915] <4> [876.736293] ? execlists_submission_tasklet+0x29/0x50 [i915] <4> [876.736321] execlists_submission_tasklet+0x34/0x50 [i915] <4> [876.736325] tasklet_action_common.isra.5+0x47/0xb0 <4> [876.736328] __do_softirq+0xd8/0x4ae <4> [876.736332] ? smpboot_thread_fn+0x23/0x280 <4> [876.736334] ? smpboot_thread_fn+0x6b/0x280 <4> [876.736336] run_ksoftirqd+0x2b/0x50 <4> [876.736338] smpboot_thread_fn+0x1d3/0x280 <4> [876.736341] ? sort_range+0x20/0x20 <4> [876.736343] kthread+0x119/0x130 <4> [876.736345] ? 
kthread_park+0xa0/0xa0 <4> [876.736347] ret_from_fork+0x24/0x50 <4> [876.736353] irq event stamp: 2290145 <4> [876.736356] hardirqs last enabled at (2290144): [] __slab_free+0x3e8/0x500 <4> [876.736358] hardirqs last disabled at (2290145): [] _raw_spin_lock_irqsave+0xd/0x50 <4> [876.736360] softirqs last enabled at (2290114): [] __do_softirq+0x33e/0x4ae <4> [876.736361] softirqs last disabled at (2290119): [] run_ksoftirqd+0x2b/0x50 <4> [876.736363] WARNING: CPU: 2 PID: 21 at lib/list_debug.c:28 __list_add_valid+0x4d/0x70 <4> [876.736364] ---[ end trace 3e58d6c7356c65bf ]--- <4> [876.736406] ------------[ cut here ]------------ <4> [876.736415] list_del corruption. prev->next should be ffff888361ffca10, but was ffff88840ac2c730 <4> [876.736421] WARNING: CPU: 2 PID: 5490 at lib/list_debug.c:53 __list_del_entry_valid+0x79/0x90 <4> [876.736422] Modules linked in: i915(+) amdgpu gpu_sched ttm vgem snd_hda_codec_hdmi snd_hda_codec_realtek snd_hda_codec_generic mei_hdcp x86_pkg_temp_thermal coretemp crct10dif_pclmul crc32_pclmul snd_intel_nhlt snd_hda_codec snd_hwdep snd_hda_core ghash_clmulni_intel e1000e cdc_ether usbnet mii snd_pcm ptp pps_core mei_me mei prime_numbers btusb btrtl btbcm btintel bluetooth ecdh_generic ecc [last unloaded: i915] <4> [876.736433] CPU: 2 PID: 5490 Comm: i915_selftest Tainted: G U W 5.3.0-CI-CI_DRM_6898+ #1 <4> [876.736435] Hardware name: Intel Corporation Ice Lake Client Platform/IceLake U DDR4 SODIMM PD RVP TLC, BIOS ICLSFWR1.R00.3183.A00.1905020411 05/02/2019 <4> [876.736436] RIP: 0010:__list_del_entry_valid+0x79/0x90 <4> [876.736438] Code: 0b 31 c0 c3 48 89 fe 48 c7 c7 30 34 0e 82 e8 ae 49 bc ff 0f 0b 31 c0 c3 48 89 f2 48 89 fe 48 c7 c7 68 34 0e 82 e8 97 49 bc ff <0f> 0b 31 c0 c3 48 c7 c7 a8 34 0e 82 e8 86 49 bc ff 0f 0b 31 c0 c3 <4> [876.736439] RSP: 0018:ffffc900003ef758 EFLAGS: 00010086 <4> [876.736440] RAX: 0000000000000000 RBX: ffff888361ffc840 RCX: 0000000000000002 <4> [876.736442] RDX: 0000000080000002 RSI: 0000000000000000 RDI: 00000000ffffffff <4> [876.736443] RBP: ffffc900003ef780 R08: 0000000000000000 R09: 0000000000000001 <4> [876.736444] R10: 000000001418e4b7 R11: 000000007f0ea93b R12: ffff888361ffcab8 <4> [876.736445] R13: ffff88843b6d0000 R14: 000000000000217c R15: 0000000000000001 <4> [876.736447] FS: 00007f4e6f255240(0000) GS:ffff88849fd00000(0000) knlGS:0000000000000000 <4> [876.736448] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 <4> [876.736449] CR2: 00007f093a9173c0 CR3: 00000003bba08005 CR4: 0000000000760ee0 <4> [876.736450] PKRU: 55555554 <4> [876.736451] Call Trace: <4> [876.736488] i915_request_retire+0x224/0x8e0 [i915] <4> [876.736521] i915_request_create+0x4b/0x1b0 [i915] <4> [876.736550] nop_virtual_engine+0x230/0x4d0 [i915] Fixes: 22b7a426bbe1 ("drm/i915/execlists: Preempt-to-busy") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111695 Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Tvrtko Ursulin Cc: Matthew Auld Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190918145453.8800-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.c | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 3ecf92aa5fc1..606acde47fa0 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -195,6 +195,27 @@ static void free_capture_list(struct i915_request *request) } } +static void remove_from_engine(struct i915_request *rq) +{ + struct 
intel_engine_cs *engine, *locked; + + /* + * Virtual engines complicate acquiring the engine timeline lock, + * as their rq->engine pointer is not stable until under that + * engine lock. The simple ploy we use is to take the lock then + * check that the rq still belongs to the newly locked engine. + */ + locked = READ_ONCE(rq->engine); + spin_lock(&locked->active.lock); + while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { + spin_unlock(&locked->active.lock); + spin_lock(&engine->active.lock); + locked = engine; + } + list_del(&rq->sched.link); + spin_unlock(&locked->active.lock); +} + static bool i915_request_retire(struct i915_request *rq) { struct i915_active_request *active, *next; @@ -260,9 +281,7 @@ static bool i915_request_retire(struct i915_request *rq) * request that we have removed from the HW and put back on a run * queue. */ - spin_lock(&rq->engine->active.lock); - list_del(&rq->sched.link); - spin_unlock(&rq->engine->active.lock); + remove_from_engine(rq); spin_lock(&rq->lock); i915_request_mark_complete(rq); -- cgit v1.2.3 From 7fd25e6fc035f4b04b75bca6d7e8daa069603a76 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 19 Sep 2019 14:12:34 +0200 Subject: ieee802154: atusb: fix use-after-free at disconnect The disconnect callback was accessing the hardware-descriptor private data after having freed it. Fixes: 7490b008d123 ("ieee802154: add support for atusb transceiver") Cc: stable # 4.2 Cc: Alexander Aring Reported-by: syzbot+f4509a9138a1472e7e80@syzkaller.appspotmail.com Signed-off-by: Johan Hovold Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/atusb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index ceddb424f887..0dd0ba915ab9 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -1137,10 +1137,11 @@ static void atusb_disconnect(struct usb_interface *interface) ieee802154_unregister_hw(atusb->hw); + usb_put_dev(atusb->usb_dev); + ieee802154_free_hw(atusb->hw); usb_set_intfdata(interface, NULL); - usb_put_dev(atusb->usb_dev); pr_debug("%s done\n", __func__); } -- cgit v1.2.3 From a47e788c2310a59b8e42e0bfc18d810118dff7bf Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 19 Sep 2019 14:14:14 +0100 Subject: drm/i915/selftests: Exercise CS TLB invalidation Check that we are correctly invalidating the TLB at the start of a batch after updating the GTT.
v2: Comments and hold the request reference while spinning Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Daniele Ceraolo Spurio Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190919131414.7495-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 308 ++++++++++++++++++++++++++ 1 file changed, 308 insertions(+) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 598c18d10640..0d40e0b42923 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -25,13 +25,16 @@ #include #include +#include "gem/i915_gem_context.h" #include "gem/selftests/mock_context.h" +#include "gt/intel_context.h" #include "i915_random.h" #include "i915_selftest.h" #include "mock_drm.h" #include "mock_gem_device.h" +#include "igt_flush_test.h" static void cleanup_freed_objects(struct drm_i915_private *i915) { @@ -1705,6 +1708,310 @@ out_put: return err; } +static int context_sync(struct intel_context *ce) +{ + struct i915_request *rq; + long timeout; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_get(rq); + i915_request_add(rq); + + timeout = i915_request_wait(rq, 0, HZ / 5); + i915_request_put(rq); + + return timeout < 0 ? -EIO : 0; +} + +static struct i915_request * +submit_batch(struct intel_context *ce, u64 addr) +{ + struct i915_request *rq; + int err; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return rq; + + err = 0; + if (rq->engine->emit_init_breadcrumb) /* detect a hang */ + err = rq->engine->emit_init_breadcrumb(rq); + if (err == 0) + err = rq->engine->emit_bb_start(rq, addr, 0, 0); + + if (err == 0) + i915_request_get(rq); + i915_request_add(rq); + + return err ? ERR_PTR(err) : rq; +} + +static u32 *spinner(u32 *batch, int i) +{ + return batch + i * 64 / sizeof(*batch) + 4; +} + +static void end_spin(u32 *batch, int i) +{ + *spinner(batch, i) = MI_BATCH_BUFFER_END; + wmb(); +} + +static int igt_cs_tlb(void *arg) +{ + const unsigned int count = PAGE_SIZE / 64; + const unsigned int chunk_size = count * PAGE_SIZE; + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *bbe, *act, *out; + struct i915_gem_engines_iter it; + struct i915_address_space *vm; + struct i915_gem_context *ctx; + struct intel_context *ce; + struct drm_file *file; + struct i915_vma *vma; + unsigned int i; + u32 *result; + u32 *batch; + int err = 0; + + /* + * Our mission here is to fool the hardware to execute something + * from scratch as it has not seen the batch move (due to missing + * the TLB invalidate). 
+ */ + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_unlock; + } + + vm = ctx->vm; + if (!vm) + goto out_unlock; + + /* Create two pages; dummy we prefill the TLB, and intended */ + bbe = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(bbe)) { + err = PTR_ERR(bbe); + goto out_unlock; + } + + batch = i915_gem_object_pin_map(bbe, I915_MAP_WC); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto out_put_bbe; + } + memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32)); + i915_gem_object_flush_map(bbe); + i915_gem_object_unpin_map(bbe); + + act = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(act)) { + err = PTR_ERR(act); + goto out_put_bbe; + } + + /* Track the execution of each request by writing into different slot */ + batch = i915_gem_object_pin_map(act, I915_MAP_WC); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto out_put_act; + } + for (i = 0; i < count; i++) { + u32 *cs = batch + i * 64 / sizeof(*cs); + u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32); + + GEM_BUG_ON(INTEL_GEN(i915) < 6); + cs[0] = MI_STORE_DWORD_IMM_GEN4; + if (INTEL_GEN(i915) >= 8) { + cs[1] = lower_32_bits(addr); + cs[2] = upper_32_bits(addr); + cs[3] = i; + cs[4] = MI_NOOP; + cs[5] = MI_BATCH_BUFFER_START_GEN8; + } else { + cs[1] = 0; + cs[2] = lower_32_bits(addr); + cs[3] = i; + cs[4] = MI_NOOP; + cs[5] = MI_BATCH_BUFFER_START; + } + } + + out = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(out)) { + err = PTR_ERR(out); + goto out_put_batch; + } + i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED); + + vma = i915_vma_instance(out, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_put_batch; + } + + err = i915_vma_pin(vma, 0, 0, + PIN_USER | + PIN_OFFSET_FIXED | + (vm->total - PAGE_SIZE)); + if (err) + goto out_put_out; + GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE); + + result = i915_gem_object_pin_map(out, I915_MAP_WB); + if (IS_ERR(result)) { + err = PTR_ERR(result); + goto out_put_out; + } + + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + IGT_TIMEOUT(end_time); + unsigned long pass = 0; + + if (!intel_engine_can_store_dword(ce->engine)) + continue; + + while (!__igt_timeout(end_time, NULL)) { + struct i915_request *rq; + u64 offset; + + offset = random_offset(0, vm->total - PAGE_SIZE, + chunk_size, PAGE_SIZE); + + err = vm->allocate_va_range(vm, offset, chunk_size); + if (err) + goto end; + + memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32)); + + vma = i915_vma_instance(bbe, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto end; + } + + err = vma->ops->set_pages(vma); + if (err) + goto end; + + /* Prime the TLB with the dummy pages */ + for (i = 0; i < count; i++) { + vma->node.start = offset + i * PAGE_SIZE; + vm->insert_entries(vm, vma, I915_CACHE_NONE, 0); + + rq = submit_batch(ce, vma->node.start); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto end; + } + i915_request_put(rq); + } + + vma->ops->clear_pages(vma); + + err = context_sync(ce); + if (err) { + pr_err("%s: dummy setup timed out\n", + ce->engine->name); + goto end; + } + + vma = i915_vma_instance(act, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto end; + } + + err = vma->ops->set_pages(vma); + if (err) + goto end; + + /* Replace the TLB with target batches */ + for (i = 0; i < count; i++) { + struct i915_request *rq; + u32 *cs = batch + i * 64 / 
sizeof(*cs); + u64 addr; + + vma->node.start = offset + i * PAGE_SIZE; + vm->insert_entries(vm, vma, I915_CACHE_NONE, 0); + + addr = vma->node.start + i * 64; + cs[4] = MI_NOOP; + cs[6] = lower_32_bits(addr); + cs[7] = upper_32_bits(addr); + wmb(); + + rq = submit_batch(ce, addr); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto end; + } + + /* Wait until the context chain has started */ + if (i == 0) { + while (READ_ONCE(result[i]) && + !i915_request_completed(rq)) + cond_resched(); + } else { + end_spin(batch, i - 1); + } + + i915_request_put(rq); + } + end_spin(batch, count - 1); + + vma->ops->clear_pages(vma); + + err = context_sync(ce); + if (err) { + pr_err("%s: writes timed out\n", + ce->engine->name); + goto end; + } + + for (i = 0; i < count; i++) { + if (result[i] != i) { + pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n", + ce->engine->name, pass, + offset, i, result[i], i); + err = -EINVAL; + goto end; + } + } + + vm->clear_range(vm, offset, chunk_size); + pass++; + } + } +end: + if (igt_flush_test(i915, I915_WAIT_LOCKED)) + err = -EIO; + i915_gem_context_unlock_engines(ctx); + i915_gem_object_unpin_map(out); +out_put_out: + i915_gem_object_put(out); +out_put_batch: + i915_gem_object_unpin_map(act); +out_put_act: + i915_gem_object_put(act); +out_put_bbe: + i915_gem_object_put(bbe); +out_unlock: + mutex_unlock(&i915->drm.struct_mutex); + mock_file_free(i915, file); + return err; +} + int i915_gem_gtt_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { @@ -1722,6 +2029,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_ggtt_pot), SUBTEST(igt_ggtt_fill), SUBTEST(igt_ggtt_page), + SUBTEST(igt_cs_tlb), }; GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total)); -- cgit v1.2.3 From b01a3ef34816461c31711e5559663e453cb83aba Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Wed, 18 Sep 2019 16:56:25 -0700 Subject: drm/i915: Future-proof DDC pin mapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We generally assume future platforms will inherit the behavior of the most recent platforms, so update our DDC pin mapping defaults to match how ICP/TGP behave (i.e., pins starting from GMBUS_PIN_1_BXT for combo PHY's and pins starting from GMBUS_PIN_9_TC1_ICP for TC PHY's). MCC's non-standard handling of combo PHY C seems like a platform-specific quirk that is unlikely to be duplicated on future platforms, so continue handling it as a special case. Without this change, future platforms would default to gen4-style pin mapping which is almost certainly not what we'll want. 
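Spelled out, the lookup order after the one-line change below becomes the following; this is just a condensed view of the intel_hdmi_ddc_pin() chain visible in the hunk, with the older-platform tail elided.

	if (HAS_PCH_MCC(dev_priv))
		ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);	/* combo PHY C quirk kept as a special case */
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		ddc_pin = icl_port_to_ddc_pin(dev_priv, port);	/* ICP, TGP and future PCHs */
	else if (HAS_PCH_CNP(dev_priv))
		ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
	/* ... older platforms keep their existing mappings ... */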
Cc: José Roberto de Souza Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190918235626.3750-1-matthew.d.roper@intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_hdmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 640254feef27..f81541cff5df 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -3028,7 +3028,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, if (HAS_PCH_MCC(dev_priv)) ddc_pin = mcc_port_to_ddc_pin(dev_priv, port); - else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_ICP(dev_priv)) + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) ddc_pin = icl_port_to_ddc_pin(dev_priv, port); else if (HAS_PCH_CNP(dev_priv)) ddc_pin = cnp_port_to_ddc_pin(dev_priv, port); -- cgit v1.2.3 From d09ad3e7af3ab230323c2c00e190f801feb3a343 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Wed, 18 Sep 2019 16:56:26 -0700 Subject: drm/i915: Unify ICP and MCC hotplug pin tables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The MCC hpd table is just a subset of the ICP table; we can eliminate it and use the ICP table everywhere. The extra pins in the table won't be a problem for MCC since we still supply an appropriate hotplug trigger mask anywhere the pin table is used. Cc: José Roberto de Souza Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190918235626.3750-2-matthew.d.roper@intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/i915_irq.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index ae7228032d2c..bc83f094065a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -157,12 +157,6 @@ static const u32 hpd_icp[HPD_NUM_PINS] = { [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4), }; -static const u32 hpd_mcc[HPD_NUM_PINS] = { - [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A), - [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B), - [HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1), -}; - static const u32 hpd_tgp[HPD_NUM_PINS] = { [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A), [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B), @@ -2258,7 +2252,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) } else if (HAS_PCH_MCC(dev_priv)) { ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; tc_hotplug_trigger = 0; - pins = hpd_mcc; + pins = hpd_icp; } else { ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; @@ -3434,7 +3428,7 @@ static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv) icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, 0, TGP_DDI_HPD_ENABLE_MASK, 0, - hpd_mcc); + hpd_icp); } static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) -- cgit v1.2.3 From 1c757497096f55d2e037462bfcdfed50877b4cf5 Mon Sep 17 00:00:00 2001 From: Radhakrishna Sripada Date: Mon, 9 Sep 2019 16:14:45 -0700 Subject: drm/i915/tgl: Implement Wa_1409142259 Disable CPS aware color pipe by setting chicken bit. 
BSpec: 52890 HSDES: 1409142259 v2: Move WA to ctx WA's(Daniele) Cc: Daniele Ceraolo Spurio Cc: Stuart Summers Cc: Matt Roper Signed-off-by: Radhakrishna Sripada Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20190909231445.23815-1-radhakrishna.sripada@intel.com Reviewed-by: Daniele Ceraolo Spurio --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 3 +++ drivers/gpu/drm/i915/i915_reg.h | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 41d0f786e06d..ba65e5018978 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -567,6 +567,9 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { + /* Wa_1409142259 */ + WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, + GEN12_DISABLE_CPS_AWARE_COLOR_PIPE); } static void diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index bf37ecebc82f..f8f52ae6cc6f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7672,6 +7672,7 @@ enum { #define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304) #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11) + #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE (1 << 9) #define HIZ_CHICKEN _MMIO(0x7018) # define CHV_HZ_8X8_MODE_IN_1X (1 << 15) -- cgit v1.2.3 From 7f0cc34b5349cfb2867358737b3338cb2fcbc354 Mon Sep 17 00:00:00 2001 From: Michel Thierry Date: Thu, 25 Jul 2019 17:02:26 -0700 Subject: drm/i915/tgl: Implement Wa_1406941453 Enable Small PL for power benefit. Signed-off-by: Michel Thierry Signed-off-by: Lucas De Marchi Reviewed-by: Stuart Summers Reviewed-by: Radhakrishna Sripada Link: https://patchwork.freedesktop.org/patch/msgid/20190713010940.17711-18-lucas.demarchi@intel.com Link: https://patchwork.freedesktop.org/patch/msgid/20190726000226.26914-4-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 7 +++++++ drivers/gpu/drm/i915/i915_reg.h | 3 +++ 2 files changed, 10 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index ba65e5018978..25ae60846398 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1260,6 +1260,13 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; + if (IS_GEN(i915, 12)) { + /* Wa_1406941453:tgl */ + wa_masked_en(wal, + SAMPLER_MODE, + SAMPLER_ENABLE_SMALL_PL); + } + if (IS_GEN(i915, 11)) { /* This is not an Wa. 
Enable for better image quality */ wa_masked_en(wal, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f8f52ae6cc6f..5e3a6178aff4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -8965,6 +8965,9 @@ enum { #define GEN9_DG_MIRROR_FIX_ENABLE (1 << 5) #define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3) +#define SAMPLER_MODE _MMIO(0xe18c) +#define SAMPLER_ENABLE_SMALL_PL (1 << 15) + #define GEN8_ROW_CHICKEN _MMIO(0xe4f0) #define FLOW_CONTROL_ENABLE (1 << 15) #define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1 << 8) -- cgit v1.2.3 From bed34ef544f9ab37ab349c04cf4142282c4dcf5d Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 5 Sep 2019 16:50:43 +0300 Subject: drm/i915: Bump skl+ max plane width to 5k for linear/x-tiled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The officially validated plane width limit is 4k on skl+, however we already had people using 5k displays before we started to enforce the limit. Also it seems Windows allows 5k resolutions as well (though not sure if they do it with one plane or two). According to hw folks 5k should work with the possible exception of the following features: - Ytile (already limited to 4k) - FP16 (already limited to 4k) - render compression (already limited to 4k) - KVMR sprite and cursor (don't care) - horizontal panning (need to verify this) - pipe and plane scaling (need to verify this) So apart from last two items on that list we are already fine. We should really verify what happens with those last two items but I don't have a 5k display on hand atm so it'll have to wait. In the meantime let's just bump the limit back up to 5k since several users have already been using it without apparent issues. At least we'll be no worse off than we were prior to lowering the limits. Cc: stable@vger.kernel.org Cc: Sean Paul Cc: José Roberto de Souza Tested-by: Leho Kraav Fixes: 372b9ffb5799 ("drm/i915: Fix skl+ max plane width") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111501 Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190905135044.2001-1-ville.syrjala@linux.intel.com Reviewed-by: Maarten Lankhorst Reviewed-by: Sean Paul --- drivers/gpu/drm/i915/display/intel_display.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 1cc74844d3ea..a61f4dfbf540 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3282,7 +3282,20 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, switch (fb->modifier) { case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_X_TILED: - return 4096; + /* + * Validated limit is 4k, but has 5k should + * work apart from the following features: + * - Ytile (already limited to 4k) + * - FP16 (already limited to 4k) + * - render compression (already limited to 4k) + * - KVMR sprite and cursor (don't care) + * - horizontal panning (TODO verify this) + * - pipe and plane scaling (TODO verify this) + */ + if (cpp == 8) + return 4096; + else + return 5120; case I915_FORMAT_MOD_Y_TILED_CCS: case I915_FORMAT_MOD_Yf_TILED_CCS: /* FIXME AUX plane? 
*/ -- cgit v1.2.3 From 2d20411e25a3bf3d2914a2219f47ed48dc57aed5 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 18 Sep 2019 18:07:07 +0300 Subject: drm/i915: Don't advertise modes that exceed the max plane size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Modern platforms allow the transcoders hdisplay/vdisplay to exceed the planes' max resolution. This has the nasty implication that modes on the connectors' mode list may not be usable when the user asks for a fullscreen plane. Seeing as that is the most common use case it seems prudent to filter out modes that don't allow for fullscreen planes to be enabled. Let's do that in the connetor .mode_valid() hook so that normally such modes are kept hidden but the user is still able to forcibly specify such a mode if they know they don't need fullscreen planes. This is in line with ealier policies regarding certain clock limits. The idea is to prevent the casual user from encountering a mode that would fail under typical conditions, but allow the expert user to force things if they so wish. Maybe in the future we should consider automagically using two planes when one can't cover the entire screen? Wouldn't be a great match for the current uapi with explicit planes though, but I guess no worse than using two pipes (which we apparently have to in the future anyway). Either that or we'd have to teach userspace to do it for us. v2: Fix icl+ max plane heigth (Manasi) Cc: Manasi Navare Cc: Leho Kraav Cc: Sean Paul Cc: José Roberto de Souza Reviewed-by: Maarten Lankhorst Reviewed-by: Manasi Navare Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190918150707.32420-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 36 ++++++++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_display.h | 4 ++++ drivers/gpu/drm/i915/display/intel_dp.c | 2 +- drivers/gpu/drm/i915/display/intel_dsi.c | 3 ++- drivers/gpu/drm/i915/display/intel_hdmi.c | 4 +++- 5 files changed, 46 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index a61f4dfbf540..e0033d99f6e3 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -15753,6 +15753,7 @@ intel_mode_valid(struct drm_device *dev, DRM_MODE_FLAG_CLKDIV2)) return MODE_BAD; + /* Transcoder timing limits */ if (INTEL_GEN(dev_priv) >= 11) { hdisplay_max = 16384; vdisplay_max = 8192; @@ -15791,6 +15792,41 @@ intel_mode_valid(struct drm_device *dev, return MODE_OK; } +enum drm_mode_status +intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, + const struct drm_display_mode *mode) +{ + int plane_width_max, plane_height_max; + + /* + * intel_mode_valid() should be + * sufficient on older platforms. + */ + if (INTEL_GEN(dev_priv) < 9) + return MODE_OK; + + /* + * Most people will probably want a fullscreen + * plane so let's not advertize modes that are + * too big for that. 
+ */ + if (INTEL_GEN(dev_priv) >= 11) { + plane_width_max = 5120; + plane_height_max = 4320; + } else { + plane_width_max = 5120; + plane_height_max = 4096; + } + + if (mode->hdisplay > plane_width_max) + return MODE_H_ILLEGAL; + + if (mode->vdisplay > plane_height_max) + return MODE_V_ILLEGAL; + + return MODE_OK; +} + static const struct drm_mode_config_funcs intel_mode_funcs = { .fb_create = intel_user_framebuffer_create, .get_format_info = intel_get_format_info, diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 66330fcb10d4..b1ae0e59c715 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -32,6 +32,7 @@ enum link_m_n_set; struct dpll; struct drm_connector; struct drm_device; +struct drm_display_mode; struct drm_encoder; struct drm_file; struct drm_format_info; @@ -448,6 +449,9 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier); bool intel_plane_can_remap(const struct intel_plane_state *plane_state); +enum drm_mode_status +intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, + const struct drm_display_mode *mode); enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port); void intel_plane_destroy(struct drm_plane *plane); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index f06fbca5cb3e..0c7755e915d9 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -562,7 +562,7 @@ intel_dp_mode_valid(struct drm_connector *connector, if (mode->flags & DRM_MODE_FLAG_DBLCLK) return MODE_H_ILLEGAL; - return MODE_OK; + return intel_mode_valid_max_plane_size(dev_priv, mode); } u32 intel_dp_pack_aux(const u8 *src, int src_bytes) diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c index 5fec02aceaed..a2a937109a5a 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.c +++ b/drivers/gpu/drm/i915/display/intel_dsi.c @@ -55,6 +55,7 @@ int intel_dsi_get_modes(struct drm_connector *connector) enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; @@ -73,7 +74,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, return MODE_CLOCK_HIGH; } - return MODE_OK; + return intel_mode_valid_max_plane_size(dev_priv, mode); } struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index f81541cff5df..04a8718bd2d7 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2188,8 +2188,10 @@ intel_hdmi_mode_valid(struct drm_connector *connector, status = hdmi_port_clock_valid(hdmi, clock * 5 / 4, true, force_dvi); } + if (status != MODE_OK) + return status; - return status; + return intel_mode_valid_max_plane_size(dev_priv, mode); } static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, -- cgit v1.2.3 From c45e788d95b470e9f68fabe1f3cb44beb5dd7840 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 
19 Sep 2019 16:18:11 +0100 Subject: drm/i915/tgl: Suspend pre-parser across GTT invalidations Before we execute a batch, we must first issue any and all TLB invalidations so that batch picks up the new page table entries. Tigerlake's preparser is weakening our post-sync CS_STALL inside the invalidate pipe-control and allowing the loading of the batch buffer before we have setup its page table (and so it loads the wrong page and executes indefinitely). The igt_cs_tlb indicates that this issue can only be observed on rcs, even though the preparser is common to all engines. Alternatively, we could do TLB shootdown via mmio on updating the GTT. By inserting the pre-parser disable inside EMIT_INVALIDATE, we will also accidentally fixup execution that writes into subsequent batches, such as gem_exec_whisper and even relocations performed on the GPU. We should be careful not to allow this disable to become baked into the uABI! The issue is that if userspace relies on our disabling of the HW optimisation, when we are ready to enable that optimisation, userspace will then be broken... Testcase: igt/i915_selftests/live_gtt/igt_cs_tlb Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111753 Signed-off-by: Chris Wilson Cc: Daniele Ceraolo Spurio Cc: Mika Kuoppala Acked-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190919151811.9526-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 74 ++++++++++++++++++++++++++++++++++++- 1 file changed, 73 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index a99166a2d2eb..49ed9230a2cb 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2807,6 +2807,78 @@ static int gen11_emit_flush_render(struct i915_request *request, return 0; } +static u32 preparser_disable(bool state) +{ + return MI_ARB_CHECK | 1 << 8 | state; +} + +static int gen12_emit_flush_render(struct i915_request *request, + u32 mode) +{ + const u32 scratch_addr = + intel_gt_scratch_offset(request->engine->gt, + INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); + + if (mode & EMIT_FLUSH) { + u32 flags = 0; + u32 *cs; + + flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; + flags |= PIPE_CONTROL_FLUSH_ENABLE; + + flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + flags |= PIPE_CONTROL_QW_WRITE; + + flags |= PIPE_CONTROL_CS_STALL; + + cs = intel_ring_begin(request, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + cs = gen8_emit_pipe_control(cs, flags, scratch_addr); + intel_ring_advance(request, cs); + } + + if (mode & EMIT_INVALIDATE) { + u32 flags = 0; + u32 *cs; + + flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + + flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + flags |= PIPE_CONTROL_QW_WRITE; + + flags |= PIPE_CONTROL_CS_STALL; + + cs = intel_ring_begin(request, 8); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* + * Prevent the pre-parser from skipping past the TLB + * invalidate and loading a stale page for the batch + * buffer / request payload. 
+ */ + *cs++ = preparser_disable(true); + + cs = gen8_emit_pipe_control(cs, flags, scratch_addr); + + *cs++ = preparser_disable(false); + intel_ring_advance(request, cs); + } + + return 0; +} + /* * Reserve space for 2 NOOPs at the end of each request to be * used as a workaround for not being allowed to do lite @@ -3072,7 +3144,7 @@ static void rcs_submission_override(struct intel_engine_cs *engine) { switch (INTEL_GEN(engine->i915)) { case 12: - engine->emit_flush = gen11_emit_flush_render; + engine->emit_flush = gen12_emit_flush_render; engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs; break; case 11: -- cgit v1.2.3 From d19d71fc2b15bf30ff3e56932eae23ff096c1396 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 19 Sep 2019 12:19:10 +0100 Subject: drm/i915: Mark i915_request.timeline as a volatile, rcu pointer The request->timeline is only valid until the request is retired (i.e. before it is completed). Upon retiring the request, the context may be unpinned and freed, and along with it the timeline may be freed. We therefore need to be very careful when chasing rq->timeline that the pointer does not disappear beneath us. The vast majority of users are in a protected context, either during request construction or retirement, where the timeline->mutex is held and the timeline cannot disappear. It is those few off the beaten path (where we access a second timeline) that need extra scrutiny -- to be added in the next patch after first adding the warnings about dangerous access. One complication, where we cannot use the timeline->mutex itself, is during request submission onto hardware (under spinlocks). Here, we want to check on the timeline to finalize the breadcrumb, and so we need to impose a second rule to ensure that the request->timeline is indeed valid. As we are submitting the request, it's context and timeline must be pinned, as it will be used by the hardware. Since it is pinned, we know the request->timeline must still be valid, and we cannot submit the idle barrier until after we release the engine->active.lock, ergo while submitting and holding that spinlock, a second thread cannot release the timeline. 
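As a rule of thumb, the off-the-beaten-path access pattern this introduces looks like the sketch below. It mirrors the get_timeline() helper added to intel_engine_cs.c later in this patch; peek_timeline is a made-up name used here only for illustration.

static struct intel_timeline *peek_timeline(struct i915_request *rq)
{
	struct intel_timeline *tl;

	rcu_read_lock();
	tl = rcu_dereference(rq->timeline);	/* may be freed once rq is retired */
	if (!kref_get_unless_zero(&tl->kref))
		tl = NULL;			/* lost the race against retirement */
	rcu_read_unlock();

	return tl;				/* caller must intel_timeline_put() when done */
}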
v2: Don't be lazy inside selftests; hold the timeline->mutex for as long as we need it, and tidy up acquiring the timeline with a bit of refactoring (i915_active_add_request) Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190919111912.21631-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/display/intel_overlay.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 2 +- drivers/gpu/drm/i915/gem/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/gt/intel_context.c | 4 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 57 ++++++++++++++++++++++---- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 2 +- drivers/gpu/drm/i915/gt/intel_engine_pool.h | 2 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 17 ++++---- drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 27 ++++++------ drivers/gpu/drm/i915/gt/intel_timeline.c | 6 +-- drivers/gpu/drm/i915/gt/intel_timeline_types.h | 1 + drivers/gpu/drm/i915/gt/selftest_context.c | 18 +++++--- drivers/gpu/drm/i915/gt/selftest_lrc.c | 2 +- drivers/gpu/drm/i915/i915_active.c | 2 +- drivers/gpu/drm/i915/i915_active.h | 6 +++ drivers/gpu/drm/i915/i915_request.c | 28 ++++++++----- drivers/gpu/drm/i915/i915_request.h | 22 +++++++++- drivers/gpu/drm/i915/i915_vma.c | 6 +-- drivers/gpu/drm/i915/selftests/i915_active.c | 2 +- drivers/gpu/drm/i915/selftests/igt_spinner.c | 2 +- 20 files changed, 147 insertions(+), 63 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 29edfc343716..5efef9babadb 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -230,7 +230,7 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *)) if (IS_ERR(rq)) return rq; - err = i915_active_ref(&overlay->last_flip, rq->timeline, rq); + err = i915_active_add_request(&overlay->last_flip, rq); if (err) { i915_request_add(rq); return ERR_PTR(err); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c index f99920652751..7f61a8024133 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c @@ -211,7 +211,7 @@ static void clear_pages_worker(struct work_struct *work) * keep track of the GPU activity within this vma/request, and * propagate the signal from the request to w->dma. 
*/ - err = i915_active_ref(&vma->active, rq->timeline, rq); + err = i915_active_add_request(&vma->active, rq); if (err) goto out_request; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index f1c0e5d958f3..4a34c4f62065 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -910,7 +910,7 @@ static int context_barrier_task(struct i915_gem_context *ctx, if (emit) err = emit(rq, data); if (err == 0) - err = i915_active_ref(&cb->base, rq->timeline, rq); + err = i915_active_add_request(&cb->base, rq); i915_request_add(rq); if (err) diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index c0495811f493..26cb838c272c 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -298,7 +298,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce, /* Only suitable for use in remotely modifying this context */ GEM_BUG_ON(rq->hw_context == ce); - if (rq->timeline != tl) { /* beware timeline sharing */ + if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */ err = mutex_lock_interruptible_nested(&tl->mutex, SINGLE_DEPTH_NESTING); if (err) @@ -319,7 +319,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce, * words transfer the pinned ce object to tracked active request. */ GEM_BUG_ON(i915_active_is_idle(&ce->active)); - return i915_active_ref(&ce->active, rq->timeline, rq); + return i915_active_add_request(&ce->active, rq); } struct i915_request *intel_context_create_request(struct intel_context *ce) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 3c176b0f4b45..f451d5076bde 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -680,6 +680,8 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) engine->status_page.vma)) goto out_frame; + mutex_lock(&frame->timeline.mutex); + frame->ring.vaddr = frame->cs; frame->ring.size = sizeof(frame->cs); frame->ring.effective_size = frame->ring.size; @@ -688,18 +690,22 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) frame->rq.i915 = engine->i915; frame->rq.engine = engine; frame->rq.ring = &frame->ring; - frame->rq.timeline = &frame->timeline; + rcu_assign_pointer(frame->rq.timeline, &frame->timeline); dw = intel_timeline_pin(&frame->timeline); if (dw < 0) goto out_timeline; + spin_lock_irq(&engine->active.lock); dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs; + spin_unlock_irq(&engine->active.lock); + GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */ intel_timeline_unpin(&frame->timeline); out_timeline: + mutex_unlock(&frame->timeline.mutex); intel_timeline_fini(&frame->timeline); out_frame: kfree(frame); @@ -1196,6 +1202,27 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len) } } +static struct intel_timeline *get_timeline(struct i915_request *rq) +{ + struct intel_timeline *tl; + + /* + * Even though we are holding the engine->active.lock here, there + * is no control over the submission queue per-se and we are + * inspecting the active state at a random point in time, with an + * unknown queue. Play safe and make sure the timeline remains valid. + * (Only being used for pretty printing, one extra kref shouldn't + * cause a camel stampede!) 
+ */ + rcu_read_lock(); + tl = rcu_dereference(rq->timeline); + if (!kref_get_unless_zero(&tl->kref)) + tl = NULL; + rcu_read_unlock(); + + return tl; +} + static void intel_engine_print_registers(struct intel_engine_cs *engine, struct drm_printer *m) { @@ -1290,27 +1317,37 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, int len; len = snprintf(hdr, sizeof(hdr), - "\t\tActive[%d: ", + "\t\tActive[%d]: ", (int)(port - execlists->active)); - if (!i915_request_signaled(rq)) + if (!i915_request_signaled(rq)) { + struct intel_timeline *tl = get_timeline(rq); + len += snprintf(hdr + len, sizeof(hdr) - len, "ring:{start:%08x, hwsp:%08x, seqno:%08x}, ", i915_ggtt_offset(rq->ring->vma), - rq->timeline->hwsp_offset, + tl ? tl->hwsp_offset : 0, hwsp_seqno(rq)); + + if (tl) + intel_timeline_put(tl); + } snprintf(hdr + len, sizeof(hdr) - len, "rq: "); print_request(m, rq, hdr); } for (port = execlists->pending; (rq = *port); port++) { + struct intel_timeline *tl = get_timeline(rq); char hdr[80]; snprintf(hdr, sizeof(hdr), "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ", (int)(port - execlists->pending), i915_ggtt_offset(rq->ring->vma), - rq->timeline->hwsp_offset, + tl ? tl->hwsp_offset : 0, hwsp_seqno(rq)); print_request(m, rq, hdr); + + if (tl) + intel_timeline_put(tl); } spin_unlock_irqrestore(&engine->active.lock, flags); } else if (INTEL_GEN(dev_priv) > 6) { @@ -1388,6 +1425,8 @@ void intel_engine_dump(struct intel_engine_cs *engine, spin_lock_irqsave(&engine->active.lock, flags); rq = intel_engine_find_active_request(engine); if (rq) { + struct intel_timeline *tl = get_timeline(rq); + print_request(m, rq, "\t\tactive "); drm_printf(m, "\t\tring->start: 0x%08x\n", @@ -1400,8 +1439,12 @@ void intel_engine_dump(struct intel_engine_cs *engine, rq->ring->emit); drm_printf(m, "\t\tring->space: 0x%08x\n", rq->ring->space); - drm_printf(m, "\t\tring->hwsp: 0x%08x\n", - rq->timeline->hwsp_offset); + + if (tl) { + drm_printf(m, "\t\tring->hwsp: 0x%08x\n", + tl->hwsp_offset); + intel_timeline_put(tl); + } print_request_ring(m, rq); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 65b5ca74b394..ce61c83d68e9 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -103,7 +103,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) /* Context switch failed, hope for the best! Maybe reset? */ goto out_unlock; - intel_timeline_enter(rq->timeline); + intel_timeline_enter(i915_request_timeline(rq)); /* Check again on the next retirement. 
*/ engine->wakeref_serial = engine->serial + 1; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h index 7e2123b33594..1bd89cadc3b7 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pool.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h @@ -18,7 +18,7 @@ static inline int intel_engine_pool_mark_active(struct intel_engine_pool_node *node, struct i915_request *rq) { - return i915_active_ref(&node->active, rq->timeline, rq); + return i915_active_add_request(&node->active, rq); } static inline void diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 49ed9230a2cb..1a2b71157f08 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1825,7 +1825,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq) { u32 *cs; - GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb); + GEM_BUG_ON(!i915_request_timeline(rq)->has_initial_breadcrumb); cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) @@ -1841,7 +1841,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq) *cs++ = MI_NOOP; *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = rq->timeline->hwsp_offset; + *cs++ = i915_request_timeline(rq)->hwsp_offset; *cs++ = 0; *cs++ = rq->fence.seqno - 1; @@ -2324,7 +2324,8 @@ static void reset_csb_pointers(struct intel_engine_cs *engine) static struct i915_request *active_request(struct i915_request *rq) { - const struct list_head * const list = &rq->timeline->requests; + const struct list_head * const list = + &i915_request_active_timeline(rq)->requests; const struct intel_context * const ce = rq->hw_context; struct i915_request *active = NULL; @@ -2927,7 +2928,7 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) { cs = gen8_emit_ggtt_write(cs, request->fence.seqno, - request->timeline->hwsp_offset, + i915_request_active_timeline(request)->hwsp_offset, 0); return gen8_emit_fini_breadcrumb_footer(request, cs); @@ -2944,7 +2945,7 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */ cs = gen8_emit_ggtt_write_rcs(cs, request->fence.seqno, - request->timeline->hwsp_offset, + i915_request_active_timeline(request)->hwsp_offset, PIPE_CONTROL_FLUSH_ENABLE | PIPE_CONTROL_CS_STALL); @@ -2956,7 +2957,7 @@ gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) { cs = gen8_emit_ggtt_write_rcs(cs, request->fence.seqno, - request->timeline->hwsp_offset, + i915_request_active_timeline(request)->hwsp_offset, PIPE_CONTROL_CS_STALL | PIPE_CONTROL_TILE_CACHE_FLUSH | PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | @@ -3020,7 +3021,7 @@ static u32 *gen12_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) { cs = gen8_emit_ggtt_write(cs, request->fence.seqno, - request->timeline->hwsp_offset, + i915_request_active_timeline(request)->hwsp_offset, 0); return gen12_emit_fini_breadcrumb_footer(request, cs); @@ -3031,7 +3032,7 @@ gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) { cs = gen8_emit_ggtt_write_rcs(cs, request->fence.seqno, - request->timeline->hwsp_offset, + i915_request_active_timeline(request)->hwsp_offset, PIPE_CONTROL_CS_STALL | PIPE_CONTROL_TILE_CACHE_FLUSH | PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c index a25b84b12ef1..0747b8c9f768 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ 
b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c @@ -322,7 +322,8 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) PIPE_CONTROL_DC_FLUSH_ENABLE | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL); - *cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = i915_request_active_timeline(rq)->hwsp_offset | + PIPE_CONTROL_GLOBAL_GTT; *cs++ = rq->fence.seqno; *cs++ = MI_USER_INTERRUPT; @@ -425,7 +426,7 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL); - *cs++ = rq->timeline->hwsp_offset; + *cs++ = i915_request_active_timeline(rq)->hwsp_offset; *cs++ = rq->fence.seqno; *cs++ = MI_USER_INTERRUPT; @@ -439,8 +440,8 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) { - GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); - GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; @@ -459,8 +460,8 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) { int i; - GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); - GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; @@ -938,8 +939,8 @@ static void i9xx_submit_request(struct i915_request *request) static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) { - GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); - GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); *cs++ = MI_FLUSH; @@ -961,8 +962,8 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) { int i; - GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); - GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); *cs++ = MI_FLUSH; @@ -1815,7 +1816,7 @@ static int ring_request_alloc(struct i915_request *request) int ret; GEM_BUG_ON(!intel_context_is_pinned(request->hw_context)); - GEM_BUG_ON(request->timeline->has_initial_breadcrumb); + GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb); /* * Flush enough space to reduce the likelihood of waiting after @@ -1926,7 +1927,9 @@ u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords) */ GEM_BUG_ON(!rq->reserved_space); - ret = wait_for_space(ring, rq->timeline, total_bytes); + ret = wait_for_space(ring, + i915_request_timeline(rq), + total_bytes); if (unlikely(ret)) return 
ERR_PTR(ret); } diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 9cb01d9828f1..115a24d4a20a 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -493,7 +493,7 @@ int intel_timeline_get_seqno(struct intel_timeline *tl, static int cacheline_ref(struct intel_timeline_cacheline *cl, struct i915_request *rq) { - return i915_active_ref(&cl->active, rq->timeline, rq); + return i915_active_add_request(&cl->active, rq); } int intel_timeline_read_hwsp(struct i915_request *from, @@ -504,7 +504,7 @@ int intel_timeline_read_hwsp(struct i915_request *from, struct intel_timeline *tl = from->timeline; int err; - GEM_BUG_ON(to->timeline == tl); + GEM_BUG_ON(rcu_access_pointer(to->timeline) == tl); mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING); err = i915_request_completed(from); @@ -541,7 +541,7 @@ void __intel_timeline_free(struct kref *kref) container_of(kref, typeof(*timeline), kref); intel_timeline_fini(timeline); - kfree(timeline); + kfree_rcu(timeline, rcu); } static void timelines_fini(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h index 2b1baf2fcc8e..c668c4c50e75 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h +++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h @@ -80,6 +80,7 @@ struct intel_timeline { struct intel_gt *gt; struct kref kref; + struct rcu_head rcu; }; #endif /* __I915_TIMELINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c index 9d1ea26c7a2d..4ce1e25433d2 100644 --- a/drivers/gpu/drm/i915/gt/selftest_context.c +++ b/drivers/gpu/drm/i915/gt/selftest_context.c @@ -14,22 +14,28 @@ static int request_sync(struct i915_request *rq) { + struct intel_timeline *tl = i915_request_timeline(rq); long timeout; int err = 0; + intel_timeline_get(tl); i915_request_get(rq); - i915_request_add(rq); + /* Opencode i915_request_add() so we can keep the timeline locked. 
*/ + __i915_request_commit(rq); + __i915_request_queue(rq, NULL); + timeout = i915_request_wait(rq, 0, HZ / 10); - if (timeout < 0) { + if (timeout < 0) err = timeout; - } else { - mutex_lock(&rq->timeline->mutex); + else i915_request_retire_upto(rq); - mutex_unlock(&rq->timeline->mutex); - } + + lockdep_unpin_lock(&tl->mutex, rq->cookie); + mutex_unlock(&tl->mutex); i915_request_put(rq); + intel_timeline_put(tl); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 26d05bd1bdc8..93a871bfd95d 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -1089,7 +1089,7 @@ static int live_suppress_wait_preempt(void *arg) } /* Disable NEWCLIENT promotion */ - __i915_active_request_set(&rq[i]->timeline->last_request, + __i915_active_request_set(&i915_request_timeline(rq[i])->last_request, dummy); i915_request_add(rq[i]); } diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 6a447f1d0110..d5aac6ff803a 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -695,7 +695,7 @@ void i915_request_add_active_barriers(struct i915_request *rq) struct llist_node *node, *next; GEM_BUG_ON(intel_engine_is_virtual(engine)); - GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline); + GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline); /* * Attach the list of proto-fences to the in-flight request such diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index f95058f99057..949c6835335b 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -373,6 +373,12 @@ int i915_active_ref(struct i915_active *ref, struct intel_timeline *tl, struct i915_request *rq); +static inline int +i915_active_add_request(struct i915_active *ref, struct i915_request *rq) +{ + return i915_active_ref(ref, i915_request_timeline(rq), rq); +} + int i915_active_wait(struct i915_active *ref); int i915_request_await_active(struct i915_request *rq, diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 606acde47fa0..fb6f21c41934 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -220,7 +220,6 @@ static bool i915_request_retire(struct i915_request *rq) { struct i915_active_request *active, *next; - lockdep_assert_held(&rq->timeline->mutex); if (!i915_request_completed(rq)) return false; @@ -241,7 +240,8 @@ static bool i915_request_retire(struct i915_request *rq) * Note this requires that we are always called in request * completion order. 
*/ - GEM_BUG_ON(!list_is_first(&rq->link, &rq->timeline->requests)); + GEM_BUG_ON(!list_is_first(&rq->link, + &i915_request_timeline(rq)->requests)); rq->ring->head = rq->postfix; /* @@ -317,7 +317,7 @@ static bool i915_request_retire(struct i915_request *rq) void i915_request_retire_upto(struct i915_request *rq) { - struct intel_timeline * const tl = rq->timeline; + struct intel_timeline * const tl = i915_request_timeline(rq); struct i915_request *tmp; GEM_TRACE("%s fence %llx:%lld, current %d\n", @@ -325,7 +325,6 @@ void i915_request_retire_upto(struct i915_request *rq) rq->fence.context, rq->fence.seqno, hwsp_seqno(rq)); - lockdep_assert_held(&tl->mutex); GEM_BUG_ON(!i915_request_completed(rq)); do { @@ -661,9 +660,11 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) rq->gem_context = ce->gem_context; rq->engine = ce->engine; rq->ring = ce->ring; - rq->timeline = tl; + + rcu_assign_pointer(rq->timeline, tl); rq->hwsp_seqno = tl->hwsp_seqno; rq->hwsp_cacheline = tl->hwsp_cacheline; + rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */ spin_lock_init(&rq->lock); @@ -771,7 +772,8 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal) return 0; signal = list_prev_entry(signal, link); - if (intel_timeline_sync_is_later(rq->timeline, &signal->fence)) + if (intel_timeline_sync_is_later(i915_request_timeline(rq), + &signal->fence)) return 0; return i915_sw_fence_await_dma_fence(&rq->submit, @@ -947,7 +949,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) /* Squash repeated waits to the same timelines */ if (fence->context && - intel_timeline_sync_is_later(rq->timeline, fence)) + intel_timeline_sync_is_later(i915_request_timeline(rq), + fence)) continue; if (dma_fence_is_i915(fence)) @@ -961,7 +964,8 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) /* Record the latest fence used against each timeline */ if (fence->context) - intel_timeline_sync_set(rq->timeline, fence); + intel_timeline_sync_set(i915_request_timeline(rq), + fence); } while (--nchild); return 0; @@ -1103,7 +1107,7 @@ void i915_request_skip(struct i915_request *rq, int error) static struct i915_request * __i915_request_add_to_timeline(struct i915_request *rq) { - struct intel_timeline *timeline = rq->timeline; + struct intel_timeline *timeline = i915_request_timeline(rq); struct i915_request *prev; /* @@ -1216,7 +1220,7 @@ void __i915_request_queue(struct i915_request *rq, void i915_request_add(struct i915_request *rq) { struct i915_sched_attr attr = rq->gem_context->sched; - struct intel_timeline * const tl = rq->timeline; + struct intel_timeline * const tl = i915_request_timeline(rq); struct i915_request *prev; lockdep_assert_held(&tl->mutex); @@ -1271,7 +1275,9 @@ void i915_request_add(struct i915_request *rq) * work on behalf of others -- but instead we should benefit from * improved resource management. (Well, that's the theory at least.) 
*/ - if (prev && i915_request_completed(prev) && prev->timeline == tl) + if (prev && + i915_request_completed(prev) && + rcu_access_pointer(prev->timeline) == tl) i915_request_retire_upto(prev); mutex_unlock(&tl->mutex); diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 8ac6e1226a56..b18f49528ded 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -113,7 +113,7 @@ struct i915_request { struct intel_engine_cs *engine; struct intel_context *hw_context; struct intel_ring *ring; - struct intel_timeline *timeline; + struct intel_timeline __rcu *timeline; struct list_head signal_link; /* @@ -442,6 +442,26 @@ static inline bool i915_request_has_nopreempt(const struct i915_request *rq) return unlikely(rq->flags & I915_REQUEST_NOPREEMPT); } +static inline struct intel_timeline * +i915_request_timeline(struct i915_request *rq) +{ + /* Valid only while the request is being constructed (or retired). */ + return rcu_dereference_protected(rq->timeline, + lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex)); +} + +static inline struct intel_timeline * +i915_request_active_timeline(struct i915_request *rq) +{ + /* + * When in use during submission, we are protected by a guarantee that + * the context/timeline is pinned and must remain pinned until after + * this submission. + */ + return rcu_dereference_protected(rq->timeline, + lockdep_is_held(&rq->engine->active.lock)); +} + bool i915_retire_requests(struct drm_i915_private *i915); #endif /* I915_REQUEST_H */ diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 411047d6a909..9d5b0f87c210 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -900,15 +900,13 @@ int i915_vma_move_to_active(struct i915_vma *vma, * add the active reference first and queue for it to be dropped * *last*. 
*/ - err = i915_active_ref(&vma->active, rq->timeline, rq); + err = i915_active_add_request(&vma->active, rq); if (unlikely(err)) return err; if (flags & EXEC_OBJECT_WRITE) { if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS)) - i915_active_ref(&obj->frontbuffer->write, - rq->timeline, - rq); + i915_active_add_request(&obj->frontbuffer->write, rq); dma_resv_add_excl_fence(vma->resv, &rq->fence); obj->write_domain = I915_GEM_DOMAIN_RENDER; diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index 77d844ac8b71..afecfa081ff4 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -110,7 +110,7 @@ __live_active_setup(struct drm_i915_private *i915) submit, GFP_KERNEL); if (err >= 0) - err = i915_active_ref(&active->base, rq->timeline, rq); + err = i915_active_add_request(&active->base, rq); i915_request_add(rq); if (err) { pr_err("Failed to track active ref!\n"); diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 11f04ad48e68..ee8450b871da 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -147,7 +147,7 @@ igt_spinner_create_request(struct igt_spinner *spin, intel_gt_chipset_flush(engine->gt); if (engine->emit_init_breadcrumb && - rq->timeline->has_initial_breadcrumb) { + i915_request_timeline(rq)->has_initial_breadcrumb) { err = engine->emit_init_breadcrumb(rq); if (err) goto cancel_rq; -- cgit v1.2.3 From 6a79d848403ded2245fbe4aaf50b33118d12daf0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 19 Sep 2019 12:19:11 +0100 Subject: drm/i915: Lock signaler timeline while navigating As we need to take a walk back along the signaler timeline to find the fence before upon which we want to wait, we need to lock that timeline to prevent it being modified as we walk. Similarly, we also need to acquire a reference to the earlier fence while it still exists! Though we lack the correct locking today, we are saved by the overarching struct_mutex -- but that protection is being removed. v2: Tvrtko made me realise I was being lax and using annotations to ignore the AB-BA deadlock from the timeline overlap. As it would be possible to construct a second request that was using a semaphore from the same timeline as ourselves, we could quite easily end up in a situation where we deadlocked in our mutex waits. Avoid that by using a trylock and falling back to a normal dma-fence await if contended. v3: Eek, the signal->timeline is volatile and must be carefully dereferenced to ensure it is valid. 
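The shape of the v2/v3 fix, as a condensed sketch of the i915_request_await_start() logic in the diff below (illustrative only; all names are taken from the patch itself): acquire the signaler's timeline safely under RCU, trylock its mutex to sidestep the AB-BA case, and return -EBUSY on contention so the caller falls back to a plain dma-fence await.

	rcu_read_lock();
	tl = rcu_dereference(signal->timeline);
	/* Skip if the signaler already started; otherwise pin the timeline. */
	if (i915_request_started(signal) || !kref_get_unless_zero(&tl->kref))
		tl = NULL;
	rcu_read_unlock();
	if (!tl)
		return 0;

	fence = ERR_PTR(-EBUSY); /* assume contention unless the trylock succeeds */
	if (mutex_trylock(&tl->mutex)) {
		fence = NULL;
		if (!i915_request_started(signal) &&
		    !list_is_first(&signal->link, &tl->requests)) {
			/* Take a reference to the earlier fence while it still exists. */
			signal = list_prev_entry(signal, link);
			fence = dma_fence_get(&signal->fence);
		}
		mutex_unlock(&tl->mutex);
	}
	intel_timeline_put(tl);
	if (IS_ERR_OR_NULL(fence))
		return PTR_ERR_OR_ZERO(fence); /* -EBUSY => caller awaits the dma-fence instead */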
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190919111912.21631-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_request.c | 68 +++++++++++++++++++++++++------------ 1 file changed, 46 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index fb6f21c41934..9bd8538b1907 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -768,17 +768,43 @@ err_unlock: static int i915_request_await_start(struct i915_request *rq, struct i915_request *signal) { - if (list_is_first(&signal->link, &signal->timeline->requests)) - return 0; + struct intel_timeline *tl; + struct dma_fence *fence; + int err; + + GEM_BUG_ON(i915_request_timeline(rq) == + rcu_access_pointer(signal->timeline)); - signal = list_prev_entry(signal, link); - if (intel_timeline_sync_is_later(i915_request_timeline(rq), - &signal->fence)) + rcu_read_lock(); + tl = rcu_dereference(signal->timeline); + if (i915_request_started(signal) || !kref_get_unless_zero(&tl->kref)) + tl = NULL; + rcu_read_unlock(); + if (!tl) /* already started or maybe even completed */ return 0; - return i915_sw_fence_await_dma_fence(&rq->submit, - &signal->fence, 0, - I915_FENCE_GFP); + fence = ERR_PTR(-EBUSY); + if (mutex_trylock(&tl->mutex)) { + fence = NULL; + if (!i915_request_started(signal) && + !list_is_first(&signal->link, &tl->requests)) { + signal = list_prev_entry(signal, link); + fence = dma_fence_get(&signal->fence); + } + mutex_unlock(&tl->mutex); + } + intel_timeline_put(tl); + if (IS_ERR_OR_NULL(fence)) + return PTR_ERR_OR_ZERO(fence); + + err = 0; + if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence)) + err = i915_sw_fence_await_dma_fence(&rq->submit, + fence, 0, + I915_FENCE_GFP); + dma_fence_put(fence); + + return err; } static intel_engine_mask_t @@ -808,30 +834,23 @@ emit_semaphore_wait(struct i915_request *to, u32 hwsp_offset; int len; u32 *cs; - int err; - GEM_BUG_ON(!from->timeline->has_initial_breadcrumb); GEM_BUG_ON(INTEL_GEN(to->i915) < 8); /* Just emit the first semaphore we see as request space is limited. */ if (already_busywaiting(to) & from->engine->mask) - return i915_sw_fence_await_dma_fence(&to->submit, - &from->fence, 0, - I915_FENCE_GFP); + goto await_fence; - err = i915_request_await_start(to, from); - if (err < 0) - return err; + if (i915_request_await_start(to, from) < 0) + goto await_fence; /* Only submit our spinner after the signaler is running! */ - err = __i915_request_await_execution(to, from, NULL, gfp); - if (err) - return err; + if (__i915_request_await_execution(to, from, NULL, gfp)) + goto await_fence; /* We need to pin the signaler's HWSP until we are finished reading. 
*/ - err = intel_timeline_read_hwsp(from, to, &hwsp_offset); - if (err) - return err; + if (intel_timeline_read_hwsp(from, to, &hwsp_offset)) + goto await_fence; len = 4; if (has_token) @@ -866,6 +885,11 @@ emit_semaphore_wait(struct i915_request *to, to->sched.semaphores |= from->engine->mask; to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN; return 0; + +await_fence: + return i915_sw_fence_await_dma_fence(&to->submit, + &from->fence, 0, + I915_FENCE_GFP); } static int -- cgit v1.2.3 From 9eee0dd7d3a4e2620257f1532cba8f013aee954f Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 19 Sep 2019 12:19:12 +0100 Subject: drm/i915: Protect timeline->hwsp dereferencing As not only is the signal->timeline volatile, so will be acquiring the timeline's HWSP. We must first carefully acquire the timeline from the signaling request and then lock the timeline. With the removal of the struct_mutex serialisation of request construction, we can have multiple timelines active at once, and so we must avoid using the nested mutex lock as it is quite possible for both timelines to be establishing semaphores on the other and so deadlock. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190919111912.21631-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_timeline.c | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 115a24d4a20a..9d436e14ea8d 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -500,17 +500,32 @@ int intel_timeline_read_hwsp(struct i915_request *from, struct i915_request *to, u32 *hwsp) { - struct intel_timeline_cacheline *cl = from->hwsp_cacheline; - struct intel_timeline *tl = from->timeline; + struct intel_timeline *tl; int err; + rcu_read_lock(); + tl = rcu_dereference(from->timeline); + if (i915_request_completed(from) || !kref_get_unless_zero(&tl->kref)) + tl = NULL; + rcu_read_unlock(); + if (!tl) /* already completed */ + return 1; + GEM_BUG_ON(rcu_access_pointer(to->timeline) == tl); - mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING); - err = i915_request_completed(from); - if (!err) + err = -EBUSY; + if (mutex_trylock(&tl->mutex)) { + struct intel_timeline_cacheline *cl = from->hwsp_cacheline; + + if (i915_request_completed(from)) { + err = 1; + goto unlock; + } + err = cacheline_ref(cl, to); - if (!err) { + if (err) + goto unlock; + if (likely(cl == tl->hwsp_cacheline)) { *hwsp = tl->hwsp_offset; } else { /* across a seqno wrap, recover the original offset */ @@ -518,8 +533,11 @@ int intel_timeline_read_hwsp(struct i915_request *from, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES; } + +unlock: + mutex_unlock(&tl->mutex); } - mutex_unlock(&tl->mutex); + intel_timeline_put(tl); return err; } -- cgit v1.2.3 From e5de91e68c5ccaf4ef3e5aa265b469e186ebfa65 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 20 Sep 2019 09:12:54 +0100 Subject: Revert "drm/i915/tgl: Implement Wa_1406941453" Our sanitychecks indicate that while this register is context saved/restore, the HW does not preserve this bit within the register -- it likely doesn't exist, or one of those mythical bits that the architects insist does something despite all appearances to the contrary. 
For reference, SAMPLER_MODE is already in i915_reg.h as GEN10_SAMPLER_MODE and is being setup in icl_ctx_workarounds_init() as opposed to the chosen location here of rcs_engine_wa_init). Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111754 Fixes: 7f0cc34b5349 ("drm/i915/tgl: Implement Wa_1406941453") Testcase: igt/i915_selftest/live_workarounds Signed-off-by: Chris Wilson Cc: Lucas De Marchi Cc: Stuart Summers Cc: Radhakrishna Sripada Cc: Jani Nikula Cc: Joonas Lahtinen Acked-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20190920081254.18389-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 7 ------- drivers/gpu/drm/i915/i915_reg.h | 3 --- 2 files changed, 10 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 25ae60846398..ba65e5018978 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1260,13 +1260,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; - if (IS_GEN(i915, 12)) { - /* Wa_1406941453:tgl */ - wa_masked_en(wal, - SAMPLER_MODE, - SAMPLER_ENABLE_SMALL_PL); - } - if (IS_GEN(i915, 11)) { /* This is not an Wa. Enable for better image quality */ wa_masked_en(wal, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5e3a6178aff4..f8f52ae6cc6f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -8965,9 +8965,6 @@ enum { #define GEN9_DG_MIRROR_FIX_ENABLE (1 << 5) #define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3) -#define SAMPLER_MODE _MMIO(0xe18c) -#define SAMPLER_ENABLE_SMALL_PL (1 << 15) - #define GEN8_ROW_CHICKEN _MMIO(0xe4f0) #define FLOW_CONTROL_ENABLE (1 << 15) #define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1 << 8) -- cgit v1.2.3 From 35d97e43bb217446cdf128c6b6dfec423ed81ddf Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Thu, 19 Sep 2019 13:12:03 -0700 Subject: drm/i915/uc: Update HuC firmware naming convention and load latest HuC Make both GuC and HuC to use "." as the separator. Hardcode the separator in MAKE_UC_FW_PATH. Remove the usage of "ver" from HuC. The current convention being: _uc_..patch.bin Update the versions of HuC being loaded of the platforms. SKL - v2.0.0 BXT - v2.0.0 KBL - v4.0.0 GLK - v4.0.0 CFL - KBL v4.0.0 ICL - v9.0.0 CML - v4.0.0 v2: Remove the separator parameter altogether from __MAKE_UC_FW_PATH.(Daniele) - Squash all firmware update patches (Daniele) v3: s/huc/HuC - Correct the order of platforms - Change REVID of cml to 5(Michal) - Code space changes in huc_def (Daniele) Suggested-by: Daniele Ceraolo Spurio Cc: Michal Wajdeczko Signed-off-by: Anusha Srivatsa Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20190919201204.9691-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 296a82603be0..ea9a807abd4f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -39,26 +39,27 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, * Must be ordered based on platform + revid, from newer to older. 
*/ #define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ - fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \ - fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 8, 4, 3238)) \ - fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ - fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 03, 01, 2893)) \ - fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ - fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 01, 8, 2893)) \ - fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 01, 07, 1398)) - -#define __MAKE_UC_FW_PATH(prefix_, name_, separator_, major_, minor_, patch_) \ + fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \ + fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \ + fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \ + fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \ + fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 4, 0, 0)) \ + fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \ + fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 2, 0, 0)) \ + fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 2, 0, 0)) + +#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \ "i915/" \ __stringify(prefix_) name_ \ - __stringify(major_) separator_ \ - __stringify(minor_) separator_ \ + __stringify(major_) "." \ + __stringify(minor_) "." \ __stringify(patch_) ".bin" #define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \ - __MAKE_UC_FW_PATH(prefix_, "_guc_", ".", major_, minor_, patch_) + __MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_) #define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \ - __MAKE_UC_FW_PATH(prefix_, "_huc_ver", "_", major_, minor_, bld_num_) + __MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_) /* All blobs need to be declared via MODULE_FIRMWARE() */ #define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \ -- cgit v1.2.3 From 646d3dc855212855b708da4d481051a5e8cd1946 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 18 Jul 2019 17:50:43 +0300 Subject: drm/i915: Fix HSW+ DP MSA YCbCr colorspace indication MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Looks like we're currently setting the MSA to xvYCC BT.709 instead of the YCbCr BT.601 claimed by the comment. But even that comment is wrong since we configure the CSC matrix to BT.709. Let's remove the bogus statement from the comment and fix the MSA to indicate YCbCr BT.709 so that it matches the actual pixel data we're transmitting. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190718145053.25808-3-ville.syrjala@linux.intel.com Reviewed-by: Gwan-gyeong Mun --- drivers/gpu/drm/i915/display/intel_ddi.c | 6 ++++-- drivers/gpu/drm/i915/i915_reg.h | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 3e6394139964..fb6cb6aca188 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1730,10 +1730,12 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) /* * As per DP 1.2 spec section 2.3.4.3 while sending * YCBCR 444 signals we should program MSA MISC1/0 fields with - * colorspace information. The output colorspace encoding is BT601. + * colorspace information. 
*/ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) - temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR; + temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR | + TRANS_MSA_YCBCR_BT709; + /* * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication * of Color Encoding Format and Content Color Gamut] while sending diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f8f52ae6cc6f..2a1a1e84d1bc 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9733,7 +9733,8 @@ enum skl_power_gate { #define TRANS_MSA_SYNC_CLK (1 << 0) #define TRANS_MSA_SAMPLING_444 (2 << 1) -#define TRANS_MSA_CLRSP_YCBCR (2 << 3) +#define TRANS_MSA_CLRSP_YCBCR (1 << 3) +#define TRANS_MSA_YCBCR_BT709 (1 << 4) #define TRANS_MSA_6_BPC (0 << 5) #define TRANS_MSA_8_BPC (1 << 5) #define TRANS_MSA_10_BPC (2 << 5) -- cgit v1.2.3 From 791ad5f1e1af2926adef5bb1998eb445681e31f0 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 18 Jul 2019 17:50:44 +0300 Subject: drm/i915: Fix AVI infoframe quantization range for YCbCr output MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We're configuring the AVI infoframe quantization range bits as if we're always transmitting RGB pixels. Let's fix this so that we correctly indicate limited range YCC quantization range when transmitting YCbCr instead. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190718145053.25808-4-ville.syrjala@linux.intel.com Reviewed-by: Gwan-gyeong Mun --- drivers/gpu/drm/i915/display/intel_hdmi.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 04a8718bd2d7..0082c4620246 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -724,11 +724,16 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder, drm_hdmi_avi_infoframe_colorspace(frame, conn_state); - drm_hdmi_avi_infoframe_quant_range(frame, connector, - adjusted_mode, - crtc_state->limited_color_range ? - HDMI_QUANTIZATION_RANGE_LIMITED : - HDMI_QUANTIZATION_RANGE_FULL); + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) { + drm_hdmi_avi_infoframe_quant_range(frame, connector, + adjusted_mode, + crtc_state->limited_color_range ? + HDMI_QUANTIZATION_RANGE_LIMITED : + HDMI_QUANTIZATION_RANGE_FULL); + } else { + frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT; + frame->ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED; + } drm_hdmi_avi_infoframe_content_type(frame, conn_state); -- cgit v1.2.3 From ba2d08c2a9ad96e592ee357ef5d46cbe1dc18bc9 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 18 Jul 2019 17:50:45 +0300 Subject: drm/i915: Extract intel_hdmi_limited_color_range() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pull the code for computing the limited color range setting into a small helper. We'll add a bit more to it later. 
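The extracted helper boils down to the Broadcast RGB property handling below (a condensed sketch of the code being moved; see the diff for the exact version):

	if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
		/* CEA-861-E 5.1: CE modes on an HDMI sink default to limited range */
		return crtc_state->has_hdmi_sink &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		return intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
	}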
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190718145053.25808-5-ville.syrjala@linux.intel.com Reviewed-by: Gwan-gyeong Mun --- drivers/gpu/drm/i915/display/intel_hdmi.c | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 0082c4620246..b781e0d1962e 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2365,6 +2365,24 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, return 0; } +static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + const struct intel_digital_connector_state *intel_conn_state = + to_intel_digital_connector_state(conn_state); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + + if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { + /* See CEA-861-E - 5.1 Default Encoding Parameters */ + return crtc_state->has_hdmi_sink && + drm_default_rgb_quant_range(adjusted_mode) == + HDMI_QUANTIZATION_RANGE_LIMITED; + } else { + return intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED; + } +} + int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -2388,16 +2406,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, if (pipe_config->has_hdmi_sink) pipe_config->has_infoframe = true; - if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { - /* See CEA-861-E - 5.1 Default Encoding Parameters */ - pipe_config->limited_color_range = - pipe_config->has_hdmi_sink && - drm_default_rgb_quant_range(adjusted_mode) == - HDMI_QUANTIZATION_RANGE_LIMITED; - } else { - pipe_config->limited_color_range = - intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED; - } + pipe_config->limited_color_range = + intel_hdmi_limited_color_range(pipe_config, conn_state); if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) pipe_config->pixel_multiplier = 2; -- cgit v1.2.3 From cae154fcaefeb1ddab491d6fb651452a93bbf81d Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 18 Jul 2019 19:45:23 +0300 Subject: drm/i915: Never set limited_color_range=true for YCbCr output MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit crtc_state->limited_color_range only applies to RGB output but we're currently setting it even for YCbCr output. That will lead to conflicting MSA and PIPECONF settings which can mess up the image. Let's make sure limited_color_range stays unset with YCbCr output. Also WARN if we end up with such a bogus combination when programming the MSA MISC bits as it's impossible to even indicate quantization rangle for YCbCr via MSA MISC. YCbCr output is simply assumed to be limited range always. Note that VSC SDP does provide a mechanism for full range YCbCr, so in the future we may want to rethink how we compute/store this state. And for good measure we add the same WARN to the HDMI path. 
v2: s/==/!=/ in the HDMI WARN Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190718164523.11738-1-ville.syrjala@linux.intel.com Reviewed-by: Gwan-gyeong Mun --- drivers/gpu/drm/i915/display/intel_ddi.c | 10 +++++++--- drivers/gpu/drm/i915/display/intel_dp.c | 10 ++++++++++ drivers/gpu/drm/i915/display/intel_hdmi.c | 20 +++++++++++++++++--- 3 files changed, 34 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index fb6cb6aca188..0c0da9f6c2e8 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1706,9 +1706,6 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) temp = TRANS_MSA_SYNC_CLK; - if (crtc_state->limited_color_range) - temp |= TRANS_MSA_CEA_RANGE; - switch (crtc_state->pipe_bpp) { case 18: temp |= TRANS_MSA_6_BPC; @@ -1727,6 +1724,13 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) break; } + /* nonsense combination */ + WARN_ON(crtc_state->limited_color_range && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); + + if (crtc_state->limited_color_range) + temp |= TRANS_MSA_CEA_RANGE; + /* * As per DP 1.2 spec section 2.3.4.3 while sending * YCBCR 444 signals we should program MSA MISC1/0 fields with diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 0c7755e915d9..5cce9459e93c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2150,6 +2150,16 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; + /* + * Our YCbCr output is always limited range. + * crtc_state->limited_color_range only applies to RGB, + * and it must never be set for YCbCr or we risk setting + * some conflicting bits in PIPECONF which will mess up + * the colors on the monitor. + */ + if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) + return false; + if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { /* * See: diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index b781e0d1962e..5ab73331d35d 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -724,6 +724,10 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder, drm_hdmi_avi_infoframe_colorspace(frame, conn_state); + /* nonsense combination */ + WARN_ON(crtc_state->limited_color_range && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) { drm_hdmi_avi_infoframe_quant_range(frame, connector, adjusted_mode, @@ -2373,6 +2377,16 @@ static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_s const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; + /* + * Our YCbCr output is always limited range. + * crtc_state->limited_color_range only applies to RGB, + * and it must never be set for YCbCr or we risk setting + * some conflicting bits in PIPECONF which will mess up + * the colors on the monitor. 
+ */ + if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) + return false; + if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { /* See CEA-861-E - 5.1 Default Encoding Parameters */ return crtc_state->has_hdmi_sink && @@ -2406,9 +2420,6 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, if (pipe_config->has_hdmi_sink) pipe_config->has_infoframe = true; - pipe_config->limited_color_range = - intel_hdmi_limited_color_range(pipe_config, conn_state); - if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) pipe_config->pixel_multiplier = 2; @@ -2419,6 +2430,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, } } + pipe_config->limited_color_range = + intel_hdmi_limited_color_range(pipe_config, conn_state); + if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv)) pipe_config->has_pch_encoder = true; -- cgit v1.2.3 From 60a02311cc588ae92b4732763650f187c506bc3a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 18 Jul 2019 17:50:48 +0300 Subject: drm/i915: Don't look at unrelated PIPECONF bits for interlaced readout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since HSW the PIPECONF progressive vs. interlaced selection is done with just two bits instead of the earlier three. Let's not look at the extra bit on HSW+. Also gen2 doesn't support interlaced displays at all. This is actually fine as is currently because the extra bit is mbz (as are all three bits on gen2). But just to avoid mishaps in the future if the bits get reused let's only look at what's properly defined. v2: constify crtc_state Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190718145053.25808-8-ville.syrjala@linux.intel.com Reviewed-by: Gwan-gyeong Mun --- drivers/gpu/drm/i915/display/intel_display.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index e0033d99f6e3..44460bf70d6b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -8203,6 +8203,21 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) (crtc_state->pipe_src_h - 1)); } +static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (IS_GEN(dev_priv, 2)) + return false; + + if (INTEL_GEN(dev_priv) >= 9 || + IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW; + else + return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; +} + static void intel_get_pipe_timings(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { @@ -8241,7 +8256,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc, pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; - if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) { + if (intel_pipe_is_interlaced(pipe_config)) { pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; pipe_config->base.adjusted_mode.crtc_vtotal += 1; pipe_config->base.adjusted_mode.crtc_vblank_end += 1; -- cgit v1.2.3 From b10d1173eecf811d5293c5097f1025fc20915171 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 18 Jul 2019 17:50:49 +0300 Subject: 
drm/i915: Simplify intel_get_crtc_ycbcr_config() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make intel_get_crtc_ycbcr_config() simpler and rename it to bdw_get_pipemisc_output_format() to better reflect what it does. Also toss in some comments to document that the 4:2:0 PIPECONF bits are glk+ only. They are mbz on earlier platforms so reading them unconditionally is safe however. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190718145053.25808-9-ville.syrjala@linux.intel.com Reviewed-by: Gwan-gyeong Mun --- drivers/gpu/drm/i915/display/intel_display.c | 71 +++++++++++++--------------- drivers/gpu/drm/i915/i915_reg.h | 4 +- 2 files changed, 34 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 44460bf70d6b..95a63acc43a3 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -8727,47 +8727,24 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); } -static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static enum intel_output_format +bdw_get_pipemisc_output_format(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB; - - pipe_config->lspcon_downsampling = false; - - if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { - u32 tmp = I915_READ(PIPEMISC(crtc->pipe)); - - if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { - bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE; - bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND; - - if (ycbcr420_enabled) { - /* We support 4:2:0 in full blend mode only */ - if (!blend) - output = INTEL_OUTPUT_FORMAT_INVALID; - else if (!(IS_GEMINILAKE(dev_priv) || - INTEL_GEN(dev_priv) >= 10)) - output = INTEL_OUTPUT_FORMAT_INVALID; - else - output = INTEL_OUTPUT_FORMAT_YCBCR420; - } else { - /* - * Currently there is no interface defined to - * check user preference between RGB/YCBCR444 - * or YCBCR420. So the only possible case for - * YCBCR444 usage is driving YCBCR420 output - * with LSPCON, when pipe is configured for - * YCBCR444 output and LSPCON takes care of - * downsampling it. - */ - pipe_config->lspcon_downsampling = true; - output = INTEL_OUTPUT_FORMAT_YCBCR444; - } - } - } + u32 tmp; + + tmp = I915_READ(PIPEMISC(crtc->pipe)); - pipe_config->output_format = output; + if (tmp & PIPEMISC_YUV420_ENABLE) { + /* We support 4:2:0 in full blend mode only */ + WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0); + + return INTEL_OUTPUT_FORMAT_YCBCR420; + } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { + return INTEL_OUTPUT_FORMAT_YCBCR444; + } else { + return INTEL_OUTPUT_FORMAT_RGB; + } } static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) @@ -10462,7 +10439,23 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, } intel_get_pipe_src_size(crtc, pipe_config); - intel_get_crtc_ycbcr_config(crtc, pipe_config); + + if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) { + pipe_config->output_format = + bdw_get_pipemisc_output_format(crtc); + + /* + * Currently there is no interface defined to + * check user preference between RGB/YCBCR444 + * or YCBCR420. 
So the only possible case for + * YCBCR444 usage is driving YCBCR420 output + * with LSPCON, when pipe is configured for + * YCBCR444 output and LSPCON takes care of + * downsampling it. + */ + pipe_config->lspcon_downsampling = + pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444; + } pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2a1a1e84d1bc..3cc1baa0b915 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5775,8 +5775,8 @@ enum { #define _PIPE_MISC_A 0x70030 #define _PIPE_MISC_B 0x71030 -#define PIPEMISC_YUV420_ENABLE (1 << 27) -#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) +#define PIPEMISC_YUV420_ENABLE (1 << 27) /* glk+ */ +#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */ #define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */ #define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11) #define PIPEMISC_DITHER_BPC_MASK (7 << 5) -- cgit v1.2.3 From ac0f01cee94702d49aad3ff4d0607024fa34c9dd Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 18 Jul 2019 17:50:50 +0300 Subject: drm/i915: Add PIPECONF YCbCr 4:4:4 programming for HSW MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On HSW the pipe colorspace is configured via PIPECONF (as opposed to PIPEMISC in BDW+). Let's configure+readout that stuff correctly. Enabling YCbCr 4:4:4 output will now be a simple matter of setting crtc_state->output_format appropriately in the encoder .compute_config(). Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190718145053.25808-10-ville.syrjala@linux.intel.com Reviewed-by: Gwan-gyeong Mun --- drivers/gpu/drm/i915/display/intel_display.c | 13 ++++++++++++- drivers/gpu/drm/i915/i915_reg.h | 1 + 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 95a63acc43a3..1bd91a061a0a 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -9444,6 +9444,10 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state) else val |= PIPECONF_PROGRESSIVE; + if (IS_HASWELL(dev_priv) && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) + val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW; + I915_WRITE(PIPECONF(cpu_transcoder), val); POSTING_READ(PIPECONF(cpu_transcoder)); } @@ -10440,7 +10444,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, intel_get_pipe_src_size(crtc, pipe_config); - if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) { + if (IS_HASWELL(dev_priv)) { + u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); + + if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW) + pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; + else + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + } else { pipe_config->output_format = bdw_get_pipemisc_output_format(crtc); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3cc1baa0b915..6f2eda76da4a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5683,6 +5683,7 @@ enum { #define PIPECONF_CXSR_DOWNCLOCK (1 << 16) #define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14) #define PIPECONF_COLOR_RANGE_SELECT (1 << 13) +#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW (1 << 11) /* hsw only */ #define PIPECONF_BPC_MASK (0x7 << 5) #define PIPECONF_8BPC (0 << 5) #define PIPECONF_10BPC (1 << 5) -- 
cgit v1.2.3 From 174d12bcc0874d78ad7fd63ae769cac864ee9f4f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 18 Jul 2019 17:50:51 +0300 Subject: drm/i915: Document ILK+ pipe csc matrix better MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add comments to explain the ilk pipe csc operation a bit better. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190718145053.25808-11-ville.syrjala@linux.intel.com Reviewed-by: Gwan-gyeong Mun --- drivers/gpu/drm/i915/display/intel_color.c | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 318308dc136c..de31b4c251fa 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -42,6 +42,21 @@ #define LEGACY_LUT_LENGTH 256 +/* + * ILK+ csc matrix: + * + * |R/Cr| | c0 c1 c2 | ( |R/Cr| |preoff0| ) |postoff0| + * |G/Y | = | c3 c4 c5 | x ( |G/Y | + |preoff1| ) + |postoff1| + * |B/Cb| | c6 c7 c8 | ( |B/Cb| |preoff2| ) |postoff2| + * + * ILK/SNB don't have explicit post offsets, and instead + * CSC_MODE_YUV_TO_RGB and CSC_BLACK_SCREEN_OFFSET are used: + * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=0 -> 1/2, 0, 1/2 + * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/2, 1/16, 1/2 + * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=0 -> 0, 0, 0 + * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/16, 1/16, 1/16 + */ + /* * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point * format). This macro takes the coefficient we want transformed and the @@ -59,37 +74,38 @@ #define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255) +/* Nop pre/post offsets */ static const u16 ilk_csc_off_zero[3] = {}; +/* Identity matrix */ static const u16 ilk_csc_coeff_identity[9] = { ILK_CSC_COEFF_1_0, 0, 0, 0, ILK_CSC_COEFF_1_0, 0, 0, 0, ILK_CSC_COEFF_1_0, }; +/* Limited range RGB post offsets */ static const u16 ilk_csc_postoff_limited_range[3] = { ILK_CSC_POSTOFF_LIMITED_RANGE, ILK_CSC_POSTOFF_LIMITED_RANGE, ILK_CSC_POSTOFF_LIMITED_RANGE, }; +/* Full range RGB -> limited range RGB matrix */ static const u16 ilk_csc_coeff_limited_range[9] = { ILK_CSC_COEFF_LIMITED_RANGE, 0, 0, 0, ILK_CSC_COEFF_LIMITED_RANGE, 0, 0, 0, ILK_CSC_COEFF_LIMITED_RANGE, }; -/* - * These values are direct register values specified in the Bspec, - * for RGB->YUV conversion matrix (colorspace BT709) - */ +/* BT.709 full range RGB -> limited range YCbCr matrix */ static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = { 0x1e08, 0x9cc0, 0xb528, 0x2ba8, 0x09d8, 0x37e8, 0xbce8, 0x9ad8, 0x1e08, }; -/* Post offset values for RGB->YCBCR conversion */ +/* Limited range YCbCr post offsets */ static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = { 0x0800, 0x0100, 0x0800, }; -- cgit v1.2.3 From af28cc4c289600ad05da85e466517757a26ce216 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 18 Jul 2019 17:50:52 +0300 Subject: drm/i915: Set up ILK/SNB csc unit properly for YCbCr output MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prepare the pipe csc for YCbCr output on ilk/snb. The main difference to IVB+ is the lack of explicit post offsets, and instead we must configure the CSC info RGB->YUV mode (which takes care of offsetting Cb/Cr properly) and enable the "black screen offset" bit to add the required offset to Y. 
And while at it throw some comments around the bit defines to document which platforms have which bits. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190718145053.25808-12-ville.syrjala@linux.intel.com Reviewed-by: Gwan-gyeong Mun --- drivers/gpu/drm/i915/display/intel_color.c | 25 ++++++++++++++++++++----- drivers/gpu/drm/i915/i915_reg.h | 10 +++++----- 2 files changed, 25 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index de31b4c251fa..2de640bd3e73 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1213,6 +1213,21 @@ static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state) return GAMMA_MODE_MODE_10BIT; } +static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state) +{ + /* + * CSC comes after the LUT in RGB->YCbCr mode. + * RGB->YCbCr needs the limited range offsets added to + * the output. RGB limited range output is handled by + * the hw automagically elsewhere. + */ + if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) + return CSC_BLACK_SCREEN_OFFSET; + + return CSC_MODE_YUV_TO_RGB | + CSC_POSITION_BEFORE_GAMMA; +} + static int ilk_color_check(struct intel_crtc_state *crtc_state) { int ret; @@ -1226,15 +1241,15 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state) !crtc_state->c8_planes; /* - * We don't expose the ctm on ilk/snb currently, - * nor do we enable YCbCr output. Also RGB limited - * range output is handled by the hw automagically. + * We don't expose the ctm on ilk/snb currently, also RGB + * limited range output is handled by the hw automagically. */ - crtc_state->csc_enable = false; + crtc_state->csc_enable = + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB; crtc_state->gamma_mode = ilk_gamma_mode(crtc_state); - crtc_state->csc_mode = 0; + crtc_state->csc_mode = ilk_csc_mode(crtc_state); ret = intel_color_add_affected_planes(crtc_state); if (ret) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6f2eda76da4a..90c45fee9ce7 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -10249,11 +10249,11 @@ enum skl_power_gate { #define _PIPE_A_CSC_COEFF_BV 0x49024 #define _PIPE_A_CSC_MODE 0x49028 -#define ICL_CSC_ENABLE (1 << 31) -#define ICL_OUTPUT_CSC_ENABLE (1 << 30) -#define CSC_BLACK_SCREEN_OFFSET (1 << 2) -#define CSC_POSITION_BEFORE_GAMMA (1 << 1) -#define CSC_MODE_YUV_TO_RGB (1 << 0) +#define ICL_CSC_ENABLE (1 << 31) /* icl+ */ +#define ICL_OUTPUT_CSC_ENABLE (1 << 30) /* icl+ */ +#define CSC_BLACK_SCREEN_OFFSET (1 << 2) /* ilk/snb */ +#define CSC_POSITION_BEFORE_GAMMA (1 << 1) /* pre-glk */ +#define CSC_MODE_YUV_TO_RGB (1 << 0) /* ilk/snb */ #define _PIPE_A_CSC_PREOFF_HI 0x49030 #define _PIPE_A_CSC_PREOFF_ME 0x49034 -- cgit v1.2.3 From d1844606fd63af1994e153a8607d4b13709bc395 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 18 Jul 2019 17:50:53 +0300 Subject: drm/i915: Add PIPECONF YCbCr 4:4:4 programming for ILK-IVB MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On ILK-IVB the pipe colorspace is configured via PIPECONF (as opposed to PIPEMISC in BDW+). Let's configure+readout that stuff correctly. Enabling YCbCr 4:4:4 output will now be a simple matter of setting crtc_state->output_format appropriately in the encoder .compute_config(). 
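For illustration, an encoder's .compute_config() hook would then only need
something along these lines to opt in (hypothetical - no encoder does this
yet in this series):

    crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;

with the PIPECONF programming and readout below handling the rest.
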
However, when we do that we must be aware of the fact that YCbCr DP output doesn't seem to work on ILK (resulting image is totally garbled), but on SNB+ it works fine. However HDMI YCbCr output does work correctly even on ILK. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190718145053.25808-13-ville.syrjala@linux.intel.com Reviewed-by: Gwan-gyeong Mun --- drivers/gpu/drm/i915/display/intel_display.c | 21 ++++++++++++++++++++- drivers/gpu/drm/i915/i915_reg.h | 4 ++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 1bd91a061a0a..f5f655cde43a 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -9420,9 +9420,19 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) else val |= PIPECONF_PROGRESSIVE; + /* + * This would end up with an odd purple hue over + * the entire display. Make sure we don't do it. + */ + WARN_ON(crtc_state->limited_color_range && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); + if (crtc_state->limited_color_range) val |= PIPECONF_COLOR_RANGE_SELECT; + if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) + val |= PIPECONF_OUTPUT_COLORSPACE_YUV709; + val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); I915_WRITE(PIPECONF(pipe), val); @@ -9959,7 +9969,6 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, if (!wakeref) return false; - pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = NULL; @@ -9988,6 +9997,16 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, if (tmp & PIPECONF_COLOR_RANGE_SELECT) pipe_config->limited_color_range = true; + switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) { + case PIPECONF_OUTPUT_COLORSPACE_YUV601: + case PIPECONF_OUTPUT_COLORSPACE_YUV709: + pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; + break; + default: + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + break; + } + pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> PIPECONF_GAMMA_MODE_SHIFT; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 90c45fee9ce7..acbda6d89a9b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5683,6 +5683,10 @@ enum { #define PIPECONF_CXSR_DOWNCLOCK (1 << 16) #define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14) #define PIPECONF_COLOR_RANGE_SELECT (1 << 13) +#define PIPECONF_OUTPUT_COLORSPACE_MASK (3 << 11) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_RGB (0 << 11) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_YUV601 (1 << 11) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_YUV709 (2 << 11) /* ilk-ivb */ #define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW (1 << 11) /* hsw only */ #define PIPECONF_BPC_MASK (0x7 << 5) #define PIPECONF_8BPC (0 << 5) -- cgit v1.2.3 From 601734f7aabd46d3a988554e59908d9de7f8b013 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Fri, 13 Sep 2019 08:51:37 +0100 Subject: drm/i915/tgl: s/ss/eu fuse reading support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Gen12 has dual-subslices (DSS), which compared to gen11 subslices have some duplicated resources/paths. 
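(For context, userspace consumes these bits through the
DRM_I915_QUERY_TOPOLOGY_INFO query; a rough, illustrative decode of
(dual-)subslice Y in slice X from a filled struct drm_i915_query_topology_info:

    present = (info->data[info->subslice_offset +
                          X * info->subslice_stride + Y / 8] >> (Y % 8)) & 1;

which is the formula the uapi documentation update below spells out.)
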
Although DSS behave similarly to 2 subslices, instead of splitting this and presenting userspace with bits not directly representative of hardware resources, present userspace with a subslice_mask made up of DSS bits instead. v2: GEM_BUG_ON on mask size (Lionel) Bspec: 29547 Bspec: 12247 Cc: Kelvin Gardiner Cc: Tvrtko Ursulin Cc: Lionel Landwerlin CC: Radhakrishna Sripada Cc: Michel Thierry #v1 Cc: Daniele Ceraolo Spurio Cc: José Roberto de Souza Signed-off-by: Daniele Ceraolo Spurio Signed-off-by: James Ausmus Signed-off-by: Oscar Mateo Signed-off-by: Sudeep Dutt Signed-off-by: Stuart Summers Signed-off-by: Mika Kuoppala Acked-by: Lionel Landwerlin Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20190913075137.18476-2-chris@chris-wilson.co.uk Signed-off-by: Chris Wilson --- drivers/gpu/drm/i915/gt/intel_sseu.h | 9 +--- drivers/gpu/drm/i915/i915_debugfs.c | 3 +- drivers/gpu/drm/i915/i915_reg.h | 2 + drivers/gpu/drm/i915/intel_device_info.c | 83 ++++++++++++++++++++++++-------- include/uapi/drm/i915_drm.h | 6 ++- 5 files changed, 72 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index 4070f6ff1db6..d1d225204f09 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -18,12 +18,13 @@ struct drm_i915_private; #define GEN_MAX_SUBSLICES (8) /* ICL upper bound */ #define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE) #define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES) -#define GEN_MAX_EUS (10) /* HSW upper bound */ +#define GEN_MAX_EUS (16) /* TGL upper bound */ #define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS) struct sseu_dev_info { u8 slice_mask; u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE]; + u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES * GEN_MAX_EU_STRIDE]; u16 eu_total; u8 eu_per_subslice; u8 min_eu_in_pool; @@ -40,12 +41,6 @@ struct sseu_dev_info { u8 ss_stride; u8 eu_stride; - - /* We don't have more than 8 eus per subslice at the moment and as we - * store eus enabled using bits, no need to multiply by eus per - * subslice. 
- */ - u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES]; }; /* diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 43db50095257..b5b449a88cf1 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -3823,7 +3823,8 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, for (ss = 0; ss < info->sseu.max_subslices; ss++) { unsigned int eu_cnt; - if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) + if (info->sseu.has_subslice_pg && + !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) /* skip disabled subslice */ continue; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index acbda6d89a9b..7c64768976ac 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2956,6 +2956,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C) +#define GEN12_GT_DSS_ENABLE _MMIO(0x913C) + #define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050) #define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) #define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 728c881718a2..85e480bdc673 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -182,13 +182,69 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu) return total; } +static void gen11_compute_sseu_info(struct sseu_dev_info *sseu, + u8 s_en, u32 ss_en, u16 eu_en) +{ + int s, ss; + + /* ss_en represents entire subslice mask across all slices */ + GEM_BUG_ON(sseu->max_slices * sseu->max_subslices > + sizeof(ss_en) * BITS_PER_BYTE); + + for (s = 0; s < sseu->max_slices; s++) { + if ((s_en & BIT(s)) == 0) + continue; + + sseu->slice_mask |= BIT(s); + + intel_sseu_set_subslices(sseu, s, ss_en); + + for (ss = 0; ss < sseu->max_subslices; ss++) + if (intel_sseu_has_subslice(sseu, s, ss)) + sseu_set_eus(sseu, s, ss, eu_en); + } + sseu->eu_per_subslice = hweight16(eu_en); + sseu->eu_total = compute_eu_total(sseu); +} + +static void gen12_sseu_info_init(struct drm_i915_private *dev_priv) +{ + struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + u8 s_en; + u32 dss_en; + u16 eu_en = 0; + u8 eu_en_fuse; + int eu; + + /* + * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS. + * Instead of splitting these, provide userspace with an array + * of DSS to more closely represent the hardware resource. 
+ */ + intel_sseu_set_info(sseu, 1, 6, 16); + + s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; + + dss_en = I915_READ(GEN12_GT_DSS_ENABLE); + + /* one bit per pair of EUs */ + eu_en_fuse = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK); + for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++) + if (eu_en_fuse & BIT(eu)) + eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1); + + gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en); + + /* TGL only supports slice-level power gating */ + sseu->has_slice_pg = 1; +} + static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u8 s_en; - u32 ss_en, ss_en_mask; + u32 ss_en; u8 eu_en; - int s; if (IS_ELKHARTLAKE(dev_priv)) intel_sseu_set_info(sseu, 1, 4, 8); @@ -197,26 +253,9 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE); - ss_en_mask = BIT(sseu->max_subslices) - 1; eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK); - for (s = 0; s < sseu->max_slices; s++) { - if (s_en & BIT(s)) { - int ss_idx = sseu->max_subslices * s; - int ss; - - sseu->slice_mask |= BIT(s); - - intel_sseu_set_subslices(sseu, s, (ss_en >> ss_idx) & - ss_en_mask); - - for (ss = 0; ss < sseu->max_subslices; ss++) - if (intel_sseu_has_subslice(sseu, s, ss)) - sseu_set_eus(sseu, s, ss, eu_en); - } - } - sseu->eu_per_subslice = hweight8(eu_en); - sseu->eu_total = compute_eu_total(sseu); + gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en); /* ICL has no power gating restrictions. */ sseu->has_slice_pg = 1; @@ -955,8 +994,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) gen9_sseu_info_init(dev_priv); else if (IS_GEN(dev_priv, 10)) gen10_sseu_info_init(dev_priv); - else if (INTEL_GEN(dev_priv) >= 11) + else if (IS_GEN(dev_priv, 11)) gen11_sseu_info_init(dev_priv); + else if (INTEL_GEN(dev_priv) >= 12) + gen12_sseu_info_init(dev_priv); if (IS_GEN(dev_priv, 6) && intel_vtd_active()) { DRM_INFO("Disabling ppGTT for VT-d support\n"); diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 469dc512cca3..30c542144016 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -2033,8 +2033,10 @@ struct drm_i915_query { * (data[X / 8] >> (X % 8)) & 1 * * - the subslice mask for each slice with one bit per subslice telling - * whether a subslice is available. The availability of subslice Y in slice - * X can be queried with the following formula : + * whether a subslice is available. Gen12 has dual-subslices, which are + * similar to two gen11 subslices. For gen12, this array represents dual- + * subslices. The availability of subslice Y in slice X can be queried + * with the following formula : * * (data[subslice_offset + * X * subslice_stride + -- cgit v1.2.3 From d4f4de5e5ef8efde85febb6876cd3c8ab1631999 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 15 Sep 2019 12:12:39 -0400 Subject: Fix the locking in dcache_readdir() and friends There are two problems in dcache_readdir() - one is that lockless traversal of the list needs non-trivial cooperation of d_alloc() (at least a switch to list_add_rcu(), and probably more than just that) and another is that it assumes that no removal will happen without the directory locked exclusive. Said assumption had always been there, never had been stated explicitly and is violated by several places in the kernel (devpts and selinuxfs). 
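The net effect of the changes below is that every cursor move happens under
the directory dentry's ->d_lock; roughly (simplified from the diff):

    spin_lock(&dentry->d_lock);          /* dentry == the directory */
    list_move(&cursor->d_child, p);      /* list_move_tail() in readdir */
    spin_unlock(&dentry->d_lock);
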
* replacement of next_positive() with different calling conventions: it returns struct list_head * instead of struct dentry *; the latter is passed in and out by reference, grabbing the result and dropping the original value. * scan is under ->d_lock. If we run out of timeslice, cursor is moved after the last position we'd reached and we reschedule; then the scan continues from that place. To avoid livelocks between multiple lseek() (with cursors getting moved past each other, never reaching the real entries) we always skip the cursors, need_resched() or not. * returned list_head * is either ->d_child of dentry we'd found or ->d_subdirs of parent (if we got to the end of the list). * dcache_readdir() and dcache_dir_lseek() switched to new helper. dcache_readdir() always holds a reference to dentry passed to dir_emit() now. Cursor is moved to just before the entry where dir_emit() has failed or into the very end of the list, if we'd run out. * move_cursor() eliminated - it had sucky calling conventions and after fixing that it became simply list_move() (in lseek and scan_positives) or list_move_tail() (in readdir). All operations with the list are under ->d_lock now, and we do not depend upon having all file removals done with parent locked exclusive anymore. Cc: stable@vger.kernel.org Reported-by: "zhengbin (A)" Signed-off-by: Al Viro --- fs/libfs.c | 134 +++++++++++++++++++++++++++++++------------------------------ 1 file changed, 69 insertions(+), 65 deletions(-) diff --git a/fs/libfs.c b/fs/libfs.c index c9b2850c0f7c..8e023b08a240 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -89,58 +89,47 @@ int dcache_dir_close(struct inode *inode, struct file *file) EXPORT_SYMBOL(dcache_dir_close); /* parent is locked at least shared */ -static struct dentry *next_positive(struct dentry *parent, - struct list_head *from, - int count) +/* + * Returns an element of siblings' list. + * We are looking for th positive after

; if + * found, dentry is grabbed and passed to caller via *. + * If no such element exists, the anchor of list is returned + * and * is set to NULL. + */ +static struct list_head *scan_positives(struct dentry *cursor, + struct list_head *p, + loff_t count, + struct dentry **res) { - unsigned *seq = &parent->d_inode->i_dir_seq, n; - struct dentry *res; - struct list_head *p; - bool skipped; - int i; + struct dentry *dentry = cursor->d_parent, *found = NULL; -retry: - i = count; - skipped = false; - n = smp_load_acquire(seq) & ~1; - res = NULL; - rcu_read_lock(); - for (p = from->next; p != &parent->d_subdirs; p = p->next) { + spin_lock(&dentry->d_lock); + while ((p = p->next) != &dentry->d_subdirs) { struct dentry *d = list_entry(p, struct dentry, d_child); - if (!simple_positive(d)) { - skipped = true; - } else if (!--i) { - res = d; - break; + // we must at least skip cursors, to avoid livelocks + if (d->d_flags & DCACHE_DENTRY_CURSOR) + continue; + if (simple_positive(d) && !--count) { + spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED); + if (simple_positive(d)) + found = dget_dlock(d); + spin_unlock(&d->d_lock); + if (likely(found)) + break; + count = 1; + } + if (need_resched()) { + list_move(&cursor->d_child, p); + p = &cursor->d_child; + spin_unlock(&dentry->d_lock); + cond_resched(); + spin_lock(&dentry->d_lock); } } - rcu_read_unlock(); - if (skipped) { - smp_rmb(); - if (unlikely(*seq != n)) - goto retry; - } - return res; -} - -static void move_cursor(struct dentry *cursor, struct list_head *after) -{ - struct dentry *parent = cursor->d_parent; - unsigned n, *seq = &parent->d_inode->i_dir_seq; - spin_lock(&parent->d_lock); - for (;;) { - n = *seq; - if (!(n & 1) && cmpxchg(seq, n, n + 1) == n) - break; - cpu_relax(); - } - __list_del(cursor->d_child.prev, cursor->d_child.next); - if (after) - list_add(&cursor->d_child, after); - else - list_add_tail(&cursor->d_child, &parent->d_subdirs); - smp_store_release(seq, n + 2); - spin_unlock(&parent->d_lock); + spin_unlock(&dentry->d_lock); + dput(*res); + *res = found; + return p; } loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) @@ -158,17 +147,28 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) return -EINVAL; } if (offset != file->f_pos) { + struct dentry *cursor = file->private_data; + struct dentry *to = NULL; + struct list_head *p; + file->f_pos = offset; - if (file->f_pos >= 2) { - struct dentry *cursor = file->private_data; - struct dentry *to; - loff_t n = file->f_pos - 2; - - inode_lock_shared(dentry->d_inode); - to = next_positive(dentry, &dentry->d_subdirs, n); - move_cursor(cursor, to ? 
&to->d_child : NULL); - inode_unlock_shared(dentry->d_inode); + inode_lock_shared(dentry->d_inode); + + if (file->f_pos > 2) { + p = scan_positives(cursor, &dentry->d_subdirs, + file->f_pos - 2, &to); + spin_lock(&dentry->d_lock); + list_move(&cursor->d_child, p); + spin_unlock(&dentry->d_lock); + } else { + spin_lock(&dentry->d_lock); + list_del_init(&cursor->d_child); + spin_unlock(&dentry->d_lock); } + + dput(to); + + inode_unlock_shared(dentry->d_inode); } return offset; } @@ -190,25 +190,29 @@ int dcache_readdir(struct file *file, struct dir_context *ctx) { struct dentry *dentry = file->f_path.dentry; struct dentry *cursor = file->private_data; - struct list_head *p = &cursor->d_child; - struct dentry *next; - bool moved = false; + struct list_head *anchor = &dentry->d_subdirs; + struct dentry *next = NULL; + struct list_head *p; if (!dir_emit_dots(file, ctx)) return 0; if (ctx->pos == 2) - p = &dentry->d_subdirs; - while ((next = next_positive(dentry, p, 1)) != NULL) { + p = anchor; + else + p = &cursor->d_child; + + while ((p = scan_positives(cursor, p, 1, &next)) != anchor) { if (!dir_emit(ctx, next->d_name.name, next->d_name.len, d_inode(next)->i_ino, dt_type(d_inode(next)))) break; - moved = true; - p = &next->d_child; ctx->pos++; } - if (moved) - move_cursor(cursor, p); + spin_lock(&dentry->d_lock); + list_move_tail(&cursor->d_child, p); + spin_unlock(&dentry->d_lock); + dput(next); + return 0; } EXPORT_SYMBOL(dcache_readdir); -- cgit v1.2.3 From bf93b7246548289a797798051198c9664af2931c Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Sun, 22 Sep 2019 01:40:52 +0530 Subject: drm/i915/color: Fix formatting issues Fixed few formatting issues in multi-segmented load_lut(). v3: -style nitting [Jani] -balanced parentheses moved from patch 2 to 1 [Jani] -subject prefix change [Jani] -added commit message [Jani] v4: -rearranged INDEX register write in ilk_read_luts() Signed-off-by: Swati Sharma Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1569096654-24433-2-git-send-email-swati2.sharma@intel.com --- drivers/gpu/drm/i915/display/intel_color.c | 40 ++++++++++++++---------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 2de640bd3e73..91435ba426d0 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -823,11 +823,11 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state) u32 i; /* - * Every entry in the multi-segment LUT is corresponding to a superfine - * segment step which is 1/(8 * 128 * 256). + * Program Super Fine segment (let's call it seg1)... * - * Superfine segment has 9 entries, corresponding to values - * 0, 1/(8 * 128 * 256), 2/(8 * 128 * 256) .... 8/(8 * 128 * 256). + * Super Fine segment's step is 1/(8 * 128 * 256) and it has + * 9 entries, corresponding to values 0, 1/(8 * 128 * 256), + * 2/(8 * 128 * 256) ... 8/(8 * 128 * 256). */ I915_WRITE(PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT); @@ -853,17 +853,17 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) u32 i; /* - * * Program Fine segment (let's call it seg2)... * - * Fine segment's step is 1/(128 * 256) ie 1/(128 * 256), 2/(128*256) - * ... 256/(128*256). So in order to program fine segment of LUT we - * need to pick every 8'th entry in LUT, and program 256 indexes. + * Fine segment's step is 1/(128 * 256) i.e. 1/(128 * 256), 2/(128 * 256) + * ... 
256/(128 * 256). So in order to program fine segment of LUT we + * need to pick every 8th entry in the LUT, and program 256 indexes. * * PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1], - * with seg2[0] being unused by the hardware. + * seg2[0] being unused by the hardware. */ I915_WRITE(PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT); + for (i = 1; i < 257; i++) { entry = &lut[i * 8]; I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry)); @@ -873,8 +873,8 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) /* * Program Coarse segment (let's call it seg3)... * - * Coarse segment's starts from index 0 and it's step is 1/256 ie 0, - * 1/256, 2/256 ...256/256. As per the description of each entry in LUT + * Coarse segment starts from index 0 and it's step is 1/256 ie 0, + * 1/256, 2/256 ... 256/256. As per the description of each entry in LUT * above, we need to pick every (8 * 128)th entry in LUT, and * program 256 of those. * @@ -906,12 +906,10 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state) case GAMMA_MODE_MODE_8BIT: i9xx_load_luts(crtc_state); break; - case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED: icl_program_gamma_superfine_segment(crtc_state); icl_program_gamma_multi_segment(crtc_state); break; - default: bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc); @@ -1743,9 +1741,6 @@ glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index) struct drm_color_lut *blob_data; u32 i, val; - I915_WRITE(PREC_PAL_INDEX(pipe), prec_index | - PAL_PREC_AUTO_INCREMENT); - blob = drm_property_create_blob(&dev_priv->drm, sizeof(struct drm_color_lut) * hw_lut_size, NULL); @@ -1754,6 +1749,9 @@ glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index) blob_data = blob->data; + I915_WRITE(PREC_PAL_INDEX(pipe), prec_index | + PAL_PREC_AUTO_INCREMENT); + for (i = 0; i < hw_lut_size; i++) { val = I915_READ(PREC_PAL_DATA(pipe)); @@ -1819,16 +1817,16 @@ void intel_color_init(struct intel_crtc *crtc) else dev_priv->display.color_commit = ilk_color_commit; - if (INTEL_GEN(dev_priv) >= 11) + if (INTEL_GEN(dev_priv) >= 11) { dev_priv->display.load_luts = icl_load_luts; - else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) { + } else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) { dev_priv->display.load_luts = glk_load_luts; dev_priv->display.read_luts = glk_read_luts; - } else if (INTEL_GEN(dev_priv) >= 8) + } else if (INTEL_GEN(dev_priv) >= 8) { dev_priv->display.load_luts = bdw_load_luts; - else if (INTEL_GEN(dev_priv) >= 7) + } else if (INTEL_GEN(dev_priv) >= 7) { dev_priv->display.load_luts = ivb_load_luts; - else { + } else { dev_priv->display.load_luts = ilk_load_luts; dev_priv->display.read_luts = ilk_read_luts; } -- cgit v1.2.3 From 84af7649188194a74cdd6437235a5e3c86108f0f Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Sun, 22 Sep 2019 01:40:53 +0530 Subject: drm/i915/color: Extract icl_read_luts() For icl+, have hw read out to create hw blob of gamma lut values. icl+ platforms supports multi segmented gamma mode by default, add hw lut creation for this mode. This will be used to validate gamma programming using dsb (display state buffer) which is a tgl specific feature. Major change done-removal of readouts of coarse and fine segments because PAL_PREC_DATA register isn't giving propoer values. 
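(For reference, the sw vs hw LUT cross-check in intel_color_lut_equal()
tolerates a per-channel delta of err = 0xffff >> bit_precision, so a 10-bit
readout allows a difference of up to 0x3f while the 16-bit multi-segmented
readout has to match exactly.)
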
State checker limited only to "fine segment" v2: -readout code for multisegmented gamma has to come up with some intermediate entries that aren't preserved in hardware (Jani N) -linear interpolation (Ville) -moved common code to check gamma_enable to specific funcs, since icl doesn't support that v3: -use u16 instead of __u16 [Jani N] -used single lut [Jani N] -improved and more readable for loops [Jani N] -read values directly to actual locations and then fill gaps [Jani N] -moved cleaning to patch 1 [Jani N] -renamed icl_read_lut_multi_seg() to icl_read_lut_multi_segment to make it similar to icl_load_luts() -renamed icl_compute_interpolated_gamma_blob() to icl_compute_interpolated_gamma_lut_values() more sensible, I guess v4: -removed interpolated func for creating gamma lut values -removed readouts of fine and coarse segments, failure to read PAL_PREC_DATA correctly Signed-off-by: Swati Sharma Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1569096654-24433-3-git-send-email-swati2.sharma@intel.com --- drivers/gpu/drm/i915/display/intel_color.c | 126 +++++++++++++++++++++++++---- drivers/gpu/drm/i915/i915_reg.h | 6 ++ 2 files changed, 117 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 91435ba426d0..b0268a9bde8a 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1402,6 +1402,9 @@ static int icl_color_check(struct intel_crtc_state *crtc_state) static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state) { + if (!crtc_state->gamma_enable) + return 0; + switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: return 8; @@ -1415,6 +1418,9 @@ static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state) static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state) { + if (!crtc_state->gamma_enable) + return 0; + if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0) return 0; @@ -1431,6 +1437,9 @@ static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state) static int chv_gamma_precision(const struct intel_crtc_state *crtc_state) { + if (!crtc_state->gamma_enable) + return 0; + if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA) return 10; else @@ -1439,6 +1448,9 @@ static int chv_gamma_precision(const struct intel_crtc_state *crtc_state) static int glk_gamma_precision(const struct intel_crtc_state *crtc_state) { + if (!crtc_state->gamma_enable) + return 0; + switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: return 8; @@ -1450,21 +1462,39 @@ static int glk_gamma_precision(const struct intel_crtc_state *crtc_state) } } +static int icl_gamma_precision(const struct intel_crtc_state *crtc_state) +{ + if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0) + return 0; + + switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) { + case GAMMA_MODE_MODE_8BIT: + return 8; + case GAMMA_MODE_MODE_10BIT: + return 10; + case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED: + return 16; + default: + MISSING_CASE(crtc_state->gamma_mode); + return 0; + } + +} + int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - if (!crtc_state->gamma_enable) - return 0; - if (HAS_GMCH(dev_priv)) { if (IS_CHERRYVIEW(dev_priv)) return chv_gamma_precision(crtc_state); else return i9xx_gamma_precision(crtc_state); } else { - if 
(IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 11) + return icl_gamma_precision(crtc_state); + else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) return glk_gamma_precision(crtc_state); else if (IS_IRONLAKE(dev_priv)) return ilk_gamma_precision(crtc_state); @@ -1495,6 +1525,20 @@ static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1, return true; } +static bool intel_color_lut_entry_multi_equal(struct drm_color_lut *lut1, + struct drm_color_lut *lut2, + int lut_size, u32 err) +{ + int i; + + for (i = 0; i < 9; i++) { + if (!err_check(&lut1[i], &lut2[i], err)) + return false; + } + + return true; +} + bool intel_color_lut_equal(struct drm_property_blob *blob1, struct drm_property_blob *blob2, u32 gamma_mode, u32 bit_precision) @@ -1513,16 +1557,8 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1, lut_size2 = drm_color_lut_size(blob2); /* check sw and hw lut size */ - switch (gamma_mode) { - case GAMMA_MODE_MODE_8BIT: - case GAMMA_MODE_MODE_10BIT: - if (lut_size1 != lut_size2) - return false; - break; - default: - MISSING_CASE(gamma_mode); - return false; - } + if (lut_size1 != lut_size2) + return false; lut1 = blob1->data; lut2 = blob2->data; @@ -1530,13 +1566,18 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1, err = 0xffff >> bit_precision; /* check sw and hw lut entry to be equal */ - switch (gamma_mode) { + switch (gamma_mode & GAMMA_MODE_MODE_MASK) { case GAMMA_MODE_MODE_8BIT: case GAMMA_MODE_MODE_10BIT: if (!intel_color_lut_entry_equal(lut1, lut2, lut_size2, err)) return false; break; + case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED: + if (!intel_color_lut_entry_multi_equal(lut1, lut2, + lut_size2, err)) + return false; + break; default: MISSING_CASE(gamma_mode); return false; @@ -1776,6 +1817,60 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state) crtc_state->base.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0)); } +static struct drm_property_blob * +icl_read_lut_multi_segment(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + int lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + enum pipe pipe = crtc->pipe; + struct drm_property_blob *blob; + struct drm_color_lut *blob_data; + u32 i, val1, val2; + + blob = drm_property_create_blob(&dev_priv->drm, + sizeof(struct drm_color_lut) * lut_size, + NULL); + if (IS_ERR(blob)) + return NULL; + + blob_data = blob->data; + + I915_WRITE(PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT); + + for (i = 0; i < 9; i++) { + val1 = I915_READ(PREC_PAL_MULTI_SEG_DATA(pipe)); + val2 = I915_READ(PREC_PAL_MULTI_SEG_DATA(pipe)); + + blob_data[i].red = REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_UDW_MASK, val2) << 6 | + REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_LDW_MASK, val1); + blob_data[i].green = REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_UDW_MASK, val2) << 6 | + REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_LDW_MASK, val1); + blob_data[i].blue = REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_UDW_MASK, val2) << 6 | + REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_LDW_MASK, val1); + } + + /* + * FIXME readouts from PAL_PREC_DATA register aren't giving correct values + * in the case of fine and coarse segments. Restricting readouts only for + * super fine segment as of now. 
+ */ + + return blob; +} + +static void icl_read_luts(struct intel_crtc_state *crtc_state) +{ + if ((crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) == + GAMMA_MODE_MODE_8BIT) + crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); + else if ((crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) == + GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED) + crtc_state->base.gamma_lut = icl_read_lut_multi_segment(crtc_state); + else + crtc_state->base.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0)); +} + void intel_color_init(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1819,6 +1914,7 @@ void intel_color_init(struct intel_crtc *crtc) if (INTEL_GEN(dev_priv) >= 11) { dev_priv->display.load_luts = icl_load_luts; + dev_priv->display.read_luts = icl_read_luts; } else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) { dev_priv->display.load_luts = glk_load_luts; dev_priv->display.read_luts = glk_read_luts; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7c64768976ac..9eb71a547103 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -10410,6 +10410,12 @@ enum skl_power_gate { #define _PAL_PREC_MULTI_SEG_DATA_A 0x4A40C #define _PAL_PREC_MULTI_SEG_DATA_B 0x4AC0C +#define PAL_PREC_MULTI_SEG_RED_LDW_MASK REG_GENMASK(29, 24) +#define PAL_PREC_MULTI_SEG_RED_UDW_MASK REG_GENMASK(29, 20) +#define PAL_PREC_MULTI_SEG_GREEN_LDW_MASK REG_GENMASK(19, 14) +#define PAL_PREC_MULTI_SEG_GREEN_UDW_MASK REG_GENMASK(19, 10) +#define PAL_PREC_MULTI_SEG_BLUE_LDW_MASK REG_GENMASK(9, 4) +#define PAL_PREC_MULTI_SEG_BLUE_UDW_MASK REG_GENMASK(9, 0) #define PREC_PAL_MULTI_SEG_INDEX(pipe) _MMIO_PIPE(pipe, \ _PAL_PREC_MULTI_SEG_INDEX_A, \ -- cgit v1.2.3 From 18febcb74ed37abd835da0ab5335df67ed40e4b5 Mon Sep 17 00:00:00 2001 From: Animesh Manna Date: Fri, 20 Sep 2019 17:29:21 +0530 Subject: drm/i915/dsb: feature flag added for display state buffer. Display State Buffer(DSB) is a new hardware capability, introduced in GEN12 display. DSB allows a driver to batch-program display HW registers. Cc: Jani Nikula Cc: Rodrigo Vivi Cc: Shashank Sharma Reviewed-by: Shashank Sharma Signed-off-by: Animesh Manna Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920115930.27829-2-animesh.manna@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/intel_device_info.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4faec2f94e19..84b9b138d7ac 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1863,6 +1863,8 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \ INTEL_INFO(dev_priv)->gen == (n)) +#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb) + /* * Return true if revision is in range [since,until] inclusive. 
* diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index d4c288860aed..0cdc2465534b 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -135,6 +135,7 @@ enum intel_ppgtt_type { func(has_csr); \ func(has_ddi); \ func(has_dp_mst); \ + func(has_dsb); \ func(has_fbc); \ func(has_gmch); \ func(has_hotplug); \ -- cgit v1.2.3 From 67f3b58f3bac975f35c312fd8876edb599cc24be Mon Sep 17 00:00:00 2001 From: Animesh Manna Date: Fri, 20 Sep 2019 17:29:22 +0530 Subject: drm/i915/dsb: DSB context creation. This patch adds a function, which will internally get the gem buffer for DSB engine. The GEM buffer is from global GTT, and is mapped into CPU domain, contains the data + opcode to be feed to DSB engine. v1: Initial version. v2: - removed some unwanted code. (Chris) - Used i915_gem_object_create_internal instead of _shmem. (Chris) - cmd_buf_tail removed and can be derived through vma object. (Chris) v3: vma realeased if i915_gem_object_pin_map() failed. (Shashank) v4: for simplification and based on current usage added single dsb object in intel_crtc. (Shashank) v5: seting NULL to cmd_buf moved outside of mutex in dsb-put(). (Shashank) v6: - refcount machanism added. - Used atomic_add_return and atomic_dec_and_test instead of atomic_inc and atomic_dec. (Jani) Cc: Imre Deak Cc: Michel Thierry Cc: Jani Nikula Cc: Rodrigo Vivi Cc: Shashank Sharma Signed-off-by: Animesh Manna [Jani: added #include while pushing] Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920115930.27829-3-animesh.manna@intel.com --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/display/intel_display_types.h | 3 + drivers/gpu/drm/i915/display/intel_dsb.c | 80 ++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_dsb.h | 33 +++++++++ drivers/gpu/drm/i915/i915_drv.h | 1 + 5 files changed, 118 insertions(+) create mode 100644 drivers/gpu/drm/i915/display/intel_dsb.c create mode 100644 drivers/gpu/drm/i915/display/intel_dsb.h diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 658b930d34a8..6313e7b4bd78 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -172,6 +172,7 @@ i915-y += \ display/intel_display_power.o \ display/intel_dpio_phy.o \ display/intel_dpll_mgr.o \ + display/intel_dsb.o \ display/intel_fbc.o \ display/intel_fifo_underrun.o \ display/intel_frontbuffer.o \ diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index d5cc4b810d9e..49c902b00484 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1033,6 +1033,9 @@ struct intel_crtc { /* scalers available on this crtc */ int num_scalers; + + /* per pipe DSB related info */ + struct intel_dsb dsb; }; struct intel_plane { diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c new file mode 100644 index 000000000000..2ed277670f15 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + * + */ + +#include "i915_drv.h" +#include "intel_display_types.h" + +#define DSB_BUF_SIZE (2 * PAGE_SIZE) + +struct intel_dsb * +intel_dsb_get(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *i915 = to_i915(dev); + struct intel_dsb *dsb = &crtc->dsb; + struct 
drm_i915_gem_object *obj; + struct i915_vma *vma; + intel_wakeref_t wakeref; + + if (!HAS_DSB(i915)) + return dsb; + + if (atomic_add_return(1, &dsb->refcount) != 1) + return dsb; + + dsb->id = DSB1; + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE); + if (IS_ERR(obj)) { + DRM_ERROR("Gem object creation failed\n"); + goto err; + } + + mutex_lock(&i915->drm.struct_mutex); + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + mutex_unlock(&i915->drm.struct_mutex); + if (IS_ERR(vma)) { + DRM_ERROR("Vma creation failed\n"); + i915_gem_object_put(obj); + atomic_dec(&dsb->refcount); + goto err; + } + + dsb->cmd_buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC); + if (IS_ERR(dsb->cmd_buf)) { + DRM_ERROR("Command buffer creation failed\n"); + i915_vma_unpin_and_release(&vma, 0); + dsb->cmd_buf = NULL; + atomic_dec(&dsb->refcount); + goto err; + } + dsb->vma = vma; + +err: + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + return dsb; +} + +void intel_dsb_put(struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + + if (!HAS_DSB(i915)) + return; + + if (WARN_ON(atomic_read(&dsb->refcount) == 0)) + return; + + if (atomic_dec_and_test(&dsb->refcount)) { + mutex_lock(&i915->drm.struct_mutex); + i915_gem_object_unpin_map(dsb->vma->obj); + i915_vma_unpin_and_release(&dsb->vma, 0); + mutex_unlock(&i915->drm.struct_mutex); + dsb->cmd_buf = NULL; + } +} diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h new file mode 100644 index 000000000000..b8639864df50 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef _INTEL_DSB_H +#define _INTEL_DSB_H + +#include + +struct intel_crtc; +struct i915_vma; + +enum dsb_id { + INVALID_DSB = -1, + DSB1, + DSB2, + DSB3, + MAX_DSB_PER_PIPE +}; + +struct intel_dsb { + atomic_t refcount; + enum dsb_id id; + u32 *cmd_buf; + struct i915_vma *vma; +}; + +struct intel_dsb * +intel_dsb_get(struct intel_crtc *crtc); +void intel_dsb_put(struct intel_dsb *dsb); + +#endif diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 84b9b138d7ac..07f1e89a55ca 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -67,6 +67,7 @@ #include "display/intel_display.h" #include "display/intel_display_power.h" #include "display/intel_dpll_mgr.h" +#include "display/intel_dsb.h" #include "display/intel_frontbuffer.h" #include "display/intel_gmbus.h" #include "display/intel_opregion.h" -- cgit v1.2.3 From 061489c65ff57ee9f757d7a519fb9a09e5fbadd6 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 23 Sep 2019 10:09:23 +0300 Subject: drm/i915/dsb: single register write function for DSB. DSB support single register write through opcode 0x1. Generic api created which accumulate all single register write in a batch buffer and once DSB is triggered, it will program all the registers at the same time. v1: Initial version. v2: Unused macro removed and cosmetic changes done. (Shashank) v3: set free_pos to zero in dsb-put() instead dsb-get() and a cosmetic change. (Shashank) v4: macro of indexed-write is moved. 
(Shashank) Cc: Jani Nikula Cc: Rodrigo Vivi Cc: Shashank Sharma Reviewed-by: Shashank Sharma Signed-off-by: Animesh Manna Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920115930.27829-4-animesh.manna@intel.com --- drivers/gpu/drm/i915/display/intel_dsb.c | 29 +++++++++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_dsb.h | 9 +++++++++ 2 files changed, 38 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 2ed277670f15..f94cd6dc98b6 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -9,6 +9,12 @@ #define DSB_BUF_SIZE (2 * PAGE_SIZE) +/* DSB opcodes. */ +#define DSB_OPCODE_SHIFT 24 +#define DSB_OPCODE_MMIO_WRITE 0x1 +#define DSB_BYTE_EN 0xF +#define DSB_BYTE_EN_SHIFT 20 + struct intel_dsb * intel_dsb_get(struct intel_crtc *crtc) { @@ -76,5 +82,28 @@ void intel_dsb_put(struct intel_dsb *dsb) i915_vma_unpin_and_release(&dsb->vma, 0); mutex_unlock(&i915->drm.struct_mutex); dsb->cmd_buf = NULL; + dsb->free_pos = 0; + } +} + +void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val) +{ + struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 *buf = dsb->cmd_buf; + + if (!buf) { + I915_WRITE(reg, val); + return; + } + + if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) { + DRM_DEBUG_KMS("DSB buffer overflow\n"); + return; } + + buf[dsb->free_pos++] = val; + buf[dsb->free_pos++] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) | + (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) | + i915_mmio_reg_offset(reg); } diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h index b8639864df50..839cbf621fa2 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.h +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -8,6 +8,8 @@ #include +#include "i915_reg.h" + struct intel_crtc; struct i915_vma; @@ -24,10 +26,17 @@ struct intel_dsb { enum dsb_id id; u32 *cmd_buf; struct i915_vma *vma; + + /* + * free_pos will point the first free entry position + * and help in calculating tail of command buffer. + */ + int free_pos; }; struct intel_dsb * intel_dsb_get(struct intel_crtc *crtc); void intel_dsb_put(struct intel_dsb *dsb); +void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val); #endif -- cgit v1.2.3 From b27a96ad72fd706adc5dbbfd7bba76f698fe3875 Mon Sep 17 00:00:00 2001 From: Animesh Manna Date: Fri, 20 Sep 2019 17:29:24 +0530 Subject: drm/i915/dsb: Indexed register write function for DSB. DSB can program large set of data through indexed register write (opcode 0x9) in one shot. DSB feature can be used for bulk register programming e.g. gamma lut programming, HDR meta data programming. v1: initial version. v2: simplified code by using ALIGN(). (Chris) v3: ascii table added as code comment. (Shashank) v4: cosmetic changes done. (Shashank) v5: reset ins_start_offset. (Jani) v6: update ins_start_offset in inel_dsb_reg_write. 
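A rough usage sketch (illustrative only - the trigger/commit step and the
first real user, the gamma LUT code, come later in this series):

    struct intel_dsb *dsb = intel_dsb_get(crtc);

    for (i = 0; i < 9; i++) {
        intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
                                    ilk_lut_12p4_ldw(&lut[i]));
        intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe),
                                    ilk_lut_12p4_udw(&lut[i]));
    }

    intel_dsb_put(dsb);
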
Cc: Shashank Sharma Cc: Imre Deak Cc: Jani Nikula Cc: Rodrigo Vivi Reviewed-by: Shashank Sharma Signed-off-by: Animesh Manna Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920115930.27829-5-animesh.manna@intel.com --- drivers/gpu/drm/i915/display/intel_dsb.c | 68 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_dsb.h | 9 +++++ 2 files changed, 77 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index f94cd6dc98b6..faa853b08458 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -12,8 +12,10 @@ /* DSB opcodes. */ #define DSB_OPCODE_SHIFT 24 #define DSB_OPCODE_MMIO_WRITE 0x1 +#define DSB_OPCODE_INDEXED_WRITE 0x9 #define DSB_BYTE_EN 0xF #define DSB_BYTE_EN_SHIFT 20 +#define DSB_REG_VALUE_MASK 0xfffff struct intel_dsb * intel_dsb_get(struct intel_crtc *crtc) @@ -83,9 +85,74 @@ void intel_dsb_put(struct intel_dsb *dsb) mutex_unlock(&i915->drm.struct_mutex); dsb->cmd_buf = NULL; dsb->free_pos = 0; + dsb->ins_start_offset = 0; } } +void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg, + u32 val) +{ + struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 *buf = dsb->cmd_buf; + u32 reg_val; + + if (!buf) { + I915_WRITE(reg, val); + return; + } + + if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) { + DRM_DEBUG_KMS("DSB buffer overflow\n"); + return; + } + + /* + * For example the buffer will look like below for 3 dwords for auto + * increment register: + * +--------------------------------------------------------+ + * | size = 3 | offset &| value1 | value2 | value3 | zero | + * | | opcode | | | | | + * +--------------------------------------------------------+ + * + + + + + + + + * 0 4 8 12 16 20 24 + * Byte + * + * As every instruction is 8 byte aligned the index of dsb instruction + * will start always from even number while dealing with u32 array. If + * we are writing odd no of dwords, Zeros will be added in the end for + * padding. + */ + reg_val = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK; + if (reg_val != i915_mmio_reg_offset(reg)) { + /* Every instruction should be 8 byte aligned. */ + dsb->free_pos = ALIGN(dsb->free_pos, 2); + + dsb->ins_start_offset = dsb->free_pos; + + /* Update the size. */ + buf[dsb->free_pos++] = 1; + + /* Update the opcode and reg. */ + buf[dsb->free_pos++] = (DSB_OPCODE_INDEXED_WRITE << + DSB_OPCODE_SHIFT) | + i915_mmio_reg_offset(reg); + + /* Update the value. */ + buf[dsb->free_pos++] = val; + } else { + /* Update the new value. */ + buf[dsb->free_pos++] = val; + + /* Update the size. 
*/ + buf[dsb->ins_start_offset]++; + } + + /* if number of data words is odd, then the last dword should be 0.*/ + if (dsb->free_pos & 0x1) + buf[dsb->free_pos] = 0; +} + void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val) { struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); @@ -102,6 +169,7 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val) return; } + dsb->ins_start_offset = dsb->free_pos; buf[dsb->free_pos++] = val; buf[dsb->free_pos++] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) | (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) | diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h index 839cbf621fa2..e4dd0ab799ec 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.h +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -32,11 +32,20 @@ struct intel_dsb { * and help in calculating tail of command buffer. */ int free_pos; + + /* + * ins_start_offset will help to store start address of the dsb + * instuction and help in identifying the batch of auto-increment + * register. + */ + u32 ins_start_offset; }; struct intel_dsb * intel_dsb_get(struct intel_crtc *crtc); void intel_dsb_put(struct intel_dsb *dsb); void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val); +void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg, + u32 val); #endif -- cgit v1.2.3 From a6e58d9a2e045e800ac54b838c05656f982c36fe Mon Sep 17 00:00:00 2001 From: Animesh Manna Date: Fri, 20 Sep 2019 17:29:25 +0530 Subject: drm/i915/dsb: Check DSB engine status. As per bspec check for DSB status before programming any of its register. Inline function added to check the dsb status. Cc: Michel Thierry Cc: Jani Nikula Cc: Rodrigo Vivi Cc: Shashank Sharma Reviewed-by: Shashank Sharma Signed-off-by: Animesh Manna Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920115930.27829-6-animesh.manna@intel.com --- drivers/gpu/drm/i915/display/intel_dsb.c | 9 +++++++++ drivers/gpu/drm/i915/i915_reg.h | 7 +++++++ 2 files changed, 16 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index faa853b08458..6000050b18a6 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -17,6 +17,15 @@ #define DSB_BYTE_EN_SHIFT 20 #define DSB_REG_VALUE_MASK 0xfffff +static inline bool is_dsb_busy(struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + return DSB_STATUS & I915_READ(DSB_CTRL(pipe, dsb->id)); +} + struct intel_dsb * intel_dsb_get(struct intel_crtc *crtc) { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9eb71a547103..fba3e8b26296 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -11698,4 +11698,11 @@ enum skl_power_gate { #define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894) #define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port)) +/* This register controls the Display State Buffer (DSB) engines. 
*/ +#define _DSBSL_INSTANCE_BASE 0x70B00 +#define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \ + (pipe) * 0x1000 + (id) * 100) +#define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8) +#define DSB_STATUS (1 << 0) + #endif /* _I915_REG_H_ */ -- cgit v1.2.3 From f7619c47983431fc5ddfa30d8f921a6a40e0223a Mon Sep 17 00:00:00 2001 From: Animesh Manna Date: Fri, 20 Sep 2019 17:29:26 +0530 Subject: drm/i915/dsb: functions to enable/disable DSB engine. DSB will be used for performance improvement for some special scenario. DSB engine will be enabled based on need and after completion of its work will be disabled. Api added for enable/disable operation by using DSB_CTRL register. v1: Initial version. v2: POSTING_READ added after writing control register. (Shashank) v3: cosmetic changes done. (Shashank) Cc: Michel Thierry Cc: Jani Nikula Cc: Rodrigo Vivi Cc: Shashank Sharma Reviewed-by: Shashank Sharma Signed-off-by: Animesh Manna Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920115930.27829-7-animesh.manna@intel.com --- drivers/gpu/drm/i915/display/intel_dsb.c | 40 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 1 + 2 files changed, 41 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 6000050b18a6..6fb4529689f1 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -26,6 +26,46 @@ static inline bool is_dsb_busy(struct intel_dsb *dsb) return DSB_STATUS & I915_READ(DSB_CTRL(pipe, dsb->id)); } +static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 dsb_ctrl; + + dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id)); + if (DSB_STATUS & dsb_ctrl) { + DRM_DEBUG_KMS("DSB engine is busy.\n"); + return false; + } + + dsb_ctrl |= DSB_ENABLE; + I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl); + + POSTING_READ(DSB_CTRL(pipe, dsb->id)); + return true; +} + +static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 dsb_ctrl; + + dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id)); + if (DSB_STATUS & dsb_ctrl) { + DRM_DEBUG_KMS("DSB engine is busy.\n"); + return false; + } + + dsb_ctrl &= ~DSB_ENABLE; + I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl); + + POSTING_READ(DSB_CTRL(pipe, dsb->id)); + return true; +} + struct intel_dsb * intel_dsb_get(struct intel_crtc *crtc) { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index fba3e8b26296..5fcbaaa8f3c6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -11703,6 +11703,7 @@ enum skl_power_gate { #define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \ (pipe) * 0x1000 + (id) * 100) #define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8) +#define DSB_ENABLE (1 << 31) #define DSB_STATUS (1 << 0) #endif /* _I915_REG_H_ */ -- cgit v1.2.3 From 1abf329a713d2772257470873d57794d2404018d Mon Sep 17 00:00:00 2001 From: Animesh Manna Date: Fri, 20 Sep 2019 17:29:27 +0530 Subject: drm/i915/dsb: function to trigger workload execution of DSB. 
Batch buffer will be created through dsb-reg-write function which can have single/multiple request based on usecase and once the buffer is ready commit function will trigger the execution of the batch buffer. All the registers will be updated simultaneously. v1: Initial version. v2: Optimized code few places. (Chris) v3: USed DRM_ERROR for dsb head/tail programming failure. (Shashank) v4: reset ins_start_offset after commit. (Jani) Cc: Imre Deak Cc: Jani Nikula Cc: Rodrigo Vivi Cc: Shashank Sharma Reviewed-by: Shashank Sharma Signed-off-by: Animesh Manna Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920115930.27829-8-animesh.manna@intel.com --- drivers/gpu/drm/i915/display/intel_dsb.c | 43 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_dsb.h | 1 + drivers/gpu/drm/i915/i915_reg.h | 2 ++ 3 files changed, 46 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 6fb4529689f1..f4c0b37683a5 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -224,3 +224,46 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val) (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) | i915_mmio_reg_offset(reg); } + +void intel_dsb_commit(struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum pipe pipe = crtc->pipe; + u32 tail; + + if (!dsb->free_pos) + return; + + if (!intel_dsb_enable_engine(dsb)) + goto reset; + + if (is_dsb_busy(dsb)) { + DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n"); + goto reset; + } + I915_WRITE(DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma)); + + tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES); + if (tail > dsb->free_pos * 4) + memset(&dsb->cmd_buf[dsb->free_pos], 0, + (tail - dsb->free_pos * 4)); + + if (is_dsb_busy(dsb)) { + DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n"); + goto reset; + } + DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n", + i915_ggtt_offset(dsb->vma), tail); + I915_WRITE(DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail); + if (wait_for(!is_dsb_busy(dsb), 1)) { + DRM_ERROR("Timed out waiting for DSB workload completion.\n"); + goto reset; + } + +reset: + dsb->free_pos = 0; + dsb->ins_start_offset = 0; + intel_dsb_disable_engine(dsb); +} diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h index e4dd0ab799ec..6f95c8e909e6 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.h +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -47,5 +47,6 @@ void intel_dsb_put(struct intel_dsb *dsb); void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val); void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val); +void intel_dsb_commit(struct intel_dsb *dsb); #endif diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5fcbaaa8f3c6..8649a3028963 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -11702,6 +11702,8 @@ enum skl_power_gate { #define _DSBSL_INSTANCE_BASE 0x70B00 #define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \ (pipe) * 0x1000 + (id) * 100) +#define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0) +#define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4) #define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8) #define DSB_ENABLE (1 << 
31) #define DSB_STATUS (1 << 0) -- cgit v1.2.3 From 49e3fb7fd880f294643304f3f374d553dd6a4c11 Mon Sep 17 00:00:00 2001 From: Animesh Manna Date: Fri, 20 Sep 2019 17:29:28 +0530 Subject: drm/i915/dsb: Enable gamma lut programming using DSB. Gamma lut programming can be programmed using DSB where bulk register programming can be done using indexed register write which takes number of data and the mmio offset to be written. Currently enabled for 12-bit gamma LUT which is enabled by default and later 8-bit/10-bit will be enabled in future based on need. v1: Initial version. v2: Directly call dsb-api at callsites. (Jani) v3: - modified the code as per single dsb instance per crtc. (Shashank) - Added dsb get/put call in platform specific load_lut hook. (Jani) - removed dsb pointer from dev_priv. (Jani) v4: simplified code by dropping ref-count implementation. (Shashank) Cc: Jani Nikula Cc: Rodrigo Vivi Cc: Shashank Sharma Signed-off-by: Animesh Manna Signed-off-by: Jani Nikula Link: http://patchwork.freedesktop.org/patch/msgid/20190920115930.27829-9-animesh.manna@intel.com --- drivers/gpu/drm/i915/display/intel_color.c | 64 +++++++++++++++++++----------- 1 file changed, 41 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index b0268a9bde8a..402151128e1f 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -627,12 +627,13 @@ static void bdw_load_lut_10(struct intel_crtc *crtc, static void ivb_load_lut_ext_max(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; /* Program the max register to clamp values > 1.0. */ - I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16); - I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16); - I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16); + intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16); + intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16); + intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16); /* * Program the gc max 2 register to clamp values > 1.0. 
@@ -640,10 +641,15 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc) * from 3.0 to 7.0 */ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { - I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16); - I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16); - I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16); + intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 0), + 1 << 16); + intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 1), + 1 << 16); + intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 2), + 1 << 16); } + + intel_dsb_put(dsb); } static void ivb_load_luts(const struct intel_crtc_state *crtc_state) @@ -803,22 +809,23 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state, const struct drm_color_lut *color) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; /* Fixme: LUT entries are 16 bit only, so we can prog 0xFFFF max */ - I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), color->red); - I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), color->green); - I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), color->blue); + intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red); + intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green); + intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue); + intel_dsb_put(dsb); } static void icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_property_blob *blob = crtc_state->base.gamma_lut; const struct drm_color_lut *lut = blob->data; + struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; u32 i; @@ -829,26 +836,29 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state) * 9 entries, corresponding to values 0, 1/(8 * 128 * 256), * 2/(8 * 128 * 256) ... 8/(8 * 128 * 256). */ - I915_WRITE(PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT); + intel_dsb_reg_write(dsb, PREC_PAL_MULTI_SEG_INDEX(pipe), + PAL_PREC_AUTO_INCREMENT); for (i = 0; i < 9; i++) { const struct drm_color_lut *entry = &lut[i]; - I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe), - ilk_lut_12p4_ldw(entry)); - I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe), - ilk_lut_12p4_udw(entry)); + intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe), + ilk_lut_12p4_ldw(entry)); + intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe), + ilk_lut_12p4_udw(entry)); } + + intel_dsb_put(dsb); } static void icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_property_blob *blob = crtc_state->base.gamma_lut; const struct drm_color_lut *lut = blob->data; const struct drm_color_lut *entry; + struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; u32 i; @@ -862,12 +872,13 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) * PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1], * seg2[0] being unused by the hardware. 
*/ - I915_WRITE(PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT); - + intel_dsb_reg_write(dsb, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT); for (i = 1; i < 257; i++) { entry = &lut[i * 8]; - I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry)); - I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry)); + intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe), + ilk_lut_12p4_ldw(entry)); + intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe), + ilk_lut_12p4_udw(entry)); } /* @@ -884,20 +895,24 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) */ for (i = 0; i < 256; i++) { entry = &lut[i * 8 * 128]; - I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry)); - I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry)); + intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe), + ilk_lut_12p4_ldw(entry)); + intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe), + ilk_lut_12p4_udw(entry)); } /* The last entry in the LUT is to be programmed in GCMAX */ entry = &lut[256 * 8 * 128]; icl_load_gcmax(crtc_state, entry); ivb_load_lut_ext_max(crtc); + intel_dsb_put(dsb); } static void icl_load_luts(const struct intel_crtc_state *crtc_state) { const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_dsb *dsb = intel_dsb_get(crtc); if (crtc_state->base.degamma_lut) glk_load_degamma_lut(crtc_state); @@ -914,6 +929,9 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state) bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc); } + + intel_dsb_commit(dsb); + intel_dsb_put(dsb); } static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color) -- cgit v1.2.3 From dfaa6f285bd8eee63866df8b0474858e8a1cc40d Mon Sep 17 00:00:00 2001 From: Animesh Manna Date: Fri, 20 Sep 2019 17:29:29 +0530 Subject: drm/i915/dsb: Enable DSB for gen12. Enabling DSB by setting 1 to has_dsb flag for gen12. Cc: Jani Nikula Cc: Rodrigo Vivi Cc: Shashank Sharma Reviewed-by: Shashank Sharma Signed-off-by: Animesh Manna Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920115930.27829-10-animesh.manna@intel.com --- drivers/gpu/drm/i915/i915_pci.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index fe6941c8fc99..c2faa679658c 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -787,7 +787,8 @@ static const struct intel_device_info intel_elkhartlake_info = { [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ }, \ - .has_global_mocs = 1 + .has_global_mocs = 1, \ + .display.has_dsb = 1 static const struct intel_device_info intel_tigerlake_12_info = { GEN12_FEATURES, -- cgit v1.2.3 From 5dd85e72bc922379fec10969c9390a72aaa4746f Mon Sep 17 00:00:00 2001 From: Animesh Manna Date: Fri, 20 Sep 2019 17:29:30 +0530 Subject: drm/i915/dsb: Documentation for DSB. Added docbook info regarding Display State Buffer(DSB) which is added from gen12 onwards to batch submit display HW programming. v1: Initial version as RFC. 
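For orientation, a caller of the new interface would roughly do the following (illustrative sketch only; intel_dsb_get(), intel_dsb_reg_write(), intel_dsb_indexed_reg_write(), intel_dsb_commit() and intel_dsb_put() are the helpers added earlier in this series, and crtc/pipe/data stand for whatever the caller already has in scope):

	struct intel_dsb *dsb = intel_dsb_get(crtc);

	/* queue register writes into the DSB command buffer */
	intel_dsb_reg_write(dsb, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT);
	intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe), data);

	/* kick the DSB engine (falls back to MMIO on error) and release */
	intel_dsb_commit(dsb);
	intel_dsb_put(dsb);
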
Cc: Jani Nikula Cc: Rodrigo Vivi Cc: Shashank Sharma Reviewed-by: Shashank Sharma Signed-off-by: Animesh Manna Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920115930.27829-11-animesh.manna@intel.com --- Documentation/gpu/i915.rst | 9 +++++ drivers/gpu/drm/i915/display/intel_dsb.c | 68 ++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+) diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst index e249ea7b0ec7..465779670fd4 100644 --- a/Documentation/gpu/i915.rst +++ b/Documentation/gpu/i915.rst @@ -246,6 +246,15 @@ Display PLLs .. kernel-doc:: drivers/gpu/drm/i915/display/intel_dpll_mgr.h :internal: +Display State Buffer +-------------------- + +.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dsb.c + :doc: DSB + +.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dsb.c + :internal: + Memory Management and Command Submission ======================================== diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index f4c0b37683a5..0a0a1536ac96 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -9,6 +9,23 @@ #define DSB_BUF_SIZE (2 * PAGE_SIZE) +/** + * DOC: DSB + * + * A DSB (Display State Buffer) is a queue of MMIO instructions in the memory + * which can be offloaded to DSB HW in Display Controller. DSB HW is a DMA + * engine that can be programmed to download the DSB from memory. + * It allows driver to batch submit display HW programming. This helps to + * reduce loading time and CPU activity, thereby making the context switch + * faster. DSB Support added from Gen12 Intel graphics based platform. + * + * DSB's can access only the pipe, plane, and transcoder Data Island Packet + * registers. + * + * DSB HW can support only register writes (both indexed and direct MMIO + * writes). There are no registers reads possible with DSB HW engine. + */ + /* DSB opcodes. */ #define DSB_OPCODE_SHIFT 24 #define DSB_OPCODE_MMIO_WRITE 0x1 @@ -66,6 +83,17 @@ static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb) return true; } +/** + * intel_dsb_get() - Allocate DSB context and return a DSB instance. + * @crtc: intel_crtc structure to get pipe info. + * + * This function provides handle of a DSB instance, for the further DSB + * operations. + * + * Returns: address of Intel_dsb instance requested for. + * Failure: Returns the same DSB instance, but without a command buffer. + */ + struct intel_dsb * intel_dsb_get(struct intel_crtc *crtc) { @@ -116,6 +144,14 @@ err: return dsb; } +/** + * intel_dsb_put() - To destroy DSB context. + * @dsb: intel_dsb structure. + * + * This function destroys the DSB context allocated by a dsb_get(), by + * unpinning and releasing the VMA object associated with it. + */ + void intel_dsb_put(struct intel_dsb *dsb) { struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); @@ -138,6 +174,19 @@ void intel_dsb_put(struct intel_dsb *dsb) } } +/** + * intel_dsb_indexed_reg_write() -Write to the DSB context for auto + * increment register. + * @dsb: intel_dsb structure. + * @reg: register address. + * @val: value. + * + * This function is used for writing register-value pair in command + * buffer of DSB for auto-increment register. During command buffer overflow, + * a warning is thrown and rest all erroneous condition register programming + * is done through mmio write. 
+ */ + void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val) { @@ -202,6 +251,18 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg, buf[dsb->free_pos] = 0; } +/** + * intel_dsb_reg_write() -Write to the DSB context for normal + * register. + * @dsb: intel_dsb structure. + * @reg: register address. + * @val: value. + * + * This function is used for writing register-value pair in command + * buffer of DSB. During command buffer overflow, a warning is thrown + * and rest all erroneous condition register programming is done + * through mmio write. + */ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val) { struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); @@ -225,6 +286,13 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val) i915_mmio_reg_offset(reg); } +/** + * intel_dsb_commit() - Trigger workload execution of DSB. + * @dsb: intel_dsb structure. + * + * This function is used to do actual write to hardware using DSB. + * On errors, fall back to MMIO. Also this function help to reset the context. + */ void intel_dsb_commit(struct intel_dsb *dsb) { struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); -- cgit v1.2.3 From 8581d51055a08cc6eb061c8856062290e8582ce4 Mon Sep 17 00:00:00 2001 From: "Lowry Li (Arm Technology China)" Date: Wed, 31 Jul 2019 11:04:38 +0000 Subject: drm: Free the writeback_job when it with an empty fb Adds the check if the writeback_job with an empty fb, then it should be freed in atomic_check phase. With this change, the driver users will not check empty fb case any more. So refined accordingly. Signed-off-by: Lowry Li (Arm Technology China) Reviewed-by: Liviu Dudau Reviewed-by: James Qian Wang (Arm Technology China) Signed-off-by: james qian wang (Arm Technology China) Link: https://patchwork.freedesktop.org/patch/msgid/1564571048-15029-2-git-send-email-lowry.li@arm.com --- drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c | 3 +-- drivers/gpu/drm/arm/malidp_mw.c | 4 ++-- drivers/gpu/drm/drm_atomic.c | 13 +++++++++---- drivers/gpu/drm/rcar-du/rcar_du_writeback.c | 4 ++-- drivers/gpu/drm/vc4/vc4_txp.c | 5 ++--- 5 files changed, 16 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c index 2851cac94d86..23fbee268119 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c @@ -43,9 +43,8 @@ komeda_wb_encoder_atomic_check(struct drm_encoder *encoder, struct komeda_data_flow_cfg dflow; int err; - if (!writeback_job || !writeback_job->fb) { + if (!writeback_job) return 0; - } if (!crtc_st->active) { DRM_DEBUG_ATOMIC("Cannot write the composition result out on a inactive CRTC.\n"); diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c index 2e812525025d..a59227b2cdb5 100644 --- a/drivers/gpu/drm/arm/malidp_mw.c +++ b/drivers/gpu/drm/arm/malidp_mw.c @@ -130,7 +130,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, struct drm_framebuffer *fb; int i, n_planes; - if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + if (!conn_state->writeback_job) return 0; fb = conn_state->writeback_job->fb; @@ -247,7 +247,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm, mw_state = to_mw_state(conn_state); - if (conn_state->writeback_job && conn_state->writeback_job->fb) { + if (conn_state->writeback_job) { struct drm_framebuffer *fb 
= conn_state->writeback_job->fb; DRM_DEV_DEBUG_DRIVER(drm->dev, diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 419381abbdd1..14aeaf736321 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -430,10 +430,15 @@ static int drm_atomic_connector_check(struct drm_connector *connector, return -EINVAL; } - if (writeback_job->out_fence && !writeback_job->fb) { - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n", - connector->base.id, connector->name); - return -EINVAL; + if (!writeback_job->fb) { + if (writeback_job->out_fence) { + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n", + connector->base.id, connector->name); + return -EINVAL; + } + + drm_writeback_cleanup_job(writeback_job); + state->writeback_job = NULL; } return 0; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c index ae07290bba6a..04efa78d70b6 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c @@ -147,7 +147,7 @@ static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct drm_framebuffer *fb; - if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + if (!conn_state->writeback_job) return 0; fb = conn_state->writeback_job->fb; @@ -221,7 +221,7 @@ void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc, unsigned int i; state = rcrtc->writeback.base.state; - if (!state || !state->writeback_job || !state->writeback_job->fb) + if (!state || !state->writeback_job) return; fb = state->writeback_job->fb; diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index 96f91c1b4b6e..e92fa1275034 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -229,7 +229,7 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn, int i; conn_state = drm_atomic_get_new_connector_state(state, conn); - if (!conn_state->writeback_job || !conn_state->writeback_job->fb) + if (!conn_state->writeback_job) return 0; crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); @@ -269,8 +269,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn, u32 ctrl; int i; - if (WARN_ON(!conn_state->writeback_job || - !conn_state->writeback_job->fb)) + if (WARN_ON(!conn_state->writeback_job)) return; mode = &conn_state->crtc->state->adjusted_mode; -- cgit v1.2.3 From b1066a123538044117f0a78ba8c6a50cf5a04c86 Mon Sep 17 00:00:00 2001 From: "Lowry Li (Arm Technology China)" Date: Wed, 31 Jul 2019 11:04:45 +0000 Subject: drm: Clear the fence pointer when writeback job signaled During it signals the completion of a writeback job, after releasing the out_fence, we'd clear the pointer. Check if fence left over in drm_writeback_cleanup_job(), release it. 
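The point of clearing the pointer is that drm_writeback_cleanup_job() now also puts any fence still attached to the job, so the signalling path must drop its own reference first; roughly (sketch of the flow in the diff below):

	out_fence = job->out_fence;
	if (out_fence) {
		if (status)
			dma_fence_set_error(out_fence, status);
		dma_fence_signal(out_fence);
		dma_fence_put(out_fence);
		/* prevent a second put in drm_writeback_cleanup_job() */
		job->out_fence = NULL;
	}
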
Signed-off-by: Lowry Li (Arm Technology China) Reviewed-by: Brian Starkey Reviewed-by: James Qian Wang (Arm Technology China) Signed-off-by: james qian wang (Arm Technology China) Link: https://patchwork.freedesktop.org/patch/msgid/1564571048-15029-3-git-send-email-lowry.li@arm.com --- drivers/gpu/drm/drm_writeback.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c index ff138b6ec48b..43d9e3bb3a94 100644 --- a/drivers/gpu/drm/drm_writeback.c +++ b/drivers/gpu/drm/drm_writeback.c @@ -324,6 +324,9 @@ void drm_writeback_cleanup_job(struct drm_writeback_job *job) if (job->fb) drm_framebuffer_put(job->fb); + if (job->out_fence) + dma_fence_put(job->out_fence); + kfree(job); } EXPORT_SYMBOL(drm_writeback_cleanup_job); @@ -366,25 +369,29 @@ drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector, { unsigned long flags; struct drm_writeback_job *job; + struct dma_fence *out_fence; spin_lock_irqsave(&wb_connector->job_lock, flags); job = list_first_entry_or_null(&wb_connector->job_queue, struct drm_writeback_job, list_entry); - if (job) { + if (job) list_del(&job->list_entry); - if (job->out_fence) { - if (status) - dma_fence_set_error(job->out_fence, status); - dma_fence_signal(job->out_fence); - dma_fence_put(job->out_fence); - } - } + spin_unlock_irqrestore(&wb_connector->job_lock, flags); if (WARN_ON(!job)) return; + out_fence = job->out_fence; + if (out_fence) { + if (status) + dma_fence_set_error(out_fence, status); + dma_fence_signal(out_fence); + dma_fence_put(out_fence); + job->out_fence = NULL; + } + INIT_WORK(&job->cleanup_work, cleanup_work); queue_work(system_long_wq, &job->cleanup_work); } -- cgit v1.2.3 From 87c1694533c947bf950251df3da04a32a05ede64 Mon Sep 17 00:00:00 2001 From: Kai Vehmanen Date: Fri, 20 Sep 2019 11:39:18 +0300 Subject: drm/i915: save AUD_FREQ_CNTRL state at audio domain suspend When audio power domain is suspended, the display driver must save state of AUD_FREQ_CNTRL on Tiger Lake and Ice Lake systems. The initial value of the register is set by BIOS and is read by driver during the audio component init sequence. Cc: Jani Nikula Cc: Imre Deak Signed-off-by: Kai Vehmanen Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920083918.27057-1-kai.vehmanen@linux.intel.com --- drivers/gpu/drm/i915/display/intel_audio.c | 17 +++++++++++++++-- drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/i915_reg.h | 2 ++ 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index aac089c79ceb..54638d99e021 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -852,10 +852,17 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); - /* Force CDCLK to 2*BCLK as long as we need audio to be powered. */ - if (dev_priv->audio_power_refcount++ == 0) + if (dev_priv->audio_power_refcount++ == 0) { + if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) { + I915_WRITE(AUD_FREQ_CNTRL, dev_priv->audio_freq_cntrl); + DRM_DEBUG_KMS("restored AUD_FREQ_CNTRL to 0x%x\n", + dev_priv->audio_freq_cntrl); + } + + /* Force CDCLK to 2*BCLK as long as we need audio powered. 
*/ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, true); + } return ret; } @@ -1116,6 +1123,12 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv) return; } + if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) { + dev_priv->audio_freq_cntrl = I915_READ(AUD_FREQ_CNTRL); + DRM_DEBUG_KMS("init value of AUD_FREQ_CNTRL of 0x%x\n", + dev_priv->audio_freq_cntrl); + } + dev_priv->audio_component_registered = true; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 07f1e89a55ca..fcf7423075ef 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1541,6 +1541,7 @@ struct drm_i915_private { */ struct mutex av_mutex; int audio_power_refcount; + u32 audio_freq_cntrl; struct { struct mutex mutex; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8649a3028963..6ecb64c042ef 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9127,6 +9127,8 @@ enum { #define HSW_AUD_CHICKENBIT _MMIO(0x65f10) #define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15) +#define AUD_FREQ_CNTRL _MMIO(0x65900) + /* * HSW - ICL power wells * -- cgit v1.2.3 From 2d6f6f359fd841a6ce3133470859035037ff0f75 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 20 Sep 2019 21:54:16 +0300 Subject: drm/i915: add i915_driver_modeset_remove() For completeness, add counterpart to i915_driver_modeset_probe() and remove the asymmetry in the probe/remove parts. No functional changes. Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920185421.17822-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c95986a984c8..acccf852e654 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -447,6 +447,20 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) return ret; } +static void i915_driver_modeset_remove(struct drm_i915_private *i915) +{ + struct pci_dev *pdev = i915->drm.pdev; + + intel_modeset_driver_remove(&i915->drm); + + intel_bios_driver_remove(i915); + + vga_switcheroo_unregister_client(pdev); + vga_client_register(pdev, NULL, NULL, NULL); + + intel_csr_ucode_fini(i915); +} + static void intel_init_dpio(struct drm_i915_private *dev_priv) { /* @@ -1623,8 +1637,6 @@ out_fini: void i915_driver_remove(struct drm_i915_private *i915) { - struct pci_dev *pdev = i915->drm.pdev; - disable_rpm_wakeref_asserts(&i915->runtime_pm); i915_driver_unregister(i915); @@ -1645,14 +1657,7 @@ void i915_driver_remove(struct drm_i915_private *i915) intel_gvt_driver_remove(i915); - intel_modeset_driver_remove(&i915->drm); - - intel_bios_driver_remove(i915); - - vga_switcheroo_unregister_client(pdev); - vga_client_register(pdev, NULL, NULL, NULL); - - intel_csr_ucode_fini(i915); + i915_driver_modeset_remove(i915); /* Free error state after interrupts are fully disabled. */ cancel_delayed_work_sync(&i915->gt.hangcheck.work); -- cgit v1.2.3 From 5bcd53aa39f3dd054328e0a29a343e0254709908 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 20 Sep 2019 21:54:17 +0300 Subject: drm/i915: pass i915 to i915_driver_modeset_probe() In general, prefer struct drm_i915_private * over struct drm_device * when either will do. Rename the local variable to i915. No functional changes. 
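The two structs convert trivially into each other, which is what keeps these type changes mechanical; a sketch of the idiom used throughout the series:

	/* derive the preferred pointer from a struct drm_device * ... */
	struct drm_i915_private *i915 = to_i915(dev);
	/* ... and go back via &i915->drm where a drm_device * is still expected */
	ret = intel_modeset_init(&i915->drm);
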
Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920185421.17822-2-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 59 ++++++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index acccf852e654..fbf1b9e1e059 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -329,23 +329,22 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { .can_switch = i915_switcheroo_can_switch, }; -static int i915_driver_modeset_probe(struct drm_device *dev) +static int i915_driver_modeset_probe(struct drm_i915_private *i915) { - struct drm_i915_private *dev_priv = to_i915(dev); - struct pci_dev *pdev = dev_priv->drm.pdev; + struct pci_dev *pdev = i915->drm.pdev; int ret; - if (i915_inject_probe_failure(dev_priv)) + if (i915_inject_probe_failure(i915)) return -ENODEV; - if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) { - ret = drm_vblank_init(&dev_priv->drm, - INTEL_NUM_PIPES(dev_priv)); + if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) { + ret = drm_vblank_init(&i915->drm, + INTEL_NUM_PIPES(i915)); if (ret) goto out; } - intel_bios_init(dev_priv); + intel_bios_init(i915); /* If we have > 1 VGA cards, then we need to arbitrate access * to the common VGA resources. @@ -354,7 +353,7 @@ static int i915_driver_modeset_probe(struct drm_device *dev) * then we do not take part in VGA arbitration and the * vga_client_register() fails with -ENODEV. */ - ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode); + ret = vga_client_register(pdev, i915, NULL, i915_vga_set_decode); if (ret && ret != -ENODEV) goto out; @@ -365,56 +364,56 @@ static int i915_driver_modeset_probe(struct drm_device *dev) goto cleanup_vga_client; /* must happen before intel_power_domains_init_hw() on VLV/CHV */ - intel_update_rawclk(dev_priv); + intel_update_rawclk(i915); - intel_power_domains_init_hw(dev_priv, false); + intel_power_domains_init_hw(i915, false); - intel_csr_ucode_init(dev_priv); + intel_csr_ucode_init(i915); - ret = intel_irq_install(dev_priv); + ret = intel_irq_install(i915); if (ret) goto cleanup_csr; - intel_gmbus_setup(dev_priv); + intel_gmbus_setup(i915); /* Important: The output setup functions called by modeset_init need * working irqs for e.g. gmbus and dp aux transfers. */ - ret = intel_modeset_init(dev); + ret = intel_modeset_init(&i915->drm); if (ret) goto cleanup_irq; - ret = i915_gem_init(dev_priv); + ret = i915_gem_init(i915); if (ret) goto cleanup_modeset; - intel_overlay_setup(dev_priv); + intel_overlay_setup(i915); - if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) + if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915)) return 0; - ret = intel_fbdev_init(dev); + ret = intel_fbdev_init(&i915->drm); if (ret) goto cleanup_gem; /* Only enable hotplug handling once the fbdev is fully set up. 
*/ - intel_hpd_init(dev_priv); + intel_hpd_init(i915); - intel_init_ipc(dev_priv); + intel_init_ipc(i915); return 0; cleanup_gem: - i915_gem_suspend(dev_priv); - i915_gem_driver_remove(dev_priv); - i915_gem_driver_release(dev_priv); + i915_gem_suspend(i915); + i915_gem_driver_remove(i915); + i915_gem_driver_release(i915); cleanup_modeset: - intel_modeset_driver_remove(dev); + intel_modeset_driver_remove(&i915->drm); cleanup_irq: - intel_irq_uninstall(dev_priv); - intel_gmbus_teardown(dev_priv); + intel_irq_uninstall(i915); + intel_gmbus_teardown(i915); cleanup_csr: - intel_csr_ucode_fini(dev_priv); - intel_power_domains_driver_remove(dev_priv); + intel_csr_ucode_fini(i915); + intel_power_domains_driver_remove(i915); vga_switcheroo_unregister_client(pdev); cleanup_vga_client: vga_client_register(pdev, NULL, NULL, NULL); @@ -1607,7 +1606,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret < 0) goto out_cleanup_mmio; - ret = i915_driver_modeset_probe(&dev_priv->drm); + ret = i915_driver_modeset_probe(dev_priv); if (ret < 0) goto out_cleanup_hw; -- cgit v1.2.3 From 9980c3c11060e177e6ccb0012371ade24a9543a4 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 20 Sep 2019 21:54:18 +0300 Subject: drm/i915: pass i915 to intel_modeset_driver_remove() In general, prefer struct drm_i915_private * over struct drm_device * when either will do. Rename the local variable to i915. Also propagate to intel_hpd_poll_fini(). No functional changes. Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920185421.17822-3-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 38 +++++++++++++--------------- drivers/gpu/drm/i915/display/intel_display.h | 2 +- drivers/gpu/drm/i915/i915_drv.c | 4 +-- 3 files changed, 21 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index f5f655cde43a..74da12d3911c 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -17114,13 +17114,13 @@ void intel_display_resume(struct drm_device *dev) drm_atomic_state_put(state); } -static void intel_hpd_poll_fini(struct drm_device *dev) +static void intel_hpd_poll_fini(struct drm_i915_private *i915) { struct intel_connector *connector; struct drm_connector_list_iter conn_iter; /* Kill all the work that may have been queued by hpd. */ - drm_connector_list_iter_begin(dev, &conn_iter); + drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (connector->modeset_retry_work.func) cancel_work_sync(&connector->modeset_retry_work); @@ -17132,51 +17132,49 @@ static void intel_hpd_poll_fini(struct drm_device *dev) drm_connector_list_iter_end(&conn_iter); } -void intel_modeset_driver_remove(struct drm_device *dev) +void intel_modeset_driver_remove(struct drm_i915_private *i915) { - struct drm_i915_private *dev_priv = to_i915(dev); - - flush_workqueue(dev_priv->flip_wq); - flush_workqueue(dev_priv->modeset_wq); + flush_workqueue(i915->flip_wq); + flush_workqueue(i915->modeset_wq); - flush_work(&dev_priv->atomic_helper.free_work); - WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); + flush_work(&i915->atomic_helper.free_work); + WARN_ON(!llist_empty(&i915->atomic_helper.free_list)); /* * Interrupts and polling as the first thing to avoid creating havoc. * Too much stuff here (turning of connectors, ...) 
would * experience fancy races otherwise. */ - intel_irq_uninstall(dev_priv); + intel_irq_uninstall(i915); /* * Due to the hpd irq storm handling the hotplug work can re-arm the * poll handlers. Hence disable polling after hpd handling is shut down. */ - intel_hpd_poll_fini(dev); + intel_hpd_poll_fini(i915); /* poll work can call into fbdev, hence clean that up afterwards */ - intel_fbdev_fini(dev_priv); + intel_fbdev_fini(i915); intel_unregister_dsm_handler(); - intel_fbc_global_disable(dev_priv); + intel_fbc_global_disable(i915); /* flush any delayed tasks or pending work */ flush_scheduled_work(); - intel_hdcp_component_fini(dev_priv); + intel_hdcp_component_fini(i915); - drm_mode_config_cleanup(dev); + drm_mode_config_cleanup(&i915->drm); - intel_overlay_cleanup(dev_priv); + intel_overlay_cleanup(i915); - intel_gmbus_teardown(dev_priv); + intel_gmbus_teardown(i915); - destroy_workqueue(dev_priv->flip_wq); - destroy_workqueue(dev_priv->modeset_wq); + destroy_workqueue(i915->flip_wq); + destroy_workqueue(i915->modeset_wq); - intel_fbc_cleanup_cfb(dev_priv); + intel_fbc_cleanup_cfb(i915); } /* diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index b1ae0e59c715..933cbe36bb59 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -578,7 +578,7 @@ void intel_display_print_error_state(struct drm_i915_error_state_buf *e, /* modesetting */ void intel_modeset_init_hw(struct drm_device *dev); int intel_modeset_init(struct drm_device *dev); -void intel_modeset_driver_remove(struct drm_device *dev); +void intel_modeset_driver_remove(struct drm_i915_private *i915); int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state); void intel_display_resume(struct drm_device *dev); void i915_redisable_vga(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index fbf1b9e1e059..9b1788b4589e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -407,7 +407,7 @@ cleanup_gem: i915_gem_driver_remove(i915); i915_gem_driver_release(i915); cleanup_modeset: - intel_modeset_driver_remove(&i915->drm); + intel_modeset_driver_remove(i915); cleanup_irq: intel_irq_uninstall(i915); intel_gmbus_teardown(i915); @@ -450,7 +450,7 @@ static void i915_driver_modeset_remove(struct drm_i915_private *i915) { struct pci_dev *pdev = i915->drm.pdev; - intel_modeset_driver_remove(&i915->drm); + intel_modeset_driver_remove(i915); intel_bios_driver_remove(i915); -- cgit v1.2.3 From 064bd628fde6665c38d8aa02ff7a0a9ad36db482 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 20 Sep 2019 21:54:19 +0300 Subject: drm/i915: abstract intel_panel_sanitize_ssc() from intel_modeset_init() The code is too specific and detailed to have open in a high level function. Abstract away. As a drive-by improvement switch to using enableddisabled() in logging and git rid of a redundant !!. No functional changes. v2: drop the !! 
while at it too (Chris) Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920185421.17822-4-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 39 ++++++++++++++++------------ 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 74da12d3911c..9df7ee5a26bb 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -7552,6 +7552,27 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, constant_n); } +static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) +{ + /* + * There may be no VBT; and if the BIOS enabled SSC we can + * just keep using it to avoid unnecessary flicker. Whereas if the + * BIOS isn't using it, don't assume it will work even if the VBT + * indicates as much. + */ + if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { + bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) & + DREF_SSC1_ENABLE; + + if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { + DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n", + enableddisabled(bios_lvds_use_ssc), + enableddisabled(dev_priv->vbt.lvds_use_ssc)); + dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; + } + } +} + static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) { if (i915_modparams.panel_use_ssc >= 0) @@ -16203,23 +16224,7 @@ int intel_modeset_init(struct drm_device *dev) intel_init_pm(dev_priv); - /* - * There may be no VBT; and if the BIOS enabled SSC we can - * just keep using it to avoid unnecessary flicker. Whereas if the - * BIOS isn't using it, don't assume it will work even if the VBT - * indicates as much. - */ - if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { - bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & - DREF_SSC1_ENABLE); - - if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { - DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n", - bios_lvds_use_ssc ? "en" : "dis", - dev_priv->vbt.lvds_use_ssc ? "en" : "dis"); - dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; - } - } + intel_panel_sanitize_ssc(dev_priv); /* * Maximum framebuffer dimensions, chosen to match -- cgit v1.2.3 From e1a3d9895ddcec20cbb7353e77759589cf010275 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 20 Sep 2019 21:54:20 +0300 Subject: drm/i915: abstract intel_mode_config_init() from intel_modeset_init() The i915 specific mode config init code is too specific and detailed to have open in a high level function. Abstract away. No functional changes. 
v2: nest drm_mode_config_init() in the function too (Chris) Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920185421.17822-5-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 87 +++++++++++++++------------- 1 file changed, 47 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 9df7ee5a26bb..06946f9ffd27 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -16187,6 +16187,52 @@ out: return ret; } +static void intel_mode_config_init(struct drm_i915_private *i915) +{ + struct drm_mode_config *mode_config = &i915->drm.mode_config; + + drm_mode_config_init(&i915->drm); + + mode_config->min_width = 0; + mode_config->min_height = 0; + + mode_config->preferred_depth = 24; + mode_config->prefer_shadow = 1; + + mode_config->allow_fb_modifiers = true; + + mode_config->funcs = &intel_mode_funcs; + + /* + * Maximum framebuffer dimensions, chosen to match + * the maximum render engine surface size on gen4+. + */ + if (INTEL_GEN(i915) >= 7) { + mode_config->max_width = 16384; + mode_config->max_height = 16384; + } else if (INTEL_GEN(i915) >= 4) { + mode_config->max_width = 8192; + mode_config->max_height = 8192; + } else if (IS_GEN(i915, 3)) { + mode_config->max_width = 4096; + mode_config->max_height = 4096; + } else { + mode_config->max_width = 2048; + mode_config->max_height = 2048; + } + + if (IS_I845G(i915) || IS_I865G(i915)) { + mode_config->cursor_width = IS_I845G(i915) ? 64 : 512; + mode_config->cursor_height = 1023; + } else if (IS_GEN(i915, 2)) { + mode_config->cursor_width = 64; + mode_config->cursor_height = 64; + } else { + mode_config->cursor_width = 256; + mode_config->cursor_height = 256; + } +} + int intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -16198,22 +16244,12 @@ int intel_modeset_init(struct drm_device *dev) dev_priv->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); - drm_mode_config_init(dev); + intel_mode_config_init(dev_priv); ret = intel_bw_init(dev_priv); if (ret) return ret; - dev->mode_config.min_width = 0; - dev->mode_config.min_height = 0; - - dev->mode_config.preferred_depth = 24; - dev->mode_config.prefer_shadow = 1; - - dev->mode_config.allow_fb_modifiers = true; - - dev->mode_config.funcs = &intel_mode_funcs; - init_llist_head(&dev_priv->atomic_helper.free_list); INIT_WORK(&dev_priv->atomic_helper.free_work, intel_atomic_helper_free_state_worker); @@ -16226,35 +16262,6 @@ int intel_modeset_init(struct drm_device *dev) intel_panel_sanitize_ssc(dev_priv); - /* - * Maximum framebuffer dimensions, chosen to match - * the maximum render engine surface size on gen4+. - */ - if (INTEL_GEN(dev_priv) >= 7) { - dev->mode_config.max_width = 16384; - dev->mode_config.max_height = 16384; - } else if (INTEL_GEN(dev_priv) >= 4) { - dev->mode_config.max_width = 8192; - dev->mode_config.max_height = 8192; - } else if (IS_GEN(dev_priv, 3)) { - dev->mode_config.max_width = 4096; - dev->mode_config.max_height = 4096; - } else { - dev->mode_config.max_width = 2048; - dev->mode_config.max_height = 2048; - } - - if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { - dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 
64 : 512; - dev->mode_config.cursor_height = 1023; - } else if (IS_GEN(dev_priv, 2)) { - dev->mode_config.cursor_width = 64; - dev->mode_config.cursor_height = 64; - } else { - dev->mode_config.cursor_width = 256; - dev->mode_config.cursor_height = 256; - } - DRM_DEBUG_KMS("%d display pipe%s available.\n", INTEL_NUM_PIPES(dev_priv), INTEL_NUM_PIPES(dev_priv) > 1 ? "s" : ""); -- cgit v1.2.3 From 6cd02e77757a8fb0089053045932455355d2c4fd Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 20 Sep 2019 21:54:21 +0300 Subject: drm/i915: pass i915 to intel_modeset_init() and intel_modeset_init_hw() In general, prefer struct drm_i915_private * over struct drm_device * when either will do. Rename the local variables to i915. No functional changes. Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190920185421.17822-6-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 69 +++++++++++++--------------- drivers/gpu/drm/i915/display/intel_display.h | 4 +- drivers/gpu/drm/i915/i915_drv.c | 4 +- 3 files changed, 37 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 06946f9ffd27..5ecf54270181 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -4360,7 +4360,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) * so need a full re-initialization. */ intel_pps_unlock_regs_wa(dev_priv); - intel_modeset_init_hw(dev); + intel_modeset_init_hw(dev_priv); intel_init_clock_gating(dev_priv); spin_lock_irq(&dev_priv->irq_lock); @@ -16021,13 +16021,11 @@ static void i915_disable_vga(struct drm_i915_private *dev_priv) POSTING_READ(vga_reg); } -void intel_modeset_init_hw(struct drm_device *dev) +void intel_modeset_init_hw(struct drm_i915_private *i915) { - struct drm_i915_private *dev_priv = to_i915(dev); - - intel_update_cdclk(dev_priv); - intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); - dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw; + intel_update_cdclk(i915); + intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK"); + i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw; } /* @@ -16233,42 +16231,42 @@ static void intel_mode_config_init(struct drm_i915_private *i915) } } -int intel_modeset_init(struct drm_device *dev) +int intel_modeset_init(struct drm_i915_private *i915) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_device *dev = &i915->drm; enum pipe pipe; struct intel_crtc *crtc; int ret; - dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); - dev_priv->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | - WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); + i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); + i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | + WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); - intel_mode_config_init(dev_priv); + intel_mode_config_init(i915); - ret = intel_bw_init(dev_priv); + ret = intel_bw_init(i915); if (ret) return ret; - init_llist_head(&dev_priv->atomic_helper.free_list); - INIT_WORK(&dev_priv->atomic_helper.free_work, + init_llist_head(&i915->atomic_helper.free_list); + INIT_WORK(&i915->atomic_helper.free_work, intel_atomic_helper_free_state_worker); - intel_init_quirks(dev_priv); + intel_init_quirks(i915); - intel_fbc_init(dev_priv); + intel_fbc_init(i915); - intel_init_pm(dev_priv); + intel_init_pm(i915); - intel_panel_sanitize_ssc(dev_priv); + 
intel_panel_sanitize_ssc(i915); DRM_DEBUG_KMS("%d display pipe%s available.\n", - INTEL_NUM_PIPES(dev_priv), - INTEL_NUM_PIPES(dev_priv) > 1 ? "s" : ""); + INTEL_NUM_PIPES(i915), + INTEL_NUM_PIPES(i915) > 1 ? "s" : ""); - if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) { - for_each_pipe(dev_priv, pipe) { - ret = intel_crtc_init(dev_priv, pipe); + if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) { + for_each_pipe(i915, pipe) { + ret = intel_crtc_init(i915, pipe); if (ret) { drm_mode_config_cleanup(dev); return ret; @@ -16277,19 +16275,19 @@ int intel_modeset_init(struct drm_device *dev) } intel_shared_dpll_init(dev); - intel_update_fdi_pll_freq(dev_priv); + intel_update_fdi_pll_freq(i915); - intel_update_czclk(dev_priv); - intel_modeset_init_hw(dev); + intel_update_czclk(i915); + intel_modeset_init_hw(i915); - intel_hdcp_component_init(dev_priv); + intel_hdcp_component_init(i915); - if (dev_priv->max_cdclk_freq == 0) - intel_update_max_cdclk(dev_priv); + if (i915->max_cdclk_freq == 0) + intel_update_max_cdclk(i915); /* Just disable it once at startup */ - i915_disable_vga(dev_priv); - intel_setup_outputs(dev_priv); + i915_disable_vga(i915); + intel_setup_outputs(i915); drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); @@ -16308,8 +16306,7 @@ int intel_modeset_init(struct drm_device *dev) * can even allow for smooth boot transitions if the BIOS * fb is large enough for the active pipe configuration. */ - dev_priv->display.get_initial_plane_config(crtc, - &plane_config); + i915->display.get_initial_plane_config(crtc, &plane_config); /* * If the fb is shared between multiple heads, we'll @@ -16323,7 +16320,7 @@ int intel_modeset_init(struct drm_device *dev) * Note that we need to do this after reconstructing the BIOS fb's * since the watermark calculation done here will use pstate->fb. */ - if (!HAS_GMCH(dev_priv)) + if (!HAS_GMCH(i915)) sanitize_watermarks(dev); /* diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 933cbe36bb59..5cea6f8e107a 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -576,8 +576,8 @@ void intel_display_print_error_state(struct drm_i915_error_state_buf *e, struct intel_display_error_state *error); /* modesetting */ -void intel_modeset_init_hw(struct drm_device *dev); -int intel_modeset_init(struct drm_device *dev); +void intel_modeset_init_hw(struct drm_i915_private *i915); +int intel_modeset_init(struct drm_i915_private *i915); void intel_modeset_driver_remove(struct drm_i915_private *i915); int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state); void intel_display_resume(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 9b1788b4589e..dd9613e45723 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -378,7 +378,7 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915) /* Important: The output setup functions called by modeset_init need * working irqs for e.g. gmbus and dp aux transfers. 
*/ - ret = intel_modeset_init(&i915->drm); + ret = intel_modeset_init(i915); if (ret) goto cleanup_irq; @@ -1946,7 +1946,7 @@ static int i915_drm_resume(struct drm_device *dev) i915_gem_resume(dev_priv); - intel_modeset_init_hw(dev); + intel_modeset_init_hw(dev_priv); intel_init_clock_gating(dev_priv); spin_lock_irq(&dev_priv->irq_lock); -- cgit v1.2.3 From ae911b23d2f06c5d0a3e32768bedea857cadd269 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 23 Sep 2019 12:00:53 +0100 Subject: drm/i915/execlists: Relax assertion for a pinned context image on reset A gpu hang can occur at any time, given a sufficiently angry gpu. An example is when it forgets to perform a context-switch at the end of a request, leaving us with a hanging GPU on a completed request. Here, we may retire the request, only leaving its context alive via the active barrier. When we reset the GPU on a completed request, we do not modify its context image (just updating the ring state) and can safely defer the assertion that we have the image pinned and ready to modify. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111639 Fixes: dffa8feb3084 ("drm/i915/perf: Assert locking for i915_init_oa_perf_state()") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190923110056.15176-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 1a2b71157f08..80ded99bcb15 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2383,7 +2383,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); /* Proclaim we have exclusive access to the context image! */ - GEM_BUG_ON(!intel_context_is_pinned(ce)); mutex_acquire(&ce->pin_mutex.dep_map, 2, 0, _THIS_IP_); rq = active_request(rq); @@ -2432,6 +2431,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) * future request will be after userspace has had the opportunity * to recreate its own state. */ + GEM_BUG_ON(!intel_context_is_pinned(ce)); regs = ce->lrc_reg_state; if (engine->pinned_default_state) { memcpy(regs, /* skip restoring the vanilla PPHWSP */ -- cgit v1.2.3 From 3231f8c01121ee1febfd82398ee22f7ff9dc5d76 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 23 Sep 2019 12:00:54 +0100 Subject: drm/i915/execlists: Drop redundant list_del_init(&rq->sched.link) Since amalgamating the queued and active lists in commit 422d7df4f090 ("drm/i915: Replace engine->timeline with a plain list"), performing a i915_request_submit() will remove the request from the execlists priority queue. 
References: 422d7df4f090 ("drm/i915: Replace engine->timeline with a plain list") Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190923110056.15176-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 80ded99bcb15..0a4812ebd184 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2514,7 +2514,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) int i; priolist_for_each_request_consume(rq, rn, p, i) { - list_del_init(&rq->sched.link); __i915_request_submit(rq); dma_fence_set_error(&rq->fence, -EIO); i915_request_mark_complete(rq); -- cgit v1.2.3 From c0bb487dc19fc45dbeede7dcf8f513df51a3cd33 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 23 Sep 2019 12:00:55 +0100 Subject: drm/i915: Only enqueue already completed requests If we are asked to submit a completed request, just move it onto the active-list without modifying it's payload. If we try to emit the modified payload of a completed request, we risk racing with the ring->head update during retirement which may advance the head past our breadcrumb and so we generate a warning for the emission being behind the RING_HEAD. v2: Commentary for the sneaky, shared responsibility between functions. v3: Spelling mistakes and bonus assertion Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190923110056.15176-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 66 +++++++++++++++++++++++-------------- drivers/gpu/drm/i915/i915_request.c | 44 ++++++++++++++++++------- drivers/gpu/drm/i915/i915_request.h | 2 +- 3 files changed, 74 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 0a4812ebd184..bc9badd9c046 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -799,6 +799,17 @@ static bool can_merge_rq(const struct i915_request *prev, GEM_BUG_ON(prev == next); GEM_BUG_ON(!assert_priority_queue(prev, next)); + /* + * We do not submit known completed requests. Therefore if the next + * request is already completed, we can pretend to merge it in + * with the previous context (and we will skip updating the ELSP + * and tracking). Thus hopefully keeping the ELSP full with active + * contexts, despite the best efforts of preempt-to-busy to confuse + * us. 
+ */ + if (i915_request_completed(next)) + return true; + if (!can_merge_ctx(prev->hw_context, next->hw_context)) return false; @@ -1181,21 +1192,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine) continue; } - if (i915_request_completed(rq)) { - ve->request = NULL; - ve->base.execlists.queue_priority_hint = INT_MIN; - rb_erase_cached(rb, &execlists->virtual); - RB_CLEAR_NODE(rb); - - rq->engine = engine; - __i915_request_submit(rq); - - spin_unlock(&ve->base.active.lock); - - rb = rb_first_cached(&execlists->virtual); - continue; - } - if (last && !can_merge_rq(last, rq)) { spin_unlock(&ve->base.active.lock); return; /* leave this for another */ @@ -1249,11 +1245,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine) GEM_BUG_ON(ve->siblings[0] != engine); } - __i915_request_submit(rq); - if (!i915_request_completed(rq)) { + if (__i915_request_submit(rq)) { submit = true; last = rq; } + + /* + * Hmm, we have a bunch of virtual engine requests, + * but the first one was already completed (thanks + * preempt-to-busy!). Keep looking at the veng queue + * until we have no more relevant requests (i.e. + * the normal submit queue has higher priority). + */ + if (!submit) { + spin_unlock(&ve->base.active.lock); + rb = rb_first_cached(&execlists->virtual); + continue; + } } spin_unlock(&ve->base.active.lock); @@ -1266,8 +1274,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) int i; priolist_for_each_request_consume(rq, rn, p, i) { - if (i915_request_completed(rq)) - goto skip; + bool merge = true; /* * Can we combine this request with the current port? @@ -1308,14 +1315,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine) ctx_single_port_submission(rq->hw_context)) goto done; - *port = execlists_schedule_in(last, port - execlists->pending); - port++; + merge = false; } - last = rq; - submit = true; -skip: - __i915_request_submit(rq); + if (__i915_request_submit(rq)) { + if (!merge) { + *port = execlists_schedule_in(last, port - execlists->pending); + port++; + last = NULL; + } + + GEM_BUG_ON(last && + !can_merge_ctx(last->hw_context, + rq->hw_context)); + + submit = true; + last = rq; + } } rb_erase_cached(&p->node, &execlists->queue); diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 9bd8538b1907..a8916412759b 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -377,9 +377,10 @@ __i915_request_await_execution(struct i915_request *rq, return 0; } -void __i915_request_submit(struct i915_request *request) +bool __i915_request_submit(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; + bool result = false; GEM_TRACE("%s fence %llx:%lld, current %d\n", engine->name, @@ -389,6 +390,25 @@ void __i915_request_submit(struct i915_request *request) GEM_BUG_ON(!irqs_disabled()); lockdep_assert_held(&engine->active.lock); + /* + * With the advent of preempt-to-busy, we frequently encounter + * requests that we have unsubmitted from HW, but left running + * until the next ack and so have completed in the meantime. On + * resubmission of that completed request, we can skip + * updating the payload, and execlists can even skip submitting + * the request. + * + * We must remove the request from the caller's priority queue, + * and the caller must only call us when the request is in their + * priority queue, under the active.lock. 
This ensures that the + * request has *not* yet been retired and we can safely move + * the request into the engine->active.list where it will be + * dropped upon retiring. (Otherwise if resubmit a *retired* + * request, this would be a horrible use-after-free.) + */ + if (i915_request_completed(request)) + goto xfer; + if (i915_gem_context_is_banned(request->gem_context)) i915_request_skip(request, -EIO); @@ -412,13 +432,18 @@ void __i915_request_submit(struct i915_request *request) i915_sw_fence_signaled(&request->semaphore)) engine->saturated |= request->sched.semaphores; - /* We may be recursing from the signal callback of another i915 fence */ - spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); + engine->emit_fini_breadcrumb(request, + request->ring->vaddr + request->postfix); + + trace_i915_request_execute(request); + engine->serial++; + result = true; - list_move_tail(&request->sched.link, &engine->active.requests); +xfer: /* We may be recursing from the signal callback of another i915 fence */ + spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); - GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)); - set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags); + if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) + list_move_tail(&request->sched.link, &engine->active.requests); if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) && @@ -429,12 +454,7 @@ void __i915_request_submit(struct i915_request *request) spin_unlock(&request->lock); - engine->emit_fini_breadcrumb(request, - request->ring->vaddr + request->postfix); - - engine->serial++; - - trace_i915_request_execute(request); + return result; } void i915_request_submit(struct i915_request *request) diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index b18f49528ded..ec5bb4c2e5ae 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -292,7 +292,7 @@ int i915_request_await_execution(struct i915_request *rq, void i915_request_add(struct i915_request *rq); -void __i915_request_submit(struct i915_request *request); +bool __i915_request_submit(struct i915_request *request); void i915_request_submit(struct i915_request *request); void i915_request_skip(struct i915_request *request, int error); -- cgit v1.2.3 From 0d7cf7bc15e75bf79f2f65d61d19f896609f816a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 23 Sep 2019 12:00:56 +0100 Subject: drm/i915/execlists: Refactor -EIO markup of hung requests Pull setting -EIO on the hung requests into its own utility function. Having allowed ourselves to short-circuit submission of completed requests, we can now do the mark_eio() prior to submission and avoid some redundant operations. 
Signed-off-by: Chris Wilson Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190923110056.15176-4-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index bc9badd9c046..bff4c6a735cf 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -234,6 +234,13 @@ static void execlists_init_reg_state(u32 *reg_state, struct intel_engine_cs *engine, struct intel_ring *ring); +static void mark_eio(struct i915_request *rq) +{ + if (!i915_request_signaled(rq)) + dma_fence_set_error(&rq->fence, -EIO); + i915_request_mark_complete(rq); +} + static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine) { return (i915_ggtt_offset(engine->status_page.vma) + @@ -2517,12 +2524,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) __execlists_reset(engine, true); /* Mark all executing requests as skipped. */ - list_for_each_entry(rq, &engine->active.requests, sched.link) { - if (!i915_request_signaled(rq)) - dma_fence_set_error(&rq->fence, -EIO); - - i915_request_mark_complete(rq); - } + list_for_each_entry(rq, &engine->active.requests, sched.link) + mark_eio(rq); /* Flush the queued requests to the timeline list (for retiring). */ while ((rb = rb_first_cached(&execlists->queue))) { @@ -2530,9 +2533,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) int i; priolist_for_each_request_consume(rq, rn, p, i) { + mark_eio(rq); __i915_request_submit(rq); - dma_fence_set_error(&rq->fence, -EIO); - i915_request_mark_complete(rq); } rb_erase_cached(&p->node, &execlists->queue); @@ -2548,13 +2550,14 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) RB_CLEAR_NODE(rb); spin_lock(&ve->base.active.lock); - if (ve->request) { - ve->request->engine = engine; - __i915_request_submit(ve->request); - dma_fence_set_error(&ve->request->fence, -EIO); - i915_request_mark_complete(ve->request); + rq = fetch_and_zero(&ve->request); + if (rq) { + mark_eio(rq); + + rq->engine = engine; + __i915_request_submit(rq); + ve->base.execlists.queue_priority_hint = INT_MIN; - ve->request = NULL; } spin_unlock(&ve->base.active.lock); } -- cgit v1.2.3 From dd8882a255388ba66175098b1560d4f81c100d30 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 23 Sep 2019 10:32:37 -0700 Subject: clk: ti: dra7: Fix mcasp8 clock bits There's a typo for dra7 mcasp clkctrl bit, it should be 22 like the other macasp instances, and not 24. And in dra7xx_clks[] we have the bits wrong way around. 
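[Decoding note for the entries touched below, an interpretation for illustration rather than text from the patch: TI clkctrl clock names follow "<domain>-clkctrl:<register offset>:<bit shift>", so the mcasp8 entry

	"l4per2-clkctrl:0184:22"   /* l4per2 domain, reg offset 0x184, bit 22 */

names bit 22 of that clkctrl register, which is why the data entry moves from :24 to :22 and the two dra7xx_clks[] aliases swap bits.]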
Fixes: dffa9051d546 ("clk: ti: dra7: add new clkctrl data") Cc: linux-clk@vger.kernel.org Cc: Michael Turquette Cc: Stephen Boyd Cc: Suman Anna Cc: Tero Kristo Acked-by: Stephen Boyd Signed-off-by: Tony Lindgren --- drivers/clk/ti/clk-7xx.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index b57fe09b428b..9dd6185a4b4e 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -683,7 +683,7 @@ static const struct omap_clkctrl_reg_data dra7_l4per2_clkctrl_regs[] __initconst { DRA7_L4PER2_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0154:22" }, { DRA7_L4PER2_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:015c:22" }, { DRA7_L4PER2_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:016c:22" }, - { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:24" }, + { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:22" }, { DRA7_L4PER2_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:018c:22" }, { DRA7_L4PER2_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01c4:24" }, { DRA7_L4PER2_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01d4:24" }, @@ -828,8 +828,8 @@ static struct ti_dt_clk dra7xx_clks[] = { DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per2-clkctrl:01f8:22"), DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per2-clkctrl:01fc:24"), DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per2-clkctrl:01fc:22"), - DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:22"), - DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:24"), + DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:24"), + DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:22"), DT_CLK(NULL, "mmc1_clk32k", "l3init-clkctrl:0008:8"), DT_CLK(NULL, "mmc1_fclk_div", "l3init-clkctrl:0008:25"), DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"), -- cgit v1.2.3 From 2d3c8ba3cffa00f76bedb713c8c2126c82d8cd13 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 23 Sep 2019 10:32:38 -0700 Subject: ARM: dts: Fix wrong clocks for dra7 mcasp The ahclkr clkctrl clock bit 28 only exists for mcasp 1 and 2 on dra7. This causes the following warning on beagle-x15: ti-sysc 48468000.target-module: could not add child clock ahclkr: -19 Also the mcasp clkctrl clock bits are wrong: For mcasp1 and 2 we have four clocks at bits 28, 24, 22 and 0: bit 28 is ahclkr bit 24 is ahclkx bit 22 is auxclk bit 0 is fck For mcasp3 to 8 we have three clocks at bits 24, 22 and 0. bit 24 is ahclkx bit 22 is auxclk bit 0 is fck We do not have currently mapped auxclk at bit 22 for the drivers, that can be added if needed. 
Fixes: 5241ccbf2819 ("ARM: dts: Add missing ranges for dra7 mcasp l3 ports") Cc: Suman Anna Cc: Tero Kristo Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/dra7-l4.dtsi | 48 ++++++++++++++++++------------------------ 1 file changed, 21 insertions(+), 27 deletions(-) diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi index 21e5914fdd62..099d6fe2a57a 100644 --- a/arch/arm/boot/dts/dra7-l4.dtsi +++ b/arch/arm/boot/dts/dra7-l4.dtsi @@ -2762,7 +2762,7 @@ interrupt-names = "tx", "rx"; dmas = <&edma_xbar 129 1>, <&edma_xbar 128 1>; dma-names = "tx", "rx"; - clocks = <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 22>, + clocks = <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 0>, <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 24>, <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 28>; clock-names = "fck", "ahclkx", "ahclkr"; @@ -2799,8 +2799,8 @@ interrupt-names = "tx", "rx"; dmas = <&edma_xbar 131 1>, <&edma_xbar 130 1>; dma-names = "tx", "rx"; - clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 22>, - <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 24>, + clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 0>, + <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 24>, <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 28>; clock-names = "fck", "ahclkx", "ahclkr"; status = "disabled"; @@ -2818,9 +2818,8 @@ ; /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 0>, - <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>, - <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 28>; - clock-names = "fck", "ahclkx", "ahclkr"; + <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>; + clock-names = "fck", "ahclkx"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x68000 0x2000>, @@ -2836,7 +2835,7 @@ interrupt-names = "tx", "rx"; dmas = <&edma_xbar 133 1>, <&edma_xbar 132 1>; dma-names = "tx", "rx"; - clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 22>, + clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 0>, <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>; clock-names = "fck", "ahclkx"; status = "disabled"; @@ -2854,9 +2853,8 @@ ; /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 0>, - <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>, - <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 28>; - clock-names = "fck", "ahclkx", "ahclkr"; + <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>; + clock-names = "fck", "ahclkx"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x6c000 0x2000>, @@ -2872,7 +2870,7 @@ interrupt-names = "tx", "rx"; dmas = <&edma_xbar 135 1>, <&edma_xbar 134 1>; dma-names = "tx", "rx"; - clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 22>, + clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 0>, <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>; clock-names = "fck", "ahclkx"; status = "disabled"; @@ -2890,9 +2888,8 @@ ; /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 0>, - <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>, - <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 28>; - clock-names = "fck", "ahclkx", "ahclkr"; + <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>; + clock-names = "fck", "ahclkx"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x70000 0x2000>, @@ -2908,7 +2905,7 @@ interrupt-names = "tx", "rx"; dmas = <&edma_xbar 137 1>, <&edma_xbar 136 1>; dma-names = "tx", "rx"; - clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 22>, + clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 0>, <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>; clock-names = "fck", 
"ahclkx"; status = "disabled"; @@ -2926,9 +2923,8 @@ ; /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 0>, - <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>, - <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 28>; - clock-names = "fck", "ahclkx", "ahclkr"; + <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>; + clock-names = "fck", "ahclkx"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x74000 0x2000>, @@ -2944,7 +2940,7 @@ interrupt-names = "tx", "rx"; dmas = <&edma_xbar 139 1>, <&edma_xbar 138 1>; dma-names = "tx", "rx"; - clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 22>, + clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 0>, <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>; clock-names = "fck", "ahclkx"; status = "disabled"; @@ -2962,9 +2958,8 @@ ; /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 0>, - <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>, - <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 28>; - clock-names = "fck", "ahclkx", "ahclkr"; + <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>; + clock-names = "fck", "ahclkx"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x78000 0x2000>, @@ -2980,7 +2975,7 @@ interrupt-names = "tx", "rx"; dmas = <&edma_xbar 141 1>, <&edma_xbar 140 1>; dma-names = "tx", "rx"; - clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 22>, + clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 0>, <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>; clock-names = "fck", "ahclkx"; status = "disabled"; @@ -2998,9 +2993,8 @@ ; /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */ clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 0>, - <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>, - <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 28>; - clock-names = "fck", "ahclkx", "ahclkr"; + <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>; + clock-names = "fck", "ahclkx"; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x7c000 0x2000>, @@ -3016,7 +3010,7 @@ interrupt-names = "tx", "rx"; dmas = <&edma_xbar 143 1>, <&edma_xbar 142 1>; dma-names = "tx", "rx"; - clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 22>, + clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 0>, <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>; clock-names = "fck", "ahclkx"; status = "disabled"; -- cgit v1.2.3 From f1f028ff89cb0d37db299d48e7b2ce19be040d52 Mon Sep 17 00:00:00 2001 From: "H. Nikolaus Schaller" Date: Fri, 20 Sep 2019 18:11:15 +0200 Subject: DTS: ARM: gta04: introduce legacy spi-cs-high to make display work again commit 6953c57ab172 "gpio: of: Handle SPI chipselect legacy bindings" did introduce logic to centrally handle the legacy spi-cs-high property in combination with cs-gpios. This assumes that the polarity of the CS has to be inverted if spi-cs-high is missing, even and especially if non-legacy GPIO_ACTIVE_HIGH is specified. The DTS for the GTA04 was orginally introduced under the assumption that there is no need for spi-cs-high if the gpio is defined with proper polarity GPIO_ACTIVE_HIGH. This was not a problem until gpiolib changed the interpretation of GPIO_ACTIVE_HIGH and missing spi-cs-high. The effect is that the missing spi-cs-high is now interpreted as CS being low (despite GPIO_ACTIVE_HIGH) which turns off the SPI interface when the panel is to be programmed by the panel driver. Therefore, we have to add the redundant and legacy spi-cs-high property to properly activate CS. Cc: stable@vger.kernel.org Signed-off-by: H. 
Nikolaus Schaller Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/omap3-gta04.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi index b295f6fad2a5..954c216140ad 100644 --- a/arch/arm/boot/dts/omap3-gta04.dtsi +++ b/arch/arm/boot/dts/omap3-gta04.dtsi @@ -120,6 +120,7 @@ spi-max-frequency = <100000>; spi-cpol; spi-cpha; + spi-cs-high; backlight= <&backlight>; label = "lcd"; -- cgit v1.2.3 From 6171e58b1ff5c41632d8fb87f0e6bd003ed34c13 Mon Sep 17 00:00:00 2001 From: Clinton A Taylor Date: Fri, 20 Sep 2019 13:58:05 -0700 Subject: drm/i915/tgl: Add missing ddi clock select during DP init sequence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Step 4.b was complete missed because it is only required to TC and TBT. Bspec: 49190 Reviewed-by: Imre Deak Reviewed-by: Lucas De Marchi Signed-off-by: Clinton A Taylor Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190920205810.211048-2-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 0c0da9f6c2e8..dfd6b064cbc3 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3230,11 +3230,14 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_edp_panel_on(intel_dp); /* - * 1.b, 3. and 4. is done before tgl_ddi_pre_enable_dp() by: + * 1.b, 3. and 4.a is done before tgl_ddi_pre_enable_dp() by: * haswell_crtc_enable()->intel_encoders_pre_pll_enable() and * haswell_crtc_enable()->intel_enable_shared_dpll() */ + /* 4.b */ + intel_ddi_clk_select(encoder, crtc_state); + /* 5. */ if (!intel_phy_is_tc(dev_priv, phy) || dig_port->tc_mode != TC_PORT_TBT_ALT) -- cgit v1.2.3 From 31d9ae9d734226b2ce3a1bc2ce1ed29a6997d556 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Fri, 20 Sep 2019 13:58:06 -0700 Subject: drm/i915/tgl: Finish modular FIA support on registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If platform supports and has modular FIA is enabled, the registers bits also change, example: reading TC3 registers with modular FIA enabled, driver should read from FIA2 but with TC1 bits offsets. It is described in BSpec 50231 for DFLEXDPSP, other registers don't have the BSpec description but testing in real hardware have proven that it had moved for all other registers too. 
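[A rough sketch of the mapping this patch caches per digital port (illustrative; the real code is tc_port_load_fia_params() below): with modular FIA each FIA instance serves two TC ports, so

	fia     = tc_port / 2;	/* e.g. TC3 (tc_port == 2) -> FIA2 */
	fia_idx = tc_port % 2;	/* ... but using the TC1 (index 0) bit offsets */

and without modular FIA everything stays in FIA1 with fia_idx == tc_port.]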
v2: - Caching index in tc_phy_fia_idx, instead of calculate it each time v3: - Setting tc_phy_fia and tc_phy_fia_idx in the same function Cc: Lucas De Marchi Reviewed-by: Lucas De Marchi Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190920205810.211048-3-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_display_types.h | 1 + drivers/gpu/drm/i915/display/intel_tc.c | 72 ++++++++++++---------- drivers/gpu/drm/i915/i915_reg.h | 28 ++++----- 3 files changed, 53 insertions(+), 48 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 49c902b00484..6b0a646f0170 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1288,6 +1288,7 @@ struct intel_digital_port { char tc_port_name[8]; enum tc_port_mode tc_mode; enum phy_fia tc_phy_fia; + u8 tc_phy_fia_idx; void (*write_infoframe)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 85743a43bee2..f923f9cbd33c 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -23,32 +23,38 @@ static const char *tc_port_mode_name(enum tc_port_mode mode) return names[mode]; } -static bool has_modular_fia(struct drm_i915_private *i915) -{ - if (!INTEL_INFO(i915)->display.has_modular_fia) - return false; - - return intel_uncore_read(&i915->uncore, - PORT_TX_DFLEXDPSP(FIA1)) & MODULAR_FIA_MASK; -} - -static enum phy_fia tc_port_to_fia(struct drm_i915_private *i915, - enum tc_port tc_port) +static void +tc_port_load_fia_params(struct drm_i915_private *i915, + struct intel_digital_port *dig_port) { - if (!has_modular_fia(i915)) - return FIA1; + enum port port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(i915, port); + u32 modular_fia; + + if (INTEL_INFO(i915)->display.has_modular_fia) { + modular_fia = intel_uncore_read(&i915->uncore, + PORT_TX_DFLEXDPSP(FIA1)); + modular_fia &= MODULAR_FIA_MASK; + } else { + modular_fia = 0; + } /* * Each Modular FIA instance houses 2 TC ports. In SOC that has more * than two TC ports, there are multiple instances of Modular FIA. 
*/ - return tc_port / 2; + if (modular_fia) { + dig_port->tc_phy_fia = tc_port / 2; + dig_port->tc_phy_fia_idx = tc_port % 2; + } else { + dig_port->tc_phy_fia = FIA1; + dig_port->tc_phy_fia_idx = tc_port; + } } u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); struct intel_uncore *uncore = &i915->uncore; u32 lane_mask; @@ -57,8 +63,8 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) WARN_ON(lane_mask == 0xffffffff); - return (lane_mask & DP_LANE_ASSIGNMENT_MASK(tc_port)) >> - DP_LANE_ASSIGNMENT_SHIFT(tc_port); + lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx); + return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx); } int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) @@ -95,7 +101,6 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, int required_lanes) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; struct intel_uncore *uncore = &i915->uncore; u32 val; @@ -104,19 +109,21 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, val = intel_uncore_read(uncore, PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia)); - val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port); + val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx); switch (required_lanes) { case 1: - val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) : - DFLEXDPMLE1_DPMLETC_ML0(tc_port); + val |= lane_reversal ? + DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) : + DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx); break; case 2: - val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) : - DFLEXDPMLE1_DPMLETC_ML1_0(tc_port); + val |= lane_reversal ? 
+ DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) : + DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx); break; case 4: - val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port); + val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx); break; default: MISSING_CASE(required_lanes); @@ -164,9 +171,9 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) return mask; } - if (val & TC_LIVE_STATE_TBT(tc_port)) + if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx)) mask |= BIT(TC_PORT_TBT_ALT); - if (val & TC_LIVE_STATE_TC(tc_port)) + if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx)) mask |= BIT(TC_PORT_DP_ALT); if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) @@ -182,7 +189,6 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); struct intel_uncore *uncore = &i915->uncore; u32 val; @@ -194,14 +200,13 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) return false; } - return val & DP_PHY_MODE_STATUS_COMPLETED(tc_port); + return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx); } static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, bool enable) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); struct intel_uncore *uncore = &i915->uncore; u32 val; @@ -215,9 +220,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, return false; } - val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx); if (!enable) - val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx); intel_uncore_write(uncore, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val); @@ -232,7 +237,6 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); struct intel_uncore *uncore = &i915->uncore; u32 val; @@ -244,7 +248,7 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port) return true; } - return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)); + return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx)); } /* @@ -540,5 +544,5 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) mutex_init(&dig_port->tc_lock); dig_port->tc_legacy_port = is_legacy; dig_port->tc_link_refcount = 0; - dig_port->tc_phy_fia = tc_port_to_fia(i915, tc_port); + tc_port_load_fia_params(i915, dig_port); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 6ecb64c042ef..4236cb5d12df 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2166,13 +2166,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off)) /* ICL PHY DFLEX registers */ -#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0) -#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port))) -#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port))) -#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port))) -#define 
DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port))) -#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port))) -#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port))) +#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0) +#define DFLEXDPMLE1_DPMLETC_MASK(idx) (0xf << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML0(idx) (1 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML1_0(idx) (3 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML3(idx) (8 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML3_2(idx) (12 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML3_0(idx) (15 << (4 * (idx))) /* BXT PHY Ref registers */ #define _PORT_REF_DW3_A 0x16218C @@ -11688,17 +11688,17 @@ enum skl_power_gate { #define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0) #define MODULAR_FIA_MASK (1 << 4) -#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6)) -#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5)) -#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8) -#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8)) -#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8)) +#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6)) +#define TC_LIVE_STATE_TC(idx) (1 << ((idx) * 8 + 5)) +#define DP_LANE_ASSIGNMENT_SHIFT(idx) ((idx) * 8) +#define DP_LANE_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 8)) +#define DP_LANE_ASSIGNMENT(idx, x) ((x) << ((idx) * 8)) #define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890) -#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port)) +#define DP_PHY_MODE_STATUS_COMPLETED(idx) (1 << (idx)) #define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894) -#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port)) +#define DP_PHY_MODE_STATUS_NOT_SAFE(idx) (1 << (idx)) /* This register controls the Display State Buffer (DSB) engines. */ #define _DSBSL_INSTANCE_BASE 0x70B00 -- cgit v1.2.3 From 57bd1798b1809a003c5354403a94f2ecb02bf3ae Mon Sep 17 00:00:00 2001 From: Clinton A Taylor Date: Fri, 20 Sep 2019 13:58:07 -0700 Subject: drm/i915/tgl/pll: Set update_active_dpll MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 24a7bfe0c2d7 ("drm/i915: Keep the TypeC port mode fixed when the port is active") added this new hook while in parallel TGL upstream was happening and this was missed. Without this driver will crash when TC DDI is added and driver is preparing to do a full modeset. 
Cc: Lucas De Marchi Cc: Imre Deak Reviewed-by: Lucas De Marchi Signed-off-by: Clinton A Taylor Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190920205810.211048-4-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 98288edf88f0..84e734d44828 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -3479,6 +3479,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = { .dpll_info = tgl_plls, .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, + .update_active_dpll = icl_update_active_dpll, .dump_hw_state = icl_dump_hw_state, }; -- cgit v1.2.3 From f15a4eb18264a6eba30fb32f21ae76b010a691d5 Mon Sep 17 00:00:00 2001 From: Vandita Kulkarni Date: Fri, 20 Sep 2019 13:58:08 -0700 Subject: drm/i915/tgl: Add dkl phy registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These are the registers needed to program Dekel phy. Some register definitions will be reused from MG PHY definitions, so adding a comment on those. Bspec: 49295 Reviewed-by: Lucas De Marchi Signed-off-by: Vandita Kulkarni Signed-off-by: Clinton A Taylor Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190920205810.211048-5-jose.souza@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 162 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 162 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4236cb5d12df..a69c19aae5bb 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -10115,6 +10115,168 @@ enum skl_power_gate { _TGL_DPLL1_CFGCR1, \ _TGL_TBTPLL_CFGCR1) +#define _DKL_PHY1_BASE 0x168000 +#define _DKL_PHY2_BASE 0x169000 +#define _DKL_PHY3_BASE 0x16A000 +#define _DKL_PHY4_BASE 0x16B000 +#define _DKL_PHY5_BASE 0x16C000 +#define _DKL_PHY6_BASE 0x16D000 + +/* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */ +#define _DKL_PLL_DIV0 0x200 +#define DKL_PLL_DIV0_INTEG_COEFF(x) ((x) << 16) +#define DKL_PLL_DIV0_INTEG_COEFF_MASK (0x1F << 16) +#define DKL_PLL_DIV0_PROP_COEFF(x) ((x) << 12) +#define DKL_PLL_DIV0_PROP_COEFF_MASK (0xF << 12) +#define DKL_PLL_DIV0_FBPREDIV_SHIFT (8) +#define DKL_PLL_DIV0_FBPREDIV(x) ((x) << DKL_PLL_DIV0_FBPREDIV_SHIFT) +#define DKL_PLL_DIV0_FBPREDIV_MASK (0xF << DKL_PLL_DIV0_FBPREDIV_SHIFT) +#define DKL_PLL_DIV0_FBDIV_INT(x) ((x) << 0) +#define DKL_PLL_DIV0_FBDIV_INT_MASK (0xFF << 0) +#define DKL_PLL_DIV0(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_PLL_DIV0) + +#define _DKL_PLL_DIV1 0x204 +#define DKL_PLL_DIV1_IREF_TRIM(x) ((x) << 16) +#define DKL_PLL_DIV1_IREF_TRIM_MASK (0x1F << 16) +#define DKL_PLL_DIV1_TDC_TARGET_CNT(x) ((x) << 0) +#define DKL_PLL_DIV1_TDC_TARGET_CNT_MASK (0xFF << 0) +#define DKL_PLL_DIV1(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_PLL_DIV1) + +#define _DKL_PLL_SSC 0x210 +#define DKL_PLL_SSC_IREF_NDIV_RATIO(x) ((x) << 29) +#define DKL_PLL_SSC_IREF_NDIV_RATIO_MASK (0x7 << 29) +#define DKL_PLL_SSC_STEP_LEN(x) ((x) << 16) +#define DKL_PLL_SSC_STEP_LEN_MASK (0xFF << 16) +#define DKL_PLL_SSC_STEP_NUM(x) ((x) << 11) +#define DKL_PLL_SSC_STEP_NUM_MASK (0x7 << 11) +#define DKL_PLL_SSC_EN (1 << 9) +#define DKL_PLL_SSC(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) 
+ \ + _DKL_PLL_SSC) + +#define _DKL_PLL_BIAS 0x214 +#define DKL_PLL_BIAS_FRAC_EN_H (1 << 30) +#define DKL_PLL_BIAS_FBDIV_SHIFT (8) +#define DKL_PLL_BIAS_FBDIV_FRAC(x) ((x) << DKL_PLL_BIAS_FBDIV_SHIFT) +#define DKL_PLL_BIAS_FBDIV_FRAC_MASK (0x3FFFFF << DKL_PLL_BIAS_FBDIV_SHIFT) +#define DKL_PLL_BIAS(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_PLL_BIAS) + +#define _DKL_PLL_TDC_COLDST_BIAS 0x218 +#define DKL_PLL_TDC_SSC_STEP_SIZE(x) ((x) << 8) +#define DKL_PLL_TDC_SSC_STEP_SIZE_MASK (0xFF << 8) +#define DKL_PLL_TDC_FEED_FWD_GAIN(x) ((x) << 0) +#define DKL_PLL_TDC_FEED_FWD_GAIN_MASK (0xFF << 0) +#define DKL_PLL_TDC_COLDST_BIAS(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_PLL_TDC_COLDST_BIAS) + +#define _DKL_REFCLKIN_CTL 0x12C +/* Bits are the same as MG_REFCLKIN_CTL */ +#define DKL_REFCLKIN_CTL(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_REFCLKIN_CTL) + +#define _DKL_CLKTOP2_HSCLKCTL 0xD4 +/* Bits are the same as MG_CLKTOP2_HSCLKCTL */ +#define DKL_CLKTOP2_HSCLKCTL(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_CLKTOP2_HSCLKCTL) + +#define _DKL_CLKTOP2_CORECLKCTL1 0xD8 +/* Bits are the same as MG_CLKTOP2_CORECLKCTL1 */ +#define DKL_CLKTOP2_CORECLKCTL1(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_CLKTOP2_CORECLKCTL1) + +#define _DKL_TX_DPCNTL0 0x2C0 +#define DKL_TX_PRESHOOT_COEFF(x) ((x) << 13) +#define DKL_TX_PRESHOOT_COEFF_MASK (0x1f << 13) +#define DKL_TX_DE_EMPHASIS_COEFF(x) ((x) << 8) +#define DKL_TX_DE_EMPAHSIS_COEFF_MASK (0x1f << 8) +#define DKL_TX_VSWING_CONTROL(x) ((x) << 0) +#define DKL_TX_VSWING_CONTROL_MASK (0x7 << 0) +#define DKL_TX_DPCNTL0(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_TX_DPCNTL0) + +#define _DKL_TX_DPCNTL1 0x2C4 +/* Bits are the same as DKL_TX_DPCNTRL0 */ +#define DKL_TX_DPCNTL1(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_TX_DPCNTL1) + +#define _DKL_TX_DPCNTL2 0x2C8 +#define DKL_TX_DP20BITMODE (1 << 2) +#define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_TX_DPCNTL2) + +#define _DKL_TX_FW_CALIB 0x2F8 +#define DKL_TX_CFG_DISABLE_WAIT_INIT (1 << 7) +#define DKL_TX_FW_CALIB(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_TX_FW_CALIB) + +#define _DKL_TX_DW17 0xDC4 +#define DKL_TX_DW17(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_TX_DW17) + +#define _DKL_TX_DW18 0xDC8 +#define DKL_TX_DW18(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_TX_DW18) + +#define _DKL_DP_MODE 0xA0 +#define DKL_DP_MODE_CFG_GAONPWR_GATING (1 << 1) +#define DKL_DP_MODE_CFG_DIGPWR_GATING (1 << 2) +#define DKL_DP_MODE_CFG_CLNPWR_GATING (1 << 3) +#define DKL_DP_MODE_CFG_TRPWR_GATING (1 << 4) +#define DKL_DP_MODE_CFG_TR2PWR_GATING (1 << 5) +#define DKL_DP_MODE_CFG_GATING_CTRL_MASK (0x1f << 1) +#define DKL_DP_MODE_CFG_DP_X1_MODE (1 << 6) +#define DKL_DP_MODE_CFG_DP_X2_MODE (1 << 7) +#define DKL_DP_MODE(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_DP_MODE) + +#define _DKL_CMN_UC_DW27 0x36C +#define DKL_CMN_UC_DW27_UC_HEALTH (0x1 << 15) +#define DKL_CMN_UC_DW_27(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_CMN_UC_DW27) + +/* + * Each Dekel PHY is addressed through a 4KB aperture. 
Each PHY has more than + * 4KB of register space, so a separate index is programmed in HIP_INDEX_REG0 + * or HIP_INDEX_REG1, based on the port number, to set the upper 2 address + * bits that point the 4KB window into the full PHY register space. + */ +#define _HIP_INDEX_REG0 0x1010A0 +#define _HIP_INDEX_REG1 0x1010A4 +#define HIP_INDEX_REG(tc_port) _MMIO((tc_port) < 4 ? _HIP_INDEX_REG0 \ + : _HIP_INDEX_REG1) +#define _HIP_INDEX_SHIFT(tc_port) (8 * ((tc_port) % 4)) +#define HIP_INDEX_VAL(tc_port, val) ((val) << _HIP_INDEX_SHIFT(tc_port)) + /* BXT display engine PLL */ #define BXT_DE_PLL_CTL _MMIO(0x6d000) #define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */ -- cgit v1.2.3 From 8aaf5cbda8f1684ffb7027724f67da086f46a0f4 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Fri, 20 Sep 2019 13:58:09 -0700 Subject: drm/i915/icl: Unify disable and enable phy clock gating functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adding a enable parameters allow us to share most of the code between enable and disable functions. v3: Renamed icl_phy_clock_gating() to icl_phy_set_clock_gating() Reviewed-by: Lucas De Marchi Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190920205810.211048-6-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 73 ++++++++++---------------------- 1 file changed, 23 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index dfd6b064cbc3..33cd766f9eea 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3033,67 +3033,40 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder) } } -static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port) +static void +icl_phy_set_clock_gating(struct intel_digital_port *dig_port, bool enable) { struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum port port = dig_port->base.port; enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - u32 val; + u32 val, bits; int ln; if (tc_port == PORT_TC_NONE) return; - for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_DP_MODE(ln, port)); - val |= MG_DP_MODE_CFG_TR2PWR_GATING | - MG_DP_MODE_CFG_TRPWR_GATING | - MG_DP_MODE_CFG_CLNPWR_GATING | - MG_DP_MODE_CFG_DIGPWR_GATING | - MG_DP_MODE_CFG_GAONPWR_GATING; - I915_WRITE(MG_DP_MODE(ln, port), val); - } - - val = I915_READ(MG_MISC_SUS0(tc_port)); - val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) | - MG_MISC_SUS0_CFG_TR2PWR_GATING | - MG_MISC_SUS0_CFG_CL2PWR_GATING | - MG_MISC_SUS0_CFG_GAONPWR_GATING | - MG_MISC_SUS0_CFG_TRPWR_GATING | - MG_MISC_SUS0_CFG_CL1PWR_GATING | - MG_MISC_SUS0_CFG_DGPWR_GATING; - I915_WRITE(MG_MISC_SUS0(tc_port), val); -} - -static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port) -{ - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum port port = dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - u32 val; - int ln; - - if (tc_port == PORT_TC_NONE) - return; + bits = MG_DP_MODE_CFG_TR2PWR_GATING | MG_DP_MODE_CFG_TRPWR_GATING | + MG_DP_MODE_CFG_CLNPWR_GATING | MG_DP_MODE_CFG_DIGPWR_GATING | + MG_DP_MODE_CFG_GAONPWR_GATING; for (ln = 0; ln < 2; ln++) { val = I915_READ(MG_DP_MODE(ln, port)); - val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING | - MG_DP_MODE_CFG_TRPWR_GATING | - MG_DP_MODE_CFG_CLNPWR_GATING | - MG_DP_MODE_CFG_DIGPWR_GATING | - MG_DP_MODE_CFG_GAONPWR_GATING); + if 
(enable) + val |= bits; + else + val &= ~bits; I915_WRITE(MG_DP_MODE(ln, port), val); } + bits = MG_MISC_SUS0_CFG_TR2PWR_GATING | MG_MISC_SUS0_CFG_CL2PWR_GATING | + MG_MISC_SUS0_CFG_GAONPWR_GATING | MG_MISC_SUS0_CFG_TRPWR_GATING | + MG_MISC_SUS0_CFG_CL1PWR_GATING | MG_MISC_SUS0_CFG_DGPWR_GATING; + val = I915_READ(MG_MISC_SUS0(tc_port)); - val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK | - MG_MISC_SUS0_CFG_TR2PWR_GATING | - MG_MISC_SUS0_CFG_CL2PWR_GATING | - MG_MISC_SUS0_CFG_GAONPWR_GATING | - MG_MISC_SUS0_CFG_TRPWR_GATING | - MG_MISC_SUS0_CFG_CL1PWR_GATING | - MG_MISC_SUS0_CFG_DGPWR_GATING); + if (enable) + val |= (bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3)); + else + val &= ~(bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK); I915_WRITE(MG_MISC_SUS0(tc_port), val); } @@ -3258,7 +3231,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_ddi_config_transcoder_func(crtc_state); /* 7.d */ - icl_disable_phy_clock_gating(dig_port); + icl_phy_set_clock_gating(dig_port, false); /* 7.e */ icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, @@ -3328,7 +3301,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, dig_port->ddi_io_power_domain); icl_program_mg_dp_mode(dig_port); - icl_disable_phy_clock_gating(dig_port); + icl_phy_set_clock_gating(dig_port, false); if (INTEL_GEN(dev_priv) >= 11) icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, @@ -3361,7 +3334,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_ddi_enable_fec(encoder, crtc_state); - icl_enable_phy_clock_gating(dig_port); + icl_phy_set_clock_gating(dig_port, true); if (!is_mst) intel_ddi_enable_pipe_clock(crtc_state); @@ -3398,7 +3371,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); icl_program_mg_dp_mode(dig_port); - icl_disable_phy_clock_gating(dig_port); + icl_phy_set_clock_gating(dig_port, false); if (INTEL_GEN(dev_priv) >= 11) icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, @@ -3410,7 +3383,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, else intel_prepare_hdmi_ddi_buffers(encoder, level); - icl_enable_phy_clock_gating(dig_port); + icl_phy_set_clock_gating(dig_port, true); if (IS_GEN9_BC(dev_priv)) skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI); -- cgit v1.2.3 From 27ffe6e570aa93e8389f7b0299a329e51d02843c Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Fri, 20 Sep 2019 13:58:10 -0700 Subject: drm/i915/tgl: Check the UC health of tc controllers after power on MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit New step added for TGL, required for us to check the TC microcontroller health after power on TC aux. 
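[For illustration, using the HIP_INDEX macros added earlier in this series: the Dekel register space is reached through a 4KB window, so the sequence below first programs the per-port index byte and then polls the windowed register. For example, with tc_port 4:

	HIP_INDEX_REG(4)       /* -> _HIP_INDEX_REG1 (0x1010A4)     */
	HIP_INDEX_VAL(4, 0x2)  /* -> 0x2 << (8 * (4 % 4)) == 0x2    */

after which DKL_CMN_UC_DW_27(tc_port) is polled for DKL_CMN_UC_DW27_UC_HEALTH.]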
BSpec: 49294 Reviewed-by: Imre Deak Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190920205810.211048-7-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_display_power.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index ce88a27229ef..f1186bc23542 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -562,6 +562,8 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, #endif +#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1) + static void icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) @@ -578,6 +580,17 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); hsw_power_well_enable(dev_priv, power_well); + + if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) { + enum tc_port tc_port; + + tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx); + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2)); + + if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port), + DKL_CMN_UC_DW27_UC_HEALTH, 1)) + DRM_WARN("Timeout waiting TC uC health\n"); + } } static void -- cgit v1.2.3 From 04e0e1777a792223897b8c21eb4e0a5b2624d9df Mon Sep 17 00:00:00 2001 From: Adam Ford Date: Mon, 23 Sep 2019 11:25:31 -0700 Subject: ARM: omap2plus_defconfig: Enable DRM_TI_TFP410 The TFP410 driver was removed but the replacement driver was never enabled. This patch enableds the DRM_TI_TFP410 Fixes: be3143d8b27f ("drm/omap: Remove TFP410 and DVI connector drivers") Signed-off-by: Adam Ford Signed-off-by: Tony Lindgren --- arch/arm/configs/omap2plus_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 64eb896907bf..ab046eb41c7a 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -364,6 +364,7 @@ CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1=m CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11=m CONFIG_DRM_TILCDC=m CONFIG_DRM_PANEL_SIMPLE=m +CONFIG_DRM_TI_TFP410=m CONFIG_FB=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_MODE_HELPERS=y -- cgit v1.2.3 From 1d70ded8567ccf70353ff97e3b13f267c889f934 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Sun, 22 Sep 2019 10:57:40 -0700 Subject: ARM: omap2plus_defconfig: Enable more droid4 devices as loadable modules Droid4 needs USB option serial driver for modem, and lm3532 for the LCD backlight. 
Note that the LCD backlight does not yet get enabled automatically, but needs to be done manually with: # echo 50 > /sys/class/leds/lm3532::backlight/brightness Signed-off-by: Tony Lindgren --- arch/arm/configs/omap2plus_defconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index ab046eb41c7a..ae326572c62a 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -424,6 +424,7 @@ CONFIG_USB_SERIAL_GENERIC=y CONFIG_USB_SERIAL_SIMPLE=m CONFIG_USB_SERIAL_FTDI_SIO=m CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OPTION=m CONFIG_USB_TEST=m CONFIG_NOP_USB_XCEIV=m CONFIG_AM335X_PHY_USB=m @@ -461,6 +462,7 @@ CONFIG_MMC_SDHCI_OMAP=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=m CONFIG_LEDS_CPCAP=m +CONFIG_LEDS_LM3532=m CONFIG_LEDS_GPIO=m CONFIG_LEDS_PCA963X=m CONFIG_LEDS_PWM=m -- cgit v1.2.3 From b647c7df01b75761b4c0b1cb6f4841088c0b1121 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 23 Sep 2019 16:28:42 +0100 Subject: drm/i915: Fixup preempt-to-busy vs resubmission of a virtual request As preempt-to-busy leaves the request on the HW as the resubmission is processed, that request may complete in the background and even cause a second virtual request to enter queue. This second virtual request breaks our "single request in the virtual pipeline" assumptions. Furthermore, as the virtual request may be completed and retired, we lose the reference the virtual engine assumes is held. Normally, just removing the request from the scheduler queue removes it from the engine, but the virtual engine keeps track of its singleton request via its ve->request. This pointer needs protecting with a reference. v2: Drop unnecessary motion of rq->engine = owner Fixes: 22b7a426bbe1 ("drm/i915/execlists: Preempt-to-busy") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190923152844.8914-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index bff4c6a735cf..114819fb5a24 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1256,6 +1256,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) submit = true; last = rq; } + i915_request_put(rq); /* * Hmm, we have a bunch of virtual engine requests, @@ -2556,6 +2557,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine) rq->engine = engine; __i915_request_submit(rq); + i915_request_put(rq); ve->base.execlists.queue_priority_hint = INT_MIN; } @@ -3809,6 +3811,8 @@ submit_engine: static void virtual_submit_request(struct i915_request *rq) { struct virtual_engine *ve = to_virtual_engine(rq->engine); + struct i915_request *old; + unsigned long flags; GEM_TRACE("%s: rq=%llx:%lld\n", ve->base.name, @@ -3817,15 +3821,31 @@ static void virtual_submit_request(struct i915_request *rq) GEM_BUG_ON(ve->base.submit_request != virtual_submit_request); - GEM_BUG_ON(ve->request); - GEM_BUG_ON(!list_empty(virtual_queue(ve))); + spin_lock_irqsave(&ve->base.active.lock, flags); + + old = ve->request; + if (old) { /* background completion event from preempt-to-busy */ + GEM_BUG_ON(!i915_request_completed(old)); + __i915_request_submit(old); + i915_request_put(old); + } - ve->base.execlists.queue_priority_hint = rq_prio(rq); - 
WRITE_ONCE(ve->request, rq); + if (i915_request_completed(rq)) { + __i915_request_submit(rq); - list_move_tail(&rq->sched.link, virtual_queue(ve)); + ve->base.execlists.queue_priority_hint = INT_MIN; + ve->request = NULL; + } else { + ve->base.execlists.queue_priority_hint = rq_prio(rq); + ve->request = i915_request_get(rq); + + GEM_BUG_ON(!list_empty(virtual_queue(ve))); + list_move_tail(&rq->sched.link, virtual_queue(ve)); + + tasklet_schedule(&ve->base.execlists.tasklet); + } - tasklet_schedule(&ve->base.execlists.tasklet); + spin_unlock_irqrestore(&ve->base.active.lock, flags); } static struct ve_bond * -- cgit v1.2.3 From cb2377a919bbe8107af269c5a31a8d5cfb27d867 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 23 Sep 2019 16:28:43 +0100 Subject: drm/i915: Fixup preempt-to-busy vs reset of a virtual request Due to the nature of preempt-to-busy the execlists active tracking and the schedule queue may become temporarily desync'ed (between resubmission to HW and its ack from HW). This means that we may have unwound a request and passed it back to the virtual engine, but it is still inflight on the HW and may even result in a GPU hang. If we detect that GPU hang and try to reset, the hanging request->engine will no longer match the current engine, which means that the request is not on the execlists active list and we should not try to find an older incomplete request. Given that we have deduced this must be a request on a virtual engine, it is the single active request in the context and so must be guilty (as the context is still inflight, it is prevented from being executed on another engine as we process the reset). Fixes: 22b7a426bbe1 ("drm/i915/execlists: Preempt-to-busy") Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190923152844.8914-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 7 +++++-- drivers/gpu/drm/i915/gt/intel_reset.c | 4 +--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 114819fb5a24..322287943e33 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2348,11 +2348,14 @@ static void reset_csb_pointers(struct intel_engine_cs *engine) static struct i915_request *active_request(struct i915_request *rq) { - const struct list_head * const list = - &i915_request_active_timeline(rq)->requests; const struct intel_context * const ce = rq->hw_context; struct i915_request *active = NULL; + struct list_head *list; + if (!i915_request_is_active(rq)) /* unwound, but incomplete! 
*/ + return rq; + + list = &i915_request_active_timeline(rq)->requests; list_for_each_entry_from_reverse(rq, list, link) { if (i915_request_completed(rq)) break; diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 89048ca8924c..ae68c3786f63 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -42,11 +42,10 @@ static void engine_skip_context(struct i915_request *rq) struct intel_engine_cs *engine = rq->engine; struct i915_gem_context *hung_ctx = rq->gem_context; - lockdep_assert_held(&engine->active.lock); - if (!i915_request_is_active(rq)) return; + lockdep_assert_held(&engine->active.lock); list_for_each_entry_continue(rq, &engine->active.requests, sched.link) if (rq->gem_context == hung_ctx) i915_request_skip(rq, -EIO); @@ -123,7 +122,6 @@ void __i915_request_reset(struct i915_request *rq, bool guilty) rq->fence.seqno, yesno(guilty)); - lockdep_assert_held(&rq->engine->active.lock); GEM_BUG_ON(i915_request_completed(rq)); if (guilty) { -- cgit v1.2.3 From e2144503bf3b22275dd33cef2880e1cb5fb200c5 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 23 Sep 2019 16:28:44 +0100 Subject: drm/i915: Prevent bonded requests from overtaking each other on preemption Force bonded requests to run on distinct engines so that they cannot be shuffled onto the same engine where timeslicing will reverse the order. A bonded request will often wait on a semaphore signaled by its master, creating an implicit dependency -- if we ignore that implicit dependency and allow the bonded request to run on the same engine and before its master, we will cause a GPU hang. [Whether it will hang the GPU is debatable, we should keep on timeslicing and each timeslice should be "accidentally" counted as forward progress, in which case it should run but at one-half to one-third speed.] We can prevent this inversion by restricting which engines we allow ourselves to jump to upon preemption, i.e. baking in the arrangement established at first execution. (We should also consider capturing the implicit dependency using i915_sched_add_dependency(), but first we need to think about the constraints that requires on the execution/retirement ordering.) 
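[A worked example of the mask bookkeeping introduced below, with made-up engine masks for illustration: if the master runs on vcs0 and the bond's sibling_mask is vcs1 | vcs2, then

	allowed = ~BIT(vcs0) & (BIT(vcs1) | BIT(vcs2));	/* vcs1 | vcs2 */
	rq->execution_mask &= allowed;			/* bonded rq kept off vcs0 */
	to_request(signal)->execution_mask &= ~allowed;	/* master kept off vcs1/vcs2 */

so after a preemption neither request can be shuffled onto an engine where it could overtake the other.]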
Fixes: 8ee36e048c98 ("drm/i915/execlists: Minimalistic timeslicing") References: ee1136908e9b ("drm/i915/execlists: Virtual engine bonding") Testcase: igt/gem_exec_balancer/bonded-slice Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190923152844.8914-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 322287943e33..6cfdc0f9f2b9 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3869,18 +3869,22 @@ static void virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal) { struct virtual_engine *ve = to_virtual_engine(rq->engine); + intel_engine_mask_t allowed, exec; struct ve_bond *bond; + allowed = ~to_request(signal)->engine->mask; + bond = virtual_find_bond(ve, to_request(signal)->engine); - if (bond) { - intel_engine_mask_t old, new, cmp; + if (bond) + allowed &= bond->sibling_mask; - cmp = READ_ONCE(rq->execution_mask); - do { - old = cmp; - new = cmp & bond->sibling_mask; - } while ((cmp = cmpxchg(&rq->execution_mask, old, new)) != old); - } + /* Restrict the bonded request to run on only the available engines */ + exec = READ_ONCE(rq->execution_mask); + while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed)) + ; + + /* Prevent the master from being re-run on the bonded engines */ + to_request(signal)->execution_mask &= ~allowed; } struct intel_context * -- cgit v1.2.3 From 0ec64895b05269c1814ea5befcadcd1184a12915 Mon Sep 17 00:00:00 2001 From: John Pittman Date: Tue, 17 Sep 2019 14:52:00 -0400 Subject: nvmet: change ppl to lpp In nvmet_bdev_set_limits() the number of logical blocks per physical block is calculated, but the opposite is mentioned in the associated comment and reflected in the variable name. Correct the comment and adjust the variable name to reflect the calculation done. Signed-off-by: John Pittman Reviewed-by: Bart Van Assche Reviewed-by: Chaitanya Kulkarni Signed-off-by: Sagi Grimberg --- drivers/nvme/target/io-cmd-bdev.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index de0bff70ebb6..32008d85172b 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -11,10 +11,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) { const struct queue_limits *ql = &bdev_get_queue(bdev)->limits; - /* Number of physical blocks per logical block. */ - const u32 ppl = ql->physical_block_size / ql->logical_block_size; - /* Physical blocks per logical block, 0's based. */ - const __le16 ppl0b = to0based(ppl); + /* Number of logical blocks per physical block. */ + const u32 lpp = ql->physical_block_size / ql->logical_block_size; + /* Logical blocks per physical block, 0's based. */ + const __le16 lpp0b = to0based(lpp); /* * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN, @@ -25,9 +25,9 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) * field from the identify controller data structure should be used. 
*/ id->nsfeat |= 1 << 1; - id->nawun = ppl0b; - id->nawupf = ppl0b; - id->nacwu = ppl0b; + id->nawun = lpp0b; + id->nawupf = lpp0b; + id->nacwu = lpp0b; /* * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and @@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) */ id->nsfeat |= 1 << 4; /* NPWG = Namespace Preferred Write Granularity. 0's based */ - id->npwg = ppl0b; + id->npwg = lpp0b; /* NPWA = Namespace Preferred Write Alignment. 0's based */ id->npwa = id->npwg; /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */ -- cgit v1.2.3 From b224726de5e496dbf78147a66755c3d81e28bdd2 Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Wed, 18 Sep 2019 00:27:20 +0000 Subject: nvme-pci: Fix a race in controller removal User space programs like udevd may try to read to partitions at the same time the driver detects a namespace is unusable, and may deadlock if revalidate_disk() is called while such a process is waiting to enter the frozen queue. On detecting a dead namespace, move the disk revalidate after unblocking dispatchers that may be holding bd_butex. changelog Suggested-by: Keith Busch Signed-off-by: Balbir Singh Reviewed-by: Keith Busch Signed-off-by: Sagi Grimberg --- drivers/nvme/host/core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 108f60b46804..0c385b1994fe 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -102,10 +102,13 @@ static void nvme_set_queue_dying(struct nvme_ns *ns) */ if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags)) return; - revalidate_disk(ns->disk); blk_set_queue_dying(ns->queue); /* Forcibly unquiesce queues to avoid blocking dispatch */ blk_mq_unquiesce_queue(ns->queue); + /* + * Revalidate after unblocking dispatchers that may be holding bd_butex + */ + revalidate_disk(ns->disk); } static void nvme_queue_scan(struct nvme_ctrl *ctrl) -- cgit v1.2.3 From 82a9ac7130cf51c2640800fb0ef19d3a05cb8fff Mon Sep 17 00:00:00 2001 From: Steffen Maier Date: Wed, 7 Aug 2019 16:49:47 +0200 Subject: scsi: core: fix missing .cleanup_rq for SCSI hosts without request batching This was missing from scsi_mq_ops_no_commit of linux-next commit 8930a6c20791 ("scsi: core: add support for request batching") from Martin's scsi/5.4/scsi-queue or James' scsi/misc. See also linux-next commit b7e9e1fb7a92 ("scsi: implement .cleanup_rq callback") from block/for-next. Signed-off-by: Steffen Maier Fixes: 8930a6c20791 ("scsi: core: add support for request batching") Cc: Paolo Bonzini Cc: Ming Lei Reviewed-by: Paolo Bonzini Reviewed-by: Bart Van Assche Signed-off-by: Martin K. 
Petersen --- drivers/scsi/scsi_lib.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index dc210b9d4896..12db0c13a927 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1834,6 +1834,7 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = { .init_request = scsi_mq_init_request, .exit_request = scsi_mq_exit_request, .initialize_rq_fn = scsi_initialize_rq, + .cleanup_rq = scsi_cleanup_rq, .busy = scsi_mq_lld_busy, .map_queues = scsi_map_queues, }; -- cgit v1.2.3 From 6b6fa7a5c86e1269d9f0c9a5b902072351317387 Mon Sep 17 00:00:00 2001 From: Steffen Maier Date: Wed, 7 Aug 2019 16:49:48 +0200 Subject: scsi: core: fix dh and multipathing for SCSI hosts without request batching This was missing from scsi_device_from_queue() due to the introduction of another new scsi_mq_ops_no_commit of linux-next commit 8930a6c20791 ("scsi: core: add support for request batching") from Martin's scsi/5.4/scsi-queue or James' scsi/misc. Only devicehandler code seems to call scsi_device_from_queue(): *** drivers/scsi/scsi_dh.c: scsi_dh_activate[255] sdev = scsi_device_from_queue(q); scsi_dh_set_params[302] sdev = scsi_device_from_queue(q); scsi_dh_attach[325] sdev = scsi_device_from_queue(q); scsi_dh_attached_handler_name[363] sdev = scsi_device_from_queue(q); Fixes multipath tools follow-on errors: $ multipath -v6 ... libdevmapper: ioctl/libdm-iface.c(1887): device-mapper: reload ioctl on mpatha failed: No such device ... mpatha: failed to load map, error 19 ... showing also as kernel messages: device-mapper: table: 252:0: multipath: error attaching hardware handler device-mapper: ioctl: error adding target to table Signed-off-by: Steffen Maier Fixes: 8930a6c20791 ("scsi: core: add support for request batching") Cc: Paolo Bonzini Reviewed-by: Paolo Bonzini Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi_lib.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 12db0c13a927..5447738906ac 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1922,7 +1922,8 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q) { struct scsi_device *sdev = NULL; - if (q->mq_ops == &scsi_mq_ops) + if (q->mq_ops == &scsi_mq_ops_no_commit || + q->mq_ops == &scsi_mq_ops) sdev = q->queuedata; if (!sdev || !get_device(&sdev->sdev_gendev)) sdev = NULL; -- cgit v1.2.3 From c9c53749375ccce0191f89b86732e642f914bd82 Mon Sep 17 00:00:00 2001 From: Laurence Oberman Date: Wed, 11 Sep 2019 09:56:42 -0400 Subject: scsi: bnx2fc: Handle scope bits when array returns BUSY or TSF The qla2xxx driver had this issue as well when the newer array firmware returned the retry_delay_timer in the fcp_rsp. The bnx2fc is not handling the masking of the scope bits either so the retry_delay_timestamp value lands up being a large value added to the timer timestamp delaying I/O for up to 27 Minutes. This patch adds similar code to handle this to the bnx2fc driver to avoid the huge delay. Link: https://lore.kernel.org/r/1568210202-12794-1-git-send-email-loberman@redhat.com Signed-off-by: Laurence Oberman Reported-by: David Jeffery Reported-by: kbuild test robot Signed-off-by: Martin K. 
Petersen --- drivers/scsi/bnx2fc/bnx2fc_io.c | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index da00ca5fa5dc..401743e2b429 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1923,6 +1923,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, struct fcoe_fcp_rsp_payload *fcp_rsp; struct bnx2fc_rport *tgt = io_req->tgt; struct scsi_cmnd *sc_cmd; + u16 scope = 0, qualifier = 0; /* scsi_cmd_cmpl is called with tgt lock held */ @@ -1990,12 +1991,30 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL || io_req->cdb_status == SAM_STAT_BUSY) { - /* Set the jiffies + retry_delay_timer * 100ms - for the rport/tgt */ - tgt->retry_delay_timestamp = jiffies + - fcp_rsp->retry_delay_timer * HZ / 10; + /* Newer array firmware with BUSY or + * TASK_SET_FULL may return a status that needs + * the scope bits masked. + * Or a huge delay timestamp up to 27 minutes + * can result. + */ + if (fcp_rsp->retry_delay_timer) { + /* Upper 2 bits */ + scope = fcp_rsp->retry_delay_timer + & 0xC000; + /* Lower 14 bits */ + qualifier = fcp_rsp->retry_delay_timer + & 0x3FFF; + } + if (scope > 0 && qualifier > 0 && + qualifier <= 0x3FEF) { + /* Set the jiffies + + * retry_delay_timer * 100ms + * for the rport/tgt + */ + tgt->retry_delay_timestamp = jiffies + + (qualifier * HZ / 10); + } } - } if (io_req->fcp_resid) scsi_set_resid(sc_cmd, io_req->fcp_resid); -- cgit v1.2.3 From f51913eef23f74c3bd07899dc7f1ed6df9e521d8 Mon Sep 17 00:00:00 2001 From: Stanley Chu Date: Wed, 18 Sep 2019 12:20:38 +0800 Subject: scsi: ufs: skip shutdown if hba is not powered In some cases, hba may go through shutdown flow without successful initialization and then make system hang. For example, if ufshcd_change_power_mode() gets error and leads to ufshcd_hba_exit() to release resources of the host, future shutdown flow may hang the system since the host register will be accessed in unpowered state. To solve this issue, simply add checking to skip shutdown for above kind of situation. Link: https://lore.kernel.org/r/1568780438-28753-1-git-send-email-stanley.chu@mediatek.com Signed-off-by: Stanley Chu Acked-by: Bean Huo Signed-off-by: Martin K. Petersen --- drivers/scsi/ufs/ufshcd.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index c4a015e42045..57b2c3a8def3 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -8140,6 +8140,9 @@ int ufshcd_shutdown(struct ufs_hba *hba) { int ret = 0; + if (!hba->is_powered) + goto out; + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) goto out; -- cgit v1.2.3 From 4b062e7cf940aa4f38fedc3a964b24cb095373f0 Mon Sep 17 00:00:00 2001 From: Austin Kim Date: Thu, 19 Sep 2019 16:55:48 +0900 Subject: scsi: qedf: Remove always false 'tmp_prio < 0' statement Since tmp_prio is declared as u8, the following statement is always false. tmp_prio < 0 So remove 'always false' statement. Link: https://lore.kernel.org/r/20190919075548.GA112801@LGEARND20B15 Signed-off-by: Austin Kim Acked-by: Saurav Kashyap Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qedf/qedf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 9c24f3834d70..1d2c111bdf82 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -596,7 +596,7 @@ static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type) tmp_prio = get->operational.app_prio.fcoe; if (qedf_default_prio > -1) qedf->prio = qedf_default_prio; - else if (tmp_prio < 0 || tmp_prio > 7) { + else if (tmp_prio > 7) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "FIP/FCoE prio %d out of range, setting to %d.\n", tmp_prio, QEDF_DEFAULT_PRIO); -- cgit v1.2.3 From 0ed8810276907c8a633dc8cecc48dabb6678cd23 Mon Sep 17 00:00:00 2001 From: Long Li Date: Fri, 6 Sep 2019 10:24:20 -0700 Subject: scsi: storvsc: setup 1:1 mapping between hardware queue and CPU queue storvsc doesn't use a dedicated hardware queue for a given CPU queue. When issuing I/O, it selects returning CPU (hardware queue) dynamically based on vmbus channel usage across all channels. This patch advertises num_present_cpus() as number of hardware queues. This will have upper layer setup 1:1 mapping between hardware queue and CPU queue and avoid unnecessary locking when issuing I/O. Link: https://lore.kernel.org/r/1567790660-48142-1-git-send-email-longli@linuxonhyperv.com Signed-off-by: Long Li Reviewed-by: Michael Kelley Signed-off-by: Martin K. Petersen --- drivers/scsi/storvsc_drv.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index ed8b9ac805e6..542d2bac2922 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1837,8 +1837,7 @@ static int storvsc_probe(struct hv_device *device, /* * Set the number of HW queues we are supporting. */ - if (stor_device->num_sc != 0) - host->nr_hw_queues = stor_device->num_sc + 1; + host->nr_hw_queues = num_present_cpus(); /* * Set the error handler work queue. -- cgit v1.2.3 From 70054aa39a013fa52eff432f2223b8bd5c0048f8 Mon Sep 17 00:00:00 2001 From: Xiang Chen Date: Sat, 7 Sep 2019 09:07:30 +0800 Subject: scsi: megaraid: disable device when probe failed after enabled device For pci device, need to disable device when probe failed after enabled device. Link: https://lore.kernel.org/r/1567818450-173315-1-git-send-email-chenxiang66@hisilicon.com Signed-off-by: Xiang Chen Reviewed-by: John Garry Signed-off-by: Martin K. Petersen --- drivers/scsi/megaraid.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 45a66048801b..ff6d4aa92421 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -4183,11 +4183,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) */ if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ && pdev->subsystem_device == 0xC000) - return -ENODEV; + goto out_disable_device; /* Now check the magic signature byte */ pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic); if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE) - return -ENODEV; + goto out_disable_device; /* Ok it is probably a megaraid */ } -- cgit v1.2.3 From 4b6b1bb68628ec49e4cbfabd8ac17a169f079cd8 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 23 Sep 2019 13:40:35 +0800 Subject: scsi: hisi_sas: Make three functions static Fix sparse warnings: drivers/scsi/hisi_sas/hisi_sas_main.c:3686:6: warning: symbol 'hisi_sas_debugfs_release' was not declared. Should it be static? 
drivers/scsi/hisi_sas/hisi_sas_main.c:3708:5: warning: symbol 'hisi_sas_debugfs_alloc' was not declared. Should it be static? drivers/scsi/hisi_sas/hisi_sas_main.c:3799:6: warning: symbol 'hisi_sas_debugfs_bist_init' was not declared. Should it be static? Link: https://lore.kernel.org/r/20190923054035.19036-1-yuehaibing@huawei.com Reported-by: Hulk Robot Signed-off-by: YueHaibing Acked-by: John Garry Signed-off-by: Martin K. Petersen --- drivers/scsi/hisi_sas/hisi_sas_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index d1513fdf1e00..0847e682797b 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -3683,7 +3683,7 @@ void hisi_sas_debugfs_work_handler(struct work_struct *work) } EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler); -void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba) +static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; int i; @@ -3705,7 +3705,7 @@ void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba) devm_kfree(dev, hisi_hba->debugfs_port_reg[i]); } -int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba) +static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba) { const struct hisi_sas_hw *hw = hisi_hba->hw; struct device *dev = hisi_hba->dev; @@ -3796,7 +3796,7 @@ fail: return -ENOMEM; } -void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba) +static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba) { hisi_hba->debugfs_bist_dentry = debugfs_create_dir("bist", hisi_hba->debugfs_dir); -- cgit v1.2.3 From 248a445adfc8c33ffd67cf1f2e336578e34f9e21 Mon Sep 17 00:00:00 2001 From: Himanshu Madhani Date: Thu, 12 Sep 2019 11:09:05 -0700 Subject: scsi: qla2xxx: Silence fwdump template message Print if fwdt template is present or not, only when ql2xextended_error_logging is enabled. Link: https://lore.kernel.org/r/20190912180918.6436-2-hmadhani@marvell.com Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index b6cdf108994c..f6a5d78f93c9 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -3190,7 +3190,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) for (j = 0; j < 2; j++, fwdt++) { if (!fwdt->template) { - ql_log(ql_log_warn, vha, 0x00ba, + ql_dbg(ql_dbg_init, vha, 0x00ba, "-> fwdt%u no template\n", j); continue; } -- cgit v1.2.3 From c3b6a1d397420a0fdd97af2f06abfb78adc370df Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Thu, 12 Sep 2019 11:09:06 -0700 Subject: scsi: qla2xxx: Fix unbound sleep in fcport delete path. There are instances, though rare, where a LOGO request cannot be sent out and the thread in free session done can wait indefinitely. Fix this by putting an upper bound to sleep. Link: https://lore.kernel.org/r/20190912180918.6436-3-hmadhani@marvell.com Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_target.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 0ffda6171614..b58ecd2d7fb6 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1020,6 +1020,7 @@ void qlt_free_session_done(struct work_struct *work) if (logout_started) { bool traced = false; + u16 cnt = 0; while (!READ_ONCE(sess->logout_completed)) { if (!traced) { @@ -1029,6 +1030,9 @@ void qlt_free_session_done(struct work_struct *work) traced = true; } msleep(100); + cnt++; + if (cnt > 200) + break; } ql_dbg(ql_dbg_disc, vha, 0xf087, -- cgit v1.2.3 From fd5564ba54e0d8a9e3e823d311b764232e09eb5f Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Thu, 12 Sep 2019 11:09:07 -0700 Subject: scsi: qla2xxx: Fix stale mem access on driver unload On driver unload, 'remove_one' thread was allowed to advance, while session cleanup still lag behind. This patch ensures session deletion will finish before remove_one can advance. Link: https://lore.kernel.org/r/20190912180918.6436-4-hmadhani@marvell.com Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_os.c | 1 + drivers/scsi/qla2xxx/qla_target.c | 21 ++++++++------------- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 51154c049385..42980e52fb41 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1118,6 +1118,7 @@ qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) qla2x00_mark_all_devices_lost(vha, 0); wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 10*HZ); + flush_workqueue(vha->hw->wq); } /* diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index b58ecd2d7fb6..b5315be00b4d 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -953,7 +953,7 @@ void qlt_free_session_done(struct work_struct *work) struct qla_hw_data *ha = vha->hw; unsigned long flags; bool logout_started = false; - scsi_qla_host_t *base_vha; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); struct qlt_plogi_ack_t *own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; @@ -1105,6 +1105,7 @@ void qlt_free_session_done(struct work_struct *work) } spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + sess->free_pending = 0; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, "Unregistration of sess %p %8phC finished fcp_cnt %d\n", @@ -1113,17 +1114,8 @@ void qlt_free_session_done(struct work_struct *work) if (tgt && (tgt->sess_count == 0)) wake_up_all(&tgt->waitQ); - if (vha->fcport_count == 0) - wake_up_all(&vha->fcport_waitQ); - - base_vha = pci_get_drvdata(ha->pdev); - - sess->free_pending = 0; - - if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags)) - return; - - if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) { + if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) && + (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) { switch (vha->host->active_mode) { case MODE_INITIATOR: case MODE_DUAL: @@ -1136,6 +1128,9 @@ void qlt_free_session_done(struct work_struct *work) break; } } + + if (vha->fcport_count == 0) + wake_up_all(&vha->fcport_waitQ); } /* ha->tgt.sess_lock supposed to be held on entry */ @@ -1165,7 +1160,7 @@ void qlt_unreg_sess(struct fc_port *sess) sess->last_login_gen = sess->login_gen; INIT_WORK(&sess->free_work, qlt_free_session_done); - schedule_work(&sess->free_work); + 
queue_work(sess->vha->hw->wq, &sess->free_work); } EXPORT_SYMBOL(qlt_unreg_sess); -- cgit v1.2.3 From f5187b7d1ac66b61676f896751d3af9fcf8dd592 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Thu, 12 Sep 2019 11:09:08 -0700 Subject: scsi: qla2xxx: Optimize NPIV tear down process In the case of NPIV port is being torn down, this patch will set a flag to indicate VPORT_DELETE. This would prevent relogin to be triggered. Link: https://lore.kernel.org/r/20190912180918.6436-5-hmadhani@marvell.com Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_attr.c | 2 ++ drivers/scsi/qla2xxx/qla_def.h | 1 + drivers/scsi/qla2xxx/qla_gs.c | 3 ++- drivers/scsi/qla2xxx/qla_mid.c | 32 ++++++++++++++++++++++---------- drivers/scsi/qla2xxx/qla_os.c | 7 ++++++- drivers/scsi/qla2xxx/qla_target.c | 1 + 6 files changed, 34 insertions(+), 12 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index e9c449ef515c..8b3015361428 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2920,6 +2920,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) struct qla_hw_data *ha = vha->hw; uint16_t id = vha->vp_idx; + set_bit(VPORT_DELETE, &vha->dpc_flags); + while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) || test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) msleep(1000); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 873a6aef1c5c..fa038568beb6 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -4394,6 +4394,7 @@ typedef struct scsi_qla_host { #define IOCB_WORK_ACTIVE 31 #define SET_ZIO_THRESHOLD_NEEDED 32 #define ISP_ABORT_TO_ROM 33 +#define VPORT_DELETE 34 unsigned long pci_flags; #define PFLG_DISCONNECTED 0 /* PCI device removed */ diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index dc0e36676313..5298ed10059f 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3102,7 +3102,8 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id) { struct qla_work_evt *e; - if (test_bit(UNLOADING, &vha->dpc_flags)) + if (test_bit(UNLOADING, &vha->dpc_flags) || + (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags))) return 0; e = qla2x00_alloc_work(vha, QLA_EVT_GPNID); diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 1a9a11ae7285..6afad68e5ba2 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -66,6 +66,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) uint16_t vp_id; struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; + u8 i; mutex_lock(&ha->vport_lock); /* @@ -75,8 +76,9 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) * ensures no active vp_list traversal while the vport is removed * from the queue) */ - wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count), - 10*HZ); + for (i = 0; i < 10 && atomic_read(&vha->vref_count); i++) + wait_event_timeout(vha->vref_waitq, + atomic_read(&vha->vref_count), HZ); spin_lock_irqsave(&ha->vport_slock, flags); if (atomic_read(&vha->vref_count)) { @@ -262,6 +264,9 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vha, &ha->vp_list, list) { if (vha->vp_idx) { + if (test_bit(VPORT_DELETE, &vha->dpc_flags)) + continue; + atomic_inc(&vha->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); @@ -300,6 +305,20 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, 
uint16_t *mb) int qla2x00_vp_abort_isp(scsi_qla_host_t *vha) { + fc_port_t *fcport; + + /* + * To exclusively reset vport, we need to log it out first. + * Note: This control_vp can fail if ISP reset is already + * issued, this is expected, as the vp would be already + * logged out due to ISP reset. + */ + if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { + qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); + list_for_each_entry(fcport, &vha->vp_fcports, list) + fcport->logout_on_delete = 0; + } + /* * Physical port will do most of the abort and recovery work. We can * just treat it as a loop down @@ -312,16 +331,9 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); } - /* - * To exclusively reset vport, we need to log it out first. Note: this - * control_vp can fail if ISP reset is already issued, this is - * expected, as the vp would be already logged out due to ISP reset. - */ - if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) - qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); - ql_dbg(ql_dbg_taskm, vha, 0x801d, "Scheduling enable of Vport %d.\n", vha->vp_idx); + return qla24xx_enable_vp(vha); } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 42980e52fb41..5eda86ff6606 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1115,9 +1115,14 @@ static inline int test_fcport_count(scsi_qla_host_t *vha) void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) { + u8 i; + qla2x00_mark_all_devices_lost(vha, 0); - wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 10*HZ); + for (i = 0; i < 10; i++) + wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), + HZ); + flush_workqueue(vha->hw->wq); } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index b5315be00b4d..a06e56224a55 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1115,6 +1115,7 @@ void qlt_free_session_done(struct work_struct *work) wake_up_all(&tgt->waitQ); if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) && + !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) && (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) { switch (vha->host->active_mode) { case MODE_INITIATOR: -- cgit v1.2.3 From 7f2a398d59d658818f3d219645164676fbbc88e8 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Thu, 12 Sep 2019 11:09:09 -0700 Subject: scsi: qla2xxx: Fix N2N link reset Fix stalled link recovery for N2N with FC-NVMe connection. Link: https://lore.kernel.org/r/20190912180918.6436-6-hmadhani@marvell.com Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_def.h | 3 +- drivers/scsi/qla2xxx/qla_init.c | 107 +++++++++++++++++++++++++++++----------- drivers/scsi/qla2xxx/qla_mbx.c | 23 +++++++-- drivers/scsi/qla2xxx/qla_os.c | 4 ++ 4 files changed, 103 insertions(+), 34 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index fa038568beb6..6ffa9877c28b 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2396,6 +2396,7 @@ typedef struct fc_port { unsigned int query:1; unsigned int id_changed:1; unsigned int scan_needed:1; + unsigned int n2n_flag:1; struct completion nvme_del_done; uint32_t nvme_prli_service_param; @@ -2446,7 +2447,6 @@ typedef struct fc_port { uint8_t fc4_type; uint8_t fc4f_nvme; uint8_t scan_state; - uint8_t n2n_flag; unsigned long last_queue_full; unsigned long last_ramp_up; @@ -3036,6 +3036,7 @@ enum scan_flags_t { enum fc4type_t { FS_FC4TYPE_FCP = BIT_0, FS_FC4TYPE_NVME = BIT_1, + FS_FCP_IS_N2N = BIT_7, }; struct fab_scan_rp { diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f6a5d78f93c9..67f522a8a9d4 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -746,12 +746,15 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, break; default: if ((id.b24 != fcport->d_id.b24 && - fcport->d_id.b24) || + fcport->d_id.b24 && + fcport->loop_id != FC_NO_LOOP_ID) || (fcport->loop_id != FC_NO_LOOP_ID && fcport->loop_id != loop_id)) { ql_dbg(ql_dbg_disc, vha, 0x20e3, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); + if (fcport->n2n_flag) + fcport->d_id.b24 = 0; qlt_schedule_sess_for_deletion(fcport); return; } @@ -759,6 +762,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, } fcport->loop_id = loop_id; + if (fcport->n2n_flag) + fcport->d_id.b24 = id.b24; wwn = wwn_to_u64(fcport->port_name); qlt_find_sess_invalidate_other(vha, wwn, @@ -972,7 +977,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) wwn = wwn_to_u64(e->port_name); ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8, - "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n", + "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n", __func__, (void *)&wwn, e->port_id[2], e->port_id[1], e->port_id[0], e->current_login_state, e->last_login_state, (loop_id & 0x7fff)); @@ -1499,7 +1504,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) (fcport->fw_login_state == DSC_LS_PRLI_PEND))) return 0; - if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { + if (fcport->fw_login_state == DSC_LS_PLOGI_COMP && + !N2N_TOPO(vha->hw)) { if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) { set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return 0; @@ -1570,8 +1576,9 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) qla24xx_post_gpdb_work(vha, fcport, 0); } else { ql_dbg(ql_dbg_disc, vha, 0x2118, - "%s %d %8phC post NVMe PRLI\n", - __func__, __LINE__, fcport->port_name); + "%s %d %8phC post %s PRLI\n", + __func__, __LINE__, fcport->port_name, + fcport->fc4f_nvme ? 
"NVME" : "FC"); qla24xx_post_prli_work(vha, fcport); } break; @@ -1853,17 +1860,38 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) break; } - if (ea->fcport->n2n_flag) { + if (ea->fcport->fc4f_nvme) { ql_dbg(ql_dbg_disc, vha, 0x2118, "%s %d %8phC post fc4 prli\n", __func__, __LINE__, ea->fcport->port_name); ea->fcport->fc4f_nvme = 0; - ea->fcport->n2n_flag = 0; qla24xx_post_prli_work(vha, ea->fcport); + return; + } + + /* at this point both PRLI NVME & PRLI FCP failed */ + if (N2N_TOPO(vha->hw)) { + if (ea->fcport->n2n_link_reset_cnt < 3) { + ea->fcport->n2n_link_reset_cnt++; + /* + * remote port is not sending Plogi. Reset + * link to kick start his state machine + */ + set_bit(N2N_LINK_RESET, &vha->dpc_flags); + } else { + ql_log(ql_log_warn, vha, 0x2119, + "%s %d %8phC Unable to reconnect\n", + __func__, __LINE__, ea->fcport->port_name); + } + } else { + /* + * switch connect. login failed. Take connection + * down and allow relogin to retrigger + */ + ea->fcport->flags &= ~FCF_ASYNC_SENT; + ea->fcport->keep_nport_handle = 0; + qlt_schedule_sess_for_deletion(ea->fcport); } - ql_dbg(ql_dbg_disc, vha, 0x2119, - "%s %d %8phC unhandle event of %x\n", - __func__, __LINE__, ea->fcport->port_name, ea->data[0]); break; } } @@ -5000,28 +5028,47 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) unsigned long flags; /* Inititae N2N login. */ - if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) { - /* borrowing */ - u32 *bp, i, sz; - - memset(ha->init_cb, 0, ha->init_cb_size); - sz = min_t(int, sizeof(struct els_plogi_payload), - ha->init_cb_size); - rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma, - (void *)ha->init_cb, sz); - if (rval == QLA_SUCCESS) { - bp = (uint32_t *)ha->init_cb; - for (i = 0; i < sz/4 ; i++, bp++) - *bp = cpu_to_be32(*bp); + if (N2N_TOPO(ha)) { + if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) { + /* borrowing */ + u32 *bp, i, sz; + + memset(ha->init_cb, 0, ha->init_cb_size); + sz = min_t(int, sizeof(struct els_plogi_payload), + ha->init_cb_size); + rval = qla24xx_get_port_login_templ(vha, + ha->init_cb_dma, (void *)ha->init_cb, sz); + if (rval == QLA_SUCCESS) { + bp = (uint32_t *)ha->init_cb; + for (i = 0; i < sz/4 ; i++, bp++) + *bp = cpu_to_be32(*bp); - memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb, - sizeof(ha->plogi_els_payld.data)); - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - } else { - ql_dbg(ql_dbg_init, vha, 0x00d1, - "PLOGI ELS param read fail.\n"); + memcpy(&ha->plogi_els_payld.data, + (void *)ha->init_cb, + sizeof(ha->plogi_els_payld.data)); + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + } else { + ql_dbg(ql_dbg_init, vha, 0x00d1, + "PLOGI ELS param read fail.\n"); + goto skip_login; + } + } + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->n2n_flag) { + qla24xx_fcport_handle_login(vha, fcport); + return QLA_SUCCESS; + } + } +skip_login: + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_retry++; + spin_unlock_irqrestore(&vha->work_lock, flags); + + if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } - return QLA_SUCCESS; } found_devs = 0; diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 4c858e2d0ea8..6d6e10812b42 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2249,7 +2249,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 
0x105a, + ql_dbg(ql_dbg_disc, vha, 0x105a, "Entered %s.\n", __func__); if (IS_CNA_CAPABLE(vha->hw)) { @@ -3883,14 +3883,23 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, case TOPO_N2N: ha->current_topology = ISP_CFG_N; spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + list_for_each_entry(fcport, &vha->vp_fcports, list) { + fcport->scan_state = QLA_FCPORT_SCAN; + fcport->n2n_flag = 0; + } + fcport = qla2x00_find_fcport_by_wwpn(vha, rptid_entry->u.f1.port_name, 1); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); if (fcport) { fcport->plogi_nack_done_deadline = jiffies + HZ; - fcport->dm_login_expire = jiffies + 3*HZ; + fcport->dm_login_expire = jiffies + 2*HZ; fcport->scan_state = QLA_FCPORT_FOUND; + fcport->n2n_flag = 1; + if (vha->flags.nvme_enabled) + fcport->fc4f_nvme = 1; + switch (fcport->disc_state) { case DSC_DELETED: set_bit(RELOGIN_NEEDED, @@ -3924,7 +3933,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, rptid_entry->u.f1.port_name, rptid_entry->u.f1.node_name, NULL, - FC4_TYPE_UNKNOWN); + FS_FCP_IS_N2N); } /* if our portname is higher then initiate N2N login */ @@ -4023,6 +4032,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->scan_state = QLA_FCPORT_SCAN; + fcport->n2n_flag = 0; } fcport = qla2x00_find_fcport_by_wwpn(vha, @@ -4032,6 +4042,13 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, fcport->login_retry = vha->hw->login_retry_count; fcport->plogi_nack_done_deadline = jiffies + HZ; fcport->scan_state = QLA_FCPORT_FOUND; + fcport->n2n_flag = 1; + fcport->d_id.b.domain = + rptid_entry->u.f2.remote_nport_id[2]; + fcport->d_id.b.area = + rptid_entry->u.f2.remote_nport_id[1]; + fcport->d_id.b.al_pa = + rptid_entry->u.f2.remote_nport_id[0]; } } } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 5eda86ff6606..8411dd5acd43 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -5033,6 +5033,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) memcpy(fcport->port_name, e->u.new_sess.port_name, WWN_SIZE); + + if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) + fcport->n2n_flag = 1; + } else { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC mem alloc fail.\n", -- cgit v1.2.3 From f3f1938bb673b1b5ad182c4608f5f8a24921eea3 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Thu, 12 Sep 2019 11:09:10 -0700 Subject: scsi: qla2xxx: Fix N2N link up fail During link up/bounce, qla driver would do command flush as part of cleanup. In this case, the flush can intefere with FW state. This patch allows FW to be in control of link up. Link: https://lore.kernel.org/r/20190912180918.6436-7-hmadhani@marvell.com Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen --- drivers/scsi/qla2xxx/qla_mbx.c | 2 ++ drivers/scsi/qla2xxx/qla_os.c | 6 ++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 6d6e10812b42..1cc6913f76c4 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -3897,6 +3897,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, fcport->dm_login_expire = jiffies + 2*HZ; fcport->scan_state = QLA_FCPORT_FOUND; fcport->n2n_flag = 1; + fcport->keep_nport_handle = 1; if (vha->flags.nvme_enabled) fcport->fc4f_nvme = 1; @@ -4042,6 +4043,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, fcport->login_retry = vha->hw->login_retry_count; fcport->plogi_nack_done_deadline = jiffies + HZ; fcport->scan_state = QLA_FCPORT_FOUND; + fcport->keep_nport_handle = 1; fcport->n2n_flag = 1; fcport->d_id.b.domain = rptid_entry->u.f2.remote_nport_id[2]; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 8411dd5acd43..ee47de9fbc05 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -5135,11 +5135,9 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) if (dfcp) qlt_schedule_sess_for_deletion(tfcp); - - if (N2N_TOPO(vha->hw)) - fcport->flags &= ~FCF_FABRIC_DEVICE; - if (N2N_TOPO(vha->hw)) { + fcport->flags &= ~FCF_FABRIC_DEVICE; + fcport->keep_nport_handle = 1; if (vha->flags.nvme_enabled) { fcport->fc4f_nvme = 1; fcport->n2n_flag = 1; -- cgit v1.2.3 From 0aabb6b699f72dca96988d3f428e222f932dc889 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Thu, 12 Sep 2019 11:09:11 -0700 Subject: scsi: qla2xxx: Fix Nport ID display value For N2N, the NPort ID is assigned by driver in the PLOGI ELS. According to FW Spec the byte order for SID is not the same as DID. Link: https://lore.kernel.org/r/20190912180918.6436-8-hmadhani@marvell.com Reviewed-by: Roman Bolshakov Tested-by: Roman Bolshakov Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_iocb.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index e92e52aa6e9b..518eb954cf42 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2656,9 +2656,10 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; els_iocb->port_id[1] = sp->fcport->d_id.b.area; els_iocb->port_id[2] = sp->fcport->d_id.b.domain; - els_iocb->s_id[0] = vha->d_id.b.al_pa; - els_iocb->s_id[1] = vha->d_id.b.area; - els_iocb->s_id[2] = vha->d_id.b.domain; + /* For SID the byte order is different than DID */ + els_iocb->s_id[1] = vha->d_id.b.al_pa; + els_iocb->s_id[2] = vha->d_id.b.area; + els_iocb->s_id[0] = vha->d_id.b.domain; if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) { els_iocb->control_flags = 0; -- cgit v1.2.3 From 5028851cdfdf78dc22eacbc44a0ab0b3f599ee4a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 20 Sep 2019 13:18:21 +0100 Subject: drm/i915: Mark contents as dirty on a write fault MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since dropping the set-to-gtt-domain in commit a679f58d0510 ("drm/i915: Flush pages on acquisition"), we no longer mark the contents as dirty on a write fault. 
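For context, the dirty flag is what tells the shmem backing store to push an object's pages back to the page cache (and hence to swap) when the pages are released; a simplified sketch of that release path, with the obvious locals (page, sgt_iter, pages, obj) assumed and not the verbatim driver code:

  /* Simplified sketch: only an object flagged dirty has its pages
   * written back on release, so a "clean"-looking object can be
   * dropped and later refaulted with stale contents.
   */
  for_each_sgt_page(page, sgt_iter, pages) {
          if (obj->mm.dirty)
                  set_page_dirty(page);   /* contents survive eviction to swap */
          put_page(page);
  }
  obj->mm.dirty = false;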
This has the issue of us then not marking the pages as dirty on releasing the buffer, which means the contents are not written out to the swap device (should we ever pick that buffer as a victim). Notably, this is visible in the dumb buffer interface used for cursors. Having updated the cursor contents via mmap, and swapped away, if the shrinker should evict the old cursor, upon next reuse, the cursor would be invisible. E.g. echo 80 > /proc/sys/kernel/sysrq ; echo f > /proc/sysrq-trigger Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111541 Fixes: a679f58d0510 ("drm/i915: Flush pages on acquisition") Signed-off-by: Chris Wilson Cc: Matthew Auld Cc: Ville Syrjälä Cc: # v5.2+ Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20190920121821.7223-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 1748e63156a2..860b751c51f1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -319,7 +319,11 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) intel_wakeref_auto(&i915->ggtt.userfault_wakeref, msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); - i915_vma_set_ggtt_write(vma); + if (write) { + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + i915_vma_set_ggtt_write(vma); + obj->mm.dirty = true; + } err_fence: i915_vma_unpin_fence(vma); -- cgit v1.2.3 From 1b74d46782d0615dace4729c22d1862a0355b124 Mon Sep 17 00:00:00 2001 From: Stanislav Lisovskiy Date: Fri, 20 Sep 2019 11:37:54 +0300 Subject: drm/i915: Add TigerLake bandwidth checking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added bandwidth calculation algorithm and checks, similar way as it was done for ICL, some constants were corrected according to BSpec 53998. v2: Start using same icl_get_bw_info function to avoid code duplication. Moved mpagesize to memory info related structure as it is now dependent on memory type. Fixed qi.t_bl field assignment. v3: Removed mpagesize as unused. Duplicate code and redundant blankline fixed. v4: Changed ordering of IS_GEN checks as agreed. Minor commit message fixes. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111600 Reviewed-by: James Ausmus Signed-off-by: Stanislav Lisovskiy Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20190920083754.5920-1-stanislav.lisovskiy@intel.com --- drivers/gpu/drm/i915/display/intel_bw.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 688858ebe4d0..cd58e47ab7b2 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -56,7 +56,10 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv, qi->num_channels = (val & 0xf0) >> 4; qi->num_points = (val & 0xf00) >> 8; - qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8; + if (IS_GEN(dev_priv, 12)) + qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16; + else if (IS_GEN(dev_priv, 11)) + qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 
4 : 8; return 0; } @@ -132,20 +135,25 @@ static int icl_sagv_max_dclk(const struct intel_qgv_info *qi) } struct intel_sa_info { - u8 deburst, mpagesize, deprogbwlimit, displayrtids; + u16 displayrtids; + u8 deburst, deprogbwlimit; }; static const struct intel_sa_info icl_sa_info = { .deburst = 8, - .mpagesize = 16, .deprogbwlimit = 25, /* GB/s */ .displayrtids = 128, }; -static int icl_get_bw_info(struct drm_i915_private *dev_priv) +static const struct intel_sa_info tgl_sa_info = { + .deburst = 16, + .deprogbwlimit = 34, /* GB/s */ + .displayrtids = 256, +}; + +static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) { struct intel_qgv_info qi = {}; - const struct intel_sa_info *sa = &icl_sa_info; bool is_y_tile = true; /* assume y tile may be used */ int num_channels; int deinterleave; @@ -233,14 +241,16 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, void intel_bw_init_hw(struct drm_i915_private *dev_priv) { - if (IS_GEN(dev_priv, 11)) - icl_get_bw_info(dev_priv); + if (IS_GEN(dev_priv, 12)) + icl_get_bw_info(dev_priv, &tgl_sa_info); + else if (IS_GEN(dev_priv, 11)) + icl_get_bw_info(dev_priv, &icl_sa_info); } static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv, int num_planes) { - if (IS_GEN(dev_priv, 11)) + if (INTEL_GEN(dev_priv) >= 11) /* * FIXME with SAGV disabled maybe we can assume * point 1 will always be used? Seems to match -- cgit v1.2.3 From 4ef5d76b453908f21341e661a9b6f96862f6f589 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 24 Sep 2019 09:24:28 -0700 Subject: ARM: dts: Fix gpio0 flags for am335x-icev2 The ti,no-idle-on-init and ti,no-reset-on-init flags need to be at the interconnect target module level for the modules that have it defined. Otherwise we get the following warnings: dts flag should be at module level for ti,no-idle-on-init dts flag should be at module level for ti,no-reset-on-init Fixes: 87fc89ced3a7 ("ARM: dts: am335x: Move l4 child devices to probe them with ti-sysc") Cc: Lokesh Vutla Reported-by: Suman Anna Reviewed-by: Lokesh Vutla Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am335x-icev2.dts | 2 +- arch/arm/boot/dts/am33xx-l4.dtsi | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts index 18f70b35da4c..204bccfcc110 100644 --- a/arch/arm/boot/dts/am335x-icev2.dts +++ b/arch/arm/boot/dts/am335x-icev2.dts @@ -432,7 +432,7 @@ pinctrl-0 = <&mmc0_pins_default>; }; -&gpio0 { +&gpio0_target { /* Do not idle the GPIO used for holding the VTT regulator */ ti,no-reset-on-init; ti,no-idle-on-init; diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi index 46849d6ecb3e..1515f4f91499 100644 --- a/arch/arm/boot/dts/am33xx-l4.dtsi +++ b/arch/arm/boot/dts/am33xx-l4.dtsi @@ -127,7 +127,7 @@ ranges = <0x0 0x5000 0x1000>; }; - target-module@7000 { /* 0x44e07000, ap 14 20.0 */ + gpio0_target: target-module@7000 { /* 0x44e07000, ap 14 20.0 */ compatible = "ti,sysc-omap2", "ti,sysc"; ti,hwmods = "gpio1"; reg = <0x7000 0x4>, -- cgit v1.2.3 From 7dc56af5260e958e36a08b2e0822029ddf765770 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 24 Sep 2019 15:59:50 +0100 Subject: drm/i915/selftests: Verify the LRC register layout between init and HW Before we submit the first context to HW, we need to construct a valid image of the register state. This layout is defined by the HW and should match the layout generated by HW when it saves the context image. 
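Conceptually, the new check in selftest_lrc.c (live_lrc_layout) walks the HW-saved default context image ('hw') and the freshly built software image ('lrc') in lockstep, requiring the MI_LOAD_REGISTER_IMM headers and the register offsets to agree while ignoring the register values. An abridged sketch of that loop, with error reporting and special cases trimmed, so not the verbatim test:

  u32 dw = 0;

  do {
          u32 lri = hw[dw];

          if (lri == 0) {         /* padding between LRI blocks */
                  dw++;
                  continue;
          }

          if (lrc[dw] != lri)     /* MI_LOAD_REGISTER_IMM header must match */
                  return -EINVAL;

          /* length field of MI_LOAD_REGISTER_IMM(n) is 2*n - 1 */
          lri = (lri & 0x7f) + 1;
          dw++;

          while (lri) {
                  if (hw[dw] != lrc[dw])  /* register offset must match */
                          return -EINVAL;
                  dw += 2;                /* skip over the register value */
                  lri -= 2;
          }
  } while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);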
Asserting that this should be equivalent should help avoid any undefined behaviour and verify that we haven't missed anything important! Of course, having insisted that the initial register state within the LRC should match that returned by HW, we need to ensure that it does. v2: Drop the RELATIVE_MMIO flag from gen11, we ignore it for constructing the lrc image. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Daniele Ceraolo Spurio Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190924145950.3011-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 667 ++++++++++++++------- drivers/gpu/drm/i915/gt/intel_lrc_reg.h | 62 +- drivers/gpu/drm/i915/gt/selftest_lrc.c | 142 +++++ drivers/gpu/drm/i915/i915_perf.c | 35 +- drivers/gpu/drm/i915/i915_perf.h | 5 +- .../gpu/drm/i915/selftests/i915_live_selftests.h | 1 + 7 files changed, 648 insertions(+), 266 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 4a34c4f62065..f7ba0935ed67 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -1115,7 +1115,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq, offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE + - (CTX_R_PWR_CLK_STATE + 1) * 4; + CTX_R_PWR_CLK_STATE * 4; *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = lower_32_bits(offset); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 6cfdc0f9f2b9..caac091b3eb9 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -230,9 +230,10 @@ static int __execlists_context_alloc(struct intel_context *ce, struct intel_engine_cs *engine); static void execlists_init_reg_state(u32 *reg_state, - struct intel_context *ce, - struct intel_engine_cs *engine, - struct intel_ring *ring); + const struct intel_context *ce, + const struct intel_engine_cs *engine, + const struct intel_ring *ring, + bool close); static void mark_eio(struct i915_request *rq) { @@ -471,6 +472,411 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) return desc; } +static u32 *set_offsets(u32 *regs, + const u8 *data, + const struct intel_engine_cs *engine) +#define NOP(x) (BIT(7) | (x)) +#define LRI(count, flags) ((flags) << 6 | (count)) +#define POSTED BIT(0) +#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200)) +#define REG16(x) \ + (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \ + (((x) >> 2) & 0x7f) +#define END() 0 +{ + const u32 base = engine->mmio_base; + + while (*data) { + u8 count, flags; + + if (*data & BIT(7)) { /* skip */ + regs += *data++ & ~BIT(7); + continue; + } + + count = *data & 0x3f; + flags = *data >> 6; + data++; + + *regs = MI_LOAD_REGISTER_IMM(count); + if (flags & POSTED) + *regs |= MI_LRI_FORCE_POSTED; + if (INTEL_GEN(engine->i915) >= 11) + *regs |= MI_LRI_CS_MMIO; + regs++; + + GEM_BUG_ON(!count); + do { + u32 offset = 0; + u8 v; + + do { + v = *data++; + offset <<= 7; + offset |= v & ~BIT(7); + } while (v & BIT(7)); + + *regs = base + (offset << 2); + regs += 2; + } while (--count); + } + + return regs; +} + +static const u8 gen8_xcs_offsets[] = { + NOP(1), + LRI(11, 0), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x11c), + REG(0x114), + REG(0x118), + + NOP(9), + LRI(9, 0), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + 
REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + NOP(13), + LRI(2, 0), + REG16(0x200), + REG(0x028), + + END(), +}; + +static const u8 gen9_xcs_offsets[] = { + NOP(1), + LRI(14, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x11c), + REG(0x114), + REG(0x118), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + + NOP(3), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + NOP(13), + LRI(1, POSTED), + REG16(0x200), + + NOP(13), + LRI(44, POSTED), + REG(0x028), + REG(0x09c), + REG(0x0c0), + REG(0x178), + REG(0x17c), + REG16(0x358), + REG(0x170), + REG(0x150), + REG(0x154), + REG(0x158), + REG16(0x41c), + REG16(0x600), + REG16(0x604), + REG16(0x608), + REG16(0x60c), + REG16(0x610), + REG16(0x614), + REG16(0x618), + REG16(0x61c), + REG16(0x620), + REG16(0x624), + REG16(0x628), + REG16(0x62c), + REG16(0x630), + REG16(0x634), + REG16(0x638), + REG16(0x63c), + REG16(0x640), + REG16(0x644), + REG16(0x648), + REG16(0x64c), + REG16(0x650), + REG16(0x654), + REG16(0x658), + REG16(0x65c), + REG16(0x660), + REG16(0x664), + REG16(0x668), + REG16(0x66c), + REG16(0x670), + REG16(0x674), + REG16(0x678), + REG16(0x67c), + REG(0x068), + + END(), +}; + +static const u8 gen12_xcs_offsets[] = { + NOP(1), + LRI(13, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + REG(0x180), + REG16(0x2b4), + + NOP(5), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + NOP(13), + LRI(2, POSTED), + REG16(0x200), + REG16(0x204), + + NOP(11), + LRI(50, POSTED), + REG16(0x588), + REG16(0x588), + REG16(0x588), + REG16(0x588), + REG16(0x588), + REG16(0x588), + REG(0x028), + REG(0x09c), + REG(0x0c0), + REG(0x178), + REG(0x17c), + REG16(0x358), + REG(0x170), + REG(0x150), + REG(0x154), + REG(0x158), + REG16(0x41c), + REG16(0x600), + REG16(0x604), + REG16(0x608), + REG16(0x60c), + REG16(0x610), + REG16(0x614), + REG16(0x618), + REG16(0x61c), + REG16(0x620), + REG16(0x624), + REG16(0x628), + REG16(0x62c), + REG16(0x630), + REG16(0x634), + REG16(0x638), + REG16(0x63c), + REG16(0x640), + REG16(0x644), + REG16(0x648), + REG16(0x64c), + REG16(0x650), + REG16(0x654), + REG16(0x658), + REG16(0x65c), + REG16(0x660), + REG16(0x664), + REG16(0x668), + REG16(0x66c), + REG16(0x670), + REG16(0x674), + REG16(0x678), + REG16(0x67c), + REG(0x068), + + END(), +}; + +static const u8 gen8_rcs_offsets[] = { + NOP(1), + LRI(14, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x11c), + REG(0x114), + REG(0x118), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + + NOP(3), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + NOP(13), + LRI(1, 0), + REG(0x0c8), + + END(), +}; + +static const u8 gen11_rcs_offsets[] = { + NOP(1), + LRI(15, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x11c), + REG(0x114), + REG(0x118), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + REG(0x180), + + NOP(1), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + 
REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + LRI(1, POSTED), + REG(0x1b0), + + NOP(10), + LRI(1, 0), + REG(0x0c8), + + END(), +}; + +static const u8 gen12_rcs_offsets[] = { + NOP(1), + LRI(13, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + REG(0x180), + REG16(0x2b4), + + NOP(5), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + LRI(3, POSTED), + REG(0x1b0), + REG16(0x5a8), + REG16(0x5ac), + + NOP(6), + LRI(1, 0), + REG(0x0c8), + + END(), +}; + +#undef END +#undef REG16 +#undef REG +#undef LRI +#undef NOP + +static const u8 *reg_offsets(const struct intel_engine_cs *engine) +{ + if (engine->class == RENDER_CLASS) { + if (INTEL_GEN(engine->i915) >= 12) + return gen12_rcs_offsets; + else if (INTEL_GEN(engine->i915) >= 11) + return gen11_rcs_offsets; + else + return gen8_rcs_offsets; + } else { + if (INTEL_GEN(engine->i915) >= 12) + return gen12_xcs_offsets; + else if (INTEL_GEN(engine->i915) >= 9) + return gen9_xcs_offsets; + else + return gen8_xcs_offsets; + } +} + static void unwind_wa_tail(struct i915_request *rq) { rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); @@ -654,7 +1060,7 @@ static u64 execlists_update_context(const struct i915_request *rq) struct intel_context *ce = rq->hw_context; u64 desc; - ce->lrc_reg_state[CTX_RING_TAIL + 1] = + ce->lrc_reg_state[CTX_RING_TAIL] = intel_ring_set_tail(rq->ring, rq->tail); /* @@ -826,54 +1232,7 @@ static bool can_merge_rq(const struct i915_request *prev, static void virtual_update_register_offsets(u32 *regs, struct intel_engine_cs *engine) { - u32 base = engine->mmio_base; - - /* Refactor so that we only have one place that knows all the offsets! */ - GEM_WARN_ON(INTEL_GEN(engine->i915) >= 12); - - /* Must match execlists_init_reg_state()! 
*/ - - /* Common part */ - regs[CTX_CONTEXT_CONTROL] = - i915_mmio_reg_offset(RING_CONTEXT_CONTROL(base)); - regs[CTX_RING_HEAD] = i915_mmio_reg_offset(RING_HEAD(base)); - regs[CTX_RING_TAIL] = i915_mmio_reg_offset(RING_TAIL(base)); - regs[CTX_RING_BUFFER_START] = i915_mmio_reg_offset(RING_START(base)); - regs[CTX_RING_BUFFER_CONTROL] = i915_mmio_reg_offset(RING_CTL(base)); - - regs[CTX_BB_HEAD_U] = i915_mmio_reg_offset(RING_BBADDR_UDW(base)); - regs[CTX_BB_HEAD_L] = i915_mmio_reg_offset(RING_BBADDR(base)); - regs[CTX_BB_STATE] = i915_mmio_reg_offset(RING_BBSTATE(base)); - - regs[CTX_SECOND_BB_HEAD_U] = - i915_mmio_reg_offset(RING_SBBADDR_UDW(base)); - regs[CTX_SECOND_BB_HEAD_L] = i915_mmio_reg_offset(RING_SBBADDR(base)); - regs[CTX_SECOND_BB_STATE] = i915_mmio_reg_offset(RING_SBBSTATE(base)); - - /* PPGTT part */ - regs[CTX_CTX_TIMESTAMP] = - i915_mmio_reg_offset(RING_CTX_TIMESTAMP(base)); - - regs[CTX_PDP3_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 3)); - regs[CTX_PDP3_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 3)); - regs[CTX_PDP2_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 2)); - regs[CTX_PDP2_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 2)); - regs[CTX_PDP1_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 1)); - regs[CTX_PDP1_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 1)); - regs[CTX_PDP0_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); - regs[CTX_PDP0_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); - - if (engine->class == RENDER_CLASS) { - regs[CTX_RCS_INDIRECT_CTX] = - i915_mmio_reg_offset(RING_INDIRECT_CTX(base)); - regs[CTX_RCS_INDIRECT_CTX_OFFSET] = - i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(base)); - regs[CTX_BB_PER_CTX_PTR] = - i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(base)); - - regs[CTX_R_PWR_CLK_STATE] = - i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE); - } + set_offsets(regs, reg_offsets(engine), engine); } static bool virtual_matches(const struct virtual_engine *ve, @@ -1738,8 +2097,8 @@ static void execlists_context_unpin(struct intel_context *ce) } static void -__execlists_update_reg_state(struct intel_context *ce, - struct intel_engine_cs *engine) +__execlists_update_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine) { struct intel_ring *ring = ce->ring; u32 *regs = ce->lrc_reg_state; @@ -1747,16 +2106,16 @@ __execlists_update_reg_state(struct intel_context *ce, GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); - regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma); - regs[CTX_RING_HEAD + 1] = ring->head; - regs[CTX_RING_TAIL + 1] = ring->tail; + regs[CTX_RING_BUFFER_START] = i915_ggtt_offset(ring->vma); + regs[CTX_RING_HEAD] = ring->head; + regs[CTX_RING_TAIL] = ring->tail; /* RPCS */ if (engine->class == RENDER_CLASS) { - regs[CTX_R_PWR_CLK_STATE + 1] = + regs[CTX_R_PWR_CLK_STATE] = intel_sseu_make_rpcs(engine->i915, &ce->sseu); - i915_oa_init_reg_state(engine, ce, regs); + i915_oa_init_reg_state(ce, engine); } } @@ -2465,7 +2824,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, engine->context_size - PAGE_SIZE); } - execlists_init_reg_state(regs, ce, engine, ce->ring); + execlists_init_reg_state(regs, ce, engine, ce->ring, false); out_replay: GEM_TRACE("%s replay {head:%04x, tail:%04x\n", @@ -3243,7 +3602,7 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine) return 0; } -static u32 
intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) +static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine) { u32 indirect_ctx_offset; @@ -3278,75 +3637,48 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) static void init_common_reg_state(u32 * const regs, - struct i915_ppgtt * const ppgtt, - struct intel_engine_cs *engine, - struct intel_ring *ring) + const struct intel_engine_cs *engine, + const struct intel_ring *ring) { - const u32 base = engine->mmio_base; - - CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base), + regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) | - _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH)); - if (INTEL_GEN(engine->i915) < 11) { - regs[CTX_CONTEXT_CONTROL + 1] |= + _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH); + if (INTEL_GEN(engine->i915) < 11) + regs[CTX_CONTEXT_CONTROL] |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | CTX_CTRL_RS_CTX_ENABLE); - } - CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0); - CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0); - CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0); - CTX_REG(regs, CTX_RING_BUFFER_CONTROL, RING_CTL(base), - RING_CTL_SIZE(ring->size) | RING_VALID); - CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0); - CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0); - CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT); + + regs[CTX_RING_BUFFER_CONTROL] = RING_CTL_SIZE(ring->size) | RING_VALID; + regs[CTX_BB_STATE] = RING_BB_PPGTT; } static void init_wa_bb_reg_state(u32 * const regs, - struct intel_engine_cs *engine, + const struct intel_engine_cs *engine, u32 pos_bb_per_ctx) { - struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx; - const u32 base = engine->mmio_base; - const u32 pos_indirect_ctx = pos_bb_per_ctx + 2; - const u32 pos_indirect_ctx_offset = pos_indirect_ctx + 2; + const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx; + + if (wa_ctx->per_ctx.size) { + const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); + + regs[pos_bb_per_ctx] = + (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; + } - CTX_REG(regs, pos_indirect_ctx, RING_INDIRECT_CTX(base), 0); - CTX_REG(regs, pos_indirect_ctx_offset, - RING_INDIRECT_CTX_OFFSET(base), 0); if (wa_ctx->indirect_ctx.size) { const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); - regs[pos_indirect_ctx + 1] = + regs[pos_bb_per_ctx + 2] = (ggtt_offset + wa_ctx->indirect_ctx.offset) | (wa_ctx->indirect_ctx.size / CACHELINE_BYTES); - regs[pos_indirect_ctx_offset + 1] = + regs[pos_bb_per_ctx + 4] = intel_lr_indirect_ctx_offset(engine) << 6; } - - CTX_REG(regs, pos_bb_per_ctx, RING_BB_PER_CTX_PTR(base), 0); - if (wa_ctx->per_ctx.size) { - const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); - - regs[pos_bb_per_ctx + 1] = - (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; - } } -static void init_ppgtt_reg_state(u32 *regs, u32 base, - struct i915_ppgtt *ppgtt) +static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt) { - /* PDP values well be assigned later if needed */ - CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0); - CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0); - CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(base, 2), 0); - CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(base, 2), 0); - CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(base, 1), 0); - CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(base, 1), 0); - CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(base, 0), 0); 
- CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(base, 0), 0); - if (i915_vm_is_4lvl(&ppgtt->vm)) { /* 64b PPGTT (48bit canonical) * PDP0_DESCRIPTOR contains the base address to PML4 and @@ -3369,91 +3701,11 @@ static struct i915_ppgtt *vm_alias(struct i915_address_space *vm) return i915_vm_to_ppgtt(vm); } -static void gen8_init_reg_state(u32 * const regs, - struct intel_context *ce, - struct intel_engine_cs *engine, - struct intel_ring *ring) -{ - struct i915_ppgtt * const ppgtt = vm_alias(ce->vm); - const bool rcs = engine->class == RENDER_CLASS; - const u32 base = engine->mmio_base; - const u32 lri_base = - intel_engine_has_relative_mmio(engine) ? MI_LRI_CS_MMIO : 0; - - regs[CTX_LRI_HEADER_0] = - MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) | - MI_LRI_FORCE_POSTED | - lri_base; - - init_common_reg_state(regs, ppgtt, engine, ring); - CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0); - CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0); - CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0); - if (rcs) - init_wa_bb_reg_state(regs, engine, CTX_BB_PER_CTX_PTR); - - regs[CTX_LRI_HEADER_1] = - MI_LOAD_REGISTER_IMM(9) | - MI_LRI_FORCE_POSTED | - lri_base; - - CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0); - - init_ppgtt_reg_state(regs, base, ppgtt); - - if (rcs) { - regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1) | lri_base; - CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0); - } - - regs[CTX_END] = MI_BATCH_BUFFER_END; - if (INTEL_GEN(engine->i915) >= 10) - regs[CTX_END] |= BIT(0); -} - -static void gen12_init_reg_state(u32 * const regs, - struct intel_context *ce, - struct intel_engine_cs *engine, - struct intel_ring *ring) -{ - struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(ce->vm); - const bool rcs = engine->class == RENDER_CLASS; - const u32 base = engine->mmio_base; - const u32 lri_base = - intel_engine_has_relative_mmio(engine) ? MI_LRI_CS_MMIO : 0; - - regs[CTX_LRI_HEADER_0] = - MI_LOAD_REGISTER_IMM(rcs ? 11 : 9) | - MI_LRI_FORCE_POSTED | - lri_base; - - init_common_reg_state(regs, ppgtt, engine, ring); - - /* We want ctx_ptr for all engines to be set */ - init_wa_bb_reg_state(regs, engine, GEN12_CTX_BB_PER_CTX_PTR); - - regs[CTX_LRI_HEADER_1] = - MI_LOAD_REGISTER_IMM(9) | - MI_LRI_FORCE_POSTED | - lri_base; - - CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0); - - init_ppgtt_reg_state(regs, base, ppgtt); - - if (rcs) { - regs[GEN12_CTX_LRI_HEADER_3] = - MI_LOAD_REGISTER_IMM(1) | lri_base; - CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0); - - /* TODO: oa_init_reg_state ? */ - } -} - static void execlists_init_reg_state(u32 *regs, - struct intel_context *ce, - struct intel_engine_cs *engine, - struct intel_ring *ring) + const struct intel_context *ce, + const struct intel_engine_cs *engine, + const struct intel_ring *ring, + bool close) { /* * A context is actually a big batch buffer with several @@ -3465,10 +3717,21 @@ static void execlists_init_reg_state(u32 *regs, * * Must keep consistent with virtual_update_register_offsets(). 
*/ - if (INTEL_GEN(engine->i915) >= 12) - gen12_init_reg_state(regs, ce, engine, ring); - else - gen8_init_reg_state(regs, ce, engine, ring); + u32 *bbe = set_offsets(regs, reg_offsets(engine), engine); + + if (close) { /* Close the batch; used mainly by live_lrc_layout() */ + *bbe = MI_BATCH_BUFFER_END; + if (INTEL_GEN(engine->i915) >= 10) + *bbe |= BIT(0); + } + + init_common_reg_state(regs, engine, ring); + init_ppgtt_reg_state(regs, vm_alias(ce->vm)); + + init_wa_bb_reg_state(regs, engine, + INTEL_GEN(engine->i915) >= 12 ? + GEN12_CTX_BB_PER_CTX_PTR : + CTX_BB_PER_CTX_PTR); } static int @@ -3477,6 +3740,7 @@ populate_lr_context(struct intel_context *ce, struct intel_engine_cs *engine, struct intel_ring *ring) { + bool inhibit = true; void *vaddr; u32 *regs; int ret; @@ -3508,14 +3772,15 @@ populate_lr_context(struct intel_context *ce, memcpy(vaddr + start, defaults + start, engine->context_size); i915_gem_object_unpin_map(engine->default_state); + inhibit = false; } /* The second page of the context object contains some fields which must * be set up prior to the first execution. */ regs = vaddr + LRC_STATE_PN * PAGE_SIZE; - execlists_init_reg_state(regs, ce, engine, ring); - if (!engine->default_state) - regs[CTX_CONTEXT_CONTROL + 1] |= + execlists_init_reg_state(regs, ce, engine, ring, inhibit); + if (inhibit) + regs[CTX_CONTEXT_CONTROL] |= _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); ret = 0; @@ -4212,7 +4477,7 @@ void intel_lr_context_reset(struct intel_engine_cs *engine, engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, engine->context_size - PAGE_SIZE); } - execlists_init_reg_state(regs, ce, engine, ce->ring); + execlists_init_reg_state(regs, ce, engine, ce->ring, false); } /* Rerun the request; its payload has been neutered (if guilty). 
*/ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h index 7e773e74a3fe..06ab0276e10e 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h @@ -10,60 +10,40 @@ #include /* GEN8 to GEN11 Reg State Context */ -#define CTX_LRI_HEADER_0 0x01 -#define CTX_CONTEXT_CONTROL 0x02 -#define CTX_RING_HEAD 0x04 -#define CTX_RING_TAIL 0x06 -#define CTX_RING_BUFFER_START 0x08 -#define CTX_RING_BUFFER_CONTROL 0x0a -#define CTX_BB_HEAD_U 0x0c -#define CTX_BB_HEAD_L 0x0e -#define CTX_BB_STATE 0x10 -#define CTX_SECOND_BB_HEAD_U 0x12 -#define CTX_SECOND_BB_HEAD_L 0x14 -#define CTX_SECOND_BB_STATE 0x16 -#define CTX_BB_PER_CTX_PTR 0x18 -#define CTX_RCS_INDIRECT_CTX 0x1a -#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c -#define CTX_LRI_HEADER_1 0x21 -#define CTX_CTX_TIMESTAMP 0x22 -#define CTX_PDP3_UDW 0x24 -#define CTX_PDP3_LDW 0x26 -#define CTX_PDP2_UDW 0x28 -#define CTX_PDP2_LDW 0x2a -#define CTX_PDP1_UDW 0x2c -#define CTX_PDP1_LDW 0x2e -#define CTX_PDP0_UDW 0x30 -#define CTX_PDP0_LDW 0x32 -#define CTX_LRI_HEADER_2 0x41 -#define CTX_R_PWR_CLK_STATE 0x42 -#define CTX_END 0x44 +#define CTX_CONTEXT_CONTROL (0x02 + 1) +#define CTX_RING_HEAD (0x04 + 1) +#define CTX_RING_TAIL (0x06 + 1) +#define CTX_RING_BUFFER_START (0x08 + 1) +#define CTX_RING_BUFFER_CONTROL (0x0a + 1) +#define CTX_BB_STATE (0x10 + 1) +#define CTX_BB_PER_CTX_PTR (0x18 + 1) +#define CTX_PDP3_UDW (0x24 + 1) +#define CTX_PDP3_LDW (0x26 + 1) +#define CTX_PDP2_UDW (0x28 + 1) +#define CTX_PDP2_LDW (0x2a + 1) +#define CTX_PDP1_UDW (0x2c + 1) +#define CTX_PDP1_LDW (0x2e + 1) +#define CTX_PDP0_UDW (0x30 + 1) +#define CTX_PDP0_LDW (0x32 + 1) +#define CTX_R_PWR_CLK_STATE (0x42 + 1) #define GEN9_CTX_RING_MI_MODE 0x54 /* GEN12+ Reg State Context */ -#define GEN12_CTX_BB_PER_CTX_PTR 0x12 -#define GEN12_CTX_LRI_HEADER_3 0x41 - -#define CTX_REG(reg_state, pos, reg, val) do { \ - u32 *reg_state__ = (reg_state); \ - const u32 pos__ = (pos); \ - (reg_state__)[(pos__) + 0] = i915_mmio_reg_offset(reg); \ - (reg_state__)[(pos__) + 1] = (val); \ -} while (0) +#define GEN12_CTX_BB_PER_CTX_PTR (0x12 + 1) #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \ u32 *reg_state__ = (reg_state); \ const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \ - (reg_state__)[CTX_PDP ## n ## _UDW + 1] = upper_32_bits(addr__); \ - (reg_state__)[CTX_PDP ## n ## _LDW + 1] = lower_32_bits(addr__); \ + (reg_state__)[CTX_PDP ## n ## _UDW] = upper_32_bits(addr__); \ + (reg_state__)[CTX_PDP ## n ## _LDW] = lower_32_bits(addr__); \ } while (0) #define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \ u32 *reg_state__ = (reg_state); \ const u64 addr__ = px_dma(ppgtt->pd); \ - (reg_state__)[CTX_PDP0_UDW + 1] = upper_32_bits(addr__); \ - (reg_state__)[CTX_PDP0_LDW + 1] = lower_32_bits(addr__); \ + (reg_state__)[CTX_PDP0_UDW] = upper_32_bits(addr__); \ + (reg_state__)[CTX_PDP0_LDW] = lower_32_bits(addr__); \ } while (0) #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 93a871bfd95d..22ea2e747064 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -2201,3 +2201,145 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) return i915_live_subtests(tests, i915); } + +static void hexdump(const void *buf, size_t len) +{ + const size_t rowsize = 8 * sizeof(u32); + const void *prev = NULL; + bool skip = false; + size_t pos; + + for (pos = 0; pos < len; pos += rowsize) { + 
char line[128]; + + if (prev && !memcmp(prev, buf + pos, rowsize)) { + if (!skip) { + pr_info("*\n"); + skip = true; + } + continue; + } + + WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos, + rowsize, sizeof(u32), + line, sizeof(line), + false) >= sizeof(line)); + pr_info("[%04zx] %s\n", pos, line); + + prev = buf + pos; + skip = false; + } +} + +static int live_lrc_layout(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 *mem; + int err; + + /* + * Check the registers offsets we use to create the initial reg state + * match the layout saved by HW. + */ + + mem = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!mem) + return -ENOMEM; + + err = 0; + for_each_engine(engine, gt->i915, id) { + u32 *hw, *lrc; + int dw; + + if (!engine->default_state) + continue; + + hw = i915_gem_object_pin_map(engine->default_state, + I915_MAP_WB); + if (IS_ERR(hw)) { + err = PTR_ERR(hw); + break; + } + hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + + lrc = memset(mem, 0, PAGE_SIZE); + execlists_init_reg_state(lrc, + engine->kernel_context, + engine, + engine->kernel_context->ring, + true); + + dw = 0; + do { + u32 lri = hw[dw]; + + if (lri == 0) { + dw++; + continue; + } + + if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { + pr_err("%s: Expected LRI command at dword %d, found %08x\n", + engine->name, dw, lri); + err = -EINVAL; + break; + } + + if (lrc[dw] != lri) { + pr_err("%s: LRI command mismatch at dword %d, expected %08x found %08x\n", + engine->name, dw, lri, lrc[dw]); + err = -EINVAL; + break; + } + + lri &= 0x7f; + lri++; + dw++; + + while (lri) { + if (hw[dw] != lrc[dw]) { + pr_err("%s: Different registers found at dword %d, expected %x, found %x\n", + engine->name, dw, hw[dw], lrc[dw]); + err = -EINVAL; + break; + } + + /* + * Skip over the actual register value as we + * expect that to differ. + */ + dw += 2; + lri -= 2; + } + } while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END); + + if (err) { + pr_info("%s: HW register image:\n", engine->name); + hexdump(hw, PAGE_SIZE); + + pr_info("%s: SW register image:\n", engine->name); + hexdump(lrc, PAGE_SIZE); + } + + i915_gem_object_unpin_map(engine->default_state); + if (err) + break; + } + + kfree(mem); + return err; +} + +int intel_lrc_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_lrc_layout), + }; + + if (!HAS_LOGICAL_RING_CONTEXTS(i915)) + return 0; + + return intel_gt_live_subtests(tests, &i915->gt); +} diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index c1b764233761..524f6710b7aa 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1673,10 +1673,8 @@ static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config, * in the case that the OA unit has been disabled. 
*/ static void -gen8_update_reg_state_unlocked(struct i915_perf_stream *stream, - struct intel_context *ce, - u32 *reg_state, - const struct i915_oa_config *oa_config) +gen8_update_reg_state_unlocked(const struct intel_context *ce, + const struct i915_perf_stream *stream) { struct drm_i915_private *i915 = ce->engine->i915; u32 ctx_oactxctrl = i915->perf.ctx_oactxctrl_offset; @@ -1691,21 +1689,19 @@ gen8_update_reg_state_unlocked(struct i915_perf_stream *stream, EU_PERF_CNTL5, EU_PERF_CNTL6, }; + u32 *reg_state = ce->lrc_reg_state; int i; - CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL, + reg_state[ctx_oactxctrl + 1] = (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | - GEN8_OA_COUNTER_RESUME); + GEN8_OA_COUNTER_RESUME; - for (i = 0; i < ARRAY_SIZE(flex_regs); i++) { - CTX_REG(reg_state, ctx_flexeu0 + i * 2, flex_regs[i], - oa_config_flex_reg(oa_config, flex_regs[i])); - } + for (i = 0; i < ARRAY_SIZE(flex_regs); i++) + reg_state[ctx_flexeu0 + i * 2 + 1] = + oa_config_flex_reg(stream->oa_config, flex_regs[i]); - CTX_REG(reg_state, - CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, - intel_sseu_make_rpcs(i915, &ce->sseu)); + reg_state[CTX_R_PWR_CLK_STATE] = intel_sseu_make_rpcs(i915, &ce->sseu); } struct flex { @@ -1729,7 +1725,7 @@ gen8_store_flex(struct i915_request *rq, offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE; do { *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = offset + (flex->offset + 1) * sizeof(u32); + *cs++ = offset + flex->offset * sizeof(u32); *cs++ = 0; *cs++ = flex->value; } while (flex++, --count); @@ -1863,7 +1859,7 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream, struct drm_i915_private *i915 = stream->dev_priv; /* The MMIO offsets for Flex EU registers aren't contiguous */ const u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset; -#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N)) +#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1) struct flex regs[] = { { GEN8_R_PWR_CLK_STATE, @@ -1871,7 +1867,7 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream, }, { GEN8_OACTXCONTROL, - i915->perf.ctx_oactxctrl_offset, + i915->perf.ctx_oactxctrl_offset + 1, ((stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | (stream->periodic ? 
GEN8_OA_TIMER_ENABLE : 0) | GEN8_OA_COUNTER_RESUME) @@ -2299,9 +2295,8 @@ err_config: return ret; } -void i915_oa_init_reg_state(struct intel_engine_cs *engine, - struct intel_context *ce, - u32 *regs) +void i915_oa_init_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine) { struct i915_perf_stream *stream; @@ -2313,7 +2308,7 @@ void i915_oa_init_reg_state(struct intel_engine_cs *engine, stream = engine->i915->perf.exclusive_stream; if (stream) - gen8_update_reg_state_unlocked(stream, ce, regs, stream->oa_config); + gen8_update_reg_state_unlocked(ce, stream); } /** diff --git a/drivers/gpu/drm/i915/i915_perf.h b/drivers/gpu/drm/i915/i915_perf.h index a412b16d9ffc..f4fb311184b1 100644 --- a/drivers/gpu/drm/i915/i915_perf.h +++ b/drivers/gpu/drm/i915/i915_perf.h @@ -25,8 +25,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -void i915_oa_init_reg_state(struct intel_engine_cs *engine, - struct intel_context *ce, - u32 *reg_state); +void i915_oa_init_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine); #endif /* __I915_PERF_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index 1ccf0f731ac0..66d83c1390c1 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -15,6 +15,7 @@ selftest(workarounds, intel_workarounds_live_selftests) selftest(gt_engines, intel_engine_live_selftests) selftest(gt_timelines, intel_timeline_live_selftests) selftest(gt_contexts, intel_context_live_selftests) +selftest(gt_lrc, intel_lrc_live_selftests) selftest(requests, i915_request_live_selftests) selftest(active, i915_active_live_selftests) selftest(objects, i915_gem_object_live_selftests) -- cgit v1.2.3 From 6ea3cee6d77d2c5513d5a61162b209f671fb3bb8 Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Tue, 24 Sep 2019 13:01:52 +0530 Subject: drm/i915: Add Pipe D cursor ctrl register for Gen12 Currently the offset for PIPE D cursor control register is missing in i915_reg.h due to which the cursor plane cannot be enabled for Pipe D. This also causes kernel Warning, when a user requests to enable cursor plane for PIPE D for Gen 12 platforms. This patch adds the CURSOR_CTL_D register in the i915_reg.h. 
v2: Rebase Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111640 Signed-off-by: Ankit Nautiyal Reviewed-by: Lucas De Marchi [Lucas: remove extra blank line] Signed-off-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/1569310312-12313-1-git-send-email-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/i915/i915_pci.c | 9 +++++++++ drivers/gpu/drm/i915/i915_reg.h | 1 + 2 files changed, 10 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index c2faa679658c..43530b0abc96 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -118,6 +118,14 @@ [PIPE_C] = IVB_CURSOR_C_OFFSET, \ } +#define TGL_CURSOR_OFFSETS \ + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + [PIPE_B] = IVB_CURSOR_B_OFFSET, \ + [PIPE_C] = IVB_CURSOR_C_OFFSET, \ + [PIPE_D] = TGL_CURSOR_D_OFFSET, \ + } + #define I9XX_COLORS \ .color = { .gamma_lut_size = 256 } #define I965_COLORS \ @@ -787,6 +795,7 @@ static const struct intel_device_info intel_elkhartlake_info = { [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ }, \ + TGL_CURSOR_OFFSETS, \ .has_global_mocs = 1, \ .display.has_dsb = 1 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a69c19aae5bb..28c483a3bbba 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6240,6 +6240,7 @@ enum { #define CHV_CURSOR_C_OFFSET 0x700e0 #define IVB_CURSOR_B_OFFSET 0x71080 #define IVB_CURSOR_C_OFFSET 0x72080 +#define TGL_CURSOR_D_OFFSET 0x73080 /* Display A control */ #define _DSPACNTR 0x70180 -- cgit v1.2.3 From b1da91c9ddcef7ee077ea04797db7fa3a952d683 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Tue, 24 Sep 2019 18:35:01 +0100 Subject: drm/i915/tgl: Swap engines for no rps (gpu reclocking) If we disable rps, it appears the Tigerlake is stable enough to run multiple engines simultaneously in CI. As disabling rps should only cause the execution to be slow, whereas many features depend on the different engines, we would prefer to have the engines enabled while the machine hangs are being debugged. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=111714 Signed-off-by: Chris Wilson Cc: Mika Kuoppala Acked-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190924173501.21956-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 43530b0abc96..ea53dfe2fba0 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -807,7 +807,7 @@ static const struct intel_device_info intel_tigerlake_12_info = { .display.has_modular_fia = 1, .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), - .engine_mask = BIT(RCS0), /* XXX reduced for debugging */ + .has_rps = false, /* XXX disabled for debugging */ }; #undef GEN -- cgit v1.2.3 From 21a045e430e56725628f361dede8a882e92469b9 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 20 Sep 2019 21:45:33 +0200 Subject: ieee802154: mcr20a: simplify a bit 'mcr20a_handle_rx_read_buf_complete()' Use a 'skb_put_data()' variant instead of rewritting it. The __skb_put_data variant is safe here. It is obvious that the skb can not overflow. It has just been allocated a few lines above with the same 'len'. 
Signed-off-by: Christophe JAILLET Acked-by: Xue Liu Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/mcr20a.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 17f2300e63ee..8dc04e2590b1 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -800,7 +800,7 @@ mcr20a_handle_rx_read_buf_complete(void *context) if (!skb) return; - memcpy(skb_put(skb, len), lp->rx_buf, len); + __skb_put_data(skb, lp->rx_buf, len); ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]); print_hex_dump_debug("mcr20a rx: ", DUMP_PREFIX_OFFSET, 16, 1, -- cgit v1.2.3 From 1b8588741fdcb5976a343f7e5f47c5c97593d6d5 Mon Sep 17 00:00:00 2001 From: Swati Sharma Date: Tue, 24 Sep 2019 19:28:20 +0530 Subject: Revert "drm/i915/color: Extract icl_read_luts()" This reverts commit 84af7649188194a74cdd6437235a5e3c86108f0f. This is causing problems with the display, displays are all bright colors. Fixes: 84af76491881 ("drm/i915/color: Extract icl_read_luts()") Signed-off-by: Swati Sharma Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190924135820.11850-1-swati2.sharma@intel.com --- drivers/gpu/drm/i915/display/intel_color.c | 126 ++++------------------------- drivers/gpu/drm/i915/i915_reg.h | 6 -- 2 files changed, 15 insertions(+), 117 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 402151128e1f..9ab34902663e 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -1420,9 +1420,6 @@ static int icl_color_check(struct intel_crtc_state *crtc_state) static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state) { - if (!crtc_state->gamma_enable) - return 0; - switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: return 8; @@ -1436,9 +1433,6 @@ static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state) static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state) { - if (!crtc_state->gamma_enable) - return 0; - if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0) return 0; @@ -1455,9 +1449,6 @@ static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state) static int chv_gamma_precision(const struct intel_crtc_state *crtc_state) { - if (!crtc_state->gamma_enable) - return 0; - if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA) return 10; else @@ -1466,9 +1457,6 @@ static int chv_gamma_precision(const struct intel_crtc_state *crtc_state) static int glk_gamma_precision(const struct intel_crtc_state *crtc_state) { - if (!crtc_state->gamma_enable) - return 0; - switch (crtc_state->gamma_mode) { case GAMMA_MODE_MODE_8BIT: return 8; @@ -1480,39 +1468,21 @@ static int glk_gamma_precision(const struct intel_crtc_state *crtc_state) } } -static int icl_gamma_precision(const struct intel_crtc_state *crtc_state) -{ - if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0) - return 0; - - switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) { - case GAMMA_MODE_MODE_8BIT: - return 8; - case GAMMA_MODE_MODE_10BIT: - return 10; - case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED: - return 16; - default: - MISSING_CASE(crtc_state->gamma_mode); - return 0; - } - -} - int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + if (!crtc_state->gamma_enable) + return 0; 
+ if (HAS_GMCH(dev_priv)) { if (IS_CHERRYVIEW(dev_priv)) return chv_gamma_precision(crtc_state); else return i9xx_gamma_precision(crtc_state); } else { - if (INTEL_GEN(dev_priv) >= 11) - return icl_gamma_precision(crtc_state); - else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) return glk_gamma_precision(crtc_state); else if (IS_IRONLAKE(dev_priv)) return ilk_gamma_precision(crtc_state); @@ -1543,20 +1513,6 @@ static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1, return true; } -static bool intel_color_lut_entry_multi_equal(struct drm_color_lut *lut1, - struct drm_color_lut *lut2, - int lut_size, u32 err) -{ - int i; - - for (i = 0; i < 9; i++) { - if (!err_check(&lut1[i], &lut2[i], err)) - return false; - } - - return true; -} - bool intel_color_lut_equal(struct drm_property_blob *blob1, struct drm_property_blob *blob2, u32 gamma_mode, u32 bit_precision) @@ -1575,8 +1531,16 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1, lut_size2 = drm_color_lut_size(blob2); /* check sw and hw lut size */ - if (lut_size1 != lut_size2) - return false; + switch (gamma_mode) { + case GAMMA_MODE_MODE_8BIT: + case GAMMA_MODE_MODE_10BIT: + if (lut_size1 != lut_size2) + return false; + break; + default: + MISSING_CASE(gamma_mode); + return false; + } lut1 = blob1->data; lut2 = blob2->data; @@ -1584,18 +1548,13 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1, err = 0xffff >> bit_precision; /* check sw and hw lut entry to be equal */ - switch (gamma_mode & GAMMA_MODE_MODE_MASK) { + switch (gamma_mode) { case GAMMA_MODE_MODE_8BIT: case GAMMA_MODE_MODE_10BIT: if (!intel_color_lut_entry_equal(lut1, lut2, lut_size2, err)) return false; break; - case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED: - if (!intel_color_lut_entry_multi_equal(lut1, lut2, - lut_size2, err)) - return false; - break; default: MISSING_CASE(gamma_mode); return false; @@ -1835,60 +1794,6 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state) crtc_state->base.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0)); } -static struct drm_property_blob * -icl_read_lut_multi_segment(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; - enum pipe pipe = crtc->pipe; - struct drm_property_blob *blob; - struct drm_color_lut *blob_data; - u32 i, val1, val2; - - blob = drm_property_create_blob(&dev_priv->drm, - sizeof(struct drm_color_lut) * lut_size, - NULL); - if (IS_ERR(blob)) - return NULL; - - blob_data = blob->data; - - I915_WRITE(PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT); - - for (i = 0; i < 9; i++) { - val1 = I915_READ(PREC_PAL_MULTI_SEG_DATA(pipe)); - val2 = I915_READ(PREC_PAL_MULTI_SEG_DATA(pipe)); - - blob_data[i].red = REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_UDW_MASK, val2) << 6 | - REG_FIELD_GET(PAL_PREC_MULTI_SEG_RED_LDW_MASK, val1); - blob_data[i].green = REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_UDW_MASK, val2) << 6 | - REG_FIELD_GET(PAL_PREC_MULTI_SEG_GREEN_LDW_MASK, val1); - blob_data[i].blue = REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_UDW_MASK, val2) << 6 | - REG_FIELD_GET(PAL_PREC_MULTI_SEG_BLUE_LDW_MASK, val1); - } - - /* - * FIXME readouts from PAL_PREC_DATA register aren't giving correct values - * in the case of fine and coarse segments. Restricting readouts only for - * super fine segment as of now. 
- */ - - return blob; -} - -static void icl_read_luts(struct intel_crtc_state *crtc_state) -{ - if ((crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) == - GAMMA_MODE_MODE_8BIT) - crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); - else if ((crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) == - GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED) - crtc_state->base.gamma_lut = icl_read_lut_multi_segment(crtc_state); - else - crtc_state->base.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0)); -} - void intel_color_init(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1932,7 +1837,6 @@ void intel_color_init(struct intel_crtc *crtc) if (INTEL_GEN(dev_priv) >= 11) { dev_priv->display.load_luts = icl_load_luts; - dev_priv->display.read_luts = icl_read_luts; } else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) { dev_priv->display.load_luts = glk_load_luts; dev_priv->display.read_luts = glk_read_luts; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 28c483a3bbba..e752de9470bd 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -10575,12 +10575,6 @@ enum skl_power_gate { #define _PAL_PREC_MULTI_SEG_DATA_A 0x4A40C #define _PAL_PREC_MULTI_SEG_DATA_B 0x4AC0C -#define PAL_PREC_MULTI_SEG_RED_LDW_MASK REG_GENMASK(29, 24) -#define PAL_PREC_MULTI_SEG_RED_UDW_MASK REG_GENMASK(29, 20) -#define PAL_PREC_MULTI_SEG_GREEN_LDW_MASK REG_GENMASK(19, 14) -#define PAL_PREC_MULTI_SEG_GREEN_UDW_MASK REG_GENMASK(19, 10) -#define PAL_PREC_MULTI_SEG_BLUE_LDW_MASK REG_GENMASK(9, 4) -#define PAL_PREC_MULTI_SEG_BLUE_UDW_MASK REG_GENMASK(9, 0) #define PREC_PAL_MULTI_SEG_INDEX(pipe) _MMIO_PIPE(pipe, \ _PAL_PREC_MULTI_SEG_INDEX_A, \ -- cgit v1.2.3 From ed06efb801bd291e935238d3fba46fa03d098f0e Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 25 Sep 2019 10:21:09 +0200 Subject: drm/i915/dp: Fix dsc bpp calculations, v5. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There was a integer wraparound when mode_clock became too high, and we didn't correct for the FEC overhead factor when dividing, with the calculations breaking at HBR3. As a result our calculated bpp was way too high, and the link width limitation never came into effect. Print out the resulting bpp calcululations as a sanity check, just in case we ever have to debug it later on again. We also used the wrong factor for FEC. While bspec mentions 2.4%, all the calculations use 1/0.972261, and the same ratio should be applied to data M/N as well, so use it there when FEC is enabled. This fixes the FIFO underrun we are seeing with FEC enabled. Changes since v2: - Handle fec_enable in intel_link_compute_m_n, so only data M/N is adjusted. (Ville) - Fix initial hardware readout for FEC. (Ville) Changes since v3: - Remove bogus fec_to_mode_clock. (Ville) Changes since v4: - Use the correct register for icl. (Ville) - Split hw readout to a separate patch. 
Signed-off-by: Maarten Lankhorst Fixes: d9218c8f6cf4 ("drm/i915/dp: Add helpers for Compressed BPP and Slice Count for DSC") Cc: # v5.0+ Cc: Manasi Navare Link: https://patchwork.freedesktop.org/patch/msgid/20190925082110.17439-1-maarten.lankhorst@linux.intel.com Reviewed-by: Ville Syrjälä --- drivers/gpu/drm/i915/display/intel_display.c | 12 +- drivers/gpu/drm/i915/display/intel_display.h | 2 +- drivers/gpu/drm/i915/display/intel_dp.c | 184 ++++++++++++++------------- drivers/gpu/drm/i915/display/intel_dp.h | 6 +- drivers/gpu/drm/i915/display/intel_dp_mst.c | 2 +- 5 files changed, 107 insertions(+), 99 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 5ecf54270181..c4c9286be987 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -7291,7 +7291,7 @@ retry: pipe_config->fdi_lanes = lane; intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, - link_bw, &pipe_config->fdi_m_n, false); + link_bw, &pipe_config->fdi_m_n, false, false); ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); if (ret == -EDEADLK) @@ -7538,11 +7538,15 @@ void intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, - bool constant_n) + bool constant_n, bool fec_enable) { - m_n->tu = 64; + u32 data_clock = bits_per_pixel * pixel_clock; + + if (fec_enable) + data_clock = intel_dp_mode_to_fec_clock(data_clock); - compute_m_n(bits_per_pixel * pixel_clock, + m_n->tu = 64; + compute_m_n(data_clock, link_clock * nlanes * 8, &m_n->gmch_m, &m_n->gmch_n, constant_n); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 5cea6f8e107a..4b9e18e5a263 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -443,7 +443,7 @@ enum phy_fia { void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, - bool constant_n); + bool constant_n, bool fec_enable); bool is_ccs_modifier(u64 modifier); void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 5cce9459e93c..893c9807755c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -76,8 +76,8 @@ #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 -/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */ -#define DP_DSC_FEC_OVERHEAD_FACTOR 976 +/* DP DSC FEC Overhead factor = 1/(0.972261) */ +#define DP_DSC_FEC_OVERHEAD_FACTOR 972261 /* Compliance test status bits */ #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 @@ -492,6 +492,97 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, return 0; } +u32 intel_dp_mode_to_fec_clock(u32 mode_clock) +{ + return div_u64(mul_u32_u32(mode_clock, 1000000U), + DP_DSC_FEC_OVERHEAD_FACTOR); +} + +static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count, + u32 mode_clock, u32 mode_hdisplay) +{ + u32 bits_per_pixel, max_bpp_small_joiner_ram; + int i; + + /* + * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)* + * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP) + * for SST -> TimeSlotsPerMTP is 1, + * for MST -> TimeSlotsPerMTP has to be calculated + */ + bits_per_pixel = (link_clock * lane_count * 8) / + 
intel_dp_mode_to_fec_clock(mode_clock); + DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel); + + /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */ + max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay; + DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram); + + /* + * Greatest allowed DSC BPP = MIN (output BPP from available Link BW + * check, output bpp from small joiner RAM check) + */ + bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram); + + /* Error out if the max bpp is less than smallest allowed valid bpp */ + if (bits_per_pixel < valid_dsc_bpp[0]) { + DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n", + bits_per_pixel, valid_dsc_bpp[0]); + return 0; + } + + /* Find the nearest match in the array of known BPPs from VESA */ + for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { + if (bits_per_pixel < valid_dsc_bpp[i + 1]) + break; + } + bits_per_pixel = valid_dsc_bpp[i]; + + /* + * Compressed BPP in U6.4 format so multiply by 16, for Gen 11, + * fractional part is 0 + */ + return bits_per_pixel << 4; +} + +static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, + int mode_clock, int mode_hdisplay) +{ + u8 min_slice_count, i; + int max_slice_width; + + if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) + min_slice_count = DIV_ROUND_UP(mode_clock, + DP_DSC_MAX_ENC_THROUGHPUT_0); + else + min_slice_count = DIV_ROUND_UP(mode_clock, + DP_DSC_MAX_ENC_THROUGHPUT_1); + + max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); + if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { + DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n", + max_slice_width); + return 0; + } + /* Also take into account max slice width */ + min_slice_count = min_t(u8, min_slice_count, + DIV_ROUND_UP(mode_hdisplay, + max_slice_width)); + + /* Find the closest match to the valid slice count values */ + for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { + if (valid_dsc_slicecount[i] > + drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, + false)) + break; + if (min_slice_count <= valid_dsc_slicecount[i]) + return valid_dsc_slicecount[i]; + } + + DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count); + return 0; +} + static enum drm_mode_status intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) @@ -2259,7 +2350,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, adjusted_mode->crtc_clock, pipe_config->port_clock, &pipe_config->dp_m_n, - constant_n); + constant_n, pipe_config->fec_enable); if (intel_connector->panel.downclock_mode != NULL && dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { @@ -2269,7 +2360,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_connector->panel.downclock_mode->clock, pipe_config->port_clock, &pipe_config->dp_m2_n2, - constant_n); + constant_n, pipe_config->fec_enable); } if (!HAS_DDI(dev_priv)) @@ -4373,91 +4464,6 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) DP_DPRX_ESI_LEN; } -u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count, - int mode_clock, int mode_hdisplay) -{ - u16 bits_per_pixel, max_bpp_small_joiner_ram; - int i; - - /* - * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)* - * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP) - * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1, - * for MST -> TimeSlotsPerMTP has to be calculated - */ - bits_per_pixel = (link_clock * lane_count * 8 * - DP_DSC_FEC_OVERHEAD_FACTOR) / - mode_clock; - - /* Small 
Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */ - max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / - mode_hdisplay; - - /* - * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW - * check, output bpp from small joiner RAM check) - */ - bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram); - - /* Error out if the max bpp is less than smallest allowed valid bpp */ - if (bits_per_pixel < valid_dsc_bpp[0]) { - DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel); - return 0; - } - - /* Find the nearest match in the array of known BPPs from VESA */ - for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { - if (bits_per_pixel < valid_dsc_bpp[i + 1]) - break; - } - bits_per_pixel = valid_dsc_bpp[i]; - - /* - * Compressed BPP in U6.4 format so multiply by 16, for Gen 11, - * fractional part is 0 - */ - return bits_per_pixel << 4; -} - -u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, - int mode_clock, - int mode_hdisplay) -{ - u8 min_slice_count, i; - int max_slice_width; - - if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) - min_slice_count = DIV_ROUND_UP(mode_clock, - DP_DSC_MAX_ENC_THROUGHPUT_0); - else - min_slice_count = DIV_ROUND_UP(mode_clock, - DP_DSC_MAX_ENC_THROUGHPUT_1); - - max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); - if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { - DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n", - max_slice_width); - return 0; - } - /* Also take into account max slice width */ - min_slice_count = min_t(u8, min_slice_count, - DIV_ROUND_UP(mode_hdisplay, - max_slice_width)); - - /* Find the closest match to the valid slice count values */ - for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { - if (valid_dsc_slicecount[i] > - drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, - false)) - break; - if (min_slice_count <= valid_dsc_slicecount[i]) - return valid_dsc_slicecount[i]; - } - - DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count); - return 0; -} - static void intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index e01d1f89409d..a194b5b6da05 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -103,10 +103,6 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); bool intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status); -u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count, - int mode_clock, int mode_hdisplay); -u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock, - int mode_hdisplay); bool intel_dp_read_dpcd(struct intel_dp *intel_dp); bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); @@ -119,4 +115,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count) return ~((1 << lane_count) - 1) & 0xf; } +u32 intel_dp_mode_to_fec_clock(u32 mode_clock); + #endif /* __INTEL_DP_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index eeeb3f933aa4..cf4d851a5139 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -81,7 +81,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, adjusted_mode->crtc_clock, crtc_state->port_clock, &crtc_state->dp_m_n, - constant_n); + 
constant_n, crtc_state->fec_enable); crtc_state->dp_m_n.tu = slots; return 0; -- cgit v1.2.3 From 8aa940c8551cce80bafcd8e1cd93d7b39a1acd8a Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 25 Sep 2019 10:21:10 +0200 Subject: drm/i915: Add hardware readout for FEC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Readout the FEC state in encoder->get_config(), this will allow us to ensure that we can correctly inherit the state from boot, and that we set FEC during modeset. Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20190925082110.17439-2-maarten.lankhorst@linux.intel.com Reviewed-by: Ville Syrjälä --- drivers/gpu/drm/i915/display/intel_ddi.c | 17 +++++++++++++++++ drivers/gpu/drm/i915/display/intel_display.c | 1 + 2 files changed, 18 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 33cd766f9eea..14fe987888a6 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -4021,6 +4021,23 @@ void intel_ddi_get_config(struct intel_encoder *encoder, pipe_config->lane_count = ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; intel_dp_get_m_n(intel_crtc, pipe_config); + + if (INTEL_GEN(dev_priv) >= 11) { + i915_reg_t dp_tp_ctl; + + if (IS_GEN(dev_priv, 11)) + dp_tp_ctl = DP_TP_CTL(encoder->port); + else + dp_tp_ctl = TGL_DP_TP_CTL(pipe_config->cpu_transcoder); + + pipe_config->fec_enable = + I915_READ(dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE; + + DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n", + encoder->base.base.id, encoder->base.name, + pipe_config->fec_enable); + } + break; case TRANS_DDI_MODE_SELECT_DP_MST: pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index c4c9286be987..31698a57773f 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -12836,6 +12836,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(hdmi_scrambling); PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); PIPE_CONF_CHECK_BOOL(has_infoframe); + PIPE_CONF_CHECK_BOOL(fec_enable); PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); -- cgit v1.2.3 From c22d62e6e49beadea0024fe4eb9a02ce0f635657 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 20 Sep 2019 13:42:18 +0200 Subject: drm/i915: Get rid of crtc_state->fb_changed We had this as an optimization to not do a plane update, but we killed it off because there are so many reasons we may have to do a plane update or fastset that it's best to just assume everything changed. 
Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20190920114235.22411-6-maarten.lankhorst@linux.intel.com Reviewed-by: Matt Roper --- drivers/gpu/drm/i915/display/intel_atomic.c | 1 - drivers/gpu/drm/i915/display/intel_display.c | 10 +--------- drivers/gpu/drm/i915/display/intel_display_types.h | 1 - 3 files changed, 1 insertion(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index 698802da07b7..f7a981ff6812 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -199,7 +199,6 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->disable_cxsr = false; crtc_state->update_wm_pre = false; crtc_state->update_wm_post = false; - crtc_state->fb_changed = false; crtc_state->fifo_changed = false; crtc_state->wm.need_postvbl_update = false; crtc_state->fb_bits = 0; diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 31698a57773f..292be4b71a82 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -11582,7 +11582,6 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat bool was_crtc_enabled = old_crtc_state->base.active; bool is_crtc_enabled = crtc_state->base.active; bool turn_off, turn_on, visible, was_visible; - struct drm_framebuffer *fb = plane_state->base.fb; int ret; if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { @@ -11616,18 +11615,11 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat if (!was_visible && !visible) return 0; - if (fb != old_plane_state->base.fb) - crtc_state->fb_changed = true; - turn_off = was_visible && (!visible || mode_changed); turn_on = visible && (!was_visible || mode_changed); - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n", + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", crtc->base.base.id, crtc->base.name, - plane->base.base.id, plane->base.name, - fb ? fb->base.id : -1); - - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", plane->base.base.id, plane->base.name, was_visible, visible, turn_off, turn_on, mode_changed); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 6b0a646f0170..a33644f12af1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -766,7 +766,6 @@ struct intel_crtc_state { bool update_pipe; /* can a fast modeset be performed? */ bool disable_cxsr; bool update_wm_pre, update_wm_post; /* watermarks are updated */ - bool fb_changed; /* fb on any of the planes is changed */ bool fifo_changed; /* FIFO split is changed */ /* Pipe source size (ie. panel fitter input size) -- cgit v1.2.3 From c47b7ddbcb29fca51515294a6fd2ed1d7d861939 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 20 Sep 2019 13:42:20 +0200 Subject: drm/i915: Rename planar linked plane variables Rename linked_plane to planar_linked_plane and slave to planar_slave, this will make it easier to keep apart bigjoiner linking and planar plane linking. 
Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20190920114235.22411-8-maarten.lankhorst@linux.intel.com Reviewed-by: Matt Roper --- drivers/gpu/drm/i915/display/intel_atomic.c | 7 +++++-- drivers/gpu/drm/i915/display/intel_atomic_plane.c | 4 ++-- drivers/gpu/drm/i915/display/intel_display.c | 22 +++++++++++----------- drivers/gpu/drm/i915/display/intel_display_types.h | 8 ++++---- drivers/gpu/drm/i915/display/intel_sprite.c | 4 ++-- drivers/gpu/drm/i915/intel_pm.c | 12 ++++++------ 6 files changed, 30 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index f7a981ff6812..99f4f83de280 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -263,10 +263,13 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta */ mode = PS_SCALER_MODE_NORMAL; } else { + struct intel_plane *linked = + plane_state->planar_linked_plane; + mode = PS_SCALER_MODE_PLANAR; - if (plane_state->linked_plane) - mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id); + if (linked) + mode |= PS_PLANE_Y_SEL(linked->id); } } else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) { mode = PS_SCALER_MODE_NORMAL; diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 476ef0906ba0..98b7766eaa7a 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -321,9 +321,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state, if (new_plane_state->base.visible) { intel_update_plane(plane, new_crtc_state, new_plane_state); - } else if (new_plane_state->slave) { + } else if (new_plane_state->planar_slave) { struct intel_plane *master = - new_plane_state->linked_plane; + new_plane_state->planar_linked_plane; /* * We update the slave plane from this function because diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 292be4b71a82..8f125f1624bd 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -11729,7 +11729,7 @@ static int icl_add_linked_planes(struct intel_atomic_state *state) int i; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - linked = plane_state->linked_plane; + linked = plane_state->planar_linked_plane; if (!linked) continue; @@ -11738,8 +11738,8 @@ static int icl_add_linked_planes(struct intel_atomic_state *state) if (IS_ERR(linked_plane_state)) return PTR_ERR(linked_plane_state); - WARN_ON(linked_plane_state->linked_plane != plane); - WARN_ON(linked_plane_state->slave == plane_state->slave); + WARN_ON(linked_plane_state->planar_linked_plane != plane); + WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave); } return 0; @@ -11762,16 +11762,16 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) * in the crtc_state->active_planes mask. 
*/ for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - if (plane->pipe != crtc->pipe || !plane_state->linked_plane) + if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) continue; - plane_state->linked_plane = NULL; - if (plane_state->slave && !plane_state->base.visible) { + plane_state->planar_linked_plane = NULL; + if (plane_state->planar_slave && !plane_state->base.visible) { crtc_state->active_planes &= ~BIT(plane->id); crtc_state->update_planes |= BIT(plane->id); } - plane_state->slave = false; + plane_state->planar_slave = false; } if (!crtc_state->nv12_planes) @@ -11805,10 +11805,10 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) return -EINVAL; } - plane_state->linked_plane = linked; + plane_state->planar_linked_plane = linked; - linked_state->slave = true; - linked_state->linked_plane = plane; + linked_state->planar_slave = true; + linked_state->planar_linked_plane = plane; crtc_state->active_planes |= BIT(linked->id); crtc_state->update_planes |= BIT(linked->id); DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name); @@ -13257,7 +13257,7 @@ intel_verify_planes(struct intel_atomic_state *state) for_each_new_intel_plane_in_state(state, plane, plane_state, i) - assert_plane(plane, plane_state->slave || + assert_plane(plane, plane_state->planar_slave || plane_state->base.visible); } diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index a33644f12af1..976669f01a8c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -559,24 +559,24 @@ struct intel_plane_state { int scaler_id; /* - * linked_plane: + * planar_linked_plane: * * ICL planar formats require 2 planes that are updated as pairs. * This member is used to make sure the other plane is also updated * when required, and for update_slave() to find the correct * plane_state to pass as argument. */ - struct intel_plane *linked_plane; + struct intel_plane *planar_linked_plane; /* - * slave: + * planar_slave: * If set don't update use the linked plane's state for updating * this plane during atomic commit with the update_slave() callback. * * It's also used by the watermark code to ignore wm calculations on * this plane. They're calculated by the linked plane's wm code. 
*/ - u32 slave; + u32 planar_slave; struct drm_intel_sprite_colorkey ckey; }; diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 7a7078d0ba23..3d56db32291b 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -542,7 +542,7 @@ skl_program_plane(struct intel_plane *plane, u32 y = plane_state->color_plane[color_plane].y; u32 src_w = drm_rect_width(&plane_state->base.src) >> 16; u32 src_h = drm_rect_height(&plane_state->base.src) >> 16; - struct intel_plane *linked = plane_state->linked_plane; + struct intel_plane *linked = plane_state->planar_linked_plane; const struct drm_framebuffer *fb = plane_state->base.fb; u8 alpha = plane_state->base.alpha >> 8; u32 plane_color_ctl = 0; @@ -641,7 +641,7 @@ skl_update_plane(struct intel_plane *plane, { int color_plane = 0; - if (plane_state->linked_plane) { + if (plane_state->planar_linked_plane) { /* Program the UV plane */ color_plane = 1; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 6aa40f546226..6bed2ed14574 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4293,7 +4293,7 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, enum plane_id plane_id = to_intel_plane(plane)->id; u64 rate; - if (!plane_state->linked_plane) { + if (!plane_state->planar_linked_plane) { rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); plane_data_rate[plane_id] = rate; total_data_rate += rate; @@ -4307,12 +4307,12 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, * NULL if we try get_new_plane_state(), so we * always calculate from the master. */ - if (plane_state->slave) + if (plane_state->planar_slave) continue; /* Y plane rate is calculated on the slave */ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); - y_plane_id = plane_state->linked_plane->id; + y_plane_id = plane_state->planar_linked_plane->id; plane_data_rate[y_plane_id] = rate; total_data_rate += rate; @@ -5051,12 +5051,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, int ret; /* Watermarks calculated in master */ - if (plane_state->slave) + if (plane_state->planar_slave) return 0; - if (plane_state->linked_plane) { + if (plane_state->planar_linked_plane) { const struct drm_framebuffer *fb = plane_state->base.fb; - enum plane_id y_plane_id = plane_state->linked_plane->id; + enum plane_id y_plane_id = plane_state->planar_linked_plane->id; WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)); WARN_ON(!fb->format->is_yuv || -- cgit v1.2.3 From c750c22b2456bcd8c50067424ccabf2686306cda Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 20 Sep 2019 13:42:21 +0200 Subject: drm/i915: Do not add all planes when checking scalers on glk+ We cannot switch between HQ and normal mode on GLK+, so only add planes on platforms where it makes sense. We could probably restrict it even more to only add when scaler users toggles between 1 and 2, but lets just leave it for now. 
Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20190920114235.22411-9-maarten.lankhorst@linux.intel.com Reviewed-by: Matt Roper --- drivers/gpu/drm/i915/display/intel_atomic.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index 99f4f83de280..c5a552a69752 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -373,6 +373,15 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, */ if (!plane) { struct drm_plane_state *state; + + /* + * GLK+ scalers don't have a HQ mode so it + * isn't necessary to change between HQ and dyn mode + * on those platforms. + */ + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + continue; + plane = drm_plane_from_index(&dev_priv->drm, i); state = drm_atomic_get_plane_state(drm_state, plane); if (IS_ERR(state)) { -- cgit v1.2.3 From 8ad8041b98c665b6147e607b749586d6e20ba73a Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 24 Sep 2019 09:25:52 -0700 Subject: ARM: OMAP2+: Fix missing reset done flag for am3 and am43 For ti,sysc-omap4 compatible devices with no sysstatus register, we do have reset done status available in the SOFTRESET bit that clears when the reset is done. This is documented for example in am437x TRM for DMTIMER_TIOCP_CFG register. The am335x TRM just says that SOFTRESET bit value 1 means reset is ongoing, but it behaves the same way clearing after reset is done. With the ti-sysc driver handling this automatically based on no sysstatus register defined, we see warnings if SYSC_HAS_RESET_STATUS is missing in the legacy platform data: ti-sysc 48042000.target-module: sysc_flags 00000222 != 00000022 ti-sysc 48044000.target-module: sysc_flags 00000222 != 00000022 ti-sysc 48046000.target-module: sysc_flags 00000222 != 00000022 ... Let's fix these warnings by adding SYSC_HAS_RESET_STATUS. Let's also remove the useless parentheses while at it. If it turns out we do have ti,sysc-omap4 compatible devices without a working SOFTRESET bit we can set up additional quirk handling for it. Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c index adb6271f819b..7773876d165f 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c @@ -811,7 +811,8 @@ static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, .syss_offs = 0x0014, - .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), + .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | + SYSC_HAS_RESET_STATUS, .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | SIDLE_SMART_WKUP), .sysc_fields = &omap_hwmod_sysc_type2, -- cgit v1.2.3 From 17529d43b21c72466e9109d602c6f5c360a1a9e8 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 24 Sep 2019 09:25:51 -0700 Subject: ARM: OMAP2+: Add missing LCDC midlemode for am335x TRM "Table 13-34. SYSCONFIG Register Field Descriptions" lists both standbymode and idlemode that should be just the sidle and midle registers where midle is currently unconfigured for lcdc_sysc. As the dts data has been generated based on lcdc_sysc, we now have an empty "ti,sysc-midle" property. 
And so we currently get a warning for lcdc because of a difference with dts provided configuration compared to the legacy platform data. This is because lcdc has SYSC_HAS_MIDLEMODE configured in the platform data without configuring the modes. Let's fix the issue by adding the missing midlemode to lcdc_sysc, and configuring the "ti,sysc-midle" property based on the TRM values. Fixes: f711c575cfec ("ARM: dts: am335x: Add l4 interconnect hierarchy and ti-sysc data") Cc: Jyri Sarha Cc: Keerthy Cc: Robert Nelson Cc: Suman Anna Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am33xx-l4.dtsi | 4 +++- arch/arm/mach-omap2/omap_hwmod_33xx_data.c | 5 +++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi index 1515f4f91499..3287cf695b5a 100644 --- a/arch/arm/boot/dts/am33xx-l4.dtsi +++ b/arch/arm/boot/dts/am33xx-l4.dtsi @@ -2038,7 +2038,9 @@ reg = <0xe000 0x4>, <0xe054 0x4>; reg-names = "rev", "sysc"; - ti,sysc-midle ; + ti,sysc-midle = , + , + ; ti,sysc-sidle = , , ; diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c index c965af275e34..81d9912f17c8 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c @@ -231,8 +231,9 @@ static struct omap_hwmod am33xx_control_hwmod = { static struct omap_hwmod_class_sysconfig lcdc_sysc = { .rev_offs = 0x0, .sysc_offs = 0x54, - .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE), - .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), + .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE, + .idlemodes = SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | + MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART, .sysc_fields = &omap_hwmod_sysc_type2, }; -- cgit v1.2.3 From cf395f7ddb9ebc6b2d28d83b53d18aa4e7c19701 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 24 Sep 2019 16:19:00 -0700 Subject: ARM: OMAP2+: Fix warnings with broken omap2_set_init_voltage() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This code is currently unable to find the dts opp tables as ti-cpufreq needs to set them up first based on speed binning. We stopped initializing the opp tables with platform code years ago for device tree based booting with commit 92d51856d740 ("ARM: OMAP3+: do not register non-dt OPP tables for device tree boot"), and all of mach-omap2 is now booting using device tree. We currently get the following errors on init: omap2_set_init_voltage: unable to find boot up OPP for vdd_mpu omap2_set_init_voltage: unable to set vdd_mpu omap2_set_init_voltage: unable to find boot up OPP for vdd_core omap2_set_init_voltage: unable to set vdd_core omap2_set_init_voltage: unable to find boot up OPP for vdd_iva omap2_set_init_voltage: unable to set vdd_iva Let's just drop the unused code. Nowadays ti-cpufreq should be used to to initialize things properly. Cc: Adam Ford Cc: André Roth Cc: "H. 
Nikolaus Schaller" Cc: Nishanth Menon Cc: Tero Kristo Tested-by: Adam Ford #logicpd-torpedo-37xx-devkit Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/pm.c | 100 ----------------------------------------------- 1 file changed, 100 deletions(-) diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c index 1fde1bf53fb6..7ac9af56762d 100644 --- a/arch/arm/mach-omap2/pm.c +++ b/arch/arm/mach-omap2/pm.c @@ -74,83 +74,6 @@ int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused) return 0; } -/* - * This API is to be called during init to set the various voltage - * domains to the voltage as per the opp table. Typically we boot up - * at the nominal voltage. So this function finds out the rate of - * the clock associated with the voltage domain, finds out the correct - * opp entry and sets the voltage domain to the voltage specified - * in the opp entry - */ -static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name, - const char *oh_name) -{ - struct voltagedomain *voltdm; - struct clk *clk; - struct dev_pm_opp *opp; - unsigned long freq, bootup_volt; - struct device *dev; - - if (!vdd_name || !clk_name || !oh_name) { - pr_err("%s: invalid parameters\n", __func__); - goto exit; - } - - if (!strncmp(oh_name, "mpu", 3)) - /* - * All current OMAPs share voltage rail and clock - * source, so CPU0 is used to represent the MPU-SS. - */ - dev = get_cpu_device(0); - else - dev = omap_device_get_by_hwmod_name(oh_name); - - if (IS_ERR(dev)) { - pr_err("%s: Unable to get dev pointer for hwmod %s\n", - __func__, oh_name); - goto exit; - } - - voltdm = voltdm_lookup(vdd_name); - if (!voltdm) { - pr_err("%s: unable to get vdd pointer for vdd_%s\n", - __func__, vdd_name); - goto exit; - } - - clk = clk_get(NULL, clk_name); - if (IS_ERR(clk)) { - pr_err("%s: unable to get clk %s\n", __func__, clk_name); - goto exit; - } - - freq = clk_get_rate(clk); - clk_put(clk); - - opp = dev_pm_opp_find_freq_ceil(dev, &freq); - if (IS_ERR(opp)) { - pr_err("%s: unable to find boot up OPP for vdd_%s\n", - __func__, vdd_name); - goto exit; - } - - bootup_volt = dev_pm_opp_get_voltage(opp); - dev_pm_opp_put(opp); - - if (!bootup_volt) { - pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n", - __func__, vdd_name); - goto exit; - } - - voltdm_scale(voltdm, bootup_volt); - return 0; - -exit: - pr_err("%s: unable to set vdd_%s\n", __func__, vdd_name); - return -EINVAL; -} - #ifdef CONFIG_SUSPEND static int omap_pm_enter(suspend_state_t suspend_state) { @@ -208,25 +131,6 @@ void omap_common_suspend_init(void *pm_suspend) } #endif /* CONFIG_SUSPEND */ -static void __init omap3_init_voltages(void) -{ - if (!soc_is_omap34xx()) - return; - - omap2_set_init_voltage("mpu_iva", "dpll1_ck", "mpu"); - omap2_set_init_voltage("core", "l3_ick", "l3_main"); -} - -static void __init omap4_init_voltages(void) -{ - if (!soc_is_omap44xx()) - return; - - omap2_set_init_voltage("mpu", "dpll_mpu_ck", "mpu"); - omap2_set_init_voltage("core", "l3_div_ck", "l3_main_1"); - omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva"); -} - int __maybe_unused omap_pm_nop_init(void) { return 0; @@ -246,10 +150,6 @@ int __init omap2_common_pm_late_init(void) omap4_twl_init(); omap_voltage_late_init(); - /* Initialize the voltages */ - omap3_init_voltages(); - omap4_init_voltages(); - /* Smartreflex device init */ omap_devinit_smartreflex(); -- cgit v1.2.3 From f9d4eae25d93a76f8ed52b1519cc27c5b3cb1dcb Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 25 Sep 2019 14:08:45 +0100 Subject: 
drm/i915/execlists: Simplify gen12_csb_parse Having decided that we only care about the promotion predicate, we can simplify gen12_csb_parse to simply check whether we need to jump to a new queue. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Daniele Ceraolo Spurio Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20190925130845.17952-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index caac091b3eb9..ab725a6ca0ac 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1801,9 +1801,6 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_dw); bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE; - if (!ctx_away_valid && ctx_to_valid) - return true; - /* * The context switch detail is not guaranteed to be 5 when a preemption * occurs, so we can't just check for that. The check below works for @@ -1811,8 +1808,10 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) * instructions and lite-restore. Preempt-to-idle via the CTRL register * would require some extra handling, but we don't support that. */ - if (new_queue && ctx_away_valid) + if (!ctx_away_valid || new_queue) { + GEM_BUG_ON(!ctx_to_valid); return true; + } /* * switch detail = 5 is covered by the case above and we do not expect a @@ -1820,7 +1819,6 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) * use polling mode. */ GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw)); - return false; } -- cgit v1.2.3 From 1e225a2c7477f114f271403ce48b26dd7bc1da2c Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Tue, 24 Sep 2019 14:00:35 -0700 Subject: drm/i915/tgl: Add initial dkl pll support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The disable function can be the same as for MG phy since the same registers are used. The others are different as registers changed, also adding a empty dkl_pll_write() to be implemented later. 
v2: Setting the right HIP_INDEX_REG bits (José) v3: Masking non-computed registers of mg_pll_tdc_coldst_bias when getting hardware state Sharing mg_pll_enable() with TGL Reviewed-by: Imre Deak Acked-by: Lucas De Marchi Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20190924210040.142075-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 96 ++++++++++++++++++++++++++- 1 file changed, 94 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 84e734d44828..1d56027be12c 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -3086,6 +3086,78 @@ out: return ret; } +static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + const enum intel_dpll_id id = pll->info->id; + enum tc_port tc_port = icl_pll_id_to_tc_port(id); + intel_wakeref_t wakeref; + bool ret = false; + u32 val; + + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_DISPLAY_CORE); + if (!wakeref) + return false; + + val = I915_READ(MG_PLL_ENABLE(tc_port)); + if (!(val & PLL_ENABLE)) + goto out; + + /* + * All registers read here have the same HIP_INDEX_REG even though + * they are on different building blocks + */ + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2)); + + hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port)); + hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; + + hw_state->mg_clktop2_hsclkctl = + I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port)); + hw_state->mg_clktop2_hsclkctl &= + MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | + MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; + + hw_state->mg_clktop2_coreclkctl1 = + I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port)); + hw_state->mg_clktop2_coreclkctl1 &= + MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; + + hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port)); + hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK | + DKL_PLL_DIV0_PROP_COEFF_MASK | + DKL_PLL_DIV0_FBPREDIV_MASK | + DKL_PLL_DIV0_FBDIV_INT_MASK); + + hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port)); + hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK | + DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); + + hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port)); + hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | + DKL_PLL_SSC_STEP_LEN_MASK | + DKL_PLL_SSC_STEP_NUM_MASK | + DKL_PLL_SSC_EN); + + hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port)); + hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H | + DKL_PLL_BIAS_FBDIV_FRAC_MASK); + + hw_state->mg_pll_tdc_coldst_bias = + I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port)); + hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK | + DKL_PLL_TDC_FEED_FWD_GAIN_MASK); + + ret = true; +out: + intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); + return ret; +} + static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state, @@ -3220,6 +3292,12 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port)); } +static void dkl_pll_write(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + /* TODO */ +} + static void icl_pll_power_enable(struct 
drm_i915_private *dev_priv, struct intel_shared_dpll *pll, i915_reg_t enable_reg) @@ -3312,7 +3390,10 @@ static void mg_pll_enable(struct drm_i915_private *dev_priv, icl_pll_power_enable(dev_priv, pll, enable_reg); - icl_mg_pll_write(dev_priv, pll); + if (INTEL_GEN(dev_priv) >= 12) + dkl_pll_write(dev_priv, pll); + else + icl_mg_pll_write(dev_priv, pll); /* * DVFS pre sequence would be here, but in our driver the cdclk code @@ -3467,11 +3548,22 @@ static const struct intel_dpll_mgr ehl_pll_mgr = { .dump_hw_state = icl_dump_hw_state, }; +static const struct intel_shared_dpll_funcs dkl_pll_funcs = { + .enable = mg_pll_enable, + .disable = mg_pll_disable, + .get_hw_state = dkl_pll_get_hw_state, +}; + static const struct dpll_info tgl_plls[] = { { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, - /* TODO: Add typeC plls */ + { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 }, + { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, + { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, + { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 }, + { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 }, + { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 }, { }, }; -- cgit v1.2.3 From e87b9b05104f5dc2d1529d2b8fb2574e332d43e5 Mon Sep 17 00:00:00 2001 From: Vandita Kulkarni Date: Tue, 24 Sep 2019 14:00:36 -0700 Subject: drm/i915/tgl: Add support for dkl pll write MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a new function to write to dkl phy pll registers. As per the bspec all the registers are read modify write. Reviewed-by: Lucas De Marchi Signed-off-by: Vandita Kulkarni Signed-off-by: José Roberto de Souza Signed-off-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20190924210040.142075-2-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 65 ++++++++++++++++++++++++++- 1 file changed, 64 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 1d56027be12c..114116cdbf49 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -3295,7 +3295,70 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, static void dkl_pll_write(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll) { - /* TODO */ + struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; + enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id); + u32 val; + + /* + * All registers programmed here have the same HIP_INDEX_REG even + * though on different building block + */ + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2)); + + /* All the registers are RMW */ + val = I915_READ(DKL_REFCLKIN_CTL(tc_port)); + val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; + val |= hw_state->mg_refclkin_ctl; + I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val); + + val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port)); + val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; + val |= hw_state->mg_clktop2_coreclkctl1; + I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val); + + val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port)); + val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | + MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); + val |= hw_state->mg_clktop2_hsclkctl; + I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val); + + val 
= I915_READ(DKL_PLL_DIV0(tc_port)); + val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK | + DKL_PLL_DIV0_PROP_COEFF_MASK | + DKL_PLL_DIV0_FBPREDIV_MASK | + DKL_PLL_DIV0_FBDIV_INT_MASK); + val |= hw_state->mg_pll_div0; + I915_WRITE(DKL_PLL_DIV0(tc_port), val); + + val = I915_READ(DKL_PLL_DIV1(tc_port)); + val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK | + DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); + val |= hw_state->mg_pll_div1; + I915_WRITE(DKL_PLL_DIV1(tc_port), val); + + val = I915_READ(DKL_PLL_SSC(tc_port)); + val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | + DKL_PLL_SSC_STEP_LEN_MASK | + DKL_PLL_SSC_STEP_NUM_MASK | + DKL_PLL_SSC_EN); + val |= hw_state->mg_pll_ssc; + I915_WRITE(DKL_PLL_SSC(tc_port), val); + + val = I915_READ(DKL_PLL_BIAS(tc_port)); + val &= ~(DKL_PLL_BIAS_FRAC_EN_H | + DKL_PLL_BIAS_FBDIV_FRAC_MASK); + val |= hw_state->mg_pll_bias; + I915_WRITE(DKL_PLL_BIAS(tc_port), val); + + val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port)); + val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK | + DKL_PLL_TDC_FEED_FWD_GAIN_MASK); + val |= hw_state->mg_pll_tdc_coldst_bias; + I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val); + + POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port)); } static void icl_pll_power_enable(struct drm_i915_private *dev_priv, -- cgit v1.2.3 From 1a5c6aa43a3ae95bb4756c8a906a32519a6d291d Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Tue, 24 Sep 2019 14:00:38 -0700 Subject: drm/i915/tgl: re-indent code to prepare for DKL changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The final save operation into pll_state of the calculations done will be different for DKL PHY. Prepare for that by reindenting code so it's easier to check for correctness. This one has no change in behavior. Reviewed-by: Matt Roper Signed-off-by: Lucas De Marchi Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190924210040.142075-4-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 119 ++++++++++++++------------ 1 file changed, 66 insertions(+), 53 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 114116cdbf49..ce2dee38c956 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2786,60 +2786,73 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, } ssc_steplog = 4; - pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) | - MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) | - MG_PLL_DIV0_FBDIV_INT(m2div_int); - - pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) | - MG_PLL_DIV1_DITHER_DIV_2 | - MG_PLL_DIV1_NDIVRATIO(1) | - MG_PLL_DIV1_FBPREDIV(m1div); - - pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) | - MG_PLL_LF_AFCCNTSEL_512 | - MG_PLL_LF_GAINCTRL(1) | - MG_PLL_LF_INT_COEFF(int_coeff) | - MG_PLL_LF_PROP_COEFF(prop_coeff); - - pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 | - MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 | - MG_PLL_FRAC_LOCK_LOCKTHRESH(10) | - MG_PLL_FRAC_LOCK_DCODITHEREN | - MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain); - if (use_ssc || m2div_rem > 0) - pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN; - - pll_state->mg_pll_ssc = (use_ssc ? 
MG_PLL_SSC_EN : 0) | - MG_PLL_SSC_TYPE(2) | - MG_PLL_SSC_STEPLENGTH(ssc_steplen) | - MG_PLL_SSC_STEPNUM(ssc_steplog) | - MG_PLL_SSC_FLLEN | - MG_PLL_SSC_STEPSIZE(ssc_stepsize); - - pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART | - MG_PLL_TDC_COLDST_IREFINT_EN | - MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) | - MG_PLL_TDC_TDCOVCCORR_EN | - MG_PLL_TDC_TDCSEL(3); - - pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) | - MG_PLL_BIAS_INIT_DCOAMP(0x3F) | - MG_PLL_BIAS_BIAS_BONUS(10) | - MG_PLL_BIAS_BIASCAL_EN | - MG_PLL_BIAS_CTRIM(12) | - MG_PLL_BIAS_VREF_RDAC(4) | - MG_PLL_BIAS_IREFTRIM(iref_trim); - - if (refclk_khz == 38400) { - pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; - pll_state->mg_pll_bias_mask = 0; - } else { - pll_state->mg_pll_tdc_coldst_bias_mask = -1U; - pll_state->mg_pll_bias_mask = -1U; - } + /* write pll_state calculations */ + { + pll_state->mg_pll_div0 = + (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) | + MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) | + MG_PLL_DIV0_FBDIV_INT(m2div_int); + + pll_state->mg_pll_div1 = + MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) | + MG_PLL_DIV1_DITHER_DIV_2 | + MG_PLL_DIV1_NDIVRATIO(1) | + MG_PLL_DIV1_FBPREDIV(m1div); + + pll_state->mg_pll_lf = + MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) | + MG_PLL_LF_AFCCNTSEL_512 | + MG_PLL_LF_GAINCTRL(1) | + MG_PLL_LF_INT_COEFF(int_coeff) | + MG_PLL_LF_PROP_COEFF(prop_coeff); + + pll_state->mg_pll_frac_lock = + MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 | + MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 | + MG_PLL_FRAC_LOCK_LOCKTHRESH(10) | + MG_PLL_FRAC_LOCK_DCODITHEREN | + MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain); + if (use_ssc || m2div_rem > 0) + pll_state->mg_pll_frac_lock |= + MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN; + + pll_state->mg_pll_ssc = + (use_ssc ? MG_PLL_SSC_EN : 0) | + MG_PLL_SSC_TYPE(2) | + MG_PLL_SSC_STEPLENGTH(ssc_steplen) | + MG_PLL_SSC_STEPNUM(ssc_steplog) | + MG_PLL_SSC_FLLEN | + MG_PLL_SSC_STEPSIZE(ssc_stepsize); + + pll_state->mg_pll_tdc_coldst_bias = + MG_PLL_TDC_COLDST_COLDSTART | + MG_PLL_TDC_COLDST_IREFINT_EN | + MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) | + MG_PLL_TDC_TDCOVCCORR_EN | + MG_PLL_TDC_TDCSEL(3); + + pll_state->mg_pll_bias = + MG_PLL_BIAS_BIAS_GB_SEL(3) | + MG_PLL_BIAS_INIT_DCOAMP(0x3F) | + MG_PLL_BIAS_BIAS_BONUS(10) | + MG_PLL_BIAS_BIASCAL_EN | + MG_PLL_BIAS_CTRIM(12) | + MG_PLL_BIAS_VREF_RDAC(4) | + MG_PLL_BIAS_IREFTRIM(iref_trim); + + if (refclk_khz == 38400) { + pll_state->mg_pll_tdc_coldst_bias_mask = + MG_PLL_TDC_COLDST_COLDSTART; + pll_state->mg_pll_bias_mask = 0; + } else { + pll_state->mg_pll_tdc_coldst_bias_mask = -1U; + pll_state->mg_pll_bias_mask = -1U; + } - pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask; - pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask; + pll_state->mg_pll_tdc_coldst_bias &= + pll_state->mg_pll_tdc_coldst_bias_mask; + pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask; + } return true; } -- cgit v1.2.3 From ee7de6ad382d8d0fcf5bdb7443c7bc1f55e9d0f5 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Tue, 24 Sep 2019 14:00:39 -0700 Subject: drm/i915/tgl: Add dkl phy pll calculations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extending ICL mg calculations to also support dkl calculations. 
v3: Fixing iref_trim calculation for 38400 refclock BSpec: 49204 Reviewed-by: Lucas De Marchi Signed-off-by: Vandita Kulkarni Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190924210040.142075-5-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 29 ++++++++++++++--- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 45 ++++++++++++++++++++++----- 2 files changed, 62 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 14fe987888a6..c06353ae0d12 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1413,11 +1413,30 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv, ref_clock = dev_priv->cdclk.hw.ref; - m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK; - m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; - m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ? - (pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >> - MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0; + if (INTEL_GEN(dev_priv) >= 12) { + m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK; + m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT; + m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK; + + if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) { + m2_frac = pll_state->mg_pll_bias & + DKL_PLL_BIAS_FBDIV_FRAC_MASK; + m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT; + } else { + m2_frac = 0; + } + } else { + m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK; + m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; + + if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) { + m2_frac = pll_state->mg_pll_div0 & + MG_PLL_DIV0_FBDIV_FRAC_MASK; + m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT; + } else { + m2_frac = 0; + } + } switch (pll_state->mg_clktop2_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index ce2dee38c956..69abafa45ce9 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2607,7 +2607,8 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port) static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, u32 *target_dco_khz, - struct intel_dpll_hw_state *state) + struct intel_dpll_hw_state *state, + bool is_dkl) { u32 dco_min_freq, dco_max_freq; int div1_vals[] = {7, 5, 3, 2}; @@ -2629,8 +2630,13 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, continue; if (div2 >= 2) { - a_divratio = is_dp ? 10 : 5; - tlinedrv = 2; + if (is_dkl) { + a_divratio = 5; + tlinedrv = 1; + } else { + a_divratio = is_dp ? 
10 : 5; + tlinedrv = 2; + } } else { a_divratio = 5; tlinedrv = 0; @@ -2693,11 +2699,12 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, u64 tmp; bool use_ssc = false; bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI); + bool is_dkl = INTEL_GEN(dev_priv) >= 12; memset(pll_state, 0, sizeof(*pll_state)); if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz, - pll_state)) { + pll_state, is_dkl)) { DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock); return false; } @@ -2705,8 +2712,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, m1div = 2; m2div_int = dco_khz / (refclk_khz * m1div); if (m2div_int > 255) { - m1div = 4; - m2div_int = dco_khz / (refclk_khz * m1div); + if (!is_dkl) { + m1div = 4; + m2div_int = dco_khz / (refclk_khz * m1div); + } + if (m2div_int > 255) { DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n", clock); @@ -2787,7 +2797,28 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, ssc_steplog = 4; /* write pll_state calculations */ - { + if (is_dkl) { + pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) | + DKL_PLL_DIV0_PROP_COEFF(prop_coeff) | + DKL_PLL_DIV0_FBPREDIV(m1div) | + DKL_PLL_DIV0_FBDIV_INT(m2div_int); + + pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) | + DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt); + + pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) | + DKL_PLL_SSC_STEP_LEN(ssc_steplen) | + DKL_PLL_SSC_STEP_NUM(ssc_steplog) | + (use_ssc ? DKL_PLL_SSC_EN : 0); + + pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) | + DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac); + + pll_state->mg_pll_tdc_coldst_bias = + DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) | + DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain); + + } else { pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) | MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) | -- cgit v1.2.3 From 6677c3b167b3ba9fa183015f6244e2db28d99f29 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Tue, 24 Sep 2019 14:00:40 -0700 Subject: drm/i915/tgl: Return the mg/dkl pll as DDI clock for new TC ports MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit TGL added 2 more TC ports that currently are not being handled by icl_pll_to_ddi_clk_sel(), so adding those. Reviewed-by: Lucas De Marchi Cc: Lucas De Marchi Cc: Imre Deak Reported-by: Imre Deak Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190924210040.142075-6-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index c06353ae0d12..aa470c70a198 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1049,6 +1049,8 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder, case DPLL_ID_ICL_MGPLL2: case DPLL_ID_ICL_MGPLL3: case DPLL_ID_ICL_MGPLL4: + case DPLL_ID_TGL_MGPLL5: + case DPLL_ID_TGL_MGPLL6: return DDI_CLK_SEL_MG; } } -- cgit v1.2.3 From ddef29578a81a1d4d8f2b26a7adbfe21407ee3ea Mon Sep 17 00:00:00 2001 From: "Wunderlich, Mark" Date: Wed, 18 Sep 2019 23:36:37 +0000 Subject: nvme-tcp: fix wrong stop condition in io_work Allow the do/while statement to continue if current time is not after the proposed time 'deadline'. Intent is to allow loop to proceed for a specific time period. Currently the loop, as coded, will exit after first pass. 
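For illustration, the fix hinges on time_after() semantics: time_after(a, b) only becomes true once a has moved past b, so looping "while (time_after(jiffies, start))" with start set one millisecond in the future ends immediately. Below is a small, self-contained userspace sketch of the wrap-safe comparison the kernel macro is built on (not the driver code itself):

#include <stdbool.h>
#include <stdio.h>

/* Userspace sketch of the kernel's time_after() idea: "a is after b",
 * done with a signed subtraction so it stays correct across counter wrap. */
static bool time_after_u32(unsigned int a, unsigned int b)
{
	return (int)(b - a) < 0;
}

int main(void)
{
	unsigned int now = 0xfffffff0u;		/* counter about to wrap */
	unsigned int deadline = now + 16;	/* 16 ticks later, past the wrap */

	printf("%d\n", time_after_u32(now, deadline));		/* 0: deadline not reached yet */
	printf("%d\n", time_after_u32(deadline + 1, deadline));	/* 1: deadline has passed */
	return 0;
}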
Signed-off-by: Mark Wunderlich Reviewed-by: Sagi Grimberg Signed-off-by: Sagi Grimberg --- drivers/nvme/host/tcp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 4ffd5957637a..385a5212c10f 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1042,7 +1042,7 @@ static void nvme_tcp_io_work(struct work_struct *w) { struct nvme_tcp_queue *queue = container_of(w, struct nvme_tcp_queue, io_work); - unsigned long start = jiffies + msecs_to_jiffies(1); + unsigned long deadline = jiffies + msecs_to_jiffies(1); do { bool pending = false; @@ -1067,7 +1067,7 @@ static void nvme_tcp_io_work(struct work_struct *w) if (!pending) return; - } while (time_after(jiffies, start)); /* quota is exhausted */ + } while (!time_after(jiffies, deadline)); /* quota is exhausted */ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); } -- cgit v1.2.3 From 7cbb5c6f9aa7cfda7175d82a9cf77a92965b0c5e Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Wed, 18 Sep 2019 13:15:55 -0500 Subject: nvme-pci: Save PCI state before putting drive into deepest state The action of saving the PCI state will cause numerous PCI configuration space reads which depending upon the vendor implementation may cause the drive to exit the deepest NVMe state. In these cases ASPM will typically resolve the PCIe link state and APST may resolve the NVMe power state. However it has also been observed that this register access after quiesced will cause PC10 failure on some device combinations. To resolve this, move the PCI state saving to before SetFeatures has been called. This has been proven to resolve the issue across a 5000 sample test on previously failing disk/system combinations. Signed-off-by: Mario Limonciello Reviewed-by: Keith Busch Signed-off-by: Sagi Grimberg --- drivers/nvme/host/pci.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 6b4d7b064b38..d3f63f0c212a 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2944,11 +2944,21 @@ static int nvme_suspend(struct device *dev) if (ret < 0) goto unfreeze; + /* + * A saved state prevents pci pm from generically controlling the + * device's power. If we're using protocol specific settings, we don't + * want pci interfering. + */ + pci_save_state(pdev); + ret = nvme_set_power_state(ctrl, ctrl->npss); if (ret < 0) goto unfreeze; if (ret) { + /* discard the saved state */ + pci_load_saved_state(pdev, NULL); + /* * Clearing npss forces a controller reset on resume. The * correct value will be resdicovered then. @@ -2956,14 +2966,7 @@ static int nvme_suspend(struct device *dev) nvme_dev_disable(ndev, true); ctrl->npss = 0; ret = 0; - goto unfreeze; } - /* - * A saved state prevents pci pm from generically controlling the - * device's power. If we're using protocol specific settings, we don't - * want pci interfering. - */ - pci_save_state(pdev); unfreeze: nvme_unfreeze(ctrl); return ret; -- cgit v1.2.3 From bc4f6e06a90ea016855fc67212b4d500145f0b8a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 23 Sep 2019 17:18:36 +0300 Subject: nvme: fix an error code in nvme_init_subsystem() "ret" should be a negative error code here, but it's either success or possibly uninitialized. 
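For context, this is the generic shape of the bug class being fixed here (a sketch, not the nvme code): the error path jumps to the cleanup label without ever storing a negative errno in ret, so the caller can be told the setup succeeded.

#include <stdio.h>

static int register_thing(void) { return -1; }	/* stub: pretend this step fails */
static void cleanup_thing(void) { }

/* The failure branch jumps to cleanup, but nothing stores a negative errno
 * in 'ret' first, so the function still returns 0. The fix is to capture the
 * failing call's result in 'ret' before jumping. */
static int do_setup(void)
{
	int ret = 0;

	if (register_thing() < 0)
		goto out_cleanup;	/* bug: ret is still 0 here */

	return 0;

out_cleanup:
	cleanup_thing();
	return ret;
}

int main(void)
{
	printf("do_setup() = %d (0 looks like success)\n", do_setup());
	return 0;
}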
Fixes: 32fd90c40768 ("nvme: change locking for the per-subsystem controller list") Signed-off-by: Dan Carpenter Reviewed-by: Keith Busch Signed-off-by: Sagi Grimberg --- drivers/nvme/host/core.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 0c385b1994fe..d3c9df62a9d5 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2543,8 +2543,9 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) list_add_tail(&subsys->entry, &nvme_subsystems); } - if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, - dev_name(ctrl->device))) { + ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, + dev_name(ctrl->device)); + if (ret) { dev_err(ctrl->device, "failed to create sysfs link from subsystem.\n"); goto out_put_subsystem; -- cgit v1.2.3 From ff13c1b87c97275b82b2af99044b4abf6861b28f Mon Sep 17 00:00:00 2001 From: Max Gurtovoy Date: Sat, 21 Sep 2019 23:58:19 +0300 Subject: nvme-rdma: Fix max_hw_sectors calculation By default, the NVMe/RDMA driver should support max io_size of 1MiB (or upto the maximum supported size by the HCA). Currently, one will see that /sys/class/block//queue/max_hw_sectors_kb is 1020 instead of 1024. A non power of 2 value can cause performance degradation due to unnecessary splitting of IO requests and unoptimized allocation units. The number of pages per MR has been fixed here, so there is no longer any need to reduce max_sectors by 1. Reviewed-by: Sagi Grimberg Signed-off-by: Max Gurtovoy Signed-off-by: Sagi Grimberg --- drivers/nvme/host/rdma.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index dfa07bb9dfeb..9d16dfc29368 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -427,7 +427,7 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue) static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev) { return min_t(u32, NVME_RDMA_MAX_SEGMENTS, - ibdev->attrs.max_fast_reg_page_list_len); + ibdev->attrs.max_fast_reg_page_list_len - 1); } static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) @@ -437,7 +437,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) const int cq_factor = send_wr_factor + 1; /* + RECV */ int comp_vector, idx = nvme_rdma_queue_idx(queue); enum ib_poll_context poll_ctx; - int ret; + int ret, pages_per_mr; queue->device = nvme_rdma_find_get_device(queue->cm_id); if (!queue->device) { @@ -479,10 +479,16 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue) goto out_destroy_qp; } + /* + * Currently we don't use SG_GAPS MR's so if the first entry is + * misaligned we'll end up using two entries for a single data page, + * so one additional entry is required. 
+ */ + pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev) + 1; ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs, queue->queue_size, IB_MR_TYPE_MEM_REG, - nvme_rdma_get_max_fr_pages(ibdev), 0); + pages_per_mr, 0); if (ret) { dev_err(queue->ctrl->ctrl.device, "failed to initialize MR pool sized %d for QID %d\n", @@ -820,8 +826,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, if (error) goto out_stop_queue; - ctrl->ctrl.max_hw_sectors = - (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9); + ctrl->ctrl.max_segments = ctrl->max_fr_pages; + ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9); blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); -- cgit v1.2.3 From f03e42c6af60f778a6d1ccfb857db9b2ec835279 Mon Sep 17 00:00:00 2001 From: Gabriel Craciunescu Date: Mon, 23 Sep 2019 20:22:56 +0200 Subject: Added QUIRKs for ADATA XPG SX8200 Pro 512GB Booting with default_ps_max_latency_us >6000 makes the device fail. Also SUBNQN is NULL and gives a warning on each boot/resume. $ nvme id-ctrl /dev/nvme0 | grep ^subnqn subnqn : (null) I use this device with an Acer Nitro 5 (AN515-43-R8BF) Laptop. To be sure is not a Laptop issue only, I tested the device on my server board with the same results. ( with 2x,4x link on the board and 4x link on a PCI-E card ). Signed-off-by: Gabriel Craciunescu Reviewed-by: Sagi Grimberg Signed-off-by: Sagi Grimberg --- drivers/nvme/host/pci.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d3f63f0c212a..7ab07f4dce83 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3091,6 +3091,9 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_LIGHTNVM, }, { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, -- cgit v1.2.3 From 30f27d57c06e0199a9d3e0775113e13e2ae9078e Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Fri, 13 Sep 2019 10:36:40 -0700 Subject: nvmet-tcp: remove superflous check on request sgl Now that sgl_free is null safe, drop the superflous check. 
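The rationale mirrors plain free()/kfree() semantics: once the release helper accepts NULL as a no-op, the guarding if adds nothing. A trivial userspace illustration of the pattern:

#include <stdlib.h>

int main(void)
{
	char *buf = NULL;

	/* redundant form: the check repeats what free() already guarantees */
	if (buf)
		free(buf);

	/* equivalent, simpler form: free(NULL) is defined to do nothing */
	free(buf);

	return 0;
}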
Signed-off-by: Sagi Grimberg --- drivers/nvme/target/tcp.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index bf4f03474e89..d535080b781f 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -348,8 +348,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) return 0; err: - if (cmd->req.sg_cnt) - sgl_free(cmd->req.sg); + sgl_free(cmd->req.sg); return NVME_SC_INTERNAL; } @@ -554,8 +553,7 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd) if (queue->nvme_sq.sqhd_disabled) { kfree(cmd->iov); - if (cmd->req.sg_cnt) - sgl_free(cmd->req.sg); + sgl_free(cmd->req.sg); } return 1; @@ -586,8 +584,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd, return -EAGAIN; kfree(cmd->iov); - if (cmd->req.sg_cnt) - sgl_free(cmd->req.sg); + sgl_free(cmd->req.sg); cmd->queue->snd_cmd = NULL; nvmet_tcp_put_cmd(cmd); return 1; @@ -1310,8 +1307,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd) nvmet_req_uninit(&cmd->req); nvmet_tcp_unmap_pdu_iovec(cmd); kfree(cmd->iov); - if (cmd->req.sg_cnt) - sgl_free(cmd->req.sg); + sgl_free(cmd->req.sg); } static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue) -- cgit v1.2.3 From 19ea025e1d28c629b369c3532a85b3df478cc5c6 Mon Sep 17 00:00:00 2001 From: Jian-Hong Pan Date: Tue, 24 Sep 2019 17:42:41 +0800 Subject: nvme: Add quirk for Kingston NVME SSD running FW E8FK11.T Kingston NVME SSD with firmware version E8FK11.T has no interrupt after resume with actions related to suspend to idle. This patch applied NVME_QUIRK_SIMPLE_SUSPEND quirk to fix this issue. Fixes: d916b1be94b6 ("nvme-pci: use host managed power state for suspend") Buglink: https://bugzilla.kernel.org/show_bug.cgi?id=204887 Signed-off-by: Jian-Hong Pan Signed-off-by: Sagi Grimberg --- drivers/nvme/host/core.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index d3c9df62a9d5..adff57ea149e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2292,6 +2292,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = { .vid = 0x14a4, .fr = "22301111", .quirks = NVME_QUIRK_SIMPLE_SUSPEND, + }, + { + /* + * This Kingston E8FK11.T firmware version has no interrupt + * after resume with actions related to suspend to idle + * https://bugzilla.kernel.org/show_bug.cgi?id=204887 + */ + .vid = 0x2646, + .fr = "E8FK11.T", + .quirks = NVME_QUIRK_SIMPLE_SUSPEND, } }; -- cgit v1.2.3 From 65e68edce0db433aa0c2b26d7dc14fbbbeb89fbb Mon Sep 17 00:00:00 2001 From: Marta Rybczynska Date: Tue, 24 Sep 2019 15:14:52 +0200 Subject: nvme: allow 64-bit results in passthru commands It is not possible to get 64-bit results from the passthru commands, what prevents from getting for the Capabilities (CAP) property value. As a result, it is not possible to implement IOL's NVMe Conformance test 4.3 Case 1 for Fabrics targets [1] (page 123). This issue has been already discussed [2], but without a solution. This patch solves the problem by adding new ioctls with a new passthru structure, including 64-bit results. The older ioctls stay unchanged. 
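To show how the new interface is meant to be consumed, here is a hypothetical userspace sketch: it issues an admin command through the new NVME_IOCTL_ADMIN64_CMD ioctl and reads back the full 64-bit result field introduced by this patch. The opcode and device path are placeholders, not a specific command.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
	struct nvme_passthru_cmd64 cmd;
	int fd, status;

	fd = open("/dev/nvme0", O_RDWR);	/* placeholder controller node */
	if (fd < 0)
		return 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = 0x0a;			/* placeholder admin opcode */

	status = ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd);
	if (status >= 0)
		printf("status %d, 64-bit result 0x%llx\n",
		       status, (unsigned long long)cmd.result);

	return 0;
}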
[1] https://www.iol.unh.edu/sites/default/files/testsuites/nvme/UNH-IOL_NVMe_Conformance_Test_Suite_v11.0.pdf [2] http://lists.infradead.org/pipermail/linux-nvme/2018-June/018791.html Signed-off-by: Marta Rybczynska Reviewed-by: Keith Busch Reviewed-by: Christoph Hellwig Signed-off-by: Sagi Grimberg --- drivers/nvme/host/core.c | 108 ++++++++++++++++++++++++++++++++++------ include/uapi/linux/nvme_ioctl.h | 23 +++++++++ 2 files changed, 115 insertions(+), 16 deletions(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index adff57ea149e..87ed437a46b8 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -850,7 +850,7 @@ out: static int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, void __user *ubuffer, unsigned bufflen, void __user *meta_buffer, unsigned meta_len, - u32 meta_seed, u32 *result, unsigned timeout) + u32 meta_seed, u64 *result, unsigned timeout) { bool write = nvme_is_write(cmd); struct nvme_ns *ns = q->queuedata; @@ -891,7 +891,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, else ret = nvme_req(req)->status; if (result) - *result = le32_to_cpu(nvme_req(req)->result.u32); + *result = le64_to_cpu(nvme_req(req)->result.u64); if (meta && !ret && !write) { if (copy_to_user(meta_buffer, meta, meta_len)) ret = -EFAULT; @@ -1338,6 +1338,54 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, struct nvme_command c; unsigned timeout = 0; u32 effects; + u64 result; + int status; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (copy_from_user(&cmd, ucmd, sizeof(cmd))) + return -EFAULT; + if (cmd.flags) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.common.opcode = cmd.opcode; + c.common.flags = cmd.flags; + c.common.nsid = cpu_to_le32(cmd.nsid); + c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); + c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); + c.common.cdw10 = cpu_to_le32(cmd.cdw10); + c.common.cdw11 = cpu_to_le32(cmd.cdw11); + c.common.cdw12 = cpu_to_le32(cmd.cdw12); + c.common.cdw13 = cpu_to_le32(cmd.cdw13); + c.common.cdw14 = cpu_to_le32(cmd.cdw14); + c.common.cdw15 = cpu_to_le32(cmd.cdw15); + + if (cmd.timeout_ms) + timeout = msecs_to_jiffies(cmd.timeout_ms); + + effects = nvme_passthru_start(ctrl, ns, cmd.opcode); + status = nvme_submit_user_cmd(ns ? 
ns->queue : ctrl->admin_q, &c, + (void __user *)(uintptr_t)cmd.addr, cmd.data_len, + (void __user *)(uintptr_t)cmd.metadata, + cmd.metadata_len, 0, &result, timeout); + nvme_passthru_end(ctrl, effects); + + if (status >= 0) { + if (put_user(result, &ucmd->result)) + return -EFAULT; + } + + return status; +} + +static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, + struct nvme_passthru_cmd64 __user *ucmd) +{ + struct nvme_passthru_cmd64 cmd; + struct nvme_command c; + unsigned timeout = 0; + u32 effects; int status; if (!capable(CAP_SYS_ADMIN)) @@ -1408,6 +1456,41 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx) srcu_read_unlock(&head->srcu, idx); } +static bool is_ctrl_ioctl(unsigned int cmd) +{ + if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD) + return true; + if (is_sed_ioctl(cmd)) + return true; + return false; +} + +static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, + void __user *argp, + struct nvme_ns_head *head, + int srcu_idx) +{ + struct nvme_ctrl *ctrl = ns->ctrl; + int ret; + + nvme_get_ctrl(ns->ctrl); + nvme_put_ns_from_disk(head, srcu_idx); + + switch (cmd) { + case NVME_IOCTL_ADMIN_CMD: + ret = nvme_user_cmd(ctrl, NULL, argp); + break; + case NVME_IOCTL_ADMIN64_CMD: + ret = nvme_user_cmd64(ctrl, NULL, argp); + break; + default: + ret = sed_ioctl(ctrl->opal_dev, cmd, argp); + break; + } + nvme_put_ctrl(ctrl); + return ret; +} + static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { @@ -1425,20 +1508,8 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, * seperately and drop the ns SRCU reference early. This avoids a * deadlock when deleting namespaces using the passthrough interface. */ - if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) { - struct nvme_ctrl *ctrl = ns->ctrl; - - nvme_get_ctrl(ns->ctrl); - nvme_put_ns_from_disk(head, srcu_idx); - - if (cmd == NVME_IOCTL_ADMIN_CMD) - ret = nvme_user_cmd(ctrl, NULL, argp); - else - ret = sed_ioctl(ctrl->opal_dev, cmd, argp); - - nvme_put_ctrl(ctrl); - return ret; - } + if (is_ctrl_ioctl(cmd)) + return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx); switch (cmd) { case NVME_IOCTL_ID: @@ -1451,6 +1522,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, case NVME_IOCTL_SUBMIT_IO: ret = nvme_submit_io(ns, argp); break; + case NVME_IOCTL_IO64_CMD: + ret = nvme_user_cmd64(ns->ctrl, ns, argp); + break; default: if (ns->ndev) ret = nvme_nvm_ioctl(ns, cmd, arg); @@ -2852,6 +2926,8 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd, switch (cmd) { case NVME_IOCTL_ADMIN_CMD: return nvme_user_cmd(ctrl, NULL, argp); + case NVME_IOCTL_ADMIN64_CMD: + return nvme_user_cmd64(ctrl, NULL, argp); case NVME_IOCTL_IO_CMD: return nvme_dev_user_cmd(ctrl, argp); case NVME_IOCTL_RESET: diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h index 1c215ea1798e..e168dc59e9a0 100644 --- a/include/uapi/linux/nvme_ioctl.h +++ b/include/uapi/linux/nvme_ioctl.h @@ -45,6 +45,27 @@ struct nvme_passthru_cmd { __u32 result; }; +struct nvme_passthru_cmd64 { + __u8 opcode; + __u8 flags; + __u16 rsvd1; + __u32 nsid; + __u32 cdw2; + __u32 cdw3; + __u64 metadata; + __u64 addr; + __u32 metadata_len; + __u32 data_len; + __u32 cdw10; + __u32 cdw11; + __u32 cdw12; + __u32 cdw13; + __u32 cdw14; + __u32 cdw15; + __u32 timeout_ms; + __u64 result; +}; + #define nvme_admin_cmd nvme_passthru_cmd #define NVME_IOCTL_ID _IO('N', 0x40) @@ -54,5 +75,7 @@ struct nvme_passthru_cmd { 
#define NVME_IOCTL_RESET _IO('N', 0x44) #define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45) #define NVME_IOCTL_RESCAN _IO('N', 0x46) +#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64) +#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64) #endif /* _UAPI_LINUX_NVME_IOCTL_H */ -- cgit v1.2.3 From 2b1ff255d2d043aa4e6d62f50c478b283f9943ac Mon Sep 17 00:00:00 2001 From: James Smart Date: Tue, 24 Sep 2019 14:22:08 -0700 Subject: nvme: Add ctrl attributes for queue_count and sqsize Current controller interrogation requires a lot of guesswork on how many io queues were created and what the io sq size is. The numbers are dependent upon core/fabric defaults, connect arguments, and target responses. Add sysfs attributes for queue_count and sqsize. Signed-off-by: James Smart Reviewed-by: Hannes Reinecke Signed-off-by: Sagi Grimberg --- drivers/nvme/host/core.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 87ed437a46b8..fd7dea36c3b6 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3135,6 +3135,8 @@ static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL); nvme_show_int_function(cntlid); nvme_show_int_function(numa_node); +nvme_show_int_function(queue_count); +nvme_show_int_function(sqsize); static ssize_t nvme_sysfs_delete(struct device *dev, struct device_attribute *attr, const char *buf, @@ -3215,6 +3217,8 @@ static struct attribute *nvme_dev_attrs[] = { &dev_attr_address.attr, &dev_attr_state.attr, &dev_attr_numa_node.attr, + &dev_attr_queue_count.attr, + &dev_attr_sqsize.attr, NULL }; -- cgit v1.2.3 From c1f2b8124bdf2d7812a759f83a36a6d6178b5c94 Mon Sep 17 00:00:00 2001 From: James Ausmus Date: Tue, 24 Sep 2019 15:28:29 -0700 Subject: drm/i915/tgl: Add memory type decoding for bandwidth checking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The memory type values have changed in TGL, so we need to translate them differently than ICL. While we're moving it, fix up the ICL translation for LPDDR4. 
BSpec: 53998 v2: Fix up ICL LPDDR4 entry (Ville); Drop unused values from TGL (Ville) Cc: Ville Syrjälä Cc: Stanislav Lisovskiy Signed-off-by: James Ausmus Reviewed-by: Ville Syrjälä Reviewed-by: Stuart Summers Signed-off-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20190924222829.13142-1-james.ausmus@intel.com --- drivers/gpu/drm/i915/display/intel_bw.c | 55 +++++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index cd58e47ab7b2..22e83f857de8 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -35,22 +35,45 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv, if (ret) return ret; - switch (val & 0xf) { - case 0: - qi->dram_type = INTEL_DRAM_DDR4; - break; - case 1: - qi->dram_type = INTEL_DRAM_DDR3; - break; - case 2: - qi->dram_type = INTEL_DRAM_LPDDR3; - break; - case 3: - qi->dram_type = INTEL_DRAM_LPDDR3; - break; - default: - MISSING_CASE(val & 0xf); - break; + if (IS_GEN(dev_priv, 12)) { + switch (val & 0xf) { + case 0: + qi->dram_type = INTEL_DRAM_DDR4; + break; + case 3: + qi->dram_type = INTEL_DRAM_LPDDR4; + break; + case 4: + qi->dram_type = INTEL_DRAM_DDR3; + break; + case 5: + qi->dram_type = INTEL_DRAM_LPDDR3; + break; + default: + MISSING_CASE(val & 0xf); + break; + } + } else if (IS_GEN(dev_priv, 11)) { + switch (val & 0xf) { + case 0: + qi->dram_type = INTEL_DRAM_DDR4; + break; + case 1: + qi->dram_type = INTEL_DRAM_DDR3; + break; + case 2: + qi->dram_type = INTEL_DRAM_LPDDR3; + break; + case 3: + qi->dram_type = INTEL_DRAM_LPDDR4; + break; + default: + MISSING_CASE(val & 0xf); + break; + } + } else { + MISSING_CASE(INTEL_GEN(dev_priv)); + qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */ } qi->num_channels = (val & 0xf0) >> 4; -- cgit v1.2.3 From a0f0037e908c656573d165aadbfae6c05fc4dd75 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Thu, 26 Sep 2019 08:54:03 +0800 Subject: KVM: LAPIC: Loosen filter for adaptive tuning of lapic_timer_advance_ns 5000 guest cycles delta is easy to encounter on desktop, per-vCPU lapic_timer_advance_ns always keeps at 1000ns initial value, let's loosen the filter a bit to let adaptive tuning make progress. 
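A toy model of the behaviour described above, assuming (as the message implies) that deltas beyond the filter cutoff are simply not fed into the tuning; the units and numbers are illustrative only:

#include <stdio.h>

/* With typical measured deltas of ~5000 guest cycles and a cutoff of 5000,
 * every sample is discarded and the advance value never moves off its 1000ns
 * initial value; the larger cutoff lets the samples feed the step-by-step
 * adjustment (1/8 of the error per sample, matching ADJUST_STEP). */
int main(void)
{
	const int old_cutoff = 5000, new_cutoff = 10000, step = 8;
	const int delta = 5200;			/* typical measured delta */
	int adv_old = 1000, adv_new = 1000;	/* timer advance estimates */

	for (int i = 0; i < 8; i++) {
		if (delta <= old_cutoff)
			adv_old += delta / step;	/* never taken */
		if (delta <= new_cutoff)
			adv_new += delta / step;
	}

	printf("with old filter: %d, with new filter: %d\n", adv_old, adv_new);
	return 0;
}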
Signed-off-by: Wanpeng Li Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 3a3a6854dcca..87b0fcc23ef8 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -66,9 +66,10 @@ #define X2APIC_BROADCAST 0xFFFFFFFFul static bool lapic_timer_advance_dynamic __read_mostly; -#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100 -#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 5000 -#define LAPIC_TIMER_ADVANCE_ADJUST_INIT 1000 +#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100 /* clock cycles */ +#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000 /* clock cycles */ +#define LAPIC_TIMER_ADVANCE_NS_INIT 1000 +#define LAPIC_TIMER_ADVANCE_NS_MAX 5000 /* step-by-step approximation to mitigate fluctuation */ #define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8 @@ -1504,8 +1505,8 @@ static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu, timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP; } - if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_ADJUST_MAX)) - timer_advance_ns = LAPIC_TIMER_ADVANCE_ADJUST_INIT; + if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX)) + timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT; apic->lapic_timer.timer_advance_ns = timer_advance_ns; } @@ -2302,7 +2303,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns) HRTIMER_MODE_ABS_HARD); apic->lapic_timer.timer.function = apic_timer_fn; if (timer_advance_ns == -1) { - apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_ADJUST_INIT; + apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT; lapic_timer_advance_dynamic = true; } else { apic->lapic_timer.timer_advance_ns = timer_advance_ns; -- cgit v1.2.3 From a1a640b8c0cd8a2a7f84ab694f04bc64dc6988af Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Wed, 25 Sep 2019 11:17:14 -0700 Subject: kvm: x86: Fix a spurious -E2BIG in __do_cpuid_func Don't return -E2BIG from __do_cpuid_func when processing function 0BH or 1FH and the last interesting subleaf occupies the last allocated entry in the result array. Cc: Paolo Bonzini Fixes: 831bf664e9c1fc ("KVM: Refactor and simplify kvm_dev_ioctl_get_supported_cpuid") Signed-off-by: Jim Mattson Reviewed-by: Peter Shier Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 63316036f85a..6bf4a261b308 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -618,16 +618,20 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function, */ case 0x1f: case 0xb: { - int i, level_type; + int i; - /* read more entries until level_type is zero */ - for (i = 1; ; ++i) { + /* + * We filled in entry[0] for CPUID(EAX=, + * ECX=00H) above. If its level type (ECX[15:8]) is + * zero, then the leaf is unimplemented, and we're + * done. Otherwise, continue to populate entries + * until the level type (ECX[15:8]) of the previously + * added entry is zero. 
+ */ + for (i = 1; entry[i - 1].ecx & 0xff00; ++i) { if (*nent >= maxnent) goto out; - level_type = entry[i - 1].ecx & 0xff00; - if (!level_type) - break; do_host_cpuid(&entry[i], function, i); ++*nent; } -- cgit v1.2.3 From 3ca94192278ca8de169d78c085396c424be123b3 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Wed, 18 Sep 2019 17:50:10 +0800 Subject: KVM: X86: Fix userspace set invalid CR4 Reported by syzkaller: WARNING: CPU: 0 PID: 6544 at /home/kernel/data/kvm/arch/x86/kvm//vmx/vmx.c:4689 handle_desc+0x37/0x40 [kvm_intel] CPU: 0 PID: 6544 Comm: a.out Tainted: G OE 5.3.0-rc4+ #4 RIP: 0010:handle_desc+0x37/0x40 [kvm_intel] Call Trace: vmx_handle_exit+0xbe/0x6b0 [kvm_intel] vcpu_enter_guest+0x4dc/0x18d0 [kvm] kvm_arch_vcpu_ioctl_run+0x407/0x660 [kvm] kvm_vcpu_ioctl+0x3ad/0x690 [kvm] do_vfs_ioctl+0xa2/0x690 ksys_ioctl+0x6d/0x80 __x64_sys_ioctl+0x1a/0x20 do_syscall_64+0x74/0x720 entry_SYSCALL_64_after_hwframe+0x49/0xbe When CR4.UMIP is set, guest should have UMIP cpuid flag. Current kvm set_sregs function doesn't have such check when userspace inputs sregs values. SECONDARY_EXEC_DESC is enabled on writes to CR4.UMIP in vmx_set_cr4 though guest doesn't have UMIP cpuid flag. The testcast triggers handle_desc warning when executing ltr instruction since guest architectural CR4 doesn't set UMIP. This patch fixes it by adding valid CR4 and CPUID combination checking in __set_sregs. syzkaller source: https://syzkaller.appspot.com/x/repro.c?x=138efb99600000 Reported-by: syzbot+0f1819555fbdce992df9@syzkaller.appspotmail.com Cc: stable@vger.kernel.org Signed-off-by: Wanpeng Li Reviewed-by: Sean Christopherson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0ed07d8d2caa..180c7e88577a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -885,34 +885,42 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) } EXPORT_SYMBOL_GPL(kvm_set_xcr); -int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) +static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { - unsigned long old_cr4 = kvm_read_cr4(vcpu); - unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | - X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE; - if (cr4 & CR4_RESERVED_BITS) - return 1; + return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE)) - return 1; + return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP)) - return 1; + return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP)) - return 1; + return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE)) - return 1; + return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE)) - return 1; + return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57)) - return 1; + return -EINVAL; if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP)) + return -EINVAL; + + return 0; +} + +int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) +{ + unsigned long old_cr4 = kvm_read_cr4(vcpu); + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | + X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE; + + if (kvm_valid_cr4(vcpu, cr4)) return 1; if (is_long_mode(vcpu)) { @@ -8714,10 +8722,6 @@ EXPORT_SYMBOL_GPL(kvm_task_switch); static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { - if 
(!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && - (sregs->cr4 & X86_CR4_OSXSAVE)) - return -EINVAL; - if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { /* * When EFER.LME and CR0.PG are set, the processor is in @@ -8736,7 +8740,7 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) return -EINVAL; } - return 0; + return kvm_valid_cr4(vcpu, sregs->cr4); } static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) -- cgit v1.2.3 From 43561123ab3759eb6ff47693aec1a307af0aef83 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Wed, 25 Sep 2019 17:04:17 -0700 Subject: kvm: x86: Improve emulation of CPUID leaves 0BH and 1FH For these CPUID leaves, the EDX output is not dependent on the ECX input (i.e. the SIGNIFCANT_INDEX flag doesn't apply to EDX). Furthermore, the low byte of the ECX output is always identical to the low byte of the ECX input. KVM does not produce the correct ECX and EDX outputs for any undefined subleaves beyond the first. Special-case these CPUID leaves in kvm_cpuid, so that the ECX and EDX outputs are properly generated for all undefined subleaves. Fixes: 0771671749b59a ("KVM: Enhance guest cpuid management") Fixes: a87f2d3a6eadab ("KVM: x86: Add Intel CPUID.1F cpuid emulation support") Signed-off-by: Jim Mattson Reviewed-by: Marc Orr Reviewed-by: Peter Shier Reviewed-by: Jacob Xu Cc: Sean Christopherson Cc: Paolo Bonzini Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 83 +++++++++++++++++++++++++++++----------------------- 1 file changed, 47 insertions(+), 36 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 6bf4a261b308..a26a1ae03145 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -973,53 +973,64 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry); /* - * If no match is found, check whether we exceed the vCPU's limit - * and return the content of the highest valid _standard_ leaf instead. - * This is to satisfy the CPUID specification. + * If the basic or extended CPUID leaf requested is higher than the + * maximum supported basic or extended leaf, respectively, then it is + * out of range. */ -static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu, - u32 function, u32 index) +static bool cpuid_function_in_range(struct kvm_vcpu *vcpu, u32 function) { - struct kvm_cpuid_entry2 *maxlevel; - - maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0); - if (!maxlevel || maxlevel->eax >= function) - return NULL; - if (function & 0x80000000) { - maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0); - if (!maxlevel) - return NULL; - } - return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index); + struct kvm_cpuid_entry2 *max; + + max = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0); + return max && function <= max->eax; } bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool check_limit) { u32 function = *eax, index = *ecx; - struct kvm_cpuid_entry2 *best; - bool entry_found = true; - - best = kvm_find_cpuid_entry(vcpu, function, index); - - if (!best) { - entry_found = false; - if (!check_limit) - goto out; + struct kvm_cpuid_entry2 *entry; + struct kvm_cpuid_entry2 *max; + bool found; - best = check_cpuid_limit(vcpu, function, index); + entry = kvm_find_cpuid_entry(vcpu, function, index); + found = entry; + /* + * Intel CPUID semantics treats any query for an out-of-range + * leaf as if the highest basic leaf (i.e. CPUID.0H:EAX) were + * requested. 
+ */ + if (!entry && check_limit && !cpuid_function_in_range(vcpu, function)) { + max = kvm_find_cpuid_entry(vcpu, 0, 0); + if (max) { + function = max->eax; + entry = kvm_find_cpuid_entry(vcpu, function, index); + } } - -out: - if (best) { - *eax = best->eax; - *ebx = best->ebx; - *ecx = best->ecx; - *edx = best->edx; - } else + if (entry) { + *eax = entry->eax; + *ebx = entry->ebx; + *ecx = entry->ecx; + *edx = entry->edx; + } else { *eax = *ebx = *ecx = *edx = 0; - trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found); - return entry_found; + /* + * When leaf 0BH or 1FH is defined, CL is pass-through + * and EDX is always the x2APIC ID, even for undefined + * subleaves. Index 1 will exist iff the leaf is + * implemented, so we pass through CL iff leaf 1 + * exists. EDX can be copied from any existing index. + */ + if (function == 0xb || function == 0x1f) { + entry = kvm_find_cpuid_entry(vcpu, function, 1); + if (entry) { + *ecx = index & 0xff; + *edx = entry->edx; + } + } + } + trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, found); + return found; } EXPORT_SYMBOL_GPL(kvm_cpuid); -- cgit v1.2.3 From 5f41a37b151f6459e0b650a2f4d1d59b6c02d1ab Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Wed, 25 Sep 2019 17:04:18 -0700 Subject: kvm: x86: Use AMD CPUID semantics for AMD vCPUs When the guest CPUID information represents an AMD vCPU, return all zeroes for queries of undefined CPUID leaves, whether or not they are in range. Signed-off-by: Jim Mattson Fixes: bd22f5cfcfe8f6 ("KVM: move and fix substitue search for missing CPUID entries") Reviewed-by: Marc Orr Reviewed-by: Peter Shier Reviewed-by: Jacob Xu Cc: Sean Christopherson Cc: Paolo Bonzini Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index a26a1ae03145..ce4a1b3cc3e8 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -998,9 +998,11 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, /* * Intel CPUID semantics treats any query for an out-of-range * leaf as if the highest basic leaf (i.e. CPUID.0H:EAX) were - * requested. + * requested. AMD CPUID semantics returns all zeroes for any + * undefined leaf, whether or not the leaf is in range. */ - if (!entry && check_limit && !cpuid_function_in_range(vcpu, function)) { + if (!entry && check_limit && !guest_cpuid_is_amd(vcpu) && + !cpuid_function_in_range(vcpu, function)) { max = kvm_find_cpuid_entry(vcpu, 0, 0); if (max) { function = max->eax; -- cgit v1.2.3 From 40bc47b08b6efc8b64fef77cb46974f634215425 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Tue, 24 Sep 2019 13:51:08 -0700 Subject: kvm: x86: Enumerate support for CLZERO instruction CLZERO is available to the guest if it is supported on the host. Therefore, enumerate support for the instruction in KVM_GET_SUPPORTED_CPUID whenever it is supported on the host. 
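As an aside, not part of the patch: once KVM_GET_SUPPORTED_CPUID reports the bit and userspace copies it into the guest CPUID, the guest can verify it the usual way, since CLZERO is enumerated in CPUID Fn8000_0008 EBX bit 0 per the AMD manuals. A minimal userspace sketch (assumes GCC's <cpuid.h> helper):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Extended leaf 0x80000008; EBX bit 0 advertises CLZERO. */
	if (!__get_cpuid_count(0x80000008, 0, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("CLZERO %ssupported\n", (ebx & 1) ? "" : "not ");
	return 0;
}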
Signed-off-by: Jim Mattson Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index ce4a1b3cc3e8..34d9f9f481a3 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -485,8 +485,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 0x80000008.ebx */ const u32 kvm_cpuid_8000_0008_ebx_x86_features = - F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | - F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON); + F(CLZERO) | F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | + F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) | F(AMD_SSBD) | + F(VIRT_SSBD) | F(AMD_SSB_NO); /* cpuid 0xC0000001.edx */ const u32 kvm_cpuid_C000_0001_edx_x86_features = -- cgit v1.2.3 From 504ce1954fba888936c9d13ccc1e3db9b8f613d5 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 25 Sep 2019 23:37:21 +0200 Subject: KVM: x86: Expose XSAVEERPTR to the guest I was surprised to see that the guest reported `fxsave_leak' while the host did not. After digging deeper I noticed that the bits are simply masked out during enumeration. The XSAVEERPTR feature is actually a bug fix on AMD which means the kernel can disable a workaround. Pass XSAVEERPTR to the guest if available on the host. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 34d9f9f481a3..9c5029cf6f3f 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -485,9 +485,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function, /* cpuid 0x80000008.ebx */ const u32 kvm_cpuid_8000_0008_ebx_x86_features = - F(CLZERO) | F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | - F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON) | F(AMD_SSBD) | - F(VIRT_SSBD) | F(AMD_SSB_NO); + F(CLZERO) | F(XSAVEERPTR) | + F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) | + F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON); /* cpuid 0xC0000001.edx */ const u32 kvm_cpuid_C000_0001_edx_x86_features = -- cgit v1.2.3 From 45d3c5cd5233258ffa9326b33ae4af59fb127dfb Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Wed, 25 Sep 2019 16:45:42 -0700 Subject: drm/i915: Small joiner RAM buffer size is platform-specific According to the bspec, GLK/CNL have a smaller small joiner RAM buffer than ICL+. This feels like something that could easily change again on future platforms, so let's just add a function to return the proper per-platform buffer size. That may also slightly simplify the upcoming bigjoiner enabling. Since we have to change intel_dp_dsc_get_output_bpp()'s signature to pass the dev_priv down for the platform check, let's take the opportunity to also make that function static since it isn't used outside the intel_dp file. v2: Minor rebase on top of Maarten's changes. 
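For a rough sense of the numbers involved (illustrative mode width, not from the patch): with the sizes used below, a 3840-pixel-wide mode yields max_bpp_small_joiner_ram = 7680 * 8 / 3840 = 16 bpp on ICL+, but only 6144 * 8 / 3840 = 12 bpp (49152 / 3840, truncated) on GLK/CNL, which is why the buffer size needs to be looked up per platform instead of staying hardcoded.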
Bspec: 20388 Bspec: 49259 Cc: Manasi Navare Cc: Maarten Lankhorst Signed-off-by: Matt Roper Reviewed-by: Manasi Navare Link: https://patchwork.freedesktop.org/patch/msgid/20190925234542.24289-1-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_dp.c | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 893c9807755c..14a5c661f710 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -68,9 +68,6 @@ #define DP_DPRX_ESI_LEN 14 -/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */ -#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440 - /* DP DSC throughput values used for slice count calculations KPixels/s */ #define DP_DSC_PEAK_PIXEL_RATE 2720000 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 @@ -498,7 +495,17 @@ u32 intel_dp_mode_to_fec_clock(u32 mode_clock) DP_DSC_FEC_OVERHEAD_FACTOR); } -static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count, +static int +small_joiner_ram_size_bits(struct drm_i915_private *i915) +{ + if (INTEL_GEN(i915) >= 11) + return 7680 * 8; + else + return 6144 * 8; +} + +static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, + u32 link_clock, u32 lane_count, u32 mode_clock, u32 mode_hdisplay) { u32 bits_per_pixel, max_bpp_small_joiner_ram; @@ -515,7 +522,8 @@ static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count, DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel); /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */ - max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay; + max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / + mode_hdisplay; DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram); /* @@ -632,7 +640,8 @@ intel_dp_mode_valid(struct drm_connector *connector, true); } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) { dsc_max_output_bpp = - intel_dp_dsc_get_output_bpp(max_link_clock, + intel_dp_dsc_get_output_bpp(dev_priv, + max_link_clock, max_lanes, target_clock, mode->hdisplay) >> 4; @@ -2059,7 +2068,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, u8 dsc_dp_slice_count; dsc_max_output_bpp = - intel_dp_dsc_get_output_bpp(pipe_config->port_clock, + intel_dp_dsc_get_output_bpp(dev_priv, + pipe_config->port_clock, pipe_config->lane_count, adjusted_mode->crtc_clock, adjusted_mode->crtc_hdisplay); -- cgit v1.2.3 From 5311f5171e98e6958fe5cf1c424a6196115baeb6 Mon Sep 17 00:00:00 2001 From: Michał Winiarski Date: Thu, 26 Sep 2019 14:31:40 +0100 Subject: drm/i915: Define explicit wedged on init reset state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We're currently using scratch presence as a way of identifying that we entered wedged state at driver initialization time. Let's use a separate flag rather than rely on scratch. 
Signed-off-by: Michał Winiarski Cc: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190926133142.2838-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_reset.c | 11 ++++++++++- drivers/gpu/drm/i915/gt/intel_reset.h | 9 +++++++++ drivers/gpu/drm/i915/gt/intel_reset_types.h | 6 ++++++ drivers/gpu/drm/i915/i915_gem.c | 2 +- 4 files changed, 26 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index ae68c3786f63..ec978b4d1be3 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -811,7 +811,8 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) if (!test_bit(I915_WEDGED, >->reset.flags)) return true; - if (!gt->scratch) /* Never full initialised, recovery impossible */ + /* Never fully initialised, recovery impossible */ + if (test_bit(I915_WEDGED_ON_INIT, >->reset.flags)) return false; GEM_TRACE("start\n"); @@ -1279,6 +1280,14 @@ int intel_gt_terminally_wedged(struct intel_gt *gt) return intel_gt_is_wedged(gt) ? -EIO : 0; } +void intel_gt_set_wedged_on_init(struct intel_gt *gt) +{ + BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES > + I915_WEDGED_ON_INIT); + intel_gt_set_wedged(gt); + set_bit(I915_WEDGED_ON_INIT, >->reset.flags); +} + void intel_gt_init_reset(struct intel_gt *gt) { init_waitqueue_head(>->reset.queue); diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h index 52c00199e069..0b6ff1ee7f06 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.h +++ b/drivers/gpu/drm/i915/gt/intel_reset.h @@ -45,6 +45,12 @@ void intel_gt_set_wedged(struct intel_gt *gt); bool intel_gt_unset_wedged(struct intel_gt *gt); int intel_gt_terminally_wedged(struct intel_gt *gt); +/* + * There's no unset_wedged_on_init paired with this one. + * Once we're wedged on init, there's no going back. + */ +void intel_gt_set_wedged_on_init(struct intel_gt *gt); + int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask); int intel_reset_guc(struct intel_gt *gt); @@ -68,6 +74,9 @@ void __intel_fini_wedge(struct intel_wedge_me *w); static inline bool __intel_reset_failed(const struct intel_reset *reset) { + GEM_BUG_ON(test_bit(I915_WEDGED_ON_INIT, &reset->flags) ? + !test_bit(I915_WEDGED, &reset->flags) : false); + return unlikely(test_bit(I915_WEDGED, &reset->flags)); } diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h index 31968356e0c0..f43bc3a0fe4f 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset_types.h +++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h @@ -29,11 +29,17 @@ struct intel_reset { * we set the #I915_WEDGED bit. Prior to command submission, e.g. * i915_request_alloc(), this bit is checked and the sequence * aborted (with -EIO reported to userspace) if set. + * + * #I915_WEDGED_ON_INIT - If we fail to initialize the GPU we can no + * longer use the GPU - similar to #I915_WEDGED bit. The difference in + * in the way we're handling "forced" unwedged (e.g. through debugfs), + * which is not allowed in case we failed to initialize. 
*/ unsigned long flags; #define I915_RESET_BACKOFF 0 #define I915_RESET_MODESET 1 #define I915_RESET_ENGINE 2 +#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 2) #define I915_WEDGED (BITS_PER_LONG - 1) struct mutex mutex; /* serialises wedging/unwedging */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2da9544fa9a4..e2897a666225 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1411,7 +1411,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) err_gt: mutex_unlock(&dev_priv->drm.struct_mutex); - intel_gt_set_wedged(&dev_priv->gt); + intel_gt_set_wedged_on_init(&dev_priv->gt); i915_gem_suspend(dev_priv); i915_gem_suspend_late(dev_priv); -- cgit v1.2.3 From e1237523749e342d9ffb499a636ab6860a554cba Mon Sep 17 00:00:00 2001 From: Michał Winiarski Date: Thu, 26 Sep 2019 14:31:41 +0100 Subject: drm/i915/execlists: Use per-process HWSP as scratch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some of our commands (MI_FLUSH_DW / PIPE_CONTROL) require a post-sync write operation to be performed. Currently we're using dedicated VMA for PIPE_CONTROL and global HWSP for MI_FLUSH_DW. On execlists platforms, each of our contexts has an area that can be used as scratch space. Let's use that instead. Signed-off-by: Michał Winiarski Cc: Chris Wilson Cc: Daniele Ceraolo Spurio Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190926133142.2838-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_gt_types.h | 3 --- drivers/gpu/drm/i915/gt/intel_lrc.c | 45 +++++++++----------------------- drivers/gpu/drm/i915/gt/intel_lrc.h | 4 +++ 3 files changed, 17 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 3039cef64b11..e64db4c13df6 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -89,9 +89,6 @@ enum intel_gt_scratch_field { /* 8 bytes */ INTEL_GT_SCRATCH_FIELD_DEFAULT = 0, - /* 8 bytes */ - INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA = 128, - /* 8 bytes */ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128, diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index ab725a6ca0ac..fa385218ce92 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2308,12 +2308,6 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) return batch; } -static u32 slm_offset(struct intel_engine_cs *engine) -{ - return intel_gt_scratch_offset(engine->gt, - INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA); -} - /* * Typically we only have one indirect_ctx and per_ctx batch buffer which are * initialized at the beginning and shared across all contexts but this field @@ -2342,10 +2336,10 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) /* Actual scratch location is at 128 bytes offset */ batch = gen8_emit_pipe_control(batch, PIPE_CONTROL_FLUSH_L3 | - PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_STORE_DATA_INDEX | PIPE_CONTROL_CS_STALL | PIPE_CONTROL_QW_WRITE, - slm_offset(engine)); + LRC_PPHWSP_SCRATCH_ADDR); *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; @@ -3052,7 +3046,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode) } *cs++ = cmd; - *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; + *cs++ = LRC_PPHWSP_SCRATCH_ADDR; *cs++ = 0; /* upper addr */ *cs++ = 0; /* value */ intel_ring_advance(request, 
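A note on the mechanics, as I read the diff below (hedged, the authoritative post-sync semantics are in Bspec): LRC_PPHWSP_SCRATCH_ADDR is 0x34 * sizeof(u32) = 0xd0, i.e. an offset inside the per-process hardware status page, and the PIPE_CONTROL flag changes from PIPE_CONTROL_GLOBAL_GTT_IVB to PIPE_CONTROL_STORE_DATA_INDEX so that this value is treated as an index into the HWSP rather than a GGTT address; the MI_FLUSH_DW path likewise drops MI_FLUSH_DW_USE_GTT and writes to the same per-context scratch dword.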
cs); @@ -3063,10 +3057,6 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode) static int gen8_emit_flush_render(struct i915_request *request, u32 mode) { - struct intel_engine_cs *engine = request->engine; - u32 scratch_addr = - intel_gt_scratch_offset(engine->gt, - INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); bool vf_flush_wa = false, dc_flush_wa = false; u32 *cs, flags = 0; int len; @@ -3088,7 +3078,7 @@ static int gen8_emit_flush_render(struct i915_request *request, flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; flags |= PIPE_CONTROL_QW_WRITE; - flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + flags |= PIPE_CONTROL_STORE_DATA_INDEX; /* * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL @@ -3121,7 +3111,7 @@ static int gen8_emit_flush_render(struct i915_request *request, cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE, 0); - cs = gen8_emit_pipe_control(cs, flags, scratch_addr); + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); if (dc_flush_wa) cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0); @@ -3134,11 +3124,6 @@ static int gen8_emit_flush_render(struct i915_request *request, static int gen11_emit_flush_render(struct i915_request *request, u32 mode) { - struct intel_engine_cs *engine = request->engine; - const u32 scratch_addr = - intel_gt_scratch_offset(engine->gt, - INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); - if (mode & EMIT_FLUSH) { u32 *cs; u32 flags = 0; @@ -3151,13 +3136,13 @@ static int gen11_emit_flush_render(struct i915_request *request, flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; flags |= PIPE_CONTROL_FLUSH_ENABLE; flags |= PIPE_CONTROL_QW_WRITE; - flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + flags |= PIPE_CONTROL_STORE_DATA_INDEX; cs = intel_ring_begin(request, 6); if (IS_ERR(cs)) return PTR_ERR(cs); - cs = gen8_emit_pipe_control(cs, flags, scratch_addr); + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); intel_ring_advance(request, cs); } @@ -3175,13 +3160,13 @@ static int gen11_emit_flush_render(struct i915_request *request, flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; flags |= PIPE_CONTROL_QW_WRITE; - flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + flags |= PIPE_CONTROL_STORE_DATA_INDEX; cs = intel_ring_begin(request, 6); if (IS_ERR(cs)) return PTR_ERR(cs); - cs = gen8_emit_pipe_control(cs, flags, scratch_addr); + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); intel_ring_advance(request, cs); } @@ -3196,10 +3181,6 @@ static u32 preparser_disable(bool state) static int gen12_emit_flush_render(struct i915_request *request, u32 mode) { - const u32 scratch_addr = - intel_gt_scratch_offset(request->engine->gt, - INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); - if (mode & EMIT_FLUSH) { u32 flags = 0; u32 *cs; @@ -3210,7 +3191,7 @@ static int gen12_emit_flush_render(struct i915_request *request, flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; flags |= PIPE_CONTROL_FLUSH_ENABLE; - flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + flags |= PIPE_CONTROL_STORE_DATA_INDEX; flags |= PIPE_CONTROL_QW_WRITE; flags |= PIPE_CONTROL_CS_STALL; @@ -3219,7 +3200,7 @@ static int gen12_emit_flush_render(struct i915_request *request, if (IS_ERR(cs)) return PTR_ERR(cs); - cs = gen8_emit_pipe_control(cs, flags, scratch_addr); + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); intel_ring_advance(request, cs); } @@ -3235,7 +3216,7 @@ static int gen12_emit_flush_render(struct i915_request *request, flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; flags |= 
PIPE_CONTROL_STATE_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + flags |= PIPE_CONTROL_STORE_DATA_INDEX; flags |= PIPE_CONTROL_QW_WRITE; flags |= PIPE_CONTROL_CS_STALL; @@ -3251,7 +3232,7 @@ static int gen12_emit_flush_render(struct i915_request *request, */ *cs++ = preparser_disable(true); - cs = gen8_emit_pipe_control(cs, flags, scratch_addr); + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); *cs++ = preparser_disable(false); intel_ring_advance(request, cs); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h index dc0252e0589e..66ac616361c1 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc.h @@ -104,6 +104,10 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine); */ #define LRC_HEADER_PAGES LRC_PPHWSP_PN +/* Space within PPHWSP reserved to be used as scratch */ +#define LRC_PPHWSP_SCRATCH 0x34 +#define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32)) + void intel_execlists_set_default_submission(struct intel_engine_cs *engine); void intel_lr_context_reset(struct intel_engine_cs *engine, -- cgit v1.2.3 From 7d5255e0ced46a10131175bf65789a0c1a7806a4 Mon Sep 17 00:00:00 2001 From: Michał Winiarski Date: Thu, 26 Sep 2019 14:31:42 +0100 Subject: drm/i915: Adjust length of MI_LOAD_REGISTER_REG MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Default length value of MI_LOAD_REGISTER_REG is 1. Also move it out of cmd-parser-only registers since we're going to use it in i915. Signed-off-by: Michał Winiarski Cc: Chris Wilson Cc: Jani Nikula Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190926133142.2838-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index f78b13d74e17..9211b1ad401b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -152,6 +152,7 @@ #define MI_FLUSH_DW_USE_PPGTT (0<<2) #define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1) #define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2) +#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1) #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) #define MI_BATCH_NON_SECURE (1) /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ @@ -256,7 +257,6 @@ #define MI_CLFLUSH MI_INSTR(0x27, 0) #define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0) #define MI_REPORT_PERF_COUNT_GGTT (1<<0) -#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0) #define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0) #define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0) #define MI_STORE_URB_MEM MI_INSTR(0x2D, 0) -- cgit v1.2.3 From 132dfc78d3eb51b6fc8e667183b3572d41e3fe9f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 26 Sep 2019 12:56:44 +0200 Subject: drm/i915: Drop the IRQ-off asserts The lockdep_assert_irqs_disabled() check is needless. The previous lockdep_assert_held() check ensures that the lock is acquired and while the lock is acquired lockdep also prints a warning if the interrupts are not disabled if they have to be. These IRQ-off asserts trigger on PREEMPT_RT because the locks become sleeping locks and do not really disable interrupts. Remove lockdep_assert_irqs_disabled(). 
Reported-by: Clark Williams Signed-off-by: Sebastian Andrzej Siewior Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190926105644.16703-3-bigeasy@linutronix.de --- drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index 09c68dda2098..0c6a1b11e259 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -120,7 +120,6 @@ __dma_fence_signal__notify(struct dma_fence *fence, struct dma_fence_cb *cur, *tmp; lockdep_assert_held(fence->lock); - lockdep_assert_irqs_disabled(); list_for_each_entry_safe(cur, tmp, list, node) { INIT_LIST_HEAD(&cur->node); @@ -275,7 +274,6 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) bool i915_request_enable_breadcrumb(struct i915_request *rq) { lockdep_assert_held(&rq->lock); - lockdep_assert_irqs_disabled(); if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) { struct intel_breadcrumbs *b = &rq->engine->breadcrumbs; @@ -325,7 +323,6 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq) struct intel_breadcrumbs *b = &rq->engine->breadcrumbs; lockdep_assert_held(&rq->lock); - lockdep_assert_irqs_disabled(); /* * We must wait for b->irq_lock so that we know the interrupt handler -- cgit v1.2.3 From e3792238c1dde5059e44e0f14899497ac875443d Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 26 Sep 2019 12:56:43 +0200 Subject: drm/i915: Don't disable interrupts for intel_engine_breadcrumbs_irq() The function intel_engine_breadcrumbs_irq() is always invoked from an interrupt handler and for that reason it invokes (as an optimisation) only spin_lock() for locking assuming that the interrupts are already disabled. The function intel_engine_signal_breadcrumbs() is provided to disable interrupts while the former function is invoked so that assumption is also true for callers from preemptible context. On PREEMPT_RT local_irq_disable() really disables interrupts and this forbids to invoke spin_lock() which becomes a sleeping spinlock. This is also problematic with `threadirqs' in conjunction with irq_work. With force threading the interrupt handler, the handler is invoked with disabled BH but with interrupts enabled. This is okay and the lock itself is never acquired in IRQ context. This changes with irq_work (signal_irq_work()) which _still_ invokes intel_engine_breadcrumbs_irq() from IRQ context. Lockdep should see this and complain. Acquire the locks in intel_engine_breadcrumbs_irq() with _irqsave() suffix and let all callers invoke intel_engine_breadcrumbs_irq() directly instead using intel_engine_signal_breadcrumbs(). 
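Reduced to its locking pattern, the change below looks roughly like this (a simplified sketch, not a verbatim excerpt of the driver):

static void breadcrumbs_signal_sketch(struct intel_breadcrumbs *b)
{
	unsigned long flags;

	/*
	 * Before: callers in preemptible context had to wrap the call in
	 * local_irq_disable()/local_irq_enable() because the function used
	 * a plain spin_lock(), assuming interrupts were already disabled.
	 *
	 * After: the function saves and restores the interrupt state
	 * itself, which is valid from both hardirq and process context,
	 * and also behaves correctly when the lock becomes a sleeping
	 * lock on PREEMPT_RT.
	 */
	spin_lock_irqsave(&b->irq_lock, flags);
	/* ... move completed requests onto a local signal list ... */
	spin_unlock_irqrestore(&b->irq_lock, flags);
}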
Reported-by: Clark Williams Signed-off-by: Sebastian Andrzej Siewior Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190926105644.16703-2-bigeasy@linutronix.de --- drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 16 +++++----------- drivers/gpu/drm/i915/gt/intel_engine.h | 1 - drivers/gpu/drm/i915/gt/intel_hangcheck.c | 2 +- drivers/gpu/drm/i915/gt/intel_reset.c | 2 +- 4 files changed, 7 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index 0c6a1b11e259..55317081d48b 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -133,9 +133,10 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine) const ktime_t timestamp = ktime_get(); struct intel_context *ce, *cn; struct list_head *pos, *next; + unsigned long flags; LIST_HEAD(signal); - spin_lock(&b->irq_lock); + spin_lock_irqsave(&b->irq_lock, flags); if (b->irq_armed && list_empty(&b->signalers)) __intel_breadcrumbs_disarm_irq(b); @@ -181,30 +182,23 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine) } } - spin_unlock(&b->irq_lock); + spin_unlock_irqrestore(&b->irq_lock, flags); list_for_each_safe(pos, next, &signal) { struct i915_request *rq = list_entry(pos, typeof(*rq), signal_link); struct list_head cb_list; - spin_lock(&rq->lock); + spin_lock_irqsave(&rq->lock, flags); list_replace(&rq->fence.cb_list, &cb_list); __dma_fence_signal__timestamp(&rq->fence, timestamp); __dma_fence_signal__notify(&rq->fence, &cb_list); - spin_unlock(&rq->lock); + spin_unlock_irqrestore(&rq->lock, flags); i915_request_put(rq); } } -void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine) -{ - local_irq_disable(); - intel_engine_breadcrumbs_irq(engine); - local_irq_enable(); -} - static void signal_irq_work(struct irq_work *work) { struct intel_engine_cs *engine = diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index d3c6993f4f46..c9e8c8ccbd47 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -335,7 +335,6 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine); void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); -void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine); void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine); static inline void diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c index 40f62f780be5..9814b18b32ad 100644 --- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c @@ -284,7 +284,7 @@ static void hangcheck_elapsed(struct work_struct *work) for_each_engine(engine, gt->i915, id) { struct hangcheck hc; - intel_engine_signal_breadcrumbs(engine); + intel_engine_breadcrumbs_irq(engine); hangcheck_load_sample(engine, &hc); hangcheck_accumulate_sample(engine, &hc); diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index ec978b4d1be3..d08226f5bea5 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -711,7 +711,7 @@ static void reset_finish_engine(struct intel_engine_cs *engine) engine->reset.finish(engine); intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); - intel_engine_signal_breadcrumbs(engine); + 
intel_engine_breadcrumbs_irq(engine); } static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake) -- cgit v1.2.3 From 56316cbc9c986ce907bfcf4f3762f2494e44f439 Mon Sep 17 00:00:00 2001 From: Anna Karas Date: Thu, 26 Sep 2019 15:21:58 +0300 Subject: drm/i915/perf: Fix use of kernel-doc format in structure members Insert structure members names into their descriptions to follow kernel-doc format. Cc: Chris Wilson Signed-off-by: Anna Karas Reviewed-by: Chris Wilson Acked-by: Lionel Landwerlin Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190926122158.13028-1-anna.karas@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fcf7423075ef..b3c7dbc1832a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1134,7 +1134,7 @@ struct i915_perf_stream { struct i915_oa_config *oa_config; /** - * The OA context specific information. + * @pinned_ctx: The OA context specific information. */ struct intel_context *pinned_ctx; u32 specific_ctx_id; @@ -1148,7 +1148,7 @@ struct i915_perf_stream { int period_exponent; /** - * State of the OA buffer. + * @oa_buffer: State of the OA buffer. */ struct { struct i915_vma *vma; @@ -1159,7 +1159,7 @@ struct i915_perf_stream { int size_exponent; /** - * Locks reads and writes to all head/tail state + * @ptr_lock: Locks reads and writes to all head/tail state * * Consider: the head and tail pointer state needs to be read * consistently from a hrtimer callback (atomic context) and @@ -1181,7 +1181,7 @@ struct i915_perf_stream { spinlock_t ptr_lock; /** - * One 'aging' tail pointer and one 'aged' tail pointer ready to + * @tails: One 'aging' tail pointer and one 'aged' tail pointer ready to * used for reading. * * Initial values of 0xffffffff are invalid and imply that an @@ -1193,18 +1193,18 @@ struct i915_perf_stream { } tails[2]; /** - * Index for the aged tail ready to read() data up to. + * @aged_tail_idx: Index for the aged tail ready to read() data up to. */ unsigned int aged_tail_idx; /** - * A monotonic timestamp for when the current aging tail pointer + * @aging_timestamp: A monotonic timestamp for when the current aging tail pointer * was read; used to determine when it is old enough to trust. */ u64 aging_timestamp; /** - * Although we can always read back the head pointer register, + * @head: Although we can always read back the head pointer register, * we prefer to avoid trusting the HW state, just to avoid any * risk that some hardware condition could * somehow bump the * head pointer unpredictably and cause us to forward the wrong -- cgit v1.2.3 From f968688f44f529f96a64c9853fb2fb5d0a329aff Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 26 Sep 2019 12:44:39 +0900 Subject: nvme: Move ctrl sqsize to generic space This isn't specific to fabrics. 
Signed-off-by: Keith Busch Signed-off-by: Sagi Grimberg --- drivers/nvme/host/nvme.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index b5013c101b35..38a83ef5bcd3 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -221,6 +221,7 @@ struct nvme_ctrl { u16 oacs; u16 nssa; u16 nr_streams; + u16 sqsize; u32 max_namespaces; atomic_t abort_limit; u8 vwc; @@ -269,7 +270,6 @@ struct nvme_ctrl { u16 hmmaxd; /* Fabrics only */ - u16 sqsize; u32 ioccsz; u32 iorcsz; u16 icdoff; -- cgit v1.2.3 From 74b2089a105f43c33d2f2124bd03d1085fe7c9fc Mon Sep 17 00:00:00 2001 From: Michał Winiarski Date: Thu, 26 Sep 2019 12:06:33 +0200 Subject: drm/i915: Add definitions for MI_MATH command MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can use it in i915 for updating parts of unmasked registers from within a batch. We're also adding Gen8+ versions of CS_GPR registers (aka MI_MATH_REG in the coprocessor). Signed-off-by: Michał Winiarski Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190926100635.9416-4-michal.winiarski@intel.com --- drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 23 +++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 4 ++++ 2 files changed, 27 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index 9211b1ad401b..b0227ab2fe1b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -241,6 +241,29 @@ #define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0) #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ +#define MI_MATH(x) MI_INSTR(0x1a, (x) - 1) +#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2)) +/* Opcodes for MI_MATH_INSTR */ +#define MI_MATH_NOOP MI_MATH_INSTR(0x000, 0x0, 0x0) +#define MI_MATH_LOAD(op1, op2) MI_MATH_INSTR(0x080, op1, op2) +#define MI_MATH_LOADINV(op1, op2) MI_MATH_INSTR(0x480, op1, op2) +#define MI_MATH_LOAD0(op1) MI_MATH_INSTR(0x081, op1) +#define MI_MATH_LOAD1(op1) MI_MATH_INSTR(0x481, op1) +#define MI_MATH_ADD MI_MATH_INSTR(0x100, 0x0, 0x0) +#define MI_MATH_SUB MI_MATH_INSTR(0x101, 0x0, 0x0) +#define MI_MATH_AND MI_MATH_INSTR(0x102, 0x0, 0x0) +#define MI_MATH_OR MI_MATH_INSTR(0x103, 0x0, 0x0) +#define MI_MATH_XOR MI_MATH_INSTR(0x104, 0x0, 0x0) +#define MI_MATH_STORE(op1, op2) MI_MATH_INSTR(0x180, op1, op2) +#define MI_MATH_STOREINV(op1, op2) MI_MATH_INSTR(0x580, op1, op2) +/* Registers used as operands in MI_MATH_INSTR */ +#define MI_MATH_REG(x) (x) +#define MI_MATH_REG_SRCA 0x20 +#define MI_MATH_REG_SRCB 0x21 +#define MI_MATH_REG_ACCU 0x31 +#define MI_MATH_REG_ZF 0x32 +#define MI_MATH_REG_CF 0x33 + /* * Commands used only by the command parser */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e752de9470bd..5e30f1a14624 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2483,6 +2483,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ +/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */ +#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8) +#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4) + #define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4) #define 
RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */ #define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28) -- cgit v1.2.3 From a0ecd6fdbf5d648123a7315c695fb6850d702835 Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Tue, 24 Sep 2019 23:30:30 -0500 Subject: drm/komeda: prevent memory leak in komeda_wb_connector_add In komeda_wb_connector_add if drm_writeback_connector_init fails the allocated memory for kwb_conn should be released. Signed-off-by: Navid Emamdoost Reviewed-by: James Qian Wang (Arm Technology China) Signed-off-by: james qian wang (Arm Technology China) Link: https://patchwork.freedesktop.org/patch/msgid/20190925043031.32308-1-navid.emamdoost@gmail.com --- drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c index 23fbee268119..b72840c06ab7 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c @@ -165,8 +165,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms, &komeda_wb_encoder_helper_funcs, formats, n_formats); komeda_put_fourcc_list(formats); - if (err) + if (err) { + kfree(kwb_conn); return err; + } drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs); -- cgit v1.2.3 From a3f56e7da5231c902925711940835b6716f63f73 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 25 Sep 2019 20:34:46 +0100 Subject: drm/i915/selftests: Exercise concurrent submission to all engines The simplest and most maximal submission we can do, a thread to submit requests unto each engine. Signed-off-by: Chris Wilson Cc: Mika Kuoppala Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20190925193446.26007-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/selftests/i915_request.c | 125 ++++++++++++++++++++++++++ 1 file changed, 125 insertions(+) diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index b3688543ed7d..57cd4180d06c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -1062,6 +1062,130 @@ out_unlock: return err; } +static int __live_parallel_engine1(void *arg) +{ + struct intel_engine_cs *engine = arg; + IGT_TIMEOUT(end_time); + unsigned long count; + + count = 0; + do { + struct i915_request *rq; + int err; + + mutex_lock(&engine->i915->drm.struct_mutex); + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) { + mutex_unlock(&engine->i915->drm.struct_mutex); + return PTR_ERR(rq); + } + + i915_request_get(rq); + i915_request_add(rq); + mutex_unlock(&engine->i915->drm.struct_mutex); + + err = 0; + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -ETIME; + i915_request_put(rq); + if (err) + return err; + + count++; + } while (!__igt_timeout(end_time, NULL)); + + pr_info("%s: %lu request + sync\n", engine->name, count); + return 0; +} + +static int __live_parallel_engineN(void *arg) +{ + struct intel_engine_cs *engine = arg; + IGT_TIMEOUT(end_time); + unsigned long count; + + count = 0; + do { + struct i915_request *rq; + + mutex_lock(&engine->i915->drm.struct_mutex); + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) { + mutex_unlock(&engine->i915->drm.struct_mutex); + return PTR_ERR(rq); + } + + i915_request_add(rq); + mutex_unlock(&engine->i915->drm.struct_mutex); + + count++; + } while 
(!__igt_timeout(end_time, NULL)); + + pr_info("%s: %lu requests\n", engine->name, count); + return 0; +} + +static int live_parallel_engines(void *arg) +{ + struct drm_i915_private *i915 = arg; + static int (* const func[])(void *arg) = { + __live_parallel_engine1, + __live_parallel_engineN, + NULL, + }; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int (* const *fn)(void *arg); + int err = 0; + + /* + * Check we can submit requests to all engines concurrently. This + * tests that we load up the system maximally. + */ + + for (fn = func; !err && *fn; fn++) { + struct task_struct *tsk[I915_NUM_ENGINES] = {}; + struct igt_live_test t; + + mutex_lock(&i915->drm.struct_mutex); + err = igt_live_test_begin(&t, i915, __func__, ""); + mutex_unlock(&i915->drm.struct_mutex); + if (err) + break; + + for_each_engine(engine, i915, id) { + tsk[id] = kthread_run(*fn, engine, + "igt/parallel:%s", + engine->name); + if (IS_ERR(tsk[id])) { + err = PTR_ERR(tsk[id]); + break; + } + get_task_struct(tsk[id]); + } + + for_each_engine(engine, i915, id) { + int status; + + if (IS_ERR_OR_NULL(tsk[id])) + continue; + + status = kthread_stop(tsk[id]); + if (status && !err) + err = status; + + put_task_struct(tsk[id]); + } + + mutex_lock(&i915->drm.struct_mutex); + if (igt_live_test_end(&t)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + } + + return err; +} + static int max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine) { @@ -1240,6 +1364,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915) SUBTEST(live_nop_request), SUBTEST(live_all_engines), SUBTEST(live_sequential_engines), + SUBTEST(live_parallel_engines), SUBTEST(live_empty_request), SUBTEST(live_breadcrumbs_smoketest), }; -- cgit v1.2.3 From 6eeb4ef049e7cd89783e8ebe1ea2f1dac276f82c Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Tue, 24 Sep 2019 12:43:08 +0200 Subject: KVM: x86: assign two bits to track SPTE kinds Currently, we are overloading SPTE_SPECIAL_MASK to mean both "A/D bits unavailable" and MMIO, where the difference between the two is determined by mio_mask and mmio_value. However, the next patch will need two bits to distinguish availability of A/D bits from write protection. So, while at it give MMIO its own bit pattern, and move the two bits from bit 62 to bits 52..53 since Intel is allocating EPT page table bits from the top. Reviewed-by: Junaid Shahid Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 7 ------- arch/x86/kvm/mmu.c | 28 ++++++++++++++++++---------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 23edf56cf577..50eb430b0ad8 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -219,13 +219,6 @@ enum { PFERR_WRITE_MASK | \ PFERR_PRESENT_MASK) -/* - * The mask used to denote special SPTEs, which can be either MMIO SPTEs or - * Access Tracking SPTEs. We use bit 62 instead of bit 63 to avoid conflicting - * with the SVE bit in EPT PTEs. 
- */ -#define SPTE_SPECIAL_MASK (1ULL << 62) - /* apic attention bits */ #define KVM_APIC_CHECK_VAPIC 0 /* diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 5269aa057dfa..bac8d228d82b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -83,7 +83,16 @@ module_param(dbg, bool, 0644); #define PTE_PREFETCH_NUM 8 #define PT_FIRST_AVAIL_BITS_SHIFT 10 -#define PT64_SECOND_AVAIL_BITS_SHIFT 52 +#define PT64_SECOND_AVAIL_BITS_SHIFT 54 + +/* + * The mask used to denote special SPTEs, which can be either MMIO SPTEs or + * Access Tracking SPTEs. + */ +#define SPTE_SPECIAL_MASK (3ULL << 52) +#define SPTE_AD_ENABLED_MASK (0ULL << 52) +#define SPTE_AD_DISABLED_MASK (1ULL << 52) +#define SPTE_MMIO_MASK (3ULL << 52) #define PT64_LEVEL_BITS 9 @@ -219,12 +228,11 @@ static u64 __read_mostly shadow_present_mask; static u64 __read_mostly shadow_me_mask; /* - * SPTEs used by MMUs without A/D bits are marked with shadow_acc_track_value. - * Non-present SPTEs with shadow_acc_track_value set are in place for access - * tracking. + * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK; + * shadow_acc_track_mask is the set of bits to be cleared in non-accessed + * pages. */ static u64 __read_mostly shadow_acc_track_mask; -static const u64 shadow_acc_track_value = SPTE_SPECIAL_MASK; /* * The mask/shift to use for saving the original R/X bits when marking the PTE @@ -304,7 +312,7 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask) { BUG_ON((u64)(unsigned)access_mask != access_mask); BUG_ON((mmio_mask & mmio_value) != mmio_value); - shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK; + shadow_mmio_value = mmio_value | SPTE_MMIO_MASK; shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK; shadow_mmio_access_mask = access_mask; } @@ -323,7 +331,7 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) static inline bool spte_ad_enabled(u64 spte) { MMU_WARN_ON(is_mmio_spte(spte)); - return !(spte & shadow_acc_track_value); + return (spte & SPTE_SPECIAL_MASK) == SPTE_AD_ENABLED_MASK; } static inline u64 spte_shadow_accessed_mask(u64 spte) @@ -461,7 +469,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, { BUG_ON(!dirty_mask != !accessed_mask); BUG_ON(!accessed_mask && !acc_track_mask); - BUG_ON(acc_track_mask & shadow_acc_track_value); + BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK); shadow_user_mask = user_mask; shadow_accessed_mask = accessed_mask; @@ -2622,7 +2630,7 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep, shadow_user_mask | shadow_x_mask | shadow_me_mask; if (sp_ad_disabled(sp)) - spte |= shadow_acc_track_value; + spte |= SPTE_AD_DISABLED_MASK; else spte |= shadow_accessed_mask; @@ -2968,7 +2976,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, sp = page_header(__pa(sptep)); if (sp_ad_disabled(sp)) - spte |= shadow_acc_track_value; + spte |= SPTE_AD_DISABLED_MASK; /* * For the EPT case, shadow_present_mask is 0 if hardware -- cgit v1.2.3 From 1f4e5fc83a4217fc61b23370b07573827329d7bd Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 26 Sep 2019 18:47:59 +0200 Subject: KVM: x86: fix nested guest live migration with PML Shadow paging is fundamentally incompatible with the page-modification log, because the GPAs in the log come from the wrong memory map. In particular, for the EPT page-modification log, the GPAs in the log come from L2 rather than L1. (If there was a non-EPT page-modification log, we couldn't use it for shadow paging because it would log GVAs rather than GPAs). 
Therefore, we need to rely on write protection to record dirty pages. This has the side effect of bypassing PML, since writes now result in an EPT violation vmexit. This is relatively easy to add to KVM, because pretty much the only place that needs changing is spte_clear_dirty. The first access to the page already goes through the page fault path and records the correct GPA; it's only subsequent accesses that are wrong. Therefore, we can equip set_spte (where the first access happens) to record that the SPTE will have to be write protected, and then spte_clear_dirty will use this information to do the right thing. Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 39 ++++++++++++++++++++++++++++++++------- 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index bac8d228d82b..24c23c66b226 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -92,6 +92,7 @@ module_param(dbg, bool, 0644); #define SPTE_SPECIAL_MASK (3ULL << 52) #define SPTE_AD_ENABLED_MASK (0ULL << 52) #define SPTE_AD_DISABLED_MASK (1ULL << 52) +#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52) #define SPTE_MMIO_MASK (3ULL << 52) #define PT64_LEVEL_BITS 9 @@ -328,10 +329,27 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) return sp->role.ad_disabled; } +static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu) +{ + /* + * When using the EPT page-modification log, the GPAs in the log + * would come from L2 rather than L1. Therefore, we need to rely + * on write protection to record dirty pages. This also bypasses + * PML, since writes now result in a vmexit. + */ + return vcpu->arch.mmu == &vcpu->arch.guest_mmu; +} + static inline bool spte_ad_enabled(u64 spte) { MMU_WARN_ON(is_mmio_spte(spte)); - return (spte & SPTE_SPECIAL_MASK) == SPTE_AD_ENABLED_MASK; + return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK; +} + +static inline bool spte_ad_need_write_protect(u64 spte) +{ + MMU_WARN_ON(is_mmio_spte(spte)); + return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK; } static inline u64 spte_shadow_accessed_mask(u64 spte) @@ -1597,16 +1615,16 @@ static bool spte_clear_dirty(u64 *sptep) rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep); + MMU_WARN_ON(!spte_ad_enabled(spte)); spte &= ~shadow_dirty_mask; - return mmu_spte_update(sptep, spte); } -static bool wrprot_ad_disabled_spte(u64 *sptep) +static bool spte_wrprot_for_clear_dirty(u64 *sptep) { bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT, (unsigned long *)sptep); - if (was_writable) + if (was_writable && !spte_ad_enabled(*sptep)) kvm_set_pfn_dirty(spte_to_pfn(*sptep)); return was_writable; @@ -1625,10 +1643,10 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head) bool flush = false; for_each_rmap_spte(rmap_head, &iter, sptep) - if (spte_ad_enabled(*sptep)) - flush |= spte_clear_dirty(sptep); + if (spte_ad_need_write_protect(*sptep)) + flush |= spte_wrprot_for_clear_dirty(sptep); else - flush |= wrprot_ad_disabled_spte(sptep); + flush |= spte_clear_dirty(sptep); return flush; } @@ -1639,6 +1657,11 @@ static bool spte_set_dirty(u64 *sptep) rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep); + /* + * Similar to the !kvm_x86_ops->slot_disable_log_dirty case, + * do not bother adding back write access to pages marked + * SPTE_AD_WRPROT_ONLY_MASK. 
+ */ spte |= shadow_dirty_mask; return mmu_spte_update(sptep, spte); @@ -2977,6 +3000,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, sp = page_header(__pa(sptep)); if (sp_ad_disabled(sp)) spte |= SPTE_AD_DISABLED_MASK; + else if (kvm_vcpu_ad_need_write_protect(vcpu)) + spte |= SPTE_AD_WRPROT_ONLY_MASK; /* * For the EPT case, shadow_present_mask is 0 if hardware -- cgit v1.2.3 From 094444204570a5420d9e6ce3d4558877c3487856 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 26 Sep 2019 15:01:15 +0200 Subject: selftests: kvm: add test for dirty logging inside nested guests Check that accesses by nested guests are logged according to the L1 physical addresses rather than L2. Most of the patch is really adding EPT support to the testing framework. Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/Makefile | 1 + .../selftests/kvm/include/x86_64/processor.h | 3 + tools/testing/selftests/kvm/include/x86_64/vmx.h | 14 ++ tools/testing/selftests/kvm/lib/kvm_util.c | 2 +- .../testing/selftests/kvm/lib/kvm_util_internal.h | 3 + tools/testing/selftests/kvm/lib/x86_64/vmx.c | 201 ++++++++++++++++++++- .../selftests/kvm/x86_64/vmx_dirty_log_test.c | 156 ++++++++++++++++ 7 files changed, 377 insertions(+), 3 deletions(-) create mode 100644 tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 62c591f87dab..fd84b7a78dcf 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -22,6 +22,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/smm_test TEST_GEN_PROGS_x86_64 += x86_64/state_test TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test +TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test TEST_GEN_PROGS_x86_64 += clear_dirty_log_test diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 0c17f2ee685e..ff234018219c 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -1083,6 +1083,9 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits); #define VMX_BASIC_MEM_TYPE_WB 6LLU #define VMX_BASIC_INOUT 0x0040000000000000LLU +/* VMX_EPT_VPID_CAP bits */ +#define VMX_EPT_VPID_CAP_AD_BITS (1ULL << 21) + /* MSR_IA32_VMX_MISC bits */ #define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29) #define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h index 69b17055f63d..6ae5a47fe067 100644 --- a/tools/testing/selftests/kvm/include/x86_64/vmx.h +++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h @@ -569,6 +569,10 @@ struct vmx_pages { void *enlightened_vmcs_hva; uint64_t enlightened_vmcs_gpa; void *enlightened_vmcs; + + void *eptp_hva; + uint64_t eptp_gpa; + void *eptp; }; struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva); @@ -576,4 +580,14 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx); void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp); bool load_vmcs(struct vmx_pages *vmx); +void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, + uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot); +void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, + 
uint64_t nested_paddr, uint64_t paddr, uint64_t size, + uint32_t eptp_memslot); +void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm, + uint32_t memslot, uint32_t eptp_memslot); +void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm, + uint32_t eptp_memslot); + #endif /* SELFTEST_KVM_VMX_H */ diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 80a338b5403c..41cf45416060 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -705,7 +705,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, * on error (e.g. currently no memory region using memslot as a KVM * memory slot ID). */ -static struct userspace_mem_region * +struct userspace_mem_region * memslot2region(struct kvm_vm *vm, uint32_t memslot) { struct userspace_mem_region *region; diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h index f36262e0f655..ac50c42750cf 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h +++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h @@ -68,4 +68,7 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent); void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent); void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent); +struct userspace_mem_region * +memslot2region(struct kvm_vm *vm, uint32_t memslot); + #endif /* SELFTEST_KVM_UTIL_INTERNAL_H */ diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c index 9cef0455b819..fab8f6b0bf52 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c +++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c @@ -7,11 +7,39 @@ #include "test_util.h" #include "kvm_util.h" +#include "../kvm_util_internal.h" #include "processor.h" #include "vmx.h" +#define PAGE_SHIFT_4K 12 + +#define KVM_EPT_PAGE_TABLE_MIN_PADDR 0x1c0000 + bool enable_evmcs; +struct eptPageTableEntry { + uint64_t readable:1; + uint64_t writable:1; + uint64_t executable:1; + uint64_t memory_type:3; + uint64_t ignore_pat:1; + uint64_t page_size:1; + uint64_t accessed:1; + uint64_t dirty:1; + uint64_t ignored_11_10:2; + uint64_t address:40; + uint64_t ignored_62_52:11; + uint64_t suppress_ve:1; +}; + +struct eptPageTablePointer { + uint64_t memory_type:3; + uint64_t page_walk_length:3; + uint64_t ad_enabled:1; + uint64_t reserved_11_07:5; + uint64_t address:40; + uint64_t reserved_63_52:12; +}; int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id) { uint16_t evmcs_ver; @@ -174,15 +202,35 @@ bool load_vmcs(struct vmx_pages *vmx) */ static inline void init_vmcs_control_fields(struct vmx_pages *vmx) { + uint32_t sec_exec_ctl = 0; + vmwrite(VIRTUAL_PROCESSOR_ID, 0); vmwrite(POSTED_INTR_NV, 0); vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS)); - if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, 0)) + + if (vmx->eptp_gpa) { + uint64_t ept_paddr; + struct eptPageTablePointer eptp = { + .memory_type = VMX_BASIC_MEM_TYPE_WB, + .page_walk_length = 3, /* + 1 */ + .ad_enabled = !!(rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & VMX_EPT_VPID_CAP_AD_BITS), + .address = vmx->eptp_gpa >> PAGE_SHIFT_4K, + }; + + memcpy(&ept_paddr, &eptp, sizeof(ept_paddr)); + vmwrite(EPT_POINTER, ept_paddr); + sec_exec_ctl |= SECONDARY_EXEC_ENABLE_EPT; + } + + if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, sec_exec_ctl)) vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS); - else + else { 
vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS)); + GUEST_ASSERT(!sec_exec_ctl); + } + vmwrite(EXCEPTION_BITMAP, 0); vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0); vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */ @@ -327,3 +375,152 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp) init_vmcs_host_state(); init_vmcs_guest_state(guest_rip, guest_rsp); } + +void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, + uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot) +{ + uint16_t index[4]; + struct eptPageTableEntry *pml4e; + + TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use " + "unknown or unsupported guest mode, mode: 0x%x", vm->mode); + + TEST_ASSERT((nested_paddr % vm->page_size) == 0, + "Nested physical address not on page boundary,\n" + " nested_paddr: 0x%lx vm->page_size: 0x%x", + nested_paddr, vm->page_size); + TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn, + "Physical address beyond beyond maximum supported,\n" + " nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + paddr, vm->max_gfn, vm->page_size); + TEST_ASSERT((paddr % vm->page_size) == 0, + "Physical address not on page boundary,\n" + " paddr: 0x%lx vm->page_size: 0x%x", + paddr, vm->page_size); + TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, + "Physical address beyond beyond maximum supported,\n" + " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + paddr, vm->max_gfn, vm->page_size); + + index[0] = (nested_paddr >> 12) & 0x1ffu; + index[1] = (nested_paddr >> 21) & 0x1ffu; + index[2] = (nested_paddr >> 30) & 0x1ffu; + index[3] = (nested_paddr >> 39) & 0x1ffu; + + /* Allocate page directory pointer table if not present. */ + pml4e = vmx->eptp_hva; + if (!pml4e[index[3]].readable) { + pml4e[index[3]].address = vm_phy_page_alloc(vm, + KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot) + >> vm->page_shift; + pml4e[index[3]].writable = true; + pml4e[index[3]].readable = true; + pml4e[index[3]].executable = true; + } + + /* Allocate page directory table if not present. */ + struct eptPageTableEntry *pdpe; + pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size); + if (!pdpe[index[2]].readable) { + pdpe[index[2]].address = vm_phy_page_alloc(vm, + KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot) + >> vm->page_shift; + pdpe[index[2]].writable = true; + pdpe[index[2]].readable = true; + pdpe[index[2]].executable = true; + } + + /* Allocate page table if not present. */ + struct eptPageTableEntry *pde; + pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size); + if (!pde[index[1]].readable) { + pde[index[1]].address = vm_phy_page_alloc(vm, + KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot) + >> vm->page_shift; + pde[index[1]].writable = true; + pde[index[1]].readable = true; + pde[index[1]].executable = true; + } + + /* Fill in page table entry. */ + struct eptPageTableEntry *pte; + pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size); + pte[index[0]].address = paddr >> vm->page_shift; + pte[index[0]].writable = true; + pte[index[0]].readable = true; + pte[index[0]].executable = true; + + /* + * For now mark these as accessed and dirty because the only + * testcase we have needs that. Can be reconsidered later. 
+ */ + pte[index[0]].accessed = true; + pte[index[0]].dirty = true; +} + +/* + * Map a range of EPT guest physical addresses to the VM's physical address + * + * Input Args: + * vm - Virtual Machine + * nested_paddr - Nested guest physical address to map + * paddr - VM Physical Address + * size - The size of the range to map + * eptp_memslot - Memory region slot for new virtual translation tables + * + * Output Args: None + * + * Return: None + * + * Within the VM given by vm, creates a nested guest translation for the + * page range starting at nested_paddr to the page range starting at paddr. + */ +void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, + uint64_t nested_paddr, uint64_t paddr, uint64_t size, + uint32_t eptp_memslot) +{ + size_t page_size = vm->page_size; + size_t npages = size / page_size; + + TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow"); + TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); + + while (npages--) { + nested_pg_map(vmx, vm, nested_paddr, paddr, eptp_memslot); + nested_paddr += page_size; + paddr += page_size; + } +} + +/* Prepare an identity extended page table that maps all the + * physical pages in VM. + */ +void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm, + uint32_t memslot, uint32_t eptp_memslot) +{ + sparsebit_idx_t i, last; + struct userspace_mem_region *region = + memslot2region(vm, memslot); + + i = (region->region.guest_phys_addr >> vm->page_shift) - 1; + last = i + (region->region.memory_size >> vm->page_shift); + for (;;) { + i = sparsebit_next_clear(region->unused_phy_pages, i); + if (i > last) + break; + + nested_map(vmx, vm, + (uint64_t)i << vm->page_shift, + (uint64_t)i << vm->page_shift, + 1 << vm->page_shift, + eptp_memslot); + } +} + +void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm, + uint32_t eptp_memslot) +{ + vmx->eptp = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0); + vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp); + vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp); +} diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c new file mode 100644 index 000000000000..0bca1cfe2c1e --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KVM dirty page logging test + * + * Copyright (C) 2018, Red Hat, Inc. + */ + +#define _GNU_SOURCE /* for program_invocation_name */ + +#include +#include +#include +#include + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" +#include "vmx.h" + +#define VCPU_ID 1 + +/* The memory slot index to track dirty pages */ +#define TEST_MEM_SLOT_INDEX 1 +#define TEST_MEM_SIZE 3 + +/* L1 guest test virtual memory offset */ +#define GUEST_TEST_MEM 0xc0000000 + +/* L2 guest test virtual memory offset */ +#define NESTED_TEST_MEM1 0xc0001000 +#define NESTED_TEST_MEM2 0xc0002000 + +static void l2_guest_code(void) +{ + *(volatile uint64_t *)NESTED_TEST_MEM1; + *(volatile uint64_t *)NESTED_TEST_MEM1 = 1; + GUEST_SYNC(true); + GUEST_SYNC(false); + + *(volatile uint64_t *)NESTED_TEST_MEM2 = 1; + GUEST_SYNC(true); + *(volatile uint64_t *)NESTED_TEST_MEM2 = 1; + GUEST_SYNC(true); + GUEST_SYNC(false); + + /* Exit to L1 and never come back. 
*/ + vmcall(); +} + +void l1_guest_code(struct vmx_pages *vmx) +{ +#define L2_GUEST_STACK_SIZE 64 + unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; + + GUEST_ASSERT(vmx->vmcs_gpa); + GUEST_ASSERT(prepare_for_vmx_operation(vmx)); + GUEST_ASSERT(load_vmcs(vmx)); + + prepare_vmcs(vmx, l2_guest_code, + &l2_guest_stack[L2_GUEST_STACK_SIZE]); + + GUEST_SYNC(false); + GUEST_ASSERT(!vmlaunch()); + GUEST_SYNC(false); + GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); + GUEST_DONE(); +} + +int main(int argc, char *argv[]) +{ + vm_vaddr_t vmx_pages_gva = 0; + struct vmx_pages *vmx; + unsigned long *bmap; + uint64_t *host_test_mem; + + struct kvm_vm *vm; + struct kvm_run *run; + struct ucall uc; + bool done = false; + + /* Create VM */ + vm = vm_create_default(VCPU_ID, 0, l1_guest_code); + vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid()); + vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva); + vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); + run = vcpu_state(vm, VCPU_ID); + + /* Add an extra memory slot for testing dirty logging */ + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, + GUEST_TEST_MEM, + TEST_MEM_SLOT_INDEX, + TEST_MEM_SIZE, + KVM_MEM_LOG_DIRTY_PAGES); + + /* + * Add an identity map for GVA range [0xc0000000, 0xc0002000). This + * affects both L1 and L2. However... + */ + virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM, + TEST_MEM_SIZE * 4096, 0); + + /* + * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to + * 0xc0000000. + * + * Note that prepare_eptp should be called only L1's GPA map is done, + * meaning after the last call to virt_map. + */ + prepare_eptp(vmx, vm, 0); + nested_map_memslot(vmx, vm, 0, 0); + nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0); + nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0); + + bmap = bitmap_alloc(TEST_MEM_SIZE); + host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM); + + while (!done) { + memset(host_test_mem, 0xaa, TEST_MEM_SIZE * 4096); + _vcpu_run(vm, VCPU_ID); + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Unexpected exit reason: %u (%s),\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); + + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_ABORT: + TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0], + __FILE__, uc.args[1]); + /* NOT REACHED */ + case UCALL_SYNC: + /* + * The nested guest wrote at offset 0x1000 in the memslot, but the + * dirty bitmap must be filled in according to L1 GPA, not L2. 
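A side note on the assertions below (illustration only, not part of the test): NESTED_TEST_MEM1 and NESTED_TEST_MEM2 both alias L1 GPA 0xc0000000, so only bit 0 of the slot's dirty bitmap should ever be set, and the bitmap is indexed by page offset within the memslot in L1 GPA space. A hypothetical helper showing that indexing, assuming the usual long-array, little-endian bit layout of KVM dirty bitmaps:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper, for illustration only. */
static bool slot_page_is_dirty(const unsigned long *bmap,
			       uint64_t l1_gpa, uint64_t slot_base_gpa,
			       unsigned int page_shift)
{
	uint64_t page = (l1_gpa - slot_base_gpa) >> page_shift;
	unsigned int bits = 8 * sizeof(unsigned long);

	return (bmap[page / bits] >> (page % bits)) & 1;
}

With slot_base_gpa == GUEST_TEST_MEM, an L2 write that lands on 0xc0002000 still resolves to page 0 here.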
+ */ + kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap); + if (uc.args[1]) { + TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean\n"); + TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest\n"); + } else { + TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty\n"); + TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest\n"); + } + + TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty\n"); + TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest\n"); + TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty\n"); + TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest\n"); + break; + case UCALL_DONE: + done = true; + break; + default: + TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd); + } + } +} -- cgit v1.2.3 From c113236718e89561f309705f1a78126b3df93a21 Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Fri, 27 Sep 2019 12:08:49 +0100 Subject: drm/i915: Extract GT render sleep (rc6) management Continuing the theme of breaking intel_pm.c up in a reasonable chunk of powermanagement utilities, pull out the rc6 setup into its GT handler. Based on a patch by Chris Wilson. Signed-off-by: Andi Shyti Cc: Chris Wilson Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190919143840.20384-1-andi.shyti@intel.com Link: https://patchwork.freedesktop.org/patch/msgid/20190927110849.28734-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/Makefile | 1 + drivers/gpu/drm/i915/gem/i915_gem_pm.c | 2 +- drivers/gpu/drm/i915/gt/intel_engine_pm.c | 1 + drivers/gpu/drm/i915/gt/intel_gt.c | 5 +- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 92 ++- drivers/gpu/drm/i915/gt/intel_gt_pm.h | 11 +- drivers/gpu/drm/i915/gt/intel_gt_types.h | 3 + drivers/gpu/drm/i915/gt/intel_rc6.c | 712 ++++++++++++++++++ drivers/gpu/drm/i915/gt/intel_rc6.h | 25 + drivers/gpu/drm/i915/gt/intel_rc6_types.h | 28 + drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 50 ++ drivers/gpu/drm/i915/i915_debugfs.c | 11 +- drivers/gpu/drm/i915/i915_drv.h | 7 - drivers/gpu/drm/i915/i915_pmu.c | 9 +- drivers/gpu/drm/i915/i915_sysfs.c | 4 +- drivers/gpu/drm/i915/intel_pm.c | 794 +-------------------- drivers/gpu/drm/i915/intel_pm.h | 3 - .../gpu/drm/i915/selftests/i915_live_selftests.h | 1 + 18 files changed, 916 insertions(+), 843 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/intel_rc6.c create mode 100644 drivers/gpu/drm/i915/gt/intel_rc6.h create mode 100644 drivers/gpu/drm/i915/gt/intel_rc6_types.h create mode 100644 drivers/gpu/drm/i915/gt/selftest_gt_pm.c diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 6313e7b4bd78..8d407ff7d59d 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -85,6 +85,7 @@ gt-y += \ gt/intel_gt_pm_irq.o \ gt/intel_hangcheck.o \ gt/intel_lrc.o \ + gt/intel_rc6.o \ gt/intel_renderstate.o \ gt/intel_reset.o \ gt/intel_ringbuffer.o \ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index a11ad4d914ca..cca192ecff8b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -137,7 +137,6 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt) bool i915_gem_load_power_context(struct drm_i915_private *i915) { - intel_gt_pm_enable(&i915->gt); return switch_to_kernel_context_sync(&i915->gt); } @@ -188,6 +187,7 @@ void i915_gem_suspend(struct drm_i915_private *i915) 
i915_gem_drain_freed_objects(i915); intel_uc_suspend(&i915->gt.uc); + intel_gt_suspend(&i915->gt); } static struct drm_i915_gem_object *first_mm_object(struct list_head *list) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index ce61c83d68e9..66fc49b76ea8 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -11,6 +11,7 @@ #include "intel_engine_pool.h" #include "intel_gt.h" #include "intel_gt_pm.h" +#include "intel_rc6.h" static int __engine_unpark(struct intel_wakeref *wf) { diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index eef9bdae8ebb..0e5909ee0657 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -7,6 +7,7 @@ #include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_mocs.h" +#include "intel_rc6.h" #include "intel_uncore.h" #include "intel_pm.h" @@ -369,6 +370,8 @@ int intel_gt_init(struct intel_gt *gt) if (err) return err; + intel_gt_pm_init(gt); + return 0; } @@ -387,8 +390,8 @@ void intel_gt_driver_release(struct intel_gt *gt) { /* Paranoia: make sure we have disabled everything before we exit. */ intel_gt_pm_disable(gt); + intel_gt_pm_fini(gt); - intel_cleanup_gt_powersave(gt->i915); intel_gt_fini_scratch(gt); } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 2ccf8cacaa0a..42f175d9b98c 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -11,6 +11,7 @@ #include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_pm.h" +#include "intel_rc6.h" #include "intel_wakeref.h" static void pm_notify(struct intel_gt *gt, int state) @@ -90,6 +91,16 @@ void intel_gt_pm_init_early(struct intel_gt *gt) BLOCKING_INIT_NOTIFIER_HEAD(>->pm_notifications); } +void intel_gt_pm_init(struct intel_gt *gt) +{ + /* + * Enabling power-management should be "self-healing". If we cannot + * enable a feature, simply leave it disabled with a notice to the + * user. + */ + intel_rc6_init(>->rc6); +} + static bool reset_engines(struct intel_gt *gt) { if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) @@ -124,40 +135,14 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force) __intel_engine_reset(engine, false); } -static bool is_mock_device(const struct intel_gt *gt) -{ - return I915_SELFTEST_ONLY(gt->awake == -1); -} - -void intel_gt_pm_enable(struct intel_gt *gt) +void intel_gt_pm_disable(struct intel_gt *gt) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - /* Powersaving is controlled by the host when inside a VM */ - if (intel_vgpu_active(gt->i915)) - return; - - if (is_mock_device(gt)) - return; - - intel_gt_pm_get(gt); - - for_each_engine(engine, gt->i915, id) { - intel_engine_pm_get(engine); - engine->serial++; /* force kernel context reload */ - intel_engine_pm_put(engine); - } - - intel_gt_pm_put(gt); + intel_sanitize_gt_powersave(gt->i915); } -void intel_gt_pm_disable(struct intel_gt *gt) +void intel_gt_pm_fini(struct intel_gt *gt) { - if (is_mock_device(gt)) - return; - - intel_sanitize_gt_powersave(gt->i915); + intel_rc6_fini(>->rc6); } int intel_gt_resume(struct intel_gt *gt) @@ -173,6 +158,9 @@ int intel_gt_resume(struct intel_gt *gt) * allowing us to fixup the user contexts on their first pin. 
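For orientation (an editorial aside, not code from the patch): the new struct intel_rc6 follows an init, sanitize, enable, disable, fini lifecycle. intel_gt_pm_init() calls intel_rc6_init(), intel_gt_resume() (whose body continues just below) sanitizes and then enables rc6, intel_gt_suspend() disables it, and intel_gt_pm_fini() calls intel_rc6_fini(). A toy, hardware-free model of that ordering and of the runtime-pm wakeref handoff (init holds a wakeref until enable releases it; disable takes it back), with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct toy_rc6 { bool supported, enabled, wakeref; };

/* Hypothetical model of the call ordering only; no hardware access. */
static void toy_init(struct toy_rc6 *rc6)
{
	rc6->wakeref = true;		/* block runtime-pm until rc6 is ready */
	rc6->supported = true;
}

static void toy_sanitize(struct toy_rc6 *rc6)
{
	rc6->enabled = false;		/* force rc6 off before re-enabling */
}

static void toy_enable(struct toy_rc6 *rc6)
{
	if (!rc6->supported)
		return;
	rc6->enabled = true;
	rc6->wakeref = false;		/* rc6 ready, runtime-pm may kick in */
}

static void toy_disable(struct toy_rc6 *rc6)
{
	if (!rc6->enabled)
		return;
	rc6->wakeref = true;
	rc6->enabled = false;
}

int main(void)
{
	struct toy_rc6 rc6 = { 0 };

	toy_init(&rc6);			/* intel_gt_pm_init()  */
	toy_sanitize(&rc6);		/* intel_gt_resume()   */
	toy_enable(&rc6);
	printf("resumed: enabled=%d wakeref=%d\n", rc6.enabled, rc6.wakeref);
	toy_disable(&rc6);		/* intel_gt_suspend()  */
	printf("suspended: enabled=%d wakeref=%d\n", rc6.enabled, rc6.wakeref);
	return 0;
}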
*/ intel_gt_pm_get(gt); + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + intel_rc6_sanitize(>->rc6); + for_each_engine(engine, gt->i915, id) { struct intel_context *ce; @@ -197,11 +185,51 @@ int intel_gt_resume(struct intel_gt *gt) break; } } + + intel_rc6_enable(>->rc6); + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); intel_gt_pm_put(gt); return err; } +static void wait_for_idle(struct intel_gt *gt) +{ + mutex_lock(>->i915->drm.struct_mutex); /* XXX */ + do { + if (i915_gem_wait_for_idle(gt->i915, + I915_WAIT_LOCKED, + I915_GEM_IDLE_TIMEOUT) == -ETIME) { + /* XXX hide warning from gem_eio */ + if (i915_modparams.reset) { + dev_err(gt->i915->drm.dev, + "Failed to idle engines, declaring wedged!\n"); + GEM_TRACE_DUMP(); + } + + /* + * Forcibly cancel outstanding work and leave + * the gpu quiet. + */ + intel_gt_set_wedged(gt); + } + } while (i915_retire_requests(gt->i915)); + mutex_unlock(>->i915->drm.struct_mutex); + + intel_gt_pm_wait_for_idle(gt); +} + +void intel_gt_suspend(struct intel_gt *gt) +{ + intel_wakeref_t wakeref; + + /* We expect to be idle already; but also want to be independent */ + wait_for_idle(gt); + + with_intel_runtime_pm(>->i915->runtime_pm, wakeref) + intel_rc6_disable(>->rc6); +} + void intel_gt_runtime_suspend(struct intel_gt *gt) { intel_uc_runtime_suspend(>->uc); @@ -213,3 +241,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt) return intel_uc_runtime_resume(>->uc); } + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_gt_pm.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index d1f3e2e23937..ab794e853356 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -43,12 +43,21 @@ static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt) } void intel_gt_pm_init_early(struct intel_gt *gt); -void intel_gt_pm_enable(struct intel_gt *gt); +void intel_gt_pm_init(struct intel_gt *gt); void intel_gt_pm_disable(struct intel_gt *gt); +void intel_gt_pm_fini(struct intel_gt *gt); void intel_gt_sanitize(struct intel_gt *gt, bool force); + int intel_gt_resume(struct intel_gt *gt); +void intel_gt_suspend(struct intel_gt *gt); + void intel_gt_runtime_suspend(struct intel_gt *gt); int intel_gt_runtime_resume(struct intel_gt *gt); +static inline bool is_mock_gt(const struct intel_gt *gt) +{ + return I915_SELFTEST_ONLY(gt->awake == -1); +} + #endif /* INTEL_GT_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index e64db4c13df6..7134f1319bbe 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -18,6 +18,7 @@ #include "i915_vma.h" #include "intel_engine_types.h" #include "intel_reset_types.h" +#include "intel_rc6_types.h" #include "intel_wakeref.h" struct drm_i915_private; @@ -67,6 +68,8 @@ struct intel_gt { */ intel_wakeref_t awake; + struct intel_rc6 rc6; + struct blocking_notifier_head pm_notifications; ktime_t last_init_time; diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c new file mode 100644 index 000000000000..d6167dd592e9 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -0,0 +1,712 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include + +#include "i915_drv.h" +#include "intel_gt.h" +#include "intel_gt_pm.h" +#include "intel_rc6.h" +#include "intel_sideband.h" + +/** + * DOC: RC6 + * + * RC6 is a special power stage which allows the GPU to enter 
an very + * low-voltage mode when idle, using down to 0V while at this stage. This + * stage is entered automatically when the GPU is idle when RC6 support is + * enabled, and as soon as new workload arises GPU wakes up automatically as + * well. + * + * There are different RC6 modes available in Intel GPU, which differentiate + * among each other with the latency required to enter and leave RC6 and + * voltage consumed by the GPU in different states. + * + * The combination of the following flags define which states GPU is allowed + * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and + * RC6pp is deepest RC6. Their support by hardware varies according to the + * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one + * which brings the most power savings; deeper states save more power, but + * require higher latency to switch to and wake up. + */ + +static struct intel_gt *rc6_to_gt(struct intel_rc6 *rc6) +{ + return container_of(rc6, struct intel_gt, rc6); +} + +static struct intel_uncore *rc6_to_uncore(struct intel_rc6 *rc) +{ + return rc6_to_gt(rc)->uncore; +} + +static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc) +{ + return rc6_to_gt(rc)->i915; +} + +static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) +{ + intel_uncore_write_fw(uncore, reg, val); +} + +static void gen11_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* 2b: Program RC6 thresholds.*/ + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85); + set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150); + + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + for_each_engine(engine, rc6_to_gt(rc6)->i915, id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + + set(uncore, GUC_MAX_IDLE_COUNT, 0xA); + + set(uncore, GEN6_RC_SLEEP, 0); + + set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ + + /* + * 2c: Program Coarse Power Gating Policies. + * + * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we + * use instead is a more conservative estimate for the maximum time + * it takes us to service a CS interrupt and submit a new ELSP - that + * is the time which the GPU is idle waiting for the CPU to select the + * next request to execute. If the idle hysteresis is less than that + * interrupt service latency, the hardware will automatically gate + * the power well and we will then incur the wake up cost on top of + * the service latency. A similar guide from plane_state is that we + * do not want the enable hysteresis to less than the wakeup latency. + * + * igt/gem_exec_nop/sequential provides a rough estimate for the + * service latency, and puts it around 10us for Broadwell (and other + * big core) and around 40us for Broxton (and other low power cores). + * [Note that for legacy ringbuffer submission, this is less than 1us!] + * However, the wakeup latency on Broxton is closer to 100us. To be + * conservative, we have to factor in a context switch on top (due + * to ksoftirqd). 
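A quick sanity check on the hysteresis values programmed just below (my arithmetic, assuming the same 1280 ns tick the comment above refers to): the BSpec guidance of 25 ticks works out to 25 * 1280 ns = 32 us, while the 250 ticks written here give 250 * 1280 ns = 320 us, comfortably above the ~100 us Broxton wakeup latency plus a context switch that the comment wants to stay clear of.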
+ */ + set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250); + set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250); + + /* 3a: Enable RC6 */ + set(uncore, GEN6_RC_CONTROL, + GEN6_RC_CTL_HW_ENABLE | + GEN6_RC_CTL_RC6_ENABLE | + GEN6_RC_CTL_EI_MODE(1)); + + set(uncore, GEN9_PG_ENABLE, + GEN9_RENDER_PG_ENABLE | + GEN9_MEDIA_PG_ENABLE | + GEN11_MEDIA_SAMPLER_PG_ENABLE); +} + +static void gen9_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 rc6_mode; + + /* 2b: Program RC6 thresholds.*/ + if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) { + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85); + set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150); + } else if (IS_SKYLAKE(rc6_to_i915(rc6))) { + /* + * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only + * when CPG is enabled + */ + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); + } else { + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); + } + + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + for_each_engine(engine, rc6_to_gt(rc6)->i915, id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + + set(uncore, GUC_MAX_IDLE_COUNT, 0xA); + + set(uncore, GEN6_RC_SLEEP, 0); + + /* + * 2c: Program Coarse Power Gating Policies. + * + * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we + * use instead is a more conservative estimate for the maximum time + * it takes us to service a CS interrupt and submit a new ELSP - that + * is the time which the GPU is idle waiting for the CPU to select the + * next request to execute. If the idle hysteresis is less than that + * interrupt service latency, the hardware will automatically gate + * the power well and we will then incur the wake up cost on top of + * the service latency. A similar guide from plane_state is that we + * do not want the enable hysteresis to less than the wakeup latency. + * + * igt/gem_exec_nop/sequential provides a rough estimate for the + * service latency, and puts it around 10us for Broadwell (and other + * big core) and around 40us for Broxton (and other low power cores). + * [Note that for legacy ringbuffer submission, this is less than 1us!] + * However, the wakeup latency on Broxton is closer to 100us. To be + * conservative, we have to factor in a context switch on top (due + * to ksoftirqd). 
+ */ + set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250); + set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250); + + /* 3a: Enable RC6 */ + set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ + + /* WaRsUseTimeoutMode:cnl (pre-prod) */ + if (IS_CNL_REVID(rc6_to_i915(rc6), CNL_REVID_A0, CNL_REVID_C0)) + rc6_mode = GEN7_RC_CTL_TO_MODE; + else + rc6_mode = GEN6_RC_CTL_EI_MODE(1); + + set(uncore, GEN6_RC_CONTROL, + GEN6_RC_CTL_HW_ENABLE | + GEN6_RC_CTL_RC6_ENABLE | + rc6_mode); + + set(uncore, GEN9_PG_ENABLE, + GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE); +} + +static void gen8_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* 2b: Program RC6 thresholds.*/ + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + for_each_engine(engine, rc6_to_gt(rc6)->i915, id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + set(uncore, GEN6_RC_SLEEP, 0); + set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ + + /* 3: Enable RC6 */ + set(uncore, GEN6_RC_CONTROL, + GEN6_RC_CTL_HW_ENABLE | + GEN7_RC_CTL_TO_MODE | + GEN6_RC_CTL_RC6_ENABLE); +} + +static void gen6_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 rc6vids, rc6_mask; + int ret; + + set(uncore, GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); + set(uncore, GEN6_RC6pp_WAKE_RATE_LIMIT, 30); + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); + + for_each_engine(engine, i915, id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + + set(uncore, GEN6_RC_SLEEP, 0); + set(uncore, GEN6_RC1e_THRESHOLD, 1000); + if (IS_IVYBRIDGE(i915)) + set(uncore, GEN6_RC6_THRESHOLD, 125000); + else + set(uncore, GEN6_RC6_THRESHOLD, 50000); + set(uncore, GEN6_RC6p_THRESHOLD, 150000); + set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */ + + /* We don't use those on Haswell */ + rc6_mask = GEN6_RC_CTL_RC6_ENABLE; + if (HAS_RC6p(i915)) + rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; + if (HAS_RC6pp(i915)) + rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; + set(uncore, GEN6_RC_CONTROL, + rc6_mask | + GEN6_RC_CTL_EI_MODE(1) | + GEN6_RC_CTL_HW_ENABLE); + + rc6vids = 0; + ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, + &rc6vids, NULL); + if (IS_GEN(i915, 6) && ret) { + DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); + } else if (IS_GEN(i915, 6) && + (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { + DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", + GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); + rc6vids &= 0xffff00; + rc6vids |= GEN6_ENCODE_RC6_VID(450); + ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); + if (ret) + DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); + } +} + +/* Check that the pcbr address is not empty. 
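A small numeric illustration of the PCBR fixup in chv_rc6_init() below (the DSM layout here is hypothetical, not taken from real hardware): when the BIOS left VLV_PCBR empty, the 32 KiB power-context block is carved from the top of stolen memory and aligned down to 4 KiB before being written to the register.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical stolen-memory (DSM) range and CHV 32 KiB pctx size. */
	uint64_t dsm_end = 0x7fffffffULL;		/* inclusive end of DSM */
	uint64_t pctx_size = 32 * 1024;
	uint64_t paddr = dsm_end + 1 - pctx_size;	/* top-of-DSM placement */
	uint64_t pctx_paddr = paddr & ~4095ULL;		/* 4 KiB-aligned PCBR   */

	printf("paddr=0x%llx pcbr=0x%llx\n",
	       (unsigned long long)paddr, (unsigned long long)pctx_paddr);
	return 0;
}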
*/ +static int chv_rc6_init(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + resource_size_t pctx_paddr, paddr; + resource_size_t pctx_size = 32 * SZ_1K; + u32 pcbr; + + pcbr = intel_uncore_read(uncore, VLV_PCBR); + if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { + DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); + paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size; + GEM_BUG_ON(paddr > U32_MAX); + + pctx_paddr = (paddr & ~4095); + intel_uncore_write(uncore, VLV_PCBR, pctx_paddr); + } + + return 0; +} + +static int vlv_rc6_init(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct drm_i915_gem_object *pctx; + resource_size_t pctx_paddr; + resource_size_t pctx_size = 24 * SZ_1K; + u32 pcbr; + + pcbr = intel_uncore_read(uncore, VLV_PCBR); + if (pcbr) { + /* BIOS set it up already, grab the pre-alloc'd space */ + resource_size_t pcbr_offset; + + pcbr_offset = (pcbr & ~4095) - i915->dsm.start; + pctx = i915_gem_object_create_stolen_for_preallocated(i915, + pcbr_offset, + I915_GTT_OFFSET_NONE, + pctx_size); + if (!pctx) + return -ENOMEM; + + goto out; + } + + DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); + + /* + * From the Gunit register HAS: + * The Gfx driver is expected to program this register and ensure + * proper allocation within Gfx stolen memory. For example, this + * register should be programmed such than the PCBR range does not + * overlap with other ranges, such as the frame buffer, protected + * memory, or any other relevant ranges. + */ + pctx = i915_gem_object_create_stolen(i915, pctx_size); + if (!pctx) { + DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); + return -ENOMEM; + } + + GEM_BUG_ON(range_overflows_t(u64, + i915->dsm.start, + pctx->stolen->start, + U32_MAX)); + pctx_paddr = i915->dsm.start + pctx->stolen->start; + intel_uncore_write(uncore, VLV_PCBR, pctx_paddr); + +out: + rc6->pctx = pctx; + return 0; +} + +static void chv_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* 2a: Program RC6 thresholds.*/ + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + + for_each_engine(engine, rc6_to_gt(rc6)->i915, id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + set(uncore, GEN6_RC_SLEEP, 0); + + /* TO threshold set to 500 us (0x186 * 1.28 us) */ + set(uncore, GEN6_RC6_THRESHOLD, 0x186); + + /* Allows RC6 residency counter to work */ + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | + VLV_MEDIA_RC6_COUNT_EN | + VLV_RENDER_RC6_COUNT_EN)); + + /* 3: Enable RC6 */ + set(uncore, GEN6_RC_CONTROL, GEN7_RC_CTL_TO_MODE); +} + +static void vlv_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000); + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); + + for_each_engine(engine, rc6_to_gt(rc6)->i915, id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + + set(uncore, GEN6_RC6_THRESHOLD, 0x557); + + /* Allows RC6 residency counter to work */ + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | + VLV_MEDIA_RC0_COUNT_EN | + VLV_RENDER_RC0_COUNT_EN | + 
VLV_MEDIA_RC6_COUNT_EN | + VLV_RENDER_RC6_COUNT_EN)); + + set(uncore, GEN6_RC_CONTROL, + GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL); +} + +static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct drm_i915_private *i915 = rc6_to_i915(rc6); + u32 rc6_ctx_base, rc_ctl, rc_sw_target; + bool enable_rc6 = true; + + rc_ctl = intel_uncore_read(uncore, GEN6_RC_CONTROL); + rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE); + rc_sw_target &= RC_SW_TARGET_STATE_MASK; + rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT; + DRM_DEBUG_DRIVER("BIOS enabled RC states: " + "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n", + onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE), + onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE), + rc_sw_target); + + if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) { + DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n"); + enable_rc6 = false; + } + + /* + * The exact context size is not known for BXT, so assume a page size + * for this check. + */ + rc6_ctx_base = + intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK; + if (!(rc6_ctx_base >= i915->dsm_reserved.start && + rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) { + DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n"); + enable_rc6 = false; + } + + if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1 && + (intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 && + (intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 && + (intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) { + DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n"); + enable_rc6 = false; + } + + if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) || + !intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) || + !intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) { + DRM_DEBUG_DRIVER("Pushbus not setup properly.\n"); + enable_rc6 = false; + } + + if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) { + DRM_DEBUG_DRIVER("GFX pause not setup properly.\n"); + enable_rc6 = false; + } + + if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) { + DRM_DEBUG_DRIVER("GPM control not setup properly.\n"); + enable_rc6 = false; + } + + return enable_rc6; +} + +static bool rc6_supported(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + + if (!HAS_RC6(i915)) + return false; + + if (intel_vgpu_active(i915)) + return false; + + if (is_mock_gt(rc6_to_gt(rc6))) + return false; + + if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) { + dev_notice(i915->drm.dev, + "RC6 and powersaving disabled by BIOS\n"); + return false; + } + + return true; +} + +static void rpm_get(struct intel_rc6 *rc6) +{ + GEM_BUG_ON(rc6->wakeref); + pm_runtime_get_sync(&rc6_to_i915(rc6)->drm.pdev->dev); + rc6->wakeref = true; +} + +static void rpm_put(struct intel_rc6 *rc6) +{ + GEM_BUG_ON(!rc6->wakeref); + pm_runtime_put(&rc6_to_i915(rc6)->drm.pdev->dev); + rc6->wakeref = false; +} + +static void __intel_rc6_disable(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_uncore *uncore = rc6_to_uncore(rc6); + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + if (INTEL_GEN(i915) >= 9) + set(uncore, GEN9_PG_ENABLE, 0); + set(uncore, GEN6_RC_CONTROL, 0); + set(uncore, GEN6_RC_STATE, 0); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); +} + +void intel_rc6_init(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + int err; + + /* Disable runtime-pm 
until we can save the GPU state with rc6 pctx */ + rpm_get(rc6); + + if (!rc6_supported(rc6)) + return; + + if (IS_CHERRYVIEW(i915)) + err = chv_rc6_init(rc6); + else if (IS_VALLEYVIEW(i915)) + err = vlv_rc6_init(rc6); + else + err = 0; + + /* Sanitize rc6, ensure it is disabled before we are ready. */ + __intel_rc6_disable(rc6); + + rc6->supported = err == 0; +} + +void intel_rc6_sanitize(struct intel_rc6 *rc6) +{ + if (rc6->supported) + __intel_rc6_disable(rc6); +} + +void intel_rc6_enable(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_uncore *uncore = rc6_to_uncore(rc6); + + if (!rc6->supported) + return; + + GEM_BUG_ON(rc6->enabled); + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + if (IS_CHERRYVIEW(i915)) + chv_rc6_enable(rc6); + else if (IS_VALLEYVIEW(i915)) + vlv_rc6_enable(rc6); + else if (INTEL_GEN(i915) >= 11) + gen11_rc6_enable(rc6); + else if (INTEL_GEN(i915) >= 9) + gen9_rc6_enable(rc6); + else if (IS_BROADWELL(i915)) + gen8_rc6_enable(rc6); + else if (INTEL_GEN(i915) >= 6) + gen6_rc6_enable(rc6); + + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + + /* rc6 is ready, runtime-pm is go! */ + rpm_put(rc6); + rc6->enabled = true; +} + +void intel_rc6_disable(struct intel_rc6 *rc6) +{ + if (!rc6->enabled) + return; + + rpm_get(rc6); + rc6->enabled = false; + + __intel_rc6_disable(rc6); +} + +void intel_rc6_fini(struct intel_rc6 *rc6) +{ + struct drm_i915_gem_object *pctx; + + intel_rc6_disable(rc6); + + pctx = fetch_and_zero(&rc6->pctx); + if (pctx) + i915_gem_object_put(pctx); + + if (rc6->wakeref) + rpm_put(rc6); +} + +static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg) +{ + u32 lower, upper, tmp; + int loop = 2; + + /* + * The register accessed do not need forcewake. We borrow + * uncore lock to prevent concurrent access to range reg. + */ + lockdep_assert_held(&uncore->lock); + + /* + * vlv and chv residency counters are 40 bits in width. + * With a control bit, we can choose between upper or lower + * 32bit window into this counter. + * + * Although we always use the counter in high-range mode elsewhere, + * userspace may attempt to read the value before rc6 is initialised, + * before we have set the default VLV_COUNTER_CONTROL value. So always + * set the high bit to be safe. + */ + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH)); + upper = intel_uncore_read_fw(uncore, reg); + do { + tmp = upper; + + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH)); + lower = intel_uncore_read_fw(uncore, reg); + + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH)); + upper = intel_uncore_read_fw(uncore, reg); + } while (upper != tmp && --loop); + + /* + * Everywhere else we always use VLV_COUNTER_CONTROL with the + * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set + * now. + */ + + return lower | (u64)upper << 8; +} + +u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_uncore *uncore = rc6_to_uncore(rc6); + u64 time_hw, prev_hw, overflow_hw; + unsigned int fw_domains; + unsigned long flags; + unsigned int i; + u32 mul, div; + + if (!rc6->supported) + return 0; + + /* + * Store previous hw counter values for counter wrap-around handling. 
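An aside on the wrap handling performed further down in this function (a stand-alone sketch, not the driver code): the raw residency counter is only 40 bits wide on VLV/CHV and 32 bits elsewhere, so the delta since the previous sample has to add back one full counter period whenever the value has wrapped; the driver then scales that delta to nanoseconds with mul/div (1280/1, i.e. 1.28 us per tick, by default; 10000/12 on Gen9 LP; CZ-clock based on VLV/CHV).

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe delta between two raw counter samples; "overflow" is the counter
 * period, e.g. 1ULL << 40 on VLV/CHV and 1ULL << 32 elsewhere. */
static uint64_t counter_delta(uint64_t prev, uint64_t cur, uint64_t overflow)
{
	return cur >= prev ? cur - prev : cur + overflow - prev;
}

int main(void)
{
	/* Hypothetical samples taken just before and just after a 32-bit wrap. */
	uint64_t delta = counter_delta(0xfffffff0ULL, 0x10ULL, 1ULL << 32);

	printf("delta=%llu ticks\n", (unsigned long long)delta);
	return 0;
}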
+ * + * There are only four interesting registers and they live next to each + * other so we can use the relative address, compared to the smallest + * one as the index into driver storage. + */ + i = (i915_mmio_reg_offset(reg) - + i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32); + if (WARN_ON_ONCE(i >= ARRAY_SIZE(rc6->cur_residency))) + return 0; + + fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); + + spin_lock_irqsave(&uncore->lock, flags); + intel_uncore_forcewake_get__locked(uncore, fw_domains); + + /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { + mul = 1000000; + div = i915->czclk_freq; + overflow_hw = BIT_ULL(40); + time_hw = vlv_residency_raw(uncore, reg); + } else { + /* 833.33ns units on Gen9LP, 1.28us elsewhere. */ + if (IS_GEN9_LP(i915)) { + mul = 10000; + div = 12; + } else { + mul = 1280; + div = 1; + } + + overflow_hw = BIT_ULL(32); + time_hw = intel_uncore_read_fw(uncore, reg); + } + + /* + * Counter wrap handling. + * + * But relying on a sufficient frequency of queries otherwise counters + * can still wrap. + */ + prev_hw = rc6->prev_hw_residency[i]; + rc6->prev_hw_residency[i] = time_hw; + + /* RC6 delta from last sample. */ + if (time_hw >= prev_hw) + time_hw -= prev_hw; + else + time_hw += overflow_hw - prev_hw; + + /* Add delta to RC6 extended raw driver copy. */ + time_hw += rc6->cur_residency[i]; + rc6->cur_residency[i] = time_hw; + + intel_uncore_forcewake_put__locked(uncore, fw_domains); + spin_unlock_irqrestore(&uncore->lock, flags); + + return mul_u64_u32_div(time_hw, mul, div); +} + +u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg) +{ + return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000); +} diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h new file mode 100644 index 000000000000..5e6711f36457 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rc6.h @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RC6_H +#define INTEL_RC6_H + +#include "i915_reg.h" + +struct intel_engine_cs; +struct intel_rc6; + +void intel_rc6_init(struct intel_rc6 *rc6); +void intel_rc6_fini(struct intel_rc6 *rc6); + +void intel_rc6_sanitize(struct intel_rc6 *rc6); +void intel_rc6_enable(struct intel_rc6 *rc6); +void intel_rc6_disable(struct intel_rc6 *rc6); + +u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, i915_reg_t reg); +u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg); + +#endif /* INTEL_RC6_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h new file mode 100644 index 000000000000..214f354d6ae4 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RC6_TYPES_H +#define INTEL_RC6_TYPES_H + +#include +#include + +#include "intel_engine_types.h" + +struct drm_i915_gem_object; + +struct intel_rc6 { + u64 prev_hw_residency[4]; + u64 cur_residency[4]; + + struct drm_i915_gem_object *pctx; + + bool supported : 1; + bool enabled : 1; + bool wakeref : 1; +}; + +#endif /* INTEL_RC6_TYPES_H */ diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c new file mode 100644 index 000000000000..87985bd46423 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -0,0 +1,50 @@ + +/* + * SPDX-License-Identifier: MIT + * + 
* Copyright © 2019 Intel Corporation + */ + +static int live_gt_resume(void *arg) +{ + struct intel_gt *gt = arg; + IGT_TIMEOUT(end_time); + int err; + + /* Do several suspend/resume cycles to check we don't explode! */ + do { + intel_gt_suspend(gt); + + if (gt->rc6.enabled) { + pr_err("rc6 still enabled after suspend!\n"); + intel_gt_set_wedged_on_init(gt); + err = -EINVAL; + break; + } + + err = intel_gt_resume(gt); + if (err) + break; + + if (gt->rc6.supported && !gt->rc6.enabled) { + pr_err("rc6 not enabled upon resume!\n"); + intel_gt_set_wedged_on_init(gt); + err = -EINVAL; + break; + } + } while (!__igt_timeout(end_time, NULL)); + + return err; +} + +int intel_gt_pm_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_gt_resume), + }; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + return intel_gt_live_subtests(tests, &i915->gt); +} diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b5b449a88cf1..fec9fb7cc384 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -42,6 +42,7 @@ #include "gem/i915_gem_context.h" #include "gt/intel_gt_pm.h" #include "gt/intel_reset.h" +#include "gt/intel_rc6.h" #include "gt/uc/intel_guc_submission.h" #include "i915_debugfs.h" @@ -1168,11 +1169,13 @@ static void print_rc6_res(struct seq_file *m, const char *title, const i915_reg_t reg) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_i915_private *i915 = node_to_i915(m->private); + intel_wakeref_t wakeref; - seq_printf(m, "%s %u (%llu us)\n", - title, I915_READ(reg), - intel_rc6_residency_us(dev_priv, reg)); + with_intel_runtime_pm(&i915->runtime_pm, wakeref) + seq_printf(m, "%s %u (%llu us)\n", title, + intel_uncore_read(&i915->uncore, reg), + intel_rc6_residency_us(&i915->gt.rc6, reg)); } static int vlv_drpc_info(struct seq_file *m) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b3c7dbc1832a..0e4e77373ee5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -599,19 +599,12 @@ struct intel_rps { struct intel_rps_ei ei; }; -struct intel_rc6 { - bool enabled; - u64 prev_hw_residency[4]; - u64 cur_residency[4]; -}; - struct intel_llc_pstate { bool enabled; }; struct intel_gen6_power_mgmt { struct intel_rps rps; - struct intel_rc6 rc6; struct intel_llc_pstate llc_pstate; }; diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 3310353890fb..d0508719492e 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -11,6 +11,7 @@ #include "gt/intel_engine_pm.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_rc6.h" #include "i915_drv.h" #include "i915_pmu.h" @@ -116,21 +117,21 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active) return enable; } -static u64 __get_rc6(const struct intel_gt *gt) +static u64 __get_rc6(struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; u64 val; - val = intel_rc6_residency_ns(i915, + val = intel_rc6_residency_ns(>->rc6, IS_VALLEYVIEW(i915) ? 
VLV_GT_RENDER_RC6 : GEN6_GT_GFX_RC6); if (HAS_RC6p(i915)) - val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p); + val += intel_rc6_residency_ns(>->rc6, GEN6_GT_GFX_RC6p); if (HAS_RC6pp(i915)) - val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp); + val += intel_rc6_residency_ns(>->rc6, GEN6_GT_GFX_RC6pp); return val; } diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index d8a3b180c084..034b8abc5062 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -30,6 +30,8 @@ #include #include +#include "gt/intel_rc6.h" + #include "i915_drv.h" #include "i915_sysfs.h" #include "intel_pm.h" @@ -49,7 +51,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv, u64 res = 0; with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) - res = intel_rc6_residency_us(dev_priv, reg); + res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg); return DIV_ROUND_CLOSEST_ULL(res, 1000); } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 6bed2ed14574..bfcf03ab5245 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -45,26 +45,6 @@ #include "intel_sideband.h" #include "../../../platform/x86/intel_ips.h" -/** - * DOC: RC6 - * - * RC6 is a special power stage which allows the GPU to enter an very - * low-voltage mode when idle, using down to 0V while at this stage. This - * stage is entered automatically when the GPU is idle when RC6 support is - * enabled, and as soon as new workload arises GPU wakes up automatically as well. - * - * There are different RC6 modes available in Intel GPU, which differentiate - * among each other with the latency required to enter and leave RC6 and - * voltage consumed by the GPU in different states. - * - * The combination of the following flags define which states GPU is allowed - * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and - * RC6pp is deepest RC6. Their support by hardware varies according to the - * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one - * which brings the most power savings; deeper states save more power, but - * require higher latency to switch to and wake up. 
- */ - static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) { if (HAS_LLC(dev_priv)) { @@ -6918,142 +6898,27 @@ int intel_set_rps(struct drm_i915_private *dev_priv, u8 val) return err; } -static void gen9_disable_rc6(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN6_RC_CONTROL, 0); - I915_WRITE(GEN9_PG_ENABLE, 0); -} - static void gen9_disable_rps(struct drm_i915_private *dev_priv) { I915_WRITE(GEN6_RP_CONTROL, 0); } -static void gen6_disable_rc6(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN6_RC_CONTROL, 0); -} - static void gen6_disable_rps(struct drm_i915_private *dev_priv) { I915_WRITE(GEN6_RPNSWREQ, 1 << 31); I915_WRITE(GEN6_RP_CONTROL, 0); } -static void cherryview_disable_rc6(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN6_RC_CONTROL, 0); -} - static void cherryview_disable_rps(struct drm_i915_private *dev_priv) { I915_WRITE(GEN6_RP_CONTROL, 0); } -static void valleyview_disable_rc6(struct drm_i915_private *dev_priv) -{ - /* We're doing forcewake before Disabling RC6, - * This what the BIOS expects when going into suspend */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - I915_WRITE(GEN6_RC_CONTROL, 0); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - static void valleyview_disable_rps(struct drm_i915_private *dev_priv) { I915_WRITE(GEN6_RP_CONTROL, 0); } -static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv) -{ - bool enable_rc6 = true; - unsigned long rc6_ctx_base; - u32 rc_ctl; - int rc_sw_target; - - rc_ctl = I915_READ(GEN6_RC_CONTROL); - rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >> - RC_SW_TARGET_STATE_SHIFT; - DRM_DEBUG_DRIVER("BIOS enabled RC states: " - "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n", - onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE), - onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE), - rc_sw_target); - - if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) { - DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n"); - enable_rc6 = false; - } - - /* - * The exact context size is not known for BXT, so assume a page size - * for this check. 
- */ - rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK; - if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) && - (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) { - DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n"); - enable_rc6 = false; - } - - if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) && - ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) && - ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) && - ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) { - DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n"); - enable_rc6 = false; - } - - if (!I915_READ(GEN8_PUSHBUS_CONTROL) || - !I915_READ(GEN8_PUSHBUS_ENABLE) || - !I915_READ(GEN8_PUSHBUS_SHIFT)) { - DRM_DEBUG_DRIVER("Pushbus not setup properly.\n"); - enable_rc6 = false; - } - - if (!I915_READ(GEN6_GFXPAUSE)) { - DRM_DEBUG_DRIVER("GFX pause not setup properly.\n"); - enable_rc6 = false; - } - - if (!I915_READ(GEN8_MISC_CTRL0)) { - DRM_DEBUG_DRIVER("GPM control not setup properly.\n"); - enable_rc6 = false; - } - - return enable_rc6; -} - -static bool sanitize_rc6(struct drm_i915_private *i915) -{ - struct intel_device_info *info = mkwrite_device_info(i915); - - /* Powersaving is controlled by the host when inside a VM */ - if (intel_vgpu_active(i915)) { - info->has_rc6 = 0; - info->has_rps = false; - } - - if (info->has_rc6 && - IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) { - DRM_INFO("RC6 disabled by BIOS\n"); - info->has_rc6 = 0; - } - - /* - * We assume that we do not have any deep rc6 levels if we don't have - * have the previous rc6 level supported, i.e. we use HAS_RC6() - * as the initial coarse check for rc6 in general, moving on to - * progressively finer/deeper levels. - */ - if (!info->has_rc6 && info->has_rc6p) - info->has_rc6p = 0; - - return info->has_rc6; -} - static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) { struct intel_rps *rps = &dev_priv->gt_pm.rps; @@ -7140,203 +7005,6 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv) intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } -static void gen11_enable_rc6(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - /* 1a: Software RC state - RC0 */ - I915_WRITE(GEN6_RC_STATE, 0); - - /* - * 1b: Get forcewake during program sequence. Although the driver - * hasn't enabled a state yet where we need forcewake, BIOS may have. - */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* 2a: Disable RC states. */ - I915_WRITE(GEN6_RC_CONTROL, 0); - - /* 2b: Program RC6 thresholds.*/ - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85); - I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150); - - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - - if (HAS_GT_UC(dev_priv)) - I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); - - I915_WRITE(GEN6_RC_SLEEP, 0); - - I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ - - /* - * 2c: Program Coarse Power Gating Policies. - * - * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we - * use instead is a more conservative estimate for the maximum time - * it takes us to service a CS interrupt and submit a new ELSP - that - * is the time which the GPU is idle waiting for the CPU to select the - * next request to execute. 
If the idle hysteresis is less than that - * interrupt service latency, the hardware will automatically gate - * the power well and we will then incur the wake up cost on top of - * the service latency. A similar guide from plane_state is that we - * do not want the enable hysteresis to less than the wakeup latency. - * - * igt/gem_exec_nop/sequential provides a rough estimate for the - * service latency, and puts it around 10us for Broadwell (and other - * big core) and around 40us for Broxton (and other low power cores). - * [Note that for legacy ringbuffer submission, this is less than 1us!] - * However, the wakeup latency on Broxton is closer to 100us. To be - * conservative, we have to factor in a context switch on top (due - * to ksoftirqd). - */ - I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250); - I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250); - - /* 3a: Enable RC6 */ - I915_WRITE(GEN6_RC_CONTROL, - GEN6_RC_CTL_HW_ENABLE | - GEN6_RC_CTL_RC6_ENABLE | - GEN6_RC_CTL_EI_MODE(1)); - - /* 3b: Enable Coarse Power Gating only when RC6 is enabled. */ - I915_WRITE(GEN9_PG_ENABLE, - GEN9_RENDER_PG_ENABLE | - GEN9_MEDIA_PG_ENABLE | - GEN11_MEDIA_SAMPLER_PG_ENABLE); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void gen9_enable_rc6(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - u32 rc6_mode; - - /* 1a: Software RC state - RC0 */ - I915_WRITE(GEN6_RC_STATE, 0); - - /* 1b: Get forcewake during program sequence. Although the driver - * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* 2a: Disable RC states. */ - I915_WRITE(GEN6_RC_CONTROL, 0); - - /* 2b: Program RC6 thresholds.*/ - if (INTEL_GEN(dev_priv) >= 10) { - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85); - I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150); - } else if (IS_SKYLAKE(dev_priv)) { - /* - * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only - * when CPG is enabled - */ - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); - } else { - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); - } - - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - - if (HAS_GT_UC(dev_priv)) - I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); - - I915_WRITE(GEN6_RC_SLEEP, 0); - - /* - * 2c: Program Coarse Power Gating Policies. - * - * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we - * use instead is a more conservative estimate for the maximum time - * it takes us to service a CS interrupt and submit a new ELSP - that - * is the time which the GPU is idle waiting for the CPU to select the - * next request to execute. If the idle hysteresis is less than that - * interrupt service latency, the hardware will automatically gate - * the power well and we will then incur the wake up cost on top of - * the service latency. A similar guide from plane_state is that we - * do not want the enable hysteresis to less than the wakeup latency. - * - * igt/gem_exec_nop/sequential provides a rough estimate for the - * service latency, and puts it around 10us for Broadwell (and other - * big core) and around 40us for Broxton (and other low power cores). - * [Note that for legacy ringbuffer submission, this is less than 1us!] - * However, the wakeup latency on Broxton is closer to 100us. 
To be - * conservative, we have to factor in a context switch on top (due - * to ksoftirqd). - */ - I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250); - I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250); - - /* 3a: Enable RC6 */ - I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ - - /* WaRsUseTimeoutMode:cnl (pre-prod) */ - if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0)) - rc6_mode = GEN7_RC_CTL_TO_MODE; - else - rc6_mode = GEN6_RC_CTL_EI_MODE(1); - - I915_WRITE(GEN6_RC_CONTROL, - GEN6_RC_CTL_HW_ENABLE | - GEN6_RC_CTL_RC6_ENABLE | - rc6_mode); - - /* - * 3b: Enable Coarse Power Gating only when RC6 is enabled. - * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6. - */ - if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) - I915_WRITE(GEN9_PG_ENABLE, 0); - else - I915_WRITE(GEN9_PG_ENABLE, - GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void gen8_enable_rc6(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - /* 1a: Software RC state - RC0 */ - I915_WRITE(GEN6_RC_STATE, 0); - - /* 1b: Get forcewake during program sequence. Although the driver - * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* 2a: Disable RC states. */ - I915_WRITE(GEN6_RC_CONTROL, 0); - - /* 2b: Program RC6 thresholds.*/ - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - I915_WRITE(GEN6_RC_SLEEP, 0); - I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ - - /* 3: Enable RC6 */ - - I915_WRITE(GEN6_RC_CONTROL, - GEN6_RC_CTL_HW_ENABLE | - GEN7_RC_CTL_TO_MODE | - GEN6_RC_CTL_RC6_ENABLE); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - static void gen8_enable_rps(struct drm_i915_private *dev_priv) { struct intel_rps *rps = &dev_priv->gt_pm.rps; @@ -7377,75 +7045,6 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv) intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } -static void gen6_enable_rc6(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - u32 rc6vids, rc6_mask; - u32 gtfifodbg; - int ret; - - I915_WRITE(GEN6_RC_STATE, 0); - - /* Clear the DBG now so we don't confuse earlier errors */ - gtfifodbg = I915_READ(GTFIFODBG); - if (gtfifodbg) { - DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); - I915_WRITE(GTFIFODBG, gtfifodbg); - } - - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* disable the counters and set deterministic thresholds */ - I915_WRITE(GEN6_RC_CONTROL, 0); - - I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); - I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); - - for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - - I915_WRITE(GEN6_RC_SLEEP, 0); - I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); - if (IS_IVYBRIDGE(dev_priv)) - I915_WRITE(GEN6_RC6_THRESHOLD, 125000); - else - I915_WRITE(GEN6_RC6_THRESHOLD, 50000); - I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); - I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ - - /* 
We don't use those on Haswell */ - rc6_mask = GEN6_RC_CTL_RC6_ENABLE; - if (HAS_RC6p(dev_priv)) - rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; - if (HAS_RC6pp(dev_priv)) - rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; - I915_WRITE(GEN6_RC_CONTROL, - rc6_mask | - GEN6_RC_CTL_EI_MODE(1) | - GEN6_RC_CTL_HW_ENABLE); - - rc6vids = 0; - ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, - &rc6vids, NULL); - if (IS_GEN(dev_priv, 6) && ret) { - DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); - } else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { - DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", - GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); - rc6vids &= 0xffff00; - rc6vids |= GEN6_ENCODE_RC6_VID(450); - ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); - if (ret) - DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); - } - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - static void gen6_enable_rps(struct drm_i915_private *dev_priv) { /* Here begins a magic sequence of register writes to enable @@ -7662,100 +7261,6 @@ static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) return max_t(u32, val, 0xc0); } -/* Check that the pctx buffer wasn't move under us. */ -static void valleyview_check_pctx(struct drm_i915_private *dev_priv) -{ - unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; - - WARN_ON(pctx_addr != dev_priv->dsm.start + - dev_priv->vlv_pctx->stolen->start); -} - - -/* Check that the pcbr address is not empty. */ -static void cherryview_check_pctx(struct drm_i915_private *dev_priv) -{ - unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; - - WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); -} - -static void cherryview_setup_pctx(struct drm_i915_private *dev_priv) -{ - resource_size_t pctx_paddr, paddr; - resource_size_t pctx_size = 32*1024; - u32 pcbr; - - pcbr = I915_READ(VLV_PCBR); - if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { - DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); - paddr = dev_priv->dsm.end + 1 - pctx_size; - GEM_BUG_ON(paddr > U32_MAX); - - pctx_paddr = (paddr & (~4095)); - I915_WRITE(VLV_PCBR, pctx_paddr); - } - - DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); -} - -static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) -{ - struct drm_i915_gem_object *pctx; - resource_size_t pctx_paddr; - resource_size_t pctx_size = 24*1024; - u32 pcbr; - - pcbr = I915_READ(VLV_PCBR); - if (pcbr) { - /* BIOS set it up already, grab the pre-alloc'd space */ - resource_size_t pcbr_offset; - - pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start; - pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv, - pcbr_offset, - I915_GTT_OFFSET_NONE, - pctx_size); - goto out; - } - - DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); - - /* - * From the Gunit register HAS: - * The Gfx driver is expected to program this register and ensure - * proper allocation within Gfx stolen memory. For example, this - * register should be programmed such than the PCBR range does not - * overlap with other ranges, such as the frame buffer, protected - * memory, or any other relevant ranges. 
- */ - pctx = i915_gem_object_create_stolen(dev_priv, pctx_size); - if (!pctx) { - DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); - goto out; - } - - GEM_BUG_ON(range_overflows_t(u64, - dev_priv->dsm.start, - pctx->stolen->start, - U32_MAX)); - pctx_paddr = dev_priv->dsm.start + pctx->stolen->start; - I915_WRITE(VLV_PCBR, pctx_paddr); - -out: - DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); - dev_priv->vlv_pctx = pctx; -} - -static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) -{ - struct drm_i915_gem_object *pctx; - - pctx = fetch_and_zero(&dev_priv->vlv_pctx); - if (pctx) - i915_gem_object_put(pctx); -} - static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv) { dev_priv->gt_pm.rps.gpll_ref_freq = @@ -7772,8 +7277,6 @@ static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv) struct intel_rps *rps = &dev_priv->gt_pm.rps; u32 val; - valleyview_setup_pctx(dev_priv); - vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_PUNIT) | BIT(VLV_IOSF_SB_NC) | @@ -7828,8 +7331,6 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) struct intel_rps *rps = &dev_priv->gt_pm.rps; u32 val; - cherryview_setup_pctx(dev_priv); - vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_PUNIT) | BIT(VLV_IOSF_SB_NC) | @@ -7880,64 +7381,6 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) "Odd GPU freq values\n"); } -static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv) -{ - valleyview_cleanup_pctx(dev_priv); -} - -static void cherryview_enable_rc6(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - u32 gtfifodbg, rc6_mode, pcbr; - - gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV | - GT_FIFO_FREE_ENTRIES_CHV); - if (gtfifodbg) { - DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", - gtfifodbg); - I915_WRITE(GTFIFODBG, gtfifodbg); - } - - cherryview_check_pctx(dev_priv); - - /* 1a & 1b: Get forcewake during program sequence. Although the driver - * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* Disable RC states. 
*/ - I915_WRITE(GEN6_RC_CONTROL, 0); - - /* 2a: Program RC6 thresholds.*/ - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - - for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - I915_WRITE(GEN6_RC_SLEEP, 0); - - /* TO threshold set to 500 us ( 0x186 * 1.28 us) */ - I915_WRITE(GEN6_RC6_THRESHOLD, 0x186); - - /* Allows RC6 residency counter to work */ - I915_WRITE(VLV_COUNTER_CONTROL, - _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | - VLV_MEDIA_RC6_COUNT_EN | - VLV_RENDER_RC6_COUNT_EN)); - - /* For now we assume BIOS is allocating and populating the PCBR */ - pcbr = I915_READ(VLV_PCBR); - - /* 3: Enable RC6 */ - rc6_mode = 0; - if (pcbr >> VLV_PCBR_ADDR_SHIFT) - rc6_mode = GEN7_RC_CTL_TO_MODE; - I915_WRITE(GEN6_RC_CONTROL, rc6_mode); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - static void cherryview_enable_rps(struct drm_i915_private *dev_priv) { u32 val; @@ -7982,49 +7425,6 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv) intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); } -static void valleyview_enable_rc6(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - u32 gtfifodbg; - - valleyview_check_pctx(dev_priv); - - gtfifodbg = I915_READ(GTFIFODBG); - if (gtfifodbg) { - DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", - gtfifodbg); - I915_WRITE(GTFIFODBG, gtfifodbg); - } - - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* Disable RC states. */ - I915_WRITE(GEN6_RC_CONTROL, 0); - - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000); - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); - - for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - - I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); - - /* Allows RC6 residency counter to work */ - I915_WRITE(VLV_COUNTER_CONTROL, - _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | - VLV_MEDIA_RC0_COUNT_EN | - VLV_RENDER_RC0_COUNT_EN | - VLV_MEDIA_RC6_COUNT_EN | - VLV_RENDER_RC6_COUNT_EN)); - - I915_WRITE(GEN6_RC_CONTROL, - GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - static void valleyview_enable_rps(struct drm_i915_private *dev_priv) { u32 val; @@ -8551,14 +7951,9 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) { struct intel_rps *rps = &dev_priv->gt_pm.rps; - /* - * RPM depends on RC6 to save restore the GT HW context, so make RC6 a - * requirement. 
- */ - if (!sanitize_rc6(dev_priv)) { - DRM_INFO("RC6 disabled, disabling runtime PM support\n"); - pm_runtime_get(&dev_priv->drm.pdev->dev); - } + /* Powersaving is controlled by the host when inside a VM */ + if (intel_vgpu_active(dev_priv)) + mkwrite_device_info(dev_priv)->has_rps = false; /* Initialize RPS limits (for userspace) */ if (IS_CHERRYVIEW(dev_priv)) @@ -8593,19 +7988,9 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) rps->cur_freq = rps->idle_freq; } -void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) -{ - if (IS_VALLEYVIEW(dev_priv)) - valleyview_cleanup_gt_powersave(dev_priv); - - if (!HAS_RC6(dev_priv)) - pm_runtime_put(&dev_priv->drm.pdev->dev); -} - void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv) { dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */ - dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */ intel_disable_gt_powersave(dev_priv); if (INTEL_GEN(dev_priv) >= 11) @@ -8626,25 +8011,6 @@ static inline void intel_disable_llc_pstate(struct drm_i915_private *i915) i915->gt_pm.llc_pstate.enabled = false; } -static void intel_disable_rc6(struct drm_i915_private *dev_priv) -{ - lockdep_assert_held(&dev_priv->gt_pm.rps.lock); - - if (!dev_priv->gt_pm.rc6.enabled) - return; - - if (INTEL_GEN(dev_priv) >= 9) - gen9_disable_rc6(dev_priv); - else if (IS_CHERRYVIEW(dev_priv)) - cherryview_disable_rc6(dev_priv); - else if (IS_VALLEYVIEW(dev_priv)) - valleyview_disable_rc6(dev_priv); - else if (INTEL_GEN(dev_priv) >= 6) - gen6_disable_rc6(dev_priv); - - dev_priv->gt_pm.rc6.enabled = false; -} - static void intel_disable_rps(struct drm_i915_private *dev_priv) { lockdep_assert_held(&dev_priv->gt_pm.rps.lock); @@ -8670,8 +8036,6 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) { mutex_lock(&dev_priv->gt_pm.rps.lock); - intel_disable_rc6(dev_priv); - intel_disable_rps(dev_priv); if (HAS_LLC(dev_priv)) intel_disable_llc_pstate(dev_priv); @@ -8691,29 +8055,6 @@ static inline void intel_enable_llc_pstate(struct drm_i915_private *i915) i915->gt_pm.llc_pstate.enabled = true; } -static void intel_enable_rc6(struct drm_i915_private *dev_priv) -{ - lockdep_assert_held(&dev_priv->gt_pm.rps.lock); - - if (dev_priv->gt_pm.rc6.enabled) - return; - - if (IS_CHERRYVIEW(dev_priv)) - cherryview_enable_rc6(dev_priv); - else if (IS_VALLEYVIEW(dev_priv)) - valleyview_enable_rc6(dev_priv); - else if (INTEL_GEN(dev_priv) >= 11) - gen11_enable_rc6(dev_priv); - else if (INTEL_GEN(dev_priv) >= 9) - gen9_enable_rc6(dev_priv); - else if (IS_BROADWELL(dev_priv)) - gen8_enable_rc6(dev_priv); - else if (INTEL_GEN(dev_priv) >= 6) - gen6_enable_rc6(dev_priv); - - dev_priv->gt_pm.rc6.enabled = true; -} - static void intel_enable_rps(struct drm_i915_private *dev_priv) { struct intel_rps *rps = &dev_priv->gt_pm.rps; @@ -8755,8 +8096,6 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) mutex_lock(&dev_priv->gt_pm.rps.lock); - if (HAS_RC6(dev_priv)) - intel_enable_rc6(dev_priv); if (HAS_RPS(dev_priv)) intel_enable_rps(dev_priv); if (HAS_LLC(dev_priv)) @@ -9818,133 +9157,6 @@ void intel_pm_setup(struct drm_i915_private *dev_priv) atomic_set(&dev_priv->runtime_pm.wakeref_count, 0); } -static u64 vlv_residency_raw(struct drm_i915_private *dev_priv, - const i915_reg_t reg) -{ - u32 lower, upper, tmp; - int loop = 2; - - /* - * The register accessed do not need forcewake. We borrow - * uncore lock to prevent concurrent access to range reg. 
- */ - lockdep_assert_held(&dev_priv->uncore.lock); - - /* - * vlv and chv residency counters are 40 bits in width. - * With a control bit, we can choose between upper or lower - * 32bit window into this counter. - * - * Although we always use the counter in high-range mode elsewhere, - * userspace may attempt to read the value before rc6 is initialised, - * before we have set the default VLV_COUNTER_CONTROL value. So always - * set the high bit to be safe. - */ - I915_WRITE_FW(VLV_COUNTER_CONTROL, - _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH)); - upper = I915_READ_FW(reg); - do { - tmp = upper; - - I915_WRITE_FW(VLV_COUNTER_CONTROL, - _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH)); - lower = I915_READ_FW(reg); - - I915_WRITE_FW(VLV_COUNTER_CONTROL, - _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH)); - upper = I915_READ_FW(reg); - } while (upper != tmp && --loop); - - /* - * Everywhere else we always use VLV_COUNTER_CONTROL with the - * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set - * now. - */ - - return lower | (u64)upper << 8; -} - -u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, - const i915_reg_t reg) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - u64 time_hw, prev_hw, overflow_hw; - unsigned int fw_domains; - unsigned long flags; - unsigned int i; - u32 mul, div; - - if (!HAS_RC6(dev_priv)) - return 0; - - /* - * Store previous hw counter values for counter wrap-around handling. - * - * There are only four interesting registers and they live next to each - * other so we can use the relative address, compared to the smallest - * one as the index into driver storage. - */ - i = (i915_mmio_reg_offset(reg) - - i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32); - if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency))) - return 0; - - fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); - - spin_lock_irqsave(&uncore->lock, flags); - intel_uncore_forcewake_get__locked(uncore, fw_domains); - - /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - mul = 1000000; - div = dev_priv->czclk_freq; - overflow_hw = BIT_ULL(40); - time_hw = vlv_residency_raw(dev_priv, reg); - } else { - /* 833.33ns units on Gen9LP, 1.28us elsewhere. */ - if (IS_GEN9_LP(dev_priv)) { - mul = 10000; - div = 12; - } else { - mul = 1280; - div = 1; - } - - overflow_hw = BIT_ULL(32); - time_hw = intel_uncore_read_fw(uncore, reg); - } - - /* - * Counter wrap handling. - * - * But relying on a sufficient frequency of queries otherwise counters - * can still wrap. - */ - prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i]; - dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw; - - /* RC6 delta from last sample. */ - if (time_hw >= prev_hw) - time_hw -= prev_hw; - else - time_hw += overflow_hw - prev_hw; - - /* Add delta to RC6 extended raw driver copy. 
*/ - time_hw += dev_priv->gt_pm.rc6.cur_residency[i]; - dev_priv->gt_pm.rc6.cur_residency[i] = time_hw; - - intel_uncore_forcewake_put__locked(uncore, fw_domains); - spin_unlock_irqrestore(&uncore->lock, flags); - - return mul_u64_u32_div(time_hw, mul, div); -} - -u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, - i915_reg_t reg) -{ - return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000); -} - u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat) { u32 cagf; diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index e3573e1e16e3..93d192d0610a 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -32,7 +32,6 @@ void intel_pm_setup(struct drm_i915_private *dev_priv); void intel_gpu_ips_init(struct drm_i915_private *dev_priv); void intel_gpu_ips_teardown(void); void intel_init_gt_powersave(struct drm_i915_private *dev_priv); -void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv); void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv); void intel_enable_gt_powersave(struct drm_i915_private *dev_priv); void intel_disable_gt_powersave(struct drm_i915_private *dev_priv); @@ -72,8 +71,6 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv); int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); -u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, i915_reg_t reg); -u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, i915_reg_t reg); u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1); diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index 66d83c1390c1..6713efea350b 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -16,6 +16,7 @@ selftest(gt_engines, intel_engine_live_selftests) selftest(gt_timelines, intel_timeline_live_selftests) selftest(gt_contexts, intel_context_live_selftests) selftest(gt_lrc, intel_lrc_live_selftests) +selftest(gt_pm, intel_gt_pm_live_selftests) selftest(requests, i915_request_live_selftests) selftest(active, i915_active_live_selftests) selftest(objects, i915_gem_object_live_selftests) -- cgit v1.2.3 From 9cd6c339e34ab13479fb139b0b9f5ffc9743bfc1 Mon Sep 17 00:00:00 2001 From: Joonas Lahtinen Date: Fri, 27 Sep 2019 15:25:54 +0300 Subject: drm/i915: Update DRIVER_DATE to 20190927 Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0e4e77373ee5..c0f6c1f72b62 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -106,8 +106,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20190822" -#define DRIVER_TIMESTAMP 1566477988 +#define DRIVER_DATE "20190927" +#define DRIVER_TIMESTAMP 1569587154 struct drm_i915_gem_object; -- cgit v1.2.3 From 4b0b2b096da9d296e0e5668cdfba8613bd6f5bc8 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 25 Sep 2019 12:59:23 -0700 Subject: libsubcmd: Make _FORTIFY_SOURCE defines dependent on the feature Unconditionally defining _FORTIFY_SOURCE can break tools that don't work with it, such as memory sanitizers: https://github.com/google/sanitizers/wiki/AddressSanitizer#faq Fixes: 4b6ab94eabe4 ("perf subcmd: Create subcmd library") Signed-off-by: Ian 
Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: Josh Poimboeuf Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lore.kernel.org/lkml/20190925195924.152834-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/subcmd/Makefile | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile index ed61fb3a46c0..5b2cd5e58df0 100644 --- a/tools/lib/subcmd/Makefile +++ b/tools/lib/subcmd/Makefile @@ -20,7 +20,13 @@ MAKEFLAGS += --no-print-directory LIBFILE = $(OUTPUT)libsubcmd.a CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC +CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -fPIC + +ifeq ($(DEBUG),0) + ifeq ($(feature-fortify-source), 1) + CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 + endif +endif ifeq ($(CC_NO_CLANG), 0) CFLAGS += -O3 -- cgit v1.2.3 From e3e2cf3d5b1fe800b032e14c0fdcd9a6fb20cf3b Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 25 Sep 2019 12:59:24 -0700 Subject: perf tests: Avoid raising SEGV using an obvious NULL dereference An optimized build such as: make -C tools/perf CLANG=1 CC=clang EXTRA_CFLAGS="-O3 will turn the dereference operation into a ud2 instruction, raising a SIGILL rather than a SIGSEGV. Use raise(..) for correctness and clarity. Similar issues were addressed in Numfor Mbiziwo-Tiapo's patch: https://lkml.org/lkml/2019/7/8/1234 Committer testing: Before: [root@quaco ~]# perf test hooks 55: perf hooks : Ok [root@quaco ~]# perf test -v hooks 55: perf hooks : --- start --- test child forked, pid 17092 SIGSEGV is observed as expected, try to recover. Fatal error (SEGFAULT) in perf hook 'test' test child finished with 0 ---- end ---- perf hooks: Ok [root@quaco ~]# After: [root@quaco ~]# perf test hooks 55: perf hooks : Ok [root@quaco ~]# perf test -v hooks 55: perf hooks : --- start --- test child forked, pid 17909 SIGSEGV is observed as expected, try to recover. Fatal error (SEGFAULT) in perf hook 'test' test child finished with 0 ---- end ---- perf hooks: Ok [root@quaco ~]# Fixes: a074865e60ed ("perf tools: Introduce perf hooks") Signed-off-by: Ian Rogers Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Wang Nan Link: http://lore.kernel.org/lkml/20190925195924.152834-2-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/perf-hooks.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/perf/tests/perf-hooks.c b/tools/perf/tests/perf-hooks.c index dbc27199c65e..dd865e0bea12 100644 --- a/tools/perf/tests/perf-hooks.c +++ b/tools/perf/tests/perf-hooks.c @@ -19,12 +19,11 @@ static void sigsegv_handler(int sig __maybe_unused) static void the_hook(void *_hook_flags) { int *hook_flags = _hook_flags; - int *p = NULL; *hook_flags = 1234; /* Generate a segfault, test perf_hooks__recover */ - *p = 0; + raise(SIGSEGV); } int test__perf_hooks(struct test *test __maybe_unused, int subtest __maybe_unused) -- cgit v1.2.3 From d586ac10ce56b2381b8e1d8ed74660c1b2b8ab0d Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Fri, 20 Sep 2019 21:13:27 -0700 Subject: perf docs: Allow man page date to be specified With this change if a perf_date parameter is provided to asciidoc then it will override the default date written to the man page metadata. 
Without this change, or if the perf_date isn't specified, then the current date is written to the metadata. Having this parameter allows the metadata to be constant if builds happen on different dates. The name of the parameter is intended to be consistent with the existing perf_version parameter. Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Link: http://lore.kernel.org/lkml/20190921041327.155054-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/asciidoc.conf | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/Documentation/asciidoc.conf b/tools/perf/Documentation/asciidoc.conf index 356b23a40339..2b62ba1e72b7 100644 --- a/tools/perf/Documentation/asciidoc.conf +++ b/tools/perf/Documentation/asciidoc.conf @@ -71,6 +71,9 @@ ifdef::backend-docbook[] [header] template::[header-declarations] +ifdef::perf_date[] +{perf_date} +endif::perf_date[] {mantitle} {manvolnum} -- cgit v1.2.3 From 08a96a31474a732fd654575ced843b94bc3212e1 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Sep 2019 09:28:11 -0300 Subject: tools headers uapi: Sync drm/i915_drm.h with the kernel sources To pick the change in: bf73fc0fa9cf ("drm/i915: Show support for accurate sw PMU busyness tracking") That don't result in any changes in tooling, just silences this perf build warning: Warning: Kernel ABI header at 'tools/include/uapi/drm/i915_drm.h' differs from latest version at 'include/uapi/drm/i915_drm.h' diff -u tools/include/uapi/drm/i915_drm.h include/uapi/drm/i915_drm.h Cc: Adrian Hunter Cc: Chris Wilson Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lkml.kernel.org/n/tip-o651nt7vpz93tu3nmx4f3xql@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/drm/i915_drm.h | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h index 328d05e77d9f..469dc512cca3 100644 --- a/tools/include/uapi/drm/i915_drm.h +++ b/tools/include/uapi/drm/i915_drm.h @@ -521,6 +521,7 @@ typedef struct drm_i915_irq_wait { #define I915_SCHEDULER_CAP_PRIORITY (1ul << 1) #define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2) #define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3) +#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4) #define I915_PARAM_HUC_STATUS 42 -- cgit v1.2.3 From b9023b91dd020ad7e093baa5122b6968c48cc9e0 Mon Sep 17 00:00:00 2001 From: Balasubramani Vivekanandan Date: Thu, 26 Sep 2019 15:51:01 +0200 Subject: tick: broadcast-hrtimer: Fix a race in bc_set_next When a cpu requests broadcasting, before starting the tick broadcast hrtimer, bc_set_next() checks if the timer callback (bc_handler) is active using hrtimer_try_to_cancel(). But hrtimer_try_to_cancel() does not provide the required synchronization when the callback is active on other core. The callback could have already executed tick_handle_oneshot_broadcast() and could have also returned. But still there is a small time window where the hrtimer_try_to_cancel() returns -1. In that case bc_set_next() returns without doing anything, but the next_event of the tick broadcast clock device is already set to a timeout value. In the race condition diagram below, CPU #1 is running the timer callback and CPU #2 is entering idle state and so calls bc_set_next(). In the worst case, the next_event will contain an expiry time, but the hrtimer will not be started which happens when the racing callback returns HRTIMER_NORESTART. 
The hrtimer might never recover if all further requests from the CPUs to subscribe to tick broadcast have timeout greater than the next_event of tick broadcast clock device. This leads to cascading of failures and finally noticed as rcu stall warnings Here is a depiction of the race condition CPU #1 (Running timer callback) CPU #2 (Enter idle and subscribe to tick broadcast) --------------------- --------------------- __run_hrtimer() tick_broadcast_enter() bc_handler() __tick_broadcast_oneshot_control() tick_handle_oneshot_broadcast() raw_spin_lock(&tick_broadcast_lock); dev->next_event = KTIME_MAX; //wait for tick_broadcast_lock //next_event for tick broadcast clock set to KTIME_MAX since no other cores subscribed to tick broadcasting raw_spin_unlock(&tick_broadcast_lock); if (dev->next_event == KTIME_MAX) return HRTIMER_NORESTART // callback function exits without restarting the hrtimer //tick_broadcast_lock acquired raw_spin_lock(&tick_broadcast_lock); tick_broadcast_set_event() clockevents_program_event() dev->next_event = expires; bc_set_next() hrtimer_try_to_cancel() //returns -1 since the timer callback is active. Exits without restarting the timer cpu_base->running = NULL; The comment that hrtimer cannot be armed from within the callback is wrong. It is fine to start the hrtimer from within the callback. Also it is safe to start the hrtimer from the enter/exit idle code while the broadcast handler is active. The enter/exit idle code and the broadcast handler are synchronized using tick_broadcast_lock. So there is no need for the existing try to cancel logic. All this can be removed which will eliminate the race condition as well. Fixes: 5d1638acb9f6 ("tick: Introduce hrtimer based broadcast") Originally-by: Thomas Gleixner Signed-off-by: Balasubramani Vivekanandan Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20190926135101.12102-2-balasubramani_vivekanandan@mentor.com --- kernel/time/tick-broadcast-hrtimer.c | 62 +++++++++++++++++------------------- 1 file changed, 29 insertions(+), 33 deletions(-) diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c index c1f5bb590b5e..b5a65e212df2 100644 --- a/kernel/time/tick-broadcast-hrtimer.c +++ b/kernel/time/tick-broadcast-hrtimer.c @@ -42,39 +42,39 @@ static int bc_shutdown(struct clock_event_device *evt) */ static int bc_set_next(ktime_t expires, struct clock_event_device *bc) { - int bc_moved; /* - * We try to cancel the timer first. If the callback is on - * flight on some other cpu then we let it handle it. If we - * were able to cancel the timer nothing can rearm it as we - * own broadcast_lock. + * This is called either from enter/exit idle code or from the + * broadcast handler. In all cases tick_broadcast_lock is held. * - * However we can also be called from the event handler of - * ce_broadcast_hrtimer itself when it expires. We cannot - * restart the timer because we are in the callback, but we - * can set the expiry time and let the callback return - * HRTIMER_RESTART. + * hrtimer_cancel() cannot be called here neither from the + * broadcast handler nor from the enter/exit idle code. The idle + * code can run into the problem described in bc_shutdown() and the + * broadcast handler cannot wait for itself to complete for obvious + * reasons. * - * Since we are in the idle loop at this point and because - * hrtimer_{start/cancel} functions call into tracing, - * calls to these functions must be bound within RCU_NONIDLE. 
+ * Each caller tries to arm the hrtimer on its own CPU, but if the + * hrtimer callbback function is currently running, then + * hrtimer_start() cannot move it and the timer stays on the CPU on + * which it is assigned at the moment. + * + * As this can be called from idle code, the hrtimer_start() + * invocation has to be wrapped with RCU_NONIDLE() as + * hrtimer_start() can call into tracing. */ - RCU_NONIDLE( - { - bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0; - if (bc_moved) { - hrtimer_start(&bctimer, expires, - HRTIMER_MODE_ABS_PINNED_HARD); - } - } - ); - - if (bc_moved) { - /* Bind the "device" to the cpu */ - bc->bound_on = smp_processor_id(); - } else if (bc->bound_on == smp_processor_id()) { - hrtimer_set_expires(&bctimer, expires); - } + RCU_NONIDLE( { + hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED_HARD); + /* + * The core tick broadcast mode expects bc->bound_on to be set + * correctly to prevent a CPU which has the broadcast hrtimer + * armed from going deep idle. + * + * As tick_broadcast_lock is held, nothing can change the cpu + * base which was just established in hrtimer_start() above. So + * the below access is safe even without holding the hrtimer + * base lock. + */ + bc->bound_on = bctimer.base->cpu_base->cpu; + } ); return 0; } @@ -100,10 +100,6 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t) { ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer); - if (clockevent_state_oneshot(&ce_broadcast_hrtimer)) - if (ce_broadcast_hrtimer.next_event != KTIME_MAX) - return HRTIMER_RESTART; - return HRTIMER_NORESTART; } -- cgit v1.2.3 From 19a36d329f5b1e5d3d06f7093de992693d0fdee6 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Mon, 26 Aug 2019 15:30:23 -0400 Subject: KVM: VMX: Set VMENTER_L1D_FLUSH_NOT_REQUIRED if !X86_BUG_L1TF The l1tf_vmx_mitigation is only set to VMENTER_L1D_FLUSH_NOT_REQUIRED when the ARCH_CAPABILITIES MSR indicates that L1D flush is not required. However, if the CPU is not affected by L1TF, l1tf_vmx_mitigation will still be set to VMENTER_L1D_FLUSH_AUTO. This is certainly not the best option for a !X86_BUG_L1TF CPU. So force l1tf_vmx_mitigation to VMENTER_L1D_FLUSH_NOT_REQUIRED to make it more explicit in case users are checking the vmentry_l1d_flush parameter. Signed-off-by: Waiman Long [Patch rewritten accoring to Borislav Petkov's suggestion. - Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx/vmx.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index d4575ffb3cec..e7970a2e8eae 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -209,6 +209,11 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf) struct page *page; unsigned int i; + if (!boot_cpu_has_bug(X86_BUG_L1TF)) { + l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED; + return 0; + } + if (!enable_ept) { l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED; return 0; @@ -7995,12 +8000,10 @@ static int __init vmx_init(void) * contain 'auto' which will be turned into the default 'cond' * mitigation mode. 
*/ - if (boot_cpu_has(X86_BUG_L1TF)) { - r = vmx_setup_l1d_flush(vmentry_l1d_flush_param); - if (r) { - vmx_exit(); - return r; - } + r = vmx_setup_l1d_flush(vmentry_l1d_flush_param); + if (r) { + vmx_exit(); + return r; } #ifdef CONFIG_KEXEC_CORE -- cgit v1.2.3 From 2e4a75976dfb88ee6ce019b1d4fa507aabd02c14 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 27 Sep 2019 17:54:13 +0200 Subject: KVM: selftests: x86: clarify what is reported on KVM_GET_MSRS failure When KVM_GET_MSRS fail the report looks like ==== Test Assertion Failure ==== lib/x86_64/processor.c:1089: r == nmsrs pid=28775 tid=28775 - Argument list too long 1 0x000000000040a55f: vcpu_save_state at processor.c:1088 (discriminator 3) 2 0x00000000004010e3: main at state_test.c:171 (discriminator 4) 3 0x00007fb8e69223d4: ?? ??:0 4 0x0000000000401287: _start at ??:? Unexpected result from KVM_GET_MSRS, r: 36 (failed at 194) and it's not obvious that '194' here is the failed MSR index and that it's printed in hex. Change that. Suggested-by: Sean Christopherson Signed-off-by: Vitaly Kuznetsov Signed-off-by: Paolo Bonzini --- tools/testing/selftests/kvm/lib/x86_64/processor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index c53dbc6bc568..6698cb741e10 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1085,7 +1085,7 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid) for (i = 0; i < nmsrs; i++) state->msrs.entries[i].index = list->indices[i]; r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs); - TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed at %x)", + TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed MSR was 0x%x)", r, r == nmsrs ? -1 : list->indices[r]); r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs); -- cgit v1.2.3 From 901045c3f0f43566f9a8a2f5716c2c0a497979e6 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 25 Sep 2019 15:21:21 -0700 Subject: drm/i915/huc: fix version parsing from CSS header The HuC FW has silently switched to encoding the version the same way as the GuC FW does, i.e. major.minor.patch instead of just major.minor. All the current blobs follow the new scheme, but since minor and patch are both zero there is no difference in the end results and we happily load them. New binaries, however, will have non-zero values in there, so we need to make sure to parse them correctly. 
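For illustration only, a minimal standalone sketch of the unified decode this change moves to. The mask values mirror the CSS_SW_VERSION_UC_* definitions added in the uc_fw_abi.h hunk below; the helper name and the sample firmware value are assumptions made purely for the example and are not part of the patch.

    /*
     * Standalone sketch of decoding a CSS header sw_version field that packs
     * the version as 8-bit major.minor.patch, as described above.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define CSS_SW_VERSION_UC_MAJOR (0xFFu << 16)
    #define CSS_SW_VERSION_UC_MINOR (0xFFu << 8)
    #define CSS_SW_VERSION_UC_PATCH (0xFFu << 0)

    static void decode_css_sw_version(uint32_t sw_version,
                                      uint16_t *major, uint16_t *minor)
    {
            /* Same extraction for GuC and HuC once both encode major.minor.patch */
            *major = (sw_version & CSS_SW_VERSION_UC_MAJOR) >> 16;
            *minor = (sw_version & CSS_SW_VERSION_UC_MINOR) >> 8;
    }

    int main(void)
    {
            uint16_t major, minor;

            /* e.g. a hypothetical blob advertising 7.5.0 would store 0x00070500 */
            decode_css_sw_version(0x00070500, &major, &minor);
            printf("%u.%u\n", major, minor);        /* prints 7.5 */
            return 0;
    }

With a single 8-bit major/minor/patch layout, GuC and HuC can share one FIELD_GET()-based extraction, which is what the intel_uc_fw.c hunk below collapses the per-type switch into.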
Signed-off-by: Daniele Ceraolo Spurio Cc: Anusha Srivatsa Cc: Michal Wajdeczko Reviewed-by: Stuart Summers Acked-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20190925222121.4000-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 23 ++++------------------- drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h | 8 +++----- 2 files changed, 7 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index ea9a807abd4f..bb878119f06c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -339,25 +339,10 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915) } /* Get version numbers from the CSS header */ - switch (uc_fw->type) { - case INTEL_UC_FW_TYPE_GUC: - uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR, - css->sw_version); - uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR, - css->sw_version); - break; - - case INTEL_UC_FW_TYPE_HUC: - uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR, - css->sw_version); - uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR, - css->sw_version); - break; - - default: - MISSING_CASE(uc_fw->type); - break; - } + uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, + css->sw_version); + uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR, + css->sw_version); if (uc_fw->major_ver_found != uc_fw->major_ver_wanted || uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h index ae58e8a8c53b..f8f6c91a0df6 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h @@ -69,11 +69,9 @@ struct uc_css_header { char username[8]; char buildnumber[12]; u32 sw_version; -#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16) -#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8) -#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0) -#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16) -#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0) +#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16) +#define CSS_SW_VERSION_UC_MINOR (0xFF << 8) +#define CSS_SW_VERSION_UC_PATCH (0xFF << 0) u32 reserved[14]; u32 header_info; } __packed; -- cgit v1.2.3 From 283a4095af9dd95e5d6d9f3e7f8b66a4b9f84082 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Wed, 25 Sep 2019 13:12:50 -0700 Subject: drm/i915/dmc: Update ICL DMC version to v1.09 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have a new version of DMC for ICL - v1.09. This version adds the Half Refresh Rate capability into DMC. 
Cc: José Roberto de Souza Signed-off-by: Anusha Srivatsa Reviewed-by: José Roberto de Souza Signed-off-by: Daniele Ceraolo Spurio Link: https://patchwork.freedesktop.org/patch/msgid/20190925201250.18136-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/intel_csr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 546577e39b4e..09870a31b4f0 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -44,8 +44,8 @@ #define TGL_CSR_MAX_FW_SIZE 0x6000 MODULE_FIRMWARE(TGL_CSR_PATH); -#define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin" -#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) +#define ICL_CSR_PATH "i915/icl_dmc_ver1_09.bin" +#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 9) #define ICL_CSR_MAX_FW_SIZE 0x6000 MODULE_FIRMWARE(ICL_CSR_PATH); -- cgit v1.2.3 From 67b483dd03c4cd9e90e4c3943132dce514ea4e88 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 24 Sep 2019 11:27:05 -0700 Subject: nvme-rdma: fix possible use-after-free in connect timeout If the connect times out, we may have already destroyed the queue in the timeout handler, so test if the queue is still allocated in the connect error handler. Reported-by: Yi Zhang Signed-off-by: Sagi Grimberg --- drivers/nvme/host/rdma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 9d16dfc29368..4d280160dd3f 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -620,7 +620,8 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx) if (!ret) { set_bit(NVME_RDMA_Q_LIVE, &queue->flags); } else { - __nvme_rdma_stop_queue(queue); + if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) + __nvme_rdma_stop_queue(queue); dev_info(ctrl->ctrl.device, "failed to connect queue: %d ret=%d\n", idx, ret); } -- cgit v1.2.3 From a12de1d42d74ef3c80e9fb9a2da94daaef747869 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Fri, 27 Sep 2019 15:24:30 +0800 Subject: blk-mq: honor IO scheduler for multiqueue devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If a device is using multiple queues, the IO scheduler may be bypassed. This may hurt performance for some slow MQ devices, and it also breaks zoned devices which depend on mq-deadline for respecting the write order in one zone. Don't bypass io scheduler if we have one setup. This patch can double sequential write performance basically on MQ scsi_debug when mq-deadline is applied. Cc: Bart Van Assche Cc: Hannes Reinecke Cc: Dave Chinner Reviewed-by: Javier González Reviewed-by: Damien Le Moal Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 6e3b15f70cd7..5d4bef3c378d 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2012,6 +2012,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) } blk_add_rq_to_plug(plug, rq); + } else if (q->elevator) { + blk_mq_sched_insert_request(rq, false, true, true); } else if (plug && !blk_queue_nomerges(q)) { /* * We do limited plugging. If the bio can be merged, do that. 
@@ -2035,8 +2037,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) blk_mq_try_issue_directly(data.hctx, same_queue_rq, &cookie); } - } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator && - !data.hctx->dispatch_busy)) { + } else if ((q->nr_hw_queues > 1 && is_sync) || + !data.hctx->dispatch_busy) { blk_mq_try_issue_directly(data.hctx, rq, &cookie); } else { blk_mq_sched_insert_request(rq, false, true, true); -- cgit v1.2.3 From 3b51be4e4061bd5e0f1b73f56cfecaa879c76d51 Mon Sep 17 00:00:00 2001 From: Clinton A Taylor Date: Thu, 26 Sep 2019 14:06:56 -0700 Subject: drm/i915/tc: Update DP_MODE programming MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit BSpec was updated(r146548) with a new MG_DP_MODE Programming table, now taking in consideration the pin assignment and allowing us to optimize power by shutting down available but not needed lanes. It was tested on ICL and TGL, with adaptors that used pin assignment C and B, reversing the connector and going to different modes testing the not needed lane shutdown. v5: Using crtc_state->lane_count instead of dp.lane_count BSpec: 21735 BSpec: 49292 Cc: Imre Deak Cc: Lucas De Marchi Reviewed-by: Imre Deak Signed-off-by: Clinton A Taylor Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190926210659.56317-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 92 ++++++++++++++++++-------------- drivers/gpu/drm/i915/display/intel_tc.c | 15 ++++++ drivers/gpu/drm/i915/display/intel_tc.h | 1 + drivers/gpu/drm/i915/i915_reg.h | 5 ++ 4 files changed, 72 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index aa470c70a198..20393d39a29a 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3091,11 +3091,14 @@ icl_phy_set_clock_gating(struct intel_digital_port *dig_port, bool enable) I915_WRITE(MG_MISC_SUS0(tc_port), val); } -static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) +static void +icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); enum port port = intel_dig_port->base.port; - u32 ln0, ln1, lane_mask; + u32 ln0, ln1, pin_assignment; + u8 width; if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT) return; @@ -3103,50 +3106,57 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) ln0 = I915_READ(MG_DP_MODE(0, port)); ln1 = I915_READ(MG_DP_MODE(1, port)); - switch (intel_dig_port->tc_mode) { - case TC_PORT_DP_ALT: - ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); - ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); + ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X1_MODE); + ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); - lane_mask = intel_tc_port_get_lane_mask(intel_dig_port); + /* DPPATC */ + pin_assignment = intel_tc_port_get_pin_assignment_mask(intel_dig_port); + width = crtc_state->lane_count; - switch (lane_mask) { - case 0x1: - case 0x4: - break; - case 0x2: + switch (pin_assignment) { + case 0x0: + WARN_ON(intel_dig_port->tc_mode != TC_PORT_LEGACY); + if (width == 1) { + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; + } else { + ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; + } + break; + case 0x1: + if (width == 4) { + ln0 |= 
MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; + } + break; + case 0x2: + if (width == 2) { + ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; + } + break; + case 0x3: + case 0x5: + if (width == 1) { ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; - break; - case 0x3: - ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | - MG_DP_MODE_CFG_DP_X2_MODE; - break; - case 0x8: ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; - break; - case 0xC: - ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | - MG_DP_MODE_CFG_DP_X2_MODE; - break; - case 0xF: - ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | - MG_DP_MODE_CFG_DP_X2_MODE; - ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | - MG_DP_MODE_CFG_DP_X2_MODE; - break; - default: - MISSING_CASE(lane_mask); + } else { + ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; } break; - - case TC_PORT_LEGACY: - ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; - ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; + case 0x4: + case 0x6: + if (width == 1) { + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; + } else { + ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; + } break; - default: - MISSING_CASE(intel_dig_port->tc_mode); - return; + MISSING_CASE(pin_assignment); } I915_WRITE(MG_DP_MODE(0, port), ln0); @@ -3239,7 +3249,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, dig_port->ddi_io_power_domain); /* 6. */ - icl_program_mg_dp_mode(dig_port); + icl_program_mg_dp_mode(dig_port, crtc_state); /* * 7.a - Steps in this function should only be executed over MST @@ -3321,7 +3331,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); - icl_program_mg_dp_mode(dig_port); + icl_program_mg_dp_mode(dig_port, crtc_state); icl_phy_set_clock_gating(dig_port, false); if (INTEL_GEN(dev_priv) >= 11) @@ -3391,7 +3401,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); - icl_program_mg_dp_mode(dig_port); + icl_program_mg_dp_mode(dig_port, crtc_state); icl_phy_set_clock_gating(dig_port, false); if (INTEL_GEN(dev_priv) >= 11) diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index f923f9cbd33c..7773169b7331 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -67,6 +67,21 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx); } +u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_uncore *uncore = &i915->uncore; + u32 pin_mask; + + pin_mask = intel_uncore_read(uncore, + PORT_TX_DFLEXPA1(dig_port->tc_phy_fia)); + + WARN_ON(pin_mask == 0xffffffff); + + return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >> + DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx); +} + int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index 783d75531435..463f1b3c836f 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -13,6 +13,7 @@ struct intel_digital_port; bool intel_tc_port_connected(struct intel_digital_port *dig_port); u32 
intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port); +u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port); int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port); void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, int required_lanes); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5e30f1a14624..014fa5dbde83 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -11861,6 +11861,11 @@ enum skl_power_gate { #define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894) #define DP_PHY_MODE_STATUS_NOT_SAFE(idx) (1 << (idx)) +#define PORT_TX_DFLEXPA1(fia) _MMIO_FIA((fia), 0x00880) +#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4) +#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4)) +#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4)) + /* This register controls the Display State Buffer (DSB) engines. */ #define _DSBSL_INSTANCE_BASE 0x70B00 #define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \ -- cgit v1.2.3 From 978c3e539be210268b6c6cfeef0c97e18c00d7a7 Mon Sep 17 00:00:00 2001 From: Clinton A Taylor Date: Thu, 26 Sep 2019 14:06:57 -0700 Subject: drm/i915/tgl: Add dkl phy programming sequences MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added DKL Phy sequences and helpers functions to program voltage swing, clock gating and dp mode. It is not written in DP enabling sequence but "PHY Clockgating programming" states that clock gating should be enabled after the link training but doing so causes all the following trainings to fail so not enabling it for. v2: Setting the right HIP_INDEX_REG bits (José) v3: Adding the meaning of each column of tgl_dkl_phy_ddi_translations Adding if gen >= 12 on intel_ddi_hdmi_level() and intel_ddi_pre_enable_hdmi() instead of reuse part of gen >= 11 if v4: Moved the DP_MODE lane programing to another patch as ICL also needed it Sharing icl_phy_set_clock_gating() and icl_program_mg_dp_mode() with TGL as bits and programing as now it almost identical to ICL BSpec: 49292 BSpec: 49190 Cc: Imre Deak Cc: Lucas De Marchi Reviewed-by: Imre Deak Signed-off-by: José Roberto de Souza Signed-off-by: Clinton A Taylor Link: https://patchwork.freedesktop.org/patch/msgid/20190926210659.56317-2-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 174 +++++++++++++++++++++++++++---- drivers/gpu/drm/i915/i915_reg.h | 8 -- 2 files changed, 154 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 20393d39a29a..b463e51f8b45 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -586,6 +586,26 @@ static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = { { 0x0, 0x00, 0x00 }, /* 3 0 */ }; +struct tgl_dkl_phy_ddi_buf_trans { + u32 dkl_vswing_control; + u32 dkl_preshoot_control; + u32 dkl_de_emphasis_control; +}; + +static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_ddi_translations[] = { + /* VS pre-emp Non-trans mV Pre-emph dB */ + { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */ + { 0x5, 0x0, 0x03 }, /* 0 1 400mV 3.5 dB */ + { 0x2, 0x0, 0x0b }, /* 0 2 400mV 6 dB */ + { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */ + { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */ + { 0x2, 0x0, 0x03 }, /* 1 1 600mV 3.5 dB */ + { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */ + { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */ + { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */ + 
{ 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */ +}; + static const struct ddi_buf_trans * bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) { @@ -872,7 +892,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(dev_priv) >= 12) { + if (intel_phy_is_combo(dev_priv, phy)) + icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, + 0, &n_entries); + else + n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations); + default_entry = n_entries - 1; + } else if (INTEL_GEN(dev_priv) == 11) { if (intel_phy_is_combo(dev_priv, phy)) icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0, &n_entries); @@ -2334,7 +2361,13 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) enum phy phy = intel_port_to_phy(dev_priv, port); int n_entries; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(dev_priv) >= 12) { + if (intel_phy_is_combo(dev_priv, phy)) + icl_get_combo_buf_trans(dev_priv, encoder->type, + intel_dp->link_rate, &n_entries); + else + n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations); + } else if (INTEL_GEN(dev_priv) == 11) { if (intel_phy_is_combo(dev_priv, phy)) icl_get_combo_buf_trans(dev_priv, encoder->type, intel_dp->link_rate, &n_entries); @@ -2776,6 +2809,62 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level); } +static void +tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock, + u32 level) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); + const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations; + u32 n_entries, val, ln, dpcnt_mask, dpcnt_val; + + n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations); + ddi_translations = tgl_dkl_phy_ddi_translations; + + if (level >= n_entries) + level = n_entries - 1; + + dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK | + DKL_TX_DE_EMPAHSIS_COEFF_MASK | + DKL_TX_VSWING_CONTROL_MASK); + dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations[level].dkl_vswing_control); + dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations[level].dkl_de_emphasis_control); + dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control); + + for (ln = 0; ln < 2; ln++) { + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); + + /* All the registers are RMW */ + val = I915_READ(DKL_TX_DPCNTL0(tc_port)); + val &= ~dpcnt_mask; + val |= dpcnt_val; + I915_WRITE(DKL_TX_DPCNTL0(tc_port), val); + + val = I915_READ(DKL_TX_DPCNTL1(tc_port)); + val &= ~dpcnt_mask; + val |= dpcnt_val; + I915_WRITE(DKL_TX_DPCNTL1(tc_port), val); + + val = I915_READ(DKL_TX_DPCNTL2(tc_port)); + val &= ~DKL_TX_DP20BITMODE; + I915_WRITE(DKL_TX_DPCNTL2(tc_port), val); + } +} + +static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder, + int link_clock, + u32 level, + enum intel_output_type type) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + + if (intel_phy_is_combo(dev_priv, phy)) + icl_combo_phy_ddi_vswing_sequence(encoder, level, type); + else + tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level); +} + static u32 translate_signal_level(int signal_levels) { int i; @@ -2807,7 +2896,10 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp) struct intel_encoder *encoder = &dport->base; int level = intel_ddi_dp_level(intel_dp); 
- if (INTEL_GEN(dev_priv) >= 11) + if (INTEL_GEN(dev_priv) >= 12) + tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate, + level, encoder->type); + else if (INTEL_GEN(dev_priv) >= 11) icl_ddi_vswing_sequence(encoder, intel_dp->link_rate, level, encoder->type); else if (IS_CANNONLAKE(dev_priv)) @@ -3071,24 +3163,39 @@ icl_phy_set_clock_gating(struct intel_digital_port *dig_port, bool enable) MG_DP_MODE_CFG_GAONPWR_GATING; for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_DP_MODE(ln, port)); + if (INTEL_GEN(dev_priv) >= 12) { + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); + val = I915_READ(DKL_DP_MODE(tc_port)); + } else { + val = I915_READ(MG_DP_MODE(ln, port)); + } + if (enable) val |= bits; else val &= ~bits; - I915_WRITE(MG_DP_MODE(ln, port), val); + + if (INTEL_GEN(dev_priv) >= 12) + I915_WRITE(DKL_DP_MODE(tc_port), val); + else + I915_WRITE(MG_DP_MODE(ln, port), val); } - bits = MG_MISC_SUS0_CFG_TR2PWR_GATING | MG_MISC_SUS0_CFG_CL2PWR_GATING | - MG_MISC_SUS0_CFG_GAONPWR_GATING | MG_MISC_SUS0_CFG_TRPWR_GATING | - MG_MISC_SUS0_CFG_CL1PWR_GATING | MG_MISC_SUS0_CFG_DGPWR_GATING; + if (INTEL_GEN(dev_priv) == 11) { + bits = MG_MISC_SUS0_CFG_TR2PWR_GATING | + MG_MISC_SUS0_CFG_CL2PWR_GATING | + MG_MISC_SUS0_CFG_GAONPWR_GATING | + MG_MISC_SUS0_CFG_TRPWR_GATING | + MG_MISC_SUS0_CFG_CL1PWR_GATING | + MG_MISC_SUS0_CFG_DGPWR_GATING; - val = I915_READ(MG_MISC_SUS0(tc_port)); - if (enable) - val |= (bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3)); - else - val &= ~(bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK); - I915_WRITE(MG_MISC_SUS0(tc_port), val); + val = I915_READ(MG_MISC_SUS0(tc_port)); + if (enable) + val |= (bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3)); + else + val &= ~(bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK); + I915_WRITE(MG_MISC_SUS0(tc_port), val); + } } static void @@ -3097,14 +3204,22 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port, { struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); enum port port = intel_dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, port); u32 ln0, ln1, pin_assignment; u8 width; if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT) return; - ln0 = I915_READ(MG_DP_MODE(0, port)); - ln1 = I915_READ(MG_DP_MODE(1, port)); + if (INTEL_GEN(dev_priv) >= 12) { + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0)); + ln0 = I915_READ(DKL_DP_MODE(tc_port)); + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1)); + ln1 = I915_READ(DKL_DP_MODE(tc_port)); + } else { + ln0 = I915_READ(MG_DP_MODE(0, port)); + ln1 = I915_READ(MG_DP_MODE(1, port)); + } ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X1_MODE); ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); @@ -3159,8 +3274,15 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port, MISSING_CASE(pin_assignment); } - I915_WRITE(MG_DP_MODE(0, port), ln0); - I915_WRITE(MG_DP_MODE(1, port), ln1); + if (INTEL_GEN(dev_priv) >= 12) { + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0)); + I915_WRITE(DKL_DP_MODE(tc_port), ln0); + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1)); + I915_WRITE(DKL_DP_MODE(tc_port), ln1); + } else { + I915_WRITE(MG_DP_MODE(0, port), ln0); + I915_WRITE(MG_DP_MODE(1, port), ln1); + } } static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, @@ -3265,7 +3387,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, icl_phy_set_clock_gating(dig_port, false); /* 7.e */ - icl_ddi_vswing_sequence(encoder, 
crtc_state->port_clock, level, + tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, encoder->type); /* 7.f */ @@ -3297,6 +3419,15 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, /* 7.k */ intel_dp_stop_link_train(intel_dp); + /* + * TODO: enable clock gating + * + * It is not written in DP enabling sequence but "PHY Clockgating + * programming" states that clock gating should be enabled after the + * link training but doing so causes all the following trainings to fail + * so not enabling it for now. + */ + /* 7.l */ intel_ddi_enable_fec(encoder, crtc_state); intel_dsc_enable(encoder, crtc_state); @@ -3404,7 +3535,10 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, icl_program_mg_dp_mode(dig_port, crtc_state); icl_phy_set_clock_gating(dig_port, false); - if (INTEL_GEN(dev_priv) >= 11) + if (INTEL_GEN(dev_priv) >= 12) + tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, + level, INTEL_OUTPUT_HDMI); + else if (INTEL_GEN(dev_priv) == 11) icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, INTEL_OUTPUT_HDMI); else if (IS_CANNONLAKE(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 014fa5dbde83..058aa5ca8b73 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -10249,14 +10249,6 @@ enum skl_power_gate { _DKL_TX_DW18) #define _DKL_DP_MODE 0xA0 -#define DKL_DP_MODE_CFG_GAONPWR_GATING (1 << 1) -#define DKL_DP_MODE_CFG_DIGPWR_GATING (1 << 2) -#define DKL_DP_MODE_CFG_CLNPWR_GATING (1 << 3) -#define DKL_DP_MODE_CFG_TRPWR_GATING (1 << 4) -#define DKL_DP_MODE_CFG_TR2PWR_GATING (1 << 5) -#define DKL_DP_MODE_CFG_GATING_CTRL_MASK (0x1f << 1) -#define DKL_DP_MODE_CFG_DP_X1_MODE (1 << 6) -#define DKL_DP_MODE_CFG_DP_X2_MODE (1 << 7) #define DKL_DP_MODE(tc_port) _MMIO(_PORT(tc_port, \ _DKL_PHY1_BASE, \ _DKL_PHY2_BASE) + \ -- cgit v1.2.3 From a839136ca47f79dcdb403cd7a0c5f4798466ac48 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Thu, 26 Sep 2019 14:06:58 -0700 Subject: drm/i915/tgl: Fix dkl link training MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Link training is failling when running link at 2.7GHz and 1.62GHz and following BSpec pll algorithm. Comparing the values calculated and the ones from the reference table it looks like MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO should not always set to 5. For DP ports ICL mg pll algorithm sets it to 10 or 5 based on div2 value, that matches with dkl hardcoded table. So implementing this way as it proved to work in HW and leaving a comment so we know why it do not match BSpec. v4: Using the same is_dp check as ICL, need testing on HDMI over tc port Issue reported on BSpec 49204. Reviewed-by: Imre Deak Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190926210659.56317-3-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 69abafa45ce9..be69a2344294 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2630,13 +2630,13 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, continue; if (div2 >= 2) { - if (is_dkl) { - a_divratio = 5; - tlinedrv = 1; - } else { - a_divratio = is_dp ? 
10 : 5; - tlinedrv = 2; - } + /* + * Note: a_divratio not matching TGL BSpec + * algorithm but matching hardcoded values and + * working on HW for DP alt-mode at least + */ + a_divratio = is_dp ? 10 : 5; + tlinedrv = is_dkl ? 1 : 2; } else { a_divratio = 5; tlinedrv = 0; -- cgit v1.2.3 From f663769a5eefbeb304a7b4749fdd28c24944d9a7 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 26 Sep 2019 14:06:59 -0700 Subject: drm/i915/tgl: initialize TC and TBT ports MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that TC support was added, initialize DDIs. Reviewed-by: José Roberto de Souza Acked-by: Lucas De Marchi Signed-off-by: Lucas De Marchi Signed-off-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20190926210659.56317-4-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 8f125f1624bd..bbe088b9d057 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -15340,9 +15340,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) return; if (INTEL_GEN(dev_priv) >= 12) { - /* TODO: initialize TC ports as well */ intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); + intel_ddi_init(dev_priv, PORT_D); + intel_ddi_init(dev_priv, PORT_E); + intel_ddi_init(dev_priv, PORT_F); + intel_ddi_init(dev_priv, PORT_G); + intel_ddi_init(dev_priv, PORT_H); + intel_ddi_init(dev_priv, PORT_I); icl_dsi_init(dev_priv); } else if (IS_ELKHARTLAKE(dev_priv)) { intel_ddi_init(dev_priv, PORT_A); -- cgit v1.2.3 From 3154df262db52f1d27e01020e871f4345ec2f9a8 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Fri, 27 Sep 2019 15:24:31 +0800 Subject: blk-mq: apply normal plugging for HDD Some HDD drive may expose multiple hardware queues, such as MegraRaid. Let's apply the normal plugging for such devices because sequential IO may benefit a lot from plug merging. Cc: Bart Van Assche Cc: Hannes Reinecke Cc: Dave Chinner Reviewed-by: Damien Le Moal Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-mq.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/block/blk-mq.c b/block/blk-mq.c index 5d4bef3c378d..ec791156e9cc 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1992,10 +1992,14 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) /* bypass scheduler for flush rq */ blk_insert_flush(rq); blk_mq_run_hw_queue(data.hctx, true); - } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs)) { + } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs || + !blk_queue_nonrot(q))) { /* * Use plugging if we have a ->commit_rqs() hook as well, as * we know the driver uses bd->last in a smart fashion. + * + * Use normal plugging if this disk is slow HDD, as sequential + * IO may benefit a lot from plug merging. */ unsigned int request_count = plug->rq_count; struct request *last = NULL; -- cgit v1.2.3 From b178a3f68128383a1ad2988464f0e964712563f6 Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Fri, 27 Sep 2019 18:33:48 +0100 Subject: drm/i915: check for kernel_context Explosions during early driver init on the error path. Make sure we fail gracefully. 
[ 9547.672258] BUG: kernel NULL pointer dereference, address: 000000000000007c [ 9547.672288] #PF: supervisor read access in kernel mode [ 9547.672292] #PF: error_code(0x0000) - not-present page [ 9547.672296] PGD 8000000846b41067 P4D 8000000846b41067 PUD 797034067 PMD 0 [ 9547.672303] Oops: 0000 [#1] SMP PTI [ 9547.672307] CPU: 1 PID: 25634 Comm: i915_selftest Tainted: G U 5.3.0-rc8+ #73 [ 9547.672313] Hardware name: /NUC6i7KYB, BIOS KYSKLi70.86A.0050.2017.0831.1924 08/31/2017 [ 9547.672395] RIP: 0010:intel_context_unpin+0x9/0x100 [i915] [ 9547.672400] Code: 6b 60 00 e9 17 ff ff ff bd fc ff ff ff e9 7c ff ff ff 66 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 0f 1f 44 00 00 41 54 55 53 <8b> 47 7c 83 f8 01 74 26 8d 48 ff f0 0f b1 4f 7c 48 8d 57 7c 75 05 [ 9547.672413] RSP: 0018:ffffae8ac24ff878 EFLAGS: 00010246 [ 9547.672417] RAX: ffff944a1b7842d0 RBX: ffff944a1b784000 RCX: ffff944a12dd6fa8 [ 9547.672422] RDX: ffff944a1b7842c0 RSI: ffff944a12dd5328 RDI: 0000000000000000 [ 9547.672428] RBP: 0000000000000000 R08: ffff944a11e5d840 R09: 0000000000000000 [ 9547.672433] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000 [ 9547.672438] R13: ffffffffc11aaf00 R14: 00000000ffffffe4 R15: ffff944a0e29bf38 [ 9547.672443] FS: 00007fc259b88ac0(0000) GS:ffff944a1f880000(0000) knlGS:0000000000000000 [ 9547.672449] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 9547.672454] CR2: 000000000000007c CR3: 0000000853346003 CR4: 00000000003606e0 [ 9547.672459] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 9547.672464] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 9547.672469] Call Trace: [ 9547.672518] intel_engine_cleanup_common+0xe3/0x270 [i915] [ 9547.672567] execlists_destroy+0xe/0x30 [i915] [ 9547.672669] intel_engines_init+0x94/0xf0 [i915] [ 9547.672749] i915_gem_init+0x191/0x950 [i915] Signed-off-by: Matthew Auld Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190927173409.31175-2-matthew.auld@intel.com --- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index f451d5076bde..80fd072ac719 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -820,8 +820,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) if (engine->default_state) i915_gem_object_put(engine->default_state); - intel_context_unpin(engine->kernel_context); - intel_context_put(engine->kernel_context); + if (engine->kernel_context) { + intel_context_unpin(engine->kernel_context); + intel_context_put(engine->kernel_context); + } GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); intel_wa_list_free(&engine->ctx_wa_list); -- cgit v1.2.3 From a3f356b273f9b860cd28be1d4ec25cd2782cda0c Mon Sep 17 00:00:00 2001 From: Matthew Auld Date: Fri, 27 Sep 2019 18:33:49 +0100 Subject: drm/i915: simplify i915_gem_init_early i915_gem_init_early doesn't need to return anything. 
Signed-off-by: Matthew Auld Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190927173409.31175-3-matthew.auld@intel.com --- drivers/gpu/drm/i915/i915_drv.c | 5 +---- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_gem.c | 4 +--- 3 files changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index dd9613e45723..364f7d0b7e31 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -614,9 +614,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) intel_gt_init_early(&dev_priv->gt, dev_priv); - ret = i915_gem_init_early(dev_priv); - if (ret < 0) - goto err_gt; + i915_gem_init_early(dev_priv); /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(dev_priv); @@ -638,7 +636,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) err_gem: i915_gem_cleanup_early(dev_priv); -err_gt: intel_gt_driver_late_release(&dev_priv->gt); vlv_free_s0ix_state(dev_priv); err_workqueues: diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c0f6c1f72b62..337d8306416a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2243,7 +2243,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, int i915_gem_init_userptr(struct drm_i915_private *dev_priv); void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); void i915_gem_sanitize(struct drm_i915_private *i915); -int i915_gem_init_early(struct drm_i915_private *dev_priv); +void i915_gem_init_early(struct drm_i915_private *dev_priv); void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); int i915_gem_freeze(struct drm_i915_private *dev_priv); int i915_gem_freeze_late(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e2897a666225..3d3fda4cae99 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1533,7 +1533,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915) i915_gem_init__objects(i915); } -int i915_gem_init_early(struct drm_i915_private *dev_priv) +void i915_gem_init_early(struct drm_i915_private *dev_priv) { int err; @@ -1545,8 +1545,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv) err = i915_gemfs_init(dev_priv); if (err) DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err); - - return 0; } void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) -- cgit v1.2.3 From 6402939ec86eaf226c8b8ae00ed983936b164908 Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Tue, 17 Sep 2019 17:47:12 -0500 Subject: ieee802154: ca8210: prevent memory leak In ca8210_probe the allocated pdata needs to be assigned to spi_device->dev.platform_data before calling ca8210_get_platform_data. Othrwise when ca8210_get_platform_data fails pdata cannot be released. 
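(Illustration, not part of the patch: a minimal sketch of the ordering rule the fix relies on, using invented names — example_probe(), example_pdata, example_get_platform_data() — and assuming a shared error path that frees dev.platform_data. The allocation has to be attached to the device before the first call that can fail, otherwise that error path cannot reach it.)

#include <linux/slab.h>
#include <linux/spi/spi.h>

/* Invented placeholder type and helper, for illustration only. */
struct example_pdata {
	int reset_gpio;
};

static int example_get_platform_data(struct spi_device *spi,
				     struct example_pdata *pdata)
{
	/* stands in for any setup step that may fail */
	return pdata ? 0 : -EINVAL;
}

static int example_probe(struct spi_device *spi)
{
	struct example_pdata *pdata;
	int ret;

	pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	/* Attach before the first call that can fail ... */
	spi->dev.platform_data = pdata;

	ret = example_get_platform_data(spi, pdata);
	if (ret)
		goto error;

	return 0;

error:
	/* ... so the shared error path can always reach and free it. */
	kfree(spi->dev.platform_data);
	spi->dev.platform_data = NULL;
	return ret;
}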
Signed-off-by: Navid Emamdoost Link: https://lore.kernel.org/r/20190917224713.26371-1-navid.emamdoost@gmail.com Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/ca8210.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index b188fce3f641..658b399ac9ea 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -3152,12 +3152,12 @@ static int ca8210_probe(struct spi_device *spi_device) goto error; } + priv->spi->dev.platform_data = pdata; ret = ca8210_get_platform_data(priv->spi, pdata); if (ret) { dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n"); goto error; } - priv->spi->dev.platform_data = pdata; ret = ca8210_dev_com_init(priv); if (ret) { -- cgit v1.2.3 From 42b899fb9a3fe40e7b59e8815247a34973729d9a Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 27 Sep 2019 22:06:46 +0100 Subject: drm/i915/selftests: Do not try to sanitize mock HW If we are mocking the device, skip trying to sanitize the pm HW state. Signed-off-by: Chris Wilson Cc: Andi Shyti Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20190927210646.29664-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_gt_pm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 42f175d9b98c..29fa1dabbc2e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -137,7 +137,8 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force) void intel_gt_pm_disable(struct intel_gt *gt) { - intel_sanitize_gt_powersave(gt->i915); + if (!is_mock_gt(gt)) + intel_sanitize_gt_powersave(gt->i915); } void intel_gt_pm_fini(struct intel_gt *gt) -- cgit v1.2.3 From 260e6b712769d74304093a699796ebf134aef095 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 27 Sep 2019 22:17:47 +0100 Subject: drm/i915: Pass intel_gt to has-reset? As we execute GPU resets on a gt/ basis, and use the intel_gt as the primary for all other reset functions, also use it for the has-reset? predicates. Gradually simplifying the churn of pointers. 
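(Illustration, not from the patch: the shape of the refactor with invented types. A predicate that only reads GT state takes a const pointer to the narrower structure, which documents that it does not modify it and lets callers pass the gt they already hold instead of reaching back through the device.)

#include <linux/errno.h>
#include <linux/types.h>

struct example_gt {
	bool has_engine_reset;
};

struct example_device {
	struct example_gt gt;
};

static bool example_has_reset_engine(const struct example_gt *gt)
{
	return gt->has_engine_reset;
}

static int example_caller(struct example_device *dev)
{
	/* callers already hold the gt; no dev_priv plumbing needed */
	if (!example_has_reset_engine(&dev->gt))
		return -ENODEV;

	return 0;	/* proceed with the engine-reset path */
}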
Signed-off-by: Chris Wilson Cc: Andi Shyti Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20190927211749.2181-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/display/intel_display.c | 2 +- drivers/gpu/drm/i915/gt/intel_reset.c | 21 +++++++++++++-------- drivers/gpu/drm/i915/gt/intel_reset.h | 5 ++--- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 12 ++++++------ drivers/gpu/drm/i915/gt/selftest_lrc.c | 2 +- drivers/gpu/drm/i915/gt/selftest_reset.c | 4 ++-- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 8 ++++---- drivers/gpu/drm/i915/i915_getparam.c | 4 ++-- 8 files changed, 31 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index bbe088b9d057..f1328c08f4ad 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -4273,7 +4273,7 @@ __intel_display_resume(struct drm_device *dev, static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) { return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && - intel_has_gpu_reset(dev_priv)); + intel_has_gpu_reset(&dev_priv->gt)); } void intel_prepare_reset(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index d08226f5bea5..ea5cf3a28fbe 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -546,8 +546,10 @@ typedef int (*reset_func)(struct intel_gt *, intel_engine_mask_t engine_mask, unsigned int retry); -static reset_func intel_get_gpu_reset(struct drm_i915_private *i915) +static reset_func intel_get_gpu_reset(const struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; + if (INTEL_GEN(i915) >= 8) return gen8_reset_engines; else if (INTEL_GEN(i915) >= 6) @@ -571,7 +573,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) int ret = -ETIMEDOUT; int retry; - reset = intel_get_gpu_reset(gt->i915); + reset = intel_get_gpu_reset(gt); if (!reset) return -ENODEV; @@ -591,17 +593,20 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) return ret; } -bool intel_has_gpu_reset(struct drm_i915_private *i915) +bool intel_has_gpu_reset(const struct intel_gt *gt) { if (!i915_modparams.reset) return NULL; - return intel_get_gpu_reset(i915); + return intel_get_gpu_reset(gt); } -bool intel_has_reset_engine(struct drm_i915_private *i915) +bool intel_has_reset_engine(const struct intel_gt *gt) { - return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2; + if (i915_modparams.reset < 2) + return false; + + return INTEL_INFO(gt->i915)->has_reset_engine; } int intel_reset_guc(struct intel_gt *gt) @@ -958,7 +963,7 @@ void intel_gt_reset(struct intel_gt *gt, awake = reset_prepare(gt); - if (!intel_has_gpu_reset(gt->i915)) { + if (!intel_has_gpu_reset(gt)) { if (i915_modparams.reset) dev_err(gt->i915->drm.dev, "GPU reset not supported\n"); else @@ -1179,7 +1184,7 @@ void intel_gt_handle_error(struct intel_gt *gt, * Try engine reset when available. We fall back to full reset if * single reset fails. 
*/ - if (intel_has_reset_engine(gt->i915) && !intel_gt_is_wedged(gt)) { + if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) { for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); if (test_and_set_bit(I915_RESET_ENGINE + engine->id, diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h index 0b6ff1ee7f06..8e8d5f761166 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.h +++ b/drivers/gpu/drm/i915/gt/intel_reset.h @@ -14,7 +14,6 @@ #include "intel_engine_types.h" #include "intel_reset_types.h" -struct drm_i915_private; struct i915_request; struct intel_engine_cs; struct intel_gt; @@ -80,7 +79,7 @@ static inline bool __intel_reset_failed(const struct intel_reset *reset) return unlikely(test_bit(I915_WEDGED, &reset->flags)); } -bool intel_has_gpu_reset(struct drm_i915_private *i915); -bool intel_has_reset_engine(struct drm_i915_private *i915); +bool intel_has_gpu_reset(const struct intel_gt *gt); +bool intel_has_reset_engine(const struct intel_gt *gt); #endif /* I915_RESET_H */ diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index a0098fc35921..9c0c8441c22a 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -458,7 +458,7 @@ static int igt_reset_nop_engine(void *arg) /* Check that we can engine-reset during non-user portions */ - if (!intel_has_reset_engine(gt->i915)) + if (!intel_has_reset_engine(gt)) return 0; file = mock_file(gt->i915); @@ -559,7 +559,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) /* Check that we can issue an engine reset on an idle engine (no-op) */ - if (!intel_has_reset_engine(gt->i915)) + if (!intel_has_reset_engine(gt)) return 0; if (active) { @@ -791,7 +791,7 @@ static int __igt_reset_engines(struct intel_gt *gt, * with any other engine. 
*/ - if (!intel_has_reset_engine(gt->i915)) + if (!intel_has_reset_engine(gt)) return 0; if (flags & TEST_ACTIVE) { @@ -1547,7 +1547,7 @@ static int igt_handle_error(void *arg) /* Check that we can issue a global GPU and engine reset */ - if (!intel_has_reset_engine(gt->i915)) + if (!intel_has_reset_engine(gt)) return 0; if (!engine || !intel_engine_can_store_dword(engine)) @@ -1689,7 +1689,7 @@ static int igt_reset_engines_atomic(void *arg) /* Check that the engines resets are usable from atomic context */ - if (!intel_has_reset_engine(gt->i915)) + if (!intel_has_reset_engine(gt)) return 0; if (USES_GUC_SUBMISSION(gt->i915)) @@ -1746,7 +1746,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) bool saved_hangcheck; int err; - if (!intel_has_gpu_reset(gt->i915)) + if (!intel_has_gpu_reset(gt)) return 0; if (intel_gt_is_wedged(gt)) diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 22ea2e747064..93f2fcdc49bf 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -1310,7 +1310,7 @@ static int live_preempt_hang(void *arg) if (!HAS_LOGICAL_RING_PREEMPTION(i915)) return 0; - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(&i915->gt)) return 0; mutex_lock(&i915->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 00a4f60cdfd5..d79482db7fe8 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -112,7 +112,7 @@ static int igt_atomic_engine_reset(void *arg) /* Check that the resets are usable from atomic context */ - if (!intel_has_reset_engine(gt->i915)) + if (!intel_has_reset_engine(gt)) return 0; if (USES_GUC_SUBMISSION(gt->i915)) @@ -170,7 +170,7 @@ int intel_reset_live_selftests(struct drm_i915_private *i915) }; struct intel_gt *gt = &i915->gt; - if (!intel_has_gpu_reset(gt->i915)) + if (!intel_has_gpu_reset(gt)) return 0; if (intel_gt_is_wedged(gt)) diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 999a98f00494..d40ce0709bff 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -747,7 +747,7 @@ static int live_reset_whitelist(void *arg) igt_global_reset_lock(&i915->gt); - if (intel_has_reset_engine(i915)) { + if (intel_has_reset_engine(&i915->gt)) { err = check_whitelist_across_reset(engine, do_engine_reset, "engine"); @@ -755,7 +755,7 @@ static int live_reset_whitelist(void *arg) goto out; } - if (intel_has_gpu_reset(i915)) { + if (intel_has_gpu_reset(&i915->gt)) { err = check_whitelist_across_reset(engine, do_device_reset, "device"); @@ -1131,7 +1131,7 @@ live_gpu_reset_workarounds(void *arg) struct wa_lists lists; bool ok; - if (!intel_has_gpu_reset(i915)) + if (!intel_has_gpu_reset(&i915->gt)) return 0; ctx = kernel_context(i915); @@ -1178,7 +1178,7 @@ live_engine_reset_workarounds(void *arg) struct wa_lists lists; int ret = 0; - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(&i915->gt)) return 0; ctx = kernel_context(i915); diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index 5d9101376a3d..f4b3cbb1adce 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -79,8 +79,8 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, break; case I915_PARAM_HAS_GPU_RESET: value = i915_modparams.enable_hangcheck && - 
intel_has_gpu_reset(i915); - if (value && intel_has_reset_engine(i915)) + intel_has_gpu_reset(&i915->gt); + if (value && intel_has_reset_engine(&i915->gt)) value = 2; break; case I915_PARAM_HAS_RESOURCE_STREAMER: -- cgit v1.2.3 From 4e18ca703f2167d8bcb7f736271744131e3c390b Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 27 Sep 2019 22:17:48 +0100 Subject: drm/i915/selftests: Distinguish mock device from no wakeref On systems that have no runtime-pm, we mark the wakeref as being -1. We therefore cannot use that value for the mock-gt indicator, so opt for -ENODEV instead. The wakeref should never be an error value -- one hopes! Signed-off-by: Chris Wilson Cc: Andi Shyti Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20190927211749.2181-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_gt_pm.h | 2 +- drivers/gpu/drm/i915/selftests/mock_gem_device.c | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index ab794e853356..997770d3a968 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -57,7 +57,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt); static inline bool is_mock_gt(const struct intel_gt *gt) { - return I915_SELFTEST_ONLY(gt->awake == -1); + return I915_SELFTEST_ONLY(gt->awake == -ENODEV); } #endif /* INTEL_GT_PM_H */ diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 91f15fa728cd..2448067822af 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -182,6 +182,7 @@ struct drm_i915_private *mock_gem_device(void) i915_gem_init__mm(i915); intel_gt_init_early(&i915->gt, i915); atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */ + i915->gt.awake = -ENODEV; i915->wq = alloc_ordered_workqueue("mock", 0); if (!i915->wq) @@ -192,8 +193,6 @@ struct drm_i915_private *mock_gem_device(void) INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler); INIT_WORK(&i915->gem.idle_work, mock_idle_work_handler); - i915->gt.awake = -1; - intel_timelines_init(i915); mutex_lock(&i915->drm.struct_mutex); -- cgit v1.2.3 From 4abc6e7c917b1929d8fad2c2364b270cae5ed4ca Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 27 Sep 2019 22:17:49 +0100 Subject: drm/i915/selftests: Provide a mock GPU reset routine For those mock tests that may wish to pretend triggering a GPU reset and processing the cleanup. 
Signed-off-by: Chris Wilson Cc: Andi Shyti Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20190927211749.2181-3-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_reset.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index ea5cf3a28fbe..76938fa3a1b9 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -542,6 +542,13 @@ skip_reset: return ret; } +static int mock_reset(struct intel_gt *gt, + intel_engine_mask_t mask, + unsigned int retry) +{ + return 0; +} + typedef int (*reset_func)(struct intel_gt *, intel_engine_mask_t engine_mask, unsigned int retry); @@ -550,7 +557,9 @@ static reset_func intel_get_gpu_reset(const struct intel_gt *gt) { struct drm_i915_private *i915 = gt->i915; - if (INTEL_GEN(i915) >= 8) + if (is_mock_gt(gt)) + return mock_reset; + else if (INTEL_GEN(i915) >= 8) return gen8_reset_engines; else if (INTEL_GEN(i915) >= 6) return gen6_reset_engines; -- cgit v1.2.3 From 55d554f5d14071f7c2c5dbd88d0a2eb695c97d16 Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Thu, 26 Sep 2019 19:13:44 -0600 Subject: tools: bpf: Use !building_out_of_srctree to determine srctree make TARGETS=bpf kselftest fails with: Makefile:127: tools/build/Makefile.include: No such file or directory When the bpf tool make is invoked from tools Makefile, srctree is cleared and the current logic check for srctree equals to empty string to determine srctree location from CURDIR. When the build in invoked from selftests/bpf Makefile, the srctree is set to "." and the same logic used for srctree equals to empty is needed to determine srctree. Check building_out_of_srctree undefined as the condition for both cases to fix "make TARGETS=bpf kselftest" build failure. Signed-off-by: Shuah Khan Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20190927011344.4695-1-skhan@linuxfoundation.org --- tools/bpf/Makefile | 6 +++++- tools/lib/bpf/Makefile | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile index fbf5e4a0cb9c..5d1995fd369c 100644 --- a/tools/bpf/Makefile +++ b/tools/bpf/Makefile @@ -12,7 +12,11 @@ INSTALL ?= install CFLAGS += -Wall -O2 CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/include/uapi -I$(srctree)/include -ifeq ($(srctree),) +# This will work when bpf is built in tools env. where srctree +# isn't set and when invoked from selftests build, where srctree +# is set to ".". building_out_of_srctree is undefined for in srctree +# builds +ifndef building_out_of_srctree srctree := $(patsubst %/,%,$(dir $(CURDIR))) srctree := $(patsubst %/,%,$(dir $(srctree))) endif diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index c6f94cffe06e..20772663d3e1 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -8,7 +8,11 @@ LIBBPF_MAJOR_VERSION := $(firstword $(subst ., ,$(LIBBPF_VERSION))) MAKEFLAGS += --no-print-directory -ifeq ($(srctree),) +# This will work when bpf is built in tools env. where srctree +# isn't set and when invoked from selftests build, where srctree +# is a ".". 
building_out_of_srctree is undefined for in srctree +# builds +ifndef building_out_of_srctree srctree := $(patsubst %/,%,$(dir $(CURDIR))) srctree := $(patsubst %/,%,$(dir $(srctree))) srctree := $(patsubst %/,%,$(dir $(srctree))) -- cgit v1.2.3 From d7d44b6fe40a98e960be92ea8617954c2596d140 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 9 Sep 2019 22:33:57 +0200 Subject: drm/tilcdc: include linux/pinctrl/consumer.h again This was apparently dropped by accident in a recent cleanup, causing a build failure in some configurations now: drivers/gpu/drm/tilcdc/tilcdc_tfp410.c:296:12: error: implicit declaration of function 'devm_pinctrl_get_select_default' [-Werror,-Wimplicit-function-declaration] Fixes: fcb57664172e ("drm/tilcdc: drop use of drmP.h") Signed-off-by: Arnd Bergmann Reviewed-by: Laurent Pinchart Signed-off-by: Jyri Sarha Link: https://patchwork.freedesktop.org/patch/msgid/02b03f74cf941f52a941a36bdc8dabb4a69fd87e.1569852588.git.jsarha@ti.com Acked-by: Sam Ravnborg --- drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c index 525dc1c0f1c1..530edb3b51cc 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include -- cgit v1.2.3 From f90ec6cdf674248dcad85bf9af6e064bf472b841 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 30 Sep 2019 11:54:50 +0300 Subject: ARM: dts: am4372: Set memory bandwidth limit for DISPC Set memory bandwidth limit to filter out resolutions above 720p@60Hz to avoid underflow errors due to the bandwidth needs of higher resolutions. am43xx can not provide enough bandwidth to DISPC to correctly handle 'high' resolutions. Signed-off-by: Peter Ujfalusi Signed-off-by: Tomi Valkeinen Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am4372.dtsi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index 848e2a8884e2..14bbc438055f 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -337,6 +337,8 @@ ti,hwmods = "dss_dispc"; clocks = <&disp_clk>; clock-names = "fck"; + + max-memory-bandwidth = <230000000>; }; rfbi: rfbi@4832a800 { -- cgit v1.2.3 From 50d16d44cce45f172bc4b27dcd353b1107aa7429 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Mon, 30 Sep 2019 15:49:19 +0100 Subject: drm/i915/selftests: Exercise context switching in parallel We currently test context switching on each engine as a basic stress test (just verifying that nothing explodes if we execute 2 requests from different contexts sequentially). What we have not tested is what happens if we try and do so on all available engines simultaneously, putting our SW and the HW under the maximal stress. v2: Clone the set of engines from the first context into the secondary contexts. 
Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190930144919.27992-1-chris@chris-wilson.co.uk --- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 225 +++++++++++++++++++++ 1 file changed, 225 insertions(+) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index dc25bcc3e372..0f4d0644a480 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -156,6 +156,230 @@ out_unlock: return err; } +struct parallel_switch { + struct task_struct *tsk; + struct intel_context *ce[2]; +}; + +static int __live_parallel_switch1(void *data) +{ + struct parallel_switch *arg = data; + struct drm_i915_private *i915 = arg->ce[0]->engine->i915; + IGT_TIMEOUT(end_time); + unsigned long count; + + count = 0; + do { + struct i915_request *rq = NULL; + int err, n; + + for (n = 0; n < ARRAY_SIZE(arg->ce); n++) { + i915_request_put(rq); + + mutex_lock(&i915->drm.struct_mutex); + rq = i915_request_create(arg->ce[n]); + if (IS_ERR(rq)) { + mutex_unlock(&i915->drm.struct_mutex); + return PTR_ERR(rq); + } + + i915_request_get(rq); + i915_request_add(rq); + mutex_unlock(&i915->drm.struct_mutex); + } + + err = 0; + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -ETIME; + i915_request_put(rq); + if (err) + return err; + + count++; + } while (!__igt_timeout(end_time, NULL)); + + pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count); + return 0; +} + +static int __live_parallel_switchN(void *data) +{ + struct parallel_switch *arg = data; + struct drm_i915_private *i915 = arg->ce[0]->engine->i915; + IGT_TIMEOUT(end_time); + unsigned long count; + int n; + + count = 0; + do { + for (n = 0; n < ARRAY_SIZE(arg->ce); n++) { + struct i915_request *rq; + + mutex_lock(&i915->drm.struct_mutex); + rq = i915_request_create(arg->ce[n]); + if (IS_ERR(rq)) { + mutex_unlock(&i915->drm.struct_mutex); + return PTR_ERR(rq); + } + + i915_request_add(rq); + mutex_unlock(&i915->drm.struct_mutex); + } + + count++; + } while (!__igt_timeout(end_time, NULL)); + + pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count); + return 0; +} + +static int live_parallel_switch(void *arg) +{ + struct drm_i915_private *i915 = arg; + static int (* const func[])(void *arg) = { + __live_parallel_switch1, + __live_parallel_switchN, + NULL, + }; + struct parallel_switch *data = NULL; + struct i915_gem_engines *engines; + struct i915_gem_engines_iter it; + int (* const *fn)(void *arg); + struct i915_gem_context *ctx; + struct intel_context *ce; + struct drm_file *file; + int n, m, count; + int err = 0; + + /* + * Check we can process switches on all engines simultaneously. 
+ */ + + if (!DRIVER_CAPS(i915)->has_logical_contexts) + return 0; + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + mutex_lock(&i915->drm.struct_mutex); + + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_locked; + } + + engines = i915_gem_context_lock_engines(ctx); + count = engines->num_engines; + + data = kcalloc(count, sizeof(*data), GFP_KERNEL); + if (!data) { + i915_gem_context_unlock_engines(ctx); + err = -ENOMEM; + goto out_locked; + } + + m = 0; /* Use the first context as our template for the engines */ + for_each_gem_engine(ce, engines, it) { + err = intel_context_pin(ce); + if (err) { + i915_gem_context_unlock_engines(ctx); + goto out_locked; + } + data[m++].ce[0] = intel_context_get(ce); + } + i915_gem_context_unlock_engines(ctx); + + /* Clone the same set of engines into the other contexts */ + for (n = 1; n < ARRAY_SIZE(data->ce); n++) { + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_locked; + } + + for (m = 0; m < count; m++) { + if (!data[m].ce[0]) + continue; + + ce = intel_context_create(ctx, data[m].ce[0]->engine); + if (IS_ERR(ce)) + goto out_locked; + + err = intel_context_pin(ce); + if (err) { + intel_context_put(ce); + goto out_locked; + } + + data[m].ce[n] = ce; + } + } + + mutex_unlock(&i915->drm.struct_mutex); + + for (fn = func; !err && *fn; fn++) { + struct igt_live_test t; + int n; + + mutex_lock(&i915->drm.struct_mutex); + err = igt_live_test_begin(&t, i915, __func__, ""); + mutex_unlock(&i915->drm.struct_mutex); + if (err) + break; + + for (n = 0; n < count; n++) { + if (!data[n].ce[0]) + continue; + + data[n].tsk = kthread_run(*fn, &data[n], + "igt/parallel:%s", + data[n].ce[0]->engine->name); + if (IS_ERR(data[n].tsk)) { + err = PTR_ERR(data[n].tsk); + break; + } + get_task_struct(data[n].tsk); + } + + for (n = 0; n < count; n++) { + int status; + + if (IS_ERR_OR_NULL(data[n].tsk)) + continue; + + status = kthread_stop(data[n].tsk); + if (status && !err) + err = status; + + put_task_struct(data[n].tsk); + data[n].tsk = NULL; + } + + mutex_lock(&i915->drm.struct_mutex); + if (igt_live_test_end(&t)) + err = -EIO; + mutex_unlock(&i915->drm.struct_mutex); + } + + mutex_lock(&i915->drm.struct_mutex); +out_locked: + for (n = 0; n < count; n++) { + for (m = 0; m < ARRAY_SIZE(data->ce); m++) { + if (!data[n].ce[m]) + continue; + + intel_context_unpin(data[n].ce[m]); + intel_context_put(data[n].ce[m]); + } + } + mutex_unlock(&i915->drm.struct_mutex); + kfree(data); + mock_file_free(i915, file); + return err; +} + static unsigned long real_page_count(struct drm_i915_gem_object *obj) { return huge_gem_object_phys_size(obj) >> PAGE_SHIFT; @@ -1681,6 +1905,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_nop_switch), + SUBTEST(live_parallel_switch), SUBTEST(igt_ctx_exec), SUBTEST(igt_ctx_readonly), SUBTEST(igt_ctx_sseu), -- cgit v1.2.3 From 1d6f1d16d3a36351a6718378790497f1ba3b5378 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 27 Sep 2019 17:03:35 +0100 Subject: drm/i915/gt: Only unwedge if we can reset first MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Unwedging the GPU requires a successful GPU reset before we restore the default submission, or else we may see residual context switch events that we were not expecting. 
v2: Pull in the special-case reset_clobbers_display, and explain why it should be safe in the context of unwedging. v3: Just forget all about resets before unwedging if it will clobber the display; risk it all. Reported-by: Janusz Krzysztofik Signed-off-by: Chris Wilson Cc: Janusz Krzysztofik Cc: Daniele Ceraolo Spurio Cc: Ville Syrjälä Reviewed-by: Daniele Ceraolo Spurio #v1 Link: https://patchwork.freedesktop.org/patch/msgid/20190927160335.10622-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_reset.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 76938fa3a1b9..e189897e8797 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -821,6 +821,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) struct intel_gt_timelines *timelines = >->timelines; struct intel_timeline *tl; unsigned long flags; + bool ok; if (!test_bit(I915_WEDGED, >->reset.flags)) return true; @@ -867,7 +868,12 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) } spin_unlock_irqrestore(&timelines->lock, flags); - intel_gt_sanitize(gt, false); + /* We must reset pending GPU events before restoring our submission */ + ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */ + if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) + ok = __intel_gt_reset(gt, ALL_ENGINES) == 0; + if (!ok) + return false; /* * Undo nop_submit_request. We prevent all new i915 requests from -- cgit v1.2.3 From 833b45de69a6016c4b0cebe6765d526a31a81580 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 30 Sep 2019 18:48:44 +0200 Subject: kvm: x86, powerpc: do not allow clearing largepages debugfs entry The largepages debugfs entry is incremented/decremented as shadow pages are created or destroyed. Clearing it will result in an underflow, which is harmless to KVM but ugly (and could be misinterpreted by tools that use debugfs information), so make this particular statistic read-only. Cc: kvm-ppc@vger.kernel.org Signed-off-by: Paolo Bonzini --- arch/powerpc/kvm/book3s.c | 8 ++++---- arch/x86/kvm/x86.c | 6 +++--- include/linux/kvm_host.h | 2 ++ virt/kvm/kvm_main.c | 10 +++++++--- 4 files changed, 16 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index d7fcdfa7fee4..ec2547cc5ecb 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -36,8 +36,8 @@ #include "book3s.h" #include "trace.h" -#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM -#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU +#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ +#define VCPU_STAT(x, ...) 
offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ /* #define EXIT_DEBUG */ @@ -69,8 +69,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "pthru_all", VCPU_STAT(pthru_all) }, { "pthru_host", VCPU_STAT(pthru_host) }, { "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) }, - { "largepages_2M", VM_STAT(num_2M_pages) }, - { "largepages_1G", VM_STAT(num_1G_pages) }, + { "largepages_2M", VM_STAT(num_2M_pages, .mode = 0444) }, + { "largepages_1G", VM_STAT(num_1G_pages, .mode = 0444) }, { NULL } }; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 180c7e88577a..8072acaaf028 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -92,8 +92,8 @@ u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA)); static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); #endif -#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM -#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU +#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ +#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \ KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) @@ -212,7 +212,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, { "mmu_unsync", VM_STAT(mmu_unsync) }, { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, - { "largepages", VM_STAT(lpages) }, + { "largepages", VM_STAT(lpages, .mode = 0444) }, { "max_mmu_page_hash_collisions", VM_STAT(max_mmu_page_hash_collisions) }, { NULL } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index fcb46b3374c6..719fc3e15ea4 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1090,6 +1090,7 @@ enum kvm_stat_kind { struct kvm_stat_data { int offset; + int mode; struct kvm *kvm; }; @@ -1097,6 +1098,7 @@ struct kvm_stats_debugfs_item { const char *name; int offset; enum kvm_stat_kind kind; + int mode; }; extern struct kvm_stats_debugfs_item debugfs_entries[]; extern struct dentry *kvm_debugfs_dir; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index e6de3159e682..fd68fbe0a75d 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -617,8 +617,9 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd) stat_data->kvm = kvm; stat_data->offset = p->offset; + stat_data->mode = p->mode ? p->mode : 0644; kvm->debugfs_stat_data[p - debugfs_entries] = stat_data; - debugfs_create_file(p->name, 0644, kvm->debugfs_dentry, + debugfs_create_file(p->name, stat_data->mode, kvm->debugfs_dentry, stat_data, stat_fops_per_vm[p->kind]); } return 0; @@ -3929,7 +3930,9 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file, if (!refcount_inc_not_zero(&stat_data->kvm->users_count)) return -ENOENT; - if (simple_attr_open(inode, file, get, set, fmt)) { + if (simple_attr_open(inode, file, get, + stat_data->mode & S_IWUGO ? set : NULL, + fmt)) { kvm_put_kvm(stat_data->kvm); return -ENOMEM; } @@ -4177,7 +4180,8 @@ static void kvm_init_debug(void) kvm_debugfs_num_entries = 0; for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) { - debugfs_create_file(p->name, 0644, kvm_debugfs_dir, + int mode = p->mode ? 
p->mode : 0644; + debugfs_create_file(p->name, mode, kvm_debugfs_dir, (void *)(long)p->offset, stat_fops[p->kind]); } -- cgit v1.2.3 From 7ae6d93c8f052b7a77ba56ed0f654e22a2876739 Mon Sep 17 00:00:00 2001 From: Michal Vokáč Date: Thu, 26 Sep 2019 10:59:17 +0200 Subject: net: dsa: qca8k: Use up to 7 ports for all operations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The QCA8K family supports up to 7 ports. So use the existing QCA8K_NUM_PORTS define to allocate the switch structure and limit all operations with the switch ports. This was not an issue until commit 0394a63acfe2 ("net: dsa: enable and disable all ports") disabled all unused ports. Since the unused ports 7-11 are outside of the correct register range on this switch some registers were rewritten with invalid content. Fixes: 6b93fb46480a ("net-next: dsa: add new driver for qca8xxx family") Fixes: a0c02161ecfc ("net: dsa: variable number of ports") Fixes: 0394a63acfe2 ("net: dsa: enable and disable all ports") Signed-off-by: Michal Vokáč Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 684aa51684db..b00274caae4f 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -705,7 +705,7 @@ qca8k_setup(struct dsa_switch *ds) BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S); /* Setup connection between CPU port & user ports */ - for (i = 0; i < DSA_MAX_PORTS; i++) { + for (i = 0; i < QCA8K_NUM_PORTS; i++) { /* CPU port gets connected to all user ports of the switch */ if (dsa_is_cpu_port(ds, i)) { qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT), @@ -1077,7 +1077,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) if (id != QCA8K_ID_QCA8337) return -ENODEV; - priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS); + priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS); if (!priv->ds) return -ENOMEM; -- cgit v1.2.3 From e9789c7cc182484fc031fd88097eb14cb26c4596 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 26 Sep 2019 18:24:43 -0700 Subject: sch_cbq: validate TCA_CBQ_WRROPT to avoid crash syzbot reported a crash in cbq_normalize_quanta() caused by an out of range cl->priority. iproute2 enforces this check, but malicious users do not. 
kasan: CONFIG_KASAN_INLINE enabled kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] SMP KASAN PTI Modules linked in: CPU: 1 PID: 26447 Comm: syz-executor.1 Not tainted 5.3+ #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 RIP: 0010:cbq_normalize_quanta.part.0+0x1fd/0x430 net/sched/sch_cbq.c:902 RSP: 0018:ffff8801a5c333b0 EFLAGS: 00010206 RAX: 0000000020000003 RBX: 00000000fffffff8 RCX: ffffc9000712f000 RDX: 00000000000043bf RSI: ffffffff83be8962 RDI: 0000000100000018 RBP: ffff8801a5c33420 R08: 000000000000003a R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: 00000000000002ef R13: ffff88018da95188 R14: dffffc0000000000 R15: 0000000000000015 FS: 00007f37d26b1700(0000) GS:ffff8801dad00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000004c7cec CR3: 00000001bcd0a006 CR4: 00000000001626f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: [] cbq_normalize_quanta include/net/pkt_sched.h:27 [inline] [] cbq_addprio net/sched/sch_cbq.c:1097 [inline] [] cbq_set_wrr+0x2d7/0x450 net/sched/sch_cbq.c:1115 [] cbq_change_class+0x987/0x225b net/sched/sch_cbq.c:1537 [] tc_ctl_tclass+0x555/0xcd0 net/sched/sch_api.c:2329 [] rtnetlink_rcv_msg+0x485/0xc10 net/core/rtnetlink.c:5248 [] netlink_rcv_skb+0x17a/0x460 net/netlink/af_netlink.c:2510 [] rtnetlink_rcv+0x1d/0x30 net/core/rtnetlink.c:5266 [] netlink_unicast_kernel net/netlink/af_netlink.c:1324 [inline] [] netlink_unicast+0x536/0x720 net/netlink/af_netlink.c:1350 [] netlink_sendmsg+0x89a/0xd50 net/netlink/af_netlink.c:1939 [] sock_sendmsg_nosec net/socket.c:673 [inline] [] sock_sendmsg+0x12e/0x170 net/socket.c:684 [] ___sys_sendmsg+0x81d/0x960 net/socket.c:2359 [] __sys_sendmsg+0x105/0x1d0 net/socket.c:2397 [] SYSC_sendmsg net/socket.c:2406 [inline] [] SyS_sendmsg+0x29/0x30 net/socket.c:2404 [] do_syscall_64+0x528/0x770 arch/x86/entry/common.c:305 [] entry_SYSCALL_64_after_hwframe+0x42/0xb7 Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Eric Dumazet Reported-by: syzbot Signed-off-by: David S. 
Miller --- net/sched/sch_cbq.c | 43 +++++++++++++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 06c7a2da21bc..39b427dc7512 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -1127,6 +1127,33 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = { [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) }, }; +static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1], + struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + int err; + + if (!opt) { + NL_SET_ERR_MSG(extack, "CBQ options are required for this operation"); + return -EINVAL; + } + + err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, + cbq_policy, extack); + if (err < 0) + return err; + + if (tb[TCA_CBQ_WRROPT]) { + const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]); + + if (wrr->priority > TC_CBQ_MAXPRIO) { + NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO"); + err = -EINVAL; + } + } + return err; +} + static int cbq_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { @@ -1139,13 +1166,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt, hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); q->delay_timer.function = cbq_undelay; - if (!opt) { - NL_SET_ERR_MSG(extack, "CBQ options are required for this operation"); - return -EINVAL; - } - - err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy, - extack); + err = cbq_opt_parse(tb, opt, extack); if (err < 0) return err; @@ -1464,13 +1485,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t struct cbq_class *parent; struct qdisc_rate_table *rtab = NULL; - if (!opt) { - NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing"); - return -EINVAL; - } - - err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy, - extack); + err = cbq_opt_parse(tb, opt, extack); if (err < 0) return err; -- cgit v1.2.3 From 0e141f757b2c78c983df893e9993313e2dc21e38 Mon Sep 17 00:00:00 2001 From: Haishuang Yan Date: Fri, 27 Sep 2019 14:58:20 +0800 Subject: erspan: remove the incorrect mtu limit for erspan erspan driver calls ether_setup(), after commit 61e84623ace3 ("net: centralize net_device min/max MTU checking"), the range of mtu is [min_mtu, max_mtu], which is [68, 1500] by default. It causes the dev mtu of the erspan device to not be greater than 1500, this limit value is not correct for ipgre tap device. Tested: Before patch: # ip link set erspan0 mtu 1600 Error: mtu greater than device maximum. After patch: # ip link set erspan0 mtu 1600 # ip -d link show erspan0 21: erspan0@NONE: mtu 1600 qdisc noop state DOWN mode DEFAULT group default qlen 1000 link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff promiscuity 0 minmtu 68 maxmtu 0 Fixes: 61e84623ace3 ("net: centralize net_device min/max MTU checking") Signed-off-by: Haishuang Yan Signed-off-by: David S. 
Miller --- net/ipv4/ip_gre.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index a53a543fe055..52690bb3e40f 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1446,6 +1446,7 @@ static void erspan_setup(struct net_device *dev) struct ip_tunnel *t = netdev_priv(dev); ether_setup(dev); + dev->max_mtu = 0; dev->netdev_ops = &erspan_netdev_ops; dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; -- cgit v1.2.3 From a304c0a60252fde9daccd9f8a07a598021f6956a Mon Sep 17 00:00:00 2001 From: Keerthy Date: Fri, 20 Sep 2019 13:29:46 +0530 Subject: arm64/ARM: configs: Change CONFIG_REMOTEPROC from m to y Commit 6334150e9a36 ("remoteproc: don't allow modular build") changes CONFIG_REMOTEPROC to a boolean from a tristate config option which inhibits all defconfigs marking CONFIG_REMOTEPROC as a module in compiling the remoteproc and dependent config options. So fix the configs to have CONFIG_REMOTEPROC built in. Link: https://lore.kernel.org/r/20190920075946.13282-5-j-keerthy@ti.com Fixes: 6334150e9a36 ("remoteproc: don't allow modular build") Signed-off-by: Keerthy Acked-by: Will Deacon [olof: Fixed up all 4 occurrances in this one commit] Signed-off-by: Olof Johansson --- arch/arm/configs/davinci_all_defconfig | 2 +- arch/arm/configs/multi_v7_defconfig | 2 +- arch/arm/configs/omap2plus_defconfig | 2 +- arch/arm64/configs/defconfig | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig index b34970ce6b31..01e3c0f4be92 100644 --- a/arch/arm/configs/davinci_all_defconfig +++ b/arch/arm/configs/davinci_all_defconfig @@ -228,7 +228,7 @@ CONFIG_RTC_DRV_OMAP=m CONFIG_DMADEVICES=y CONFIG_TI_EDMA=y CONFIG_COMMON_CLK_PWM=m -CONFIG_REMOTEPROC=m +CONFIG_REMOTEPROC=y CONFIG_DA8XX_REMOTEPROC=m CONFIG_MEMORY=y CONFIG_TI_AEMIF=m diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 13ba53286901..198de8e36d92 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -933,7 +933,7 @@ CONFIG_BCM2835_MBOX=y CONFIG_ROCKCHIP_IOMMU=y CONFIG_TEGRA_IOMMU_GART=y CONFIG_TEGRA_IOMMU_SMMU=y -CONFIG_REMOTEPROC=m +CONFIG_REMOTEPROC=y CONFIG_ST_REMOTEPROC=m CONFIG_RPMSG_VIRTIO=m CONFIG_ASPEED_LPC_CTRL=m diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 64eb896907bf..af4069476582 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -481,7 +481,7 @@ CONFIG_RTC_DRV_OMAP=m CONFIG_RTC_DRV_CPCAP=m CONFIG_DMADEVICES=y CONFIG_OMAP_IOMMU=y -CONFIG_REMOTEPROC=m +CONFIG_REMOTEPROC=y CONFIG_OMAP_REMOTEPROC=m CONFIG_WKUP_M3_RPROC=m CONFIG_SOC_TI=y diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 8e05c39eab08..c9a867ac32d4 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -723,7 +723,7 @@ CONFIG_TEGRA_IOMMU_SMMU=y CONFIG_ARM_SMMU=y CONFIG_ARM_SMMU_V3=y CONFIG_QCOM_IOMMU=y -CONFIG_REMOTEPROC=m +CONFIG_REMOTEPROC=y CONFIG_QCOM_Q6V5_MSS=m CONFIG_QCOM_Q6V5_PAS=m CONFIG_QCOM_SYSMON=m -- cgit v1.2.3 From b1ba55cf1cfb9f3e0e00d743534684a25bf66d28 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Sep 2019 11:30:30 -0300 Subject: tools headers uapi: Sync asm-generic/mman-common.h with the kernel MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To pick the changes from: 1a4e58cce84e ("mm: introduce 
MADV_PAGEOUT") 9c276cc65a58 ("mm: introduce MADV_COLD") That result in these changes in the tools: $ tools/perf/trace/beauty/madvise_behavior.sh > before $ cp include/uapi/asm-generic/mman-common.h tools/include/uapi/asm-generic/mman-common.h $ git diff diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h index 63b1f506ea67..c160a5354eb6 100644 --- a/tools/include/uapi/asm-generic/mman-common.h +++ b/tools/include/uapi/asm-generic/mman-common.h @@ -67,6 +67,9 @@ #define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ #define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ +#define MADV_COLD 20 /* deactivate these pages */ +#define MADV_PAGEOUT 21 /* reclaim these pages */ + /* compatibility flags */ #define MAP_FILE 0 $ tools/perf/trace/beauty/madvise_behavior.sh > after $ diff -u before after --- before 2019-09-27 11:29:43.346320100 -0300 +++ after 2019-09-27 11:30:03.838570439 -0300 @@ -16,6 +16,8 @@ [17] = "DODUMP", [18] = "WIPEONFORK", [19] = "KEEPONFORK", + [20] = "COLD", + [21] = "PAGEOUT", [100] = "HWPOISON", [101] = "SOFT_OFFLINE", }; $ I.e. now when madvise gets those behaviours as args, it will be able to translate from the number to a human readable string. This addresses the following perf build warning: Warning: Kernel ABI header at 'tools/include/uapi/asm-generic/mman-common.h' differs from latest version at 'include/uapi/asm-generic/mman-common.h' diff -u tools/include/uapi/asm-generic/mman-common.h include/uapi/asm-generic/mman-common.h Cc: Adrian Hunter Cc: Brendan Gregg Cc: Jiri Olsa Cc: Luis Cláudio Gonçalves Cc: Minchan Kim Cc: Namhyung Kim Link: https://lkml.kernel.org/n/tip-n40y6c4sa49p29q6sl8w3ufx@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/asm-generic/mman-common.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h index 63b1f506ea67..c160a5354eb6 100644 --- a/tools/include/uapi/asm-generic/mman-common.h +++ b/tools/include/uapi/asm-generic/mman-common.h @@ -67,6 +67,9 @@ #define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ #define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ +#define MADV_COLD 20 /* deactivate these pages */ +#define MADV_PAGEOUT 21 /* reclaim these pages */ + /* compatibility flags */ #define MAP_FILE 0 -- cgit v1.2.3 From 05f371f8c55d69e4c04db4473085303291e4e734 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Sep 2019 11:42:26 -0300 Subject: tools headers uapi: Sync linux/usbdevice_fs.h with the kernel sources MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To pick up the changes from: 4ed3350539aa ("USB: usbfs: Add a capability flag for runtime suspend") 7794f486ed0b ("usbfs: Add ioctls for runtime power management") This triggers these changes in the kernel sources, automagically supporting these new ioctls in the 'perf trace' beautifiers. Soon this will be used in things like filter expressions for tracepoints in 'perf record', 'perf trace', 'perf top', i.e. filter expressions will do a lookup to turn things like USBDEVFS_WAIT_FOR_RESUME into _IO('U', 35) before associating the tracepoint expression to tracepoint perf event. 
$ tools/perf/trace/beauty/usbdevfs_ioctl.sh > before $ cp include/uapi/linux/usbdevice_fs.h tools/include/uapi/linux/usbdevice_fs.h $ git diff diff --git a/tools/include/uapi/linux/usbdevice_fs.h b/tools/include/uapi/linux/usbdevice_fs.h index 78efe870c2b7..cf525cddeb94 100644 --- a/tools/include/uapi/linux/usbdevice_fs.h +++ b/tools/include/uapi/linux/usbdevice_fs.h @@ -158,6 +158,7 @@ struct usbdevfs_hub_portinfo { #define USBDEVFS_CAP_MMAP 0x20 #define USBDEVFS_CAP_DROP_PRIVILEGES 0x40 #define USBDEVFS_CAP_CONNINFO_EX 0x80 +#define USBDEVFS_CAP_SUSPEND 0x100 /* USBDEVFS_DISCONNECT_CLAIM flags & struct */ @@ -223,5 +224,8 @@ struct usbdevfs_streams { * extending size of the data returned. */ #define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len) +#define USBDEVFS_FORBID_SUSPEND _IO('U', 33) +#define USBDEVFS_ALLOW_SUSPEND _IO('U', 34) +#define USBDEVFS_WAIT_FOR_RESUME _IO('U', 35) #endif /* _UAPI_LINUX_USBDEVICE_FS_H */ $ tools/perf/trace/beauty/usbdevfs_ioctl.sh > after $ diff -u before after --- before 2019-09-27 11:41:50.634867620 -0300 +++ after 2019-09-27 11:42:07.453102978 -0300 @@ -24,6 +24,9 @@ [30] = "DROP_PRIVILEGES", [31] = "GET_SPEED", [32] = "CONNINFO_EX", + [33] = "FORBID_SUSPEND", + [34] = "ALLOW_SUSPEND", + [35] = "WAIT_FOR_RESUME", [3] = "RESETEP", [4] = "SETINTERFACE", [5] = "SETCONFIGURATION", $ This addresses the following perf build warning: Warning: Kernel ABI header at 'tools/include/uapi/linux/usbdevice_fs.h' differs from latest version at 'include/uapi/linux/usbdevice_fs.h' diff -u tools/include/uapi/linux/usbdevice_fs.h include/uapi/linux/usbdevice_fs.h Cc: Alan Stern Cc: Adrian Hunter Cc: Brendan Gregg Cc: Greg Kroah-Hartman Cc: Jiri Olsa Cc: Luis Cláudio Gonçalves Cc: Namhyung Kim Link: https://lkml.kernel.org/n/tip-x1rb109b9nfi7pukota82xhj@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/linux/usbdevice_fs.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/include/uapi/linux/usbdevice_fs.h b/tools/include/uapi/linux/usbdevice_fs.h index 78efe870c2b7..cf525cddeb94 100644 --- a/tools/include/uapi/linux/usbdevice_fs.h +++ b/tools/include/uapi/linux/usbdevice_fs.h @@ -158,6 +158,7 @@ struct usbdevfs_hub_portinfo { #define USBDEVFS_CAP_MMAP 0x20 #define USBDEVFS_CAP_DROP_PRIVILEGES 0x40 #define USBDEVFS_CAP_CONNINFO_EX 0x80 +#define USBDEVFS_CAP_SUSPEND 0x100 /* USBDEVFS_DISCONNECT_CLAIM flags & struct */ @@ -223,5 +224,8 @@ struct usbdevfs_streams { * extending size of the data returned. 
*/ #define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len) +#define USBDEVFS_FORBID_SUSPEND _IO('U', 33) +#define USBDEVFS_ALLOW_SUSPEND _IO('U', 34) +#define USBDEVFS_WAIT_FOR_RESUME _IO('U', 35) #endif /* _UAPI_LINUX_USBDEVICE_FS_H */ -- cgit v1.2.3 From 0ae4061223a3d097222cbec6599370e54db17731 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Sep 2019 12:01:26 -0300 Subject: tools headers uapi: Sync linux/fs.h with the kernel sources To pick the changes from: 78a1b96bcf7a ("fscrypt: add FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS ioctl") 23c688b54016 ("fscrypt: allow unprivileged users to add/remove keys for v2 policies") 5dae460c2292 ("fscrypt: v2 encryption policy support") 5a7e29924dac ("fscrypt: add FS_IOC_GET_ENCRYPTION_KEY_STATUS ioctl") b1c0ec3599f4 ("fscrypt: add FS_IOC_REMOVE_ENCRYPTION_KEY ioctl") 22d94f493bfb ("fscrypt: add FS_IOC_ADD_ENCRYPTION_KEY ioctl") 3b6df59bc4d2 ("fscrypt: use FSCRYPT_* definitions, not FS_*") 2336d0deb2d4 ("fscrypt: use FSCRYPT_ prefix for uapi constants") 7af0ab0d3aab ("fs, fscrypt: move uapi definitions to new header ") That don't trigger any changes in tooling, as it so far is used only for: $ grep -l 'fs\.h' tools/perf/trace/beauty/*.sh | xargs grep regex= tools/perf/trace/beauty/rename_flags.sh:regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+RENAME_([[:alnum:]_]+)[[:space:]]+\(1[[:space:]]*<<[[:space:]]*([[:xdigit:]]+)[[:space:]]*\)[[:space:]]*.*' tools/perf/trace/beauty/sync_file_range.sh:regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+SYNC_FILE_RANGE_([[:alnum:]_]+)[[:space:]]+([[:xdigit:]]+)[[:space:]]*.*' tools/perf/trace/beauty/usbdevfs_ioctl.sh:regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)(\(\w+\))?[[:space:]]+_IO[CWR]{0,2}\([[:space:]]*(_IOC_\w+,[[:space:]]*)?'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*" tools/perf/trace/beauty/usbdevfs_ioctl.sh:regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)[[:space:]]+_IO[WR]{0,2}\([[:space:]]*'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*" $ This silences this perf build warning: Warning: Kernel ABI header at 'tools/include/uapi/linux/fs.h' differs from latest version at 'include/uapi/linux/fs.h' diff -u tools/include/uapi/linux/fs.h include/uapi/linux/fs.h Cc: Adrian Hunter Cc: Eric Biggers Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lkml.kernel.org/n/tip-44g48exl9br9ba0t64chqb4i@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/linux/fs.h | 55 +---------- tools/include/uapi/linux/fscrypt.h | 181 +++++++++++++++++++++++++++++++++++++ tools/perf/check-headers.sh | 1 + 3 files changed, 186 insertions(+), 51 deletions(-) create mode 100644 tools/include/uapi/linux/fscrypt.h diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h index 2a616aa3f686..379a612f8f1d 100644 --- a/tools/include/uapi/linux/fs.h +++ b/tools/include/uapi/linux/fs.h @@ -13,6 +13,9 @@ #include #include #include +#ifndef __KERNEL__ +#include +#endif /* Use of MS_* flags within the kernel is restricted to core mount(2) code. 
*/ #if !defined(__KERNEL__) @@ -212,57 +215,6 @@ struct fsxattr { #define FS_IOC_GETFSLABEL _IOR(0x94, 49, char[FSLABEL_MAX]) #define FS_IOC_SETFSLABEL _IOW(0x94, 50, char[FSLABEL_MAX]) -/* - * File system encryption support - */ -/* Policy provided via an ioctl on the topmost directory */ -#define FS_KEY_DESCRIPTOR_SIZE 8 - -#define FS_POLICY_FLAGS_PAD_4 0x00 -#define FS_POLICY_FLAGS_PAD_8 0x01 -#define FS_POLICY_FLAGS_PAD_16 0x02 -#define FS_POLICY_FLAGS_PAD_32 0x03 -#define FS_POLICY_FLAGS_PAD_MASK 0x03 -#define FS_POLICY_FLAG_DIRECT_KEY 0x04 /* use master key directly */ -#define FS_POLICY_FLAGS_VALID 0x07 - -/* Encryption algorithms */ -#define FS_ENCRYPTION_MODE_INVALID 0 -#define FS_ENCRYPTION_MODE_AES_256_XTS 1 -#define FS_ENCRYPTION_MODE_AES_256_GCM 2 -#define FS_ENCRYPTION_MODE_AES_256_CBC 3 -#define FS_ENCRYPTION_MODE_AES_256_CTS 4 -#define FS_ENCRYPTION_MODE_AES_128_CBC 5 -#define FS_ENCRYPTION_MODE_AES_128_CTS 6 -#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */ -#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */ -#define FS_ENCRYPTION_MODE_ADIANTUM 9 - -struct fscrypt_policy { - __u8 version; - __u8 contents_encryption_mode; - __u8 filenames_encryption_mode; - __u8 flags; - __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE]; -}; - -#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy) -#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16]) -#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy) - -/* Parameters for passing an encryption key into the kernel keyring */ -#define FS_KEY_DESC_PREFIX "fscrypt:" -#define FS_KEY_DESC_PREFIX_SIZE 8 - -/* Structure that userspace passes to the kernel keyring */ -#define FS_MAX_KEY_SIZE 64 - -struct fscrypt_key { - __u32 mode; - __u8 raw[FS_MAX_KEY_SIZE]; - __u32 size; -}; - /* * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS) * @@ -306,6 +258,7 @@ struct fscrypt_key { #define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ #define FS_HUGE_FILE_FL 0x00040000 /* Reserved for ext4 */ #define FS_EXTENT_FL 0x00080000 /* Extents */ +#define FS_VERITY_FL 0x00100000 /* Verity protected inode */ #define FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */ #define FS_EOFBLOCKS_FL 0x00400000 /* Reserved for ext4 */ #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ diff --git a/tools/include/uapi/linux/fscrypt.h b/tools/include/uapi/linux/fscrypt.h new file mode 100644 index 000000000000..39ccfe9311c3 --- /dev/null +++ b/tools/include/uapi/linux/fscrypt.h @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * fscrypt user API + * + * These ioctls can be used on filesystems that support fscrypt. See the + * "User API" section of Documentation/filesystems/fscrypt.rst. + */ +#ifndef _UAPI_LINUX_FSCRYPT_H +#define _UAPI_LINUX_FSCRYPT_H + +#include + +/* Encryption policy flags */ +#define FSCRYPT_POLICY_FLAGS_PAD_4 0x00 +#define FSCRYPT_POLICY_FLAGS_PAD_8 0x01 +#define FSCRYPT_POLICY_FLAGS_PAD_16 0x02 +#define FSCRYPT_POLICY_FLAGS_PAD_32 0x03 +#define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03 +#define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04 +#define FSCRYPT_POLICY_FLAGS_VALID 0x07 + +/* Encryption algorithms */ +#define FSCRYPT_MODE_AES_256_XTS 1 +#define FSCRYPT_MODE_AES_256_CTS 4 +#define FSCRYPT_MODE_AES_128_CBC 5 +#define FSCRYPT_MODE_AES_128_CTS 6 +#define FSCRYPT_MODE_ADIANTUM 9 +#define __FSCRYPT_MODE_MAX 9 + +/* + * Legacy policy version; ad-hoc KDF and no key verification. 
+ * For new encrypted directories, use fscrypt_policy_v2 instead. + * + * Careful: the .version field for this is actually 0, not 1. + */ +#define FSCRYPT_POLICY_V1 0 +#define FSCRYPT_KEY_DESCRIPTOR_SIZE 8 +struct fscrypt_policy_v1 { + __u8 version; + __u8 contents_encryption_mode; + __u8 filenames_encryption_mode; + __u8 flags; + __u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; +}; +#define fscrypt_policy fscrypt_policy_v1 + +/* + * Process-subscribed "logon" key description prefix and payload format. + * Deprecated; prefer FS_IOC_ADD_ENCRYPTION_KEY instead. + */ +#define FSCRYPT_KEY_DESC_PREFIX "fscrypt:" +#define FSCRYPT_KEY_DESC_PREFIX_SIZE 8 +#define FSCRYPT_MAX_KEY_SIZE 64 +struct fscrypt_key { + __u32 mode; + __u8 raw[FSCRYPT_MAX_KEY_SIZE]; + __u32 size; +}; + +/* + * New policy version with HKDF and key verification (recommended). + */ +#define FSCRYPT_POLICY_V2 2 +#define FSCRYPT_KEY_IDENTIFIER_SIZE 16 +struct fscrypt_policy_v2 { + __u8 version; + __u8 contents_encryption_mode; + __u8 filenames_encryption_mode; + __u8 flags; + __u8 __reserved[4]; + __u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]; +}; + +/* Struct passed to FS_IOC_GET_ENCRYPTION_POLICY_EX */ +struct fscrypt_get_policy_ex_arg { + __u64 policy_size; /* input/output */ + union { + __u8 version; + struct fscrypt_policy_v1 v1; + struct fscrypt_policy_v2 v2; + } policy; /* output */ +}; + +/* + * v1 policy keys are specified by an arbitrary 8-byte key "descriptor", + * matching fscrypt_policy_v1::master_key_descriptor. + */ +#define FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR 1 + +/* + * v2 policy keys are specified by a 16-byte key "identifier" which the kernel + * calculates as a cryptographic hash of the key itself, + * matching fscrypt_policy_v2::master_key_identifier. + */ +#define FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER 2 + +/* + * Specifies a key, either for v1 or v2 policies. This doesn't contain the + * actual key itself; this is just the "name" of the key. 
+ */ +struct fscrypt_key_specifier { + __u32 type; /* one of FSCRYPT_KEY_SPEC_TYPE_* */ + __u32 __reserved; + union { + __u8 __reserved[32]; /* reserve some extra space */ + __u8 descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; + __u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]; + } u; +}; + +/* Struct passed to FS_IOC_ADD_ENCRYPTION_KEY */ +struct fscrypt_add_key_arg { + struct fscrypt_key_specifier key_spec; + __u32 raw_size; + __u32 __reserved[9]; + __u8 raw[]; +}; + +/* Struct passed to FS_IOC_REMOVE_ENCRYPTION_KEY */ +struct fscrypt_remove_key_arg { + struct fscrypt_key_specifier key_spec; +#define FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY 0x00000001 +#define FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS 0x00000002 + __u32 removal_status_flags; /* output */ + __u32 __reserved[5]; +}; + +/* Struct passed to FS_IOC_GET_ENCRYPTION_KEY_STATUS */ +struct fscrypt_get_key_status_arg { + /* input */ + struct fscrypt_key_specifier key_spec; + __u32 __reserved[6]; + + /* output */ +#define FSCRYPT_KEY_STATUS_ABSENT 1 +#define FSCRYPT_KEY_STATUS_PRESENT 2 +#define FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED 3 + __u32 status; +#define FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF 0x00000001 + __u32 status_flags; + __u32 user_count; + __u32 __out_reserved[13]; +}; + +#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy) +#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16]) +#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy) +#define FS_IOC_GET_ENCRYPTION_POLICY_EX _IOWR('f', 22, __u8[9]) /* size + version */ +#define FS_IOC_ADD_ENCRYPTION_KEY _IOWR('f', 23, struct fscrypt_add_key_arg) +#define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg) +#define FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS _IOWR('f', 25, struct fscrypt_remove_key_arg) +#define FS_IOC_GET_ENCRYPTION_KEY_STATUS _IOWR('f', 26, struct fscrypt_get_key_status_arg) + +/**********************************************************************/ + +/* old names; don't add anything new here! 
*/ +#ifndef __KERNEL__ +#define FS_KEY_DESCRIPTOR_SIZE FSCRYPT_KEY_DESCRIPTOR_SIZE +#define FS_POLICY_FLAGS_PAD_4 FSCRYPT_POLICY_FLAGS_PAD_4 +#define FS_POLICY_FLAGS_PAD_8 FSCRYPT_POLICY_FLAGS_PAD_8 +#define FS_POLICY_FLAGS_PAD_16 FSCRYPT_POLICY_FLAGS_PAD_16 +#define FS_POLICY_FLAGS_PAD_32 FSCRYPT_POLICY_FLAGS_PAD_32 +#define FS_POLICY_FLAGS_PAD_MASK FSCRYPT_POLICY_FLAGS_PAD_MASK +#define FS_POLICY_FLAG_DIRECT_KEY FSCRYPT_POLICY_FLAG_DIRECT_KEY +#define FS_POLICY_FLAGS_VALID FSCRYPT_POLICY_FLAGS_VALID +#define FS_ENCRYPTION_MODE_INVALID 0 /* never used */ +#define FS_ENCRYPTION_MODE_AES_256_XTS FSCRYPT_MODE_AES_256_XTS +#define FS_ENCRYPTION_MODE_AES_256_GCM 2 /* never used */ +#define FS_ENCRYPTION_MODE_AES_256_CBC 3 /* never used */ +#define FS_ENCRYPTION_MODE_AES_256_CTS FSCRYPT_MODE_AES_256_CTS +#define FS_ENCRYPTION_MODE_AES_128_CBC FSCRYPT_MODE_AES_128_CBC +#define FS_ENCRYPTION_MODE_AES_128_CTS FSCRYPT_MODE_AES_128_CTS +#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* removed */ +#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* removed */ +#define FS_ENCRYPTION_MODE_ADIANTUM FSCRYPT_MODE_ADIANTUM +#define FS_KEY_DESC_PREFIX FSCRYPT_KEY_DESC_PREFIX +#define FS_KEY_DESC_PREFIX_SIZE FSCRYPT_KEY_DESC_PREFIX_SIZE +#define FS_MAX_KEY_SIZE FSCRYPT_MAX_KEY_SIZE +#endif /* !__KERNEL__ */ + +#endif /* _UAPI_LINUX_FSCRYPT_H */ diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index e2e0f06c97d0..cea13cb987d0 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -8,6 +8,7 @@ include/uapi/drm/i915_drm.h include/uapi/linux/fadvise.h include/uapi/linux/fcntl.h include/uapi/linux/fs.h +include/uapi/linux/fscrypt.h include/uapi/linux/kcmp.h include/uapi/linux/kvm.h include/uapi/linux/in.h -- cgit v1.2.3 From b7ad6108484221f431372b94763b74e550d16c93 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 27 Sep 2019 12:18:21 -0300 Subject: tools headers kvm: Sync kvm headers with the kernel sources To pick the changes in: 200824f55eef ("KVM: s390: Disallow invalid bits in kvm_valid_regs and kvm_dirty_regs") 4a53d99dd0c2 ("KVM: VMX: Introduce exit reason for receiving INIT signal on guest-mode") 7396d337cfad ("KVM: x86: Return to userspace with internal error on unexpected exit reason") 92f35b751c71 ("KVM: arm/arm64: vgic: Allow more than 256 vcpus for KVM_IRQ_LINE") None of them trigger any changes in tooling, this time this is just to silence these perf build warnings: Warning: Kernel ABI header at 'tools/include/uapi/linux/kvm.h' differs from latest version at 'include/uapi/linux/kvm.h' diff -u tools/include/uapi/linux/kvm.h include/uapi/linux/kvm.h Warning: Kernel ABI header at 'tools/arch/x86/include/uapi/asm/vmx.h' differs from latest version at 'arch/x86/include/uapi/asm/vmx.h' diff -u tools/arch/x86/include/uapi/asm/vmx.h arch/x86/include/uapi/asm/vmx.h Warning: Kernel ABI header at 'tools/arch/s390/include/uapi/asm/kvm.h' differs from latest version at 'arch/s390/include/uapi/asm/kvm.h' diff -u tools/arch/s390/include/uapi/asm/kvm.h arch/s390/include/uapi/asm/kvm.h Warning: Kernel ABI header at 'tools/arch/arm/include/uapi/asm/kvm.h' differs from latest version at 'arch/arm/include/uapi/asm/kvm.h' diff -u tools/arch/arm/include/uapi/asm/kvm.h arch/arm/include/uapi/asm/kvm.h Warning: Kernel ABI header at 'tools/arch/arm64/include/uapi/asm/kvm.h' differs from latest version at 'arch/arm64/include/uapi/asm/kvm.h' diff -u tools/arch/arm64/include/uapi/asm/kvm.h arch/arm64/include/uapi/asm/kvm.h Cc: Adrian Hunter Cc: Janosch Frank Cc: Jiri 
Olsa Cc: Liran Alon Cc: Marc Zyngier Cc: Namhyung Kim Cc: Paolo Bonzini Cc: Thomas Huth Link: https://lkml.kernel.org/n/tip-akuugvvjxte26kzv23zp5d2z@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/arch/arm/include/uapi/asm/kvm.h | 4 +++- tools/arch/arm64/include/uapi/asm/kvm.h | 4 +++- tools/arch/s390/include/uapi/asm/kvm.h | 6 ++++++ tools/arch/x86/include/uapi/asm/vmx.h | 2 ++ tools/include/uapi/linux/kvm.h | 3 +++ 5 files changed, 17 insertions(+), 2 deletions(-) diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index a4217c1a5d01..2769360f195c 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h @@ -266,8 +266,10 @@ struct kvm_vcpu_events { #define KVM_DEV_ARM_ITS_CTRL_RESET 4 /* KVM_IRQ_LINE irq field index values */ +#define KVM_ARM_IRQ_VCPU2_SHIFT 28 +#define KVM_ARM_IRQ_VCPU2_MASK 0xf #define KVM_ARM_IRQ_TYPE_SHIFT 24 -#define KVM_ARM_IRQ_TYPE_MASK 0xff +#define KVM_ARM_IRQ_TYPE_MASK 0xf #define KVM_ARM_IRQ_VCPU_SHIFT 16 #define KVM_ARM_IRQ_VCPU_MASK 0xff #define KVM_ARM_IRQ_NUM_SHIFT 0 diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index 9a507716ae2f..67c21f9bdbad 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -325,8 +325,10 @@ struct kvm_vcpu_events { #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 /* KVM_IRQ_LINE irq field index values */ +#define KVM_ARM_IRQ_VCPU2_SHIFT 28 +#define KVM_ARM_IRQ_VCPU2_MASK 0xf #define KVM_ARM_IRQ_TYPE_SHIFT 24 -#define KVM_ARM_IRQ_TYPE_MASK 0xff +#define KVM_ARM_IRQ_TYPE_MASK 0xf #define KVM_ARM_IRQ_VCPU_SHIFT 16 #define KVM_ARM_IRQ_VCPU_MASK 0xff #define KVM_ARM_IRQ_NUM_SHIFT 0 diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h index 47104e5b47fd..436ec7636927 100644 --- a/tools/arch/s390/include/uapi/asm/kvm.h +++ b/tools/arch/s390/include/uapi/asm/kvm.h @@ -231,6 +231,12 @@ struct kvm_guest_debug_arch { #define KVM_SYNC_GSCB (1UL << 9) #define KVM_SYNC_BPBC (1UL << 10) #define KVM_SYNC_ETOKEN (1UL << 11) + +#define KVM_SYNC_S390_VALID_FIELDS \ + (KVM_SYNC_PREFIX | KVM_SYNC_GPRS | KVM_SYNC_ACRS | KVM_SYNC_CRS | \ + KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT | KVM_SYNC_VRS | KVM_SYNC_RICCB | \ + KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN) + /* length and alignment of the sdnx as a power of two */ #define SDNXC 8 #define SDNXL (1UL << SDNXC) diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h index f0b0c90dd398..f01950aa7fae 100644 --- a/tools/arch/x86/include/uapi/asm/vmx.h +++ b/tools/arch/x86/include/uapi/asm/vmx.h @@ -31,6 +31,7 @@ #define EXIT_REASON_EXCEPTION_NMI 0 #define EXIT_REASON_EXTERNAL_INTERRUPT 1 #define EXIT_REASON_TRIPLE_FAULT 2 +#define EXIT_REASON_INIT_SIGNAL 3 #define EXIT_REASON_PENDING_INTERRUPT 7 #define EXIT_REASON_NMI_WINDOW 8 @@ -90,6 +91,7 @@ { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ { EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \ { EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \ + { EXIT_REASON_INIT_SIGNAL, "INIT_SIGNAL" }, \ { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \ { EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \ { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \ diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 5e3f12d5359e..233efbb1c81c 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -243,6 +243,8 @@ struct kvm_hyperv_exit { #define 
KVM_INTERNAL_ERROR_SIMUL_EX 2 /* Encounter unexpected vm-exit due to delivery event. */ #define KVM_INTERNAL_ERROR_DELIVERY_EV 3 +/* Encounter unexpected vm-exit reason */ +#define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4 /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { @@ -996,6 +998,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_PTRAUTH_ADDRESS 171 #define KVM_CAP_ARM_PTRAUTH_GENERIC 172 #define KVM_CAP_PMU_EVENT_FILTER 173 +#define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3 From 7d4c85b7035eb2f9ab217ce649dcd1bfaf0cacd3 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 26 Sep 2019 15:00:18 -0700 Subject: perf llvm: Don't access out-of-scope array The 'test_dir' variable is assigned to the 'release' array which is out-of-scope 3 lines later. Extend the scope of the 'release' array so that an out-of-scope array isn't accessed. Bug detected by clang's address sanitizer. Fixes: 07bc5c699a3d ("perf tools: Make fetch_kernel_version() publicly available") Cc: stable@vger.kernel.org # v4.4+ Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Wang Nan Link: http://lore.kernel.org/lkml/20190926220018.25402-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/llvm-utils.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c index 8d04e3d070b1..8b14e4a7f1dc 100644 --- a/tools/perf/util/llvm-utils.c +++ b/tools/perf/util/llvm-utils.c @@ -233,14 +233,14 @@ static int detect_kbuild_dir(char **kbuild_dir) const char *prefix_dir = ""; const char *suffix_dir = ""; + /* _UTSNAME_LENGTH is 65 */ + char release[128]; + char *autoconf_path; int err; if (!test_dir) { - /* _UTSNAME_LENGTH is 65 */ - char release[128]; - err = fetch_kernel_version(NULL, release, sizeof(release)); if (err) -- cgit v1.2.3 From 02d084792273e8a5f1813dcad988229a45be96ea Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Fri, 27 Sep 2019 10:11:46 +0200 Subject: perf vendor events s390: Add JSON transaction for machine type 8561 Add s390 transaction counter definition for machine 8561. This is the same file as for the predecessor machine. 
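With the JSON in place the metric can be requested by name; a rough usage sketch, assuming the machine's CPU Measurement Counter Facility is authorized for the workload:

  $ perf stat -M transaction -- <workload>

This evaluates the expression added below, i.e. the sum of TX_C_TEND, TX_NC_TEND, TX_NC_TABORT, TX_C_TABORT_SPECIAL and TX_C_TABORT_NO_SPECIAL.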
Fixes: 6e67d77d673d ("perf vendor events s390: Add JSON files for machine type 8561") Signed-off-by: Thomas Richter Cc: Heiko Carstens Cc: Vasily Gorbik Link: http://lore.kernel.org/lkml/20190927081147.18345-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/arch/s390/cf_m8561/transaction.json | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 tools/perf/pmu-events/arch/s390/cf_m8561/transaction.json diff --git a/tools/perf/pmu-events/arch/s390/cf_m8561/transaction.json b/tools/perf/pmu-events/arch/s390/cf_m8561/transaction.json new file mode 100644 index 000000000000..1a0034f79f73 --- /dev/null +++ b/tools/perf/pmu-events/arch/s390/cf_m8561/transaction.json @@ -0,0 +1,7 @@ +[ + { + "BriefDescription": "Transaction count", + "MetricName": "transaction", + "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL" + } +] -- cgit v1.2.3 From 0d0e5ecec6116db6031829299e74cc71240c9ff3 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Fri, 27 Sep 2019 10:11:47 +0200 Subject: perf vendor events s390: Use s390 machine name instead of type 8561 In the pmu-events directory for JSON file definitions use the official machine name IBM z15 instead of machine type number 8561. This is consistent with previous machines. Signed-off-by: Thomas Richter Cc: Heiko Carstens Cc: Vasily Gorbik Link: http://lore.kernel.org/lkml/20190927081147.18345-2-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- .../perf/pmu-events/arch/s390/cf_m8561/basic.json | 58 ---- .../perf/pmu-events/arch/s390/cf_m8561/crypto.json | 114 ------- .../pmu-events/arch/s390/cf_m8561/crypto6.json | 30 -- .../pmu-events/arch/s390/cf_m8561/extended.json | 373 --------------------- .../pmu-events/arch/s390/cf_m8561/transaction.json | 7 - tools/perf/pmu-events/arch/s390/cf_z15/basic.json | 58 ++++ tools/perf/pmu-events/arch/s390/cf_z15/crypto.json | 114 +++++++ .../perf/pmu-events/arch/s390/cf_z15/crypto6.json | 30 ++ .../perf/pmu-events/arch/s390/cf_z15/extended.json | 373 +++++++++++++++++++++ .../pmu-events/arch/s390/cf_z15/transaction.json | 7 + tools/perf/pmu-events/arch/s390/mapfile.csv | 2 +- 11 files changed, 583 insertions(+), 583 deletions(-) delete mode 100644 tools/perf/pmu-events/arch/s390/cf_m8561/basic.json delete mode 100644 tools/perf/pmu-events/arch/s390/cf_m8561/crypto.json delete mode 100644 tools/perf/pmu-events/arch/s390/cf_m8561/crypto6.json delete mode 100644 tools/perf/pmu-events/arch/s390/cf_m8561/extended.json delete mode 100644 tools/perf/pmu-events/arch/s390/cf_m8561/transaction.json create mode 100644 tools/perf/pmu-events/arch/s390/cf_z15/basic.json create mode 100644 tools/perf/pmu-events/arch/s390/cf_z15/crypto.json create mode 100644 tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json create mode 100644 tools/perf/pmu-events/arch/s390/cf_z15/extended.json create mode 100644 tools/perf/pmu-events/arch/s390/cf_z15/transaction.json diff --git a/tools/perf/pmu-events/arch/s390/cf_m8561/basic.json b/tools/perf/pmu-events/arch/s390/cf_m8561/basic.json deleted file mode 100644 index 17fb5241928b..000000000000 --- a/tools/perf/pmu-events/arch/s390/cf_m8561/basic.json +++ /dev/null @@ -1,58 +0,0 @@ -[ - { - "Unit": "CPU-M-CF", - "EventCode": "0", - "EventName": "CPU_CYCLES", - "BriefDescription": "CPU Cycles", - "PublicDescription": "Cycle Count" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "1", - "EventName": "INSTRUCTIONS", - "BriefDescription": "Instructions", - "PublicDescription": "Instruction Count" - }, - 
{ - "Unit": "CPU-M-CF", - "EventCode": "2", - "EventName": "L1I_DIR_WRITES", - "BriefDescription": "L1I Directory Writes", - "PublicDescription": "Level-1 I-Cache Directory Write Count" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "3", - "EventName": "L1I_PENALTY_CYCLES", - "BriefDescription": "L1I Penalty Cycles", - "PublicDescription": "Level-1 I-Cache Penalty Cycle Count" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "4", - "EventName": "L1D_DIR_WRITES", - "BriefDescription": "L1D Directory Writes", - "PublicDescription": "Level-1 D-Cache Directory Write Count" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "5", - "EventName": "L1D_PENALTY_CYCLES", - "BriefDescription": "L1D Penalty Cycles", - "PublicDescription": "Level-1 D-Cache Penalty Cycle Count" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "32", - "EventName": "PROBLEM_STATE_CPU_CYCLES", - "BriefDescription": "Problem-State CPU Cycles", - "PublicDescription": "Problem-State Cycle Count" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "33", - "EventName": "PROBLEM_STATE_INSTRUCTIONS", - "BriefDescription": "Problem-State Instructions", - "PublicDescription": "Problem-State Instruction Count" - }, -] diff --git a/tools/perf/pmu-events/arch/s390/cf_m8561/crypto.json b/tools/perf/pmu-events/arch/s390/cf_m8561/crypto.json deleted file mode 100644 index db286f19e7b6..000000000000 --- a/tools/perf/pmu-events/arch/s390/cf_m8561/crypto.json +++ /dev/null @@ -1,114 +0,0 @@ -[ - { - "Unit": "CPU-M-CF", - "EventCode": "64", - "EventName": "PRNG_FUNCTIONS", - "BriefDescription": "PRNG Functions", - "PublicDescription": "Total number of the PRNG functions issued by the CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "65", - "EventName": "PRNG_CYCLES", - "BriefDescription": "PRNG Cycles", - "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "66", - "EventName": "PRNG_BLOCKED_FUNCTIONS", - "BriefDescription": "PRNG Blocked Functions", - "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "67", - "EventName": "PRNG_BLOCKED_CYCLES", - "BriefDescription": "PRNG Blocked Cycles", - "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "68", - "EventName": "SHA_FUNCTIONS", - "BriefDescription": "SHA Functions", - "PublicDescription": "Total number of SHA functions issued by the CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "69", - "EventName": "SHA_CYCLES", - "BriefDescription": "SHA Cycles", - "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "70", - "EventName": "SHA_BLOCKED_FUNCTIONS", - "BriefDescription": "SHA Blocked Functions", - "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "71", - "EventName": "SHA_BLOCKED_CYCLES", - "BriefDescription": "SHA Bloced Cycles", - "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the 
SHA coprocessor is busy performing a function issued by another CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "72", - "EventName": "DEA_FUNCTIONS", - "BriefDescription": "DEA Functions", - "PublicDescription": "Total number of the DEA functions issued by the CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "73", - "EventName": "DEA_CYCLES", - "BriefDescription": "DEA Cycles", - "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "74", - "EventName": "DEA_BLOCKED_FUNCTIONS", - "BriefDescription": "DEA Blocked Functions", - "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "75", - "EventName": "DEA_BLOCKED_CYCLES", - "BriefDescription": "DEA Blocked Cycles", - "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "76", - "EventName": "AES_FUNCTIONS", - "BriefDescription": "AES Functions", - "PublicDescription": "Total number of AES functions issued by the CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "77", - "EventName": "AES_CYCLES", - "BriefDescription": "AES Cycles", - "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "78", - "EventName": "AES_BLOCKED_FUNCTIONS", - "BriefDescription": "AES Blocked Functions", - "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "79", - "EventName": "AES_BLOCKED_CYCLES", - "BriefDescription": "AES Blocked Cycles", - "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" - }, -] diff --git a/tools/perf/pmu-events/arch/s390/cf_m8561/crypto6.json b/tools/perf/pmu-events/arch/s390/cf_m8561/crypto6.json deleted file mode 100644 index 5e36bc2468d0..000000000000 --- a/tools/perf/pmu-events/arch/s390/cf_m8561/crypto6.json +++ /dev/null @@ -1,30 +0,0 @@ -[ - { - "Unit": "CPU-M-CF", - "EventCode": "80", - "EventName": "ECC_FUNCTION_COUNT", - "BriefDescription": "ECC Function Count", - "PublicDescription": "Long ECC function Count" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "81", - "EventName": "ECC_CYCLES_COUNT", - "BriefDescription": "ECC Cycles Count", - "PublicDescription": "Long ECC Function cycles count" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "82", - "EventName": "ECC_BLOCKED_FUNCTION_COUNT", - "BriefDescription": "Ecc Blocked Function Count", - "PublicDescription": "Long ECC blocked function count" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "83", - "EventName": "ECC_BLOCKED_CYCLES_COUNT", - "BriefDescription": "ECC Blocked Cycles Count", - "PublicDescription": "Long ECC blocked cycles count" - }, -] diff --git a/tools/perf/pmu-events/arch/s390/cf_m8561/extended.json b/tools/perf/pmu-events/arch/s390/cf_m8561/extended.json deleted file mode 100644 index 89e070727e1b..000000000000 --- a/tools/perf/pmu-events/arch/s390/cf_m8561/extended.json 
+++ /dev/null @@ -1,373 +0,0 @@ -[ - { - "Unit": "CPU-M-CF", - "EventCode": "128", - "EventName": "L1D_RO_EXCL_WRITES", - "BriefDescription": "L1D Read-only Exclusive Writes", - "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "129", - "EventName": "DTLB2_WRITES", - "BriefDescription": "DTLB2 Writes", - "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "130", - "EventName": "DTLB2_MISSES", - "BriefDescription": "DTLB2 Misses", - "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "131", - "EventName": "DTLB2_HPAGE_WRITES", - "BriefDescription": "DTLB2 One-Megabyte Page Writes", - "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "132", - "EventName": "DTLB2_GPAGE_WRITES", - "BriefDescription": "DTLB2 Two-Gigabyte Page Writes", - "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "133", - "EventName": "L1D_L2D_SOURCED_WRITES", - "BriefDescription": "L1D L2D Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "134", - "EventName": "ITLB2_WRITES", - "BriefDescription": "ITLB2 Writes", - "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "135", - "EventName": "ITLB2_MISSES", - "BriefDescription": "ITLB2 Misses", - "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. 
Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "136", - "EventName": "L1I_L2I_SOURCED_WRITES", - "BriefDescription": "L1I L2I Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "137", - "EventName": "TLB2_PTE_WRITES", - "BriefDescription": "TLB2 PTE Writes", - "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "138", - "EventName": "TLB2_CRSTE_WRITES", - "BriefDescription": "TLB2 CRSTE Writes", - "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "139", - "EventName": "TLB2_ENGINES_BUSY", - "BriefDescription": "TLB2 Engines Busy", - "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "140", - "EventName": "TX_C_TEND", - "BriefDescription": "Completed TEND instructions in constrained TX mode", - "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "141", - "EventName": "TX_NC_TEND", - "BriefDescription": "Completed TEND instructions in non-constrained TX mode", - "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "143", - "EventName": "L1C_TLB2_MISSES", - "BriefDescription": "L1C TLB2 Misses", - "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "144", - "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES", - "BriefDescription": "L1D On-Chip L3 Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "145", - "EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES", - "BriefDescription": "L1D On-Chip Memory Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "146", - "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV", - "BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "147", - "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES", - "BriefDescription": "L1D On-Cluster L3 Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "148", - "EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES", - "BriefDescription": "L1D On-Cluster Memory Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory" - }, - 
{ - "Unit": "CPU-M-CF", - "EventCode": "149", - "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV", - "BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "150", - "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES", - "BriefDescription": "L1D Off-Cluster L3 Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "151", - "EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES", - "BriefDescription": "L1D Off-Cluster Memory Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "152", - "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV", - "BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "153", - "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES", - "BriefDescription": "L1D Off-Drawer L3 Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "154", - "EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES", - "BriefDescription": "L1D Off-Drawer Memory Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "155", - "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV", - "BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "156", - "EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES", - "BriefDescription": "L1D On-Drawer L4 Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "157", - "EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES", - "BriefDescription": "L1D Off-Drawer L4 Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "158", - "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO", - "BriefDescription": "L1D On-Chip L3 Sourced Writes read-only", - "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "162", - "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES", - "BriefDescription": "L1I On-Chip L3 Sourced Writes", - "PublicDescription": "A 
directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "163", - "EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES", - "BriefDescription": "L1I On-Chip Memory Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "164", - "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV", - "BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention", - "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "165", - "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES", - "BriefDescription": "L1I On-Cluster L3 Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "166", - "EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES", - "BriefDescription": "L1I On-Cluster Memory Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "167", - "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV", - "BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention", - "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "168", - "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES", - "BriefDescription": "L1I Off-Cluster L3 Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "169", - "EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES", - "BriefDescription": "L1I Off-Cluster Memory Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "170", - "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV", - "BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention", - "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "171", - "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES", - "BriefDescription": "L1I Off-Drawer L3 Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "172", - "EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES", - "BriefDescription": "L1I Off-Drawer Memory Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory" - }, - { - "Unit": 
"CPU-M-CF", - "EventCode": "173", - "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV", - "BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention", - "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "174", - "EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES", - "BriefDescription": "L1I On-Drawer L4 Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "175", - "EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES", - "BriefDescription": "L1I Off-Drawer L4 Sourced Writes", - "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "224", - "EventName": "BCD_DFP_EXECUTION_SLOTS", - "BriefDescription": "BCD DFP Execution Slots", - "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "225", - "EventName": "VX_BCD_EXECUTION_SLOTS", - "BriefDescription": "VX BCD Execution Slots", - "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "226", - "EventName": "DECIMAL_INSTRUCTIONS", - "BriefDescription": "Decimal Instructions", - "PublicDescription": "Decimal instructions dispatched. 
Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "232", - "EventName": "LAST_HOST_TRANSLATIONS", - "BriefDescription": "Last host translation done", - "PublicDescription": "Last Host Translation done" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "243", - "EventName": "TX_NC_TABORT", - "BriefDescription": "Aborted transactions in non-constrained TX mode", - "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "244", - "EventName": "TX_C_TABORT_NO_SPECIAL", - "BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic", - "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "245", - "EventName": "TX_C_TABORT_SPECIAL", - "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic", - "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "448", - "EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE", - "BriefDescription": "Cycle count with one thread active", - "PublicDescription": "Cycle count with one thread active" - }, - { - "Unit": "CPU-M-CF", - "EventCode": "449", - "EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE", - "BriefDescription": "Cycle count with two threads active", - "PublicDescription": "Cycle count with two threads active" - }, -] diff --git a/tools/perf/pmu-events/arch/s390/cf_m8561/transaction.json b/tools/perf/pmu-events/arch/s390/cf_m8561/transaction.json deleted file mode 100644 index 1a0034f79f73..000000000000 --- a/tools/perf/pmu-events/arch/s390/cf_m8561/transaction.json +++ /dev/null @@ -1,7 +0,0 @@ -[ - { - "BriefDescription": "Transaction count", - "MetricName": "transaction", - "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL" - } -] diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/basic.json b/tools/perf/pmu-events/arch/s390/cf_z15/basic.json new file mode 100644 index 000000000000..17fb5241928b --- /dev/null +++ b/tools/perf/pmu-events/arch/s390/cf_z15/basic.json @@ -0,0 +1,58 @@ +[ + { + "Unit": "CPU-M-CF", + "EventCode": "0", + "EventName": "CPU_CYCLES", + "BriefDescription": "CPU Cycles", + "PublicDescription": "Cycle Count" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "1", + "EventName": "INSTRUCTIONS", + "BriefDescription": "Instructions", + "PublicDescription": "Instruction Count" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "2", + "EventName": "L1I_DIR_WRITES", + "BriefDescription": "L1I Directory Writes", + "PublicDescription": "Level-1 I-Cache Directory Write Count" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "3", + "EventName": "L1I_PENALTY_CYCLES", + "BriefDescription": "L1I Penalty Cycles", + "PublicDescription": "Level-1 I-Cache Penalty Cycle Count" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "4", + "EventName": "L1D_DIR_WRITES", + "BriefDescription": "L1D Directory Writes", + "PublicDescription": "Level-1 D-Cache Directory Write Count" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "5", + "EventName": "L1D_PENALTY_CYCLES", + "BriefDescription": "L1D Penalty Cycles", + "PublicDescription": "Level-1 D-Cache Penalty Cycle Count" + }, + { + 
"Unit": "CPU-M-CF", + "EventCode": "32", + "EventName": "PROBLEM_STATE_CPU_CYCLES", + "BriefDescription": "Problem-State CPU Cycles", + "PublicDescription": "Problem-State Cycle Count" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "33", + "EventName": "PROBLEM_STATE_INSTRUCTIONS", + "BriefDescription": "Problem-State Instructions", + "PublicDescription": "Problem-State Instruction Count" + }, +] diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json new file mode 100644 index 000000000000..db286f19e7b6 --- /dev/null +++ b/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json @@ -0,0 +1,114 @@ +[ + { + "Unit": "CPU-M-CF", + "EventCode": "64", + "EventName": "PRNG_FUNCTIONS", + "BriefDescription": "PRNG Functions", + "PublicDescription": "Total number of the PRNG functions issued by the CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "65", + "EventName": "PRNG_CYCLES", + "BriefDescription": "PRNG Cycles", + "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "66", + "EventName": "PRNG_BLOCKED_FUNCTIONS", + "BriefDescription": "PRNG Blocked Functions", + "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "67", + "EventName": "PRNG_BLOCKED_CYCLES", + "BriefDescription": "PRNG Blocked Cycles", + "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "68", + "EventName": "SHA_FUNCTIONS", + "BriefDescription": "SHA Functions", + "PublicDescription": "Total number of SHA functions issued by the CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "69", + "EventName": "SHA_CYCLES", + "BriefDescription": "SHA Cycles", + "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "70", + "EventName": "SHA_BLOCKED_FUNCTIONS", + "BriefDescription": "SHA Blocked Functions", + "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "71", + "EventName": "SHA_BLOCKED_CYCLES", + "BriefDescription": "SHA Bloced Cycles", + "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "72", + "EventName": "DEA_FUNCTIONS", + "BriefDescription": "DEA Functions", + "PublicDescription": "Total number of the DEA functions issued by the CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "73", + "EventName": "DEA_CYCLES", + "BriefDescription": "DEA Cycles", + "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "74", + "EventName": "DEA_BLOCKED_FUNCTIONS", + "BriefDescription": "DEA Blocked Functions", + "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES 
coprocessor is busy performing a function issued by another CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "75", + "EventName": "DEA_BLOCKED_CYCLES", + "BriefDescription": "DEA Blocked Cycles", + "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "76", + "EventName": "AES_FUNCTIONS", + "BriefDescription": "AES Functions", + "PublicDescription": "Total number of AES functions issued by the CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "77", + "EventName": "AES_CYCLES", + "BriefDescription": "AES Cycles", + "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "78", + "EventName": "AES_BLOCKED_FUNCTIONS", + "BriefDescription": "AES Blocked Functions", + "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "79", + "EventName": "AES_BLOCKED_CYCLES", + "BriefDescription": "AES Blocked Cycles", + "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU" + }, +] diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json new file mode 100644 index 000000000000..5e36bc2468d0 --- /dev/null +++ b/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json @@ -0,0 +1,30 @@ +[ + { + "Unit": "CPU-M-CF", + "EventCode": "80", + "EventName": "ECC_FUNCTION_COUNT", + "BriefDescription": "ECC Function Count", + "PublicDescription": "Long ECC function Count" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "81", + "EventName": "ECC_CYCLES_COUNT", + "BriefDescription": "ECC Cycles Count", + "PublicDescription": "Long ECC Function cycles count" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "82", + "EventName": "ECC_BLOCKED_FUNCTION_COUNT", + "BriefDescription": "Ecc Blocked Function Count", + "PublicDescription": "Long ECC blocked function count" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "83", + "EventName": "ECC_BLOCKED_CYCLES_COUNT", + "BriefDescription": "ECC Blocked Cycles Count", + "PublicDescription": "Long ECC blocked cycles count" + }, +] diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/extended.json b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json new file mode 100644 index 000000000000..89e070727e1b --- /dev/null +++ b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json @@ -0,0 +1,373 @@ +[ + { + "Unit": "CPU-M-CF", + "EventCode": "128", + "EventName": "L1D_RO_EXCL_WRITES", + "BriefDescription": "L1D Read-only Exclusive Writes", + "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "129", + "EventName": "DTLB2_WRITES", + "BriefDescription": "DTLB2 Writes", + "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "130", + "EventName": "DTLB2_MISSES", + "BriefDescription": "DTLB2 Misses", + 
"PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "131", + "EventName": "DTLB2_HPAGE_WRITES", + "BriefDescription": "DTLB2 One-Megabyte Page Writes", + "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "132", + "EventName": "DTLB2_GPAGE_WRITES", + "BriefDescription": "DTLB2 Two-Gigabyte Page Writes", + "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "133", + "EventName": "L1D_L2D_SOURCED_WRITES", + "BriefDescription": "L1D L2D Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "134", + "EventName": "ITLB2_WRITES", + "BriefDescription": "ITLB2 Writes", + "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "135", + "EventName": "ITLB2_MISSES", + "BriefDescription": "ITLB2 Misses", + "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "136", + "EventName": "L1I_L2I_SOURCED_WRITES", + "BriefDescription": "L1I L2I Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "137", + "EventName": "TLB2_PTE_WRITES", + "BriefDescription": "TLB2 PTE Writes", + "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "138", + "EventName": "TLB2_CRSTE_WRITES", + "BriefDescription": "TLB2 CRSTE Writes", + "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "139", + "EventName": "TLB2_ENGINES_BUSY", + "BriefDescription": "TLB2 Engines Busy", + "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "140", + "EventName": "TX_C_TEND", + "BriefDescription": "Completed TEND instructions in constrained TX mode", + "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "141", + "EventName": "TX_NC_TEND", + "BriefDescription": "Completed TEND instructions in non-constrained TX mode", + "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "143", + "EventName": "L1C_TLB2_MISSES", + "BriefDescription": "L1C TLB2 Misses", + "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "144", + "EventName": 
"L1D_ONCHIP_L3_SOURCED_WRITES", + "BriefDescription": "L1D On-Chip L3 Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "145", + "EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES", + "BriefDescription": "L1D On-Chip Memory Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "146", + "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV", + "BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "147", + "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES", + "BriefDescription": "L1D On-Cluster L3 Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "148", + "EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES", + "BriefDescription": "L1D On-Cluster Memory Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "149", + "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV", + "BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "150", + "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES", + "BriefDescription": "L1D Off-Cluster L3 Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "151", + "EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES", + "BriefDescription": "L1D Off-Cluster Memory Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "152", + "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV", + "BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "153", + "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES", + "BriefDescription": "L1D Off-Drawer L3 Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "154", + "EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES", + "BriefDescription": "L1D Off-Drawer Memory Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from 
Off-Drawer memory" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "155", + "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV", + "BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "156", + "EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES", + "BriefDescription": "L1D On-Drawer L4 Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "157", + "EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES", + "BriefDescription": "L1D Off-Drawer L4 Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "158", + "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO", + "BriefDescription": "L1D On-Chip L3 Sourced Writes read-only", + "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "162", + "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES", + "BriefDescription": "L1I On-Chip L3 Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "163", + "EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES", + "BriefDescription": "L1I On-Chip Memory Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "164", + "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV", + "BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "165", + "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES", + "BriefDescription": "L1I On-Cluster L3 Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "166", + "EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES", + "BriefDescription": "L1I On-Cluster Memory Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "167", + "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV", + "BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "168", + "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES", + "BriefDescription": "L1I Off-Cluster L3 Sourced 
Writes", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "169", + "EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES", + "BriefDescription": "L1I Off-Cluster Memory Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "170", + "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV", + "BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "171", + "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES", + "BriefDescription": "L1I Off-Drawer L3 Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "172", + "EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES", + "BriefDescription": "L1I Off-Drawer Memory Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "173", + "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV", + "BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "174", + "EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES", + "BriefDescription": "L1I On-Drawer L4 Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "175", + "EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES", + "BriefDescription": "L1I Off-Drawer L4 Sourced Writes", + "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "224", + "EventName": "BCD_DFP_EXECUTION_SLOTS", + "BriefDescription": "BCD DFP Execution Slots", + "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "225", + "EventName": "VX_BCD_EXECUTION_SLOTS", + "BriefDescription": "VX BCD Execution Slots", + "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "226", + "EventName": "DECIMAL_INSTRUCTIONS", + "BriefDescription": "Decimal Instructions", + "PublicDescription": "Decimal instructions dispatched. 
Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "232", + "EventName": "LAST_HOST_TRANSLATIONS", + "BriefDescription": "Last host translation done", + "PublicDescription": "Last Host Translation done" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "243", + "EventName": "TX_NC_TABORT", + "BriefDescription": "Aborted transactions in non-constrained TX mode", + "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "244", + "EventName": "TX_C_TABORT_NO_SPECIAL", + "BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic", + "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "245", + "EventName": "TX_C_TABORT_SPECIAL", + "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic", + "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "448", + "EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE", + "BriefDescription": "Cycle count with one thread active", + "PublicDescription": "Cycle count with one thread active" + }, + { + "Unit": "CPU-M-CF", + "EventCode": "449", + "EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE", + "BriefDescription": "Cycle count with two threads active", + "PublicDescription": "Cycle count with two threads active" + }, +] diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z15/transaction.json new file mode 100644 index 000000000000..1a0034f79f73 --- /dev/null +++ b/tools/perf/pmu-events/arch/s390/cf_z15/transaction.json @@ -0,0 +1,7 @@ +[ + { + "BriefDescription": "Transaction count", + "MetricName": "transaction", + "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL" + } +] diff --git a/tools/perf/pmu-events/arch/s390/mapfile.csv b/tools/perf/pmu-events/arch/s390/mapfile.csv index bd3fc577139c..61641a3480e0 100644 --- a/tools/perf/pmu-events/arch/s390/mapfile.csv +++ b/tools/perf/pmu-events/arch/s390/mapfile.csv @@ -4,4 +4,4 @@ Family-model,Version,Filename,EventType ^IBM.282[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_zec12,core ^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core ^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core -^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_m8561,core +^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_z15,core -- cgit v1.2.3 From ee212d6ea20887c0ef352be8563ca13dbf965906 Mon Sep 17 00:00:00 2001 From: Steve MacLean Date: Sat, 28 Sep 2019 01:39:00 +0000 Subject: perf map: Fix overlapped map handling Whenever an mmap/mmap2 event occurs, the map tree must be updated to add a new entry. If a new map overlaps a previous map, the overlapped section of the previous map is effectively unmapped, but the non-overlapping sections are still valid. maps__fixup_overlappings() is responsible for creating any new map entries from the previously overlapped map. It optionally creates a before and an after map. When creating the after map the existing code failed to adjust the map.pgoff. This meant the new after map would incorrectly calculate the file offset for the ip. 
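In simplified terms -- ignoring perf's map_ip()/unmap_ip() indirection -- a map covering [start, end) that was mmapped from file offset pgoff resolves an ip to file offset pgoff + (ip - start). The following is a minimal standalone sketch (not the perf code itself) of why the split-off "after" piece needs its pgoff advanced; the addresses are loosely modeled on the libcoreclr.so mappings shown below:

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

/* Simplified map: covers [start, end) and was mmapped from file offset pgoff. */
struct simple_map { uint64_t start, end, pgoff; };

static uint64_t file_offset(const struct simple_map *m, uint64_t ip)
{
	return m->pgoff + (ip - m->start);
}

/* Tail that survives after a new map ending at new_end overlapped the head. */
static struct simple_map make_after(const struct simple_map *old, uint64_t new_end)
{
	struct simple_map after = *old;

	after.start = new_end;
	after.pgoff += new_end - old->start;	/* the adjustment the fix adds */

	/* Any ip in the tail must still resolve to the same file offset. */
	assert(file_offset(old, new_end) == file_offset(&after, new_end));
	return after;
}

int main(void)
{
	struct simple_map old = { 0x7fe615726000ULL, 0x7fe615e8e000ULL, 0 };
	struct simple_map after = make_after(&old, 0x7fe615975000ULL);

	printf("after.pgoff = %#" PRIx64 "\n", after.pgoff);
	return 0;
}
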
This results in incorrect symbol name resolution for any ip in the after region. Make maps__fixup_overlappings() correctly populate map.pgoff. Add an assert that new mapping matches old mapping at the beginning of the after map. Committer-testing: Validated correct parsing of libcoreclr.so symbols from .NET Core 3.0 preview9 (which didn't strip symbols). Preparation: ~/dotnet3.0-preview9/dotnet new webapi -o perfSymbol cd perfSymbol ~/dotnet3.0-preview9/dotnet publish perf record ~/dotnet3.0-preview9/dotnet \ bin/Debug/netcoreapp3.0/publish/perfSymbol.dll ^C Before: perf script --show-mmap-events 2>&1 | grep -e MMAP -e unknown |\ grep libcoreclr.so | head -n 4 dotnet 1907 373352.698780: PERF_RECORD_MMAP2 1907/1907: \ [0x7fe615726000(0x768000) @ 0 08:02 5510620 765057155]: \ r-xp .../3.0.0-preview9-19423-09/libcoreclr.so dotnet 1907 373352.701091: PERF_RECORD_MMAP2 1907/1907: \ [0x7fe615974000(0x1000) @ 0x24e000 08:02 5510620 765057155]: \ rwxp .../3.0.0-preview9-19423-09/libcoreclr.so dotnet 1907 373352.701241: PERF_RECORD_MMAP2 1907/1907: \ [0x7fe615c42000(0x1000) @ 0x51c000 08:02 5510620 765057155]: \ rwxp .../3.0.0-preview9-19423-09/libcoreclr.so dotnet 1907 373352.705249: 250000 cpu-clock: \ 7fe6159a1f99 [unknown] \ (.../3.0.0-preview9-19423-09/libcoreclr.so) After: perf script --show-mmap-events 2>&1 | grep -e MMAP -e unknown |\ grep libcoreclr.so | head -n 4 dotnet 1907 373352.698780: PERF_RECORD_MMAP2 1907/1907: \ [0x7fe615726000(0x768000) @ 0 08:02 5510620 765057155]: \ r-xp .../3.0.0-preview9-19423-09/libcoreclr.so dotnet 1907 373352.701091: PERF_RECORD_MMAP2 1907/1907: \ [0x7fe615974000(0x1000) @ 0x24e000 08:02 5510620 765057155]: \ rwxp .../3.0.0-preview9-19423-09/libcoreclr.so dotnet 1907 373352.701241: PERF_RECORD_MMAP2 1907/1907: \ [0x7fe615c42000(0x1000) @ 0x51c000 08:02 5510620 765057155]: \ rwxp .../3.0.0-preview9-19423-09/libcoreclr.so All the [unknown] symbols were resolved. Signed-off-by: Steve MacLean Tested-by: Brian Robbins Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: Davidlohr Bueso Cc: Eric Saint-Etienne Cc: John Keeping Cc: John Salem Cc: Leo Yan Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Song Liu Cc: Stephane Eranian Cc: Tom McDonald Link: http://lore.kernel.org/lkml/BN8PR21MB136270949F22A6A02335C238F7800@BN8PR21MB1362.namprd21.prod.outlook.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/map.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 5b83ed1ebbd6..eec9b282c047 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include "symbol.h" +#include #include #include #include @@ -850,6 +851,8 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp } after->start = map->end; + after->pgoff += map->end - pos->start; + assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end)); __map_groups__insert(pos->groups, after); if (verbose >= 2 && !use_browser) map__fprintf(after, fp); -- cgit v1.2.3 From b59711e9b0d22fd47abfa00602fd8c365cdd3ab7 Mon Sep 17 00:00:00 2001 From: Steve MacLean Date: Sat, 28 Sep 2019 01:41:18 +0000 Subject: perf inject jit: Fix JIT_CODE_MOVE filename During perf inject --jit, JIT_CODE_MOVE records were injecting MMAP records with an incorrect filename. Specifically it was missing the ".so" suffix. Further the JIT_CODE_LOAD record were silently truncating the jr->load.code_index field to 32 bits before generating the filename. 
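As a standalone illustration of that truncation (hypothetical pid and code_index values, not taken from a real jitdump):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t code_index = 0x100000002ULL;		/* hypothetical jr->load.code_index */
	uint32_t truncated = (uint32_t)code_index;	/* what a 32-bit local would keep */
	char full[64], cut[64];

	snprintf(full, sizeof(full), "jitted-%d-%" PRIu64 ".so", 1234, code_index);
	snprintf(cut, sizeof(cut), "jitted-%d-%u.so", 1234, truncated);
	/* prints jitted-1234-4294967298.so vs jitted-1234-2.so: the names no longer match */
	printf("%s\n%s\n", full, cut);
	return 0;
}
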
Make both records emit the same filename based on the full 64 bit code_index field. Fixes: 9b07e27f88b9 ("perf inject: Add jitdump mmap injection support") Cc: stable@vger.kernel.org # v4.6+ Signed-off-by: Steve MacLean Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: Brian Robbins Cc: Davidlohr Bueso Cc: Eric Saint-Etienne Cc: John Keeping Cc: John Salem Cc: Leo Yan Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Song Liu Cc: Stephane Eranian Cc: Tom McDonald Link: http://lore.kernel.org/lkml/BN8PR21MB1362FF8F127B31DBF4121528F7800@BN8PR21MB1362.namprd21.prod.outlook.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/jitdump.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c index 1bdf4c6ea3e5..e3ccb0ce1938 100644 --- a/tools/perf/util/jitdump.c +++ b/tools/perf/util/jitdump.c @@ -395,7 +395,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr) size_t size; u16 idr_size; const char *sym; - uint32_t count; + uint64_t count; int ret, csize, usize; pid_t pid, tid; struct { @@ -418,7 +418,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr) return -1; filename = event->mmap2.filename; - size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%u.so", + size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so", jd->dir, pid, count); @@ -529,7 +529,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr) return -1; filename = event->mmap2.filename; - size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%"PRIu64, + size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so", jd->dir, pid, jr->move.code_index); -- cgit v1.2.3 From 2657983b4c0d81632c6a73bae469951b0d341251 Mon Sep 17 00:00:00 2001 From: Steve MacLean Date: Sat, 28 Sep 2019 01:53:08 +0000 Subject: perf docs: Correct and clarify jitdump spec Specification claims latest version of jitdump file format is 2. Current jit dump reading code treats 1 as the latest version. Correct spec to match code. The original language made it unclear the value to be written in the magic field. Revise language that the writer always writes the same value. Specify that the reader uses the value to detect endian mismatches. Signed-off-by: Steve MacLean Acked-by: Stephane Eranian Cc: Alexander Shishkin Cc: Andi Kleen Cc: Brian Robbins Cc: Davidlohr Bueso Cc: Eric Saint-Etienne Cc: Jiri Olsa Cc: John Keeping Cc: John Salem Cc: Leo Yan Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Song Liu Cc: Tom McDonald Link: http://lore.kernel.org/lkml/BN8PR21MB1362F63CDE7AC69736FC7F9EF7800@BN8PR21MB1362.namprd21.prod.outlook.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/jitdump-specification.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/Documentation/jitdump-specification.txt b/tools/perf/Documentation/jitdump-specification.txt index 4c62b0713651..52152d156ad9 100644 --- a/tools/perf/Documentation/jitdump-specification.txt +++ b/tools/perf/Documentation/jitdump-specification.txt @@ -36,8 +36,8 @@ III/ Jitdump file header format Each jitdump file starts with a fixed size header containing the following fields in order: -* uint32_t magic : a magic number tagging the file type. The value is 4-byte long and represents the string "JiTD" in ASCII form. It is 0x4A695444 or 0x4454694a depending on the endianness. 
The field can be used to detect the endianness of the file -* uint32_t version : a 4-byte value representing the format version. It is currently set to 2 +* uint32_t magic : a magic number tagging the file type. The value is 4-byte long and represents the string "JiTD" in ASCII form. It is written as 0x4A695444. The reader will detect an endian mismatch when it reads 0x4454694a. +* uint32_t version : a 4-byte value representing the format version. It is currently set to 1 * uint32_t total_size: size in bytes of file header * uint32_t elf_mach : ELF architecture encoding (ELF e_machine value as specified in /usr/include/elf.h) * uint32_t pad1 : padding. Reserved for future use -- cgit v1.2.3 From e98df280bc2a499fd41d7f9e2d6733884de69902 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 27 Sep 2019 16:35:44 -0700 Subject: perf script brstackinsn: Fix recovery from LBR/binary mismatch When the LBR data and the instructions in a binary do not match the loop printing instructions could get confused and print a long stream of bogus instructions. The problem was that if the instruction decoder cannot decode an instruction its ilen wasn't initialized, so the loop going through the basic block would continue with the previous value. Harden the code to avoid such problems: - Make sure ilen is always freshly initialized and is 0 for bad instructions. - Do not overrun the code buffer while printing instructions - Print a warning message if the final jump is not on an instruction boundary. Signed-off-by: Andi Kleen Cc: Jiri Olsa Link: http://lore.kernel.org/lkml/20190927233546.11533-1-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-script.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 286fc70d7402..67be8d31afab 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -1063,7 +1063,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, continue; insn = 0; - for (off = 0;; off += ilen) { + for (off = 0; off < (unsigned)len; off += ilen) { uint64_t ip = start + off; printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp); @@ -1074,6 +1074,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, printed += print_srccode(thread, x.cpumode, ip); break; } else { + ilen = 0; printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip, dump_insn(&x, ip, buffer + off, len - off, &ilen)); if (ilen == 0) @@ -1083,6 +1084,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, insn++; } } + if (off != (unsigned)len) + printed += fprintf(fp, "\tmismatch of LBR data and executable\n"); } /* @@ -1123,6 +1126,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, goto out; } for (off = 0; off <= end - start; off += ilen) { + ilen = 0; printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", start + off, dump_insn(&x, start + off, buffer + off, len - off, &ilen)); if (ilen == 0) -- cgit v1.2.3 From 6bdfd9f118bd59cf0f85d3bf4b72b586adea17c1 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 27 Sep 2019 16:35:45 -0700 Subject: perf jevents: Fix period for Intel fixed counters The Intel fixed counters use a special table to override the JSON information. During this override the period information from the JSON file got dropped, which results in inst_retired.any and similar running with frequency mode instead of a period. Just specify the expected period in the table. 
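For background, the difference matters because a sampling event is programmed either with a fixed sample_period or in frequency mode, where the kernel keeps re-adjusting the period to hit a target rate. A generic perf_event_open() sketch of the two modes follows; it is illustrative only, not the perf tool code, and the 2000003 and 4000 values are just examples:

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_hw_instructions(int use_freq)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;
	if (use_freq) {
		attr.freq = 1;
		attr.sample_freq = 4000;	/* kernel keeps adjusting the period */
	} else {
		attr.sample_period = 2000003;	/* fixed period, as the table now specifies */
	}
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
	int fd_period = open_hw_instructions(0);
	int fd_freq = open_hw_instructions(1);

	printf("period fd=%d, freq fd=%d\n", fd_period, fd_freq);
	if (fd_period >= 0)
		close(fd_period);
	if (fd_freq >= 0)
		close(fd_freq);
	return 0;
}
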
Signed-off-by: Andi Kleen Cc: Jiri Olsa Link: http://lore.kernel.org/lkml/20190927233546.11533-2-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/jevents.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c index 9e37287da924..e2837260ca4d 100644 --- a/tools/perf/pmu-events/jevents.c +++ b/tools/perf/pmu-events/jevents.c @@ -450,12 +450,12 @@ static struct fixed { const char *name; const char *event; } fixed[] = { - { "inst_retired.any", "event=0xc0" }, - { "inst_retired.any_p", "event=0xc0" }, - { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" }, - { "cpu_clk_unhalted.thread", "event=0x3c" }, - { "cpu_clk_unhalted.core", "event=0x3c" }, - { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" }, + { "inst_retired.any", "event=0xc0,period=2000003" }, + { "inst_retired.any_p", "event=0xc0,period=2000003" }, + { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03,period=2000003" }, + { "cpu_clk_unhalted.thread", "event=0x3c,period=2000003" }, + { "cpu_clk_unhalted.core", "event=0x3c,period=2000003" }, + { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1,period=2000003" }, { NULL, NULL}, }; -- cgit v1.2.3 From f67001a4a08eb124197ed4376941e1da9cf94b42 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 30 Sep 2019 10:55:34 -0300 Subject: perf tools: Propagate get_cpuid() error For consistency, propagate the exact cause for get_cpuid() to have failed. Cc: Adrian Hunter Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lkml.kernel.org/n/tip-9ig269f7ktnhh99g4l15vpu2@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/powerpc/util/header.c | 3 ++- tools/perf/arch/s390/util/header.c | 9 +++++---- tools/perf/arch/x86/util/header.c | 3 ++- tools/perf/builtin-kvm.c | 7 ++++--- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c index b6b7bc7e31a1..3b4cdfc5efd6 100644 --- a/tools/perf/arch/powerpc/util/header.c +++ b/tools/perf/arch/powerpc/util/header.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include #include #include @@ -30,7 +31,7 @@ get_cpuid(char *buffer, size_t sz) buffer[nb-1] = '\0'; return 0; } - return -1; + return ENOBUFS; } char * diff --git a/tools/perf/arch/s390/util/header.c b/tools/perf/arch/s390/util/header.c index 8b0b018d896a..7933f6871c81 100644 --- a/tools/perf/arch/s390/util/header.c +++ b/tools/perf/arch/s390/util/header.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -54,7 +55,7 @@ int get_cpuid(char *buffer, size_t sz) sysinfo = fopen(SYSINFO, "r"); if (sysinfo == NULL) - return -1; + return errno; while ((read = getline(&line, &line_sz, sysinfo)) != -1) { if (!strncmp(line, SYSINFO_MANU, strlen(SYSINFO_MANU))) { @@ -89,7 +90,7 @@ int get_cpuid(char *buffer, size_t sz) /* Missing manufacturer, type or model information should not happen */ if (!manufacturer[0] || !type[0] || !model[0]) - return -1; + return EINVAL; /* * Scan /proc/service_levels and return the CPU-MF counter facility @@ -133,14 +134,14 @@ skip_sysinfo: else nbytes = snprintf(buffer, sz, "%s,%s,%s", manufacturer, type, model); - return (nbytes >= sz) ? -1 : 0; + return (nbytes >= sz) ? 
ENOBUFS : 0; } char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused) { char *buf = malloc(128); - if (buf && get_cpuid(buf, 128) < 0) + if (buf && get_cpuid(buf, 128)) zfree(&buf); return buf; } diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c index 662ecf84a421..aa6deb463bf3 100644 --- a/tools/perf/arch/x86/util/header.c +++ b/tools/perf/arch/x86/util/header.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include #include #include @@ -58,7 +59,7 @@ __get_cpuid(char *buffer, size_t sz, const char *fmt) buffer[nb-1] = '\0'; return 0; } - return -1; + return ENOBUFS; } int diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c index 2227e2f42c09..58a9e0989491 100644 --- a/tools/perf/builtin-kvm.c +++ b/tools/perf/builtin-kvm.c @@ -705,14 +705,15 @@ static int process_sample_event(struct perf_tool *tool, static int cpu_isa_config(struct perf_kvm_stat *kvm) { - char buf[64], *cpuid; + char buf[128], *cpuid; int err; if (kvm->live) { err = get_cpuid(buf, sizeof(buf)); if (err != 0) { - pr_err("Failed to look up CPU type\n"); - return err; + pr_err("Failed to look up CPU type: %s\n", + str_error_r(err, buf, sizeof(buf))); + return -err; } cpuid = buf; } else -- cgit v1.2.3 From 9db0e3635fb35b6695275774ab909c51221b66ad Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 30 Sep 2019 11:48:32 -0300 Subject: perf evsel: Fall back to global 'perf_env' in perf_evsel__env() I.e. if evsel->evlist or evsel->evlist->env isn't set, return the environment for the running machine, as that would be set if reading from a perf.data file. Cc: Adrian Hunter Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lkml.kernel.org/n/tip-uqq4grmhbi12rwb0lfpo6lfu@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 3 ++- tools/perf/util/python.c | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 5591af81a070..abc7fda4a0fe 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -30,6 +30,7 @@ #include "counts.h" #include "event.h" #include "evsel.h" +#include "util/env.h" #include "util/evsel_config.h" #include "util/evsel_fprintf.h" #include "evlist.h" @@ -2512,7 +2513,7 @@ struct perf_env *perf_evsel__env(struct evsel *evsel) { if (evsel && evsel->evlist) return evsel->evlist->env; - return NULL; + return &perf_env; } static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist) diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 53f31053a27a..02460362256d 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -14,6 +14,7 @@ #include "thread_map.h" #include "trace-event.h" #include "mmap.h" +#include "util/env.h" #include #include "../perf-sys.h" @@ -53,6 +54,11 @@ int parse_callchain_record(const char *arg __maybe_unused, return 0; } +/* + * Add this one here not to drag util/env.c + */ +struct perf_env perf_env; + /* * Support debug printing even though util/debug.c is not linked. That means * implementing 'verbose' and 'eprintf'. 
-- cgit v1.2.3 From a66fa0619a0ae3585ef09e9c33ecfb5c7c6cb72b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 30 Sep 2019 15:06:01 -0300 Subject: perf annotate: Propagate perf_env__arch() error The callers of symbol__annotate2() use symbol__strerror_disassemble() to convert its failure returns into a human readable string, so propagate error values from functions it calls, starting with perf_env__arch() that when fails the right thing to do is to look at 'errno' to see why its possible call to uname() failed. Reported-by: Russell King - ARM Linux admin Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra , Cc: Will Deacon Link: https://lkml.kernel.org/n/tip-it5d83kyusfhb1q1b0l4pxzs@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index e830eadfca2a..9b7b9176e713 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -2071,7 +2071,7 @@ int symbol__annotate(struct symbol *sym, struct map *map, int err; if (!arch_name) - return -1; + return errno; args.arch = arch = arch__find(arch_name); if (arch == NULL) -- cgit v1.2.3 From 28f4417c3333940b242af03d90214f713bbef232 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 30 Sep 2019 15:11:47 -0300 Subject: perf annotate: Fix the signedness of failure returns Callers of symbol__annotate() expect an errno value or some other extended error value range in symbol__strerror_disassemble() to convert to a proper error string, fix it when propagating a failure to find the arch specific annotation routines via arch__find(arch_name). Reported-by: Russell King - ARM Linux admin Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra , Cc: Will Deacon Link: https://lkml.kernel.org/n/tip-o0k6dw7cas0vvmjjvgsyvu1i@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 9b7b9176e713..95109acf4808 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -2075,7 +2075,7 @@ int symbol__annotate(struct symbol *sym, struct map *map, args.arch = arch = arch__find(arch_name); if (arch == NULL) - return -ENOTSUP; + return ENOTSUP; if (parch) *parch = arch; -- cgit v1.2.3 From 211f493b611eef012841f795166c38ec7528738d Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 30 Sep 2019 15:44:13 -0300 Subject: perf annotate: Propagate the symbol__annotate() error return We were just returning -1 in symbol__annotate2() when symbol__annotate() failed, propagate its error as it is used later to pass to symbol__strerror_disassemble() to present an error message to the user, that in some cases were getting: "Invalid -1 error code" Fix it to propagate the error. 
Reported-by: Russell King - ARM Linux admin Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra , Cc: Will Deacon Link: https://lkml.kernel.org/n/tip-0tj89rs9g7nbcyd5skadlvuu@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 95109acf4808..1de1a7091c48 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -2997,7 +2997,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct evsel *evsel, out_free_offsets: zfree(¬es->offsets); - return -1; + return err; } #define ANNOTATION__CFG(n) \ -- cgit v1.2.3 From 42d7a9107d83223a5fcecc6732d626a6c074cbc2 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 30 Sep 2019 15:48:12 -0300 Subject: perf annotate: Fix arch specific ->init() failure errors They are called from symbol__annotate() and to propagate errors that can help understand the problem make them return what symbol__strerror_disassemble() known, i.e. errno codes and other annotation specific errors in a special, out of errnos, range. Reported-by: Russell King - ARM Linux admin Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra , Cc: Will Deacon Link: https://lkml.kernel.org/n/tip-pqx7srcv7tixgid251aeboj6@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/arm/annotate/instructions.c | 4 ++-- tools/perf/arch/arm64/annotate/instructions.c | 4 ++-- tools/perf/arch/s390/annotate/instructions.c | 6 ++++-- tools/perf/arch/x86/annotate/instructions.c | 6 ++++-- tools/perf/util/annotate.c | 6 ++++++ tools/perf/util/annotate.h | 2 ++ 6 files changed, 20 insertions(+), 8 deletions(-) diff --git a/tools/perf/arch/arm/annotate/instructions.c b/tools/perf/arch/arm/annotate/instructions.c index e1d4b484cc4b..2ff6cedeb9c5 100644 --- a/tools/perf/arch/arm/annotate/instructions.c +++ b/tools/perf/arch/arm/annotate/instructions.c @@ -37,7 +37,7 @@ static int arm__annotate_init(struct arch *arch, char *cpuid __maybe_unused) arm = zalloc(sizeof(*arm)); if (!arm) - return -1; + return ENOMEM; #define ARM_CONDS "(cc|cs|eq|ge|gt|hi|le|ls|lt|mi|ne|pl|vc|vs)" err = regcomp(&arm->call_insn, "^blx?" 
ARM_CONDS "?$", REG_EXTENDED); @@ -59,5 +59,5 @@ out_free_call: regfree(&arm->call_insn); out_free_arm: free(arm); - return -1; + return SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP; } diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c index 43aa93ed8414..037e292ecd8e 100644 --- a/tools/perf/arch/arm64/annotate/instructions.c +++ b/tools/perf/arch/arm64/annotate/instructions.c @@ -95,7 +95,7 @@ static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused) arm = zalloc(sizeof(*arm)); if (!arm) - return -1; + return ENOMEM; /* bl, blr */ err = regcomp(&arm->call_insn, "^blr?$", REG_EXTENDED); @@ -118,5 +118,5 @@ out_free_call: regfree(&arm->call_insn); out_free_arm: free(arm); - return -1; + return SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP; } diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c index 89bb8f2c54ce..a50e70baf918 100644 --- a/tools/perf/arch/s390/annotate/instructions.c +++ b/tools/perf/arch/s390/annotate/instructions.c @@ -164,8 +164,10 @@ static int s390__annotate_init(struct arch *arch, char *cpuid __maybe_unused) if (!arch->initialized) { arch->initialized = true; arch->associate_instruction_ops = s390__associate_ins_ops; - if (cpuid) - err = s390__cpuid_parse(arch, cpuid); + if (cpuid) { + if (s390__cpuid_parse(arch, cpuid)) + err = SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING; + } } return err; diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c index 44f5aba78210..7eb5621c021d 100644 --- a/tools/perf/arch/x86/annotate/instructions.c +++ b/tools/perf/arch/x86/annotate/instructions.c @@ -196,8 +196,10 @@ static int x86__annotate_init(struct arch *arch, char *cpuid) if (arch->initialized) return 0; - if (cpuid) - err = x86__cpuid_parse(arch, cpuid); + if (cpuid) { + if (x86__cpuid_parse(arch, cpuid)) + err = SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING; + } arch->initialized = true; return err; diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 1de1a7091c48..dc15352924f9 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -1631,6 +1631,12 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map * case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF: scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation"); break; + case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP: + scnprintf(buf, buflen, "Problems with arch specific instruction name regular expressions."); + break; + case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING: + scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization."); + break; default: scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum); break; diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index d94be9140e31..116e21f49da6 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -370,6 +370,8 @@ enum symbol_disassemble_errno { SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX = __SYMBOL_ANNOTATE_ERRNO__START, SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF, + SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING, + SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP, __SYMBOL_ANNOTATE_ERRNO__END, }; -- cgit v1.2.3 From 16ed3c1e91159e28b02f11f71ff4ce4cbc6f99e4 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 30 Sep 2019 15:53:33 -0300 Subject: perf annotate: Return appropriate error code for allocation failures We 
should return errno or the annotation extra range understood by symbol__strerror_disassemble() instead of -1, fix it, returning ENOMEM instead. Reported-by: Russell King - ARM Linux admin Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra , Cc: Will Deacon Link: https://lkml.kernel.org/n/tip-8of1cmj3rz0mppfcshc9bbqq@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index dc15352924f9..b49ecdd51188 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -1668,7 +1668,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil build_id_path = strdup(filename); if (!build_id_path) - return -1; + return ENOMEM; /* * old style build-id cache has name of XX/XXXXXXX.. while @@ -2977,7 +2977,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct evsel *evsel, notes->offsets = zalloc(size * sizeof(struct annotation_line *)); if (notes->offsets == NULL) - return -1; + return ENOMEM; if (perf_evsel__is_group_event(evsel)) nr_pcnt = evsel->core.nr_members; -- cgit v1.2.3 From 11aad897f6d1a28eae3b7e5b293647c522d65819 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 30 Sep 2019 16:04:21 -0300 Subject: perf annotate: Don't return -1 for error when doing BPF disassembly Return errno when open_memstream() fails and add two new special error codes for when an invalid, non BPF file or one without BTF is passed to symbol__disassemble_bpf(), so that its callers can rely on symbol__strerror_disassemble() to convert that to a human readable error message that can help figure out what is wrong, with hints even. 
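The overall scheme these annotate fixes converge on can be sketched as a small standalone program (not the actual perf helpers): non-negative returns are plain errno values, while annotation-specific conditions live in a private negative range that a strerror-style helper knows how to render.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Private codes start far below zero so they can never collide with errno values. */
enum annotate_errno {
	__ANNOTATE_ERRNO__START = -10000,
	ANNOTATE_ERRNO__ARCH_INIT_REGEXP = __ANNOTATE_ERRNO__START,
	ANNOTATE_ERRNO__BPF_MISSING_BTF,
	__ANNOTATE_ERRNO__END,
};

static const char *annotate_strerror(int errnum)
{
	if (errnum >= 0)	/* plain errno value (ENOMEM, EINVAL, ...) */
		return strerror(errnum);

	switch (errnum) {
	case ANNOTATE_ERRNO__ARCH_INIT_REGEXP:
		return "arch specific instruction regexps failed to compile";
	case ANNOTATE_ERRNO__BPF_MISSING_BTF:
		return "BPF object has no BTF section, compile with -g";
	default:
		return "invalid error code";
	}
}

int main(void)
{
	printf("%s\n", annotate_strerror(ENOMEM));
	printf("%s\n", annotate_strerror(ANNOTATE_ERRNO__BPF_MISSING_BTF));
	return 0;
}
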
Cc: Russell King - ARM Linux admin Cc: Song Liu Cc: Alexei Starovoitov Cc: Daniel Borkmann Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra , Cc: Will Deacon Link: https://lkml.kernel.org/n/tip-usevw9r2gcipfcrbpaueurw0@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 19 +++++++++++++++---- tools/perf/util/annotate.h | 2 ++ 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index b49ecdd51188..4036c7f7b0fb 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -1637,6 +1637,13 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map * case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING: scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization."); break; + case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE: + scnprintf(buf, buflen, "Invalid BPF file: %s.", dso->long_name); + break; + case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF: + scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.", + dso->long_name); + break; default: scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum); break; @@ -1719,13 +1726,13 @@ static int symbol__disassemble_bpf(struct symbol *sym, char tpath[PATH_MAX]; size_t buf_size; int nr_skip = 0; - int ret = -1; char *buf; bfd *bfdf; + int ret; FILE *s; if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO) - return -1; + return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE; pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__, sym->name, sym->start, sym->end - sym->start); @@ -1738,8 +1745,10 @@ static int symbol__disassemble_bpf(struct symbol *sym, assert(bfd_check_format(bfdf, bfd_object)); s = open_memstream(&buf, &buf_size); - if (!s) + if (!s) { + ret = errno; goto out; + } init_disassemble_info(&info, s, (fprintf_ftype) fprintf); @@ -1748,8 +1757,10 @@ static int symbol__disassemble_bpf(struct symbol *sym, info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id); - if (!info_node) + if (!info_node) { + return SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF; goto out; + } info_linear = info_node->info_linear; sub_id = dso->bpf_prog.sub_id; diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 116e21f49da6..d76fd0e81f46 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -372,6 +372,8 @@ enum symbol_disassemble_errno { SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF, SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING, SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP, + SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE, + SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF, __SYMBOL_ANNOTATE_ERRNO__END, }; -- cgit v1.2.3 From 61129dd29f7962f278b618a2a3e8fdb986a66dc8 Mon Sep 17 00:00:00 2001 From: Seth Forshee Date: Tue, 17 Sep 2019 09:18:53 +0200 Subject: sched: Add __ASSEMBLY__ guards around struct clone_args The addition of struct clone_args to uapi/linux/sched.h is not protected by __ASSEMBLY__ guards, causing a failure to build from source for glibc on RISC-V. Add the guards to fix this. 
Fixes: 7f192e3cd316 ("fork: add clone3") Signed-off-by: Seth Forshee Cc: Acked-by: Ingo Molnar Link: https://lore.kernel.org/r/20190917071853.12385-1-seth.forshee@canonical.com Signed-off-by: Christian Brauner --- include/uapi/linux/sched.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h index b3105ac1381a..851ff1feadd5 100644 --- a/include/uapi/linux/sched.h +++ b/include/uapi/linux/sched.h @@ -33,6 +33,7 @@ #define CLONE_NEWNET 0x40000000 /* New network namespace */ #define CLONE_IO 0x80000000 /* Clone io context */ +#ifndef __ASSEMBLY__ /* * Arguments for the clone3 syscall */ @@ -46,6 +47,7 @@ struct clone_args { __aligned_u64 stack_size; __aligned_u64 tls; }; +#endif /* * Scheduling policies -- cgit v1.2.3 From 3969e76909d3aa06715997896184ee684f68d164 Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Tue, 24 Sep 2019 13:52:37 -0600 Subject: selftests: pidfd: Fix undefined reference to pthread_create() Fix build failure: undefined reference to `pthread_create' collect2: error: ld returned 1 exit status Fix CFLAGS to include pthread correctly. Fixes: 740378dc7834 ("pidfd: add polling selftests") Signed-off-by: Shuah Khan Reviewed-by: Christian Brauner Cc: Link: https://lore.kernel.org/r/20190924195237.30519-1-skhan@linuxfoundation.org Signed-off-by: Christian Brauner --- tools/testing/selftests/pidfd/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/pidfd/Makefile b/tools/testing/selftests/pidfd/Makefile index 464c9b76148f..7550f08822a3 100644 --- a/tools/testing/selftests/pidfd/Makefile +++ b/tools/testing/selftests/pidfd/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -CFLAGS += -g -I../../../../usr/include/ -lpthread +CFLAGS += -g -I../../../../usr/include/ -pthread TEST_GEN_PROGS := pidfd_test pidfd_open_test pidfd_poll_test pidfd_wait -- cgit v1.2.3 From 517d6b9c6f71bee15c729155445c42cc4ef77dda Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 18 Sep 2019 08:30:33 +0000 Subject: erofs: fix return value check in erofs_read_superblock() In case of error, the function read_mapping_page() returns ERR_PTR() not NULL. The NULL test in the return value check should be replaced with IS_ERR(). Fixes: fe7c2423570d ("erofs: use read_mapping_page instead of sb_bread") Reviewed-by: Gao Xiang Reviewed-by: Chao Yu Signed-off-by: Wei Yongjun Link: https://lore.kernel.org/r/20190918083033.47780-1-weiyongjun1@huawei.com Signed-off-by: Gao Xiang --- fs/erofs/super.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/erofs/super.c b/fs/erofs/super.c index caf9a95173b0..0e369494f2f2 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -105,9 +105,9 @@ static int erofs_read_superblock(struct super_block *sb) int ret; page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL); - if (!page) { + if (IS_ERR(page)) { erofs_err(sb, "cannot read erofs superblock"); - return -EIO; + return PTR_ERR(page); } sbi = EROFS_SB(sb); -- cgit v1.2.3 From 6927cc05c2b067d4ca8ab6aeaa3b4544bea26ef8 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 19 Sep 2019 14:28:38 +0800 Subject: MAINTAINERS: erofs: complete sub-entries for erofs Add a formal git tree and missing files for erofs after moving out of staging for everyone to blame in order for better improvement. 
Acked-by: Chao Yu Link: https://lore.kernel.org/r/20190919062838.106423-1-gaoxiang25@huawei.com Signed-off-by: Gao Xiang --- MAINTAINERS | 3 +++ 1 file changed, 3 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 296de2b51c83..30a5b4028d2f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6112,7 +6112,10 @@ M: Gao Xiang M: Chao Yu L: linux-erofs@lists.ozlabs.org S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git +F: Documentation/filesystems/erofs.txt F: fs/erofs/ +F: include/trace/events/erofs.h ERRSEQ ERROR TRACKING INFRASTRUCTURE M: Jeff Layton -- cgit v1.2.3 From 55252ab72b774119afedfdc6d1f142ffa2a9b818 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Sun, 22 Sep 2019 02:43:55 +0800 Subject: erofs: fix erofs_get_meta_page locking due to a cleanup After doing more drop_caches stress test on our products, I found the mistake introduced by a very recent cleanup [1]. The current rule is that "erofs_get_meta_page" should be returned with page locked (although it's mostly unnecessary for read-only fs after pages are PG_uptodate), but a fix should be done for this. [1] https://lore.kernel.org/r/20190904020912.63925-26-gaoxiang25@huawei.com Fixes: 618f40ea026b ("erofs: use read_cache_page_gfp for erofs_get_meta_page") Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20190921184355.149928-1-gaoxiang25@huawei.com Signed-off-by: Gao Xiang --- fs/erofs/data.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 8a9fcbd0e8ac..fc3a8d8064f8 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -34,11 +34,15 @@ static void erofs_readendio(struct bio *bio) struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr) { - struct inode *const bd_inode = sb->s_bdev->bd_inode; - struct address_space *const mapping = bd_inode->i_mapping; + struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping; + struct page *page; - return read_cache_page_gfp(mapping, blkaddr, + page = read_cache_page_gfp(mapping, blkaddr, mapping_gfp_constraint(mapping, ~__GFP_FS)); + /* should already be PageUptodate */ + if (!IS_ERR(page)) + lock_page(page); + return page; } static int erofs_map_blocks_flatmode(struct inode *inode, -- cgit v1.2.3 From dc76ea8c1087b5c44235566ed4be2202d21a8504 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Sun, 22 Sep 2019 18:04:34 +0800 Subject: erofs: fix mis-inplace determination related with noio chain Fix a recent cleanup patch. noio (bypass) chain is handled asynchronously against submit chain, therefore inplace I/O or pagevec cannot be applied to such pages. Add detailed comment for this as well. 
Fixes: 97e86a858bc3 ("staging: erofs: tidy up decompression frontend") Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20190922100434.229340-1-gaoxiang25@huawei.com Signed-off-by: Gao Xiang --- fs/erofs/zdata.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 96e34c90f814..fad80c97d247 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -575,7 +575,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, struct erofs_map_blocks *const map = &fe->map; struct z_erofs_collector *const clt = &fe->clt; const loff_t offset = page_offset(page); - bool tight = (clt->mode >= COLLECT_PRIMARY_HOOKED); + bool tight = true; enum z_erofs_cache_alloctype cache_strategy; enum z_erofs_page_type page_type; @@ -628,8 +628,16 @@ restart_now: preload_compressed_pages(clt, MNGD_MAPPING(sbi), cache_strategy, pagepool); - tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED); hitted: + /* + * Ensure the current partial page belongs to this submit chain rather + * than other concurrent submit chains or the noio(bypass) chain since + * those chains are handled asynchronously thus the page cannot be used + * for inplace I/O or pagevec (should be processed in strict order.) + */ + tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED && + clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE); + cur = end - min_t(unsigned int, offset + end - map->m_la, end); if (!(map->m_flags & EROFS_MAP_MAPPED)) { zero_user_segment(page, cur, end); -- cgit v1.2.3 From 2b6509f42dc6c96ef0b625888d4aa56d2e140330 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Fri, 27 Sep 2019 18:27:42 +0800 Subject: MIPS: Loongson64: Fix boot failure after dropping boot_mem_map From commit a94e4f24ec83 ("MIPS: init: Drop boot_mem_map") onwards, add_memory_region() is handled by memblock_add()/memblock_reserve() directly and all bootmem API should be converted to memblock API. Otherwise it will lead to boot failure, especially in the NUMA case because add_memory_region lose the node_id information. Fixes: a94e4f24ec836c8984f83959 ("MIPS: init: Drop boot_mem_map") Signed-off-by: Huacai Chen Signed-off-by: Jiaxun Yang [paul.burton@mips.com: - Invert node_id check to de-indent the switch statement & avoid lines over 80 characters. - Fixup commit reference in commit message.] 
Signed-off-by: Paul Burton Cc: Ralf Baechle Cc: James Hogan Cc: linux-mips@linux-mips.org Cc: linux-mips@vger.kernel.org Cc: Fuxin Zhang Cc: Zhangjin Wu Cc: Huacai Chen --- arch/mips/loongson64/common/mem.c | 35 +++++++++++++++++----------------- arch/mips/loongson64/loongson-3/numa.c | 11 +---------- 2 files changed, 18 insertions(+), 28 deletions(-) diff --git a/arch/mips/loongson64/common/mem.c b/arch/mips/loongson64/common/mem.c index 4abb92e0fc39..4254ac4ec616 100644 --- a/arch/mips/loongson64/common/mem.c +++ b/arch/mips/loongson64/common/mem.c @@ -3,6 +3,7 @@ */ #include #include +#include #include #include @@ -64,24 +65,22 @@ void __init prom_init_memory(void) node_id = loongson_memmap->map[i].node_id; mem_type = loongson_memmap->map[i].mem_type; - if (node_id == 0) { - switch (mem_type) { - case SYSTEM_RAM_LOW: - add_memory_region(loongson_memmap->map[i].mem_start, - (u64)loongson_memmap->map[i].mem_size << 20, - BOOT_MEM_RAM); - break; - case SYSTEM_RAM_HIGH: - add_memory_region(loongson_memmap->map[i].mem_start, - (u64)loongson_memmap->map[i].mem_size << 20, - BOOT_MEM_RAM); - break; - case SYSTEM_RAM_RESERVED: - add_memory_region(loongson_memmap->map[i].mem_start, - (u64)loongson_memmap->map[i].mem_size << 20, - BOOT_MEM_RESERVED); - break; - } + if (node_id != 0) + continue; + + switch (mem_type) { + case SYSTEM_RAM_LOW: + memblock_add(loongson_memmap->map[i].mem_start, + (u64)loongson_memmap->map[i].mem_size << 20); + break; + case SYSTEM_RAM_HIGH: + memblock_add(loongson_memmap->map[i].mem_start, + (u64)loongson_memmap->map[i].mem_size << 20); + break; + case SYSTEM_RAM_RESERVED: + memblock_reserve(loongson_memmap->map[i].mem_start, + (u64)loongson_memmap->map[i].mem_size << 20); + break; } } } diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c index 414e97de5dc0..8f20d2cb3767 100644 --- a/arch/mips/loongson64/loongson-3/numa.c +++ b/arch/mips/loongson64/loongson-3/numa.c @@ -142,8 +142,6 @@ static void __init szmem(unsigned int node) (u32)node_id, mem_type, mem_start, mem_size); pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", start_pfn, end_pfn, num_physpages); - add_memory_region((node_id << 44) + mem_start, - (u64)mem_size << 20, BOOT_MEM_RAM); memblock_add_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), node); break; @@ -156,16 +154,12 @@ static void __init szmem(unsigned int node) (u32)node_id, mem_type, mem_start, mem_size); pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", start_pfn, end_pfn, num_physpages); - add_memory_region((node_id << 44) + mem_start, - (u64)mem_size << 20, BOOT_MEM_RAM); memblock_add_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), node); break; case SYSTEM_RAM_RESERVED: pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n", (u32)node_id, mem_type, mem_start, mem_size); - add_memory_region((node_id << 44) + mem_start, - (u64)mem_size << 20, BOOT_MEM_RESERVED); memblock_reserve(((node_id << 44) + mem_start), mem_size << 20); break; @@ -191,8 +185,6 @@ static void __init node_mem_init(unsigned int node) NODE_DATA(node)->node_start_pfn = start_pfn; NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn; - free_bootmem_with_active_regions(node, end_pfn); - if (node == 0) { /* kernel end address */ unsigned long kernel_end_pfn = PFN_UP(__pa_symbol(&_end)); @@ -209,8 +201,6 @@ static void __init node_mem_init(unsigned int node) memblock_reserve((node_addrspace_offset | 0xfe000000), 32 << 20); } - - 
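Concretely, the guard boils down to something like the sketch below, where SHARED selects between emitting .symver version nodes and plain aliases; the names mirror the xsk_umem__create example above, the function bodies are hypothetical stand-ins, and the exact macros in libbpf_internal.h may differ in detail:

#ifdef SHARED
/* Shared-library build: emit ELF symbol version nodes. */
# define COMPAT_VERSION(internal_name, api_name, version)		\
	asm(".symver " #internal_name "," #api_name "@" #version);
# define DEFAULT_VERSION(internal_name, api_name, version)		\
	asm(".symver " #internal_name "," #api_name "@@" #version);
#else
/* Static-library build: no version nodes, alias the name to the newest definition. */
# define COMPAT_VERSION(internal_name, api_name, version)
# define DEFAULT_VERSION(internal_name, api_name, version)		\
	extern typeof(internal_name) api_name				\
	__attribute__((alias(#internal_name)));
#endif

/* Hypothetical implementations standing in for the real xsk.c functions. */
int xsk_umem__create_v0_0_2(void) { return 2; }
int xsk_umem__create_v0_0_4(void) { return 4; }

COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)

With the alias form, a static libbpf.a simply exposes xsk_umem__create as the newest implementation, which is what the readelf output below shows.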
sparse_memory_present_with_active_regions(node); } static __init void prom_meminit(void) @@ -227,6 +217,7 @@ static __init void prom_meminit(void) cpumask_clear(&__node_data[(node)]->cpumask); } } + memblocks_present(); max_low_pfn = PHYS_PFN(memblock_end_of_DRAM()); for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) { -- cgit v1.2.3 From 1bd63524593b964934a33afd442df16b8f90e2b5 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Mon, 30 Sep 2019 14:02:03 -0700 Subject: libbpf: handle symbol versioning properly for libbpf.a bcc uses libbpf repo as a submodule. It brings in libbpf source code and builds everything together to produce shared libraries. With latest libbpf, I got the following errors: /bin/ld: libbcc_bpf.so.0.10.0: version node not found for symbol xsk_umem__create@LIBBPF_0.0.2 /bin/ld: failed to set dynamic section sizes: Bad value collect2: error: ld returned 1 exit status make[2]: *** [src/cc/libbcc_bpf.so.0.10.0] Error 1 In xsk.c, we have asm(".symver xsk_umem__create_v0_0_2, xsk_umem__create@LIBBPF_0.0.2"); asm(".symver xsk_umem__create_v0_0_4, xsk_umem__create@@LIBBPF_0.0.4"); The linker thinks the built is for LIBBPF but cannot find proper version LIBBPF_0.0.2/4, so emit errors. I also confirmed that using libbpf.a to produce a shared library also has issues: -bash-4.4$ cat t.c extern void *xsk_umem__create; void * test() { return xsk_umem__create; } -bash-4.4$ gcc -c -fPIC t.c -bash-4.4$ gcc -shared t.o libbpf.a -o t.so /bin/ld: t.so: version node not found for symbol xsk_umem__create@LIBBPF_0.0.2 /bin/ld: failed to set dynamic section sizes: Bad value collect2: error: ld returned 1 exit status -bash-4.4$ Symbol versioning does happens in commonly used libraries, e.g., elfutils and glibc. For static libraries, for a versioned symbol, the old definitions will be ignored, and the symbol will be an alias to the latest definition. For example, glibc sched_setaffinity is versioned. -bash-4.4$ readelf -s /usr/lib64/libc.so.6 | grep sched_setaffinity 756: 000000000013d3d0 13 FUNC GLOBAL DEFAULT 13 sched_setaffinity@GLIBC_2.3.3 757: 00000000000e2e70 455 FUNC GLOBAL DEFAULT 13 sched_setaffinity@@GLIBC_2.3.4 1800: 0000000000000000 0 FILE LOCAL DEFAULT ABS sched_setaffinity.c 4228: 00000000000e2e70 455 FUNC LOCAL DEFAULT 13 __sched_setaffinity_new 4648: 000000000013d3d0 13 FUNC LOCAL DEFAULT 13 __sched_setaffinity_old 7338: 000000000013d3d0 13 FUNC GLOBAL DEFAULT 13 sched_setaffinity@GLIBC_2 7380: 00000000000e2e70 455 FUNC GLOBAL DEFAULT 13 sched_setaffinity@@GLIBC_ -bash-4.4$ For static library, the definition of sched_setaffinity aliases to the new definition. -bash-4.4$ readelf -s /usr/lib64/libc.a | grep sched_setaffinity File: /usr/lib64/libc.a(sched_setaffinity.o) 8: 0000000000000000 455 FUNC GLOBAL DEFAULT 1 __sched_setaffinity_new 12: 0000000000000000 455 FUNC WEAK DEFAULT 1 sched_setaffinity For both elfutils and glibc, additional macros are used to control different handling of symbol versioning w.r.t static and shared libraries. For elfutils, the macro is SYMBOL_VERSIONING (https://sourceware.org/git/?p=elfutils.git;a=blob;f=lib/eu-config.h). For glibc, the macro is SHARED (https://sourceware.org/git/?p=glibc.git;a=blob;f=include/shlib-compat.h;hb=refs/heads/master) This patch used SHARED as the macro name. 
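To make the mechanism concrete, here is a minimal standalone sketch of the SHARED-gated scheme (illustrative only: foo_v1/foo_v2 and the LIB_1.0/LIB_2.0 version nodes are made-up names, and the real macros are the COMPAT_VERSION/DEFAULT_VERSION ones added to libbpf_internal.h further down):

/* versioned.c - toy illustration, not part of the patch */
int foo_v1(void) { return 1; }	/* old implementation */
int foo_v2(void) { return 2; }	/* current implementation */

#ifdef SHARED
/* Shared build: emit real version nodes.  A version script must declare
 * LIB_1.0 and LIB_2.0, otherwise the linker reports the same
 * "version node not found" error quoted above.
 */
asm(".symver foo_v1, foo@LIB_1.0");
asm(".symver foo_v2, foo@@LIB_2.0");
#else
/* Static build: no version nodes, "foo" simply aliases the newest
 * implementation, matching how glibc's libc.a handles sched_setaffinity.
 */
extern typeof(foo_v2) foo __attribute__((alias("foo_v2")));
#endif

Built with -DSHARED into a shared object this yields foo@LIB_1.0 and foo@@LIB_2.0; built without it into an archive, foo resolves to foo_v2 and no version node is needed, which is exactly the property the bcc link above was missing.
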
After this patch, the libbpf.a has -bash-4.4$ readelf -s libbpf.a | grep xsk_umem__create 372: 0000000000017145 1190 FUNC GLOBAL DEFAULT 1 xsk_umem__create_v0_0_4 405: 0000000000017145 1190 FUNC GLOBAL DEFAULT 1 xsk_umem__create 499: 00000000000175eb 103 FUNC GLOBAL DEFAULT 1 xsk_umem__create_v0_0_2 -bash-4.4$ No versioned symbols for xsk_umem__create. The libbpf.a can be used to build a shared library succesfully. -bash-4.4$ cat t.c extern void *xsk_umem__create; void * test() { return xsk_umem__create; } -bash-4.4$ gcc -c -fPIC t.c -bash-4.4$ gcc -shared t.o libbpf.a -o t.so -bash-4.4$ Fixes: 10d30e301732 ("libbpf: add flags to umem config") Cc: Kevin Laatz Cc: Arnaldo Carvalho de Melo Cc: Andrii Nakryiko Acked-by: Andrii Nakryiko Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/Makefile | 27 ++++++++++++++++++--------- tools/lib/bpf/libbpf_internal.h | 16 ++++++++++++++++ tools/lib/bpf/xsk.c | 4 ++-- 3 files changed, 36 insertions(+), 11 deletions(-) diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 20772663d3e1..56ce6292071b 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -114,6 +114,9 @@ override CFLAGS += $(INCLUDES) override CFLAGS += -fvisibility=hidden override CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 +# flags specific for shared library +SHLIB_FLAGS := -DSHARED + ifeq ($(VERBOSE),1) Q = else @@ -130,14 +133,17 @@ all: export srctree OUTPUT CC LD CFLAGS V include $(srctree)/tools/build/Makefile.include -BPF_IN := $(OUTPUT)libbpf-in.o +SHARED_OBJDIR := $(OUTPUT)sharedobjs/ +STATIC_OBJDIR := $(OUTPUT)staticobjs/ +BPF_IN_SHARED := $(SHARED_OBJDIR)libbpf-in.o +BPF_IN_STATIC := $(STATIC_OBJDIR)libbpf-in.o VERSION_SCRIPT := libbpf.map LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET)) LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE)) PC_FILE := $(addprefix $(OUTPUT),$(PC_FILE)) -GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \ +GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \ cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \ sort -u | wc -l) @@ -159,7 +165,7 @@ all: fixdep all_cmd: $(CMD_TARGETS) check -$(BPF_IN): force elfdep bpfdep +$(BPF_IN_SHARED): force elfdep bpfdep @(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \ (diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true @@ -175,17 +181,20 @@ $(BPF_IN): force elfdep bpfdep @(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \ (diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \ echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true - $(Q)$(MAKE) $(build)=libbpf + $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)" + +$(BPF_IN_STATIC): force elfdep bpfdep + $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR) $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION) -$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN) +$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED) $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \ -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@ @ln -sf $(@F) $(OUTPUT)libbpf.so @ln -sf $(@F) 
$(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION) -$(OUTPUT)libbpf.a: $(BPF_IN) +$(OUTPUT)libbpf.a: $(BPF_IN_STATIC) $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^ $(OUTPUT)test_libbpf: test_libbpf.cpp $(OUTPUT)libbpf.a @@ -201,7 +210,7 @@ check: check_abi check_abi: $(OUTPUT)libbpf.so @if [ "$(GLOBAL_SYM_COUNT)" != "$(VERSIONED_SYM_COUNT)" ]; then \ - echo "Warning: Num of global symbols in $(BPF_IN)" \ + echo "Warning: Num of global symbols in $(BPF_IN_SHARED)" \ "($(GLOBAL_SYM_COUNT)) does NOT match with num of" \ "versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \ "Please make sure all LIBBPF_API symbols are" \ @@ -259,9 +268,9 @@ config-clean: $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null clean: - $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \ + $(call QUIET_CLEAN, libbpf) $(RM) -rf $(TARGETS) $(CXX_TEST_TARGET) \ *.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \ - *.pc LIBBPF-CFLAGS + *.pc LIBBPF-CFLAGS $(SHARED_OBJDIR) $(STATIC_OBJDIR) $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index 2e83a34f8c79..98216a69c32f 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -34,6 +34,22 @@ (offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD)) #endif +/* Symbol versioning is different between static and shared library. + * Properly versioned symbols are needed for shared library, but + * only the symbol of the new version is needed for static library. + */ +#ifdef SHARED +# define COMPAT_VERSION(internal_name, api_name, version) \ + asm(".symver " #internal_name "," #api_name "@" #version); +# define DEFAULT_VERSION(internal_name, api_name, version) \ + asm(".symver " #internal_name "," #api_name "@@" #version); +#else +# define COMPAT_VERSION(internal_name, api_name, version) +# define DEFAULT_VERSION(internal_name, api_name, version) \ + extern typeof(internal_name) api_name \ + __attribute__((alias(#internal_name))); +#endif + extern void libbpf_print(enum libbpf_print_level level, const char *format, ...) __attribute__((format(printf, 2, 3))); diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index 24fa313524fb..a902838f9fcc 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -261,8 +261,8 @@ int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area, return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp, &config); } -asm(".symver xsk_umem__create_v0_0_2, xsk_umem__create@LIBBPF_0.0.2"); -asm(".symver xsk_umem__create_v0_0_4, xsk_umem__create@@LIBBPF_0.0.4"); +COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2) +DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4) static int xsk_load_xdp_prog(struct xsk_socket *xsk) { -- cgit v1.2.3 From 0889d07f3e4b171c453b2aaf2b257f9074cdf624 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Mon, 30 Sep 2019 11:39:52 +0200 Subject: MIPS: dts: ar9331: fix interrupt-controller size It is two registers each of 4 byte. 
Signed-off-by: Oleksij Rempel Signed-off-by: Paul Burton Cc: Rob Herring Cc: Mark Rutland Cc: Pengutronix Kernel Team Cc: Ralf Baechle Cc: James Hogan Cc: devicetree@vger.kernel.org Cc: linux-mips@vger.kernel.org Cc: linux-kernel@vger.kernel.org --- arch/mips/boot/dts/qca/ar9331.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi index 63a9f33aa43e..5cfc9d347826 100644 --- a/arch/mips/boot/dts/qca/ar9331.dtsi +++ b/arch/mips/boot/dts/qca/ar9331.dtsi @@ -99,7 +99,7 @@ miscintc: interrupt-controller@18060010 { compatible = "qca,ar7240-misc-intc"; - reg = <0x18060010 0x4>; + reg = <0x18060010 0x8>; interrupt-parent = <&cpuintc>; interrupts = <6>; -- cgit v1.2.3 From 8c7138b33e5c690c308b2a7085f6313fdcb3f616 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Fri, 27 Sep 2019 16:00:31 -0700 Subject: net: Unpublish sk from sk_reuseport_cb before call_rcu The "reuse->sock[]" array is shared by multiple sockets. The going away sk must unpublish itself from "reuse->sock[]" before making call_rcu() call. However, this unpublish-action is currently done after a grace period and it may cause use-after-free. The fix is to move reuseport_detach_sock() to sk_destruct(). Due to the above reason, any socket with sk_reuseport_cb has to go through the rcu grace period before freeing it. It is a rather old bug (~3 yrs). The Fixes tag is not necessary the right commit but it is the one that introduced the SOCK_RCU_FREE logic and this fix is depending on it. Fixes: a4298e4522d6 ("net: add SOCK_RCU_FREE socket flag") Cc: Eric Dumazet Suggested-by: Eric Dumazet Signed-off-by: Martin KaFai Lau Signed-off-by: David S. Miller --- net/core/sock.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index 07863edbe6fc..e23ec80a67bf 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1700,8 +1700,6 @@ static void __sk_destruct(struct rcu_head *head) sk_filter_uncharge(sk, filter); RCU_INIT_POINTER(sk->sk_filter, NULL); } - if (rcu_access_pointer(sk->sk_reuseport_cb)) - reuseport_detach_sock(sk); sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); @@ -1728,7 +1726,14 @@ static void __sk_destruct(struct rcu_head *head) void sk_destruct(struct sock *sk) { - if (sock_flag(sk, SOCK_RCU_FREE)) + bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE); + + if (rcu_access_pointer(sk->sk_reuseport_cb)) { + reuseport_detach_sock(sk); + use_call_rcu = true; + } + + if (use_call_rcu) call_rcu(&sk->sk_rcu, __sk_destruct); else __sk_destruct(&sk->sk_rcu); -- cgit v1.2.3 From 13dc8c029cabf52ba95f60c56eb104d4d95d5889 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 21 Sep 2019 15:49:54 +0900 Subject: kbuild: remove ar-option and KBUILD_ARFLAGS Commit 40df759e2b9e ("kbuild: Fix build with binutils <= 2.19") introduced ar-option and KBUILD_ARFLAGS to deal with old binutils. According to Documentation/process/changes.rst, the current minimal supported version of binutils is 2.21 so you can assume the 'D' option is always supported. Not only GNU ar but also llvm-ar supports it. With the 'D' option hard-coded, there is no more user of ar-option or KBUILD_ARFLAGS. 
Signed-off-by: Masahiro Yamada Reviewed-by: Nick Desaulniers Tested-by: Nick Desaulniers --- Documentation/kbuild/makefiles.rst | 5 ----- Makefile | 4 ---- arch/powerpc/boot/Makefile | 2 +- scripts/Kbuild.include | 5 ----- scripts/Makefile.build | 2 +- scripts/Makefile.lib | 2 +- 6 files changed, 3 insertions(+), 17 deletions(-) diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst index 6ba9d5365ff3..b89c88168d6a 100644 --- a/Documentation/kbuild/makefiles.rst +++ b/Documentation/kbuild/makefiles.rst @@ -954,11 +954,6 @@ When kbuild executes, the following steps are followed (roughly): From commandline LDFLAGS_MODULE shall be used (see kbuild.txt). - KBUILD_ARFLAGS Options for $(AR) when creating archives - - $(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic - mode) if this option is supported by $(AR). - KBUILD_LDS The linker script with full path. Assigned by the top-level Makefile. diff --git a/Makefile b/Makefile index 6f54f2f95743..7452174f5b21 100644 --- a/Makefile +++ b/Makefile @@ -498,7 +498,6 @@ export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL -export KBUILD_ARFLAGS # Files to ignore in find ... statements @@ -914,9 +913,6 @@ ifdef CONFIG_RETPOLINE KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none) endif -# use the deterministic mode of AR if available -KBUILD_ARFLAGS := $(call ar-option,D) - include scripts/Makefile.kasan include scripts/Makefile.extrawarn include scripts/Makefile.ubsan diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index 6841bd52738b..dfbd7f22eef5 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -50,7 +50,7 @@ endif BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -nostdinc -BOOTARFLAGS := -cr$(KBUILD_ARFLAGS) +BOOTARFLAGS := -crD ifdef CONFIG_CC_IS_CLANG BOOTCFLAGS += $(CLANG_FLAGS) diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 4b0432e095ae..10ba926ae292 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -143,11 +143,6 @@ cc-ifversion = $(shell [ $(CONFIG_GCC_VERSION)0 $(1) $(2)000 ] && echo $(3) || e # Usage: KBUILD_LDFLAGS += $(call ld-option, -X, -Y) ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3)) -# ar-option -# Usage: KBUILD_ARFLAGS := $(call ar-option,D) -# Important: no spaces around options -ar-option = $(call try-run, $(AR) rc$(1) "$$TMP",$(1),$(2)) - # ld-version # Note this is mainly for HJ Lu's 3 number binutil versions ld-version = $(shell $(LD) --version | $(srctree)/scripts/ld-version.sh) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index f72aba64d611..a9e47953ca53 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -389,7 +389,7 @@ $(sort $(subdir-obj-y)): $(subdir-ym) ; ifdef builtin-target quiet_cmd_ar_builtin = AR $@ - cmd_ar_builtin = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS) $@ $(real-prereqs) + cmd_ar_builtin = rm -f $@; $(AR) cDPrST $@ $(real-prereqs) $(builtin-target): $(real-obj-y) FORCE $(call if_changed,ar_builtin) diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index 4a0cdd6f5909..179d55af5852 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -232,7 +232,7 @@ quiet_cmd_ld = LD $@ # --------------------------------------------------------------------------- quiet_cmd_ar = AR $@ - cmd_ar = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(real-prereqs) 
+ cmd_ar = rm -f $@; $(AR) cDPrsT $@ $(real-prereqs) # Objcopy # --------------------------------------------------------------------------- -- cgit v1.2.3 From b6f2494d311a19b33b19708543e7ef6dea1de459 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 29 Sep 2019 01:08:17 +0300 Subject: net: dsa: sja1105: Ensure PTP time for rxtstamp reconstruction is not in the past Sometimes the PTP synchronization on the switch 'jumps': ptp4l[11241.155]: rms 8 max 16 freq -21732 +/- 11 delay 742 +/- 0 ptp4l[11243.157]: rms 7 max 17 freq -21731 +/- 10 delay 744 +/- 0 ptp4l[11245.160]: rms 33592410 max 134217731 freq +192422 +/- 8530253 delay 743 +/- 0 ptp4l[11247.163]: rms 811631 max 964131 freq +10326 +/- 557785 delay 743 +/- 0 ptp4l[11249.166]: rms 261936 max 533876 freq -304323 +/- 126371 delay 744 +/- 0 ptp4l[11251.169]: rms 48700 max 57740 freq -20218 +/- 30532 delay 744 +/- 0 ptp4l[11253.171]: rms 14570 max 30163 freq -5568 +/- 7563 delay 742 +/- 0 ptp4l[11255.174]: rms 2914 max 3440 freq -22001 +/- 1667 delay 744 +/- 1 ptp4l[11257.177]: rms 811 max 1710 freq -22653 +/- 451 delay 744 +/- 1 ptp4l[11259.180]: rms 177 max 218 freq -21695 +/- 89 delay 741 +/- 0 ptp4l[11261.182]: rms 45 max 92 freq -21677 +/- 32 delay 742 +/- 0 ptp4l[11263.186]: rms 14 max 32 freq -21733 +/- 11 delay 742 +/- 0 ptp4l[11265.188]: rms 9 max 14 freq -21725 +/- 12 delay 742 +/- 0 ptp4l[11267.191]: rms 9 max 16 freq -21727 +/- 13 delay 742 +/- 0 ptp4l[11269.194]: rms 6 max 15 freq -21726 +/- 9 delay 743 +/- 0 ptp4l[11271.197]: rms 8 max 15 freq -21728 +/- 11 delay 743 +/- 0 ptp4l[11273.200]: rms 6 max 12 freq -21727 +/- 8 delay 743 +/- 0 ptp4l[11275.202]: rms 9 max 17 freq -21720 +/- 11 delay 742 +/- 0 ptp4l[11277.205]: rms 9 max 18 freq -21725 +/- 12 delay 742 +/- 0 Background: the switch only offers partial RX timestamps (24 bits) and it is up to the driver to read the PTP clock to fill those timestamps up to 64 bits. But the PTP clock readout needs to happen quickly enough (in 0.135 seconds, in fact), otherwise the PTP clock will wrap around 24 bits, condition which cannot be detected. Looking at the 'max 134217731' value on output line 3, one can see that in hex it is 0x8000003. Because the PTP clock resolution is 8 ns, that means 0x1000000 in ticks, which is exactly 2^24. So indeed this is a PTP clock wraparound, but the reason might be surprising. What is going on is that sja1105_tstamp_reconstruct(priv, now, ts) expects a "now" time that is later than the "ts" was snapshotted at. This, of course, is obvious: we read the PTP time _after_ the partial RX timestamp was received. However, the workqueue is processing frames from a skb queue and reuses the same PTP time, read once at the beginning. Normally the skb queue only contains one frame and all goes well. But when the skb queue contains two frames, the second frame that gets dequeued might have been partially timestamped by the RX MAC _after_ we had read our PTP time initially. The code was originally like that due to concerns that SPI access for PTP time readout is a slow process, and we are time-constrained anyway (aka: premature optimization). But some timing analysis reveals that the time spent until the RX timestamp is completely reconstructed is 1 order of magnitude lower than the 0.135 s deadline even under worst-case conditions. So we can afford to read the PTP time for each frame in the RX timestamping queue, which of course ensures that the full PTP time is in the partial timestamp's future. 
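For reference, the reconstruction constraint can be shown with a simplified sketch of the idea (names and the exact wraparound check are illustrative, not the driver's sja1105_tstamp_reconstruct() itself):

#include <linux/types.h>
#include <linux/bits.h>

static u64 rx_tstamp_reconstruct(u64 now, u64 ts_partial)
{
	const u64 partial_mask = GENMASK_ULL(23, 0);
	/* Upper bits come from the full PTP clock readout ("now"),
	 * the low 24 bits from the hardware's partial RX timestamp.
	 */
	u64 ts = (now & ~partial_mask) | (ts_partial & partial_mask);

	/* If the clock's low 24 bits already wrapped past the partial
	 * timestamp, the value above overshoots by exactly 2^24 ticks.
	 */
	if (ts > now)
		ts -= BIT_ULL(24);

	return ts;
}

This only yields the correct 64-bit value if "now" was read after the partial timestamp was taken and no more than 2^24 ticks (~0.135 s at 8 ns resolution, as noted above) later, hence reading the PTP clock once per dequeued frame instead of once per queue.
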
Fixes: f3097be21bf1 ("net: dsa: sja1105: Add a state machine for RX timestamping") Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/sja1105/sja1105_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index b9def744bcb3..f3d38ff144c4 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -2005,12 +2005,12 @@ static void sja1105_rxtstamp_work(struct work_struct *work) mutex_lock(&priv->ptp_lock); - now = priv->tstamp_cc.read(&priv->tstamp_cc); - while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) { struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb); u64 ts; + now = priv->tstamp_cc.read(&priv->tstamp_cc); + *shwt = (struct skb_shared_hwtstamps) {0}; ts = SJA1105_SKB_CB(skb)->meta_tstamp; -- cgit v1.2.3 From 7e35b42591c058b91282f95ce3b2cf0c05ffe93d Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 21 Sep 2019 16:05:41 +0900 Subject: kbuild: remove SUBDIRS support Linux 5.3 is out. Remove the SUBDIRS support. Signed-off-by: Masahiro Yamada --- Makefile | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/Makefile b/Makefile index 7452174f5b21..779c9c9b9820 100644 --- a/Makefile +++ b/Makefile @@ -206,24 +206,8 @@ ifndef KBUILD_CHECKSRC KBUILD_CHECKSRC = 0 endif -# Use make M=dir to specify directory of external module to build -# Old syntax make ... SUBDIRS=$PWD is still supported -# Setting the environment variable KBUILD_EXTMOD take precedence -ifdef SUBDIRS - $(warning ================= WARNING ================) - $(warning 'SUBDIRS' will be removed after Linux 5.3) - $(warning ) - $(warning If you are building an individual subdirectory) - $(warning in the kernel tree, you can do like this:) - $(warning $$ make path/to/dir/you/want/to/build/) - $(warning (Do not forget the trailing slash)) - $(warning ) - $(warning If you are building an external module,) - $(warning Please use 'M=' or 'KBUILD_EXTMOD' instead) - $(warning ==========================================) - KBUILD_EXTMOD ?= $(SUBDIRS) -endif - +# Use make M=dir or set the environment variable KBUILD_EXTMOD to specify the +# directory of external module to build. Setting M= takes precedence. ifeq ("$(origin M)", "command line") KBUILD_EXTMOD := $(M) endif -- cgit v1.2.3 From 807f2105b87af20c4a582fb62111b16689f83bb8 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Sat, 21 Sep 2019 15:23:04 -0700 Subject: kbuild: correct formatting of header in kbuild module docs Minor formatting fixup. Fixes: cd238effefa2 ("docs: kbuild: convert docs to ReST and rename to *.rst") Signed-off-by: Alex Gaynor Signed-off-by: Matthew Garrett Signed-off-by: Masahiro Yamada --- Documentation/kbuild/modules.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst index d2ae799237fd..33a17121d11d 100644 --- a/Documentation/kbuild/modules.rst +++ b/Documentation/kbuild/modules.rst @@ -498,7 +498,8 @@ build. will be written containing all exported symbols that were not defined in the kernel. ---- 6.3 Symbols From Another External Module +6.3 Symbols From Another External Module +---------------------------------------- Sometimes, an external module uses exported symbols from another external module. 
kbuild needs to have full knowledge of -- cgit v1.2.3 From 47346e96f004eca07720e1e2b24fc7f0b0df4092 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 24 Sep 2019 21:07:40 +0900 Subject: modpost: fix static EXPORT_SYMBOL warnings for UML build Johannes Berg reports lots of modpost warnings on ARCH=um builds: WARNING: "rename" [vmlinux] is a static EXPORT_SYMBOL WARNING: "lseek" [vmlinux] is a static EXPORT_SYMBOL WARNING: "ftruncate64" [vmlinux] is a static EXPORT_SYMBOL WARNING: "getuid" [vmlinux] is a static EXPORT_SYMBOL WARNING: "lseek64" [vmlinux] is a static EXPORT_SYMBOL WARNING: "unlink" [vmlinux] is a static EXPORT_SYMBOL WARNING: "pwrite64" [vmlinux] is a static EXPORT_SYMBOL WARNING: "close" [vmlinux] is a static EXPORT_SYMBOL WARNING: "opendir" [vmlinux] is a static EXPORT_SYMBOL WARNING: "pread64" [vmlinux] is a static EXPORT_SYMBOL WARNING: "syscall" [vmlinux] is a static EXPORT_SYMBOL WARNING: "readdir" [vmlinux] is a static EXPORT_SYMBOL WARNING: "readdir64" [vmlinux] is a static EXPORT_SYMBOL WARNING: "futimes" [vmlinux] is a static EXPORT_SYMBOL WARNING: "__lxstat" [vmlinux] is a static EXPORT_SYMBOL WARNING: "write" [vmlinux] is a static EXPORT_SYMBOL WARNING: "closedir" [vmlinux] is a static EXPORT_SYMBOL WARNING: "__xstat" [vmlinux] is a static EXPORT_SYMBOL WARNING: "fsync" [vmlinux] is a static EXPORT_SYMBOL WARNING: "__lxstat64" [vmlinux] is a static EXPORT_SYMBOL WARNING: "__fxstat64" [vmlinux] is a static EXPORT_SYMBOL WARNING: "telldir" [vmlinux] is a static EXPORT_SYMBOL WARNING: "printf" [vmlinux] is a static EXPORT_SYMBOL WARNING: "readlink" [vmlinux] is a static EXPORT_SYMBOL WARNING: "__sprintf_chk" [vmlinux] is a static EXPORT_SYMBOL WARNING: "link" [vmlinux] is a static EXPORT_SYMBOL WARNING: "rmdir" [vmlinux] is a static EXPORT_SYMBOL WARNING: "fdatasync" [vmlinux] is a static EXPORT_SYMBOL WARNING: "truncate" [vmlinux] is a static EXPORT_SYMBOL WARNING: "statfs" [vmlinux] is a static EXPORT_SYMBOL WARNING: "__errno_location" [vmlinux] is a static EXPORT_SYMBOL WARNING: "__xmknod" [vmlinux] is a static EXPORT_SYMBOL WARNING: "open64" [vmlinux] is a static EXPORT_SYMBOL WARNING: "truncate64" [vmlinux] is a static EXPORT_SYMBOL WARNING: "open" [vmlinux] is a static EXPORT_SYMBOL WARNING: "read" [vmlinux] is a static EXPORT_SYMBOL WARNING: "chown" [vmlinux] is a static EXPORT_SYMBOL WARNING: "chmod" [vmlinux] is a static EXPORT_SYMBOL WARNING: "utime" [vmlinux] is a static EXPORT_SYMBOL WARNING: "fchmod" [vmlinux] is a static EXPORT_SYMBOL WARNING: "seekdir" [vmlinux] is a static EXPORT_SYMBOL WARNING: "ioctl" [vmlinux] is a static EXPORT_SYMBOL WARNING: "dup2" [vmlinux] is a static EXPORT_SYMBOL WARNING: "statfs64" [vmlinux] is a static EXPORT_SYMBOL WARNING: "utimes" [vmlinux] is a static EXPORT_SYMBOL WARNING: "mkdir" [vmlinux] is a static EXPORT_SYMBOL WARNING: "fchown" [vmlinux] is a static EXPORT_SYMBOL WARNING: "__guard" [vmlinux] is a static EXPORT_SYMBOL WARNING: "symlink" [vmlinux] is a static EXPORT_SYMBOL WARNING: "access" [vmlinux] is a static EXPORT_SYMBOL WARNING: "__stack_smash_handler" [vmlinux] is a static EXPORT_SYMBOL When you run "make", the modpost is run twice; before linking vmlinux, and before building modules. All the warnings above are from the second modpost. The offending symbols are defined not in vmlinux, but in the C library. The first modpost is run against the relocatable vmlinux.o, and those warnings are nicely suppressed because the SH_UNDEF entries from the symbol table clear the ->is_static flag. 
The second modpost is run against the executable vmlinux (+ modules), where those symbols have been resolved, but the definitions do not exist. This commit fixes it in a straightforward way; suppress the static EXPORT_SYMBOL warnings from "vmlinux". Without this commit, we see valid warnings twice anyway. For example, ARCH=arm64 defconfig shows the following warning twice: WARNING: "HYPERVISOR_platform_op" [vmlinux] is a static EXPORT_SYMBOL_GPL So, it is reasonable to suppress the second one. Fixes: 15bfc2348d54 ("modpost: check for static EXPORT_SYMBOL* functions") Reported-by: Johannes Berg Signed-off-by: Masahiro Yamada Tested-by: Johannes Berg Tested-by: Denis Efremov --- scripts/mod/modpost.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 3961941e8e7a..442d5e2ad688 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -2652,15 +2652,20 @@ int main(int argc, char **argv) fatal("modpost: Section mismatches detected.\n" "Set CONFIG_SECTION_MISMATCH_WARN_ONLY=y to allow them.\n"); for (n = 0; n < SYMBOL_HASH_SIZE; n++) { - struct symbol *s = symbolhash[n]; + struct symbol *s; + + for (s = symbolhash[n]; s; s = s->next) { + /* + * Do not check "vmlinux". This avoids the same warnings + * shown twice, and false-positives for ARCH=um. + */ + if (is_vmlinux(s->module->name) && !s->module->is_dot_o) + continue; - while (s) { if (s->is_static) warn("\"%s\" [%s] is a static %s\n", s->name, s->module->name, export_str(s->export)); - - s = s->next; } } -- cgit v1.2.3 From 68501df92d116b760777a2cfda314789f926476f Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Sun, 29 Sep 2019 01:43:39 +0300 Subject: net: dsa: sja1105: Prevent leaking memory In sja1105_static_config_upload, in two cases memory is leaked: when static_config_buf_prepare_for_upload fails and when sja1105_inhibit_tx fails. In both cases config_buf should be released. Fixes: 8aa9ebccae87 ("net: dsa: Introduce driver for NXP SJA1105 5-port L2 switch") Fixes: 1a4c69406cc1 ("net: dsa: sja1105: Prevent PHY jabbering during switch reset") Signed-off-by: Navid Emamdoost Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/sja1105/sja1105_spi.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c index 84dc603138cf..58dd37ecde17 100644 --- a/drivers/net/dsa/sja1105/sja1105_spi.c +++ b/drivers/net/dsa/sja1105/sja1105_spi.c @@ -409,7 +409,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv) rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len); if (rc < 0) { dev_err(dev, "Invalid config, cannot upload\n"); - return -EINVAL; + rc = -EINVAL; + goto out; } /* Prevent PHY jabbering during switch reset by inhibiting * Tx on all ports and waiting for current packet to drain. @@ -418,7 +419,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv) rc = sja1105_inhibit_tx(priv, port_bitmap, true); if (rc < 0) { dev_err(dev, "Failed to inhibit Tx on ports\n"); - return -ENXIO; + rc = -ENXIO; + goto out; } /* Wait for an eventual egress packet to finish transmission * (reach IFG). 
It is guaranteed that a second one will not -- cgit v1.2.3 From 68ce6688a5baefde30914fc07fc27292dbbe8320 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 29 Sep 2019 02:01:39 +0300 Subject: net: sched: taprio: Fix potential integer overflow in taprio_set_picos_per_byte The speed divisor is used in a context expecting an s64, but it is evaluated using 32-bit arithmetic. To avoid that happening, instead of multiplying by 1,000,000 in the first place, simplify the fraction and do a standard 32 bit division instead. Fixes: f04b514c0ce2 ("taprio: Set default link speed to 10 Mbps in taprio_set_picos_per_byte") Reported-by: Gustavo A. R. Silva Suggested-by: Eric Dumazet Signed-off-by: Vladimir Oltean Acked-by: Vinicius Costa Gomes Signed-off-by: David S. Miller --- net/sched/sch_taprio.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 2f7b34205c82..2aab46ada94f 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1048,8 +1048,7 @@ static void taprio_set_picos_per_byte(struct net_device *dev, speed = ecmd.base.speed; skip: - picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, - speed * 1000 * 1000); + picos_per_byte = (USEC_PER_SEC * 8) / speed; atomic64_set(&q->picos_per_byte, picos_per_byte); netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n", -- cgit v1.2.3 From 21e3d6c81179bbdfa279efc8de456c34b814cfd2 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Tue, 3 Sep 2019 12:18:39 +0200 Subject: scsi: sd: Ignore a failure to sync cache due to lack of authorization I've got a report about a UAS drive enclosure reporting back Sense: Logical unit access not authorized if the drive it holds is password protected. While the drive is obviously unusable in that state as a mass storage device, it still exists as a sd device and when the system is asked to perform a suspend of the drive, it will be sent a SYNCHRONIZE CACHE. If that fails due to password protection, the error must be ignored. Cc: Link: https://lore.kernel.org/r/20190903101840.16483-1-oneukum@suse.com Signed-off-by: Oliver Neukum Signed-off-by: Martin K. Petersen --- drivers/scsi/sd.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 91af598f2f53..0f96eb0ddbfa 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1655,7 +1655,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) /* we need to evaluate the error return */ if (scsi_sense_valid(sshdr) && (sshdr->asc == 0x3a || /* medium not present */ - sshdr->asc == 0x20)) /* invalid command */ + sshdr->asc == 0x20 || /* invalid command */ + (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */ /* this is no error here */ return 0; -- cgit v1.2.3 From 9bc6157f5fd0e898c94f3018d088a3419bde0d8f Mon Sep 17 00:00:00 2001 From: Daniel Wagner Date: Fri, 27 Sep 2019 09:30:31 +0200 Subject: scsi: qla2xxx: Remove WARN_ON_ONCE in qla2x00_status_cont_entry() Commit 88263208dd23 ("scsi: qla2xxx: Complain if sp->done() is not called from the completion path") introduced the WARN_ON_ONCE in qla2x00_status_cont_entry(). The assumption was that there is only one status continuations element. According to the firmware documentation it is possible that multiple status continuations are emitted by the firmware. 
Fixes: 88263208dd23 ("scsi: qla2xxx: Complain if sp->done() is not called from the completion path") Link: https://lore.kernel.org/r/20190927073031.62296-1-dwagner@suse.de Cc: Bart Van Assche Reviewed-by: Hannes Reinecke Reviewed-by: Bart Van Assche Signed-off-by: Daniel Wagner Signed-off-by: Martin K. Petersen --- drivers/scsi/qla2xxx/qla_isr.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 4c26630c1c3e..009fd5a33fcd 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2837,8 +2837,6 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) if (sense_len == 0) { rsp->status_srb = NULL; sp->done(sp, cp->result); - } else { - WARN_ON_ONCE(true); } } -- cgit v1.2.3 From ec1db1be106101c24f6f24efbb5eb3176f66cb50 Mon Sep 17 00:00:00 2001 From: Michael Straube Date: Tue, 17 Sep 2019 21:07:55 +0200 Subject: staging: exfat: add missing SPDX line to Kconfig Add SPDX identifier to Kconfig. Signed-off-by: Michael Straube Link: https://lore.kernel.org/r/20190917190755.21723-1-straube.linux@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/exfat/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig index 290dbfc7ace1..59e07afe249c 100644 --- a/drivers/staging/exfat/Kconfig +++ b/drivers/staging/exfat/Kconfig @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 config EXFAT_FS tristate "exFAT fs support" depends on BLOCK -- cgit v1.2.3 From a358eea07c78d9d0c246738ed1c6349d72d41920 Mon Sep 17 00:00:00 2001 From: Valdis Klētnieks Date: Wed, 18 Sep 2019 20:38:08 -0400 Subject: staging: exfat - fix SPDX tags.. The copyright notices as I got them said "GPLv2 or later", which I screwed up when putting in the SPDX tags. Fix them to match the license I got the code under. Signed-off-by: Valdis Kletnieks Link: https://lore.kernel.org/r/122590.1568853488@turing-police Signed-off-by: Greg Kroah-Hartman --- drivers/staging/exfat/Makefile | 2 +- drivers/staging/exfat/exfat.h | 2 +- drivers/staging/exfat/exfat_blkdev.c | 2 +- drivers/staging/exfat/exfat_cache.c | 2 +- drivers/staging/exfat/exfat_core.c | 2 +- drivers/staging/exfat/exfat_nls.c | 2 +- drivers/staging/exfat/exfat_super.c | 2 +- drivers/staging/exfat/exfat_upcase.c | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/staging/exfat/Makefile b/drivers/staging/exfat/Makefile index 84944dfbae28..6c90aec83feb 100644 --- a/drivers/staging/exfat/Makefile +++ b/drivers/staging/exfat/Makefile @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: GPL-2.0-or-later obj-$(CONFIG_EXFAT_FS) += exfat.o diff --git a/drivers/staging/exfat/exfat.h b/drivers/staging/exfat/exfat.h index 6c12f2d79f4d..3abab33e932c 100644 --- a/drivers/staging/exfat/exfat.h +++ b/drivers/staging/exfat/exfat.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. */ diff --git a/drivers/staging/exfat/exfat_blkdev.c b/drivers/staging/exfat/exfat_blkdev.c index f086c75e7076..81d20e6241c6 100644 --- a/drivers/staging/exfat/exfat_blkdev.c +++ b/drivers/staging/exfat/exfat_blkdev.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. 
*/ diff --git a/drivers/staging/exfat/exfat_cache.c b/drivers/staging/exfat/exfat_cache.c index 1565ce65d39f..e1b001718709 100644 --- a/drivers/staging/exfat/exfat_cache.c +++ b/drivers/staging/exfat/exfat_cache.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. */ diff --git a/drivers/staging/exfat/exfat_core.c b/drivers/staging/exfat/exfat_core.c index b3e9cf725cf5..79174e5c4145 100644 --- a/drivers/staging/exfat/exfat_core.c +++ b/drivers/staging/exfat/exfat_core.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. */ diff --git a/drivers/staging/exfat/exfat_nls.c b/drivers/staging/exfat/exfat_nls.c index 03cb8290b5d2..a5c4b68925fb 100644 --- a/drivers/staging/exfat/exfat_nls.c +++ b/drivers/staging/exfat/exfat_nls.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. */ diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c index 5f6caee819a6..229ecabe7a93 100644 --- a/drivers/staging/exfat/exfat_super.c +++ b/drivers/staging/exfat/exfat_super.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. */ diff --git a/drivers/staging/exfat/exfat_upcase.c b/drivers/staging/exfat/exfat_upcase.c index 366082fb3dab..b91a1faa0e50 100644 --- a/drivers/staging/exfat/exfat_upcase.c +++ b/drivers/staging/exfat/exfat_upcase.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. */ -- cgit v1.2.3 From 89d5f78fab48844d750f6f3a419e26f7f828bac8 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Mon, 30 Sep 2019 22:05:04 +0900 Subject: staging: exfat: Fix a typo in Kconfig This patch fix a spelling typo in Kconfig. Signed-off-by: Masanari Iida Link: https://lore.kernel.org/r/20190930130504.21994-1-standby24x7@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/exfat/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/exfat/Kconfig b/drivers/staging/exfat/Kconfig index 59e07afe249c..ce32dfe33bec 100644 --- a/drivers/staging/exfat/Kconfig +++ b/drivers/staging/exfat/Kconfig @@ -7,7 +7,7 @@ config EXFAT_FS This adds support for the exFAT file system. 
config EXFAT_DONT_MOUNT_VFAT - bool "Prohibit mounting of fat/vfat filesysems by exFAT" + bool "Prohibit mounting of fat/vfat filesystems by exFAT" depends on EXFAT_FS default y help -- cgit v1.2.3 From 7d4dea95f8281fc7f14766a6040e3504d3f658fc Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 19 Sep 2019 11:50:22 +0200 Subject: staging: octeon: Use "(uintptr_t)" to cast from pointer to int MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On 32-bit: In file included from drivers/staging/octeon/octeon-ethernet.h:41, from drivers/staging/octeon/ethernet-tx.c:25: drivers/staging/octeon/octeon-stubs.h: In function ‘cvmx_phys_to_ptr’: drivers/staging/octeon/octeon-stubs.h:1205:9: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast] return (void *)(physical_address); ^ drivers/staging/octeon/ethernet-tx.c: In function ‘cvm_oct_xmit’: drivers/staging/octeon/ethernet-tx.c:264:37: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast] hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data); ^ drivers/staging/octeon/octeon-stubs.h:2:30: note: in definition of macro ‘XKPHYS_TO_PHYS’ #define XKPHYS_TO_PHYS(p) (p) ^ drivers/staging/octeon/ethernet-tx.c:268:37: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast] hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data); ^ drivers/staging/octeon/octeon-stubs.h:2:30: note: in definition of macro ‘XKPHYS_TO_PHYS’ #define XKPHYS_TO_PHYS(p) (p) ^ drivers/staging/octeon/ethernet-tx.c:276:20: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast] XKPHYS_TO_PHYS((u64)skb_frag_address(fs)); ^ drivers/staging/octeon/octeon-stubs.h:2:30: note: in definition of macro ‘XKPHYS_TO_PHYS’ #define XKPHYS_TO_PHYS(p) (p) ^ drivers/staging/octeon/ethernet-tx.c:280:37: warning: cast from pointer to integer of different size [-Wpointer-to-int-cast] hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb)); ^ drivers/staging/octeon/octeon-stubs.h:2:30: note: in definition of macro ‘XKPHYS_TO_PHYS’ #define XKPHYS_TO_PHYS(p) (p) ^ Fix this by replacing casts to "u64" by casts to "uintptr_t", which is either 32-bit or 64-bit, and adding an intermediate cast to "uintptr_t" where needed. Exposed by commit 171a9bae68c72f2d ("staging/octeon: Allow test build on !MIPS"). 
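The underlying issue is plain C and can be reduced to a short sketch (illustrative only, not taken from the patch):

#include <stdint.h>

/* On a 32-bit build, casting a pointer directly to a 64-bit integer
 * triggers -Wpointer-to-int-cast because the sizes differ.  Casting
 * through uintptr_t, which always matches the pointer width, and letting
 * the result widen to 64 bits is warning-free on both 32- and 64-bit.
 */
static uint64_t addr_of(void *p)
{
	return (uintptr_t)p;	/* rather than (uint64_t)p */
}

The same reasoning applies in the opposite direction in cvmx_phys_to_ptr(), where the 64-bit physical address is narrowed through uintptr_t before being converted to a pointer.
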
Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20190919095022.29099-1-geert@linux-m68k.org Signed-off-by: Greg Kroah-Hartman --- drivers/staging/octeon/ethernet-tx.c | 9 +++++---- drivers/staging/octeon/octeon-stubs.h | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index c64728fc21f2..7021ff07ba2a 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -261,11 +261,11 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) /* Build the PKO buffer pointer */ hw_buffer.u64 = 0; if (skb_shinfo(skb)->nr_frags == 0) { - hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data); + hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data); hw_buffer.s.pool = 0; hw_buffer.s.size = skb->len; } else { - hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data); + hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data); hw_buffer.s.pool = 0; hw_buffer.s.size = skb_headlen(skb); CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64; @@ -273,11 +273,12 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev) skb_frag_t *fs = skb_shinfo(skb)->frags + i; hw_buffer.s.addr = - XKPHYS_TO_PHYS((u64)skb_frag_address(fs)); + XKPHYS_TO_PHYS((uintptr_t)skb_frag_address(fs)); hw_buffer.s.size = skb_frag_size(fs); CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64; } - hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb)); + hw_buffer.s.addr = + XKPHYS_TO_PHYS((uintptr_t)CVM_OCT_SKB_CB(skb)); hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1; pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1; pko_command.s.gather = 1; diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h index a4ac3bfb62a8..b78ce9eaab85 100644 --- a/drivers/staging/octeon/octeon-stubs.h +++ b/drivers/staging/octeon/octeon-stubs.h @@ -1202,7 +1202,7 @@ static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work) static inline void *cvmx_phys_to_ptr(uint64_t physical_address) { - return (void *)(physical_address); + return (void *)(uintptr_t)(physical_address); } static inline uint64_t cvmx_ptr_to_phys(void *ptr) -- cgit v1.2.3 From 63f2b1677fba11c5bd02089f25c13421948905f5 Mon Sep 17 00:00:00 2001 From: Noralf Trønnes Date: Tue, 17 Sep 2019 19:18:41 +0200 Subject: staging/fbtft: Depend on OF MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit c440eee1a7a1 ("Staging: fbtft: Switch to the gpio descriptor interface") removed setting gpios via platform data. This means that fbtft will now only work with Device Tree so set the dependency. This also prevents a NULL pointer deref on non-DT platform because fbtftops.request_gpios is not set in that case anymore. 
Fixes: c440eee1a7a1 ("Staging: fbtft: Switch to the gpio descriptor interface") Cc: stable Signed-off-by: Noralf Trønnes Link: https://lore.kernel.org/r/20190917171843.10334-1-noralf@tronnes.org Signed-off-by: Greg Kroah-Hartman --- drivers/staging/fbtft/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig index 8ec524a95ec8..4e5d860fd788 100644 --- a/drivers/staging/fbtft/Kconfig +++ b/drivers/staging/fbtft/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 menuconfig FB_TFT tristate "Support for small TFT LCD display modules" - depends on FB && SPI + depends on FB && SPI && OF depends on GPIOLIB || COMPILE_TEST select FB_SYS_FILLRECT select FB_SYS_COPYAREA -- cgit v1.2.3 From 2962db71c7030868b6859d09b2138b55297df95e Mon Sep 17 00:00:00 2001 From: Noralf Trønnes Date: Tue, 17 Sep 2019 19:18:42 +0200 Subject: staging/fbtft: Remove fbtft_device MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit c440eee1a7a1 ("Staging: fbtft: Switch to the gpio descriptor interface") removed the gpio code from fbtft_device rendering it useless. fbtft_device is a module that was used on the Raspberry Pi to dynamically add fbtft devices when the Pi didn't have Device Tree support. Just remove the module since it's the responsibility of Device Tree, ACPI or platform code to add devices. Fixes: c440eee1a7a1 ("Staging: fbtft: Switch to the gpio descriptor interface") Signed-off-by: Noralf Trønnes Link: https://lore.kernel.org/r/20190917171843.10334-2-noralf@tronnes.org Signed-off-by: Greg Kroah-Hartman --- drivers/staging/fbtft/Kconfig | 4 - drivers/staging/fbtft/Makefile | 3 - drivers/staging/fbtft/fbtft_device.c | 1261 ---------------------------------- 3 files changed, 1268 deletions(-) delete mode 100644 drivers/staging/fbtft/fbtft_device.c diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig index 4e5d860fd788..4c37d087e25c 100644 --- a/drivers/staging/fbtft/Kconfig +++ b/drivers/staging/fbtft/Kconfig @@ -205,7 +205,3 @@ config FB_FLEX depends on FB_TFT help Generic Framebuffer support for TFT LCD displays. - -config FB_TFT_FBTFT_DEVICE - tristate "Module to for adding FBTFT devices" - depends on FB_TFT diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile index 6bc03311c9c7..925ec17e318d 100644 --- a/drivers/staging/fbtft/Makefile +++ b/drivers/staging/fbtft/Makefile @@ -37,6 +37,3 @@ obj-$(CONFIG_FB_TFT_UC1701) += fb_uc1701.o obj-$(CONFIG_FB_TFT_UPD161704) += fb_upd161704.o obj-$(CONFIG_FB_TFT_WATTEROTT) += fb_watterott.o obj-$(CONFIG_FB_FLEX) += flexfb.o - -# Device modules -obj-$(CONFIG_FB_TFT_FBTFT_DEVICE) += fbtft_device.o diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c deleted file mode 100644 index 44e1410eb3fe..000000000000 --- a/drivers/staging/fbtft/fbtft_device.c +++ /dev/null @@ -1,1261 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * - * Copyright (C) 2013, Noralf Tronnes - */ - -#define pr_fmt(fmt) "fbtft_device: " fmt -#include -#include -#include -#include -#include -#include

; if - * found, dentry is grabbed and passed to caller via *. - * If no such element exists, the anchor of list is returned - * and * is set to NULL. + * found, dentry is grabbed and returned to caller. + * If no such element exists, NULL is returned. */ -static struct list_head *scan_positives(struct dentry *cursor, +static struct dentry *scan_positives(struct dentry *cursor, struct list_head *p, loff_t count, - struct dentry **res) + struct dentry *last) { struct dentry *dentry = cursor->d_parent, *found = NULL; @@ -127,9 +126,8 @@ static struct list_head *scan_positives(struct dentry *cursor, } } spin_unlock(&dentry->d_lock); - dput(*res); - *res = found; - return p; + dput(last); + return found; } loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) @@ -149,25 +147,22 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) if (offset != file->f_pos) { struct dentry *cursor = file->private_data; struct dentry *to = NULL; - struct list_head *p; - file->f_pos = offset; inode_lock_shared(dentry->d_inode); - if (file->f_pos > 2) { - p = scan_positives(cursor, &dentry->d_subdirs, - file->f_pos - 2, &to); - spin_lock(&dentry->d_lock); - list_move(&cursor->d_child, p); - spin_unlock(&dentry->d_lock); - } else { - spin_lock(&dentry->d_lock); + if (offset > 2) + to = scan_positives(cursor, &dentry->d_subdirs, + offset - 2, NULL); + spin_lock(&dentry->d_lock); + if (to) + list_move(&cursor->d_child, &to->d_child); + else list_del_init(&cursor->d_child); - spin_unlock(&dentry->d_lock); - } - + spin_unlock(&dentry->d_lock); dput(to); + file->f_pos = offset; + inode_unlock_shared(dentry->d_inode); } return offset; @@ -199,17 +194,23 @@ int dcache_readdir(struct file *file, struct dir_context *ctx) if (ctx->pos == 2) p = anchor; - else + else if (!list_empty(&cursor->d_child)) p = &cursor->d_child; + else + return 0; - while ((p = scan_positives(cursor, p, 1, &next)) != anchor) { + while ((next = scan_positives(cursor, p, 1, next)) != NULL) { if (!dir_emit(ctx, next->d_name.name, next->d_name.len, d_inode(next)->i_ino, dt_type(d_inode(next)))) break; ctx->pos++; + p = &next->d_child; } spin_lock(&dentry->d_lock); - list_move_tail(&cursor->d_child, p); + if (next) + list_move_tail(&cursor->d_child, &next->d_child); + else + list_del_init(&cursor->d_child); spin_unlock(&dentry->d_lock); dput(next); -- cgit v1.2.3 From e0ae2c578d3909e60e9448207f5d83f785f1129f Mon Sep 17 00:00:00 2001 From: Daniele Palmas Date: Wed, 9 Oct 2019 11:07:18 +0200 Subject: net: usb: qmi_wwan: add Telit 0x1050 composition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds support for Telit FN980 0x1050 composition 0x1050: tty, adb, rmnet, tty, tty, tty, tty Signed-off-by: Daniele Palmas Acked-by: Bjørn Mork Signed-off-by: Jakub Kicinski --- drivers/net/usb/qmi_wwan.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 3d77cd402ba9..596428ec71df 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1327,6 +1327,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, 
/* Telit LE920 */ -- cgit v1.2.3 From 79a85e214d62da9a750cc63ef49483e62abbda81 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 10 Oct 2019 00:38:13 +0900 Subject: null_blk: Fix zoned command return code The return code from null_handle_zoned() sets the cmd->error value. Returning OK status when an error occured overwrites the intended cmd->error. Return the appropriate error code instead of setting the error in the cmd. Fixes: fceb5d1b19cbe626 ("null_blk: create a helper for zoned devices") Cc: Chaitanya Kulkarni Signed-off-by: Keith Busch Signed-off-by: Jens Axboe --- drivers/block/null_blk_zoned.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c index eabc116832a7..3d7fdea872f8 100644 --- a/drivers/block/null_blk_zoned.c +++ b/drivers/block/null_blk_zoned.c @@ -142,8 +142,7 @@ static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector) zone->wp = zone->start; break; default: - cmd->error = BLK_STS_NOTSUPP; - break; + return BLK_STS_NOTSUPP; } return BLK_STS_OK; } -- cgit v1.2.3 From 993e4c929a073595d22c85f59082f0c387e31c21 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Wed, 9 Oct 2019 11:19:10 +0200 Subject: netns: fix NLM_F_ECHO mechanism for RTM_NEWNSID The flag NLM_F_ECHO aims to reply to the user the message notified to all listeners. It was not the case with the command RTM_NEWNSID, let's fix this. Fixes: 0c7aecd4bde4 ("netns: add rtnl cmd to add and get peer netns ids") Reported-by: Guillaume Nault Signed-off-by: Nicolas Dichtel Acked-by: Guillaume Nault Tested-by: Guillaume Nault Signed-off-by: Jakub Kicinski --- net/core/net_namespace.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index a0e0d298c991..6d3e4821b02d 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -245,7 +245,8 @@ static int __peernet2id(struct net *net, struct net *peer) return __peernet2id_alloc(net, peer, &no); } -static void rtnl_net_notifyid(struct net *net, int cmd, int id); +static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid, + struct nlmsghdr *nlh); /* This function returns the id of a peer netns. If no id is assigned, one will * be allocated and returned. */ @@ -268,7 +269,7 @@ int peernet2id_alloc(struct net *net, struct net *peer) id = __peernet2id_alloc(net, peer, &alloc); spin_unlock_bh(&net->nsid_lock); if (alloc && id >= 0) - rtnl_net_notifyid(net, RTM_NEWNSID, id); + rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL); if (alive) put_net(peer); return id; @@ -532,7 +533,7 @@ static void unhash_nsid(struct net *net, struct net *last) idr_remove(&tmp->netns_ids, id); spin_unlock_bh(&tmp->nsid_lock); if (id >= 0) - rtnl_net_notifyid(tmp, RTM_DELNSID, id); + rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL); if (tmp == last) break; } @@ -764,7 +765,8 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh, err = alloc_netid(net, peer, nsid); spin_unlock_bh(&net->nsid_lock); if (err >= 0) { - rtnl_net_notifyid(net, RTM_NEWNSID, err); + rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid, + nlh); err = 0; } else if (err == -ENOSPC && nsid >= 0) { err = -EEXIST; @@ -1051,9 +1053,12 @@ end: return err < 0 ? 
err : skb->len; } -static void rtnl_net_notifyid(struct net *net, int cmd, int id) +static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid, + struct nlmsghdr *nlh) { struct net_fill_args fillargs = { + .portid = portid, + .seq = nlh ? nlh->nlmsg_seq : 0, .cmd = cmd, .nsid = id, }; @@ -1068,7 +1073,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id) if (err < 0) goto err_out; - rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0); + rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, 0); return; err_out: -- cgit v1.2.3 From e37542ba111f3974dc622ae0a21c1787318de500 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 9 Oct 2019 09:19:13 -0700 Subject: netfilter: conntrack: avoid possible false sharing As hinted by KCSAN, we need at least one READ_ONCE() to prevent a compiler optimization. More details on : https://github.com/google/ktsan/wiki/READ_ONCE-and-WRITE_ONCE#it-may-improve-performance sysbot report : BUG: KCSAN: data-race in __nf_ct_refresh_acct / __nf_ct_refresh_acct read to 0xffff888123eb4f08 of 4 bytes by interrupt on cpu 0: __nf_ct_refresh_acct+0xd4/0x1b0 net/netfilter/nf_conntrack_core.c:1796 nf_ct_refresh_acct include/net/netfilter/nf_conntrack.h:201 [inline] nf_conntrack_tcp_packet+0xd40/0x3390 net/netfilter/nf_conntrack_proto_tcp.c:1161 nf_conntrack_handle_packet net/netfilter/nf_conntrack_core.c:1633 [inline] nf_conntrack_in+0x410/0xaa0 net/netfilter/nf_conntrack_core.c:1727 ipv4_conntrack_in+0x27/0x40 net/netfilter/nf_conntrack_proto.c:178 nf_hook_entry_hookfn include/linux/netfilter.h:135 [inline] nf_hook_slow+0x83/0x160 net/netfilter/core.c:512 nf_hook include/linux/netfilter.h:260 [inline] NF_HOOK include/linux/netfilter.h:303 [inline] ip_rcv+0x12f/0x1a0 net/ipv4/ip_input.c:523 __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5004 __netif_receive_skb+0x37/0xf0 net/core/dev.c:5118 netif_receive_skb_internal+0x59/0x190 net/core/dev.c:5208 napi_skb_finish net/core/dev.c:5671 [inline] napi_gro_receive+0x28f/0x330 net/core/dev.c:5704 receive_buf+0x284/0x30b0 drivers/net/virtio_net.c:1061 virtnet_receive drivers/net/virtio_net.c:1323 [inline] virtnet_poll+0x436/0x7d0 drivers/net/virtio_net.c:1428 napi_poll net/core/dev.c:6352 [inline] net_rx_action+0x3ae/0xa50 net/core/dev.c:6418 __do_softirq+0x115/0x33f kernel/softirq.c:292 write to 0xffff888123eb4f08 of 4 bytes by task 7191 on cpu 1: __nf_ct_refresh_acct+0xfb/0x1b0 net/netfilter/nf_conntrack_core.c:1797 nf_ct_refresh_acct include/net/netfilter/nf_conntrack.h:201 [inline] nf_conntrack_tcp_packet+0xd40/0x3390 net/netfilter/nf_conntrack_proto_tcp.c:1161 nf_conntrack_handle_packet net/netfilter/nf_conntrack_core.c:1633 [inline] nf_conntrack_in+0x410/0xaa0 net/netfilter/nf_conntrack_core.c:1727 ipv4_conntrack_local+0xbe/0x130 net/netfilter/nf_conntrack_proto.c:200 nf_hook_entry_hookfn include/linux/netfilter.h:135 [inline] nf_hook_slow+0x83/0x160 net/netfilter/core.c:512 nf_hook include/linux/netfilter.h:260 [inline] __ip_local_out+0x1f7/0x2b0 net/ipv4/ip_output.c:114 ip_local_out+0x31/0x90 net/ipv4/ip_output.c:123 __ip_queue_xmit+0x3a8/0xa40 net/ipv4/ip_output.c:532 ip_queue_xmit+0x45/0x60 include/net/ip.h:236 __tcp_transmit_skb+0xdeb/0x1cd0 net/ipv4/tcp_output.c:1158 __tcp_send_ack+0x246/0x300 net/ipv4/tcp_output.c:3685 tcp_send_ack+0x34/0x40 net/ipv4/tcp_output.c:3691 tcp_cleanup_rbuf+0x130/0x360 net/ipv4/tcp.c:1575 Reported by Kernel Concurrency Sanitizer on: CPU: 1 PID: 7191 Comm: syz-fuzzer Not tainted 5.3.0+ #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS 
Google 01/01/2011 Fixes: cc16921351d8 ("netfilter: conntrack: avoid same-timeout update") Signed-off-by: Eric Dumazet Reported-by: syzbot Cc: Jozsef Kadlecsik Cc: Florian Westphal Acked-by: Pablo Neira Ayuso Signed-off-by: Jakub Kicinski --- net/netfilter/nf_conntrack_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 0c63120b2db2..5cd610b547e0 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1792,8 +1792,8 @@ void __nf_ct_refresh_acct(struct nf_conn *ct, if (nf_ct_is_confirmed(ct)) extra_jiffies += nfct_time_stamp; - if (ct->timeout != extra_jiffies) - ct->timeout = extra_jiffies; + if (READ_ONCE(ct->timeout) != extra_jiffies) + WRITE_ONCE(ct->timeout, extra_jiffies); acct: if (do_acct) nf_ct_acct_update(ct, ctinfo, skb->len); -- cgit v1.2.3 From 4ffdd22e49f47db543906d75453a0048a53071ab Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 9 Oct 2019 09:20:02 -0700 Subject: tun: remove possible false sharing in tun_flow_update() As mentioned in https://github.com/google/ktsan/wiki/READ_ONCE-and-WRITE_ONCE#it-may-improve-performance a C compiler can legally transform if (e->queue_index != queue_index) e->queue_index = queue_index; to : e->queue_index = queue_index; Note that the code using jiffies has no issue, since jiffies has volatile attribute. if (e->updated != jiffies) e->updated = jiffies; Fixes: 83b1bc122cab ("tun: align write-heavy flow entry members to a cache line") Signed-off-by: Eric Dumazet Cc: Zhang Yu Cc: Wang Li Cc: Li RongQing Acked-by: Jason Wang Signed-off-by: Jakub Kicinski --- drivers/net/tun.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 812dc3a65efb..a8d3141582a5 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -526,8 +526,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash, e = tun_flow_find(head, rxhash); if (likely(e)) { /* TODO: keep queueing to old queue until it's empty? 
*/ - if (e->queue_index != queue_index) - e->queue_index = queue_index; + if (READ_ONCE(e->queue_index) != queue_index) + WRITE_ONCE(e->queue_index, queue_index); if (e->updated != jiffies) e->updated = jiffies; sock_rps_record_flow_hash(e->rps_rxhash); -- cgit v1.2.3 From 503978aca46124cd714703e180b9c8292ba50ba7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 9 Oct 2019 12:55:53 -0700 Subject: net: avoid possible false sharing in sk_leave_memory_pressure() As mentioned in https://github.com/google/ktsan/wiki/READ_ONCE-and-WRITE_ONCE#it-may-improve-performance a C compiler can legally transform : if (memory_pressure && *memory_pressure) *memory_pressure = 0; to : if (memory_pressure) *memory_pressure = 0; Fixes: 0604475119de ("tcp: add TCPMemoryPressuresChrono counter") Fixes: 180d8cd942ce ("foundations of per-cgroup memory pressure controlling.") Fixes: 3ab224be6d69 ("[NET] CORE: Introducing new memory accounting interface.") Signed-off-by: Eric Dumazet Signed-off-by: Jakub Kicinski --- net/core/sock.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index fac2b4d80de5..50647a10fdb7 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2334,8 +2334,8 @@ static void sk_leave_memory_pressure(struct sock *sk) } else { unsigned long *memory_pressure = sk->sk_prot->memory_pressure; - if (memory_pressure && *memory_pressure) - *memory_pressure = 0; + if (memory_pressure && READ_ONCE(*memory_pressure)) + WRITE_ONCE(*memory_pressure, 0); } } -- cgit v1.2.3 From 60b173ca3d1cd1782bd0096dc17298ec242f6fb1 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 9 Oct 2019 14:51:20 -0700 Subject: net: add {READ|WRITE}_ONCE() annotations on ->rskq_accept_head reqsk_queue_empty() is called from inet_csk_listen_poll() while other cpus might write ->rskq_accept_head value. Use {READ|WRITE}_ONCE() to avoid compiler tricks and potential KCSAN splats. 
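For illustration, the shape of the pattern being applied here and in several of the following patches, reduced to a hypothetical queue (made-up myqueue/mynode types, not the request_sock code itself): the lockless reader annotates its load with READ_ONCE() while the writer, still serialized by a lock, publishes its store with WRITE_ONCE().

  /* assumes <linux/spinlock.h> and <linux/compiler.h> */
  struct mynode {
          struct mynode *next;
  };

  struct myqueue {
          spinlock_t lock;
          struct mynode *head;
  };

  /* reader side, called without holding q->lock */
  static bool myqueue_empty(const struct myqueue *q)
  {
          return READ_ONCE(q->head) == NULL;
  }

  /* writer side, serialized by q->lock */
  static void myqueue_push(struct myqueue *q, struct mynode *n)
  {
          spin_lock(&q->lock);
          n->next = q->head;              /* plain access is fine under the lock */
          WRITE_ONCE(q->head, n);         /* pairs with the lockless READ_ONCE() */
          spin_unlock(&q->lock);
  }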
Fixes: fff1f3001cc5 ("tcp: add a spinlock to protect struct request_sock_queue") Signed-off-by: Eric Dumazet Signed-off-by: Jakub Kicinski --- drivers/xen/pvcalls-back.c | 2 +- include/net/request_sock.h | 4 ++-- net/ipv4/inet_connection_sock.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index 69a626b0e594..c57c71b7d53d 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -775,7 +775,7 @@ static int pvcalls_back_poll(struct xenbus_device *dev, mappass->reqcopy = *req; icsk = inet_csk(mappass->sock->sk); queue = &icsk->icsk_accept_queue; - data = queue->rskq_accept_head != NULL; + data = READ_ONCE(queue->rskq_accept_head) != NULL; if (data) { mappass->reqcopy.cmd = 0; ret = 0; diff --git a/include/net/request_sock.h b/include/net/request_sock.h index fd178d58fa84..cf8b33213bbc 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -185,7 +185,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, static inline bool reqsk_queue_empty(const struct request_sock_queue *queue) { - return queue->rskq_accept_head == NULL; + return READ_ONCE(queue->rskq_accept_head) == NULL; } static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue, @@ -197,7 +197,7 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue req = queue->rskq_accept_head; if (req) { sk_acceptq_removed(parent); - queue->rskq_accept_head = req->dl_next; + WRITE_ONCE(queue->rskq_accept_head, req->dl_next); if (queue->rskq_accept_head == NULL) queue->rskq_accept_tail = NULL; } diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index a9183543ca30..dbcf34ec8dd2 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -934,7 +934,7 @@ struct sock *inet_csk_reqsk_queue_add(struct sock *sk, req->sk = child; req->dl_next = NULL; if (queue->rskq_accept_head == NULL) - queue->rskq_accept_head = req; + WRITE_ONCE(queue->rskq_accept_head, req); else queue->rskq_accept_tail->dl_next = req; queue->rskq_accept_tail = req; -- cgit v1.2.3 From 1f142c17d19a5618d5a633195a46f2c8be9bf232 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 9 Oct 2019 15:10:15 -0700 Subject: tcp: annotate lockless access to tcp_memory_pressure tcp_memory_pressure is read without holding any lock, and its value could be changed on other cpus. Use READ_ONCE() to annotate these lockless reads. The write side is already using atomic ops. 
Fixes: b8da51ebb1aa ("tcp: introduce tcp_under_memory_pressure()") Signed-off-by: Eric Dumazet Signed-off-by: Jakub Kicinski --- include/net/tcp.h | 2 +- net/ipv4/tcp.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index c9a3f9688223..88e63d64c698 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -258,7 +258,7 @@ static inline bool tcp_under_memory_pressure(const struct sock *sk) mem_cgroup_under_socket_pressure(sk->sk_memcg)) return true; - return tcp_memory_pressure; + return READ_ONCE(tcp_memory_pressure); } /* * The next routines deal with comparing 32 bit unsigned ints diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f98a1882e537..888c92b63f5a 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -326,7 +326,7 @@ void tcp_enter_memory_pressure(struct sock *sk) { unsigned long val; - if (tcp_memory_pressure) + if (READ_ONCE(tcp_memory_pressure)) return; val = jiffies; @@ -341,7 +341,7 @@ void tcp_leave_memory_pressure(struct sock *sk) { unsigned long val; - if (!tcp_memory_pressure) + if (!READ_ONCE(tcp_memory_pressure)) return; val = xchg(&tcp_memory_pressure, 0); if (val) -- cgit v1.2.3 From 8265792bf8871acc2d00fd03883d830e2249d395 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 9 Oct 2019 15:21:13 -0700 Subject: net: silence KCSAN warnings around sk_add_backlog() calls sk_add_backlog() callers usually read sk->sk_rcvbuf without owning the socket lock. This means sk_rcvbuf value can be changed by other cpus, and KCSAN complains. Add READ_ONCE() annotations to document the lockless nature of these reads. Note that writes over sk_rcvbuf should also use WRITE_ONCE(), but this will be done in separate patches to ease stable backports (if we decide this is relevant for stable trees). 
BUG: KCSAN: data-race in tcp_add_backlog / tcp_recvmsg write to 0xffff88812ab369f8 of 8 bytes by interrupt on cpu 1: __sk_add_backlog include/net/sock.h:902 [inline] sk_add_backlog include/net/sock.h:933 [inline] tcp_add_backlog+0x45a/0xcc0 net/ipv4/tcp_ipv4.c:1737 tcp_v4_rcv+0x1aba/0x1bf0 net/ipv4/tcp_ipv4.c:1925 ip_protocol_deliver_rcu+0x51/0x470 net/ipv4/ip_input.c:204 ip_local_deliver_finish+0x110/0x140 net/ipv4/ip_input.c:231 NF_HOOK include/linux/netfilter.h:305 [inline] NF_HOOK include/linux/netfilter.h:299 [inline] ip_local_deliver+0x133/0x210 net/ipv4/ip_input.c:252 dst_input include/net/dst.h:442 [inline] ip_rcv_finish+0x121/0x160 net/ipv4/ip_input.c:413 NF_HOOK include/linux/netfilter.h:305 [inline] NF_HOOK include/linux/netfilter.h:299 [inline] ip_rcv+0x18f/0x1a0 net/ipv4/ip_input.c:523 __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5004 __netif_receive_skb+0x37/0xf0 net/core/dev.c:5118 netif_receive_skb_internal+0x59/0x190 net/core/dev.c:5208 napi_skb_finish net/core/dev.c:5671 [inline] napi_gro_receive+0x28f/0x330 net/core/dev.c:5704 receive_buf+0x284/0x30b0 drivers/net/virtio_net.c:1061 virtnet_receive drivers/net/virtio_net.c:1323 [inline] virtnet_poll+0x436/0x7d0 drivers/net/virtio_net.c:1428 napi_poll net/core/dev.c:6352 [inline] net_rx_action+0x3ae/0xa50 net/core/dev.c:6418 read to 0xffff88812ab369f8 of 8 bytes by task 7271 on cpu 0: tcp_recvmsg+0x470/0x1a30 net/ipv4/tcp.c:2047 inet_recvmsg+0xbb/0x250 net/ipv4/af_inet.c:838 sock_recvmsg_nosec net/socket.c:871 [inline] sock_recvmsg net/socket.c:889 [inline] sock_recvmsg+0x92/0xb0 net/socket.c:885 sock_read_iter+0x15f/0x1e0 net/socket.c:967 call_read_iter include/linux/fs.h:1864 [inline] new_sync_read+0x389/0x4f0 fs/read_write.c:414 __vfs_read+0xb1/0xc0 fs/read_write.c:427 vfs_read fs/read_write.c:461 [inline] vfs_read+0x143/0x2c0 fs/read_write.c:446 ksys_read+0xd5/0x1b0 fs/read_write.c:587 __do_sys_read fs/read_write.c:597 [inline] __se_sys_read fs/read_write.c:595 [inline] __x64_sys_read+0x4c/0x60 fs/read_write.c:595 do_syscall_64+0xcf/0x2f0 arch/x86/entry/common.c:296 entry_SYSCALL_64_after_hwframe+0x44/0xa9 Reported by Kernel Concurrency Sanitizer on: CPU: 0 PID: 7271 Comm: syz-fuzzer Not tainted 5.3.0+ #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Signed-off-by: Eric Dumazet Reported-by: syzbot Signed-off-by: Jakub Kicinski --- net/core/sock.c | 2 +- net/ipv4/tcp_ipv4.c | 2 +- net/llc/llc_conn.c | 2 +- net/sctp/input.c | 6 +++--- net/tipc/socket.c | 6 +++--- net/x25/x25_dev.c | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index 50647a10fdb7..1cf06934da50 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -522,7 +522,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, rc = sk_backlog_rcv(sk, skb); mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); - } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { + } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) { bh_unlock_sock(sk); atomic_inc(&sk->sk_drops); goto discard_and_relse; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index bf124b1742df..492bf6a6b023 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1644,7 +1644,7 @@ int tcp_v4_early_demux(struct sk_buff *skb) bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) { - u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf; + u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf); struct skb_shared_info *shinfo; const struct tcphdr *th; struct tcphdr *thtail; 
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index a79b739eb223..7b620acaca9e 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -813,7 +813,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb) else { dprintk("%s: adding to backlog...\n", __func__); llc_set_backlog_type(skb, LLC_PACKET); - if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) + if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) goto drop_unlock; } out: diff --git a/net/sctp/input.c b/net/sctp/input.c index f2771375bfc0..2277981559d0 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -322,7 +322,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) bh_lock_sock(sk); if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) { - if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) + if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) sctp_chunk_free(chunk); else backloged = 1; @@ -337,7 +337,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) return 0; } else { if (!sctp_newsk_ready(sk)) { - if (!sk_add_backlog(sk, skb, sk->sk_rcvbuf)) + if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) return 0; sctp_chunk_free(chunk); } else { @@ -364,7 +364,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) struct sctp_ep_common *rcvr = chunk->rcvr; int ret; - ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf); + ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)); if (!ret) { /* Hold the assoc/ep while hanging on the backlog queue. * This way, we know structures we need will not disappear diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 3b9f8cc328f5..7c736cfec57f 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2119,13 +2119,13 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb) struct tipc_msg *hdr = buf_msg(skb); if (unlikely(msg_in_group(hdr))) - return sk->sk_rcvbuf; + return READ_ONCE(sk->sk_rcvbuf); if (unlikely(!msg_connected(hdr))) - return sk->sk_rcvbuf << msg_importance(hdr); + return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr); if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL)) - return sk->sk_rcvbuf; + return READ_ONCE(sk->sk_rcvbuf); return FLOWCTL_MSG_LIM; } diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index 5c111bc3c8ea..00e782335cb0 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c @@ -55,7 +55,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) if (!sock_owned_by_user(sk)) { queued = x25_process_rx_frame(sk, skb); } else { - queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf); + queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)); } bh_unlock_sock(sk); sock_put(sk); -- cgit v1.2.3 From eac66402d1c342f07ff38f8d631ff95eb7ad3220 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 9 Oct 2019 15:32:35 -0700 Subject: net: annotate sk->sk_rcvlowat lockless reads sock_rcvlowat() or int_sk_rcvlowat() might be called without the socket lock for example from tcp_poll(). Use READ_ONCE() to document the fact that other cpus might change sk->sk_rcvlowat under us and avoid KCSAN splats. Use WRITE_ONCE() on write sides too. 
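For completeness, the user-visible knob whose kernel-side store now uses WRITE_ONCE() is plain SO_RCVLOWAT; a minimal user space usage sketch (hypothetical helper, unrelated to the fix itself):

  #include <sys/socket.h>

  /* fd is assumed to be an already-created, connected TCP socket */
  static int set_rcvlowat(int fd, int bytes)
  {
          return setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
                            &bytes, sizeof(bytes));
  }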
Signed-off-by: Eric Dumazet Signed-off-by: Jakub Kicinski --- include/net/sock.h | 4 +++- net/core/filter.c | 2 +- net/core/sock.c | 2 +- net/ipv4/tcp.c | 2 +- net/sched/em_meta.c | 2 +- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 2c53f1a1d905..79f54e1f8827 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2271,7 +2271,9 @@ static inline long sock_sndtimeo(const struct sock *sk, bool noblock) static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len) { - return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1; + int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len); + + return v ?: 1; } /* Alas, with timeout socket operations are not restartable. diff --git a/net/core/filter.c b/net/core/filter.c index ed6563622ce3..a50c0b6846f2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4274,7 +4274,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, case SO_RCVLOWAT: if (val < 0) val = INT_MAX; - sk->sk_rcvlowat = val ? : 1; + WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); break; case SO_MARK: if (sk->sk_mark != val) { diff --git a/net/core/sock.c b/net/core/sock.c index 1cf06934da50..b7c5c6ea51ba 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -974,7 +974,7 @@ set_rcvbuf: if (sock->ops->set_rcvlowat) ret = sock->ops->set_rcvlowat(sk, val); else - sk->sk_rcvlowat = val ? : 1; + WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); break; case SO_RCVTIMEO_OLD: diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 888c92b63f5a..8781a92ea4b6 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1699,7 +1699,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val) else cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1; val = min(val, cap); - sk->sk_rcvlowat = val ? : 1; + WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); /* Check if we need to signal EPOLLIN right now */ tcp_data_ready(sk); diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 82bd14e7ac93..4c9122fc35c9 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -554,7 +554,7 @@ META_COLLECTOR(int_sk_rcvlowat) *err = -1; return; } - dst->value = sk->sk_rcvlowat; + dst->value = READ_ONCE(sk->sk_rcvlowat); } META_COLLECTOR(int_sk_rcvtimeo) -- cgit v1.2.3 From 70c2655849a25431f31b505a07fe0c861e5e41fb Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 9 Oct 2019 15:41:03 -0700 Subject: net: silence KCSAN warnings about sk->sk_backlog.len reads sk->sk_backlog.len can be written by BH handlers, and read from process contexts in a lockless way. Note the write side should also use WRITE_ONCE() or a variant. We need some agreement about the best way to do this. 
syzbot reported : BUG: KCSAN: data-race in tcp_add_backlog / tcp_grow_window.isra.0 write to 0xffff88812665f32c of 4 bytes by interrupt on cpu 1: sk_add_backlog include/net/sock.h:934 [inline] tcp_add_backlog+0x4a0/0xcc0 net/ipv4/tcp_ipv4.c:1737 tcp_v4_rcv+0x1aba/0x1bf0 net/ipv4/tcp_ipv4.c:1925 ip_protocol_deliver_rcu+0x51/0x470 net/ipv4/ip_input.c:204 ip_local_deliver_finish+0x110/0x140 net/ipv4/ip_input.c:231 NF_HOOK include/linux/netfilter.h:305 [inline] NF_HOOK include/linux/netfilter.h:299 [inline] ip_local_deliver+0x133/0x210 net/ipv4/ip_input.c:252 dst_input include/net/dst.h:442 [inline] ip_rcv_finish+0x121/0x160 net/ipv4/ip_input.c:413 NF_HOOK include/linux/netfilter.h:305 [inline] NF_HOOK include/linux/netfilter.h:299 [inline] ip_rcv+0x18f/0x1a0 net/ipv4/ip_input.c:523 __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:5004 __netif_receive_skb+0x37/0xf0 net/core/dev.c:5118 netif_receive_skb_internal+0x59/0x190 net/core/dev.c:5208 napi_skb_finish net/core/dev.c:5671 [inline] napi_gro_receive+0x28f/0x330 net/core/dev.c:5704 receive_buf+0x284/0x30b0 drivers/net/virtio_net.c:1061 virtnet_receive drivers/net/virtio_net.c:1323 [inline] virtnet_poll+0x436/0x7d0 drivers/net/virtio_net.c:1428 napi_poll net/core/dev.c:6352 [inline] net_rx_action+0x3ae/0xa50 net/core/dev.c:6418 read to 0xffff88812665f32c of 4 bytes by task 7292 on cpu 0: tcp_space include/net/tcp.h:1373 [inline] tcp_grow_window.isra.0+0x6b/0x480 net/ipv4/tcp_input.c:413 tcp_event_data_recv+0x68f/0x990 net/ipv4/tcp_input.c:717 tcp_rcv_established+0xbfe/0xf50 net/ipv4/tcp_input.c:5618 tcp_v4_do_rcv+0x381/0x4e0 net/ipv4/tcp_ipv4.c:1542 sk_backlog_rcv include/net/sock.h:945 [inline] __release_sock+0x135/0x1e0 net/core/sock.c:2427 release_sock+0x61/0x160 net/core/sock.c:2943 tcp_recvmsg+0x63b/0x1a30 net/ipv4/tcp.c:2181 inet_recvmsg+0xbb/0x250 net/ipv4/af_inet.c:838 sock_recvmsg_nosec net/socket.c:871 [inline] sock_recvmsg net/socket.c:889 [inline] sock_recvmsg+0x92/0xb0 net/socket.c:885 sock_read_iter+0x15f/0x1e0 net/socket.c:967 call_read_iter include/linux/fs.h:1864 [inline] new_sync_read+0x389/0x4f0 fs/read_write.c:414 __vfs_read+0xb1/0xc0 fs/read_write.c:427 vfs_read fs/read_write.c:461 [inline] vfs_read+0x143/0x2c0 fs/read_write.c:446 Reported by Kernel Concurrency Sanitizer on: CPU: 0 PID: 7292 Comm: syz-fuzzer Not tainted 5.3.0+ #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Signed-off-by: Eric Dumazet Reported-by: syzbot Signed-off-by: Jakub Kicinski --- include/net/tcp.h | 3 ++- net/core/sock.c | 2 +- net/sctp/diag.c | 2 +- net/tipc/socket.c | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 88e63d64c698..35f6f7e0fdc2 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1380,7 +1380,8 @@ static inline int tcp_win_from_space(const struct sock *sk, int space) /* Note: caller must be prepared to deal with negative returns */ static inline int tcp_space(const struct sock *sk) { - return tcp_win_from_space(sk, sk->sk_rcvbuf - sk->sk_backlog.len - + return tcp_win_from_space(sk, sk->sk_rcvbuf - + READ_ONCE(sk->sk_backlog.len) - atomic_read(&sk->sk_rmem_alloc)); } diff --git a/net/core/sock.c b/net/core/sock.c index b7c5c6ea51ba..2a053999df11 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3210,7 +3210,7 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem) mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc; mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued; mem[SK_MEMINFO_OPTMEM] = 
atomic_read(&sk->sk_omem_alloc); - mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len; + mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len); mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops); } diff --git a/net/sctp/diag.c b/net/sctp/diag.c index fc9a4c6629ce..0851166b9175 100644 --- a/net/sctp/diag.c +++ b/net/sctp/diag.c @@ -175,7 +175,7 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc, mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc; mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued; mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); - mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len; + mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len); mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops); if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 7c736cfec57f..f8bbc4aab213 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -3790,7 +3790,7 @@ int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf) i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf); i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk)); i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf); - i += scnprintf(buf + i, sz - i, " | %d\n", sk->sk_backlog.len); + i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len)); if (dqueues & TIPC_DUMP_SK_SNDQ) { i += scnprintf(buf + i, sz - i, "sk_write_queue: "); -- cgit v1.2.3 From 05668e1d74b84c53fbe0f28565e4c9502a6b8a67 Mon Sep 17 00:00:00 2001 From: Halil Pasic Date: Mon, 30 Sep 2019 17:38:02 +0200 Subject: s390/cio: fix virtio-ccw DMA without PV Commit 37db8985b211 ("s390/cio: add basic protected virtualization support") breaks virtio-ccw devices with VIRTIO_F_IOMMU_PLATFORM for non Protected Virtualization (PV) guests. The problem is that the dma_mask of the ccw device, which is used by virtio core, gets changed from 64 to 31 bit, because some of the DMA allocations do require 31 bit addressable memory. For PV the only drawback is that some of the virtio structures must end up in ZONE_DMA because we have the bounce the buffers mapped via DMA API anyway. But for non PV guests we have a problem: because of the 31 bit mask guests bigger than 2G are likely to try bouncing buffers. The swiotlb however is only initialized for PV guests, because we don't want to bounce anything for non PV guests. The first such map kills the guest. Since the DMA API won't allow us to specify for each allocation whether we need memory from ZONE_DMA (31 bit addressable) or any DMA capable memory will do, let us use coherent_dma_mask (which is used for allocations) to force allocating form ZONE_DMA while changing dma_mask to DMA_BIT_MASK(64) so that at least the streaming API will regard the whole memory DMA capable. 
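A rough sketch of the resulting setup using the generic DMA API helpers (simplified, with a made-up function name, not the literal cio code): the coherent mask stays narrow so that dma_alloc_coherent() backed structures (ccw, sense data, ...) remain 31 bit addressable, while the streaming mask is wide so dma_map_single() and friends never consider bouncing.

  /* assumes <linux/dma-mapping.h> */
  static int example_set_masks(struct device *dev)
  {
          int rc;

          /* allocations must come from 31 bit addressable memory */
          rc = dma_set_coherent_mask(dev, DMA_BIT_MASK(31));
          if (rc)
                  return rc;

          /* streaming mappings may target any address, no bouncing */
          return dma_set_mask(dev, DMA_BIT_MASK(64));
  }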
Signed-off-by: Halil Pasic Reported-by: Marc Hartmayer Suggested-by: Robin Murphy Fixes: 37db8985b211 ("s390/cio: add basic protected virtualization support") Link: https://lore.kernel.org/lkml/20190930153803.7958-1-pasic@linux.ibm.com Reviewed-by: Christoph Hellwig Reviewed-by: Cornelia Huck Signed-off-by: Christian Borntraeger Signed-off-by: Vasily Gorbik --- drivers/s390/cio/cio.h | 1 + drivers/s390/cio/css.c | 7 ++++++- drivers/s390/cio/device.c | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index ba7d2480613b..dcdaba689b20 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -113,6 +113,7 @@ struct subchannel { enum sch_todo todo; struct work_struct todo_work; struct schib_config config; + u64 dma_mask; char *driver_override; /* Driver name to force a match */ } __attribute__ ((aligned(8))); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 1fbfb0a93f5f..831850435c23 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -232,7 +232,12 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid, * belong to a subchannel need to fit 31 bit width (e.g. ccw). */ sch->dev.coherent_dma_mask = DMA_BIT_MASK(31); - sch->dev.dma_mask = &sch->dev.coherent_dma_mask; + /* + * But we don't have such restrictions imposed on the stuff that + * is handled by the streaming API. + */ + sch->dma_mask = DMA_BIT_MASK(64); + sch->dev.dma_mask = &sch->dma_mask; return sch; err: diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 131430bd48d9..0c6245fc7706 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -710,7 +710,7 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch) if (!cdev->private) goto err_priv; cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask; - cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask; + cdev->dev.dma_mask = sch->dev.dma_mask; dma_pool = cio_gp_dma_create(&cdev->dev, 1); if (!dma_pool) goto err_dma_pool; -- cgit v1.2.3 From 2189624b3c5a6fb03416129e35c09aa5151b7392 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Thu, 26 Sep 2019 11:08:57 -0500 Subject: ACPI: PM: Drop Dell XPS13 9360 from LPS0 Idle _DSM blacklist This reverts part of commit 71630b7a832f ("ACPI / PM: Blacklist Low Power S0 Idle _DSM for Dell XPS13 9360") to remove the S0ix blacklist for the XPS 9360. The problems with this system occurred in one possible NVME SSD when putting system into s0ix. As the NVME sleep behavior has been adjusted in commit d916b1be94b6 ("nvme-pci: use host managed power state for suspend") this is expected to be now resolved. BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=196907 Signed-off-by: Mario Limonciello Tested-by: Paul Menzel Signed-off-by: Rafael J. Wysocki --- drivers/acpi/sleep.c | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 9fa77d72ef27..2af937a8b1c5 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -361,19 +361,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "80E3"), }, }, - /* - * https://bugzilla.kernel.org/show_bug.cgi?id=196907 - * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power - * S0 Idle firmware interface. 
- */ - { - .callback = init_default_s3, - .ident = "Dell XPS13 9360", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), - }, - }, /* * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using * the Low Power S0 Idle firmware interface (see -- cgit v1.2.3 From 65650b35133ff20f0c9ef0abd5c3c66dbce3ae57 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 9 Oct 2019 01:29:10 +0200 Subject: cpufreq: Avoid cpufreq_suspend() deadlock on system shutdown MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is incorrect to set the cpufreq syscore shutdown callback pointer to cpufreq_suspend(), because that function cannot be run in the syscore stage of system shutdown for two reasons: (a) it may attempt to carry out actions depending on devices that have already been shut down at that point and (b) the RCU synchronization carried out by it may not be able to make progress then. The latter issue has been present since commit 45975c7d21a1 ("rcu: Define RCU-sched API in terms of RCU for Tree RCU PREEMPT builds"), but the former one has been there since commit 90de2a4aa9f3 ("cpufreq: suspend cpufreq governors on shutdown") regardless. Fix that by dropping cpufreq_syscore_ops altogether and making device_shutdown() call cpufreq_suspend() directly before shutting down devices, which is along the lines of what system-wide power management does. Fixes: 45975c7d21a1 ("rcu: Define RCU-sched API in terms of RCU for Tree RCU PREEMPT builds") Fixes: 90de2a4aa9f3 ("cpufreq: suspend cpufreq governors on shutdown") Reported-by: Ville Syrjälä Tested-by: Ville Syrjälä Signed-off-by: Rafael J. Wysocki Acked-by: Viresh Kumar Cc: 4.0+ # 4.0+ --- drivers/base/core.c | 3 +++ drivers/cpufreq/cpufreq.c | 10 ---------- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/base/core.c b/drivers/base/core.c index 2db62d98e395..7bd9cd366d41 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -3179,6 +3180,8 @@ void device_shutdown(void) wait_for_device_probe(); device_block_probing(); + cpufreq_suspend(); + spin_lock(&devices_kset->list_lock); /* * Walk the devices list backward, shutting down each in turn. diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index c52d6fa32aac..bffc11b87247 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2737,14 +2737,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) } EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); -/* - * Stop cpufreq at shutdown to make sure it isn't holding any locks - * or mutexes when secondary CPUs are halted. - */ -static struct syscore_ops cpufreq_syscore_ops = { - .shutdown = cpufreq_suspend, -}; - struct kobject *cpufreq_global_kobject; EXPORT_SYMBOL(cpufreq_global_kobject); @@ -2756,8 +2748,6 @@ static int __init cpufreq_core_init(void) cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj); BUG_ON(!cpufreq_global_kobject); - register_syscore_ops(&cpufreq_syscore_ops); - return 0; } module_param(off, int, 0444); -- cgit v1.2.3 From f49249d58abdc12067d69f15d509487171148b30 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Tue, 8 Oct 2019 11:46:46 +0100 Subject: PM: sleep: include for pm_wq Include the for the definition of pm_wq to avoid the following warning: kernel/power/main.c:890:25: warning: symbol 'pm_wq' was not declared. Should it be static? 
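The warning is sparse's standard complaint about a symbol being defined without a visible declaration; making the declaring header visible in the translation unit is enough. A generic illustration with made-up names:

  /* foo.h */
  #include <linux/workqueue.h>
  extern struct workqueue_struct *foo_wq;         /* declaration */

  /* foo.c */
  #include "foo.h"   /* without this, sparse asks "should it be static?" */
  struct workqueue_struct *foo_wq;                /* definition */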
Signed-off-by: Ben Dooks Signed-off-by: Rafael J. Wysocki --- kernel/power/main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/power/main.c b/kernel/power/main.c index e8710d179b35..e26de7af520b 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "power.h" -- cgit v1.2.3 From ba2c74da52eba388d959a1000a9dd08fa4857659 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 9 Oct 2019 16:04:23 -0700 Subject: drm/i915/tgl: the BCS engine supports relative MMIO The specs don't mention any specific HW limitation on the blitter and manual inspection shows that the HW does set the relative MMIO bit in the LRI of the blitter context image, so we can remove our limitations. Signed-off-by: Daniele Ceraolo Spurio Cc: Chris Wilson Cc: John Harrison Cc: Mika Kuoppala Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20191009230424.6507-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/intel_lrc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 7ea58335f04c..eb061f345166 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3451,7 +3451,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) engine->flags |= I915_ENGINE_HAS_PREEMPTION; } - if (engine->class != COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) >= 12) + if (INTEL_GEN(engine->i915) >= 12) engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO; } -- cgit v1.2.3 From 9d41318c4e43da6b4c95a21cf366bf18179291fd Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Wed, 9 Oct 2019 16:04:24 -0700 Subject: drm/i915/tgl: simplify the lrc register list for !RCS There are small differences between the blitter and the video engines in the xcs context image (e.g. registers 0x200 and 0x204 only exist on the blitter). Since we never explicitly set a value for those register and given that we don't need to update the offsets in the lrc image when we change engine within the class for virtual engine because the HW can handle that, instead of having a separate define for the BCS we can just restrict the programming to the part we're interested in, which is common across the engines. 
Bspec: 45584 Signed-off-by: Daniele Ceraolo Spurio Cc: Chris Wilson Cc: Mika Kuoppala Cc: Stuart Summers Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20191009230424.6507-2-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gt/intel_lrc.c | 67 +++++-------------------------------- 1 file changed, 9 insertions(+), 58 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index eb061f345166..bdfd602b402a 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -669,64 +669,6 @@ static const u8 gen12_xcs_offsets[] = { REG16(0x274), REG16(0x270), - NOP(13), - LRI(2, POSTED), - REG16(0x200), - REG16(0x204), - - NOP(11), - LRI(50, POSTED), - REG16(0x588), - REG16(0x588), - REG16(0x588), - REG16(0x588), - REG16(0x588), - REG16(0x588), - REG(0x028), - REG(0x09c), - REG(0x0c0), - REG(0x178), - REG(0x17c), - REG16(0x358), - REG(0x170), - REG(0x150), - REG(0x154), - REG(0x158), - REG16(0x41c), - REG16(0x600), - REG16(0x604), - REG16(0x608), - REG16(0x60c), - REG16(0x610), - REG16(0x614), - REG16(0x618), - REG16(0x61c), - REG16(0x620), - REG16(0x624), - REG16(0x628), - REG16(0x62c), - REG16(0x630), - REG16(0x634), - REG16(0x638), - REG16(0x63c), - REG16(0x640), - REG16(0x644), - REG16(0x648), - REG16(0x64c), - REG16(0x650), - REG16(0x654), - REG16(0x658), - REG16(0x65c), - REG16(0x660), - REG16(0x664), - REG16(0x668), - REG16(0x66c), - REG16(0x670), - REG16(0x674), - REG16(0x678), - REG16(0x67c), - REG(0x068), - END(), }; @@ -857,6 +799,15 @@ static const u8 gen12_rcs_offsets[] = { static const u8 *reg_offsets(const struct intel_engine_cs *engine) { + /* + * The gen12+ lists only have the registers we program in the basic + * default state. We rely on the context image using relative + * addressing to automatic fixup the register state between the + * physical engines for virtual engine. + */ + GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 && + !intel_engine_has_relative_mmio(engine)); + if (engine->class == RENDER_CLASS) { if (INTEL_GEN(engine->i915) >= 12) return gen12_rcs_offsets; -- cgit v1.2.3 From fd70c7755bf0172ddd33b558aef69c322de3b5cf Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Tue, 24 Sep 2019 16:17:02 +0300 Subject: drm/bridge: tc358767: fix max_tu_symbol value max_tu_symbol was programmed to TU_SIZE_RECOMMENDED - 1, which is not what the spec says. The spec says: roundup ((input active video bandwidth in bytes/output active video bandwidth in bytes) * tu_size) It is not quite clear what the above means, but calculating max_tu_symbol = (input Bps / output Bps) * tu_size seems to work and fixes the issues seen. This fixes artifacts in some videomodes (e.g. 1024x768@60 on 2-lanes & 1.62Gbps was pretty bad for me). 
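As a rough worked example for the mode mentioned above, assuming a 65 MHz pixel clock, 24 bits per pixel, a TU size of 64, and taking the link rate value 162000 as the per-lane payload bandwidth in kbyte/s (illustrative numbers, not taken from the patch):

  in_bw  = 65000 * 24 / 8   = 195000    /* kbyte/s of video    */
  out_bw = 2 * 162000       = 324000    /* kbyte/s on the link */
  max_tu_symbol = DIV_ROUND_UP(195000 * 64, 324000) = 39

so only about 39 of the 64 symbol slots in each transfer unit actually carry pixel data, well below the fixed value programmed before this change.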
Signed-off-by: Tomi Valkeinen Tested-by: Jyri Sarha Signed-off-by: Andrzej Hajda Link: https://patchwork.freedesktop.org/patch/msgid/20190924131702.9988-1-tomi.valkeinen@ti.com --- drivers/gpu/drm/bridge/tc358767.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index cebc8e620820..8a8d605021f0 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -728,6 +728,8 @@ static int tc_set_video_mode(struct tc_data *tc, int lower_margin = mode->vsync_start - mode->vdisplay; int vsync_len = mode->vsync_end - mode->vsync_start; u32 dp0_syncval; + u32 bits_per_pixel = 24; + u32 in_bw, out_bw; /* * Recommended maximum number of symbols transferred in a transfer unit: @@ -735,7 +737,10 @@ static int tc_set_video_mode(struct tc_data *tc, * (output active video bandwidth in bytes)) * Must be less than tu_size. */ - max_tu_symbol = TU_SIZE_RECOMMENDED - 1; + + in_bw = mode->clock * bits_per_pixel / 8; + out_bw = tc->link.base.num_lanes * tc->link.base.rate; + max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw); dev_dbg(tc->dev, "set mode %dx%d\n", mode->hdisplay, mode->vdisplay); -- cgit v1.2.3 From 542a5c66e0ff6cf257600df1d7362735f8ebde35 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 2 Oct 2019 17:00:34 +0100 Subject: drm/i915/gt: Warn CI about an unrecoverable wedge If we have a wedged GPU that we need to recover, but fail, add a taint for CI to pickup and schedule a reboot. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Petri Latvala Reviewed-by: Janusz Krzysztofik Link: https://patchwork.freedesktop.org/patch/msgid/20191002160034.5121-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_reset.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 34791fc79dea..77445d100ca8 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -872,8 +872,14 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) ok = __intel_gt_reset(gt, ALL_ENGINES) == 0; - if (!ok) + if (!ok) { + /* + * Warn CI about the unrecoverable wedged condition. + * Time for a reboot. + */ + add_taint_for_CI(TAINT_WARN); return false; + } /* * Undo nop_submit_request. We prevent all new i915 requests from -- cgit v1.2.3 From bed5ef230943863b9abf5eae226a20fad9a8ff71 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 9 Oct 2019 19:09:42 +0200 Subject: USB: usb-skeleton: fix NULL-deref on disconnect The driver was using its struct usb_interface pointer as an inverted disconnected flag and was setting it to NULL before making sure all completion handlers had run. This could lead to NULL-pointer dereferences in the dev_err() statements in the completion handlers which relies on said pointer. Fix this by using a dedicated disconnected flag. Note that this is also addresses a NULL-pointer dereference at release() and a struct usb_interface reference leak introduced by a recent runtime PM fix, which depends on and should have been submitted together with this patch. 
Fixes: 4212cd74ca6f ("USB: usb-skeleton.c: remove err() usage") Fixes: 5c290a5e42c3 ("USB: usb-skeleton: fix runtime PM after driver unbind") Cc: stable Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20191009170944.30057-2-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usb-skeleton.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c index 8001d6384c73..c2843fcfa52d 100644 --- a/drivers/usb/usb-skeleton.c +++ b/drivers/usb/usb-skeleton.c @@ -61,6 +61,7 @@ struct usb_skel { spinlock_t err_lock; /* lock for errors */ struct kref kref; struct mutex io_mutex; /* synchronize I/O with disconnect */ + unsigned long disconnected:1; wait_queue_head_t bulk_in_wait; /* to wait for an ongoing read */ }; #define to_skel_dev(d) container_of(d, struct usb_skel, kref) @@ -238,7 +239,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count, if (rv < 0) return rv; - if (!dev->interface) { /* disconnect() was called */ + if (dev->disconnected) { /* disconnect() was called */ rv = -ENODEV; goto exit; } @@ -420,7 +421,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer, /* this lock makes sure we don't submit URBs to gone devices */ mutex_lock(&dev->io_mutex); - if (!dev->interface) { /* disconnect() was called */ + if (dev->disconnected) { /* disconnect() was called */ mutex_unlock(&dev->io_mutex); retval = -ENODEV; goto error; @@ -571,7 +572,7 @@ static void skel_disconnect(struct usb_interface *interface) /* prevent more I/O from starting */ mutex_lock(&dev->io_mutex); - dev->interface = NULL; + dev->disconnected = 1; mutex_unlock(&dev->io_mutex); usb_kill_anchored_urbs(&dev->submitted); -- cgit v1.2.3 From 6353001852776e7eeaab4da78922d4c6f2b076af Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 9 Oct 2019 19:09:43 +0200 Subject: USB: usb-skeleton: fix use-after-free after driver unbind The driver failed to stop its read URB on disconnect, something which could lead to a use-after-free in the completion handler after driver unbind in case the character device has been closed. Fixes: e7389cc9a7ff ("USB: skel_read really sucks royally") Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20191009170944.30057-3-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usb-skeleton.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c index c2843fcfa52d..be311787403e 100644 --- a/drivers/usb/usb-skeleton.c +++ b/drivers/usb/usb-skeleton.c @@ -575,6 +575,7 @@ static void skel_disconnect(struct usb_interface *interface) dev->disconnected = 1; mutex_unlock(&dev->io_mutex); + usb_kill_urb(dev->bulk_in_urb); usb_kill_anchored_urbs(&dev->submitted); /* decrement our usage count */ -- cgit v1.2.3 From 369dca424a3f9ef80bc4e5a5ccdd41a17ce306c1 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 9 Oct 2019 19:09:44 +0200 Subject: USB: usb-skeleton: drop redundant in-urb check The driver bails out at probe if we can't find a bulk-in endpoint or if we fail to allocate the URB, so drop the check in read(). 
Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20191009170944.30057-4-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usb-skeleton.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c index be311787403e..2dc58766273a 100644 --- a/drivers/usb/usb-skeleton.c +++ b/drivers/usb/usb-skeleton.c @@ -230,8 +230,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count, dev = file->private_data; - /* if we cannot read at all, return EOF */ - if (!dev->bulk_in_urb || !count) + if (!count) return 0; /* no concurrent readers */ -- cgit v1.2.3 From ac9099e10a603f02f52f583fba78dd54c52b5542 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Mon, 7 Oct 2019 15:16:01 +0300 Subject: usb: cdns3: gadget: Fix full-speed mode We need to disable USB3 PHY for full-speed mode else gadget mode is broken. Signed-off-by: Roger Quadros Signed-off-by: Sekhar Nori Reviewed-by: Peter Chen Link: https://lore.kernel.org/r/20191007121601.25996-3-rogerq@ti.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/cdns3/gadget.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index 228cdc4ab886..157536753b8c 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -2571,6 +2571,7 @@ static int cdns3_gadget_start(struct cdns3 *cdns) switch (max_speed) { case USB_SPEED_FULL: writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf); + writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf); break; case USB_SPEED_HIGH: writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf); -- cgit v1.2.3 From 02ffc26df96b487f476b8945eaef13c7f170e79a Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Mon, 7 Oct 2019 15:16:00 +0300 Subject: usb: cdns3: fix cdns3_core_init_role() At startup we should trigger the HW state machine only if it is OTG mode. Otherwise we should just start the respective role. Initialize idle role by default. If we don't do this then cdns3_idle_role_stop() is not called when switching to host/device role and so lane switch mechanism doesn't work. This results to super-speed device not working in one orientation if it was plugged before driver probe. Signed-off-by: Roger Quadros Signed-off-by: Sekhar Nori Link: https://lore.kernel.org/r/20191007121601.25996-2-rogerq@ti.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/cdns3/core.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c index 06f1e105be4e..1109dc5a4c39 100644 --- a/drivers/usb/cdns3/core.c +++ b/drivers/usb/cdns3/core.c @@ -160,10 +160,28 @@ static int cdns3_core_init_role(struct cdns3 *cdns) if (ret) goto err; - if (cdns->dr_mode != USB_DR_MODE_OTG) { + /* Initialize idle role to start with */ + ret = cdns3_role_start(cdns, USB_ROLE_NONE); + if (ret) + goto err; + + switch (cdns->dr_mode) { + case USB_DR_MODE_UNKNOWN: + case USB_DR_MODE_OTG: ret = cdns3_hw_role_switch(cdns); if (ret) goto err; + break; + case USB_DR_MODE_PERIPHERAL: + ret = cdns3_role_start(cdns, USB_ROLE_DEVICE); + if (ret) + goto err; + break; + case USB_DR_MODE_HOST: + ret = cdns3_role_start(cdns, USB_ROLE_HOST); + if (ret) + goto err; + break; } return ret; -- cgit v1.2.3 From eb21a74adaa11846737f503dd618bd35337e2c37 Mon Sep 17 00:00:00 2001 From: Pawel Laszczak Date: Mon, 7 Oct 2019 13:03:23 +0100 Subject: usb: cdns3: Fix for incorrect DMA mask. 
This patch restores the correct DMA mask after switching back to device mode. The issue occurred because Device part of controller use 32 bits DMA and Host side use 64 bits DMA. During loading XHCI driver the DMA mask used by driver is overwritten by XHCI driver so it must be restored to 32 bits. Reported-by: Pawel Laszczak Signed-off-by: Roger Quadros Signed-off-by: Pawel Laszczak Fixes: 7733f6c32e36 ("usb: cdns3: Add Cadence USB3 DRD Driver") Reviewed-by: Peter Chen Tested-by: Roger Quadros Link: https://lore.kernel.org/r/1570449803-15299-1-git-send-email-pawell@cadence.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/cdns3/gadget.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index 157536753b8c..2ca280f4c054 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -2663,6 +2663,13 @@ static int __cdns3_gadget_init(struct cdns3 *cdns) { int ret = 0; + /* Ensure 32-bit DMA Mask in case we switched back from Host mode */ + ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret); + return ret; + } + cdns3_drd_switch_gadget(cdns, 1); pm_runtime_get_sync(cdns->dev); -- cgit v1.2.3 From 726b55d0e22ca72c69c947af87785c830289ddbc Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 9 Oct 2019 17:38:47 +0200 Subject: USB: legousbtower: fix use-after-free on release The driver was accessing its struct usb_device in its release() callback without holding a reference. This would lead to a use-after-free whenever the device was disconnected while the character device was still open. Fixes: fef526cae700 ("USB: legousbtower: remove custom debug macro") Cc: stable # 3.12 Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20191009153848.8664-5-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/legousbtower.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index 44d6a3381804..9d4c52a7ebe0 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c @@ -296,6 +296,7 @@ static inline void tower_delete (struct lego_usb_tower *dev) kfree (dev->read_buffer); kfree (dev->interrupt_in_buffer); kfree (dev->interrupt_out_buffer); + usb_put_dev(dev->udev); kfree (dev); } @@ -810,7 +811,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device mutex_init(&dev->lock); - dev->udev = udev; + dev->udev = usb_get_dev(udev); dev->open_count = 0; dev->disconnected = 0; -- cgit v1.2.3 From 58ecf131e74620305175a7aa103f81350bb37570 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 9 Oct 2019 17:38:46 +0200 Subject: USB: ldusb: fix NULL-derefs on driver unbind The driver was using its struct usb_interface pointer as an inverted disconnected flag, but was setting it to NULL before making sure all completion handlers had run. This could lead to a NULL-pointer dereference in a number of dev_dbg, dev_warn and dev_err statements in the completion handlers which relies on said pointer. Fix this by unconditionally stopping all I/O and preventing resubmissions by poisoning the interrupt URBs at disconnect and using a dedicated disconnected flag. This also makes sure that all I/O has completed by the time the disconnect callback returns. 
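The resulting teardown, reduced to a sketch (hypothetical mydrv driver with the usual urb/mutex/flag/waitqueue members, not the ldusb code verbatim):

  static void mydrv_disconnect(struct usb_interface *intf)
  {
          struct mydrv *dev = usb_get_intfdata(intf);

          /* stop I/O and keep completion handlers from resubmitting */
          usb_poison_urb(dev->interrupt_in_urb);
          usb_poison_urb(dev->interrupt_out_urb);

          mutex_lock(&dev->mutex);
          dev->disconnected = 1;          /* dedicated flag, intf stays valid */
          mutex_unlock(&dev->mutex);

          wake_up_interruptible(&dev->read_wait);
  }

and the open/read/write paths then test dev->disconnected instead of comparing an interface pointer against NULL.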
Fixes: 2824bd250f0b ("[PATCH] USB: add ldusb driver") Cc: stable # 2.6.13 Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20191009153848.8664-4-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/ldusb.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 6581774bdfa4..f3108d85e768 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -153,6 +153,7 @@ MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in struct ld_usb { struct mutex mutex; /* locks this structure */ struct usb_interface *intf; /* save off the usb interface pointer */ + unsigned long disconnected:1; int open_count; /* number of times this port has been opened */ @@ -192,12 +193,10 @@ static void ld_usb_abort_transfers(struct ld_usb *dev) /* shutdown transfer */ if (dev->interrupt_in_running) { dev->interrupt_in_running = 0; - if (dev->intf) - usb_kill_urb(dev->interrupt_in_urb); + usb_kill_urb(dev->interrupt_in_urb); } if (dev->interrupt_out_busy) - if (dev->intf) - usb_kill_urb(dev->interrupt_out_urb); + usb_kill_urb(dev->interrupt_out_urb); } /** @@ -205,8 +204,6 @@ static void ld_usb_abort_transfers(struct ld_usb *dev) */ static void ld_usb_delete(struct ld_usb *dev) { - ld_usb_abort_transfers(dev); - /* free data structures */ usb_free_urb(dev->interrupt_in_urb); usb_free_urb(dev->interrupt_out_urb); @@ -263,7 +260,7 @@ static void ld_usb_interrupt_in_callback(struct urb *urb) resubmit: /* resubmit if we're still running */ - if (dev->interrupt_in_running && !dev->buffer_overflow && dev->intf) { + if (dev->interrupt_in_running && !dev->buffer_overflow) { retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC); if (retval) { dev_err(&dev->intf->dev, @@ -392,7 +389,7 @@ static int ld_usb_release(struct inode *inode, struct file *file) retval = -ENODEV; goto unlock_exit; } - if (dev->intf == NULL) { + if (dev->disconnected) { /* the device was unplugged before the file was released */ mutex_unlock(&dev->mutex); /* unlock here as ld_usb_delete frees dev */ @@ -423,7 +420,7 @@ static __poll_t ld_usb_poll(struct file *file, poll_table *wait) dev = file->private_data; - if (!dev->intf) + if (dev->disconnected) return EPOLLERR | EPOLLHUP; poll_wait(file, &dev->read_wait, wait); @@ -462,7 +459,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count, } /* verify that the device wasn't unplugged */ - if (dev->intf == NULL) { + if (dev->disconnected) { retval = -ENODEV; printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval); goto unlock_exit; @@ -542,7 +539,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer, } /* verify that the device wasn't unplugged */ - if (dev->intf == NULL) { + if (dev->disconnected) { retval = -ENODEV; printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval); goto unlock_exit; @@ -764,6 +761,9 @@ static void ld_usb_disconnect(struct usb_interface *intf) /* give back our minor */ usb_deregister_dev(intf, &ld_usb_class); + usb_poison_urb(dev->interrupt_in_urb); + usb_poison_urb(dev->interrupt_out_urb); + mutex_lock(&dev->mutex); /* if the device is not opened, then we clean up right now */ @@ -771,7 +771,7 @@ static void ld_usb_disconnect(struct usb_interface *intf) mutex_unlock(&dev->mutex); ld_usb_delete(dev); } else { - dev->intf = NULL; + dev->disconnected = 1; /* wake up pollers */ wake_up_interruptible_all(&dev->read_wait); 
wake_up_interruptible_all(&dev->write_wait); -- cgit v1.2.3 From 123a0f125fa3d2104043697baa62899d9e549272 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 9 Oct 2019 17:38:44 +0200 Subject: USB: adutux: fix use-after-free on release The driver was accessing its struct usb_device in its release() callback without holding a reference. This would lead to a use-after-free whenever the device was disconnected while the character device was still open. Fixes: 66d4bc30d128 ("USB: adutux: remove custom debug macro") Cc: stable # 3.12 Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20191009153848.8664-2-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/adutux.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index f9efec719359..6f5edb9fc61e 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -149,6 +149,7 @@ static void adu_delete(struct adu_device *dev) kfree(dev->read_buffer_secondary); kfree(dev->interrupt_in_buffer); kfree(dev->interrupt_out_buffer); + usb_put_dev(dev->udev); kfree(dev); } @@ -664,7 +665,7 @@ static int adu_probe(struct usb_interface *interface, mutex_init(&dev->mtx); spin_lock_init(&dev->buflock); - dev->udev = udev; + dev->udev = usb_get_dev(udev); init_waitqueue_head(&dev->read_wait); init_waitqueue_head(&dev->write_wait); -- cgit v1.2.3 From 93ddb1f56ae102f14f9e46a9a9c8017faa970003 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 9 Oct 2019 17:38:45 +0200 Subject: USB: chaoskey: fix use-after-free on release The driver was accessing its struct usb_interface in its release() callback without holding a reference. This would lead to a use-after-free whenever the device was disconnected while the character device was still open. Fixes: 66e3e591891d ("usb: Add driver for Altus Metrum ChaosKey device (v2)") Cc: stable # 4.1 Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20191009153848.8664-3-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/chaoskey.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c index cf5828ce927a..34e6cd6f40d3 100644 --- a/drivers/usb/misc/chaoskey.c +++ b/drivers/usb/misc/chaoskey.c @@ -98,6 +98,7 @@ static void chaoskey_free(struct chaoskey *dev) usb_free_urb(dev->urb); kfree(dev->name); kfree(dev->buf); + usb_put_intf(dev->interface); kfree(dev); } } @@ -145,6 +146,8 @@ static int chaoskey_probe(struct usb_interface *interface, if (dev == NULL) goto out; + dev->interface = usb_get_intf(interface); + dev->buf = kmalloc(size, GFP_KERNEL); if (dev->buf == NULL) @@ -174,8 +177,6 @@ static int chaoskey_probe(struct usb_interface *interface, goto out; } - dev->interface = interface; - dev->in_ep = in_ep; if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID) -- cgit v1.2.3 From edc4746f253d907d048de680a621e121517f484b Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 9 Oct 2019 12:48:41 +0200 Subject: USB: iowarrior: fix use-after-free on disconnect A recent fix addressing a deadlock on disconnect introduced a new bug by moving the present flag out of the critical section protected by the driver-data mutex. This could lead to a racing release() freeing the driver data before disconnect() is done with it. Due to insufficient locking a related use-after-free could be triggered also before the above mentioned commit. 
Specifically, the driver needs to hold the driver-data mutex also while checking the opened flag at disconnect(). Fixes: c468a8aa790e ("usb: iowarrior: fix deadlock on disconnect") Fixes: 946b960d13c1 ("USB: add driver for iowarrior devices.") Cc: stable # 2.6.21 Reported-by: syzbot+0761012cebf7bdb38137@syzkaller.appspotmail.com Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20191009104846.5925-2-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/iowarrior.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index f5bed9f29e56..4fe1d3267b3c 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -866,8 +866,6 @@ static void iowarrior_disconnect(struct usb_interface *interface) dev = usb_get_intfdata(interface); mutex_lock(&iowarrior_open_disc_lock); usb_set_intfdata(interface, NULL); - /* prevent device read, write and ioctl */ - dev->present = 0; minor = dev->minor; mutex_unlock(&iowarrior_open_disc_lock); @@ -878,8 +876,7 @@ static void iowarrior_disconnect(struct usb_interface *interface) mutex_lock(&dev->mutex); /* prevent device read, write and ioctl */ - - mutex_unlock(&dev->mutex); + dev->present = 0; if (dev->opened) { /* There is a process that holds a filedescriptor to the device , @@ -889,8 +886,10 @@ static void iowarrior_disconnect(struct usb_interface *interface) usb_kill_urb(dev->int_in_urb); wake_up_interruptible(&dev->read_wait); wake_up_interruptible(&dev->write_wait); + mutex_unlock(&dev->mutex); } else { /* no process is using the device, cleanup now */ + mutex_unlock(&dev->mutex); iowarrior_delete(dev); } -- cgit v1.2.3 From 80cd5479b525093a56ef768553045741af61b250 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 9 Oct 2019 12:48:42 +0200 Subject: USB: iowarrior: fix use-after-free on release The driver was accessing its struct usb_interface from its release() callback without holding a reference. This would lead to a use-after-free whenever debugging was enabled and the device was disconnected while its character device was open. Fixes: 549e83500b80 ("USB: iowarrior: Convert local dbg macro to dev_dbg") Cc: stable # 3.16 Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20191009104846.5925-3-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/iowarrior.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 4fe1d3267b3c..6841267820c6 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -243,6 +243,7 @@ static inline void iowarrior_delete(struct iowarrior *dev) kfree(dev->int_in_buffer); usb_free_urb(dev->int_in_urb); kfree(dev->read_queue); + usb_put_intf(dev->interface); kfree(dev); } @@ -764,7 +765,7 @@ static int iowarrior_probe(struct usb_interface *interface, init_waitqueue_head(&dev->write_wait); dev->udev = udev; - dev->interface = interface; + dev->interface = usb_get_intf(interface); iface_desc = interface->cur_altsetting; dev->product_id = le16_to_cpu(udev->descriptor.idProduct); -- cgit v1.2.3 From b5f8d46867ca233d773408ffbe691a8062ed718f Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 9 Oct 2019 12:48:43 +0200 Subject: USB: iowarrior: fix use-after-free after driver unbind Make sure to stop also the asynchronous write URBs on disconnect() to avoid use-after-free in the completion handler after driver unbind. 
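The anchor pattern in a nutshell, shown as fragments with made-up names rather than the actual iowarrior hunks (the anchor API comes from <linux/usb.h>):

  /* probe(): set up the tracking structure */
  init_usb_anchor(&dev->submitted);

  /* write path: anchor the asynchronous URB before submitting it */
  usb_anchor_urb(urb, &dev->submitted);
  retval = usb_submit_urb(urb, GFP_KERNEL);
  if (retval)
          usb_unanchor_urb(urb);          /* drop it again on failure */

  /* disconnect(): nothing may complete after this returns */
  usb_kill_anchored_urbs(&dev->submitted);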
Fixes: 946b960d13c1 ("USB: add driver for iowarrior devices.") Cc: stable # 2.6.21: 51a2f077c44e ("USB: introduce usb_anchor") Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20191009104846.5925-4-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/iowarrior.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 6841267820c6..f405fa734bcc 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -87,6 +87,7 @@ struct iowarrior { char chip_serial[9]; /* the serial number string of the chip connected */ int report_size; /* number of bytes in a report */ u16 product_id; + struct usb_anchor submitted; }; /*--------------*/ @@ -425,11 +426,13 @@ static ssize_t iowarrior_write(struct file *file, retval = -EFAULT; goto error; } + usb_anchor_urb(int_out_urb, &dev->submitted); retval = usb_submit_urb(int_out_urb, GFP_KERNEL); if (retval) { dev_dbg(&dev->interface->dev, "submit error %d for urb nr.%d\n", retval, atomic_read(&dev->write_busy)); + usb_unanchor_urb(int_out_urb); goto error; } /* submit was ok */ @@ -770,6 +773,8 @@ static int iowarrior_probe(struct usb_interface *interface, iface_desc = interface->cur_altsetting; dev->product_id = le16_to_cpu(udev->descriptor.idProduct); + init_usb_anchor(&dev->submitted); + res = usb_find_last_int_in_endpoint(iface_desc, &dev->int_in_endpoint); if (res) { dev_err(&interface->dev, "no interrupt-in endpoint found\n"); @@ -885,6 +890,7 @@ static void iowarrior_disconnect(struct usb_interface *interface) Deleting the device is postponed until close() was called. */ usb_kill_urb(dev->int_in_urb); + usb_kill_anchored_urbs(&dev->submitted); wake_up_interruptible(&dev->read_wait); wake_up_interruptible(&dev->write_wait); mutex_unlock(&dev->mutex); -- cgit v1.2.3 From 7c5b971d623fdb40c03205e99f9ef68002b34726 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 9 Oct 2019 12:48:44 +0200 Subject: USB: iowarrior: drop redundant disconnect mutex Drop the redundant disconnect mutex which was introduced after the open-disconnect race had been addressed generally in USB core by commit d4ead16f50f9 ("USB: prevent char device open/deregister race"). Specifically, the rw-semaphore in core guarantees that all calls to open() will have completed and that no new calls to open() will occur after usb_deregister_dev() returns. Hence there is no need use the driver data as an inverted disconnected flag. Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20191009104846.5925-5-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/iowarrior.c | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index f405fa734bcc..d844c2098e42 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -58,7 +58,6 @@ MODULE_LICENSE("GPL"); static DEFINE_MUTEX(iowarrior_mutex); static struct usb_driver iowarrior_driver; -static DEFINE_MUTEX(iowarrior_open_disc_lock); /*--------------*/ /* data */ @@ -601,16 +600,13 @@ static int iowarrior_open(struct inode *inode, struct file *file) return -ENODEV; } - mutex_lock(&iowarrior_open_disc_lock); dev = usb_get_intfdata(interface); if (!dev) { - mutex_unlock(&iowarrior_open_disc_lock); mutex_unlock(&iowarrior_mutex); return -ENODEV; } mutex_lock(&dev->mutex); - mutex_unlock(&iowarrior_open_disc_lock); /* Only one process can open each device, no sharing. 
*/ if (dev->opened) { @@ -842,7 +838,6 @@ static int iowarrior_probe(struct usb_interface *interface, if (retval) { /* something prevented us from registering this driver */ dev_err(&interface->dev, "Not able to get a minor for this device.\n"); - usb_set_intfdata(interface, NULL); goto error; } @@ -866,16 +861,8 @@ error: */ static void iowarrior_disconnect(struct usb_interface *interface) { - struct iowarrior *dev; - int minor; - - dev = usb_get_intfdata(interface); - mutex_lock(&iowarrior_open_disc_lock); - usb_set_intfdata(interface, NULL); - - minor = dev->minor; - mutex_unlock(&iowarrior_open_disc_lock); - /* give back our minor - this will call close() locks need to be dropped at this point*/ + struct iowarrior *dev = usb_get_intfdata(interface); + int minor = dev->minor; usb_deregister_dev(interface, &iowarrior_class); -- cgit v1.2.3 From 8d33e828f72c216ae264ad39088a595d9a6cc95c Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 9 Oct 2019 12:48:45 +0200 Subject: USB: iowarrior: drop redundant iowarrior mutex Drop the redundant iowarrior mutex introduced by commit 925ce689bb31 ("USB: autoconvert trivial BKL users to private mutex") which replaced an earlier BKL use. The lock serialised calls to open() against other open() and ioctl(), but neither is needed. Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20191009104846.5925-6-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/iowarrior.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index d844c2098e42..ad29ef51e53f 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -54,9 +54,6 @@ MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); -/* Module parameters */ -static DEFINE_MUTEX(iowarrior_mutex); - static struct usb_driver iowarrior_driver; /*--------------*/ @@ -480,8 +477,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, if (!buffer) return -ENOMEM; - /* lock this object */ - mutex_lock(&iowarrior_mutex); mutex_lock(&dev->mutex); /* verify that the device wasn't unplugged */ @@ -574,7 +569,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, error_out: /* unlock the device */ mutex_unlock(&dev->mutex); - mutex_unlock(&iowarrior_mutex); kfree(buffer); return retval; } @@ -589,22 +583,18 @@ static int iowarrior_open(struct inode *inode, struct file *file) int subminor; int retval = 0; - mutex_lock(&iowarrior_mutex); subminor = iminor(inode); interface = usb_find_interface(&iowarrior_driver, subminor); if (!interface) { - mutex_unlock(&iowarrior_mutex); printk(KERN_ERR "%s - error, can't find device for minor %d\n", __func__, subminor); return -ENODEV; } dev = usb_get_intfdata(interface); - if (!dev) { - mutex_unlock(&iowarrior_mutex); + if (!dev) return -ENODEV; - } mutex_lock(&dev->mutex); @@ -628,7 +618,6 @@ static int iowarrior_open(struct inode *inode, struct file *file) out: mutex_unlock(&dev->mutex); - mutex_unlock(&iowarrior_mutex); return retval; } -- cgit v1.2.3 From ebb2fe57a51c630e0f852becbbdd295ad5d60514 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 9 Oct 2019 12:48:46 +0200 Subject: USB: iowarrior: use pr_err() Replace the one remaining printk with pr_err(). 
Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20191009104846.5925-7-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/iowarrior.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index ad29ef51e53f..dce44fbf031f 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -587,7 +587,7 @@ static int iowarrior_open(struct inode *inode, struct file *file) interface = usb_find_interface(&iowarrior_driver, subminor); if (!interface) { - printk(KERN_ERR "%s - error, can't find device for minor %d\n", + pr_err("%s - error, can't find device for minor %d\n", __func__, subminor); return -ENODEV; } -- cgit v1.2.3 From ff30283a8de4f978dad120936c1af507d01a6a98 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 8 Oct 2019 13:46:53 -0700 Subject: serial: fix kernel-doc warning in comments Fix Sphinx warning in serial_core.c: ../drivers/tty/serial/serial_core.c:1969: WARNING: Definition list ends without a blank line; unexpected unindent. Fixes: 73abaf87f01b ("serial: earlycon: Refactor parse_options into serial core") Signed-off-by: Randy Dunlap Cc: Peter Hurley Cc: Greg Kroah-Hartman Link: https://lore.kernel.org/r/e989641c-224a-1090-e596-e7cc800bed44@infradead.org Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/serial_core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 6e713be1d4e9..c4a414a46c7f 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1964,8 +1964,10 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co) * console=,io|mmio|mmio16|mmio32|mmio32be|mmio32native,, * * The optional form + * * earlycon=,0x, * console=,0x, + * * is also accepted; the returned @iotype will be UPIO_MEM. 
* * Returns 0 on success or -EINVAL on failure -- cgit v1.2.3 From 31a8d8fa84c51d3ab00bf059158d5de6178cf890 Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Wed, 9 Oct 2019 17:49:19 +0800 Subject: tty: serial: imx: Use platform_get_irq_optional() for optional IRQs All i.MX SoCs except i.MX1 have ONLY one necessary IRQ, use platform_get_irq_optional() to get second/third IRQ which are optional to avoid below error message during probe: [ 0.726219] imx-uart 30860000.serial: IRQ index 1 not found [ 0.731329] imx-uart 30860000.serial: IRQ index 2 not found Fixes: 7723f4c5ecdb8d83 ("driver core: platform: Add an error message to platform_get_irq*()") Signed-off-by: Anson Huang Link: https://lore.kernel.org/r/1570614559-11900-1-git-send-email-Anson.Huang@nxp.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/imx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 87c58f9f6390..5e08f2657b90 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -2222,8 +2222,8 @@ static int imx_uart_probe(struct platform_device *pdev) return PTR_ERR(base); rxirq = platform_get_irq(pdev, 0); - txirq = platform_get_irq(pdev, 1); - rtsirq = platform_get_irq(pdev, 2); + txirq = platform_get_irq_optional(pdev, 1); + rtsirq = platform_get_irq_optional(pdev, 2); sport->port.dev = &pdev->dev; sport->port.mapbase = res->start; -- cgit v1.2.3 From 18380f52dbac230032a16545e67a6d90b548cf18 Mon Sep 17 00:00:00 2001 From: yu kuai Date: Sun, 29 Sep 2019 21:42:49 +0800 Subject: platform/x86: classmate-laptop: remove unused variable Fixes gcc '-Wunused-but-set-variable' warning: drivers/platform/x86/classmate-laptop.c: In function cmpc_accel_remove_v4: drivers/platform/x86/classmate-laptop.c:424:21: warning: variable accel set but not used [-Wunused-but-set-variable] drivers/platform/x86/classmate-laptop.c: In function cmpc_accel_remove: drivers/platform/x86/classmate-laptop.c:660:21: warning: variable accel set but not used [-Wunused-but-set-variable] In function cmpc_accel_remove_v4 and cmpc_accel_remove, variable accel is set but not used, so it can be removed. In that case, variable inputdev is set but not used and can be removed. 
Fixes: 7125587df4e8 ("classmate-laptop: Add support for Classmate V4 accelerometer.") Reported-by: Hulk Robot Signed-off-by: yu kuai Signed-off-by: Andy Shevchenko --- drivers/platform/x86/classmate-laptop.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c index 86cc2cc68fb5..af063f690846 100644 --- a/drivers/platform/x86/classmate-laptop.c +++ b/drivers/platform/x86/classmate-laptop.c @@ -420,12 +420,6 @@ failed_sensitivity: static int cmpc_accel_remove_v4(struct acpi_device *acpi) { - struct input_dev *inputdev; - struct cmpc_accel *accel; - - inputdev = dev_get_drvdata(&acpi->dev); - accel = dev_get_drvdata(&inputdev->dev); - device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr_v4); device_remove_file(&acpi->dev, &cmpc_accel_g_select_attr_v4); return cmpc_remove_acpi_notify_device(acpi); @@ -656,12 +650,6 @@ failed_file: static int cmpc_accel_remove(struct acpi_device *acpi) { - struct input_dev *inputdev; - struct cmpc_accel *accel; - - inputdev = dev_get_drvdata(&acpi->dev); - accel = dev_get_drvdata(&inputdev->dev); - device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr); return cmpc_remove_acpi_notify_device(acpi); } -- cgit v1.2.3 From 71eea7071583b04e9b796ee1d6f7a07334426495 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 10 Oct 2019 11:15:20 +0300 Subject: platform/x86: intel_punit_ipc: Avoid error message when retrieving IRQ Since the commit 7723f4c5ecdb ("driver core: platform: Add an error message to platform_get_irq*()") the platform_get_irq() started issuing an error message which is not what we want here. Switch to platform_get_irq_optional() to have only warning message provided by the driver. Fixes: 7723f4c5ecdb ("driver core: platform: Add an error message to platform_get_irq*()") Signed-off-by: Andy Shevchenko --- drivers/platform/x86/intel_punit_ipc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c index ab7ae1950867..fa97834fdb78 100644 --- a/drivers/platform/x86/intel_punit_ipc.c +++ b/drivers/platform/x86/intel_punit_ipc.c @@ -293,9 +293,8 @@ static int intel_punit_ipc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, punit_ipcdev); - irq = platform_get_irq(pdev, 0); + irq = platform_get_irq_optional(pdev, 0); if (irq < 0) { - punit_ipcdev->irq = 0; dev_warn(&pdev->dev, "Invalid IRQ, using polling mode\n"); } else { ret = devm_request_irq(&pdev->dev, irq, intel_punit_ioc, -- cgit v1.2.3 From aafb00a977cf7d81821f7c9d12e04c558c22dc3c Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 9 Oct 2019 17:38:48 +0200 Subject: USB: yurex: fix NULL-derefs on disconnect The driver was using its struct usb_interface pointer as an inverted disconnected flag, but was setting it to NULL without making sure all code paths that used it were done with it. Before commit ef61eb43ada6 ("USB: yurex: Fix protection fault after device removal") this included the interrupt-in completion handler, but there are further accesses in dev_err and dev_dbg statements in yurex_write() and the driver-data destructor (sic!). Fix this by unconditionally stopping also the control URB at disconnect and by using a dedicated disconnected flag. Note that we need to take a reference to the struct usb_interface to avoid a use-after-free in the destructor whenever the device was disconnected while the character device was still open. 
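The difference between the two schemes, in a reduced (hypothetical) form rather than the literal yurex code:

	/* old: the interface pointer doubles as an inverted "gone" flag */
	dev->interface = NULL;				/* disconnect() */
	dev_err(&dev->interface->dev, "timeout\n");	/* racing write()/destructor: NULL deref */

	/* new: dedicated flag plus a real reference for the remaining users */
	dev->disconnected = 1;				/* disconnect(), under io_mutex */
	dev->interface = usb_get_intf(interface);	/* probe(); paired with usb_put_intf()
							   in the kref destructor */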
Fixes: aadd6472d904 ("USB: yurex.c: remove dbg() usage") Fixes: 45714104b9e8 ("USB: yurex.c: remove err() usage") Cc: stable # 3.5: ef61eb43ada6 Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20191009153848.8664-6-johan@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/yurex.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 8d52d4336c29..be0505b8b5d4 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -60,6 +60,7 @@ struct usb_yurex { struct kref kref; struct mutex io_mutex; + unsigned long disconnected:1; struct fasync_struct *async_queue; wait_queue_head_t waitq; @@ -107,6 +108,7 @@ static void yurex_delete(struct kref *kref) dev->int_buffer, dev->urb->transfer_dma); usb_free_urb(dev->urb); } + usb_put_intf(dev->interface); usb_put_dev(dev->udev); kfree(dev); } @@ -205,7 +207,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_ init_waitqueue_head(&dev->waitq); dev->udev = usb_get_dev(interface_to_usbdev(interface)); - dev->interface = interface; + dev->interface = usb_get_intf(interface); /* set up the endpoint information */ iface_desc = interface->cur_altsetting; @@ -316,8 +318,9 @@ static void yurex_disconnect(struct usb_interface *interface) /* prevent more I/O from starting */ usb_poison_urb(dev->urb); + usb_poison_urb(dev->cntl_urb); mutex_lock(&dev->io_mutex); - dev->interface = NULL; + dev->disconnected = 1; mutex_unlock(&dev->io_mutex); /* wakeup waiters */ @@ -405,7 +408,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count, dev = file->private_data; mutex_lock(&dev->io_mutex); - if (!dev->interface) { /* already disconnected */ + if (dev->disconnected) { /* already disconnected */ mutex_unlock(&dev->io_mutex); return -ENODEV; } @@ -440,7 +443,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, goto error; mutex_lock(&dev->io_mutex); - if (!dev->interface) { /* already disconnected */ + if (dev->disconnected) { /* already disconnected */ mutex_unlock(&dev->io_mutex); retval = -ENODEV; goto error; -- cgit v1.2.3 From 51d8a7eca67784b155a07aeab4bfb9f63ebaaf9e Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Tue, 8 Oct 2019 15:01:59 +0200 Subject: binder: prevent UAF read in print_binder_transaction_log_entry() When a binder transaction is initiated on a binder device coming from a binderfs instance, a pointer to the name of the binder device is stashed in the binder_transaction_log_entry's context_name member. Later on it is used to print the name in print_binder_transaction_log_entry(). By the time print_binder_transaction_log_entry() accesses context_name binderfs_evict_inode() might have already freed the associated memory thereby causing a UAF. Do the simple thing and prevent this by copying the name of the binder device instead of stashing a pointer to it. 
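Reduced to its essence (illustrative sketch; the exact hunks are in the diff below):

	/* fixed-size storage in the log entry instead of a borrowed pointer */
	char context_name[BINDERFS_MAX_NAME + 1];

	/* copied at transaction time; truncation only affects the debug log */
	strscpy(e->context_name, proc->context->name, sizeof(e->context_name));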
Reported-by: Jann Horn Fixes: 03e2e07e3814 ("binder: Make transaction_log available in binderfs") Link: https://lore.kernel.org/r/CAG48ez14Q0-F8LqsvcNbyR2o6gPW8SHXsm4u5jmD9MpsteM2Tw@mail.gmail.com Signed-off-by: Christian Brauner Reviewed-by: Joel Fernandes (Google) Acked-by: Todd Kjos Reviewed-by: Hridya Valsaraju Link: https://lore.kernel.org/r/20191008130159.10161-1-christian.brauner@ubuntu.com Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 4 +++- drivers/android/binder_internal.h | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index c0a491277aca..5b9ac2122e89 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -57,6 +57,7 @@ #include #include #include +#include #include #include #include @@ -66,6 +67,7 @@ #include #include +#include #include @@ -2876,7 +2878,7 @@ static void binder_transaction(struct binder_proc *proc, e->target_handle = tr->target.handle; e->data_size = tr->data_size; e->offsets_size = tr->offsets_size; - e->context_name = proc->context->name; + strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME); if (reply) { binder_inner_proc_lock(proc); diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index bd47f7f72075..ae991097d14d 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -130,7 +130,7 @@ struct binder_transaction_log_entry { int return_error_line; uint32_t return_error; uint32_t return_error_param; - const char *context_name; + char context_name[BINDERFS_MAX_NAME + 1]; }; struct binder_transaction_log { -- cgit v1.2.3 From 5dc54a06f6e575f146492661129d24f3b50d17bb Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Mon, 30 Sep 2019 16:12:50 -0400 Subject: binder: Fix comment headers on binder_alloc_prepare_to_free() binder_alloc_buffer_lookup() doesn't exist and is named "binder_alloc_prepare_to_free()". Correct the code comments to reflect this. Signed-off-by: Joel Fernandes (Google) Reviewed-by: Christian Brauner Link: https://lore.kernel.org/r/20190930201250.139554-1-joel@joelfernandes.org Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 6d79a1b0d446..d42a8b2f636a 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -156,7 +156,7 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked( } /** - * binder_alloc_buffer_lookup() - get buffer given user ptr + * binder_alloc_prepare_to_free() - get buffer given user ptr * @alloc: binder_alloc for this proc * @user_ptr: User pointer to buffer data * -- cgit v1.2.3 From e0b0cb9388642c104838fac100a4af32745621e2 Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Mon, 30 Sep 2019 15:42:22 -0500 Subject: virt: vbox: fix memory leak in hgcm_call_preprocess_linaddr In hgcm_call_preprocess_linaddr memory is allocated for bounce_buf but is not released if copy_form_user fails. In order to prevent memory leak in case of failure, the assignment to bounce_buf_ret is moved before the error check. This way the allocated bounce_buf will be released by the caller. 
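The resulting ownership rule, in reduced form (sketch only; the allocation details differ in the real function):

	bounce_buf = kvmalloc(len, GFP_KERNEL);
	if (!bounce_buf)
		return -ENOMEM;

	*bounce_buf_ret = bounce_buf;	/* caller owns and frees it from here on */

	if (copy_in) {
		if (copy_from_user(bounce_buf, (void __user *)buf, len))
			return -EFAULT;	/* no leak: caller sees *bounce_buf_ret */
	} else {
		memset(bounce_buf, 0, len);
	}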
Fixes: 579db9d45cb4 ("virt: Add vboxguest VMMDEV communication code") Signed-off-by: Navid Emamdoost Reviewed-by: Hans de Goede Link: https://lore.kernel.org/r/20190930204223.3660-1-navid.emamdoost@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/virt/vboxguest/vboxguest_utils.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c index 75fd140b02ff..43c391626a00 100644 --- a/drivers/virt/vboxguest/vboxguest_utils.c +++ b/drivers/virt/vboxguest/vboxguest_utils.c @@ -220,6 +220,8 @@ static int hgcm_call_preprocess_linaddr( if (!bounce_buf) return -ENOMEM; + *bounce_buf_ret = bounce_buf; + if (copy_in) { ret = copy_from_user(bounce_buf, (void __user *)buf, len); if (ret) @@ -228,7 +230,6 @@ static int hgcm_call_preprocess_linaddr( memset(bounce_buf, 0, len); } - *bounce_buf_ret = bounce_buf; hgcm_call_add_pagelist_size(bounce_buf, len, extra); return 0; } -- cgit v1.2.3 From bd9bec5b6a09a3a7656f096e3ff0ca6709f89770 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 10 Oct 2019 09:32:42 +0100 Subject: drm/i915/execlists: Mark up expected state during reset Move the BUG_ON around slightly and add some explanations for each to try and capture the expected state more carefully. We want to compare the expected active state of our bookkeeping as compared to the tracked HW state. References: https://bugs.freedesktop.org/show_bug.cgi?id=111937 Signed-off-by: Chris Wilson Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20191010083242.1387-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index bdfd602b402a..bd8ba98e35df 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -2728,8 +2728,10 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) if (!rq) goto unwind; + /* We still have requests in-flight; the engine should be active */ + GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); + ce = rq->hw_context; - GEM_BUG_ON(i915_active_is_idle(&ce->active)); GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); /* Proclaim we have exclusive access to the context image! */ @@ -2737,10 +2739,13 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) rq = active_request(rq); if (!rq) { + /* Idle context; tidy up the ring so we can restart afresh */ ce->ring->head = ce->ring->tail; goto out_replay; } + /* Context has requests still in-flight; it should not be idle! */ + GEM_BUG_ON(i915_active_is_idle(&ce->active)); ce->ring->head = intel_ring_wrap(ce->ring, rq->head); /* -- cgit v1.2.3 From 86027e312c36dfe6b614e32f7550d5fe20a6d9e0 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 10 Oct 2019 12:02:52 +0100 Subject: drm/i915/selftests: Check that registers are preserved between virtual engines Make sure that we copy across the registers from one engine to the next, as we hop around a virtual engine. 
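Condensed, the check works like this (the full selftest is in the diff below):

	/*
	 * Request i, forced onto physical sibling i % nsibling:
	 *   MI_STORE_REGISTER_MEM:  CS_GPR(i)  ->  scratch[i]
	 *   MI_LOAD_REGISTER_IMM:   i + 1      ->  CS_GPR(i + 1)
	 *
	 * Each request reads back what the previous one wrote on a different
	 * engine, so scratch[i] == i holds only if the user registers in the
	 * context image survive the hop between engines.
	 */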
Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20191010110252.17289-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/selftest_lrc.c | 180 +++++++++++++++++++++++++++++++++ 1 file changed, 180 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 198cf2f754f4..a691e429ca01 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -19,6 +19,33 @@ #include "gem/selftests/igt_gem_utils.h" #include "gem/selftests/mock_context.h" +static struct i915_vma *create_scratch(struct intel_gt *gt) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; + + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED); + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + return vma; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) { + i915_gem_object_put(obj); + return ERR_PTR(err); + } + + return vma; +} + static int live_sanitycheck(void *arg) { struct drm_i915_private *i915 = arg; @@ -2076,6 +2103,158 @@ static int live_virtual_mask(void *arg) return 0; } +static int preserved_virtual_engine(struct drm_i915_private *i915, + struct intel_engine_cs **siblings, + unsigned int nsibling) +{ +#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4) + + struct i915_request *last = NULL; + struct i915_gem_context *ctx; + struct intel_context *ve; + struct i915_vma *scratch; + struct igt_live_test t; + const int num_gpr = 16 * 2; /* each GPR is 2 dwords */ + unsigned int n; + int err = 0; + + ctx = kernel_context(i915); + if (!ctx) + return -ENOMEM; + + scratch = create_scratch(siblings[0]->gt); + if (IS_ERR(scratch)) { + err = PTR_ERR(scratch); + goto out_close; + } + + ve = intel_execlists_create_virtual(ctx, siblings, nsibling); + if (IS_ERR(ve)) { + err = PTR_ERR(ve); + goto out_scratch; + } + + err = intel_context_pin(ve); + if (err) + goto out_put; + + err = igt_live_test_begin(&t, i915, __func__, ve->engine->name); + if (err) + goto out_unpin; + + for (n = 0; n < num_gpr; n++) { + struct intel_engine_cs *engine = siblings[n % nsibling]; + struct i915_request *rq; + u32 *cs; + + rq = i915_request_create(ve); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_end; + } + + i915_request_put(last); + last = i915_request_get(rq); + + cs = intel_ring_begin(rq, 8); + if (IS_ERR(cs)) { + i915_request_add(rq); + err = PTR_ERR(cs); + goto out_end; + } + + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = CS_GPR(engine, n); + *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32); + *cs++ = 0; + + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = CS_GPR(engine, (n + 1) % num_gpr); + *cs++ = n + 1; + + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + + /* Restrict this request to run on a particular engine */ + rq->execution_mask = engine->mask; + i915_request_add(rq); + } + + if (i915_request_wait(last, 0, HZ / 5) < 0) { + err = -ETIME; + } else { + u32 *map = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); + + for (n = 0; n < num_gpr; n++) { + if (map[n] != n) { + pr_err("Incorrect value[%d] found for GPR[%d]\n", + map[n], n); + err = -EINVAL; + break; + } + } + + i915_gem_object_unpin_map(scratch->obj); + } + +out_end: + if (igt_live_test_end(&t)) + err = -EIO; + i915_request_put(last); +out_unpin: + intel_context_unpin(ve); 
+out_put: + intel_context_put(ve); +out_scratch: + i915_vma_unpin_and_release(&scratch, 0); +out_close: + kernel_context_close(ctx); + return err; + +#undef CS_GPR +} + +static int live_virtual_preserved(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; + struct intel_gt *gt = &i915->gt; + unsigned int class, inst; + + /* + * Check that the context image retains non-privileged (user) registers + * from one engine to the next. For this we check that the CS_GPR + * are preserved. + */ + + if (USES_GUC_SUBMISSION(i915)) + return 0; + + /* As we use CS_GPR we cannot run before they existed on all engines. */ + if (INTEL_GEN(i915) < 9) + return 0; + + for (class = 0; class <= MAX_ENGINE_CLASS; class++) { + int nsibling, err; + + nsibling = 0; + for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) { + if (!gt->engine_class[class][inst]) + continue; + + siblings[nsibling++] = gt->engine_class[class][inst]; + } + if (nsibling < 2) + continue; + + err = preserved_virtual_engine(i915, siblings, nsibling); + if (err) + return err; + } + + return 0; +} + static int bond_virtual_engine(struct drm_i915_private *i915, unsigned int class, struct intel_engine_cs **siblings, @@ -2277,6 +2456,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) SUBTEST(live_preempt_smoke), SUBTEST(live_virtual_engine), SUBTEST(live_virtual_mask), + SUBTEST(live_virtual_preserved), SUBTEST(live_virtual_bond), }; -- cgit v1.2.3 From b33a654a5bd6bba0052ca2d86098826b309f27b8 Mon Sep 17 00:00:00 2001 From: Ulf Magnusson Date: Fri, 27 Sep 2019 19:42:32 +0200 Subject: drm/tiny: Kconfig: Remove always-y THERMAL dep. from TINYDRM_REPAPER MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [cherry-picked to drm-misc-fixes: drm-misc-next commit dfef959803c7] Commit 554b3529fe01 ("thermal/drivers/core: Remove the module Kconfig's option") changed the type of THERMAL from tristate to bool, so THERMAL || !THERMAL is now always y. Remove the redundant dependency. Discovered through Kconfiglib detecting a dependency loop. The C tools simplify the expression to y before running dependency loop detection, and so don't see it. Changing the type of THERMAL back to tristate makes the C tools detect the same loop. Not sure if running dep. loop detection after simplification can be called a bug. Fixing this nit unbreaks Kconfiglib on the kernel at least. 
Signed-off-by: Ulf Magnusson Signed-off-by: Noralf Trønnes Link: https://patchwork.freedesktop.org/patch/msgid/20190927174218.GA32085@huvuddator --- drivers/gpu/drm/tiny/Kconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index 504763423d46..a46ac284dd5e 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -63,7 +63,6 @@ config TINYDRM_REPAPER depends on DRM && SPI select DRM_KMS_HELPER select DRM_KMS_CMA_HELPER - depends on THERMAL || !THERMAL help DRM driver for the following Pervasive Displays panels: 1.44" TFT EPD Panel (E1144CS021) -- cgit v1.2.3 From b058b2552edb4a6ce62922bf904149bbafd5fd9d Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 20 Sep 2019 14:03:18 +0800 Subject: w1: ds250x: Fix build error without CRC16 If CRC16 is not set, building will fails: drivers/w1/slaves/w1_ds250x.o: In function `w1_ds2505_read_page': w1_ds250x.c:(.text+0x82f): undefined reference to `crc16' w1_ds250x.c:(.text+0x90a): undefined reference to `crc16' w1_ds250x.c:(.text+0x91a): undefined reference to `crc16' Reported-by: Hulk Robot Fixes: 25ec8710d9c2 ("w1: add DS2501, DS2502, DS2505 EPROM device driver") Signed-off-by: YueHaibing Signed-off-by: Arnd Bergmann Link: https://lore.kernel.org/r/20190920060318.35020-1-yuehaibing@huawei.com Signed-off-by: Greg Kroah-Hartman --- drivers/w1/slaves/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig index ebed495b9e69..b7847636501d 100644 --- a/drivers/w1/slaves/Kconfig +++ b/drivers/w1/slaves/Kconfig @@ -103,6 +103,7 @@ config W1_SLAVE_DS2438 config W1_SLAVE_DS250X tristate "512b/1kb/16kb EPROM family support" + select CRC16 help Say Y here if you want to use a 1-wire 512b/1kb/16kb EPROM family device (DS250x). -- cgit v1.2.3 From a2f83e8b0c82c9500421a26c49eb198b25fcdea3 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Wed, 2 Oct 2019 06:14:17 -0400 Subject: dm snapshot: introduce account_start_copy() and account_end_copy() This simple refactoring moves code for modifying the semaphore cow_count into separate functions to prepare for changes that will extend these methods to provide for a more sophisticated mechanism for COW throttling. Signed-off-by: Mikulas Patocka Reviewed-by: Nikos Tsironis Signed-off-by: Mike Snitzer --- drivers/md/dm-snap.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index f150f5c5492b..da3bd1794ee0 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1512,6 +1512,16 @@ static void snapshot_dtr(struct dm_target *ti) kfree(s); } +static void account_start_copy(struct dm_snapshot *s) +{ + down(&s->cow_count); +} + +static void account_end_copy(struct dm_snapshot *s) +{ + up(&s->cow_count); +} + /* * Flush a list of buffers. 
*/ @@ -1732,7 +1742,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context) rb_link_node(&pe->out_of_order_node, parent, p); rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree); } - up(&s->cow_count); + account_end_copy(s); } /* @@ -1756,7 +1766,7 @@ static void start_copy(struct dm_snap_pending_exception *pe) dest.count = src.count; /* Hand over to kcopyd */ - down(&s->cow_count); + account_start_copy(s); dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); } @@ -1776,7 +1786,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe, pe->full_bio = bio; pe->full_bio_end_io = bio->bi_end_io; - down(&s->cow_count); + account_start_copy(s); callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client, copy_callback, pe); @@ -1866,7 +1876,7 @@ static void zero_callback(int read_err, unsigned long write_err, void *context) struct bio *bio = context; struct dm_snapshot *s = bio->bi_private; - up(&s->cow_count); + account_end_copy(s); bio->bi_status = write_err ? BLK_STS_IOERR : 0; bio_endio(bio); } @@ -1880,7 +1890,7 @@ static void zero_exception(struct dm_snapshot *s, struct dm_exception *e, dest.sector = bio->bi_iter.bi_sector; dest.count = s->store->chunk_size; - down(&s->cow_count); + account_start_copy(s); WARN_ON_ONCE(bio->bi_private); bio->bi_private = s; dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio); -- cgit v1.2.3 From b21555786f18cd77f2311ad89074533109ae3ffa Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Wed, 2 Oct 2019 06:15:53 -0400 Subject: dm snapshot: rework COW throttling to fix deadlock Commit 721b1d98fb517a ("dm snapshot: Fix excessive memory usage and workqueue stalls") introduced a semaphore to limit the maximum number of in-flight kcopyd (COW) jobs. The implementation of this throttling mechanism is prone to a deadlock: 1. One or more threads write to the origin device causing COW, which is performed by kcopyd. 2. At some point some of these threads might reach the s->cow_count semaphore limit and block in down(&s->cow_count), holding a read lock on _origins_lock. 3. Someone tries to acquire a write lock on _origins_lock, e.g., snapshot_ctr(), which blocks because the threads at step (2) already hold a read lock on it. 4. A COW operation completes and kcopyd runs dm-snapshot's completion callback, which ends up calling pending_complete(). pending_complete() tries to resubmit any deferred origin bios. This requires acquiring a read lock on _origins_lock, which blocks. This happens because the read-write semaphore implementation gives priority to writers, meaning that as soon as a writer tries to enter the critical section, no readers will be allowed in, until all writers have completed their work. So, pending_complete() waits for the writer at step (3) to acquire and release the lock. This writer waits for the readers at step (2) to release the read lock and those readers wait for pending_complete() (the kcopyd thread) to signal the s->cow_count semaphore: DEADLOCK. The above was thoroughly analyzed and documented by Nikos Tsironis as part of his initial proposal for fixing this deadlock, see: https://www.redhat.com/archives/dm-devel/2019-October/msg00001.html Fix this deadlock by reworking COW throttling so that it waits without holding any locks. Add a variable 'in_progress' that counts how many kcopyd jobs are running. A function wait_for_in_progress() will sleep if 'in_progress' is over the limit. It drops _origins_lock in order to avoid the deadlock. 
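Schematically (condensed from the patch; the real wait_for_in_progress() also handles the retry and the unlock_origins case):

	/* old: may sleep in down() while holding _origins_lock for read */
	down(&s->cow_count);			/* origin write path          */
	up(&s->cow_count);			/* kcopyd completion callback */

	/* new: drop _origins_lock before going to sleep */
	spin_lock(&s->in_progress_wait.lock);
	if (s->in_progress > cow_threshold) {
		DECLARE_WAITQUEUE(wait, current);

		__add_wait_queue(&s->in_progress_wait, &wait);
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&s->in_progress_wait.lock);
		up_read(&_origins_lock);	/* the crucial step */
		io_schedule();
		remove_wait_queue(&s->in_progress_wait, &wait);
		return false;			/* caller retakes _origins_lock and retries */
	}
	spin_unlock(&s->in_progress_wait.lock);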
Reported-by: Guruswamy Basavaiah Reported-by: Nikos Tsironis Reviewed-by: Nikos Tsironis Tested-by: Nikos Tsironis Fixes: 721b1d98fb51 ("dm snapshot: Fix excessive memory usage and workqueue stalls") Cc: stable@vger.kernel.org # v5.0+ Depends-on: 4a3f111a73a8c ("dm snapshot: introduce account_start_copy() and account_end_copy()") Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer --- drivers/md/dm-snap.c | 78 ++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 64 insertions(+), 14 deletions(-) diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index da3bd1794ee0..4fb1a40e68a0 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -18,7 +18,6 @@ #include #include #include -#include #include "dm.h" @@ -107,8 +106,8 @@ struct dm_snapshot { /* The on disk metadata handler */ struct dm_exception_store *store; - /* Maximum number of in-flight COW jobs. */ - struct semaphore cow_count; + unsigned in_progress; + struct wait_queue_head in_progress_wait; struct dm_kcopyd_client *kcopyd_client; @@ -162,8 +161,8 @@ struct dm_snapshot { */ #define DEFAULT_COW_THRESHOLD 2048 -static int cow_threshold = DEFAULT_COW_THRESHOLD; -module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644); +static unsigned cow_threshold = DEFAULT_COW_THRESHOLD; +module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644); MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write"); DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, @@ -1327,7 +1326,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_hash_tables; } - sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX); + init_waitqueue_head(&s->in_progress_wait); s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); if (IS_ERR(s->kcopyd_client)) { @@ -1509,17 +1508,54 @@ static void snapshot_dtr(struct dm_target *ti) dm_put_device(ti, s->origin); + WARN_ON(s->in_progress); + kfree(s); } static void account_start_copy(struct dm_snapshot *s) { - down(&s->cow_count); + spin_lock(&s->in_progress_wait.lock); + s->in_progress++; + spin_unlock(&s->in_progress_wait.lock); } static void account_end_copy(struct dm_snapshot *s) { - up(&s->cow_count); + spin_lock(&s->in_progress_wait.lock); + BUG_ON(!s->in_progress); + s->in_progress--; + if (likely(s->in_progress <= cow_threshold) && + unlikely(waitqueue_active(&s->in_progress_wait))) + wake_up_locked(&s->in_progress_wait); + spin_unlock(&s->in_progress_wait.lock); +} + +static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins) +{ + if (unlikely(s->in_progress > cow_threshold)) { + spin_lock(&s->in_progress_wait.lock); + if (likely(s->in_progress > cow_threshold)) { + /* + * NOTE: this throttle doesn't account for whether + * the caller is servicing an IO that will trigger a COW + * so excess throttling may result for chunks not required + * to be COW'd. But if cow_threshold was reached, extra + * throttling is unlikely to negatively impact performance. 
+ */ + DECLARE_WAITQUEUE(wait, current); + __add_wait_queue(&s->in_progress_wait, &wait); + __set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock(&s->in_progress_wait.lock); + if (unlock_origins) + up_read(&_origins_lock); + io_schedule(); + remove_wait_queue(&s->in_progress_wait, &wait); + return false; + } + spin_unlock(&s->in_progress_wait.lock); + } + return true; } /* @@ -1537,7 +1573,7 @@ static void flush_bios(struct bio *bio) } } -static int do_origin(struct dm_dev *origin, struct bio *bio); +static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit); /* * Flush a list of buffers. @@ -1550,7 +1586,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) while (bio) { n = bio->bi_next; bio->bi_next = NULL; - r = do_origin(s->origin, bio); + r = do_origin(s->origin, bio, false); if (r == DM_MAPIO_REMAPPED) generic_make_request(bio); bio = n; @@ -1926,6 +1962,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) if (!s->valid) return DM_MAPIO_KILL; + if (bio_data_dir(bio) == WRITE) { + while (unlikely(!wait_for_in_progress(s, false))) + ; /* wait_for_in_progress() has slept */ + } + down_read(&s->lock); dm_exception_table_lock(&lock); @@ -2122,7 +2163,7 @@ redirect_to_origin: if (bio_data_dir(bio) == WRITE) { up_write(&s->lock); - return do_origin(s->origin, bio); + return do_origin(s->origin, bio, false); } out_unlock: @@ -2497,15 +2538,24 @@ next_snapshot: /* * Called on a write from the origin driver. */ -static int do_origin(struct dm_dev *origin, struct bio *bio) +static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit) { struct origin *o; int r = DM_MAPIO_REMAPPED; +again: down_read(&_origins_lock); o = __lookup_origin(origin->bdev); - if (o) + if (o) { + if (limit) { + struct dm_snapshot *s; + list_for_each_entry(s, &o->snapshots, list) + if (unlikely(!wait_for_in_progress(s, true))) + goto again; + } + r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); + } up_read(&_origins_lock); return r; @@ -2618,7 +2668,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio) dm_accept_partial_bio(bio, available_sectors); /* Only tell snapshots if this is a write */ - return do_origin(o->dev, bio); + return do_origin(o->dev, bio, true); } /* -- cgit v1.2.3 From 9a3a41dfe2239abe65944cfa1f8cd3aff5a03944 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 4 Oct 2019 13:34:52 +0200 Subject: drm/i915: Fix for_each_intel_plane_mask definition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using for_each_intel_plane_mask() fails because of an extra bracket, remove the bracket so we can use it in the next commit. 
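With the extra parenthesis gone the macro can be used as intended, e.g. (illustrative usage, not a hunk from this series):

	struct intel_plane *plane;

	for_each_intel_plane_mask(&dev_priv->drm, plane,
				  crtc_state->base.plane_mask) {
		/* visits every intel_plane whose bit is set in the mask */
	}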
Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20191004113514.17064-3-maarten.lankhorst@linux.intel.com Reviewed-by: Ville Syrjälä Reviewed-by: Matt Roper --- drivers/gpu/drm/i915/display/intel_display.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 2782f23ee887..4ded64fcbc6c 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -350,7 +350,7 @@ enum phy_fia { &(dev)->mode_config.plane_list, \ base.head) \ for_each_if((plane_mask) & \ - drm_plane_mask(&intel_plane->base))) + drm_plane_mask(&intel_plane->base)) #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ list_for_each_entry(intel_plane, \ -- cgit v1.2.3 From af9fbfa657c86dd0c5ee0e24da3e2da79353c0fb Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 4 Oct 2019 13:34:53 +0200 Subject: drm/i915: Introduce and use intel_atomic_crtc_state_for_each_plane_state. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of looking at drm_plane_state, look at intel_plane_state directly. This will allow us to make the watermarks bigjoiner aware, when we make it work for bigjoiner slave pipes as well. Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20191004113514.17064-4-maarten.lankhorst@linux.intel.com Reviewed-by: Ville Syrjälä Reviewed-by: Matt Roper --- drivers/gpu/drm/i915/display/intel_display.h | 8 ++++ drivers/gpu/drm/i915/intel_pm.c | 60 ++++++++++++---------------- 2 files changed, 33 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 4ded64fcbc6c..bc2cf4bec0e8 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -440,6 +440,14 @@ enum phy_fia { (__i)--) \ for_each_if(crtc) +#define intel_atomic_crtc_state_for_each_plane_state( \ + plane, plane_state, \ + crtc_state) \ + for_each_intel_plane_mask(((crtc_state)->base.state->dev), (plane), \ + ((crtc_state)->base.plane_mask)) \ + for_each_if ((plane_state = \ + to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->base.state, &plane->base)))) + void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index bfcf03ab5245..6aeaad587a20 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3089,8 +3089,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) struct intel_pipe_wm *pipe_wm; struct drm_device *dev = state->dev; const struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_plane *plane; - const struct drm_plane_state *plane_state; + struct intel_plane *plane; + const struct intel_plane_state *plane_state; const struct intel_plane_state *pristate = NULL; const struct intel_plane_state *sprstate = NULL; const struct intel_plane_state *curstate = NULL; @@ -3099,15 +3099,13 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) pipe_wm = &crtc_state->wm.ilk.optimal; - drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &crtc_state->base) { - const struct intel_plane_state *ps = to_intel_plane_state(plane_state); - - if (plane->type == DRM_PLANE_TYPE_PRIMARY) - pristate = ps; - else if 
(plane->type == DRM_PLANE_TYPE_OVERLAY) - sprstate = ps; - else if (plane->type == DRM_PLANE_TYPE_CURSOR) - curstate = ps; + intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { + if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) + pristate = plane_state; + else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY) + sprstate = plane_state; + else if (plane->base.type == DRM_PLANE_TYPE_CURSOR) + curstate = plane_state; } pipe_wm->pipe_enabled = crtc_state->base.active; @@ -4124,8 +4122,8 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, { struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); struct drm_atomic_state *state = crtc_state->base.state; - struct drm_plane *plane; - const struct drm_plane_state *drm_plane_state; + const struct intel_plane_state *plane_state; + struct intel_plane *plane; int crtc_clock, dotclk; u32 pipe_max_pixel_rate; uint_fixed_16_16_t pipe_downscale; @@ -4134,12 +4132,10 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, if (!crtc_state->base.enable) return 0; - drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) { + intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { uint_fixed_16_16_t plane_downscale; uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8); int bpp; - const struct intel_plane_state *plane_state = - to_intel_plane_state(drm_plane_state); if (!intel_wm_plane_visible(crtc_state, plane_state)) continue; @@ -4227,18 +4223,16 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, u64 *uv_plane_data_rate) { struct drm_atomic_state *state = crtc_state->base.state; - struct drm_plane *plane; - const struct drm_plane_state *drm_plane_state; + struct intel_plane *plane; + const struct intel_plane_state *plane_state; u64 total_data_rate = 0; if (WARN_ON(!state)) return 0; /* Calculate and cache data rate for each plane */ - drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) { - enum plane_id plane_id = to_intel_plane(plane)->id; - const struct intel_plane_state *plane_state = - to_intel_plane_state(drm_plane_state); + intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { + enum plane_id plane_id = plane->id; u64 rate; /* packed/y */ @@ -4259,18 +4253,16 @@ static u64 icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, u64 *plane_data_rate) { - struct drm_plane *plane; - const struct drm_plane_state *drm_plane_state; + struct intel_plane *plane; + const struct intel_plane_state *plane_state; u64 total_data_rate = 0; if (WARN_ON(!crtc_state->base.state)) return 0; /* Calculate and cache data rate for each plane */ - drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) { - const struct intel_plane_state *plane_state = - to_intel_plane_state(drm_plane_state); - enum plane_id plane_id = to_intel_plane(plane)->id; + intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { + enum plane_id plane_id = plane->id; u64 rate; if (!plane_state->planar_linked_plane) { @@ -4282,7 +4274,7 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, /* * The slave plane might not iterate in - * drm_atomic_crtc_state_for_each_plane_state(), + * intel_atomic_crtc_state_for_each_plane_state(), * and needs the master plane state which may be * NULL if we try get_new_plane_state(), so we * always calculate from the master. 
@@ -5065,8 +5057,8 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; - struct drm_plane *plane; - const struct drm_plane_state *drm_plane_state; + struct intel_plane *plane; + const struct intel_plane_state *plane_state; int ret; /* @@ -5075,10 +5067,8 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state) */ memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes)); - drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, - &crtc_state->base) { - const struct intel_plane_state *plane_state = - to_intel_plane_state(drm_plane_state); + intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, + crtc_state) { if (INTEL_GEN(dev_priv) >= 11) ret = icl_build_plane_wm(crtc_state, plane_state); -- cgit v1.2.3 From 04c8b0bf4abc203df94f63f268a56a7a0efbb0f4 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 4 Oct 2019 13:34:55 +0200 Subject: drm/i915: Use intel_plane_state in prepare and cleanup plane_fb MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need to look at the hw fb in the plane split, so replace all the places that use drm_plane_state with intel_plane_state. Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20191004113514.17064-6-maarten.lankhorst@linux.intel.com Reviewed-by: Ville Syrjälä [mlankhorst: Fix line wraps (Matt Roper)] Reviewed-by: Matt Roper --- drivers/gpu/drm/i915/display/intel_display.c | 30 +++++++++++++++++----------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 1a533ccdb54f..23dc7ae1243a 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -14358,12 +14358,14 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) */ int intel_prepare_plane_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) + struct drm_plane_state *_new_plane_state) { + struct intel_plane_state *new_plane_state = + to_intel_plane_state(_new_plane_state); struct intel_atomic_state *intel_state = - to_intel_atomic_state(new_state->state); + to_intel_atomic_state(new_plane_state->base.state); struct drm_i915_private *dev_priv = to_i915(plane->dev); - struct drm_framebuffer *fb = new_state->fb; + struct drm_framebuffer *fb = new_plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); int ret; @@ -14394,9 +14396,9 @@ intel_prepare_plane_fb(struct drm_plane *plane, } } - if (new_state->fence) { /* explicit fencing */ + if (new_plane_state->base.fence) { /* explicit fencing */ ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready, - new_state->fence, + new_plane_state->base.fence, I915_FENCE_TIMEOUT, GFP_KERNEL); if (ret < 0) @@ -14410,7 +14412,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, if (ret) return ret; - ret = intel_plane_pin_fb(to_intel_plane_state(new_state)); + ret = intel_plane_pin_fb(new_plane_state); i915_gem_object_unpin_pages(obj); if (ret) @@ -14419,7 +14421,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, fb_obj_bump_render_priority(obj); intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB); - if (!new_state->fence) { /* implicit fencing */ + if (!new_plane_state->base.fence) { /* implicit fencing */ struct 
dma_fence *fence; ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, @@ -14431,11 +14433,13 @@ intel_prepare_plane_fb(struct drm_plane *plane, fence = dma_resv_get_excl_rcu(obj->base.resv); if (fence) { - add_rps_boost_after_vblank(new_state->crtc, fence); + add_rps_boost_after_vblank(new_plane_state->base.crtc, + fence); dma_fence_put(fence); } } else { - add_rps_boost_after_vblank(new_state->crtc, new_state->fence); + add_rps_boost_after_vblank(new_plane_state->base.crtc, + new_plane_state->base.fence); } /* @@ -14463,10 +14467,12 @@ intel_prepare_plane_fb(struct drm_plane *plane, */ void intel_cleanup_plane_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_plane_state *_old_plane_state) { + struct intel_plane_state *old_plane_state = + to_intel_plane_state(_old_plane_state); struct intel_atomic_state *intel_state = - to_intel_atomic_state(old_state->state); + to_intel_atomic_state(old_plane_state->base.state); struct drm_i915_private *dev_priv = to_i915(plane->dev); if (intel_state->rps_interactive) { @@ -14475,7 +14481,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane, } /* Should only be called after a successful intel_prepare_plane_fb()! */ - intel_plane_unpin_fb(to_intel_plane_state(old_state)); + intel_plane_unpin_fb(old_plane_state); } int -- cgit v1.2.3 From d8bd3e157a17b03d2b0f380e143bf8204f75df94 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 4 Oct 2019 13:34:56 +0200 Subject: drm/i915: Remove begin/finish_crtc_commit, v4. This can all be done from the intel_update_crtc function. Split out the pipe update into a separate function, just like is done for the planes. Pull in all the changes done during fastset as well. It makes no sense for it to still exist as a separate function. Changes since v1: - Inline intel_update_pipe_config() Changes since v2: - Add comments suggested by matt. - Reorder commit_pipe_config() to remove all nesting. (Ville, Matt) - Use intel_set_pipe_src_size((). (Matt) Changes since v3: - Move atomic_update_watermarks closer to the plane calls. 
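Condensed, the per-CRTC flow after this patch becomes (see the diff below for the real thing):

	/* intel_update_crtc(), heavily condensed */
	intel_pipe_update_start(new_crtc_state);	/* vblank evasion */
	commit_pipe_config(state, old_crtc_state, new_crtc_state);
	if (INTEL_GEN(dev_priv) >= 9)
		skl_update_planes_on_crtc(state, crtc);
	else
		i9xx_update_planes_on_crtc(state, crtc);
	intel_pipe_update_end(new_crtc_state);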
Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20191004113514.17064-7-maarten.lankhorst@linux.intel.com Reviewed-by: Matt Roper [mlankhorst: Replace 8 spaces with tabs in comment] --- drivers/gpu/drm/i915/display/intel_display.c | 210 +++++++++++++-------------- 1 file changed, 99 insertions(+), 111 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 23dc7ae1243a..9db390188e9d 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -135,8 +135,6 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config); static void chv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config); -static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *); -static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *); static void intel_crtc_init_scalers(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state); @@ -4401,45 +4399,6 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc) I915_WRITE(PIPE_CHICKEN(pipe), tmp); } -static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state, - const struct intel_crtc_state *new_crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ - crtc->base.mode = new_crtc_state->base.mode; - - /* - * Update pipe size and adjust fitter if needed: the reason for this is - * that in compute_mode_changes we check the native mode (not the pfit - * mode) to see if we can flip rather than do a full mode set. In the - * fastboot case, we'll flip, but if we don't update the pipesrc and - * pfit state, we'll end up with a big fb scanned out into the wrong - * sized surface. 
- */ - - I915_WRITE(PIPESRC(crtc->pipe), - ((new_crtc_state->pipe_src_w - 1) << 16) | - (new_crtc_state->pipe_src_h - 1)); - - /* on skylake this is done by detaching scalers */ - if (INTEL_GEN(dev_priv) >= 9) { - skl_detach_scalers(new_crtc_state); - - if (new_crtc_state->pch_pfit.enabled) - skylake_pfit_enable(new_crtc_state); - } else if (HAS_PCH_SPLIT(dev_priv)) { - if (new_crtc_state->pch_pfit.enabled) - ironlake_pfit_enable(new_crtc_state); - else if (old_crtc_state->pch_pfit.enabled) - ironlake_pfit_disable(old_crtc_state); - } - - if (INTEL_GEN(dev_priv) >= 11) - icl_set_pipe_chicken(crtc); -} - static void intel_fdi_normal_train(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -13695,13 +13654,95 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) return crtc->base.funcs->get_vblank_counter(&crtc->base); } +void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + if (!IS_GEN(dev_priv, 2)) + intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); + + if (crtc_state->has_pch_encoder) { + enum pipe pch_transcoder = + intel_crtc_pch_transcoder(crtc); + + intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); + } +} + +static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ + crtc->base.mode = new_crtc_state->base.mode; + + /* + * Update pipe size and adjust fitter if needed: the reason for this is + * that in compute_mode_changes we check the native mode (not the pfit + * mode) to see if we can flip rather than do a full mode set. In the + * fastboot case, we'll flip, but if we don't update the pipesrc and + * pfit state, we'll end up with a big fb scanned out into the wrong + * sized surface. + */ + intel_set_pipe_src_size(new_crtc_state); + + /* on skylake this is done by detaching scalers */ + if (INTEL_GEN(dev_priv) >= 9) { + skl_detach_scalers(new_crtc_state); + + if (new_crtc_state->pch_pfit.enabled) + skylake_pfit_enable(new_crtc_state); + } else if (HAS_PCH_SPLIT(dev_priv)) { + if (new_crtc_state->pch_pfit.enabled) + ironlake_pfit_enable(new_crtc_state); + else if (old_crtc_state->pch_pfit.enabled) + ironlake_pfit_disable(old_crtc_state); + } + + if (INTEL_GEN(dev_priv) >= 11) + icl_set_pipe_chicken(crtc); +} + +static void commit_pipe_config(struct intel_atomic_state *state, + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + bool modeset = needs_modeset(new_crtc_state); + + /* + * During modesets pipe configuration was programmed as the + * CRTC was enabled. 
+ */ + if (!modeset) { + if (new_crtc_state->base.color_mgmt_changed || + new_crtc_state->update_pipe) + intel_color_commit(new_crtc_state); + + if (INTEL_GEN(dev_priv) >= 9) + skl_detach_scalers(new_crtc_state); + + if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) + bdw_set_pipemisc(new_crtc_state); + + if (new_crtc_state->update_pipe) + intel_pipe_fastset(old_crtc_state, new_crtc_state); + } + + if (dev_priv->display.atomic_update_watermarks) + dev_priv->display.atomic_update_watermarks(state, + new_crtc_state); +} + static void intel_update_crtc(struct intel_crtc *crtc, struct intel_atomic_state *state, struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) { - struct drm_device *dev = state->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); bool modeset = needs_modeset(new_crtc_state); struct intel_plane_state *new_plane_state = intel_atomic_get_new_plane_state(state, @@ -13725,14 +13766,27 @@ static void intel_update_crtc(struct intel_crtc *crtc, else if (new_plane_state) intel_fbc_enable(crtc, new_crtc_state, new_plane_state); - intel_begin_crtc_commit(state, crtc); + /* Perform vblank evasion around commit operation */ + intel_pipe_update_start(new_crtc_state); + + commit_pipe_config(state, old_crtc_state, new_crtc_state); if (INTEL_GEN(dev_priv) >= 9) skl_update_planes_on_crtc(state, crtc); else i9xx_update_planes_on_crtc(state, crtc); - intel_finish_crtc_commit(state, crtc); + intel_pipe_update_end(new_crtc_state); + + /* + * We usually enable FIFO underrun interrupts as part of the + * CRTC enable sequence during modesets. But when we inherit a + * valid pipe configuration from the BIOS we need to take care + * of enabling them on the CRTC's first fastset. 
+ */ + if (new_crtc_state->update_pipe && !modeset && + old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED) + intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); } static void intel_old_crtc_state_disables(struct intel_atomic_state *state, @@ -14522,72 +14576,6 @@ skl_max_scale(const struct intel_crtc_state *crtc_state, return max_scale; } -static void intel_begin_crtc_commit(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - struct intel_crtc_state *new_crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - bool modeset = needs_modeset(new_crtc_state); - - /* Perform vblank evasion around commit operation */ - intel_pipe_update_start(new_crtc_state); - - if (modeset) - goto out; - - if (new_crtc_state->base.color_mgmt_changed || - new_crtc_state->update_pipe) - intel_color_commit(new_crtc_state); - - if (new_crtc_state->update_pipe) - intel_update_pipe_config(old_crtc_state, new_crtc_state); - else if (INTEL_GEN(dev_priv) >= 9) - skl_detach_scalers(new_crtc_state); - - if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - bdw_set_pipemisc(new_crtc_state); - -out: - if (dev_priv->display.atomic_update_watermarks) - dev_priv->display.atomic_update_watermarks(state, - new_crtc_state); -} - -void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - if (!IS_GEN(dev_priv, 2)) - intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); - - if (crtc_state->has_pch_encoder) { - enum pipe pch_transcoder = - intel_crtc_pch_transcoder(crtc); - - intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); - } -} - -static void intel_finish_crtc_commit(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - struct intel_crtc_state *new_crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - - intel_pipe_update_end(new_crtc_state); - - if (new_crtc_state->update_pipe && - !needs_modeset(new_crtc_state) && - old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED) - intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); -} - /** * intel_plane_destroy - destroy a plane * @plane: plane to destroy -- cgit v1.2.3 From 3a612765f4233993bf5cff302844bd3b6b5a7429 Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Fri, 4 Oct 2019 13:34:54 +0200 Subject: drm/i915: Remove cursor use of properties for coordinates MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have a src and dect rectangle, use it instead of relying on the core drm properties. Because the core by default clips the src/dst properties, after the drm_atomic_helper_check_plane_state() we manually set the unclipped src/dst rectangles. We still need the call for visibility checks, but this way we are able to use the src/dst rects in the check/commit code. This removes the special case in the watermark code for cursor w/h. 
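A note for readers coming to this series cold: the plane's src rectangle is kept in 16.16 fixed point while the dst rectangle is in whole pixels, which is why the converted cursor code shifts src-derived values right by 16 but uses dst-derived values as-is. The stand-alone sketch below only illustrates that convention; struct rect and its helpers are simplified stand-ins for the real drm_rect API, not code from this patch.

  #include <stdio.h>

  /* Simplified stand-in for struct drm_rect: x1/y1 inclusive, x2/y2 exclusive. */
  struct rect { int x1, y1, x2, y2; };

  static int rect_width(const struct rect *r)  { return r->x2 - r->x1; }
  static int rect_height(const struct rect *r) { return r->y2 - r->y1; }

  int main(void)
  {
      /* src is in 16.16 fixed point, dst is in whole pixels */
      struct rect src = { 0, 0, 64 << 16, 64 << 16 };
      struct rect dst = { 100, 50, 100 + 64, 50 + 64 };

      printf("src: %dx%d\n", rect_width(&src) >> 16, rect_height(&src) >> 16);
      printf("dst: %dx%d at (%d,%d)\n", rect_width(&dst), rect_height(&dst),
             dst.x1, dst.y1);
      return 0;
  }

The converted cursor helpers follow the same rule: anything derived from base.src is shifted down by 16, anything derived from base.dst is used directly.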
Signed-off-by: Maarten Lankhorst Link: https://patchwork.freedesktop.org/patch/msgid/20191004113514.17064-5-maarten.lankhorst@linux.intel.com Reviewed-by: Ville Syrjälä [mlankhorst: Clarify commit message to state we use unclipped src/dst --- drivers/gpu/drm/i915/display/intel_display.c | 57 +++++++++++++++------------ drivers/gpu/drm/i915/intel_pm.c | 58 ++++++++++------------------ 2 files changed, 53 insertions(+), 62 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 9db390188e9d..a146ec02a0c1 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -10538,16 +10538,16 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state) /* ILK+ do this automagically */ if (HAS_GMCH(dev_priv) && plane_state->base.rotation & DRM_MODE_ROTATE_180) - base += (plane_state->base.crtc_h * - plane_state->base.crtc_w - 1) * fb->format->cpp[0]; + base += (drm_rect_height(&plane_state->base.dst) * + drm_rect_width(&plane_state->base.dst) - 1) * fb->format->cpp[0]; return base; } static u32 intel_cursor_position(const struct intel_plane_state *plane_state) { - int x = plane_state->base.crtc_x; - int y = plane_state->base.crtc_y; + int x = plane_state->base.dst.x1; + int y = plane_state->base.dst.y1; u32 pos = 0; if (x < 0) { @@ -10569,8 +10569,8 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) { const struct drm_mode_config *config = &plane_state->base.plane->dev->mode_config; - int width = plane_state->base.crtc_w; - int height = plane_state->base.crtc_h; + int width = drm_rect_width(&plane_state->base.dst); + int height = drm_rect_height(&plane_state->base.dst); return width > 0 && width <= config->cursor_width && height > 0 && height <= config->cursor_height; @@ -10589,8 +10589,8 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state) if (!plane_state->base.visible) return 0; - src_x = plane_state->base.src_x >> 16; - src_y = plane_state->base.src_y >> 16; + src_x = plane_state->base.src.x1 >> 16; + src_y = plane_state->base.src.y1 >> 16; intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); offset = intel_plane_compute_aligned_offset(&src_x, &src_y, @@ -10625,6 +10625,10 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state, if (ret) return ret; + /* Use the unclipped src/dst rectangles, which we program to hw */ + plane_state->base.src = drm_plane_state_src(&plane_state->base); + plane_state->base.dst = drm_plane_state_dest(&plane_state->base); + ret = intel_cursor_check_surface(plane_state); if (ret) return ret; @@ -10667,7 +10671,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) { - int width = plane_state->base.crtc_w; + int width = drm_rect_width(&plane_state->base.dst); /* * 845g/865g are only limited by the width of their cursors, @@ -10693,8 +10697,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state, /* Check for which cursor types we support */ if (!i845_cursor_size_ok(plane_state)) { DRM_DEBUG("Cursor dimension %dx%d not supported\n", - plane_state->base.crtc_w, - plane_state->base.crtc_h); + drm_rect_width(&plane_state->base.dst), + drm_rect_height(&plane_state->base.dst)); return -EINVAL; } @@ -10727,8 +10731,8 @@ static void i845_update_cursor(struct intel_plane *plane, unsigned long irqflags; if (plane_state && plane_state->base.visible) { - unsigned int width 
= plane_state->base.crtc_w; - unsigned int height = plane_state->base.crtc_h; + unsigned int width = drm_rect_width(&plane_state->base.src); + unsigned int height = drm_rect_height(&plane_state->base.dst); cntl = plane_state->ctl | i845_cursor_ctl_crtc(crtc_state); @@ -10830,7 +10834,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) cntl |= MCURSOR_TRICKLE_FEED_DISABLE; - switch (plane_state->base.crtc_w) { + switch (drm_rect_width(&plane_state->base.dst)) { case 64: cntl |= MCURSOR_MODE_64_ARGB_AX; break; @@ -10841,7 +10845,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, cntl |= MCURSOR_MODE_256_ARGB_AX; break; default: - MISSING_CASE(plane_state->base.crtc_w); + MISSING_CASE(drm_rect_width(&plane_state->base.dst)); return 0; } @@ -10855,8 +10859,8 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev); - int width = plane_state->base.crtc_w; - int height = plane_state->base.crtc_h; + int width = drm_rect_width(&plane_state->base.dst); + int height = drm_rect_height(&plane_state->base.dst); if (!intel_cursor_size_ok(plane_state)) return false; @@ -10909,17 +10913,19 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, /* Check for which cursor types we support */ if (!i9xx_cursor_size_ok(plane_state)) { DRM_DEBUG("Cursor dimension %dx%d not supported\n", - plane_state->base.crtc_w, - plane_state->base.crtc_h); + drm_rect_width(&plane_state->base.dst), + drm_rect_height(&plane_state->base.dst)); return -EINVAL; } WARN_ON(plane_state->base.visible && plane_state->color_plane[0].stride != fb->pitches[0]); - if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) { + if (fb->pitches[0] != + drm_rect_width(&plane_state->base.dst) * fb->format->cpp[0]) { DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n", - fb->pitches[0], plane_state->base.crtc_w); + fb->pitches[0], + drm_rect_width(&plane_state->base.dst)); return -EINVAL; } @@ -10934,7 +10940,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, * Refuse the put the cursor into that compromised position. 
*/ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && - plane_state->base.visible && plane_state->base.crtc_x < 0) { + plane_state->base.visible && plane_state->base.dst.x1 < 0) { DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); return -EINVAL; } @@ -10954,11 +10960,14 @@ static void i9xx_update_cursor(struct intel_plane *plane, unsigned long irqflags; if (plane_state && plane_state->base.visible) { + unsigned width = drm_rect_width(&plane_state->base.dst); + unsigned height = drm_rect_height(&plane_state->base.dst); + cntl = plane_state->ctl | i9xx_cursor_ctl_crtc(crtc_state); - if (plane_state->base.crtc_h != plane_state->base.crtc_w) - fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1); + if (width != height) + fbc_ctl = CUR_FBC_CTL_EN | (height - 1); base = intel_cursor_base(plane_state); pos = intel_cursor_position(plane_state); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 6aeaad587a20..53358e33df1b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1117,10 +1117,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; - if (plane->id == PLANE_CURSOR) - width = plane_state->base.crtc_w; - else - width = drm_rect_width(&plane_state->base.dst); + width = drm_rect_width(&plane_state->base.dst); if (plane->id == PLANE_CURSOR) { wm = intel_wm_method2(clock, htotal, width, cpp, latency); @@ -2549,7 +2546,8 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, return ilk_wm_method2(crtc_state->pixel_rate, crtc_state->base.adjusted_mode.crtc_htotal, - plane_state->base.crtc_w, cpp, mem_value); + drm_rect_width(&plane_state->base.dst), + cpp, mem_value); } /* Only for WM_LP. */ @@ -4046,7 +4044,6 @@ static uint_fixed_16_16_t skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); u32 src_w, src_h, dst_w, dst_h; uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; uint_fixed_16_16_t downscale_h, downscale_w; @@ -4054,27 +4051,17 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state, if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state))) return u32_to_fixed16(0); - /* n.b., src is 16.16 fixed point, dst is whole integer */ - if (plane->id == PLANE_CURSOR) { - /* - * Cursors only support 0/180 degree rotation, - * hence no need to account for rotation here. - */ - src_w = plane_state->base.src_w >> 16; - src_h = plane_state->base.src_h >> 16; - dst_w = plane_state->base.crtc_w; - dst_h = plane_state->base.crtc_h; - } else { - /* - * Src coordinates are already rotated by 270 degrees for - * the 90/270 degree plane rotation cases (to match the - * GTT mapping), hence no need to account for rotation here. - */ - src_w = drm_rect_width(&plane_state->base.src) >> 16; - src_h = drm_rect_height(&plane_state->base.src) >> 16; - dst_w = drm_rect_width(&plane_state->base.dst); - dst_h = drm_rect_height(&plane_state->base.dst); - } + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. + * + * n.b., src is 16.16 fixed point, dst is whole integer. 
+ */ + src_w = drm_rect_width(&plane_state->base.src) >> 16; + src_h = drm_rect_height(&plane_state->base.src) >> 16; + dst_w = drm_rect_width(&plane_state->base.dst); + dst_h = drm_rect_height(&plane_state->base.dst); fp_w_ratio = div_fixed16(src_w, dst_w); fp_h_ratio = div_fixed16(src_h, dst_h); @@ -4698,20 +4685,15 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, struct skl_wm_params *wp, int color_plane) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); const struct drm_framebuffer *fb = plane_state->base.fb; int width; - if (plane->id == PLANE_CURSOR) { - width = plane_state->base.crtc_w; - } else { - /* - * Src coordinates are already rotated by 270 degrees for - * the 90/270 degree plane rotation cases (to match the - * GTT mapping), hence no need to account for rotation here. - */ - width = drm_rect_width(&plane_state->base.src) >> 16; - } + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. + */ + width = drm_rect_width(&plane_state->base.src) >> 16; return skl_compute_wm_params(crtc_state, width, fb->format, fb->modifier, -- cgit v1.2.3 From 11bcf5f78905b90baae8fb01e16650664ed0cb00 Mon Sep 17 00:00:00 2001 From: Kai-Heng Feng Date: Tue, 2 Apr 2019 11:30:37 +0800 Subject: drm/edid: Add 6 bpc quirk for SDC panel in Lenovo G50 Another panel that needs 6BPC quirk. BugLink: https://bugs.launchpad.net/bugs/1819968 Cc: # v4.8+ Reviewed-by: Alex Deucher Signed-off-by: Kai-Heng Feng Signed-off-by: Alex Deucher Link: https://patchwork.freedesktop.org/patch/msgid/20190402033037.21877-1-kai.heng.feng@canonical.com --- drivers/gpu/drm/drm_edid.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 82a4ceed3fcf..6b0177112e18 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -159,6 +159,9 @@ static const struct edid_quirk { /* Medion MD 30217 PG */ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 }, + /* Lenovo G50 */ + { "SDC", 18514, EDID_QUIRK_FORCE_6BPC }, + /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC }, -- cgit v1.2.3 From 574878f98c05aaae7e046cd18be9195f8da38cbd Mon Sep 17 00:00:00 2001 From: Fuqian Huang Date: Thu, 10 Oct 2019 16:32:09 +0800 Subject: xen/grant-table: remove unnecessary printing xen_auto_xlat_grant_frames.vaddr is definitely NULL in this case. So the address printing is unnecessary. 
Signed-off-by: Fuqian Huang Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky --- drivers/xen/grant-table.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7ea6fb6a2e5d..49b381e104ef 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -1363,8 +1363,7 @@ static int gnttab_setup(void) if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) { gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr; if (gnttab_shared.addr == NULL) { - pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n", - (unsigned long)xen_auto_xlat_grant_frames.vaddr); + pr_warn("gnttab share frames is not mapped!\n"); return -ENOMEM; } } -- cgit v1.2.3 From c3d79a83ca10d2026fd1cce096cde2d8e316592e Mon Sep 17 00:00:00 2001 From: Qiang Yu Date: Sun, 22 Sep 2019 15:49:00 +0800 Subject: dma-buf/resv: fix exclusive fence get MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This causes kernel crash when testing lima driver. Cc: Christian König Fixes: b8c036dfc66f ("dma-buf: simplify reservation_object_get_fences_rcu a bit") Signed-off-by: Qiang Yu Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190922074900.853-1-yuq825@gmail.com Signed-off-by: Christian König --- drivers/dma-buf/dma-resv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 42a8f3f11681..709002515550 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -471,7 +471,7 @@ unlock: if (pfence_excl) *pfence_excl = fence_excl; else if (fence_excl) - shared[++shared_count] = fence_excl; + shared[shared_count++] = fence_excl; if (!shared_count) { kfree(shared); -- cgit v1.2.3 From 5e48e55fb57a9026e6b3b6961def6d2aeb210659 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 10 Oct 2019 14:30:46 +0200 Subject: MAINTAINERS: Remove Simon as Renesas SoC Co-Maintainer At the end of the v5.3 upstream kernel development cycle, Simon stepped down from his role as Renesas SoC maintainer. Remove his maintainership, git repository, and branch from the MAINTAINERS file, and add an entry to the CREDITS file to honor his work. 
Signed-off-by: Geert Uytterhoeven Signed-off-by: Linus Torvalds --- CREDITS | 4 ++++ MAINTAINERS | 4 ---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CREDITS b/CREDITS index 8b67a85844b5..031605d46b4d 100644 --- a/CREDITS +++ b/CREDITS @@ -1637,6 +1637,10 @@ S: Panoramastrasse 18 S: D-69126 Heidelberg S: Germany +N: Simon Horman +M: horms@verge.net.au +D: Renesas ARM/ARM64 SoC maintainer + N: Christopher Horn E: chorn@warwick.net D: Miscellaneous sysctl hacks diff --git a/MAINTAINERS b/MAINTAINERS index 94ce075907a0..d44d6732510d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2165,12 +2165,10 @@ F: arch/arm64/boot/dts/realtek/ F: Documentation/devicetree/bindings/arm/realtek.yaml ARM/RENESAS ARM64 ARCHITECTURE -M: Simon Horman M: Geert Uytterhoeven M: Magnus Damm L: linux-renesas-soc@vger.kernel.org Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next S: Supported F: arch/arm64/boot/dts/renesas/ @@ -2282,12 +2280,10 @@ S: Maintained F: drivers/media/platform/s5p-mfc/ ARM/SHMOBILE ARM ARCHITECTURE -M: Simon Horman M: Geert Uytterhoeven M: Magnus Damm L: linux-renesas-soc@vger.kernel.org Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next S: Supported F: arch/arm/boot/dts/emev2* -- cgit v1.2.3 From ee7f5225dc3cc7c19df1603597532ff34571f895 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 8 Oct 2019 14:41:55 -0500 Subject: xen: Stop abusing DT of_dma_configure API As the removed comments say, these aren't DT based devices. of_dma_configure() is going to stop allowing a NULL DT node and calling it will no longer work. The comment is also now out of date as of commit 9ab91e7c5c51 ("arm64: default to the direct mapping in get_arch_dma_ops"). Direct mapping is now the default rather than dma_dummy_ops. According to Stefano and Oleksandr, the only other part needed is setting the DMA masks and there's no reason to restrict the masks to 32-bits. So set the masks to 64 bits. Cc: Robin Murphy Cc: Julien Grall Cc: Nicolas Saenz Julienne Cc: Oleksandr Andrushchenko Cc: Boris Ostrovsky Cc: Juergen Gross Cc: Stefano Stabellini Cc: Christoph Hellwig Cc: xen-devel@lists.xenproject.org Signed-off-by: Rob Herring Acked-by: Oleksandr Andrushchenko Reviewed-by: Boris Ostrovsky Signed-off-by: Boris Ostrovsky --- drivers/gpu/drm/xen/xen_drm_front.c | 12 ++---------- drivers/xen/gntdev.c | 13 ++----------- 2 files changed, 4 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 84aa4d61dc42..3406dfc9a605 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -716,17 +716,9 @@ static int xen_drv_probe(struct xenbus_device *xb_dev, struct device *dev = &xb_dev->dev; int ret; - /* - * The device is not spawn from a device tree, so arch_setup_dma_ops - * is not called, thus leaving the device with dummy DMA ops. - * This makes the device return error on PRIME buffer import, which - * is not correct: to fix this call of_dma_configure() with a NULL - * node to set default DMA ops. 
- */ - dev->coherent_dma_mask = DMA_BIT_MASK(32); - ret = of_dma_configure(dev, NULL, true); + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (ret < 0) { - DRM_ERROR("Cannot setup DMA ops, ret %d", ret); + DRM_ERROR("Cannot setup DMA mask, ret %d", ret); return ret; } diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index a446a7221e13..81401f386c9c 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -22,6 +22,7 @@ #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -34,9 +35,6 @@ #include #include #include -#ifdef CONFIG_XEN_GRANT_DMA_ALLOC -#include -#endif #include #include @@ -625,14 +623,7 @@ static int gntdev_open(struct inode *inode, struct file *flip) flip->private_data = priv; #ifdef CONFIG_XEN_GRANT_DMA_ALLOC priv->dma_dev = gntdev_miscdev.this_device; - - /* - * The device is not spawn from a device tree, so arch_setup_dma_ops - * is not called, thus leaving the device with dummy DMA ops. - * Fix this by calling of_dma_configure() with a NULL node to set - * default DMA ops. - */ - of_dma_configure(priv->dma_dev, NULL, true); + dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64)); #endif pr_debug("priv %p\n", priv); -- cgit v1.2.3 From 862488105b84ca744b3d8ff131e0fcfe10644be1 Mon Sep 17 00:00:00 2001 From: Xiubo Li Date: Thu, 19 Sep 2019 11:44:27 +0530 Subject: nbd: fix possible sysfs duplicate warning 1. nbd_put takes the mutex and drops nbd->ref to 0. It then does idr_remove and drops the mutex. 2. nbd_genl_connect takes the mutex. idr_find/idr_for_each fails to find an existing device, so it does nbd_dev_add. 3. just before the nbd_put could call nbd_dev_remove or not finished totally, but if nbd_dev_add try to add_disk, we can hit: debugfs: Directory 'nbd1' with parent 'block' already present! This patch will make sure all the disk add/remove stuff are done by holding the nbd_index_mutex lock. Reported-by: Mike Christie Reviewed-by: Josef Bacik Signed-off-by: Xiubo Li Signed-off-by: Jens Axboe --- drivers/block/nbd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index ac07e8c94c79..478aa86fc1f2 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -248,8 +248,8 @@ static void nbd_put(struct nbd_device *nbd) if (refcount_dec_and_mutex_lock(&nbd->refs, &nbd_index_mutex)) { idr_remove(&nbd_index_idr, nbd->index); - mutex_unlock(&nbd_index_mutex); nbd_dev_remove(nbd); + mutex_unlock(&nbd_index_mutex); } } -- cgit v1.2.3 From 9a61363a6310dda769b8c7601fc9de8ed81e6bc8 Mon Sep 17 00:00:00 2001 From: Lionel Landwerlin Date: Thu, 10 Oct 2019 16:05:19 +0100 Subject: drm/i915/perf: store the associated engine of a stream We'll use this information later to verify that a client trying to reconfigure the stream does so on the right engine. For now, we want to pull the knowledge of which engine we use into a central property. 
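As a rough illustration of the pattern this patch introduces (record the target engine once when the stream is opened so that later requests can be validated against it), here is a stand-alone sketch with invented names; it is not the i915 code, only the shape of the check that the stored pointer makes possible.

  #include <stdio.h>
  #include <errno.h>

  struct engine { const char *name; };

  struct stream {
      const struct engine *engine;   /* fixed when the stream is opened */
  };

  /* Reject attempts to reconfigure a stream against a different engine. */
  static int stream_reconfigure(struct stream *s, const struct engine *e)
  {
      if (e != s->engine) {
          fprintf(stderr, "stream bound to %s, not %s\n",
                  s->engine->name, e->name);
          return -EINVAL;
      }
      return 0;   /* apply the new configuration here */
  }

  int main(void)
  {
      struct engine rcs0 = { "rcs0" }, vcs0 = { "vcs0" };
      struct stream s = { .engine = &rcs0 };

      printf("same engine:  %d\n", stream_reconfigure(&s, &rcs0));
      printf("other engine: %d\n", stream_reconfigure(&s, &vcs0));
      return 0;
  }

The follow-up patch in this series builds on the same idea by deriving stream->uncore from the stored engine instead of looking it up per call.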
Signed-off-by: Lionel Landwerlin Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20191010150520.26488-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_perf.c | 30 ++++++++++++++++++++++++++---- drivers/gpu/drm/i915/i915_perf_types.h | 5 +++++ 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 5a34cad7d824..1a5c6591b9bb 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -197,6 +197,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_pm.h" +#include "gt/intel_engine_user.h" #include "gt/intel_lrc_reg.h" #include "i915_drv.h" @@ -347,6 +348,7 @@ static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = { * @oa_format: An OA unit HW report format * @oa_periodic: Whether to enable periodic OA unit sampling * @oa_period_exponent: The OA unit sampling period is derived from this + * @engine: The engine (typically rcs0) being monitored by the OA unit * * As read_properties_unlocked() enumerates and validates the properties given * to open a stream of metrics the configuration is built up in the structure @@ -363,6 +365,8 @@ struct perf_open_properties { int oa_format; bool oa_periodic; int oa_period_exponent; + + struct intel_engine_cs *engine; }; static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer); @@ -1205,7 +1209,7 @@ static struct intel_context *oa_pin_context(struct i915_perf_stream *stream) int err; for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { - if (ce->engine->class != RENDER_CLASS) + if (ce->engine != stream->engine) /* first match! */ continue; /* @@ -2127,7 +2131,13 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, int format_size; int ret; - /* If the sysfs metrics/ directory wasn't registered for some + if (!props->engine) { + DRM_DEBUG("OA engine not specified\n"); + return -EINVAL; + } + + /* + * If the sysfs metrics/ directory wasn't registered for some * reason then don't let userspace try their luck with config * IDs */ @@ -2146,7 +2156,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return -ENODEV; } - /* To avoid the complexity of having to accurately filter + /* + * To avoid the complexity of having to accurately filter * counter reports and marshal to the appropriate client * we currently only allow exclusive access */ @@ -2160,6 +2171,9 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return -EINVAL; } + stream->engine = props->engine; + stream->gt = stream->engine->gt; + stream->sample_size = sizeof(struct drm_i915_perf_record_header); format_size = perf->oa_formats[props->oa_format].size; @@ -2711,7 +2725,6 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf, } stream->perf = perf; - stream->gt = &perf->i915->gt; stream->ctx = specific_ctx; ret = i915_oa_stream_init(stream, param, props); @@ -2796,6 +2809,15 @@ static int read_properties_unlocked(struct i915_perf *perf, return -EINVAL; } + /* At the moment we only support using i915-perf on the RCS. 
*/ + props->engine = intel_engine_lookup_user(perf->i915, + I915_ENGINE_CLASS_RENDER, + 0); + if (!props->engine) { + DRM_DEBUG("No RENDER-capable engines\n"); + return -EINVAL; + } + /* Considering that ID = 0 is reserved and assuming that we don't * (currently) expect any configurations to ever specify duplicate * values for a particular property ID then the last _PROP_MAX value is diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h index 2d17059d32ee..82cd3b295037 100644 --- a/drivers/gpu/drm/i915/i915_perf_types.h +++ b/drivers/gpu/drm/i915/i915_perf_types.h @@ -140,6 +140,11 @@ struct i915_perf_stream { */ intel_wakeref_t wakeref; + /** + * @engine: Engine associated with this performance stream. + */ + struct intel_engine_cs *engine; + /** * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*` * properties given when opening a stream, representing the contents -- cgit v1.2.3 From 52111c4628a29973d8be266f45b464318912b199 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 10 Oct 2019 16:05:20 +0100 Subject: drm/i915/perf: Store shortcut to intel_uncore Now that we have the engine stored in i915_perf, we have a means of accessing intel_gt should we require it. However, we are currently only using the intel_gt to find the right intel_uncore, so replace our i915_perf.gt pointer with the more useful i915_perf.uncore. Signed-off-by: Chris Wilson Cc: Lionel Landwerlin Reviewed-by: Lionel Landwerlin Link: https://patchwork.freedesktop.org/patch/msgid/20191010150520.26488-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_perf.c | 48 +++++++++++++++++----------------- drivers/gpu/drm/i915/i915_perf_types.h | 4 +-- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 1a5c6591b9bb..77c3cef64548 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -419,14 +419,14 @@ static int get_oa_config(struct i915_perf *perf, static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK; } static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1); return oastatus1 & GEN7_OASTATUS1_TAIL_MASK; @@ -656,7 +656,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, size_t count, size_t *offset) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; int report_size = stream->oa_buffer.format_size; u8 *oa_buf_base = stream->oa_buffer.vaddr; u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); @@ -866,7 +866,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream, size_t count, size_t *offset) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; u32 oastatus; int ret; @@ -945,7 +945,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream, size_t count, size_t *offset) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; int report_size = stream->oa_buffer.format_size; u8 *oa_buf_base = stream->oa_buffer.vaddr; u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); @@ -1077,7 +1077,7 @@ static int 
gen7_oa_read(struct i915_perf_stream *stream, size_t count, size_t *offset) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; u32 oastatus1; int ret; @@ -1352,8 +1352,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) free_oa_buffer(stream); - intel_uncore_forcewake_put(stream->gt->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(stream->gt->uncore->rpm, stream->wakeref); + intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); + intel_runtime_pm_put(stream->uncore->rpm, stream->wakeref); if (stream->ctx) oa_put_render_ctx_id(stream); @@ -1368,7 +1368,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) static void gen7_init_oa_buffer(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); unsigned long flags; @@ -1416,7 +1416,7 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream) static void gen8_init_oa_buffer(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); unsigned long flags; @@ -1565,7 +1565,7 @@ static void delay_after_mux(void) static int hsw_enable_metric_set(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; const struct i915_oa_config *oa_config = stream->oa_config; /* @@ -1594,7 +1594,7 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream) static void hsw_disable_metric_set(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; intel_uncore_rmw(uncore, GEN6_UCGCTL1, GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0); @@ -1911,7 +1911,7 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream, static int gen8_enable_metric_set(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; const struct i915_oa_config *oa_config = stream->oa_config; int ret; @@ -1964,7 +1964,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream) static void gen8_disable_metric_set(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; /* Reset all contexts' slices/subslices configurations. */ gen8_configure_all_contexts(stream, NULL); @@ -1974,7 +1974,7 @@ static void gen8_disable_metric_set(struct i915_perf_stream *stream) static void gen10_disable_metric_set(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; /* Reset all contexts' slices/subslices configurations. 
*/ gen8_configure_all_contexts(stream, NULL); @@ -1985,7 +1985,7 @@ static void gen10_disable_metric_set(struct i915_perf_stream *stream) static void gen7_oa_enable(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; struct i915_gem_context *ctx = stream->ctx; u32 ctx_id = stream->specific_ctx_id; bool periodic = stream->periodic; @@ -2015,7 +2015,7 @@ static void gen7_oa_enable(struct i915_perf_stream *stream) static void gen8_oa_enable(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; u32 report_format = stream->oa_buffer.format; /* @@ -2060,7 +2060,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream) static void gen7_oa_disable(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; intel_uncore_write(uncore, GEN7_OACONTROL, 0); if (intel_wait_for_register(uncore, @@ -2071,7 +2071,7 @@ static void gen7_oa_disable(struct i915_perf_stream *stream) static void gen8_oa_disable(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = stream->gt->uncore; + struct intel_uncore *uncore = stream->uncore; intel_uncore_write(uncore, GEN8_OACONTROL, 0); if (intel_wait_for_register(uncore, @@ -2172,7 +2172,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, } stream->engine = props->engine; - stream->gt = stream->engine->gt; + stream->uncore = stream->engine->gt->uncore; stream->sample_size = sizeof(struct drm_i915_perf_record_header); @@ -2218,8 +2218,8 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, * In our case we are expecting that taking pm + FORCEWAKE * references will effectively disable RC6. */ - stream->wakeref = intel_runtime_pm_get(stream->gt->uncore->rpm); - intel_uncore_forcewake_get(stream->gt->uncore, FORCEWAKE_ALL); + stream->wakeref = intel_runtime_pm_get(stream->uncore->rpm); + intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL); ret = alloc_oa_buffer(stream); if (ret) @@ -2251,8 +2251,8 @@ err_enable: err_oa_buf_alloc: put_oa_config(stream->oa_config); - intel_uncore_forcewake_put(stream->gt->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(stream->gt->uncore->rpm, stream->wakeref); + intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); + intel_runtime_pm_put(stream->uncore->rpm, stream->wakeref); err_config: if (stream->ctx) diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h index 82cd3b295037..a91ae2d1a543 100644 --- a/drivers/gpu/drm/i915/i915_perf_types.h +++ b/drivers/gpu/drm/i915/i915_perf_types.h @@ -130,9 +130,9 @@ struct i915_perf_stream { struct i915_perf *perf; /** - * @gt: intel_gt container + * @uncore: mmio access path */ - struct intel_gt *gt; + struct intel_uncore *uncore; /** * @wakeref: As we keep the device awake while the perf stream is -- cgit v1.2.3 From b068a86071ca743de28a607a0b22b41b1a3f5fb3 Mon Sep 17 00:00:00 2001 From: James Ausmus Date: Wed, 9 Oct 2019 10:23:14 -0700 Subject: drm/i915: Move SAGV block time to dev_priv MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In prep for newer platforms having more complicated ways to determine the SAGV block time, move the variable to dev_priv, and extract the setting to an initial setup function. 
While we're at it, update the if ladder to follow the new gen -> old gen order preference, and warn on any non-specified gen. v2: Shorten the function name (Ville), return directly (Ville), move sagv_block_time_us value to dev_priv (Ville) v3: Change sagv_block_time_us to u32 (Lucas), Change fallback value to -1 (Lucas), use intel_has_sagv for setup check rather than hand-rolling (Lucas) Cc: Ville Syrjälä Cc: Stanislav Lisovskiy Cc: Lucas De Marchi Signed-off-by: James Ausmus Reviewed-by: Ville Syrjälä Signed-off-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20191004221449.1317-1-james.ausmus@intel.com Link: https://patchwork.freedesktop.org/patch/msgid/20191009172315.11004-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/intel_pm.c | 33 ++++++++++++++++++++++++--------- 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d284b04c492b..c46b339064c0 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1267,6 +1267,8 @@ struct drm_i915_private { I915_SAGV_NOT_CONTROLLED } sagv_status; + u32 sagv_block_time_us; + struct { /* * Raw watermark latency values: diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 53358e33df1b..f00e64f9320c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3638,6 +3638,26 @@ intel_has_sagv(struct drm_i915_private *dev_priv) dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED; } +static void +skl_setup_sagv_block_time(struct drm_i915_private *dev_priv) +{ + if (IS_GEN(dev_priv, 11)) { + dev_priv->sagv_block_time_us = 10; + return; + } else if (IS_GEN(dev_priv, 10)) { + dev_priv->sagv_block_time_us = 20; + return; + } else if (IS_GEN(dev_priv, 9)) { + dev_priv->sagv_block_time_us = 30; + return; + } else { + MISSING_CASE(INTEL_GEN(dev_priv)); + } + + /* Default to an unusable block time */ + dev_priv->sagv_block_time_us = -1; +} + /* * SAGV dynamically adjusts the system agent voltage and clock frequencies * depending on power and performance requirements. The display engine access @@ -3726,18 +3746,10 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state) struct intel_crtc_state *crtc_state; enum pipe pipe; int level, latency; - int sagv_block_time_us; if (!intel_has_sagv(dev_priv)) return false; - if (IS_GEN(dev_priv, 9)) - sagv_block_time_us = 30; - else if (IS_GEN(dev_priv, 10)) - sagv_block_time_us = 20; - else - sagv_block_time_us = 10; - /* * If there are no active CRTCs, no additional checks need be performed */ @@ -3784,7 +3796,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state) * incur memory latencies higher than sagv_block_time_us we * can't enable SAGV. 
*/ - if (latency < sagv_block_time_us) + if (latency < dev_priv->sagv_block_time_us) return false; } @@ -8985,6 +8997,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv) else if (IS_GEN(dev_priv, 5)) i915_ironlake_get_mem_freq(dev_priv); + if (intel_has_sagv(dev_priv)) + skl_setup_sagv_block_time(dev_priv); + /* For FIFO watermark updates */ if (INTEL_GEN(dev_priv) >= 9) { skl_setup_wm_latency(dev_priv); -- cgit v1.2.3 From da80f04792fc0dddcc44201cb4ee96cb011755b6 Mon Sep 17 00:00:00 2001 From: James Ausmus Date: Wed, 9 Oct 2019 10:23:15 -0700 Subject: drm/i915/tgl: Read SAGV block time from PCODE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Starting from TGL, we now need to read the SAGV block time via a PCODE mailbox, rather than having a static value. BSpec: 49326 v2: Fix up pcode val data type (Ville), tighten variable scope (Ville) Cc: Ville Syrjälä Cc: Stanislav Lisovskiy Cc: Lucas De Marchi Signed-off-by: James Ausmus Reviewed-by: Ville Syrjälä Signed-off-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20191004221449.1317-2-james.ausmus@intel.com Link: https://patchwork.freedesktop.org/patch/msgid/20191009172315.11004-2-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 15 ++++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1dc067fc57ab..0fb9030b89f1 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -8878,6 +8878,7 @@ enum { #define GEN9_SAGV_DISABLE 0x0 #define GEN9_SAGV_IS_DISABLED 0x1 #define GEN9_SAGV_ENABLE 0x3 +#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23 #define GEN6_PCODE_DATA _MMIO(0x138128) #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f00e64f9320c..b306e2338f5a 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3641,7 +3641,20 @@ intel_has_sagv(struct drm_i915_private *dev_priv) static void skl_setup_sagv_block_time(struct drm_i915_private *dev_priv) { - if (IS_GEN(dev_priv, 11)) { + if (INTEL_GEN(dev_priv) >= 12) { + u32 val = 0; + int ret; + + ret = sandybridge_pcode_read(dev_priv, + GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, + &val, NULL); + if (!ret) { + dev_priv->sagv_block_time_us = val; + return; + } + + DRM_DEBUG_DRIVER("Couldn't read SAGV block time!\n"); + } else if (IS_GEN(dev_priv, 11)) { dev_priv->sagv_block_time_us = 10; return; } else if (IS_GEN(dev_priv, 10)) { -- cgit v1.2.3 From 38dffe1e4dde1d3174fdce09d67370412843ebb5 Mon Sep 17 00:00:00 2001 From: Jiaxun Yang Date: Thu, 10 Oct 2019 23:01:57 +0800 Subject: MIPS: elf_hwcap: Export userspace ASEs A Golang developer reported MIPS hwcap isn't reflecting instructions that the processor actually supported so programs can't apply optimized code at runtime. Thus we export the ASEs that can be used in userspace programs. 
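To show how the newly exported bits are meant to be consumed, here is a small user-space sketch (not part of the patch) that reads AT_HWCAP with getauxval() and tests a few of the new flags; the bit values simply mirror the uapi header added below and would normally come from <asm/hwcap.h> on a MIPS toolchain.

  #include <stdio.h>
  #include <sys/auxv.h>

  /* Mirrors the new arch/mips/include/uapi/asm/hwcap.h definitions. */
  #define HWCAP_MIPS_MSA      (1 << 1)
  #define HWCAP_MIPS_DSP      (1 << 7)
  #define HWCAP_LOONGSON_MMI  (1 << 11)
  #define HWCAP_LOONGSON_EXT  (1 << 12)

  int main(void)
  {
      unsigned long hwcap = getauxval(AT_HWCAP);

      printf("MSA: %s\n", (hwcap & HWCAP_MIPS_MSA) ? "yes" : "no");
      printf("DSP: %s\n", (hwcap & HWCAP_MIPS_DSP) ? "yes" : "no");
      printf("MMI: %s\n", (hwcap & HWCAP_LOONGSON_MMI) ? "yes" : "no");
      printf("EXT: %s\n", (hwcap & HWCAP_LOONGSON_EXT) ? "yes" : "no");
      return 0;
  }

This is the kind of runtime dispatch the report was about: a Go or C runtime can key optimized code paths off these bits at startup instead of guessing from /proc/cpuinfo.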
Reported-by: Meng Zhuo Signed-off-by: Jiaxun Yang Cc: linux-mips@vger.kernel.org Cc: Paul Burton Cc: # 4.14+ Signed-off-by: Paul Burton --- arch/mips/include/uapi/asm/hwcap.h | 11 +++++++++++ arch/mips/kernel/cpu-probe.c | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/arch/mips/include/uapi/asm/hwcap.h b/arch/mips/include/uapi/asm/hwcap.h index a2aba4b059e6..1ade1daa4921 100644 --- a/arch/mips/include/uapi/asm/hwcap.h +++ b/arch/mips/include/uapi/asm/hwcap.h @@ -6,5 +6,16 @@ #define HWCAP_MIPS_R6 (1 << 0) #define HWCAP_MIPS_MSA (1 << 1) #define HWCAP_MIPS_CRC32 (1 << 2) +#define HWCAP_MIPS_MIPS16 (1 << 3) +#define HWCAP_MIPS_MDMX (1 << 4) +#define HWCAP_MIPS_MIPS3D (1 << 5) +#define HWCAP_MIPS_SMARTMIPS (1 << 6) +#define HWCAP_MIPS_DSP (1 << 7) +#define HWCAP_MIPS_DSP2 (1 << 8) +#define HWCAP_MIPS_DSP3 (1 << 9) +#define HWCAP_MIPS_MIPS16E2 (1 << 10) +#define HWCAP_LOONGSON_MMI (1 << 11) +#define HWCAP_LOONGSON_EXT (1 << 12) +#define HWCAP_LOONGSON_EXT2 (1 << 13) #endif /* _UAPI_ASM_HWCAP_H */ diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index c2eb392597bf..f521cbf934e7 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -2180,6 +2180,39 @@ void cpu_probe(void) elf_hwcap |= HWCAP_MIPS_MSA; } + if (cpu_has_mips16) + elf_hwcap |= HWCAP_MIPS_MIPS16; + + if (cpu_has_mdmx) + elf_hwcap |= HWCAP_MIPS_MDMX; + + if (cpu_has_mips3d) + elf_hwcap |= HWCAP_MIPS_MIPS3D; + + if (cpu_has_smartmips) + elf_hwcap |= HWCAP_MIPS_SMARTMIPS; + + if (cpu_has_dsp) + elf_hwcap |= HWCAP_MIPS_DSP; + + if (cpu_has_dsp2) + elf_hwcap |= HWCAP_MIPS_DSP2; + + if (cpu_has_dsp3) + elf_hwcap |= HWCAP_MIPS_DSP3; + + if (cpu_has_mips16e2) + elf_hwcap |= HWCAP_MIPS_MIPS16E2; + + if (cpu_has_loongson_mmi) + elf_hwcap |= HWCAP_LOONGSON_MMI; + + if (cpu_has_loongson_ext) + elf_hwcap |= HWCAP_LOONGSON_EXT; + + if (cpu_has_loongson_ext2) + elf_hwcap |= HWCAP_LOONGSON_EXT2; + if (cpu_has_vz) cpu_probe_vz(c); -- cgit v1.2.3 From 2f2b4fd674cadd8c6b40eb629e140a14db4068fd Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Thu, 10 Oct 2019 18:54:03 +0000 Subject: MIPS: Disable Loongson MMI instructions for kernel build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GCC 9.x automatically enables support for Loongson MMI instructions when using some -march= flags, and then errors out when -msoft-float is specified with: cc1: error: ‘-mloongson-mmi’ must be used with ‘-mhard-float’ The kernel shouldn't be using these MMI instructions anyway, just as it doesn't use floating point instructions. Explicitly disable them in order to fix the build with GCC 9.x. 
Signed-off-by: Paul Burton Fixes: 3702bba5eb4f ("MIPS: Loongson: Add GCC 4.4 support for Loongson2E") Fixes: 6f7a251a259e ("MIPS: Loongson: Add basic Loongson 2F support") Fixes: 5188129b8c9f ("MIPS: Loongson-3: Improve -march option and move it to Platform") Cc: Huacai Chen Cc: Jiaxun Yang Cc: stable@vger.kernel.org # v2.6.32+ Cc: linux-mips@vger.kernel.org --- arch/mips/loongson64/Platform | 4 ++++ arch/mips/vdso/Makefile | 1 + 2 files changed, 5 insertions(+) diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform index c1a4d4dc4665..9f79908f5063 100644 --- a/arch/mips/loongson64/Platform +++ b/arch/mips/loongson64/Platform @@ -66,6 +66,10 @@ else $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) endif +# Some -march= flags enable MMI instructions, and GCC complains about that +# support being enabled alongside -msoft-float. Thus explicitly disable MMI. +cflags-y += $(call cc-option,-mno-loongson-mmi) + # # Loongson Machines' Support # diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 807f0f782f75..996a934ece7d 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -15,6 +15,7 @@ ccflags-vdso := \ $(filter -mmicromips,$(KBUILD_CFLAGS)) \ $(filter -march=%,$(KBUILD_CFLAGS)) \ $(filter -m%-float,$(KBUILD_CFLAGS)) \ + $(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \ -D__VDSO__ ifdef CONFIG_CC_IS_CLANG -- cgit v1.2.3 From 1047ec868332034d1fbcb2fae19fe6d4cb869ff2 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 4 Oct 2019 09:58:54 -0400 Subject: NFSv4: Fix leak of clp->cl_acceptor string Our client can issue multiple SETCLIENTID operations to the same server in some circumstances. Ensure that calls to nfs4_proc_setclientid() after the first one do not overwrite the previously allocated cl_acceptor string. unreferenced object 0xffff888461031800 (size 32): comm "mount.nfs", pid 2227, jiffies 4294822467 (age 1407.749s) hex dump (first 32 bytes): 6e 66 73 40 6b 6c 69 6d 74 2e 69 62 2e 31 30 31 nfs@klimt.ib.101 35 67 72 61 6e 67 65 72 2e 6e 65 74 00 00 00 00 5granger.net.... backtrace: [<00000000ab820188>] __kmalloc+0x128/0x176 [<00000000eeaf4ec8>] gss_stringify_acceptor+0xbd/0x1a7 [auth_rpcgss] [<00000000e85e3382>] nfs4_proc_setclientid+0x34e/0x46c [nfsv4] [<000000003d9cf1fa>] nfs40_discover_server_trunking+0x7a/0xed [nfsv4] [<00000000b81c3787>] nfs4_discover_server_trunking+0x81/0x244 [nfsv4] [<000000000801b55f>] nfs4_init_client+0x1b0/0x238 [nfsv4] [<00000000977daf7f>] nfs4_set_client+0xfe/0x14d [nfsv4] [<0000000053a68a2a>] nfs4_create_server+0x107/0x1db [nfsv4] [<0000000088262019>] nfs4_remote_mount+0x2c/0x59 [nfsv4] [<00000000e84a2fd0>] legacy_get_tree+0x2d/0x4c [<00000000797e947c>] vfs_get_tree+0x20/0xc7 [<00000000ecabaaa8>] fc_mount+0xe/0x36 [<00000000f15fafc2>] vfs_kern_mount+0x74/0x8d [<00000000a3ff4e26>] nfs_do_root_mount+0x8a/0xa3 [nfsv4] [<00000000d1c2b337>] nfs4_try_mount+0x58/0xad [nfsv4] [<000000004c9bddee>] nfs_fs_mount+0x820/0x869 [nfs] Fixes: f11b2a1cfbf5 ("nfs4: copy acceptor name from context ... 
") Signed-off-by: Chuck Lever Signed-off-by: Anna Schumaker --- fs/nfs/nfs4proc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 11eafcfc490b..ab8ca20fd579 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6106,6 +6106,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, status = nfs4_call_sync_custom(&task_setup_data); if (setclientid.sc_cred) { + kfree(clp->cl_acceptor); clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); put_rpccred(setclientid.sc_cred); } -- cgit v1.2.3 From af84537dbd1b39505d1f3d8023029b4a59666513 Mon Sep 17 00:00:00 2001 From: Benjamin Coddington Date: Wed, 2 Oct 2019 10:40:55 -0400 Subject: SUNRPC: fix race to sk_err after xs_error_report Since commit 4f8943f80883 ("SUNRPC: Replace direct task wakeups from softirq context") there has been a race to the value of the sk_err if both XPRT_SOCK_WAKE_ERROR and XPRT_SOCK_WAKE_DISCONNECT are set. In that case, we may end up losing the sk_err value that existed when xs_error_report was called. Fix this by reverting to the previous behavior: instead of using SO_ERROR to retrieve the value at a later time (which might also return sk_err_soft), copy the sk_err value onto struct sock_xprt, and use that value to wake pending tasks. Signed-off-by: Benjamin Coddington Fixes: 4f8943f80883 ("SUNRPC: Replace direct task wakeups from softirq context") Signed-off-by: Anna Schumaker --- include/linux/sunrpc/xprtsock.h | 1 + net/sunrpc/xprtsock.c | 17 ++++++++--------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 7638dbe7bc50..a940de03808d 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h @@ -61,6 +61,7 @@ struct sock_xprt { struct mutex recv_mutex; struct sockaddr_storage srcaddr; unsigned short srcport; + int xprt_err; /* * UDP socket buffer size parameters diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 9ac88722fa83..70e52f567b2a 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1249,19 +1249,21 @@ static void xs_error_report(struct sock *sk) { struct sock_xprt *transport; struct rpc_xprt *xprt; - int err; read_lock_bh(&sk->sk_callback_lock); if (!(xprt = xprt_from_sock(sk))) goto out; transport = container_of(xprt, struct sock_xprt, xprt); - err = -sk->sk_err; - if (err == 0) + transport->xprt_err = -sk->sk_err; + if (transport->xprt_err == 0) goto out; dprintk("RPC: xs_error_report client %p, error=%d...\n", - xprt, -err); - trace_rpc_socket_error(xprt, sk->sk_socket, err); + xprt, -transport->xprt_err); + trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err); + + /* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */ + smp_mb__before_atomic(); xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR); out: read_unlock_bh(&sk->sk_callback_lock); @@ -2476,7 +2478,6 @@ static void xs_wake_write(struct sock_xprt *transport) static void xs_wake_error(struct sock_xprt *transport) { int sockerr; - int sockerr_len = sizeof(sockerr); if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state)) return; @@ -2485,9 +2486,7 @@ static void xs_wake_error(struct sock_xprt *transport) goto out; if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state)) goto out; - if (kernel_getsockopt(transport->sock, SOL_SOCKET, SO_ERROR, - (char *)&sockerr, &sockerr_len) != 0) - goto out; + sockerr = xchg(&transport->xprt_err, 0); if (sockerr < 0) xprt_wake_pending_tasks(&transport->xprt, 
sockerr); out: -- cgit v1.2.3 From 4ebcb113edcc498888394821bca2e60ef89c6ff3 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 9 Oct 2019 20:55:48 +0200 Subject: r8169: fix jumbo packet handling on resume from suspend Mariusz reported that invalid packets are sent after resume from suspend if jumbo packets are active. It turned out that his BIOS resets chip settings to non-jumbo on resume. Most chip settings are re-initialized on resume from suspend by calling rtl_hw_start(), so let's add configuring jumbo to this function. There's nothing wrong with the commit marked as fixed, it's just the first one where the patch applies cleanly. Fixes: 7366016d2d4c ("r8169: read common register for PCI commit") Reported-by: Mariusz Bialonczyk Tested-by: Mariusz Bialonczyk Signed-off-by: Heiner Kallweit Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/realtek/r8169_main.c | 35 ++++++++++--------------------- 1 file changed, 11 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 74f81fe03810..350b0d949611 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4146,6 +4146,14 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) rtl_lock_config_regs(tp); } +static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu) +{ + if (mtu > ETH_DATA_LEN) + rtl_hw_jumbo_enable(tp); + else + rtl_hw_jumbo_disable(tp); +} + DECLARE_RTL_COND(rtl_chipcmd_cond) { return RTL_R8(tp, ChipCmd) & CmdReset; @@ -4442,11 +4450,6 @@ static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp, static void rtl_hw_start_8168bb(struct rtl8169_private *tp) { RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - - if (tp->dev->mtu <= ETH_DATA_LEN) { - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B | - PCI_EXP_DEVCTL_NOSNOOP_EN); - } } static void rtl_hw_start_8168bef(struct rtl8169_private *tp) @@ -4462,9 +4465,6 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_disable_clock_request(tp); } @@ -4490,9 +4490,6 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) rtl_set_def_aspm_entry_latency(tp); RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); - - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); } static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) @@ -4503,9 +4500,6 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) /* Magic. 
*/ RTL_W8(tp, DBG_REG, 0x20); - - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); } static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) @@ -4611,9 +4605,6 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168e_1); - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_disable_clock_request(tp); /* Reset tx FIFO pointer */ @@ -4636,9 +4627,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8168e_2); - if (tp->dev->mtu <= ETH_DATA_LEN) - rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B); - rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); @@ -5485,6 +5473,8 @@ static void rtl_hw_start(struct rtl8169_private *tp) rtl_set_rx_tx_desc_registers(tp); rtl_lock_config_regs(tp); + rtl_jumbo_config(tp, tp->dev->mtu); + /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ RTL_R16(tp, CPlusCmd); RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); @@ -5498,10 +5488,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) { struct rtl8169_private *tp = netdev_priv(dev); - if (new_mtu > ETH_DATA_LEN) - rtl_hw_jumbo_enable(tp); - else - rtl_hw_jumbo_disable(tp); + rtl_jumbo_config(tp, new_mtu); dev->mtu = new_mtu; netdev_update_features(dev); -- cgit v1.2.3 From 2168da4594040529f6a6cb413c22668a4402f83c Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 9 Oct 2019 12:18:31 -0700 Subject: net: update net_dim documentation after rename Commit 8960b38932be ("linux/dim: Rename externally used net_dim members") renamed the net_dim API, removing the "net_" prefix from the structures and functions. The patch didn't update the net_dim.txt documentation file. Fix the documentation so that its examples match the current code. Fixes: 8960b38932be ("linux/dim: Rename externally used net_dim members", 2019-06-25) Fixes: c002bd529d71 ("linux/dim: Rename externally exposed macros", 2019-06-25) Fixes: 4f75da3666c0 ("linux/dim: Move implementation to .c files") Cc: Tal Gilboa Signed-off-by: Jacob Keller Signed-off-by: Jakub Kicinski --- Documentation/networking/net_dim.txt | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/Documentation/networking/net_dim.txt b/Documentation/networking/net_dim.txt index 9cb31c5e2dcd..9bdb7d5a3ba3 100644 --- a/Documentation/networking/net_dim.txt +++ b/Documentation/networking/net_dim.txt @@ -92,16 +92,16 @@ under some conditions. Part III: Registering a Network Device to DIM ============================================== -Net DIM API exposes the main function net_dim(struct net_dim *dim, -struct net_dim_sample end_sample). This function is the entry point to the Net +Net DIM API exposes the main function net_dim(struct dim *dim, +struct dim_sample end_sample). This function is the entry point to the Net DIM algorithm and has to be called every time the driver would like to check if it should change interrupt moderation parameters. The driver should provide two -data structures: struct net_dim and struct net_dim_sample. Struct net_dim +data structures: struct dim and struct dim_sample. Struct dim describes the state of DIM for a specific object (RX queue, TX queue, other queues, etc.). This includes the current selected profile, previous data samples, the callback function provided by the driver and more. 
-Struct net_dim_sample describes a data sample, which will be compared to the -data sample stored in struct net_dim in order to decide on the algorithm's next +Struct dim_sample describes a data sample, which will be compared to the +data sample stored in struct dim in order to decide on the algorithm's next step. The sample should include bytes, packets and interrupts, measured by the driver. @@ -110,9 +110,9 @@ main net_dim() function. The recommended method is to call net_dim() on each interrupt. Since Net DIM has a built-in moderation and it might decide to skip iterations under certain conditions, there is no need to moderate the net_dim() calls as well. As mentioned above, the driver needs to provide an object of type -struct net_dim to the net_dim() function call. It is advised for each entity -using Net DIM to hold a struct net_dim as part of its data structure and use it -as the main Net DIM API object. The struct net_dim_sample should hold the latest +struct dim to the net_dim() function call. It is advised for each entity +using Net DIM to hold a struct dim as part of its data structure and use it +as the main Net DIM API object. The struct dim_sample should hold the latest bytes, packets and interrupts count. No need to perform any calculations, just include the raw data. @@ -132,19 +132,19 @@ usage is not complete but it should make the outline of the usage clear. my_driver.c: -#include +#include /* Callback for net DIM to schedule on a decision to change moderation */ void my_driver_do_dim_work(struct work_struct *work) { - /* Get struct net_dim from struct work_struct */ - struct net_dim *dim = container_of(work, struct net_dim, - work); + /* Get struct dim from struct work_struct */ + struct dim *dim = container_of(work, struct dim, + work); /* Do interrupt moderation related stuff */ ... /* Signal net DIM work is done and it should move to next iteration */ - dim->state = NET_DIM_START_MEASURE; + dim->state = DIM_START_MEASURE; } /* My driver's interrupt handler */ @@ -152,13 +152,13 @@ int my_driver_handle_interrupt(struct my_driver_entity *my_entity, ...) { ... /* A struct to hold current measured data */ - struct net_dim_sample dim_sample; + struct dim_sample dim_sample; ... /* Initiate data sample struct with current data */ - net_dim_sample(my_entity->events, - my_entity->packets, - my_entity->bytes, - &dim_sample); + dim_update_sample(my_entity->events, + my_entity->packets, + my_entity->bytes, + &dim_sample); /* Call net DIM */ net_dim(&my_entity->dim, dim_sample); ... -- cgit v1.2.3 From 29ee2701529e1905c0e948688f9688c68c8d4ea4 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Thu, 10 Oct 2019 10:16:09 +0200 Subject: net/smc: fix SMCD link group creation with VLAN id If creation of an SMCD link group with VLAN id fails, the initial smc_ism_get_vlan() step has to be reverted as well. 
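For illustration, a minimal sketch of the required unwind ordering follows; the helper names match the diff below, while the reduced function body, the structure types and the smc_ism_get_vlan() signature are assumptions rather than the exact upstream code:

/* Sketch: resources acquired before a failure point have to be released
 * on the error path in reverse order.  The VLAN reference is taken first,
 * so a later allocation failure must not simply return.
 */
static int smcd_lgr_create_sketch(struct smc_init_info *ini)
{
        struct smc_link_group *lgr;
        int rc;

        rc = smc_ism_get_vlan(ini->ism_dev, ini->vlan_id);
        if (rc)
                return rc;                      /* nothing to undo yet */

        lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
        if (!lgr) {
                rc = SMC_CLC_DECL_MEM;
                goto put_vlan;                  /* revert the get_vlan step */
        }
        /* further setup elided; later failures also jump to put_vlan */
        return 0;

put_vlan:
        if (ini->is_smcd && ini->vlan_id)
                smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
        return rc;
}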
Fixes: c6ba7c9ba43d ("net/smc: add base infrastructure for SMC-D and ISM") Signed-off-by: Ursula Braun Signed-off-by: Karsten Graul Signed-off-by: Jakub Kicinski --- net/smc/smc_core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 4ca50ddf8d16..88556f0251ab 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -213,7 +213,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini) lgr = kzalloc(sizeof(*lgr), GFP_KERNEL); if (!lgr) { rc = SMC_CLC_DECL_MEM; - goto out; + goto ism_put_vlan; } lgr->is_smcd = ini->is_smcd; lgr->sync_err = 0; @@ -289,6 +289,9 @@ clear_llc_lnk: smc_llc_link_clear(lnk); free_lgr: kfree(lgr); +ism_put_vlan: + if (ini->is_smcd && ini->vlan_id) + smc_ism_put_vlan(ini->ism_dev, ini->vlan_id); out: if (rc < 0) { if (rc == -ENOMEM) -- cgit v1.2.3 From 882dcfe5a1785c20f45820cbe6fec4b8b647c946 Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Thu, 10 Oct 2019 10:16:10 +0200 Subject: net/smc: receive returns without data smc_cdc_rxed_any_close_or_senddone() is used as an end condition for the receive loop. This conflicts with smc_cdc_msg_recv_action() which could run in parallel and set the bits checked by smc_cdc_rxed_any_close_or_senddone() before the receive is processed. In that case we could return from receive with no data, although data is available. The same applies to smc_rx_wait(). Fix this by checking for RCV_SHUTDOWN only, which is set in smc_cdc_msg_recv_action() after the receive was actually processed. Fixes: 952310ccf2d8 ("smc: receive data from RMBE") Reviewed-by: Ursula Braun Signed-off-by: Karsten Graul Signed-off-by: Jakub Kicinski --- net/smc/smc_rx.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c index 413a6abf227e..000002642288 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c @@ -211,8 +211,7 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo, rc = sk_wait_event(sk, timeo, sk->sk_err || sk->sk_shutdown & RCV_SHUTDOWN || - fcrit(conn) || - smc_cdc_rxed_any_close_or_senddone(conn), + fcrit(conn), &wait); remove_wait_queue(sk_sleep(sk), &wait); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); @@ -310,7 +309,6 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, smc_rx_update_cons(smc, 0); if (sk->sk_shutdown & RCV_SHUTDOWN || - smc_cdc_rxed_any_close_or_senddone(conn) || conn->local_tx_ctrl.conn_state_flags.peer_conn_abort) break; -- cgit v1.2.3 From 107529e31a87acd475ff6a0f82745821b8f70fec Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Thu, 10 Oct 2019 10:16:11 +0200 Subject: net/smc: receive pending data after RCV_SHUTDOWN smc_rx_recvmsg() first checks if data is available, and then if RCV_SHUTDOWN is set. There is a race when smc_cdc_msg_recv_action() runs in between these two checks, receives data and sets RCV_SHUTDOWN. In that case smc_rx_recvmsg() would return from receive without processing the available data. Fix that with a final check for data available if RCV_SHUTDOWN is set. Move the check for data into a function and call it twice. Use the existing helper smc_rx_data_available().
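Condensed into a sketch, the loop then tests in this order (names as in the diff below; the enclosing loop and the copy path are elided):

/* Sketch: data is tested once, and tested again after RCV_SHUTDOWN is
 * observed, so anything smc_cdc_msg_recv_action() queued between the two
 * tests is still copied out before the loop breaks.
 */
if (smc_rx_recvmsg_data_available(smc))
        goto copy;

if (sk->sk_shutdown & RCV_SHUTDOWN ||
    conn->local_tx_ctrl.conn_state_flags.peer_conn_abort) {
        /* recv_action could have run after the first check */
        if (smc_rx_recvmsg_data_available(smc))
                goto copy;
        break;
}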
Fixes: 952310ccf2d8 ("smc: receive data from RMBE") Reviewed-by: Ursula Braun Signed-off-by: Karsten Graul Signed-off-by: Jakub Kicinski --- net/smc/smc_rx.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c index 000002642288..97e8369002d7 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c @@ -261,6 +261,18 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len, return -EAGAIN; } +static bool smc_rx_recvmsg_data_available(struct smc_sock *smc) +{ + struct smc_connection *conn = &smc->conn; + + if (smc_rx_data_available(conn)) + return true; + else if (conn->urg_state == SMC_URG_VALID) + /* we received a single urgent Byte - skip */ + smc_rx_update_cons(smc, 0); + return false; +} + /* smc_rx_recvmsg - receive data from RMBE * @msg: copy data to receive buffer * @pipe: copy data to pipe if set - indicates splice() call @@ -302,15 +314,18 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, if (read_done >= target || (pipe && read_done)) break; - if (atomic_read(&conn->bytes_to_rcv)) + if (smc_rx_recvmsg_data_available(smc)) goto copy; - else if (conn->urg_state == SMC_URG_VALID) - /* we received a single urgent Byte - skip */ - smc_rx_update_cons(smc, 0); if (sk->sk_shutdown & RCV_SHUTDOWN || - conn->local_tx_ctrl.conn_state_flags.peer_conn_abort) + conn->local_tx_ctrl.conn_state_flags.peer_conn_abort) { + /* smc_cdc_msg_recv_action() could have run after + * above smc_rx_recvmsg_data_available() + */ + if (smc_rx_recvmsg_data_available(smc)) + goto copy; break; + } if (read_done) { if (sk->sk_err || -- cgit v1.2.3 From 7adf4eaf60f3d8c3584bed51fe7066d4dfc2cbe1 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 10 Oct 2019 21:42:58 -0600 Subject: io_uring: fix sequence logic for timeout requests We have two ways a request can be deferred: 1) It's a regular request that depends on another one 2) It's a timeout that tracks completions We have a shared helper to determine whether to defer, and that attempts to make the right decision based on the request. But we only have some of this information in the caller. Un-share the two timeout/defer helpers so the caller can use the right one. 
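In sketch form, the two checks end up split like this (the helper bodies mirror the diff below, with comments added for illustration):

/* A timeout always tracks the CQ sequence, so it compares unconditionally. */
static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
                                       struct io_kiocb *req)
{
        return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
}

/* A regular deferred request only waits if it is a drain request. */
static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
                                     struct io_kiocb *req)
{
        if ((req->flags & (REQ_F_IO_DRAIN | REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
                return false;
        return __io_sequence_defer(ctx, req);
}

The defer list walker calls io_sequence_defer() while the timeout list walker calls __io_sequence_defer() directly, so the decision no longer has to be guessed from request flags.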
Fixes: 5262f567987d ("io_uring: IORING_OP_TIMEOUT support") Reported-by: yangerkun Reviewed-by: Jackie Liu Signed-off-by: Jens Axboe --- fs/io_uring.c | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/fs/io_uring.c b/fs/io_uring.c index 2c44648217bd..38d274fc0f25 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -415,27 +415,27 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) return ctx; } +static inline bool __io_sequence_defer(struct io_ring_ctx *ctx, + struct io_kiocb *req) +{ + return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped; +} + static inline bool io_sequence_defer(struct io_ring_ctx *ctx, struct io_kiocb *req) { - /* timeout requests always honor sequence */ - if (!(req->flags & REQ_F_TIMEOUT) && - (req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN) + if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN) return false; - return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped; + return __io_sequence_defer(ctx, req); } -static struct io_kiocb *__io_get_deferred_req(struct io_ring_ctx *ctx, - struct list_head *list) +static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) { struct io_kiocb *req; - if (list_empty(list)) - return NULL; - - req = list_first_entry(list, struct io_kiocb, list); - if (!io_sequence_defer(ctx, req)) { + req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list); + if (req && !io_sequence_defer(ctx, req)) { list_del_init(&req->list); return req; } @@ -443,14 +443,17 @@ static struct io_kiocb *__io_get_deferred_req(struct io_ring_ctx *ctx, return NULL; } -static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) -{ - return __io_get_deferred_req(ctx, &ctx->defer_list); -} - static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx) { - return __io_get_deferred_req(ctx, &ctx->timeout_list); + struct io_kiocb *req; + + req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list); + if (req && !__io_sequence_defer(ctx, req)) { + list_del_init(&req->list); + return req; + } + + return NULL; } static void __io_commit_cqring(struct io_ring_ctx *ctx) -- cgit v1.2.3 From 2272905a4580f26630f7d652cc33935b59f96d4c Mon Sep 17 00:00:00 2001 From: Emmanuel Nicolet Date: Tue, 8 Oct 2019 16:13:42 +0200 Subject: spufs: fix a crash in spufs_create_root() The spu_fs_context was not set in fc->fs_private, this caused a crash when accessing ctx->mode in spufs_create_root(). 
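A rough sketch of the expected wiring (the sbi/s_fs_info setup of the real function is omitted here, and the context structure and allocation details are assumptions based on the commit text and the diff context):

/* Sketch: whatever the later get_tree()/fill-super path reads through
 * fc->fs_private must be stored during init_fs_context; leaving the
 * pointer NULL makes spufs_create_root() dereference it via ctx->mode.
 */
static int spufs_init_fs_context_sketch(struct fs_context *fc)
{
        struct spufs_fs_context *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->gid = current_gid();
        ctx->mode = 0755;

        fc->fs_private = ctx;           /* the assignment that was missing */
        fc->ops = &spufs_context_ops;
        return 0;
}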
Fixes: d2e0981c3b9a ("vfs: Convert spufs to use the new mount API") Signed-off-by: Emmanuel Nicolet Signed-off-by: Michael Ellerman Acked-by: Arnd Bergmann Link: https://lore.kernel.org/r/20191008141342.GA266797@gmail.com --- arch/powerpc/platforms/cell/spufs/inode.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 1d93e55a2de1..2dd452a047cd 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -761,6 +761,7 @@ static int spufs_init_fs_context(struct fs_context *fc) ctx->gid = current_gid(); ctx->mode = 0755; + fc->fs_private = ctx; fc->s_fs_info = sbi; fc->ops = &spufs_context_ops; return 0; -- cgit v1.2.3 From 10deeac921647c929c67752d377f22e632d55d1c Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Fri, 20 Sep 2019 10:44:47 -0700 Subject: MAINTAINERS: kgdb: Add myself as a reviewer for kgdb/kdb I'm interested in kdb / kgdb and have sent various fixes over the years. I'd like to get CCed on patches so I can be aware of them and also help review. Signed-off-by: Douglas Anderson Acked-by: Daniel Thompson Link: https://lore.kernel.org/r/20190920104404.1.I237e68e8825e2d6ac26f8e847f521fe2fcc3705a@changeid Signed-off-by: Greg Kroah-Hartman --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 296de2b51c83..acdd996774d6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9075,6 +9075,7 @@ F: security/keys/ KGDB / KDB /debug_core M: Jason Wessel M: Daniel Thompson +R: Douglas Anderson W: http://kgdb.wiki.kernel.org/ L: kgdb-bugreport@lists.sourceforge.net T: git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git -- cgit v1.2.3 From 442f1e746e8187b9deb1590176f6b0ff19686b11 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 30 Sep 2019 14:45:22 -0700 Subject: firmware: google: increment VPD key_len properly Commit 4b708b7b1a2c ("firmware: google: check if size is valid when decoding VPD data") adds length checks, but the new vpd_decode_entry() function botched the logic -- it adds the key length twice, instead of adding the key and value lengths separately. On my local system, this means vpd.c's vpd_section_create_attribs() hits an error case after the first attribute it parses, since it's no longer looking at the correct offset. With this patch, I'm back to seeing all the correct attributes in /sys/firmware/vpd/... 
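To make the offset arithmetic concrete, here is a small sketch with a deliberately simplified entry layout (single-byte length prefixes; the real VPD encoding and the driver's helpers differ):

/* Sketch: each entry is <key_len><key><value_len><value>.  The cursor must
 * advance by the whole entry length; advancing by less (the key length
 * counted twice) leaves it inside the value, so every later entry is
 * parsed from a wrong offset.
 */
static int vpd_walk_sketch(const u8 *buf, u32 max_len)
{
        u32 consumed = 0;

        while (consumed + 2 <= max_len) {
                u32 key_len = buf[consumed];
                u32 value_len, entry_len;

                if (consumed + 1 + key_len + 1 > max_len)
                        return -EINVAL;
                value_len = buf[consumed + 1 + key_len];
                entry_len = 1 + key_len + 1 + value_len;
                if (max_len - consumed < entry_len)
                        return -EINVAL;

                /* The bug amounted to not advancing by the full entry_len. */
                consumed += entry_len;
        }
        return 0;
}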
Fixes: 4b708b7b1a2c ("firmware: google: check if size is valid when decoding VPD data") Cc: Cc: Hung-Te Lin Signed-off-by: Brian Norris Reviewed-by: Stephen Boyd Reviewed-by: Guenter Roeck Link: https://lore.kernel.org/r/20190930214522.240680-1-briannorris@chromium.org Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/google/vpd_decode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c index dda525c0f968..5c6f2a74f104 100644 --- a/drivers/firmware/google/vpd_decode.c +++ b/drivers/firmware/google/vpd_decode.c @@ -52,7 +52,7 @@ static int vpd_decode_entry(const u32 max_len, const u8 *input_buf, if (max_len - consumed < *entry_len) return VPD_FAIL; - consumed += decoded_len; + consumed += *entry_len; *_consumed = consumed; return VPD_OK; } -- cgit v1.2.3 From 9c4a14f8cceeffa01334e3229888746aa7dd95fe Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 10 Oct 2019 08:14:25 +0100 Subject: drm/i915: Note the addition of timeslicing to the pretend scheduler Since writing the comment that the scheduler is entirely passive, we've added minimal timeslicing which adds the most primitive of active elements (a timeout and reschedule). Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Cc: Ramalingam C Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20191010071434.31195-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/i915_scheduler_types.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h index aad81acba9dc..d18e70550054 100644 --- a/drivers/gpu/drm/i915/i915_scheduler_types.h +++ b/drivers/gpu/drm/i915/i915_scheduler_types.h @@ -49,6 +49,15 @@ struct i915_sched_attr { * DAG of each request, we are able to insert it into a sorted queue when it * is ready, and are able to reorder its portion of the graph to accommodate * dynamic priority changes. + * + * Ok, there is now one active element to the "scheduler" in the backends. + * We let a new context run for a small amount of time before re-evaluating + * the run order. As we re-evaluate, we maintain the strict ordering of + * dependencies, but attempt to rotate the active contexts (the current context + * is put to the back of its priority queue, then reshuffling its dependents). + * This provides minimal timeslicing and prevents a userspace hog (e.g. + * something waiting on a user semaphore [VkEvent]) from denying service to + * others. */ struct i915_sched_node { struct list_head signalers_list; /* those before us, we depend upon */ -- cgit v1.2.3 From c97fb526ca0666e515e94f96e922dfc50467fd01 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 10 Oct 2019 08:14:26 +0100 Subject: drm/i915/execlists: Leave tell-tales as to why pending[] is bad Before we BUG out with bad pending state, leave a telltale as to which test failed. 
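In sketch form, each validation now leaves a breadcrumb before bailing out (this is the pattern from the diff below, shown for one of the checks):

/* A bare 'return false' gave no hint which test tripped; pairing every
 * early exit with GEM_TRACE_ERR() records the reason in dmesg and in the
 * trace buffer before the subsequent GEM_BUG_ON() fires.
 */
if (!i915_vma_is_pinned(ce->state)) {
        GEM_TRACE_ERR("Unpinned context in pending[%zd]\n",
                      port - execlists->pending);
        return false;
}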
Signed-off-by: Chris Wilson Cc: Mika Kuoppala Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20191010071434.31195-2-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 30 +++++++++++++++++++++++++----- drivers/gpu/drm/i915/i915_gem.h | 11 +++++++---- 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index bd8ba98e35df..fd609ce4313a 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1089,25 +1089,45 @@ assert_pending_valid(const struct intel_engine_execlists *execlists, trace_ports(execlists, msg, execlists->pending); - if (!execlists->pending[0]) + if (!execlists->pending[0]) { + GEM_TRACE_ERR("Nothing pending for promotion!\n"); return false; + } - if (execlists->pending[execlists_num_ports(execlists)]) + if (execlists->pending[execlists_num_ports(execlists)]) { + GEM_TRACE_ERR("Excess pending[%d] for promotion!\n", + execlists_num_ports(execlists)); return false; + } for (port = execlists->pending; (rq = *port); port++) { - if (ce == rq->hw_context) + if (ce == rq->hw_context) { + GEM_TRACE_ERR("Duplicate context in pending[%zd]\n", + port - execlists->pending); return false; + } ce = rq->hw_context; if (i915_request_completed(rq)) continue; - if (i915_active_is_idle(&ce->active)) + if (i915_active_is_idle(&ce->active)) { + GEM_TRACE_ERR("Inactive context in pending[%zd]\n", + port - execlists->pending); + return false; + } + + if (!i915_vma_is_pinned(ce->state)) { + GEM_TRACE_ERR("Unpinned context in pending[%zd]\n", + port - execlists->pending); return false; + } - if (!i915_vma_is_pinned(ce->state)) + if (!i915_vma_is_pinned(ce->ring->vma)) { + GEM_TRACE_ERR("Unpinned ringbuffer in pending[%zd]\n", + port - execlists->pending); return false; + } } return ce; diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index 6795f1daa3d5..db20d2b0842b 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -37,10 +37,8 @@ struct drm_i915_private; #define GEM_SHOW_DEBUG() (drm_debug & DRM_UT_DRIVER) #define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \ - pr_err("%s:%d GEM_BUG_ON(%s)\n", \ - __func__, __LINE__, __stringify(condition)); \ - GEM_TRACE("%s:%d GEM_BUG_ON(%s)\n", \ - __func__, __LINE__, __stringify(condition)); \ + GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \ + __func__, __LINE__, __stringify(condition)); \ BUG(); \ } \ } while(0) @@ -66,11 +64,16 @@ struct drm_i915_private; #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM) #define GEM_TRACE(...) trace_printk(__VA_ARGS__) +#define GEM_TRACE_ERR(...) do { \ + pr_err(__VA_ARGS__); \ + trace_printk(__VA_ARGS__); \ +} while (0) #define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL) #define GEM_TRACE_DUMP_ON(expr) \ do { if (expr) ftrace_dump(DUMP_ALL); } while (0) #else #define GEM_TRACE(...) do { } while (0) +#define GEM_TRACE_ERR(...) do { } while (0) #define GEM_TRACE_DUMP() do { } while (0) #define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr) #endif -- cgit v1.2.3 From 062795fcdcb2d22822fb42644b1d76a8ad8439b3 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Tue, 8 Oct 2019 17:02:32 +0200 Subject: s390/uaccess: avoid (false positive) compiler warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Depending on inlining decisions by the compiler, __get/put_user_fn might become out of line. 
Then the compiler is no longer able to tell that size can only be 1,2,4 or 8 due to the check in __get/put_user resulting in false positives like ./arch/s390/include/asm/uaccess.h: In function ‘__put_user_fn’: ./arch/s390/include/asm/uaccess.h:113:9: warning: ‘rc’ may be used uninitialized in this function [-Wmaybe-uninitialized] 113 | return rc; | ^~ ./arch/s390/include/asm/uaccess.h: In function ‘__get_user_fn’: ./arch/s390/include/asm/uaccess.h:143:9: warning: ‘rc’ may be used uninitialized in this function [-Wmaybe-uninitialized] 143 | return rc; | ^~ These functions are supposed to be always inlined. Mark it as such. Signed-off-by: Christian Borntraeger Signed-off-by: Vasily Gorbik --- arch/s390/include/asm/uaccess.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index bd2fd9a7821d..a470f1fa9f2a 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -83,7 +83,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n); __rc; \ }) -static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) +static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) { unsigned long spec = 0x010000UL; int rc; @@ -113,7 +113,7 @@ static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) return rc; } -static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) +static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) { unsigned long spec = 0x01UL; int rc; -- cgit v1.2.3 From cbbf2787782ca5d17b479deb962a07f0421a7932 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Fri, 11 Oct 2019 11:33:45 +0100 Subject: drm/i915/execlists: Only mark incomplete requests as -EIO on cancelling Only the requests that have not completed do we want to change the status of to signal the -EIO when cancelling the inflight set of requests upon wedging. Reported-by: Tvrtko Ursulin Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20191011103345.26013-1-chris@chris-wilson.co.uk --- drivers/gpu/drm/i915/gt/intel_lrc.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index fd609ce4313a..13fa61ed85de 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -247,8 +247,12 @@ static void __context_pin_release(struct intel_context *ce) static void mark_eio(struct i915_request *rq) { - if (!i915_request_signaled(rq)) - dma_fence_set_error(&rq->fence, -EIO); + if (i915_request_completed(rq)) + return; + + GEM_BUG_ON(i915_request_signaled(rq)); + + dma_fence_set_error(&rq->fence, -EIO); i915_request_mark_complete(rq); } -- cgit v1.2.3 From c461e8df0c9bc46b3696dfb5d1df2f09d755af53 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 26 Sep 2019 13:43:10 -0400 Subject: tools/virtio: more stubs fix test module build. Signed-off-by: Michael S. 
Tsirkin --- tools/virtio/crypto/hash.h | 0 tools/virtio/linux/dma-mapping.h | 2 ++ 2 files changed, 2 insertions(+) create mode 100644 tools/virtio/crypto/hash.h diff --git a/tools/virtio/crypto/hash.h b/tools/virtio/crypto/hash.h new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tools/virtio/linux/dma-mapping.h b/tools/virtio/linux/dma-mapping.h index f91aeb5fe571..8f41cd6bd5c0 100644 --- a/tools/virtio/linux/dma-mapping.h +++ b/tools/virtio/linux/dma-mapping.h @@ -29,4 +29,6 @@ enum dma_data_direction { #define dma_unmap_single(...) do { } while (0) #define dma_unmap_page(...) do { } while (0) +#define dma_max_mapping_size(...) SIZE_MAX + #endif -- cgit v1.2.3 From 48f9bcf91461b43249949f4e172b5c0245dde751 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sat, 5 Oct 2019 09:46:41 -0700 Subject: net: sctp: Rename fallthrough label to unhandled fallthrough will become a pseudo reserved keyword so this only use of fallthrough is better renamed to allow it. Signed-off-by: Joe Perches Reviewed-by: Nick Desaulniers Reviewed-by: Kees Cook Acked-by: Neil Horman Signed-off-by: Linus Torvalds --- net/sctp/sm_make_chunk.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index e41ed2e0ae7d..48d63956a68c 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2155,7 +2155,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net, case SCTP_PARAM_SET_PRIMARY: if (ep->asconf_enable) break; - goto fallthrough; + goto unhandled; case SCTP_PARAM_HOST_NAME_ADDRESS: /* Tell the peer, we won't support this param. */ @@ -2166,11 +2166,11 @@ static enum sctp_ierror sctp_verify_param(struct net *net, case SCTP_PARAM_FWD_TSN_SUPPORT: if (ep->prsctp_enable) break; - goto fallthrough; + goto unhandled; case SCTP_PARAM_RANDOM: if (!ep->auth_enable) - goto fallthrough; + goto unhandled; /* SCTP-AUTH: Secion 6.1 * If the random number is not 32 byte long the association @@ -2187,7 +2187,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net, case SCTP_PARAM_CHUNKS: if (!ep->auth_enable) - goto fallthrough; + goto unhandled; /* SCTP-AUTH: Section 3.2 * The CHUNKS parameter MUST be included once in the INIT or @@ -2203,7 +2203,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net, case SCTP_PARAM_HMAC_ALGO: if (!ep->auth_enable) - goto fallthrough; + goto unhandled; hmacs = (struct sctp_hmac_algo_param *)param.p; n_elt = (ntohs(param.p->length) - @@ -2226,7 +2226,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net, retval = SCTP_IERROR_ABORT; } break; -fallthrough: +unhandled: default: pr_debug("%s: unrecognized param:%d for chunk:%d\n", __func__, ntohs(param.p->type), cid); -- cgit v1.2.3 From 294f69e662d1570703e9b56e95be37a9fd3afba5 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sat, 5 Oct 2019 09:46:42 -0700 Subject: compiler_attributes.h: Add 'fallthrough' pseudo keyword for switch/case use Reserve the pseudo keyword 'fallthrough' for the ability to convert the various case block /* fallthrough */ style comments to appear to be an actual reserved word with the same gcc case block missing fallthrough warning capability. All switch/case blocks now should end in one of: break; fallthrough; goto