Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_video.c | 16
-rw-r--r--  drivers/acpi/device_pm.c | 13
-rw-r--r--  drivers/acpi/processor_idle.c | 2
-rw-r--r--  drivers/acpi/resource.c | 7
-rw-r--r--  drivers/acpi/scan.c | 7
-rw-r--r--  drivers/ata/libata-scsi.c | 9
-rw-r--r--  drivers/cpufreq/amd-pstate.c | 71
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c | 2
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-nvmem.c | 73
-rw-r--r--  drivers/dma-buf/dma-resv.c | 2
-rw-r--r--  drivers/firewire/core-device.c | 11
-rw-r--r--  drivers/firewire/sbp2.c | 6
-rw-r--r--  drivers/firmware/efi/unaccepted_memory.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 54
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 159
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dc_features.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 84
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_detection.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 1
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h | 29
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_pp_interface.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 18
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_internal.h | 1
-rw-r--r--  drivers/gpu/drm/bridge/panel.c | 17
-rw-r--r--  drivers/gpu/drm/drm_gpuvm.c | 2
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 33
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 28
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_user.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 6
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt36523.c | 4
-rw-r--r--  drivers/iommu/intel/dmar.c | 18
-rw-r--r--  drivers/iommu/intel/iommu.c | 18
-rw-r--r--  drivers/iommu/intel/iommu.h | 3
-rw-r--r--  drivers/iommu/intel/svm.c | 26
-rw-r--r--  drivers/iommu/iommu.c | 79
-rw-r--r--  drivers/iommu/of_iommu.c | 14
-rw-r--r--  drivers/leds/led-class.c | 14
-rw-r--r--  drivers/md/bcache/btree.c | 16
-rw-r--r--  drivers/md/bcache/journal.c | 20
-rw-r--r--  drivers/md/bcache/movinggc.c | 16
-rw-r--r--  drivers/md/bcache/request.c | 74
-rw-r--r--  drivers/md/bcache/request.h | 2
-rw-r--r--  drivers/md/bcache/super.c | 40
-rw-r--r--  drivers/md/bcache/writeback.c | 16
-rw-r--r--  drivers/md/dm-flakey.c | 2
-rw-r--r--  drivers/md/dm-verity-fec.c | 3
-rw-r--r--  drivers/md/dm-verity-target.c | 7
-rw-r--r--  drivers/md/dm-verity.h | 6
-rw-r--r--  drivers/media/pci/mgb4/Kconfig | 1
-rw-r--r--  drivers/media/pci/mgb4/mgb4_core.c | 20
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_pipe.c | 2
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_rpf.c | 10
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_rwpf.c | 8
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_rwpf.h | 4
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_wpf.c | 29
-rw-r--r--  drivers/mmc/core/block.c | 2
-rw-r--r--  drivers/mmc/core/core.c | 9
-rw-r--r--  drivers/mmc/host/cqhci-core.c | 44
-rw-r--r--  drivers/mmc/host/sdhci-pci-gli.c | 54
-rw-r--r--  drivers/mmc/host/sdhci-sprd.c | 25
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 26
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 16
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c | 122
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 25
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 120
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 9
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 69
-rw-r--r--  drivers/net/ethernet/renesas/rswitch.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 4
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.c | 2
-rw-r--r--  drivers/net/netdevsim/bpf.c | 4
-rw-r--r--  drivers/net/netkit.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 4
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7925/main.c | 4
-rw-r--r--  drivers/nvme/host/core.c | 34
-rw-r--r--  drivers/phy/Kconfig | 1
-rw-r--r--  drivers/phy/Makefile | 1
-rw-r--r--  drivers/phy/realtek/Kconfig | 32
-rw-r--r--  drivers/phy/realtek/Makefile | 3
-rw-r--r--  drivers/phy/realtek/phy-rtk-usb2.c | 1325
-rw-r--r--  drivers/phy/realtek/phy-rtk-usb3.c | 761
-rw-r--r--  drivers/pinctrl/cirrus/Kconfig | 3
-rw-r--r--  drivers/pinctrl/core.c | 6
-rw-r--r--  drivers/pinctrl/nxp/pinctrl-s32cc.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-cy8c95x0.c | 1
-rw-r--r--  drivers/pinctrl/realtek/pinctrl-rtd.c | 4
-rw-r--r--  drivers/pinctrl/stm32/pinctrl-stm32.c | 13
-rw-r--r--  drivers/pmdomain/arm/scmi_perf_domain.c | 2
-rw-r--r--  drivers/pmdomain/qcom/rpmpd.c | 1
-rw-r--r--  drivers/powercap/dtpm_cpu.c | 6
-rw-r--r--  drivers/powercap/dtpm_devfreq.c | 11
-rw-r--r--  drivers/scsi/sd.c | 9
-rw-r--r--  drivers/thunderbolt/switch.c | 6
-rw-r--r--  drivers/thunderbolt/tb.c | 12
-rw-r--r--  drivers/ufs/core/ufshcd.c | 13
-rw-r--r--  drivers/usb/cdns3/cdnsp-ring.c | 3
-rw-r--r--  drivers/usb/core/config.c | 3
-rw-r--r--  drivers/usb/core/hub.c | 23
-rw-r--r--  drivers/usb/dwc2/hcd_intr.c | 15
-rw-r--r--  drivers/usb/dwc3/core.c | 2
-rw-r--r--  drivers/usb/dwc3/drd.c | 2
-rw-r--r--  drivers/usb/dwc3/dwc3-qcom.c | 69
-rw-r--r--  drivers/usb/dwc3/dwc3-rtk.c | 8
-rw-r--r--  drivers/usb/host/xhci-mtk-sch.c | 13
-rw-r--r--  drivers/usb/host/xhci-mtk.h | 2
-rw-r--r--  drivers/usb/host/xhci-plat.c | 50
-rw-r--r--  drivers/usb/misc/onboard_usb_hub.c | 2
-rw-r--r--  drivers/usb/misc/onboard_usb_hub.h | 7
-rw-r--r--  drivers/usb/misc/usb-ljca.c | 17
-rw-r--r--  drivers/usb/serial/option.c | 11
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c | 12
-rw-r--r--  drivers/usb/typec/tipd/core.c | 14
-rw-r--r--  drivers/vfio/pci/pds/pci_drv.c | 4
-rw-r--r--  drivers/vfio/pci/pds/vfio_dev.c | 30
-rw-r--r--  drivers/vfio/pci/pds/vfio_dev.h | 2
-rw-r--r--  drivers/xen/events/events_base.c | 4
-rw-r--r--  drivers/xen/privcmd.c | 2
-rw-r--r--  drivers/xen/swiotlb-xen.c | 1
173 files changed, 1634 insertions, 3005 deletions
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 0b7a01f38b65..6cee536c229a 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -253,8 +253,7 @@ static const struct backlight_ops acpi_backlight_ops = {
static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
unsigned long *state)
{
- struct acpi_device *device = cooling_dev->devdata;
- struct acpi_video_device *video = acpi_driver_data(device);
+ struct acpi_video_device *video = cooling_dev->devdata;
*state = video->brightness->count - ACPI_VIDEO_FIRST_LEVEL - 1;
return 0;
@@ -263,8 +262,7 @@ static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
unsigned long *state)
{
- struct acpi_device *device = cooling_dev->devdata;
- struct acpi_video_device *video = acpi_driver_data(device);
+ struct acpi_video_device *video = cooling_dev->devdata;
unsigned long long level;
int offset;
@@ -283,8 +281,7 @@ static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
static int
video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long state)
{
- struct acpi_device *device = cooling_dev->devdata;
- struct acpi_video_device *video = acpi_driver_data(device);
+ struct acpi_video_device *video = cooling_dev->devdata;
int level;
if (state >= video->brightness->count - ACPI_VIDEO_FIRST_LEVEL)
@@ -1125,7 +1122,6 @@ static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg)
strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
- device->driver_data = data;
data->device_id = device_id;
data->video = video;
@@ -1747,8 +1743,8 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
device->backlight->props.brightness =
acpi_video_get_brightness(device->backlight);
- device->cooling_dev = thermal_cooling_device_register("LCD",
- device->dev, &video_cooling_ops);
+ device->cooling_dev = thermal_cooling_device_register("LCD", device,
+ &video_cooling_ops);
if (IS_ERR(device->cooling_dev)) {
/*
* Set cooling_dev to NULL so we don't crash trying to free it.
@@ -2031,7 +2027,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
* HP ZBook Fury 16 G10 requires ACPI video's child devices have _PS0
* evaluated to have functional panel brightness control.
*/
- acpi_device_fix_up_power_extended(device);
+ acpi_device_fix_up_power_children(device);
pr_info("%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index f007116a8427..3b4d048c4941 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -397,6 +397,19 @@ void acpi_device_fix_up_power_extended(struct acpi_device *adev)
}
EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_extended);
+/**
+ * acpi_device_fix_up_power_children - Force a device's children into D0.
+ * @adev: Parent device object whose children's power state is to be fixed up.
+ *
+ * Call acpi_device_fix_up_power() for @adev's children so long as they
+ * are reported as present and enabled.
+ */
+void acpi_device_fix_up_power_children(struct acpi_device *adev)
+{
+ acpi_dev_for_each_child(adev, fix_up_power_if_applicable, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_device_fix_up_power_children);
+
int acpi_device_update_power(struct acpi_device *device, int *state_p)
{
int state;
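
The hunk above adds acpi_device_fix_up_power_children(), a variant of acpi_device_fix_up_power_extended() that leaves @adev itself alone and only forces its present, enabled children into D0; the acpi_video change earlier in this diff is its caller. A minimal usage sketch follows, with an invented callback name, shown only for illustration and not part of the patch:

#include <linux/acpi.h>

/* Hypothetical .add() callback illustrating the new helper. */
static int example_acpi_add(struct acpi_device *adev)
{
	/* Put only the present/enabled children into D0, not @adev itself. */
	acpi_device_fix_up_power_children(adev);

	return 0;
}
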
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 3a34a8c425fe..55437f5e0c3a 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -592,7 +592,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
while (1) {
if (cx->entry_method == ACPI_CSTATE_HALT)
- safe_halt();
+ raw_safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
io_idle(cx->address);
} else
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 15a3bdbd0755..9bd9f79cd409 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -448,6 +448,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus ExpertBook B1402CVA */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "B1402CVA"),
+ },
+ },
+ {
/* Asus ExpertBook B1502CBA */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fa5dd71a80fa..02bb2cce423f 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1568,17 +1568,22 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
int err;
const struct iommu_ops *ops;
+ /* Serialise to make dev->iommu stable under our potential fwspec */
+ mutex_lock(&iommu_probe_device_lock);
/*
* If we already translated the fwspec there is nothing left to do,
* return the iommu_ops.
*/
ops = acpi_iommu_fwspec_ops(dev);
- if (ops)
+ if (ops) {
+ mutex_unlock(&iommu_probe_device_lock);
return ops;
+ }
err = iort_iommu_configure_id(dev, id_in);
if (err && err != -EPROBE_DEFER)
err = viot_iommu_configure(dev);
+ mutex_unlock(&iommu_probe_device_lock);
/*
* If we have reason to believe the IOMMU driver missed the initial
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index c10ff8985203..0a0f483124c3 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1055,9 +1055,14 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
* Ask the sd driver to issue START STOP UNIT on runtime suspend
* and resume and shutdown only. For system level suspend/resume,
* devices power state is handled directly by libata EH.
+ * Given that disks are always spun up on system resume, also
+ * make sure that the sd driver forces runtime suspended disks
+ * to be resumed to correctly reflect the power state of the
+ * device.
*/
- sdev->manage_runtime_start_stop = true;
- sdev->manage_shutdown = true;
+ sdev->manage_runtime_start_stop = 1;
+ sdev->manage_shutdown = 1;
+ sdev->force_runtime_start_on_system_start = 1;
}
/*
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 9a1e194d5cf8..1f6186475715 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -307,11 +307,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
WRITE_ONCE(cpudata->highest_perf, highest_perf);
-
+ WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
-
+ WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
return 0;
}
@@ -329,11 +329,12 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
highest_perf = cppc_perf.highest_perf;
WRITE_ONCE(cpudata->highest_perf, highest_perf);
-
+ WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
cppc_perf.lowest_nonlinear_perf);
WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
+ WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
if (cppc_state == AMD_PSTATE_ACTIVE)
return 0;
@@ -432,6 +433,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
u64 prev = READ_ONCE(cpudata->cppc_req_cached);
u64 value = prev;
+ min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
+ cpudata->max_limit_perf);
+ max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
+ cpudata->max_limit_perf);
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
@@ -470,6 +475,22 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
return 0;
}
+static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
+{
+ u32 max_limit_perf, min_limit_perf;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
+ min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+
+ WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+ WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+ WRITE_ONCE(cpudata->max_limit_freq, policy->max);
+ WRITE_ONCE(cpudata->min_limit_freq, policy->min);
+
+ return 0;
+}
+
static int amd_pstate_update_freq(struct cpufreq_policy *policy,
unsigned int target_freq, bool fast_switch)
{
@@ -480,6 +501,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
if (!cpudata->max_freq)
return -ENODEV;
+ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
+
cap_perf = READ_ONCE(cpudata->highest_perf);
min_perf = READ_ONCE(cpudata->lowest_perf);
max_perf = cap_perf;
@@ -518,7 +542,9 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- return amd_pstate_update_freq(policy, target_freq, true);
+ if (!amd_pstate_update_freq(policy, target_freq, true))
+ return target_freq;
+ return policy->cur;
}
static void amd_pstate_adjust_perf(unsigned int cpu,
@@ -532,6 +558,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
struct amd_cpudata *cpudata = policy->driver_data;
unsigned int target_freq;
+ if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
+ amd_pstate_update_min_max_limit(policy);
+
+
cap_perf = READ_ONCE(cpudata->highest_perf);
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
max_freq = READ_ONCE(cpudata->max_freq);
@@ -745,6 +775,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
/* Initial processor data capability frequencies */
cpudata->max_freq = max_freq;
cpudata->min_freq = min_freq;
+ cpudata->max_limit_freq = max_freq;
+ cpudata->min_limit_freq = min_freq;
cpudata->nominal_freq = nominal_freq;
cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
@@ -850,11 +882,16 @@ static ssize_t show_energy_performance_available_preferences(
{
int i = 0;
int offset = 0;
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ return sysfs_emit_at(buf, offset, "%s\n",
+ energy_perf_strings[EPP_INDEX_PERFORMANCE]);
while (energy_perf_strings[i] != NULL)
offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
- sysfs_emit_at(buf, offset, "\n");
+ offset += sysfs_emit_at(buf, offset, "\n");
return offset;
}
@@ -1183,16 +1220,25 @@ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
-static void amd_pstate_epp_init(unsigned int cpu)
+static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct amd_cpudata *cpudata = policy->driver_data;
- u32 max_perf, min_perf;
+ u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
u64 value;
s16 epp;
max_perf = READ_ONCE(cpudata->highest_perf);
min_perf = READ_ONCE(cpudata->lowest_perf);
+ max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
+ min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
+
+ max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
+ cpudata->max_limit_perf);
+ min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
+ cpudata->max_limit_perf);
+
+ WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+ WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
value = READ_ONCE(cpudata->cppc_req_cached);
@@ -1210,9 +1256,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
value &= ~AMD_CPPC_DES_PERF(~0L);
value |= AMD_CPPC_DES_PERF(0);
- if (cpudata->epp_policy == cpudata->policy)
- goto skip_epp;
-
cpudata->epp_policy = cpudata->policy;
/* Get BIOS pre-defined epp value */
@@ -1222,7 +1265,7 @@ static void amd_pstate_epp_init(unsigned int cpu)
* This return value can only be negative for shared_memory
* systems where EPP register read/write not supported.
*/
- goto skip_epp;
+ return;
}
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
@@ -1236,8 +1279,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
WRITE_ONCE(cpudata->cppc_req_cached, value);
amd_pstate_set_epp(cpudata, epp);
-skip_epp:
- cpufreq_cpu_put(policy);
}
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
@@ -1252,7 +1293,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
cpudata->policy = policy->policy;
- amd_pstate_epp_init(policy->cpu);
+ amd_pstate_epp_update_limit(policy);
return 0;
}
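
The new amd_pstate_update_min_max_limit() maps the cpufreq policy limits (in kHz) onto the CPPC performance scale, and amd_pstate_update() then clamps min/max/desired perf into that window. A small sketch of the same scaling, with made-up numbers purely for illustration and a helper name that does not exist in the driver:

#include <linux/math64.h>

/*
 * Hypothetical helper mirroring the patch's freq-to-perf scaling.
 * Example: freq_khz = 3,000,000, highest_perf = 255,
 * max_freq_khz = 4,200,000  =>  limit perf of about 182.
 */
static inline u32 freq_khz_to_perf(u32 freq_khz, u32 highest_perf, u32 max_freq_khz)
{
	return div_u64((u64)freq_khz * highest_perf, max_freq_khz);
}
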
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 494d044b9e72..33728c242f66 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -327,7 +327,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
imx6x_disable_freq_in_opp(dev, 696000000);
if (of_machine_is_compatible("fsl,imx6ull")) {
- if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
+ if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
imx6x_disable_freq_in_opp(dev, 792000000);
if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 6355a39418c5..ea05d9d67490 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -23,8 +23,10 @@
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
@@ -55,6 +57,7 @@ struct qcom_cpufreq_match_data {
struct qcom_cpufreq_drv_cpu {
int opp_token;
+ struct device **virt_devs;
};
struct qcom_cpufreq_drv {
@@ -424,6 +427,30 @@ static const struct qcom_cpufreq_match_data match_data_ipq8074 = {
.get_version = qcom_cpufreq_ipq8074_name_version,
};
+static void qcom_cpufreq_suspend_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
+{
+ const char * const *name = drv->data->genpd_names;
+ int i;
+
+ if (!drv->cpus[cpu].virt_devs)
+ return;
+
+ for (i = 0; *name; i++, name++)
+ device_set_awake_path(drv->cpus[cpu].virt_devs[i]);
+}
+
+static void qcom_cpufreq_put_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
+{
+ const char * const *name = drv->data->genpd_names;
+ int i;
+
+ if (!drv->cpus[cpu].virt_devs)
+ return;
+
+ for (i = 0; *name; i++, name++)
+ pm_runtime_put(drv->cpus[cpu].virt_devs[i]);
+}
+
static int qcom_cpufreq_probe(struct platform_device *pdev)
{
struct qcom_cpufreq_drv *drv;
@@ -478,6 +505,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
of_node_put(np);
for_each_possible_cpu(cpu) {
+ struct device **virt_devs = NULL;
struct dev_pm_opp_config config = {
.supported_hw = NULL,
};
@@ -498,7 +526,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
if (drv->data->genpd_names) {
config.genpd_names = drv->data->genpd_names;
- config.virt_devs = NULL;
+ config.virt_devs = &virt_devs;
}
if (config.supported_hw || config.genpd_names) {
@@ -509,6 +537,27 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
goto free_opp;
}
}
+
+ if (virt_devs) {
+ const char * const *name = config.genpd_names;
+ int i, j;
+
+ for (i = 0; *name; i++, name++) {
+ ret = pm_runtime_resume_and_get(virt_devs[i]);
+ if (ret) {
+ dev_err(cpu_dev, "failed to resume %s: %d\n",
+ *name, ret);
+
+ /* Rollback previous PM runtime calls */
+ name = config.genpd_names;
+ for (j = 0; *name && j < i; j++, name++)
+ pm_runtime_put(virt_devs[j]);
+
+ goto free_opp;
+ }
+ }
+ drv->cpus[cpu].virt_devs = virt_devs;
+ }
}
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
@@ -522,8 +571,10 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
dev_err(cpu_dev, "Failed to register platform device\n");
free_opp:
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+ qcom_cpufreq_put_virt_devs(drv, cpu);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
+ }
return ret;
}
@@ -534,15 +585,31 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
platform_device_unregister(cpufreq_dt_pdev);
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+ qcom_cpufreq_put_virt_devs(drv, cpu);
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
+ }
}
+static int qcom_cpufreq_suspend(struct device *dev)
+{
+ struct qcom_cpufreq_drv *drv = dev_get_drvdata(dev);
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ qcom_cpufreq_suspend_virt_devs(drv, cpu);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(qcom_cpufreq_pm_ops, qcom_cpufreq_suspend, NULL);
+
static struct platform_driver qcom_cpufreq_driver = {
.probe = qcom_cpufreq_probe,
.remove_new = qcom_cpufreq_remove,
.driver = {
.name = "qcom-cpufreq-nvmem",
+ .pm = pm_sleep_ptr(&qcom_cpufreq_pm_ops),
},
};
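
The probe path above now resumes each virtual genpd device with pm_runtime_resume_and_get() and, if one of them fails, drops the references already taken before bailing out; the remove and error paths release them via qcom_cpufreq_put_virt_devs(). The same acquire-with-rollback pattern in isolation (generic sketch with invented names, not code from this patch):

#include <linux/pm_runtime.h>

/* Generic illustration of the rollback logic used in the probe hunk above. */
static int example_resume_virt_devs(struct device **virt_devs, int count)
{
	int i, j, ret;

	for (i = 0; i < count; i++) {
		ret = pm_runtime_resume_and_get(virt_devs[i]);
		if (ret) {
			/* Drop the references taken so far before failing. */
			for (j = 0; j < i; j++)
				pm_runtime_put(virt_devs[j]);
			return ret;
		}
	}

	return 0;
}
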
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 38b4110378de..eb8b733065b2 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -301,7 +301,7 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
if ((old->context == fence->context && old_usage >= usage &&
- dma_fence_is_later(fence, old)) ||
+ dma_fence_is_later_or_same(fence, old)) ||
dma_fence_is_signaled(old)) {
dma_resv_list_set(fobj, i, fence, usage);
dma_fence_put(old);
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index aa597cda0d88..2828e9573e90 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -717,14 +717,11 @@ static void create_units(struct fw_device *device)
fw_unit_attributes,
&unit->attribute_group);
- if (device_register(&unit->device) < 0)
- goto skip_unit;
-
fw_device_get(device);
- continue;
-
- skip_unit:
- kfree(unit);
+ if (device_register(&unit->device) < 0) {
+ put_device(&unit->device);
+ continue;
+ }
}
}
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 7edf2c95282f..e779d866022b 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1519,9 +1519,9 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
sdev->use_10_for_rw = 1;
if (sbp2_param_exclusive_login) {
- sdev->manage_system_start_stop = true;
- sdev->manage_runtime_start_stop = true;
- sdev->manage_shutdown = true;
+ sdev->manage_system_start_stop = 1;
+ sdev->manage_runtime_start_stop = 1;
+ sdev->manage_shutdown = 1;
}
if (sdev->type == TYPE_ROM)
diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
index 3f2f7bf6e335..5b439d04079c 100644
--- a/drivers/firmware/efi/unaccepted_memory.c
+++ b/drivers/firmware/efi/unaccepted_memory.c
@@ -101,7 +101,7 @@ retry:
* overlap on physical address level.
*/
list_for_each_entry(entry, &accepting_list, list) {
- if (entry->end < range.start)
+ if (entry->end <= range.start)
continue;
if (entry->start >= range.end)
continue;
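
The one-character change above (from < to <=) treats the accepting_list ranges as half-open intervals, so an entry that ends exactly where the new range starts no longer counts as overlapping. The overlap test for half-open ranges, written out as a standalone sketch (assuming the [start, end) convention; not code from the patch):

#include <linux/types.h>

/* Two half-open ranges [a_start, a_end) and [b_start, b_end) overlap iff: */
static bool ranges_overlap(u64 a_start, u64 a_end, u64 b_start, u64 b_end)
{
	return a_start < b_end && b_start < a_end;
}
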
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index b8412202a1b0..75dc58470393 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -547,7 +547,7 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
struct amdgpu_device *adev = dst, *peer_adev;
int num_links;
- if (adev->asic_type != CHIP_ALDEBARAN)
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))
return 0;
if (src)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index a53f436fa9f1..0e61ebdb3f3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -638,6 +638,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ if (!adev->didt_rreg)
+ return -EOPNOTSUPP;
+
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -694,6 +697,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ if (!adev->didt_wreg)
+ return -EOPNOTSUPP;
+
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7eeaf0aa7f81..5c0817cbc7c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4538,6 +4538,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
+ r = amdgpu_dpm_notify_rlc_state(adev, false);
+ if (r)
+ return r;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 0cacd0b9f8be..b8fbe97efe1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -340,14 +340,11 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
adev->have_disp_power_ref = true;
return ret;
}
- /* if we have no active crtcs, then drop the power ref
- * we got before
+ /* if we have no active crtcs, then go to
+ * drop the power ref we got before
*/
- if (!active && adev->have_disp_power_ref) {
- pm_runtime_put_autosuspend(dev->dev);
+ if (!active && adev->have_disp_power_ref)
adev->have_disp_power_ref = false;
- }
-
out:
/* drop the power reference we got coming in here */
pm_runtime_put_autosuspend(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8f24cabe2155..8b33b130ea36 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2263,6 +2263,8 @@ retry_init:
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
+ pci_wake_from_d3(pdev, TRUE);
+
/*
* For runpm implemented via BACO, PMFW will handle the
* timing for BACO in and out:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 5f71414190e9..d2f273d77e59 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -181,6 +181,9 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ if (!bo->ttm)
+ return AMDGPU_BO_INVALID_OFFSET;
+
if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
return AMDGPU_BO_INVALID_OFFSET;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index cef920a93924..d79b4ca1ecfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1527,10 +1527,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- uint64_t offset;
+ uint64_t offset = AMDGPU_BO_INVALID_OFFSET;
- offset = (bo->tbo.resource->start << PAGE_SHIFT) +
- amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
+ if (bo->tbo.resource->mem_type == TTM_PL_TT)
+ offset = amdgpu_gmc_agp_addr(&bo->tbo);
+
+ if (offset == AMDGPU_BO_INVALID_OFFSET)
+ offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+ amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
return amdgpu_gmc_sign_extend(offset);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 65aa218380be..2fde93b00cab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -214,6 +214,12 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
control->i2c_address = EEPROM_I2C_MADDR_0;
return true;
case IP_VERSION(13, 0, 0):
+ if (strnstr(atom_ctx->vbios_pn, "D707",
+ sizeof(atom_ctx->vbios_pn)))
+ control->i2c_address = EEPROM_I2C_MADDR_0;
+ else
+ control->i2c_address = EEPROM_I2C_MADDR_4;
+ return true;
case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
control->i2c_address = EEPROM_I2C_MADDR_4;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 05991c5c8ddb..ab4a762aed5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -959,10 +959,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
return 0;
addr = amdgpu_gmc_agp_addr(bo);
- if (addr != AMDGPU_BO_INVALID_OFFSET) {
- bo->resource->start = addr >> PAGE_SHIFT;
+ if (addr != AMDGPU_BO_INVALID_OFFSET)
return 0;
- }
/* allocate GART space */
placement.num_placement = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 0c6133cc5e57..8ed4a6fb147a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -89,6 +89,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin");
+static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
+};
+
static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
@@ -304,6 +308,10 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
default:
break;
}
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_11_0,
+ (const u32)ARRAY_SIZE(golden_settings_gc_11_0));
+
}
static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
@@ -419,7 +427,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
cpu_ptr = &adev->wb.wb[index];
- r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
+ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err1;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 885ebd703260..1943beb135c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -883,8 +883,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 16,
- AMDGPU_IB_POOL_DIRECT, &ib);
+
+ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index e3ff6e46f3f7..69c500910746 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1039,8 +1039,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 16,
- AMDGPU_IB_POOL_DIRECT, &ib);
+
+ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 40d06d32bb74..4a09cc0d8ce0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -297,8 +297,8 @@ static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 16,
- AMDGPU_IB_POOL_DIRECT, &ib);
+
+ r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
index 676ab1d20d2f..1f52b4b1db03 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c
@@ -259,17 +259,17 @@ const struct nbio_hdp_flush_reg nbio_v7_11_hdp_flush_reg = {
static void nbio_v7_11_init_registers(struct amdgpu_device *adev)
{
-/* uint32_t def, data;
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3);
+ data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
+ CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
+ data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
+ CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
- def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3);
- data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
- CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
- data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
- CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
+ if (def != data)
+ WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
- if (def != data)
- WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
-*/
}
static void nbio_v7_11_update_medium_grain_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index 23f26f8caad4..25a3da83e0fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -611,11 +611,6 @@ static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device
dev_info(adev->dev, "RAS controller interrupt triggered "
"by NBIF error\n");
-
- /* ras_controller_int is dedicated for nbif ras error,
- * not the global interrupt for sync flood
- */
- amdgpu_ras_reset_gpu(adev);
}
amdgpu_ras_error_data_fini(&err_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index d4b8d62f4294..c82776e5e9aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1161,6 +1161,11 @@ static int soc15_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
adev->external_rev_id = adev->rev_id + 0x46;
+ /* GC 9.4.3 uses MMIO register region hole at a different offset */
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->rmmio_remap.reg_offset = 0x1A000;
+ adev->rmmio_remap.bus_addr = adev->rmmio_base + 0x1A000;
+ }
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 9cc32f577e38..4c8e278a0d0c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1128,7 +1128,7 @@ static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
struct kfd_dev *dev = adev->kfd.dev;
uint32_t i;
- if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
+ if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3))
return dev->nodes[0];
for (i = 0; i < dev->num_nodes; i++)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 77649392e233..77f493262e05 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -169,16 +169,43 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
return 0;
}
+static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
+ struct process_queue_node *pqn)
+{
+ struct kfd_node *dev;
+ struct kfd_process_device *pdd;
+
+ dev = pqn->q->device;
+
+ pdd = kfd_get_process_device_data(dev, pqm->process);
+ if (!pdd) {
+ pr_err("Process device data doesn't exist\n");
+ return;
+ }
+
+ if (pqn->q->gws) {
+ if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
+ !dev->kfd->shared_resources.enable_mes)
+ amdgpu_amdkfd_remove_gws_from_process(
+ pqm->process->kgd_process_info, pqn->q->gws);
+ pdd->qpd.num_gws = 0;
+ }
+
+ if (dev->kfd->shared_resources.enable_mes) {
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->gang_ctx_bo);
+ if (pqn->q->wptr_bo)
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
+ }
+}
+
void pqm_uninit(struct process_queue_manager *pqm)
{
struct process_queue_node *pqn, *next;
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
- if (pqn->q && pqn->q->gws &&
- KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
- !pqn->q->device->kfd->shared_resources.enable_mes)
- amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
- pqn->q->gws);
+ if (pqn->q)
+ pqm_clean_queue_resource(pqm, pqn);
+
kfd_procfs_del_queue(pqn->q);
uninit_queue(pqn->q);
list_del(&pqn->process_queue_list);
@@ -461,22 +488,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
goto err_destroy_queue;
}
- if (pqn->q->gws) {
- if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
- !dev->kfd->shared_resources.enable_mes)
- amdgpu_amdkfd_remove_gws_from_process(
- pqm->process->kgd_process_info,
- pqn->q->gws);
- pdd->qpd.num_gws = 0;
- }
-
- if (dev->kfd->shared_resources.enable_mes) {
- amdgpu_amdkfd_free_gtt_mem(dev->adev,
- pqn->q->gang_ctx_bo);
- if (pqn->q->wptr_bo)
- amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
-
- }
+ pqm_clean_queue_resource(pqm, pqn);
uninit_queue(pqn->q);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ee97814ebd99..b452796fc6d3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -6267,7 +6267,7 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
dm_new_state->underscan_enable = val;
ret = 0;
} else if (property == adev->mode_info.abm_level_property) {
- dm_new_state->abm_level = val;
+ dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
ret = 0;
}
@@ -6312,7 +6312,8 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
*val = dm_state->underscan_enable;
ret = 0;
} else if (property == adev->mode_info.abm_level_property) {
- *val = dm_state->abm_level;
+ *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
+ dm_state->abm_level : 0;
ret = 0;
}
@@ -6385,7 +6386,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->pbn = 0;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- state->abm_level = amdgpu_dm_abm_level;
+ state->abm_level = amdgpu_dm_abm_level ?:
+ ABM_LEVEL_IMMEDIATE_DISABLE;
__drm_atomic_helper_connector_reset(connector, &state->base);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index b2c4f97afc8b..8776055bbeaa 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -334,7 +334,7 @@ static struct wm_table lpddr5_wm_table = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 11.65333,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -342,7 +342,7 @@ static struct wm_table lpddr5_wm_table = {
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 11.65333,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -350,7 +350,7 @@ static struct wm_table lpddr5_wm_table = {
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 11.65333,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
@@ -358,7 +358,7 @@ static struct wm_table lpddr5_wm_table = {
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
- .pstate_latency_us = 11.65333,
+ .pstate_latency_us = 129.0,
.sr_exit_time_us = 11.5,
.sr_enter_plus_exit_time_us = 14.5,
.valid = true,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 507a7cf56711..d5fde7d23fbf 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -232,6 +232,10 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->work_arounds.skip_clock_update)
return;
+ /* DTBCLK is fixed, so set a default if unspecified. */
+ if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
+ new_clocks->ref_dtbclk_khz = 600000;
+
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
* also if safe to lower is false, we just go in the higher state
@@ -265,8 +269,10 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
dcn35_smu_set_dtbclk(clk_mgr, true);
- dcn35_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+
+ dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
+ clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
}
/* check that we're not already in D0 */
@@ -314,17 +320,12 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
update_dispclk = true;
}
- if (!new_clocks->dtbclk_en) {
- new_clocks->ref_dtbclk_khz = 600000;
- }
-
/* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
if (!dc->debug.disable_dtb_ref_clk_switch &&
- should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
- /* DCCG requires KHz precision for DTBCLK */
- dcn35_smu_set_dtbclk(clk_mgr, true);
-
- dcn35_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
+ should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000,
+ clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
+ dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
+ clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
}
if (dpp_clock_lowered) {
@@ -443,32 +444,32 @@ static struct wm_table ddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 9,
- .sr_enter_plus_exit_time_us = 11,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 9,
- .sr_enter_plus_exit_time_us = 11,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 9,
- .sr_enter_plus_exit_time_us = 11,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 9,
- .sr_enter_plus_exit_time_us = 11,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
}
@@ -480,32 +481,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 11.5,
- .sr_enter_plus_exit_time_us = 14.5,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 11.5,
- .sr_enter_plus_exit_time_us = 14.5,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 11.5,
- .sr_enter_plus_exit_time_us = 14.5,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 11.5,
- .sr_enter_plus_exit_time_us = 14.5,
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
.valid = true,
},
}
@@ -515,11 +516,6 @@ static DpmClocks_t_dcn35 dummy_clocks;
static struct dcn35_watermarks dummy_wms = { 0 };
-static struct dcn35_ss_info_table ss_info_table = {
- .ss_divider = 1000,
- .ss_percentage = {0, 0, 375, 375, 375}
-};
-
static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
{
int i, num_valid_sets;
@@ -653,27 +649,47 @@ static unsigned int convert_wck_ratio(uint8_t wck_ratio)
return 1;
}
+static inline uint32_t calc_dram_speed_mts(const MemPstateTable_t *entry)
+{
+ return entry->UClk * convert_wck_ratio(entry->WckRatio) * 2;
+}
+
static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
struct integrated_info *bios_info,
DpmClocks_t_dcn35 *clock_table)
{
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
- uint32_t max_pstate = 0, max_uclk = 0, max_fclk = 0;
- uint32_t min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
+ uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
+ uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
int i;
+ /* Determine min/max p-state values. */
for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
- if (is_valid_clock_value(clock_table->MemPstateTable[i].UClk) &&
- clock_table->MemPstateTable[i].UClk > max_uclk) {
- max_uclk = clock_table->MemPstateTable[i].UClk;
+ uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
+
+ if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
+ max_dram_speed_mts = dram_speed_mts;
max_pstate = i;
}
}
- /* We expect the table to contain at least one valid Uclk entry. */
- ASSERT(is_valid_clock_value(max_uclk));
+ min_dram_speed_mts = max_dram_speed_mts;
+ min_pstate = max_pstate;
+
+ for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
+ uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
+
+ if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
+ min_dram_speed_mts = dram_speed_mts;
+ min_pstate = i;
+ }
+ }
+ /* We expect the table to contain at least one valid P-state entry. */
+ ASSERT(clock_table->NumMemPstatesEnabled &&
+ is_valid_clock_value(max_dram_speed_mts) &&
+ is_valid_clock_value(min_dram_speed_mts));
/* dispclk and dppclk can be max at any voltage, same number of levels for both */
if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
@@ -683,47 +699,46 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
max_dppclk = find_max_clk_value(clock_table->DppClocks,
clock_table->NumDispClkLevelsEnabled);
} else {
+ /* Invalid number of entries in the table from PMFW. */
ASSERT(0);
}
- if (clock_table->NumFclkLevelsEnabled <= NUM_FCLK_DPM_LEVELS)
- max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq,
- clock_table->NumFclkLevelsEnabled);
- for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
- uint32_t min_uclk = clock_table->MemPstateTable[0].UClk;
- int j;
+ /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
+ ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);
- for (j = 1; j < clock_table->NumMemPstatesEnabled; j++) {
- if (is_valid_clock_value(clock_table->MemPstateTable[j].UClk) &&
- clock_table->MemPstateTable[j].UClk < min_uclk &&
- clock_table->MemPstateTable[j].Voltage <= clock_table->SocVoltage[i]) {
- min_uclk = clock_table->MemPstateTable[j].UClk;
- min_pstate = j;
- }
- }
+ max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, clock_table->NumFclkLevelsEnabled);
+ for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+ int j;
+
+ /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
- break;
+ break;
bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
- bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+
+ /* Now update clocks we do read */
bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[min_pstate].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[min_pstate].Voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
- bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
- clock_table->MemPstateTable[min_pstate].WckRatio);
- }
+ bw_params->clk_table.entries[i].wck_ratio =
+ convert_wck_ratio(clock_table->MemPstateTable[min_pstate].WckRatio);
+
+ /* Dcfclk and Fclk are tied, but at a different ratio */
+ bw_params->clk_table.entries[i].fclk_mhz = min(max_fclk, 2 * clock_table->DcfClocks[i]);
+ }
/* Make sure to include at least one entry at highest pstate */
if (max_pstate != min_pstate || i == 0) {
if (i > MAX_NUM_DPM_LVL - 1)
i = MAX_NUM_DPM_LVL - 1;
+
bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[max_pstate].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[max_pstate].Voltage;
@@ -739,6 +754,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
}
bw_params->clk_table.num_entries = i--;
+ /* Make sure all highest clocks are included*/
bw_params->clk_table.entries[i].socclk_mhz =
find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
bw_params->clk_table.entries[i].dispclk_mhz =
@@ -757,6 +773,11 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
bw_params->clk_table.num_entries_per_clk.num_fclk_levels = clock_table->NumFclkLevelsEnabled;
bw_params->clk_table.num_entries_per_clk.num_memclk_levels = clock_table->NumMemPstatesEnabled;
bw_params->clk_table.num_entries_per_clk.num_socclk_levels = clock_table->NumSocClkLevelsEnabled;
+
+ /*
+ * Set any 0 clocks to max default setting. Not an issue for
+ * power since we aren't doing switching in such case anyway
+ */
for (i = 0; i < bw_params->clk_table.num_entries; i++) {
if (!bw_params->clk_table.entries[i].fclk_mhz) {
bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
@@ -965,21 +986,6 @@ struct clk_mgr_funcs dcn35_fpga_funcs = {
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
};
-static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
-{
- uint32_t clock_source;
- struct dc_context *ctx = clk_mgr->base.ctx;
-
- REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
-
- clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
-
- if (clk_mgr->dprefclk_ss_percentage != 0) {
- clk_mgr->ss_on_dprefclk = true;
- clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
- }
-}
-
void dcn35_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn35 *clk_mgr,
@@ -1043,17 +1049,11 @@ void dcn35_clk_mgr_construct(
dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
- clk_mgr->base.base.clks.ref_dtbclk_khz = dcn35_smu_get_dtbclk(&clk_mgr->base);
+ clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
- if (!clk_mgr->base.base.clks.ref_dtbclk_khz)
- dcn35_smu_set_dtbclk(&clk_mgr->base, true);
-
- clk_mgr->base.base.clks.dtbclk_en = true;
dce_clock_read_ss_info(&clk_mgr->base);
/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
- dcn35_read_ss_info_from_lut(&clk_mgr->base);
-
clk_mgr->base.base.bw_params = &dcn35_bw_params;
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
@@ -1129,7 +1129,6 @@ void dcn35_clk_mgr_construct(
ctx->dc->debug.disable_dpp_power_gate = false;
ctx->dc->debug.disable_hubp_power_gate = false;
ctx->dc->debug.disable_dsc_power_gate = false;
- ctx->dc->debug.disable_hpo_power_gate = false;
} else {
/*let's reset the config control flag*/
ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/
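Illustrative arithmetic (values assumed, not from the patch) for the Dcfclk/Fclk tie added in the dcn35_clk_mgr_helper_populate_bw_params() hunk above:

/*
 * DcfClocks[i] = 400 MHz, max_fclk = 1000 MHz -> fclk = min(1000, 2 * 400) =  800 MHz
 * DcfClocks[i] = 600 MHz, max_fclk = 1000 MHz -> fclk = min(1000, 2 * 600) = 1000 MHz
 */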
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 9316b737a8ba..2cafd644baff 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -874,6 +874,7 @@ struct dc_debug_options {
unsigned int seamless_boot_odm_combine;
unsigned int force_odm_combine_4to1; //bit vector based on otg inst
int minimum_z8_residency_time;
+ int minimum_z10_residency_time;
bool disable_z9_mpc;
unsigned int force_fclk_khz;
bool enable_tri_buf;
@@ -1608,7 +1609,6 @@ struct dc_link {
enum edp_revision edp_revision;
union dpcd_sink_ext_caps dpcd_sink_ext_caps;
- struct backlight_settings backlight_settings;
struct psr_settings psr_settings;
struct replay_settings replay_settings;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index fcb825e4f1bb..35d146217aef 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -991,10 +991,6 @@ struct link_mst_stream_allocation_table {
struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
};
-struct backlight_settings {
- uint32_t backlight_millinits;
-};
-
/* PSR feature flags */
struct psr_settings {
bool psr_feature_enabled; // PSR is supported by sink
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 677361d74a4e..c97391edb5ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -871,7 +871,7 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.enable_z9_disable_interface = true,
- .minimum_z8_residency_time = 2000,
+ .minimum_z8_residency_time = 2100,
.psr_skip_crtc_disable = true,
.replay_skip_crtc_disabled = true,
.disable_dmcu = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c
index 46f71ff08fd1..d19db8e9b8a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c
@@ -261,6 +261,7 @@ void pg_cntl35_hpo_pg_control(struct pg_cntl *pg_cntl, bool power_on)
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? 0 : 2;
uint32_t org_ip_request_cntl;
+ uint32_t power_forceon;
bool block_enabled;
if (pg_cntl->ctx->dc->debug.ignore_pg ||
@@ -277,6 +278,10 @@ void pg_cntl35_hpo_pg_control(struct pg_cntl *pg_cntl, bool power_on)
return;
}
+ REG_GET(DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon);
+ if (power_forceon)
+ return;
+
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
@@ -304,6 +309,7 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
uint32_t power_gate = power_on ? 0 : 1;
uint32_t pwr_status = power_on ? 0 : 2;
uint32_t org_ip_request_cntl;
+ uint32_t power_forceon;
bool block_enabled;
if (pg_cntl->ctx->dc->debug.ignore_pg ||
@@ -319,6 +325,10 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
return;
}
+ REG_GET(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon);
+ if (power_forceon)
+ return;
+
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c
index c7e011d26d41..70ef1e7ff841 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c
@@ -1712,6 +1712,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
out = dml2_validate(dc, context, fast_validate);
+ if (fast_validate)
+ return out;
+
+ DC_FP_START();
+ dcn35_decide_zstate_support(dc, context);
+ DC_FP_END();
+
return out;
}
@@ -1857,7 +1864,7 @@ static bool dcn35_resource_construct(
/* Use pipe context based otg sync logic */
dc->config.use_pipe_ctx_sync_logic = true;
- dc->config.use_default_clock_table = false;
+
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
index 2cbdd75429ff..6e669a2c5b2d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
@@ -36,7 +36,7 @@
* Define the maximum amount of states supported by the ASIC. Every ASIC has a
* specific number of states; this macro defines the maximum number of states.
*/
-#define DC__VOLTAGE_STATES 20
+#define DC__VOLTAGE_STATES 40
#define DC__NUM_DPP__4 1
#define DC__NUM_DPP__0_PRESENT 1
#define DC__NUM_DPP__1_PRESENT 1
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 7fc8b18096ba..ec77b2b41ba3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -950,10 +950,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
{
int plane_count;
int i;
- unsigned int min_dst_y_next_start_us;
plane_count = 0;
- min_dst_y_next_start_us = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
@@ -975,26 +973,15 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
- struct dc_stream_state *current_stream = context->streams[0];
int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
bool is_pwrseq0 = link->link_index == 0;
- bool isFreesyncVideo;
-
- isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
- isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
- min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
- break;
- }
- }
/* Don't support multi-plane configurations */
if (stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
- if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
+ if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
return DCN_ZSTATE_SUPPORT_ALLOW;
else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 9ec4172d1c2d..b46cde525066 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1192,13 +1192,16 @@ static bool update_pipe_slice_table_with_split_flags(
*/
struct pipe_ctx *pipe;
bool odm;
- int i;
+ int dc_pipe_idx, dml_pipe_idx = 0;
bool updated = false;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- pipe = &context->res_ctx.pipe_ctx[i];
+ for (dc_pipe_idx = 0;
+ dc_pipe_idx < dc->res_pool->pipe_count; dc_pipe_idx++) {
+ pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx];
+ if (resource_is_pipe_type(pipe, FREE_PIPE))
+ continue;
- if (merge[i]) {
+ if (merge[dc_pipe_idx]) {
if (resource_is_pipe_type(pipe, OPP_HEAD))
/* merging OPP head means reducing ODM slice
* count by 1
@@ -1213,17 +1216,18 @@ static bool update_pipe_slice_table_with_split_flags(
updated = true;
}
- if (split[i]) {
- odm = vba->ODMCombineEnabled[vba->pipe_plane[i]] !=
+ if (split[dc_pipe_idx]) {
+ odm = vba->ODMCombineEnabled[vba->pipe_plane[dml_pipe_idx]] !=
dm_odm_combine_mode_disabled;
if (odm && resource_is_pipe_type(pipe, OPP_HEAD))
update_slice_table_for_stream(
- table, pipe->stream, split[i] - 1);
+ table, pipe->stream, split[dc_pipe_idx] - 1);
else if (!odm && resource_is_pipe_type(pipe, DPP_PIPE))
update_slice_table_for_plane(table, pipe,
- pipe->plane_state, split[i] - 1);
+ pipe->plane_state, split[dc_pipe_idx] - 1);
updated = true;
}
+ dml_pipe_idx++;
}
return updated;
}
@@ -2231,6 +2235,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
int i, pipe_idx, vlevel_temp = 0;
double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+ double dram_speed_from_validation = context->bw_ctx.dml.vba.DRAMSpeed;
double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
dm_dram_clock_change_unsupported;
@@ -2418,7 +2423,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
}
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
- min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
+ min_dram_speed_mts = dram_speed_from_validation;
min_dram_speed_mts_margin = 160;
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index a5fe523668e9..39cf1ae3a3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -164,10 +164,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
},
},
.num_states = 5,
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 50.0, /*changed from 442.0*/
- .sr_enter_plus_exit_z8_time_us = 50.0,/*changed from 560.0*/
+ .sr_exit_time_us = 14.0,
+ .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_z8_time_us = 525.0,
+ .sr_enter_plus_exit_z8_time_us = 715.0,
.fclk_change_latency_us = 20.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
@@ -329,6 +329,48 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
/*temp till dml2 fully work without dml1*/
dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip,
DML_PROJECT_DCN31);
+
+ /* copy to dml2, before dml2_create */
+ if (clk_table->num_entries > 2) {
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ dc->dml2_options.bbox_overrides.clks_table.num_states =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
+ clock_limits[i].dcfclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
+ clock_limits[i].fabricclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
+ clock_limits[i].dispclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
+ clock_limits[i].dppclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
+ clock_limits[i].socclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
+ clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
+ clk_table->num_entries;
+ }
+ }
+
+ /* Update latency values */
+ dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = dcn3_5_soc.dram_clock_change_latency_us;
+
+ dc->dml2_options.bbox_overrides.sr_exit_latency_us = dcn3_5_soc.sr_exit_time_us;
+ dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = dcn3_5_soc.sr_enter_plus_exit_time_us;
+
+ dc->dml2_options.bbox_overrides.sr_exit_z8_time_us = dcn3_5_soc.sr_exit_z8_time_us;
+ dc->dml2_options.bbox_overrides.sr_enter_plus_exit_z8_time_us = dcn3_5_soc.sr_enter_plus_exit_z8_time_us;
}
static bool is_dual_plane(enum surface_pixel_format format)
@@ -507,3 +549,37 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
return pipe_cnt;
}
+
+void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
+{
+ enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW;
+ unsigned int i, plane_count = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ plane_count++;
+ }
+
+ if (plane_count == 0) {
+ support = DCN_ZSTATE_SUPPORT_ALLOW;
+ } else if (plane_count == 1 && context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ struct dc_link *link = context->streams[0]->sink->link;
+ bool is_pwrseq0 = link && link->link_index == 0;
+ bool is_psr1 = link && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr;
+ int minmum_z8_residency =
+ dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
+ bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
+ int minmum_z10_residency =
+ dc->debug.minimum_z10_residency_time > 0 ? dc->debug.minimum_z10_residency_time : 5000;
+ bool allow_z10 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z10_residency;
+
+ if (is_pwrseq0 && allow_z10)
+ support = DCN_ZSTATE_SUPPORT_ALLOW;
+ else if (is_pwrseq0 && is_psr1)
+ support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
+ else if (allow_z8)
+ support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
+ }
+
+ context->bw_ctx.bw.dcn.clk.zstate_support = support;
+}
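A worked example (numbers assumed, single plane on a single eDP stream with pwrseq0) of the decision ladder in dcn35_decide_zstate_support() above, using the default thresholds of 1000 us (Z8) and 5000 us (Z10):

/*
 * StutterPeriod = 6000 us              -> 6000 > 5000              -> DCN_ZSTATE_SUPPORT_ALLOW
 * StutterPeriod = 1500 us, PSR1 active -> 1500 <= 5000, 1500 > 1000 -> DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY
 * StutterPeriod =  800 us, no PSR1     ->  800 <= 5000,  800 <= 1000 -> DCN_ZSTATE_SUPPORT_DISALLOW
 */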
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
index e8d5a170893e..067480fc3691 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h
@@ -39,4 +39,6 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
display_e2e_pipe_params_st *pipes,
bool fast_validate);
+void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
index 510be909cd75..59718ee33e51 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
@@ -6329,7 +6329,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.NoOfDPPThisState,
mode_lib->ms.dpte_group_bytes,
s->HostVMInefficiencyFactor,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
s->NextMaxVStartup = s->MaxVStartupAllPlanes[j];
@@ -6542,7 +6542,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k],
mode_lib->ms.MetaRowBytes[j][k],
mode_lib->ms.DPTEBytesPerRow[j][k],
@@ -7687,7 +7687,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
- CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+ CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState;
@@ -7957,7 +7957,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
UseMinimumDCFCLK_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
UseMinimumDCFCLK_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
UseMinimumDCFCLK_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
- UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+ UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
UseMinimumDCFCLK_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
UseMinimumDCFCLK_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
UseMinimumDCFCLK_params->ImmediateFlipRequirement = s->ImmediateFlipRequiredFinal;
@@ -8699,7 +8699,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
- CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+ CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0];
@@ -8805,7 +8805,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
mode_lib->ms.cache_display_cfg.hw.DPPPerSurface,
locals->dpte_group_bytes,
s->HostVMInefficiencyFactor,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
locals->TCalc = 24.0 / locals->DCFCLKDeepSleep;
@@ -8995,7 +8995,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
- CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
+ CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
@@ -9240,7 +9240,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
- mode_lib->ms.soc.hostvm_min_page_size_kbytes,
+ mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
locals->PDEAndMetaPTEBytesFrame[k],
locals->MetaRowByte[k],
locals->PixelPTEBytesPerRow[k],
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 75171bee6f71..fa8fe5bf7e57 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -341,25 +341,42 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
break;
}
- /* Override from passed values, mainly for debugging purposes, if available */
- if (dml2->config.bbox_overrides.sr_exit_latency_us) {
- p->in_states->state_array[0].sr_exit_time_us = dml2->config.bbox_overrides.sr_exit_latency_us;
- }
+ /* Override from passed values, if available */
+ for (i = 0; i < p->in_states->num_states; i++) {
+ if (dml2->config.bbox_overrides.sr_exit_latency_us) {
+ p->in_states->state_array[i].sr_exit_time_us =
+ dml2->config.bbox_overrides.sr_exit_latency_us;
+ }
- if (dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us) {
- p->in_states->state_array[0].sr_enter_plus_exit_time_us = dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us;
- }
+ if (dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us) {
+ p->in_states->state_array[i].sr_enter_plus_exit_time_us =
+ dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us;
+ }
- if (dml2->config.bbox_overrides.urgent_latency_us) {
- p->in_states->state_array[0].urgent_latency_pixel_data_only_us = dml2->config.bbox_overrides.urgent_latency_us;
- }
+ if (dml2->config.bbox_overrides.sr_exit_z8_time_us) {
+ p->in_states->state_array[i].sr_exit_z8_time_us =
+ dml2->config.bbox_overrides.sr_exit_z8_time_us;
+ }
- if (dml2->config.bbox_overrides.dram_clock_change_latency_us) {
- p->in_states->state_array[0].dram_clock_change_latency_us = dml2->config.bbox_overrides.dram_clock_change_latency_us;
- }
+ if (dml2->config.bbox_overrides.sr_enter_plus_exit_z8_time_us) {
+ p->in_states->state_array[i].sr_enter_plus_exit_z8_time_us =
+ dml2->config.bbox_overrides.sr_enter_plus_exit_z8_time_us;
+ }
+
+ if (dml2->config.bbox_overrides.urgent_latency_us) {
+ p->in_states->state_array[i].urgent_latency_pixel_data_only_us =
+ dml2->config.bbox_overrides.urgent_latency_us;
+ }
- if (dml2->config.bbox_overrides.fclk_change_latency_us) {
- p->in_states->state_array[0].fclk_change_latency_us = dml2->config.bbox_overrides.fclk_change_latency_us;
+ if (dml2->config.bbox_overrides.dram_clock_change_latency_us) {
+ p->in_states->state_array[i].dram_clock_change_latency_us =
+ dml2->config.bbox_overrides.dram_clock_change_latency_us;
+ }
+
+ if (dml2->config.bbox_overrides.fclk_change_latency_us) {
+ p->in_states->state_array[i].fclk_change_latency_us =
+ dml2->config.bbox_overrides.fclk_change_latency_us;
+ }
}
/* DCFCLK stas values are project specific */
@@ -498,8 +515,8 @@ void dml2_translate_socbb_params(const struct dc *in, struct soc_bounding_box_st
out->do_urgent_latency_adjustment = in_soc_params->do_urgent_latency_adjustment;
out->dram_channel_width_bytes = (dml_uint_t)in_soc_params->dram_channel_width_bytes;
out->fabric_datapath_to_dcn_data_return_bytes = (dml_uint_t)in_soc_params->fabric_datapath_to_dcn_data_return_bytes;
- out->gpuvm_min_page_size_kbytes = in_soc_params->gpuvm_min_page_size_bytes * 1024;
- out->hostvm_min_page_size_kbytes = in_soc_params->hostvm_min_page_size_bytes * 1024;
+ out->gpuvm_min_page_size_kbytes = in_soc_params->gpuvm_min_page_size_bytes / 1024;
+ out->hostvm_min_page_size_kbytes = in_soc_params->hostvm_min_page_size_bytes / 1024;
out->mall_allocated_for_dcn_mbytes = (dml_uint_t)in_soc_params->mall_allocated_for_dcn_mbytes;
out->max_avg_dram_bw_use_normal_percent = in_soc_params->max_avg_dram_bw_use_normal_percent;
out->max_avg_fabric_bw_use_normal_percent = in_soc_params->max_avg_fabric_bw_use_normal_percent;
@@ -1040,9 +1057,12 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
}
//Generally these are set by referencing our latest BB/IP params in dcn32_resource.c file
- dml_dispcfg->plane.GPUVMEnable = true;
- dml_dispcfg->plane.GPUVMMaxPageTableLevels = 4;
- dml_dispcfg->plane.HostVMEnable = false;
+ dml_dispcfg->plane.GPUVMEnable = dml2->v20.dml_core_ctx.ip.gpuvm_enable;
+ dml_dispcfg->plane.GPUVMMaxPageTableLevels = dml2->v20.dml_core_ctx.ip.gpuvm_max_page_table_levels;
+ dml_dispcfg->plane.HostVMEnable = dml2->v20.dml_core_ctx.ip.hostvm_enable;
+ dml_dispcfg->plane.HostVMMaxPageTableLevels = dml2->v20.dml_core_ctx.ip.hostvm_max_page_table_levels;
+ if (dml2->v20.dml_core_ctx.ip.hostvm_enable)
+ dml2->v20.dml_core_ctx.policy.AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter;
dml2_populate_pipe_to_plane_index_mapping(dml2, context);
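A quick unit check (page size assumed) connecting the dml2_translate_socbb_params() fix above with the hostvm_min_page_size_kbytes * 1024 consumers in display_mode_core.c:

/*
 * hostvm_min_page_size_bytes = 4096
 *   -> hostvm_min_page_size_kbytes = 4096 / 1024 = 4       (translation helper, kbytes)
 *   -> HostVMMinPageSize           = 4 * 1024    = 4096    (display_mode_core consumers, bytes)
 */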
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 317f90776d97..fe15baa4bf09 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -139,6 +139,8 @@ struct dml2_soc_bbox_overrides {
double urgent_latency_us;
double sr_exit_latency_us;
double sr_enter_plus_exit_latency_us;
+ double sr_exit_z8_time_us;
+ double sr_enter_plus_exit_z8_time_us;
double dram_clock_change_latency_us;
double fclk_change_latency_us;
unsigned int dram_num_chan;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 6a65af8c36b9..5f7f474ef51c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -487,8 +487,7 @@ bool dcn32_set_mcm_luts(
if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
lut_params = &plane_state->blend_tf->pwl;
else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
- plane_state->blend_tf,
+ cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf,
&dpp_base->regamma_params, false);
lut_params = &dpp_base->regamma_params;
}
@@ -503,8 +502,7 @@ bool dcn32_set_mcm_luts(
else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
// TODO: dpp_base replace
ASSERT(false);
- cm_helper_translate_curve_to_hw_format(plane_state->ctx,
- plane_state->in_shaper_func,
+ cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func,
&dpp_base->shaper_params, true);
lut_params = &dpp_base->shaper_params;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index f2fe523f914f..24153b0df503 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -879,7 +879,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
(link->dpcd_sink_ext_caps.bits.oled == 1)) {
dpcd_set_source_specific_data(link);
msleep(post_oui_delay);
- set_cached_brightness_aux(link);
+ set_default_brightness_aux(link);
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 34a4a8c0e18c..f8e01ca09d96 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -2142,8 +2142,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
- set_cached_brightness_aux(link);
-
+ set_default_brightness_aux(link);
if (link->dpcd_sink_ext_caps.bits.oled == 1)
msleep(bl_oled_enable_delay);
edp_backlight_enable_aux(link, true);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index fd8f6f198146..68096d12f52f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -115,7 +115,7 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq
lt_settings->cr_pattern_time = 16000;
/* Fixed VS/PE specific: Toggle link rate */
- apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate);
+ apply_toggle_rate_wa = ((link->vendor_specific_lttpr_link_rate_wa == target_rate) || (link->vendor_specific_lttpr_link_rate_wa == 0));
target_rate = get_dpcd_link_rate(&lt_settings->link_settings);
toggle_rate = (target_rate == 0x6) ? 0xA : 0x6;
@@ -271,7 +271,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
/* Vendor specific: Toggle link rate */
toggle_rate = (rate == 0x6) ? 0xA : 0x6;
- if (link->vendor_specific_lttpr_link_rate_wa == rate) {
+ if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
core_link_write_dpcd(
link,
DP_LINK_BW_SET,
@@ -617,7 +617,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
/* Vendor specific: Toggle link rate */
toggle_rate = (rate == 0x6) ? 0xA : 0x6;
- if (link->vendor_specific_lttpr_link_rate_wa == rate) {
+ if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
core_link_write_dpcd(
link,
DP_LINK_BW_SET,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index e32a7974a4bc..996e4ee99023 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -170,7 +170,6 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
*(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
- link->backlight_settings.backlight_millinits = backlight_millinits;
if (!link->dpcd_caps.panel_luminance_control) {
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
@@ -288,9 +287,9 @@ bool set_default_brightness_aux(struct dc_link *link)
if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
if (!read_default_bl_aux(link, &default_backlight))
default_backlight = 150000;
- // if < 1 nits or > 5000, it might be wrong readback
- if (default_backlight < 1000 || default_backlight > 5000000)
- default_backlight = 150000; //
+ // if > 5000 nits, it might be a wrong readback
+ if (default_backlight > 5000000)
+ default_backlight = 150000;
return edp_set_backlight_level_nits(link, true,
default_backlight, 0);
@@ -298,15 +297,6 @@ bool set_default_brightness_aux(struct dc_link *link)
return false;
}
-bool set_cached_brightness_aux(struct dc_link *link)
-{
- if (link->backlight_settings.backlight_millinits)
- return edp_set_backlight_level_nits(link, true,
- link->backlight_settings.backlight_millinits, 0);
- else
- return set_default_brightness_aux(link);
- return false;
-}
bool edp_is_ilr_optimization_enabled(struct dc_link *link)
{
if (link->dpcd_caps.edp_supported_link_rates_count == 0 || !link->panel_config.ilr.optimize_edp_link_rate)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index ebf7deb63d13..a034288ad75d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -30,7 +30,6 @@
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool set_default_brightness_aux(struct dc_link *link);
-bool set_cached_brightness_aux(struct dc_link *link);
void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
int edp_get_backlight_level(const struct dc_link *link);
bool edp_get_backlight_level_nits(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 22fc4ba96def..38360adc53d9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -1077,6 +1077,7 @@ enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t ti
ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
if (ack)
return DMUB_STATUS_OK;
+ udelay(1);
}
return DMUB_STATUS_TIMEOUT;
}
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
index c92c4b83253f..4bff1ef8a9a6 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h
@@ -6369,6 +6369,8 @@
#define regTCP_INVALIDATE_BASE_IDX 1
#define regTCP_STATUS 0x19a1
#define regTCP_STATUS_BASE_IDX 1
+#define regTCP_CNTL 0x19a2
+#define regTCP_CNTL_BASE_IDX 1
#define regTCP_CNTL2 0x19a3
#define regTCP_CNTL2_BASE_IDX 1
#define regTCP_DEBUG_INDEX 0x19a5
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
index ff30f04be591..7ee3d291120d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h
@@ -781,6 +781,8 @@
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2_BASE_IDX 5
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1 0x420187
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1_BASE_IDX 5
+#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3 0x4201c6
+#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3_BASE_IDX 5
// addressBlock: nbio_nbif0_bif_cfg_dev0_rc_bifcfgdecp
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h
index 7f131999a263..eb8c556d9c93 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h
@@ -24646,6 +24646,35 @@
//BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK 0x00000001L
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK 0x00000008L
+//BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_PAYLOAD_SIZE_MODE__SHIFT 0x8
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x9
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_10BIT_TAG_EN_OVERRIDE__SHIFT 0xb
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_10BIT_TAG_EN_OVERRIDE__SHIFT 0xd
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__MST_DROP_SYNC_FLOOD_EN__SHIFT 0xf
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_PAYLOAD_SIZE_MODE__SHIFT 0x10
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x11
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x14
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x15
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_READ_SAFE_MODE__SHIFT 0x18
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x1b
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV__SHIFT 0x1c
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x1e
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_PAYLOAD_SIZE_MODE_MASK 0x00000100L
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_PRIV_MAX_PAYLOAD_SIZE_MASK 0x00000600L
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_10BIT_TAG_EN_OVERRIDE_MASK 0x00001800L
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_10BIT_TAG_EN_OVERRIDE_MASK 0x00006000L
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__MST_DROP_SYNC_FLOOD_EN_MASK 0x00008000L
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_PAYLOAD_SIZE_MODE_MASK 0x00010000L
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_PRIV_MAX_PAYLOAD_SIZE_MASK 0x000E0000L
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_READ_REQUEST_SIZE_MODE_MASK 0x00100000L
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_PRIV_MAX_READ_REQUEST_SIZE_MASK 0x00E00000L
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_READ_SAFE_MODE_MASK 0x01000000L
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE_MASK 0x08000000L
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV_MASK 0x30000000L
+#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_EXTENDED_TAG_EN_OVERRIDE_MASK 0xC0000000L
// addressBlock: nbio_nbif0_bif_cfg_dev0_rc_bifcfgdecp
//BIF_CFG_DEV0_RC0_VENDOR_ID
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index cd3c40a86029..0d1209f2cf31 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -444,6 +444,7 @@ struct amd_pm_funcs {
struct dpm_clocks *clock_table);
int (*get_smu_prv_buf_details)(void *handle, void **addr, size_t *size);
void (*pm_compute_clocks)(void *handle);
+ int (*notify_rlc_state)(void *handle, bool en);
};
struct metrics_table_header {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 08cb79401410..8ec11da0319f 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -181,6 +181,24 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
return ret;
}
+int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
+{
+ int ret = 0;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (pp_funcs && pp_funcs->notify_rlc_state) {
+ mutex_lock(&adev->pm.mutex);
+
+ ret = pp_funcs->notify_rlc_state(
+ adev->powerplay.pp_handle,
+ en);
+
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return ret;
+}
+
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index feccd2a7120d..482ea30147ab 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -415,6 +415,8 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
enum pp_mp1_state mp1_state);
+int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en);
+
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev);
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 1ead323f1c78..e1a5ee911dbb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1710,6 +1710,16 @@ static int smu_disable_dpms(struct smu_context *smu)
}
}
+ /* Notify SMU that RLC is going to be off, to stop RLC and SMU interaction;
+ * otherwise SMU will hang while interacting with RLC if RLC is halted.
+ * This is a WA for the Vangogh ASIC that fixes the SMU hang issue.
+ */
+ ret = smu_notify_rlc_state(smu, false);
+ if (ret) {
+ dev_err(adev->dev, "Failed to notify RLC state!\n");
+ return ret;
+ }
+
if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
!((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
!amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 8def291b18bc..23fa71cafb14 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1360,6 +1360,11 @@ struct pptable_funcs {
* management.
*/
int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);
+
+ /**
+ * @notify_rlc_state: Notify RLC power state to SMU.
+ */
+ int (*notify_rlc_state)(struct smu_context *smu, bool en);
};
typedef enum {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 762b31455a0b..2ff6deedef95 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -2193,8 +2193,7 @@ static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clock
return 0;
}
-
-static int vangogh_system_features_control(struct smu_context *smu, bool en)
+static int vangogh_notify_rlc_state(struct smu_context *smu, bool en)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
@@ -2523,7 +2522,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.print_clk_levels = vangogh_common_print_clk_levels,
.set_default_dpm_table = vangogh_set_default_dpm_tables,
.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
- .system_features_control = vangogh_system_features_control,
+ .notify_rlc_state = vangogh_notify_rlc_state,
.feature_is_enabled = smu_cmn_feature_is_enabled,
.set_power_profile_mode = vangogh_set_power_profile_mode,
.get_power_profile_mode = vangogh_get_power_profile_mode,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 1a6675d70a4b..f1440869d1ce 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -257,8 +257,11 @@ static int aldebaran_tables_init(struct smu_context *smu)
}
smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
- if (!smu_table->ecc_table)
+ if (!smu_table->ecc_table) {
+ kfree(smu_table->metrics_table);
+ kfree(smu_table->gpu_metrics_table);
return -ENOMEM;
+ }
return 0;
}
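The aldebaran_tables_init() hunk above frees the earlier allocations inline when the last kzalloc() fails; a minimal standalone sketch (illustrative names) of the same unwind idiom written with goto labels:

#include <linux/errno.h>
#include <linux/slab.h>

struct tables {
	void *metrics;
	void *gpu_metrics;
	void *ecc;
};

static int tables_init(struct tables *t, size_t sz)
{
	t->metrics = kzalloc(sz, GFP_KERNEL);
	if (!t->metrics)
		return -ENOMEM;

	t->gpu_metrics = kzalloc(sz, GFP_KERNEL);
	if (!t->gpu_metrics)
		goto free_metrics;

	t->ecc = kzalloc(sz, GFP_KERNEL);
	if (!t->ecc)
		goto free_gpu_metrics;

	return 0;

free_gpu_metrics:
	kfree(t->gpu_metrics);
free_metrics:
	kfree(t->metrics);
	return -ENOMEM;
}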
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 80b3c3efc006..64766ac69c53 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -97,6 +97,7 @@
#define smu_get_default_config_table_settings(smu, config_table) smu_ppt_funcs(get_default_config_table_settings, -EOPNOTSUPP, smu, config_table)
#define smu_set_config_table(smu, config_table) smu_ppt_funcs(set_config_table, -EOPNOTSUPP, smu, config_table)
#define smu_init_pptable_microcode(smu) smu_ppt_funcs(init_pptable_microcode, 0, smu)
+#define smu_notify_rlc_state(smu, en) smu_ppt_funcs(notify_rlc_state, 0, smu, en)
#endif
#endif
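The smu_notify_rlc_state() wrapper added above follows the driver's optional-callback-with-default pattern; a minimal standalone sketch (illustrative names, not the real smu_ppt_funcs() expansion):

#include <linux/types.h>

struct ppt_funcs {
	/* optional: left NULL on ASICs that do not need the notification */
	int (*notify_rlc_state)(void *smu, bool en);
};

struct smu_ctx {
	const struct ppt_funcs *ppt_funcs;
};

/* Return the callback's result, or the default (0) when it is not implemented */
static int notify_rlc_state(struct smu_ctx *smu, bool en)
{
	if (smu->ppt_funcs && smu->ppt_funcs->notify_rlc_state)
		return smu->ppt_funcs->notify_rlc_state(smu, en);
	return 0;
}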
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index e48823a4f1ed..7f41525f7a6e 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -4,8 +4,6 @@
* Copyright (C) 2017 Broadcom
*/
-#include <linux/device.h>
-
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
@@ -21,7 +19,6 @@ struct panel_bridge {
struct drm_bridge bridge;
struct drm_connector connector;
struct drm_panel *panel;
- struct device_link *link;
u32 connector_type;
};
@@ -63,24 +60,13 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
- struct drm_panel *panel = panel_bridge->panel;
- struct drm_device *drm_dev = bridge->dev;
int ret;
- panel_bridge->link = device_link_add(drm_dev->dev, panel->dev,
- DL_FLAG_STATELESS);
- if (!panel_bridge->link) {
- DRM_ERROR("Failed to add device link between %s and %s\n",
- dev_name(drm_dev->dev), dev_name(panel->dev));
- return -EINVAL;
- }
-
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
if (!bridge->encoder) {
DRM_ERROR("Missing encoder\n");
- device_link_del(panel_bridge->link);
return -ENODEV;
}
@@ -92,7 +78,6 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
panel_bridge->connector_type);
if (ret) {
DRM_ERROR("Failed to initialize connector\n");
- device_link_del(panel_bridge->link);
return ret;
}
@@ -115,8 +100,6 @@ static void panel_bridge_detach(struct drm_bridge *bridge)
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
- device_link_del(panel_bridge->link);
-
/*
* Cleanup the connector if we know it was initialized.
*
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 08c088319652..b80d4e1cc9b7 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Copyright (c) 2022 Red Hat.
*
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 63b709a67471..834a5e28abbe 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -278,7 +278,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
-/*
+/**
* drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
* @dev: drm_device to import into
* @file_priv: drm file-private structure
@@ -292,9 +292,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
*
* Returns 0 on success or a negative error code on failure.
*/
-static int drm_gem_prime_fd_to_handle(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd,
- uint32_t *handle)
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd,
+ uint32_t *handle)
{
struct dma_buf *dma_buf;
struct drm_gem_object *obj;
@@ -360,6 +360,7 @@ out_put:
dma_buf_put(dma_buf);
return ret;
}
+EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -408,7 +409,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
return dmabuf;
}
-/*
+/**
* drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
* @dev: dev to export the buffer from
* @file_priv: drm file-private structure
@@ -421,10 +422,10 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
* The actual exporting from GEM object to a dma-buf is done through the
* &drm_gem_object_funcs.export callback.
*/
-static int drm_gem_prime_handle_to_fd(struct drm_device *dev,
- struct drm_file *file_priv, uint32_t handle,
- uint32_t flags,
- int *prime_fd)
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t flags,
+ int *prime_fd)
{
struct drm_gem_object *obj;
int ret = 0;
@@ -506,6 +507,7 @@ out_unlock:
return ret;
}
+EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -864,9 +866,9 @@ EXPORT_SYMBOL(drm_prime_get_contiguous_size);
* @obj: GEM object to export
* @flags: flags like DRM_CLOEXEC and DRM_RDWR
*
- * This is the implementation of the &drm_gem_object_funcs.export functions
- * for GEM drivers using the PRIME helpers. It is used as the default for
- * drivers that do not set their own.
+ * This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers
+ * using the PRIME helpers. It is used as the default in
+ * drm_gem_prime_handle_to_fd().
*/
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
int flags)
@@ -962,9 +964,10 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
*
- * This is the implementation of the gem_prime_import functions for GEM
- * drivers using the PRIME helpers. It is the default for drivers that do
- * not set their own &drm_driver.gem_prime_import.
+ * This is the implementation of the gem_prime_import functions for GEM drivers
+ * using the PRIME helpers. Drivers can use this as their
+ * &drm_driver.gem_prime_import implementation. It is used as the default
+ * implementation in drm_gem_prime_fd_to_handle().
*
* Drivers must arrange to call drm_prime_gem_destroy() from their
* &drm_gem_object_funcs.free hook when using this function.
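With drm_gem_prime_handle_to_fd() exported above, drivers can reuse the PRIME export path directly; a minimal sketch, assuming the matching declaration is available from <drm/drm_prime.h>:

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_prime.h>

static int export_bo_as_fd(struct drm_device *dev, struct drm_file *file_priv,
			   uint32_t handle, int *prime_fd)
{
	/* DRM_CLOEXEC comes from the DRM uAPI header pulled in above */
	return drm_gem_prime_handle_to_fd(dev, file_priv, handle,
					  DRM_CLOEXEC, prime_fd);
}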
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 28d85e1e858e..a2a806262c9e 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -6853,10 +6853,11 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
+ intel_pre_plane_update(state, crtc);
+
if (!old_crtc_state->hw.active)
continue;
- intel_pre_plane_update(state, crtc);
intel_crtc_disable_planes(state, crtc);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 2c1034578984..2852958dd4e7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -6037,8 +6037,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* (eg. Acer Chromebook C710), so we'll check it only if multiple
* ports are attempting to use the same AUX CH, according to VBT.
*/
- if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) &&
- !intel_digital_port_connected(encoder)) {
+ if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) {
/*
* If this fails, presume the DPCD answer came
* from some other port using the same AUX CH.
@@ -6046,10 +6045,27 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* FIXME maybe cleaner to check this before the
* DPCD read? Would need sort out the VDD handling...
*/
- drm_info(&dev_priv->drm,
- "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
- encoder->base.base.id, encoder->base.name);
- goto out_vdd_off;
+ if (!intel_digital_port_connected(encoder)) {
+ drm_info(&dev_priv->drm,
+ "[ENCODER:%d:%s] HPD is down, disabling eDP\n",
+ encoder->base.base.id, encoder->base.name);
+ goto out_vdd_off;
+ }
+
+ /*
+ * Unfortunately even the HPD based detection fails on
+ * eg. Asus B360M-A (CFL+CNP), so as a last resort fall
+ * back to checking for a VGA branch device. Only do this
+ * on known affected platforms to minimize false positives.
+ */
+ if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
+ (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
+ DP_DWN_STRM_PORT_TYPE_ANALOG) {
+ drm_info(&dev_priv->drm,
+ "[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
+ encoder->base.base.id, encoder->base.name);
+ goto out_vdd_off;
+ }
}
mutex_lock(&dev_priv->drm.mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 118164ddbb2e..833987015b8b 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -41,12 +41,15 @@ void intel_engine_add_user(struct intel_engine_cs *engine)
llist_add(&engine->uabi_llist, &engine->i915->uabi_engines_llist);
}
-static const u8 uabi_classes[] = {
+#define I915_NO_UABI_CLASS ((u16)(-1))
+
+static const u16 uabi_classes[] = {
[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
+ [OTHER_CLASS] = I915_NO_UABI_CLASS, /* Not exposed to users, no uabi class. */
};
static int engine_cmp(void *priv, const struct list_head *A,
@@ -200,6 +203,7 @@ static void engine_rename(struct intel_engine_cs *engine, const char *name, u16
void intel_engines_driver_register(struct drm_i915_private *i915)
{
+ u16 name_instance, other_instance = 0;
struct legacy_ring ring = {};
struct list_head *it, *next;
struct rb_node **p, *prev;
@@ -216,27 +220,28 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
if (intel_gt_has_unrecoverable_error(engine->gt))
continue; /* ignore incomplete engines */
- /*
- * We don't want to expose the GSC engine to the users, but we
- * still rename it so it is easier to identify in the debug logs
- */
- if (engine->id == GSC0) {
- engine_rename(engine, "gsc", 0);
- continue;
- }
-
GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
engine->uabi_class = uabi_classes[engine->class];
+ if (engine->uabi_class == I915_NO_UABI_CLASS) {
+ name_instance = other_instance++;
+ } else {
+ GEM_BUG_ON(engine->uabi_class >=
+ ARRAY_SIZE(i915->engine_uabi_class_count));
+ name_instance =
+ i915->engine_uabi_class_count[engine->uabi_class]++;
+ }
+ engine->uabi_instance = name_instance;
- GEM_BUG_ON(engine->uabi_class >=
- ARRAY_SIZE(i915->engine_uabi_class_count));
- engine->uabi_instance =
- i915->engine_uabi_class_count[engine->uabi_class]++;
-
- /* Replace the internal name with the final user facing name */
+ /*
+ * Replace the internal name with the final user and log facing
+ * name.
+ */
engine_rename(engine,
intel_engine_class_repr(engine->class),
- engine->uabi_instance);
+ name_instance);
+
+ if (engine->uabi_class == I915_NO_UABI_CLASS)
+ continue;
rb_link_node(&engine->uabi_node, prev, p);
rb_insert_color(&engine->uabi_node, &i915->uabi_engines);
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
index 754c6af42f30..10121218f4d3 100644
--- a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
+++ b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
@@ -38,7 +38,7 @@ typedef struct PACKED_REGISTRY_TABLE
{
NvU32 size;
NvU32 numEntries;
- PACKED_REGISTRY_ENTRY entries[0];
+ PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
} PACKED_REGISTRY_TABLE;
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 0f3bd187ede6..280d1d9a559b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -318,8 +318,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
- if (pi < 0)
- pi = i;
+ /* pick the last one as it will be smallest. */
+ pi = i;
+
/* Stop once the buffer is larger than the current page size. */
if (*size >= 1ULL << vmm->page[i].shift)
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index dc44f5c7833f..f6725a5f5bfb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -365,10 +365,8 @@ r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
}
ret = r535_gsp_cmdq_push(gsp, rpc);
- if (ret) {
- mutex_unlock(&gsp->cmdq.mutex);
+ if (ret)
return ERR_PTR(ret);
- }
if (wait) {
msg = r535_gsp_msg_recv(gsp, fn, repc);
@@ -1048,7 +1046,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
char *strings;
int str_offset;
int i;
- size_t rpc_size = sizeof(*rpc) + sizeof(rpc->entries[0]) * NV_GSP_REG_NUM_ENTRIES;
+ size_t rpc_size = struct_size(rpc, entries, NV_GSP_REG_NUM_ENTRIES);
/* add strings + null terminator */
for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++)
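Illustrative sketch (not part of the patch) of the struct_size()/__counted_by() pattern the two Nouveau hunks above adopt; the struct and count only loosely mirror PACKED_REGISTRY_TABLE:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct packed_table {
	u32 size;
	u32 num_entries;
	u32 entries[] __counted_by(num_entries);
};

static struct packed_table *alloc_table(u32 n)
{
	/* struct_size() = sizeof(*t) + n * sizeof(t->entries[0]), with overflow checking */
	struct packed_table *t = kzalloc(struct_size(t, entries, n), GFP_KERNEL);

	if (t)
		t->num_entries = n;
	return t;
}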
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index be8f48e3c1db..c4c0f08e9202 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1764,6 +1764,7 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_LPM,
.init_cmds = starry_qfh032011_53g_init_cmd,
+ .lp11_before_reset = true,
};
static const struct drm_display_mode starry_himax83102_j02_default_mode = {
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 9b9a7eb1bc60..a189ce236328 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -1254,9 +1254,9 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n");
pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info);
- if (!pinfo->dsi[1]) {
+ if (IS_ERR(pinfo->dsi[1])) {
dev_err(dev, "cannot get secondary DSI device\n");
- return -ENODEV;
+ return PTR_ERR(pinfo->dsi[1]);
}
}
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index a3414afe11b0..23cb80d62a9a 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1522,6 +1522,15 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
{
struct qi_desc desc;
+ /*
+ * VT-d spec, section 4.3:
+ *
+ * Software is recommended to not submit any Device-TLB invalidation
+ * requests while address remapping hardware is disabled.
+ */
+ if (!(iommu->gcmd & DMA_GCMD_TE))
+ return;
+
if (mask) {
addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
@@ -1587,6 +1596,15 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
+ /*
+ * VT-d spec, section 4.3:
+ *
+ * Software is recommended to not submit any Device-TLB invalidation
+ * requests while address remapping hardware is disabled.
+ */
+ if (!(iommu->gcmd & DMA_GCMD_TE))
+ return;
+
desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
QI_DEV_IOTLB_PFSID(pfsid);
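Both device-TLB flush paths now bail out early when translation is disabled, per the quoted spec recommendation. A standalone sketch of the same "check the global enable bit before queueing an invalidation" guard; the structures and bit name are illustrative:

```c
#include <stdint.h>
#include <stdio.h>

#define GCMD_TE (1u << 31)	/* translation-enable bit, for the sketch */

struct fake_iommu { uint32_t gcmd; };

static void queue_dev_tlb_flush(struct fake_iommu *iommu, uint16_t sid)
{
	/* Mirror of the new guard: never submit device-TLB invalidations
	 * while the remapping hardware is disabled. */
	if (!(iommu->gcmd & GCMD_TE)) {
		printf("skip flush for sid %#x: translation disabled\n", sid);
		return;
	}
	printf("queue flush descriptor for sid %#x\n", sid);
}

int main(void)
{
	struct fake_iommu off = { .gcmd = 0 }, on = { .gcmd = GCMD_TE };

	queue_dev_tlb_flush(&off, 0x10);
	queue_dev_tlb_flush(&on, 0x10);
	return 0;
}
```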
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 3531b956556c..897159dba47d 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -299,7 +299,7 @@ static int iommu_skip_te_disable;
#define IDENTMAP_AZALIA 4
const struct iommu_ops intel_iommu_ops;
-const struct iommu_dirty_ops intel_dirty_ops;
+static const struct iommu_dirty_ops intel_dirty_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
{
@@ -2207,6 +2207,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
attr |= DMA_FL_PTE_DIRTY;
}
+ domain->has_mappings = true;
+
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
while (nr_pages > 0) {
@@ -2490,7 +2492,8 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
return ret;
}
- iommu_enable_pci_caps(info);
+ if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
+ iommu_enable_pci_caps(info);
return 0;
}
@@ -3925,8 +3928,8 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
*/
static void domain_context_clear(struct device_domain_info *info)
{
- if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
- return;
+ if (!dev_is_pci(info->dev))
+ domain_context_clear_one(info, info->bus, info->devfn);
pci_for_each_dma_alias(to_pci_dev(info->dev),
&domain_context_clear_one_cb, info);
@@ -4360,7 +4363,8 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
return true;
spin_lock_irqsave(&dmar_domain->lock, flags);
- if (!domain_support_force_snooping(dmar_domain)) {
+ if (!domain_support_force_snooping(dmar_domain) ||
+ (!dmar_domain->use_first_level && dmar_domain->has_mappings)) {
spin_unlock_irqrestore(&dmar_domain->lock, flags);
return false;
}
@@ -4925,7 +4929,7 @@ static int intel_iommu_read_and_clear_dirty(struct iommu_domain *domain,
return 0;
}
-const struct iommu_dirty_ops intel_dirty_ops = {
+static const struct iommu_dirty_ops intel_dirty_ops = {
.set_dirty_tracking = intel_iommu_set_dirty_tracking,
.read_and_clear_dirty = intel_iommu_read_and_clear_dirty,
};
@@ -5073,7 +5077,7 @@ static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
ver = (dev->device >> 8) & 0xff;
if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
ver != 0x4e && ver != 0x8a && ver != 0x98 &&
- ver != 0x9a && ver != 0xa7)
+ ver != 0x9a && ver != 0xa7 && ver != 0x7d)
return;
if (risky_device(dev))
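Tying the iommu.c pieces together: __domain_mapping() now records has_mappings, and enforce_cache_coherency() refuses to enable force snooping on a second-level domain that already has mappings, since existing PTEs would not pick up the snoop bit retroactively. A simplified standalone sketch of that decision (the real code also checks per-device snoop support under the domain lock, which is omitted here):

```c
#include <stdbool.h>
#include <stdio.h>

struct demo_domain {
	bool use_first_level;
	bool has_mappings;
	bool force_snooping;
};

static void map_range(struct demo_domain *d)
{
	d->has_mappings = true;	/* set on the first successful map */
}

static bool enforce_cache_coherency(struct demo_domain *d)
{
	if (d->force_snooping)
		return true;
	/* Already-written second-level PTEs won't gain the snoop bit,
	 * so refuse once mappings exist (mirrors the new check). */
	if (!d->use_first_level && d->has_mappings)
		return false;
	d->force_snooping = true;
	return true;
}

int main(void)
{
	struct demo_domain fresh = { 0 }, mapped = { 0 };

	map_range(&mapped);
	printf("fresh domain:  %s\n", enforce_cache_coherency(&fresh) ? "ok" : "refused");
	printf("mapped domain: %s\n", enforce_cache_coherency(&mapped) ? "ok" : "refused");
	return 0;
}
```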
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 65d37a138c75..ce030c5b5772 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -602,6 +602,9 @@ struct dmar_domain {
*/
u8 dirty_tracking:1; /* Dirty tracking is enabled */
u8 nested_parent:1; /* Has other domains nested on it */
+ u8 has_mappings:1; /* Has mappings configured through
+ * iommu_map() interface.
+ */
spinlock_t lock; /* Protect device tracking lists */
struct list_head devices; /* all devices' list */
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 50a481c895b8..ac12f76c1212 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -216,6 +216,27 @@ static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
rcu_read_unlock();
}
+static void intel_flush_svm_all(struct intel_svm *svm)
+{
+ struct device_domain_info *info;
+ struct intel_svm_dev *sdev;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdev, &svm->devs, list) {
+ info = dev_iommu_priv_get(sdev->dev);
+
+ qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
+ if (info->ats_enabled) {
+ qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
+ svm->pasid, sdev->qdep,
+ 0, 64 - VTD_PAGE_SHIFT);
+ quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
+ svm->pasid, sdev->qdep);
+ }
+ }
+ rcu_read_unlock();
+}
+
/* Pages have been freed at this point */
static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
struct mm_struct *mm,
@@ -223,6 +244,11 @@ static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
{
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+ if (start == 0 && end == -1UL) {
+ intel_flush_svm_all(svm);
+ return;
+ }
+
intel_flush_svm_range(svm, start,
(end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}
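The notifier hook above special-cases the (start == 0, end == -1UL) "invalidate everything" notification and performs one PASID-wide flush instead of walking the whole address space as a range. A standalone sketch of that dispatch; the flush bodies are placeholders:

```c
#include <stdio.h>

#define FLUSH_ALL (~0UL)
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static void flush_pasid_wide(void)
{
	printf("PASID-wide IOTLB + dev-IOTLB flush\n");
}

static void flush_range(unsigned long start, unsigned long npages)
{
	printf("range flush: start=%#lx pages=%lu\n", start, npages);
}

static void invalidate_secondary_tlbs(unsigned long start, unsigned long end)
{
	/* An exit_mmap()-style notification covers the whole address space;
	 * turn it into a single full flush rather than a huge range walk. */
	if (start == 0 && end == FLUSH_ALL) {
		flush_pasid_wide();
		return;
	}
	flush_range(start, (end - start + PAGE_SIZE - 1) >> PAGE_SHIFT);
}

int main(void)
{
	invalidate_secondary_tlbs(0x1000, 0x9000);
	invalidate_secondary_tlbs(0, FLUSH_ALL);
	return 0;
}
```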
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f17a1113f3d6..33e2a9b5d339 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -485,11 +485,12 @@ static void iommu_deinit_device(struct device *dev)
dev_iommu_free(dev);
}
+DEFINE_MUTEX(iommu_probe_device_lock);
+
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_group *group;
- static DEFINE_MUTEX(iommu_probe_device_lock);
struct group_device *gdev;
int ret;
@@ -502,17 +503,15 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
* probably be able to use device_lock() here to minimise the scope,
* but for now enforcing a simple global ordering is fine.
*/
- mutex_lock(&iommu_probe_device_lock);
+ lockdep_assert_held(&iommu_probe_device_lock);
/* Device is probed already if in a group */
- if (dev->iommu_group) {
- ret = 0;
- goto out_unlock;
- }
+ if (dev->iommu_group)
+ return 0;
ret = iommu_init_device(dev, ops);
if (ret)
- goto out_unlock;
+ return ret;
group = dev->iommu_group;
gdev = iommu_group_alloc_device(group, dev);
@@ -548,7 +547,6 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
list_add_tail(&group->entry, group_list);
}
mutex_unlock(&group->mutex);
- mutex_unlock(&iommu_probe_device_lock);
if (dev_is_pci(dev))
iommu_dma_set_pci_32bit_workaround(dev);
@@ -562,8 +560,6 @@ err_put_group:
iommu_deinit_device(dev);
mutex_unlock(&group->mutex);
iommu_group_put(group);
-out_unlock:
- mutex_unlock(&iommu_probe_device_lock);
return ret;
}
@@ -573,7 +569,9 @@ int iommu_probe_device(struct device *dev)
const struct iommu_ops *ops;
int ret;
+ mutex_lock(&iommu_probe_device_lock);
ret = __iommu_probe_device(dev, NULL);
+ mutex_unlock(&iommu_probe_device_lock);
if (ret)
return ret;
@@ -1788,7 +1786,7 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
*/
if (ops->default_domain) {
if (req_type)
- return NULL;
+ return ERR_PTR(-EINVAL);
return ops->default_domain;
}
@@ -1797,15 +1795,15 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
/* The driver gave no guidance on what type to use, try the default */
dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type);
- if (dom)
+ if (!IS_ERR(dom))
return dom;
/* Otherwise IDENTITY and DMA_FQ defaults will try DMA */
if (iommu_def_domain_type == IOMMU_DOMAIN_DMA)
- return NULL;
+ return ERR_PTR(-EINVAL);
dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA);
- if (!dom)
- return NULL;
+ if (IS_ERR(dom))
+ return dom;
pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
iommu_def_domain_type, group->name);
@@ -1822,7 +1820,9 @@ static int probe_iommu_group(struct device *dev, void *data)
struct list_head *group_list = data;
int ret;
+ mutex_lock(&iommu_probe_device_lock);
ret = __iommu_probe_device(dev, group_list);
+ mutex_unlock(&iommu_probe_device_lock);
if (ret == -ENODEV)
ret = 0;
@@ -2094,10 +2094,17 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
else if (ops->domain_alloc)
domain = ops->domain_alloc(alloc_type);
else
- return NULL;
+ return ERR_PTR(-EOPNOTSUPP);
+ /*
+ * Many domain_alloc ops now return ERR_PTR, so make things easier for the
+ * driver by accepting ERR_PTR from all domain_alloc ops instead of
+ * having two rules.
+ */
+ if (IS_ERR(domain))
+ return domain;
if (!domain)
- return NULL;
+ return ERR_PTR(-ENOMEM);
domain->type = type;
/*
@@ -2110,9 +2117,14 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
if (!domain->ops)
domain->ops = ops->default_domain_ops;
- if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
- iommu_domain_free(domain);
- domain = NULL;
+ if (iommu_is_dma_domain(domain)) {
+ int rc;
+
+ rc = iommu_get_dma_cookie(domain);
+ if (rc) {
+ iommu_domain_free(domain);
+ return ERR_PTR(rc);
+ }
}
return domain;
}
@@ -2129,10 +2141,15 @@ __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
+ struct iommu_domain *domain;
+
if (bus == NULL || bus->iommu_ops == NULL)
return NULL;
- return __iommu_domain_alloc(bus->iommu_ops, NULL,
+ domain = __iommu_domain_alloc(bus->iommu_ops, NULL,
IOMMU_DOMAIN_UNMANAGED);
+ if (IS_ERR(domain))
+ return NULL;
+ return domain;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);
@@ -3041,8 +3058,8 @@ static int iommu_setup_default_domain(struct iommu_group *group,
return -EINVAL;
dom = iommu_group_alloc_default_domain(group, req_type);
- if (!dom)
- return -ENODEV;
+ if (IS_ERR(dom))
+ return PTR_ERR(dom);
if (group->default_domain == dom)
return 0;
@@ -3243,21 +3260,23 @@ void iommu_device_unuse_default_domain(struct device *dev)
static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
{
+ struct iommu_domain *domain;
+
if (group->blocking_domain)
return 0;
- group->blocking_domain =
- __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED);
- if (!group->blocking_domain) {
+ domain = __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED);
+ if (IS_ERR(domain)) {
/*
* For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
* create an empty domain instead.
*/
- group->blocking_domain = __iommu_group_domain_alloc(
- group, IOMMU_DOMAIN_UNMANAGED);
- if (!group->blocking_domain)
- return -EINVAL;
+ domain = __iommu_group_domain_alloc(group,
+ IOMMU_DOMAIN_UNMANAGED);
+ if (IS_ERR(domain))
+ return PTR_ERR(domain);
}
+ group->blocking_domain = domain;
return 0;
}
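The common thread in this file is moving the allocation paths from "NULL on failure" to ERR_PTR() codes that callers unwrap with IS_ERR()/PTR_ERR(), with only the exported iommu_domain_alloc() keeping its NULL contract. A self-contained sketch of the convention using local stand-ins for the ERR_PTR helpers:

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Local stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers. */
static inline void *err_ptr(long err)      { return (void *)err; }
static inline long  ptr_err(const void *p) { return (long)p; }
static inline int   is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

struct domain { int type; };

static struct domain *domain_alloc(int type, int simulate_failure)
{
	struct domain *d;

	if (simulate_failure)
		return err_ptr(-EOPNOTSUPP);	/* an error code, never NULL */
	d = calloc(1, sizeof(*d));
	if (!d)
		return err_ptr(-ENOMEM);
	d->type = type;
	return d;
}

int main(void)
{
	struct domain *ok  = domain_alloc(1, 0);
	struct domain *bad = domain_alloc(1, 1);

	if (is_err(bad))
		printf("alloc failed: %ld\n", ptr_err(bad));	/* negative errno */
	if (!is_err(ok)) {
		printf("alloc ok, type %d\n", ok->type);
		free(ok);
	}
	return 0;
}
```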
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 157b286e36bf..35ba090f3b5e 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -112,16 +112,20 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
const u32 *id)
{
const struct iommu_ops *ops = NULL;
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct iommu_fwspec *fwspec;
int err = NO_IOMMU;
if (!master_np)
return NULL;
+ /* Serialise to make dev->iommu stable under our potential fwspec */
+ mutex_lock(&iommu_probe_device_lock);
+ fwspec = dev_iommu_fwspec_get(dev);
if (fwspec) {
- if (fwspec->ops)
+ if (fwspec->ops) {
+ mutex_unlock(&iommu_probe_device_lock);
return fwspec->ops;
-
+ }
/* In the deferred case, start again from scratch */
iommu_fwspec_free(dev);
}
@@ -155,6 +159,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
fwspec = dev_iommu_fwspec_get(dev);
ops = fwspec->ops;
}
+ mutex_unlock(&iommu_probe_device_lock);
+
/*
* If we have reason to believe the IOMMU driver missed the initial
* probe for dev, replay it to get things in order.
@@ -191,7 +197,7 @@ iommu_resv_region_get_type(struct device *dev,
if (start == phys->start && end == phys->end)
return IOMMU_RESV_DIRECT;
- dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", &phys,
+ dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", phys,
&start, &end);
return IOMMU_RESV_RESERVED;
}
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 974b84f6bd6a..ba1be15cfd8e 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -75,19 +75,6 @@ static ssize_t max_brightness_show(struct device *dev,
}
static DEVICE_ATTR_RO(max_brightness);
-static ssize_t color_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const char *color_text = "invalid";
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
-
- if (led_cdev->color < LED_COLOR_ID_MAX)
- color_text = led_colors[led_cdev->color];
-
- return sysfs_emit(buf, "%s\n", color_text);
-}
-static DEVICE_ATTR_RO(color);
-
#ifdef CONFIG_LEDS_TRIGGERS
static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
static struct bin_attribute *led_trigger_bin_attrs[] = {
@@ -102,7 +89,6 @@ static const struct attribute_group led_trigger_group = {
static struct attribute *led_class_attrs[] = {
&dev_attr_brightness.attr,
&dev_attr_max_brightness.attr,
- &dev_attr_color.attr,
NULL,
};
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index de3019972b35..196cdacce38f 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -293,16 +293,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
w->journal = NULL;
}
-static void btree_node_write_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(btree_node_write_unlock)
{
- struct btree *b = container_of(cl, struct btree, io);
+ closure_type(b, struct btree, io);
up(&b->io_mutex);
}
-static void __btree_node_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(__btree_node_write_done)
{
- struct btree *b = container_of(cl, struct btree, io);
+ closure_type(b, struct btree, io);
struct btree_write *w = btree_prev_write(b);
bch_bbio_free(b->bio, b->c);
@@ -315,12 +315,12 @@ static void __btree_node_write_done(struct closure *cl)
closure_return_with_destructor(cl, btree_node_write_unlock);
}
-static void btree_node_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(btree_node_write_done)
{
- struct btree *b = container_of(cl, struct btree, io);
+ closure_type(b, struct btree, io);
bio_free_pages(b->bio);
- __btree_node_write_done(cl);
+ __btree_node_write_done(&cl->work);
}
static void btree_node_write_endio(struct bio *bio)
@@ -1522,7 +1522,7 @@ out_nocoalesce:
bch_keylist_free(&keylist);
for (i = 0; i < nodes; i++)
- if (!IS_ERR(new_nodes[i])) {
+ if (!IS_ERR_OR_NULL(new_nodes[i])) {
btree_node_free(new_nodes[i]);
rw_unlock(true, new_nodes[i]);
}
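This file (and the bcache files that follow) converts closure callbacks to the workqueue-based prototype, so CLOSURE_CALLBACK()/closure_type() hide a double container_of() from the work item back to the enclosing object. A simplified standalone sketch of roughly what the pair of helpers boils down to; the real definitions live in <linux/closure.h> and differ in detail:

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };
struct closure     { struct work_struct work; };

/* Simplified equivalents of the helpers used in the hunks above. */
#define CLOSURE_CALLBACK(name) void name(struct work_struct *ws)
#define closure_type(name, type, member)				\
	struct closure *cl = container_of(ws, struct closure, work);	\
	type *name = container_of(cl, type, member)

struct btree_like { int level; struct closure io; };

static CLOSURE_CALLBACK(node_write_unlock)
{
	closure_type(b, struct btree_like, io);	/* ws -> closure -> btree */

	printf("unlocked node at level %d\n", b->level);
}

int main(void)
{
	struct btree_like b = { .level = 2, .io = { { 0 } } };

	node_write_unlock(&b.io.work);	/* callers now pass &cl->work */
	return 0;
}
```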
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index c182c21de2e8..7ff14bd2feb8 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -723,11 +723,11 @@ static void journal_write_endio(struct bio *bio)
closure_put(&w->c->journal.io);
}
-static void journal_write(struct closure *cl);
+static CLOSURE_CALLBACK(journal_write);
-static void journal_write_done(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_done)
{
- struct journal *j = container_of(cl, struct journal, io);
+ closure_type(j, struct journal, io);
struct journal_write *w = (j->cur == j->w)
? &j->w[1]
: &j->w[0];
@@ -736,19 +736,19 @@ static void journal_write_done(struct closure *cl)
continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}
-static void journal_write_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_unlock)
__releases(&c->journal.lock)
{
- struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+ closure_type(c, struct cache_set, journal.io);
c->journal.io_in_flight = 0;
spin_unlock(&c->journal.lock);
}
-static void journal_write_unlocked(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write_unlocked)
__releases(c->journal.lock)
{
- struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+ closure_type(c, struct cache_set, journal.io);
struct cache *ca = c->cache;
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
@@ -823,12 +823,12 @@ static void journal_write_unlocked(struct closure *cl)
continue_at(cl, journal_write_done, NULL);
}
-static void journal_write(struct closure *cl)
+static CLOSURE_CALLBACK(journal_write)
{
- struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+ closure_type(c, struct cache_set, journal.io);
spin_lock(&c->journal.lock);
- journal_write_unlocked(cl);
+ journal_write_unlocked(&cl->work);
}
static void journal_try_write(struct cache_set *c)
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 9f32901fdad1..ebd500bdf0b2 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -35,16 +35,16 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
/* Moving GC - IO loop */
-static void moving_io_destructor(struct closure *cl)
+static CLOSURE_CALLBACK(moving_io_destructor)
{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
+ closure_type(io, struct moving_io, cl);
kfree(io);
}
-static void write_moving_finish(struct closure *cl)
+static CLOSURE_CALLBACK(write_moving_finish)
{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
+ closure_type(io, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
bio_free_pages(bio);
@@ -89,9 +89,9 @@ static void moving_init(struct moving_io *io)
bch_bio_map(bio, NULL);
}
-static void write_moving(struct closure *cl)
+static CLOSURE_CALLBACK(write_moving)
{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
+ closure_type(io, struct moving_io, cl);
struct data_insert_op *op = &io->op;
if (!op->status) {
@@ -113,9 +113,9 @@ static void write_moving(struct closure *cl)
continue_at(cl, write_moving_finish, op->wq);
}
-static void read_moving_submit(struct closure *cl)
+static CLOSURE_CALLBACK(read_moving_submit)
{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
+ closure_type(io, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index a9b1f3896249..83d112bd2b1c 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -25,7 +25,7 @@
struct kmem_cache *bch_search_cache;
-static void bch_data_insert_start(struct closure *cl);
+static CLOSURE_CALLBACK(bch_data_insert_start);
static unsigned int cache_mode(struct cached_dev *dc)
{
@@ -55,9 +55,9 @@ static void bio_csum(struct bio *bio, struct bkey *k)
/* Insert data into cache */
-static void bch_data_insert_keys(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_keys)
{
- struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ closure_type(op, struct data_insert_op, cl);
atomic_t *journal_ref = NULL;
struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
int ret;
@@ -136,9 +136,9 @@ out:
continue_at(cl, bch_data_insert_keys, op->wq);
}
-static void bch_data_insert_error(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_error)
{
- struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ closure_type(op, struct data_insert_op, cl);
/*
* Our data write just errored, which means we've got a bunch of keys to
@@ -163,7 +163,7 @@ static void bch_data_insert_error(struct closure *cl)
op->insert_keys.top = dst;
- bch_data_insert_keys(cl);
+ bch_data_insert_keys(&cl->work);
}
static void bch_data_insert_endio(struct bio *bio)
@@ -184,9 +184,9 @@ static void bch_data_insert_endio(struct bio *bio)
bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}
-static void bch_data_insert_start(struct closure *cl)
+static CLOSURE_CALLBACK(bch_data_insert_start)
{
- struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ closure_type(op, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
if (op->bypass)
@@ -305,16 +305,16 @@ err:
* If op->bypass is true, instead of inserting the data it invalidates the
* region of the cache represented by op->bio and op->inode.
*/
-void bch_data_insert(struct closure *cl)
+CLOSURE_CALLBACK(bch_data_insert)
{
- struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ closure_type(op, struct data_insert_op, cl);
trace_bcache_write(op->c, op->inode, op->bio,
op->writeback, op->bypass);
bch_keylist_init(&op->insert_keys);
bio_get(op->bio);
- bch_data_insert_start(cl);
+ bch_data_insert_start(&cl->work);
}
/*
@@ -575,9 +575,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
return n == bio ? MAP_DONE : MAP_CONTINUE;
}
-static void cache_lookup(struct closure *cl)
+static CLOSURE_CALLBACK(cache_lookup)
{
- struct search *s = container_of(cl, struct search, iop.cl);
+ closure_type(s, struct search, iop.cl);
struct bio *bio = &s->bio.bio;
struct cached_dev *dc;
int ret;
@@ -698,9 +698,9 @@ static void do_bio_hook(struct search *s,
bio_cnt_set(bio, 3);
}
-static void search_free(struct closure *cl)
+static CLOSURE_CALLBACK(search_free)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
atomic_dec(&s->iop.c->search_inflight);
@@ -749,20 +749,20 @@ static inline struct search *search_alloc(struct bio *bio,
/* Cached devices */
-static void cached_dev_bio_complete(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_bio_complete)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
cached_dev_put(dc);
- search_free(cl);
+ search_free(&cl->work);
}
/* Process reads */
-static void cached_dev_read_error_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_error_done)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
if (s->iop.replace_collision)
bch_mark_cache_miss_collision(s->iop.c, s->d);
@@ -770,12 +770,12 @@ static void cached_dev_read_error_done(struct closure *cl)
if (s->iop.bio)
bio_free_pages(s->iop.bio);
- cached_dev_bio_complete(cl);
+ cached_dev_bio_complete(&cl->work);
}
-static void cached_dev_read_error(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_error)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
struct bio *bio = &s->bio.bio;
/*
@@ -801,9 +801,9 @@ static void cached_dev_read_error(struct closure *cl)
continue_at(cl, cached_dev_read_error_done, NULL);
}
-static void cached_dev_cache_miss_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_cache_miss_done)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
struct bcache_device *d = s->d;
if (s->iop.replace_collision)
@@ -812,13 +812,13 @@ static void cached_dev_cache_miss_done(struct closure *cl)
if (s->iop.bio)
bio_free_pages(s->iop.bio);
- cached_dev_bio_complete(cl);
+ cached_dev_bio_complete(&cl->work);
closure_put(&d->cl);
}
-static void cached_dev_read_done(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_done)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
/*
@@ -858,9 +858,9 @@ static void cached_dev_read_done(struct closure *cl)
continue_at(cl, cached_dev_cache_miss_done, NULL);
}
-static void cached_dev_read_done_bh(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_read_done_bh)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
bch_mark_cache_accounting(s->iop.c, s->d,
@@ -955,13 +955,13 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
/* Process writes */
-static void cached_dev_write_complete(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_write_complete)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
up_read_non_owner(&dc->writeback_lock);
- cached_dev_bio_complete(cl);
+ cached_dev_bio_complete(&cl->work);
}
static void cached_dev_write(struct cached_dev *dc, struct search *s)
@@ -1048,9 +1048,9 @@ insert_data:
continue_at(cl, cached_dev_write_complete, NULL);
}
-static void cached_dev_nodata(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_nodata)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
struct bio *bio = &s->bio.bio;
if (s->iop.flush_journal)
@@ -1265,9 +1265,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
return MAP_CONTINUE;
}
-static void flash_dev_nodata(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_nodata)
{
- struct search *s = container_of(cl, struct search, cl);
+ closure_type(s, struct search, cl);
if (s->iop.flush_journal)
bch_journal_meta(s->iop.c, cl);
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 38ab4856eaab..46bbef00aebb 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -34,7 +34,7 @@ struct data_insert_op {
};
unsigned int bch_get_congested(const struct cache_set *c);
-void bch_data_insert(struct closure *cl);
+CLOSURE_CALLBACK(bch_data_insert);
void bch_cached_dev_request_init(struct cached_dev *dc);
void cached_dev_submit_bio(struct bio *bio);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index bfe1685dbae5..1402096b8076 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -327,9 +327,9 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
submit_bio(bio);
}
-static void bch_write_bdev_super_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(bch_write_bdev_super_unlock)
{
- struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+ closure_type(dc, struct cached_dev, sb_write);
up(&dc->sb_write_mutex);
}
@@ -363,9 +363,9 @@ static void write_super_endio(struct bio *bio)
closure_put(&ca->set->sb_write);
}
-static void bcache_write_super_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(bcache_write_super_unlock)
{
- struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+ closure_type(c, struct cache_set, sb_write);
up(&c->sb_write_mutex);
}
@@ -407,9 +407,9 @@ static void uuid_endio(struct bio *bio)
closure_put(cl);
}
-static void uuid_io_unlock(struct closure *cl)
+static CLOSURE_CALLBACK(uuid_io_unlock)
{
- struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+ closure_type(c, struct cache_set, uuid_write);
up(&c->uuid_write_mutex);
}
@@ -1344,9 +1344,9 @@ void bch_cached_dev_release(struct kobject *kobj)
module_put(THIS_MODULE);
}
-static void cached_dev_free(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_free)
{
- struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+ closure_type(dc, struct cached_dev, disk.cl);
if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
cancel_writeback_rate_update_dwork(dc);
@@ -1378,9 +1378,9 @@ static void cached_dev_free(struct closure *cl)
kobject_put(&dc->disk.kobj);
}
-static void cached_dev_flush(struct closure *cl)
+static CLOSURE_CALLBACK(cached_dev_flush)
{
- struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
+ closure_type(dc, struct cached_dev, disk.cl);
struct bcache_device *d = &dc->disk;
mutex_lock(&bch_register_lock);
@@ -1499,9 +1499,9 @@ void bch_flash_dev_release(struct kobject *kobj)
kfree(d);
}
-static void flash_dev_free(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_free)
{
- struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+ closure_type(d, struct bcache_device, cl);
mutex_lock(&bch_register_lock);
atomic_long_sub(bcache_dev_sectors_dirty(d),
@@ -1512,9 +1512,9 @@ static void flash_dev_free(struct closure *cl)
kobject_put(&d->kobj);
}
-static void flash_dev_flush(struct closure *cl)
+static CLOSURE_CALLBACK(flash_dev_flush)
{
- struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+ closure_type(d, struct bcache_device, cl);
mutex_lock(&bch_register_lock);
bcache_device_unlink(d);
@@ -1670,9 +1670,9 @@ void bch_cache_set_release(struct kobject *kobj)
module_put(THIS_MODULE);
}
-static void cache_set_free(struct closure *cl)
+static CLOSURE_CALLBACK(cache_set_free)
{
- struct cache_set *c = container_of(cl, struct cache_set, cl);
+ closure_type(c, struct cache_set, cl);
struct cache *ca;
debugfs_remove(c->debug);
@@ -1711,9 +1711,9 @@ static void cache_set_free(struct closure *cl)
kobject_put(&c->kobj);
}
-static void cache_set_flush(struct closure *cl)
+static CLOSURE_CALLBACK(cache_set_flush)
{
- struct cache_set *c = container_of(cl, struct cache_set, caching);
+ closure_type(c, struct cache_set, caching);
struct cache *ca = c->cache;
struct btree *b;
@@ -1808,9 +1808,9 @@ static void conditional_stop_bcache_device(struct cache_set *c,
}
}
-static void __cache_set_unregister(struct closure *cl)
+static CLOSURE_CALLBACK(__cache_set_unregister)
{
- struct cache_set *c = container_of(cl, struct cache_set, caching);
+ closure_type(c, struct cache_set, caching);
struct cached_dev *dc;
struct bcache_device *d;
size_t i;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 3accfdaee6b1..8827a6f130ad 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -341,16 +341,16 @@ static void dirty_init(struct keybuf_key *w)
bch_bio_map(bio, NULL);
}
-static void dirty_io_destructor(struct closure *cl)
+static CLOSURE_CALLBACK(dirty_io_destructor)
{
- struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+ closure_type(io, struct dirty_io, cl);
kfree(io);
}
-static void write_dirty_finish(struct closure *cl)
+static CLOSURE_CALLBACK(write_dirty_finish)
{
- struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+ closure_type(io, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
struct cached_dev *dc = io->dc;
@@ -400,9 +400,9 @@ static void dirty_endio(struct bio *bio)
closure_put(&io->cl);
}
-static void write_dirty(struct closure *cl)
+static CLOSURE_CALLBACK(write_dirty)
{
- struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+ closure_type(io, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
struct cached_dev *dc = io->dc;
@@ -462,9 +462,9 @@ static void read_dirty_endio(struct bio *bio)
dirty_endio(bio);
}
-static void read_dirty_submit(struct closure *cl)
+static CLOSURE_CALLBACK(read_dirty_submit)
{
- struct dirty_io *io = container_of(cl, struct dirty_io, cl);
+ closure_type(io, struct dirty_io, cl);
closure_bio_submit(io->dc->disk.c, &io->bio, cl);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 120153e44ae0..f57fb821528d 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -434,7 +434,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b
remaining_size = size;
- order = MAX_ORDER - 1;
+ order = MAX_ORDER;
while (remaining_size) {
struct page *pages;
unsigned size_to_add, to_copy;
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 2099c755119e..b475200d8586 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -24,7 +24,8 @@ bool verity_fec_is_enabled(struct dm_verity *v)
*/
static inline struct dm_verity_fec_io *fec_io(struct dm_verity_io *io)
{
- return (struct dm_verity_fec_io *) verity_io_digest_end(io->v, io);
+ return (struct dm_verity_fec_io *)
+ ((char *)io + io->v->ti->per_io_data_size - sizeof(struct dm_verity_fec_io));
}
/*
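fec_io() now derives the FEC context from the target's per-io data size, on the assumption that dm_verity_fec_io sits at the very end of the per-bio data area. A standalone sketch of that "structure at the tail of a per-request blob" arithmetic, with invented stand-in types:

```c
#include <stdio.h>
#include <stdlib.h>

struct io_hdr { int flags; };		/* stand-in for dm_verity_io */
struct fec_io { int erasures; };	/* stand-in for dm_verity_fec_io */

/* The FEC context lives at the end of the per-io data area, so it can be
 * located from the io pointer and the total per-io size alone. */
static struct fec_io *fec_io_of(struct io_hdr *io, size_t per_io_data_size)
{
	return (struct fec_io *)((char *)io + per_io_data_size - sizeof(struct fec_io));
}

int main(void)
{
	size_t per_io = sizeof(struct io_hdr) + 64 + sizeof(struct fec_io);
	struct io_hdr *io = calloc(1, per_io);

	fec_io_of(io, per_io)->erasures = 3;
	printf("erasures=%d\n", fec_io_of(io, per_io)->erasures);
	free(io);
	return 0;
}
```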
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index e115fcfe723c..14e58ae70521 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -642,7 +642,6 @@ static void verity_work(struct work_struct *w)
io->in_tasklet = false;
- verity_fec_init_io(io);
verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}
@@ -668,7 +667,9 @@ static void verity_end_io(struct bio *bio)
struct dm_verity_io *io = bio->bi_private;
if (bio->bi_status &&
- (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
+ (!verity_fec_is_enabled(io->v) ||
+ verity_is_system_shutting_down() ||
+ (bio->bi_opf & REQ_RAHEAD))) {
verity_finish_io(io, bio->bi_status);
return;
}
@@ -792,6 +793,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
bio->bi_private = io;
io->iter = bio->bi_iter;
+ verity_fec_init_io(io);
+
verity_submit_prefetch(v, io);
submit_bio_noacct(bio);
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index f96f4e281ee4..f9d522c870e6 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -115,12 +115,6 @@ static inline u8 *verity_io_want_digest(struct dm_verity *v,
return (u8 *)(io + 1) + v->ahash_reqsize + v->digest_size;
}
-static inline u8 *verity_io_digest_end(struct dm_verity *v,
- struct dm_verity_io *io)
-{
- return verity_io_want_digest(v, io) + v->digest_size;
-}
-
extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
struct bvec_iter *iter,
int (*process)(struct dm_verity *v,
diff --git a/drivers/media/pci/mgb4/Kconfig b/drivers/media/pci/mgb4/Kconfig
index 13fad15a434c..f2a05a1c8ffa 100644
--- a/drivers/media/pci/mgb4/Kconfig
+++ b/drivers/media/pci/mgb4/Kconfig
@@ -2,6 +2,7 @@
config VIDEO_MGB4
tristate "Digiteq Automotive MGB4 support"
depends on VIDEO_DEV && PCI && I2C && DMADEVICES && SPI && MTD && IIO
+ depends on COMMON_CLK
select VIDEOBUF2_DMA_SG
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
index 3efb33fbf40c..5bfb8a06202e 100644
--- a/drivers/media/pci/mgb4/mgb4_core.c
+++ b/drivers/media/pci/mgb4/mgb4_core.c
@@ -42,6 +42,10 @@
#define MGB4_USER_IRQS 16
+#define DIGITEQ_VID 0x1ed8
+#define T100_DID 0x0101
+#define T200_DID 0x0201
+
ATTRIBUTE_GROUPS(mgb4_pci);
static int flashid;
@@ -151,7 +155,7 @@ static struct spi_master *get_spi_adap(struct platform_device *pdev)
return dev ? container_of(dev, struct spi_master, dev) : NULL;
}
-static int init_spi(struct mgb4_dev *mgbdev)
+static int init_spi(struct mgb4_dev *mgbdev, u32 devid)
{
struct resource spi_resources[] = {
{
@@ -213,8 +217,13 @@ static int init_spi(struct mgb4_dev *mgbdev)
snprintf(mgbdev->fw_part_name, sizeof(mgbdev->fw_part_name),
"mgb4-fw.%d", flashid);
mgbdev->partitions[0].name = mgbdev->fw_part_name;
- mgbdev->partitions[0].size = 0x400000;
- mgbdev->partitions[0].offset = 0x400000;
+ if (devid == T200_DID) {
+ mgbdev->partitions[0].size = 0x950000;
+ mgbdev->partitions[0].offset = 0x1000000;
+ } else {
+ mgbdev->partitions[0].size = 0x400000;
+ mgbdev->partitions[0].offset = 0x400000;
+ }
mgbdev->partitions[0].mask_flags = 0;
snprintf(mgbdev->data_part_name, sizeof(mgbdev->data_part_name),
@@ -551,7 +560,7 @@ static int mgb4_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_video_regs;
/* SPI FLASH */
- rv = init_spi(mgbdev);
+ rv = init_spi(mgbdev, id->device);
if (rv < 0)
goto err_cmt_regs;
@@ -666,7 +675,8 @@ static void mgb4_remove(struct pci_dev *pdev)
}
static const struct pci_device_id mgb4_pci_ids[] = {
- { PCI_DEVICE(0x1ed8, 0x0101), },
+ { PCI_DEVICE(DIGITEQ_VID, T100_DID), },
+ { PCI_DEVICE(DIGITEQ_VID, T200_DID), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, mgb4_pci_ids);
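The probe path now tells the T100 and T200 boards apart by PCI device ID and picks a larger, higher-offset firmware partition for the T200. A standalone sketch of selecting a layout from the matched device ID; the offsets and sizes are copied from the hunk, everything else is illustrative:

```c
#include <stdint.h>
#include <stdio.h>

#define DIGITEQ_VID 0x1ed8
#define T100_DID    0x0101
#define T200_DID    0x0201

struct flash_part { uint32_t offset, size; };

static struct flash_part fw_partition_for(uint32_t devid)
{
	/* T200 carries a bigger firmware image, so its slot is larger and
	 * starts higher in flash (values from the driver hunk above). */
	if (devid == T200_DID)
		return (struct flash_part){ .offset = 0x1000000, .size = 0x950000 };
	return (struct flash_part){ .offset = 0x400000, .size = 0x400000 };
}

int main(void)
{
	uint32_t ids[] = { T100_DID, T200_DID };

	for (unsigned int i = 0; i < 2; i++) {
		struct flash_part p = fw_partition_for(ids[i]);

		printf("%04x:%04x fw @ %#x, %#x bytes\n",
		       DIGITEQ_VID, ids[i], p.offset, p.size);
	}
	return 0;
}
```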
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
index f8093ba9539e..68d05243c3ee 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
@@ -373,7 +373,7 @@ int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
(7 << VI6_DPR_SMPPT_TGW_SHIFT) |
(VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
- v4l2_subdev_call(&pipe->output->entity.subdev, video, s_stream, 0);
+ vsp1_wpf_stop(pipe->output);
return ret;
}
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
index 3b17f5fa4067..ea12c3f12c92 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
@@ -44,14 +44,6 @@ static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf,
}
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static const struct v4l2_subdev_ops rpf_ops = {
- .pad = &vsp1_rwpf_pad_ops,
-};
-
-/* -----------------------------------------------------------------------------
* VSP1 Entity Operations
*/
@@ -411,7 +403,7 @@ struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
rpf->entity.index = index;
sprintf(name, "rpf.%u", index);
- ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &rpf_ops,
+ ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &vsp1_rwpf_subdev_ops,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
index 22a82d218152..e0f87c8103ca 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
@@ -24,7 +24,7 @@ struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
}
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Pad Operations
+ * V4L2 Subdevice Operations
*/
static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
@@ -243,7 +243,7 @@ done:
return ret;
}
-const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
+static const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
.init_cfg = vsp1_entity_init_cfg,
.enum_mbus_code = vsp1_rwpf_enum_mbus_code,
.enum_frame_size = vsp1_rwpf_enum_frame_size,
@@ -253,6 +253,10 @@ const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = {
.set_selection = vsp1_rwpf_set_selection,
};
+const struct v4l2_subdev_ops vsp1_rwpf_subdev_ops = {
+ .pad = &vsp1_rwpf_pad_ops,
+};
+
/* -----------------------------------------------------------------------------
* Controls
*/
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h
index eac5c04c2239..e0d212c70b2f 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rwpf.h
@@ -79,9 +79,11 @@ static inline struct vsp1_rwpf *entity_to_rwpf(struct vsp1_entity *entity)
struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index);
struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index);
+void vsp1_wpf_stop(struct vsp1_rwpf *wpf);
+
int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols);
-extern const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops;
+extern const struct v4l2_subdev_ops vsp1_rwpf_subdev_ops;
struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf,
struct v4l2_subdev_state *sd_state);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
index d0074ca00920..cab4445eca69 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
@@ -186,17 +186,13 @@ static int wpf_init_controls(struct vsp1_rwpf *wpf)
}
/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Core Operations
+ * VSP1 Entity Operations
*/
-static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
+void vsp1_wpf_stop(struct vsp1_rwpf *wpf)
{
- struct vsp1_rwpf *wpf = to_rwpf(subdev);
struct vsp1_device *vsp1 = wpf->entity.vsp1;
- if (enable)
- return 0;
-
/*
* Write to registers directly when stopping the stream as there will be
* no pipeline run to apply the display list.
@@ -204,27 +200,8 @@ static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET +
VI6_WPF_SRCRPF, 0);
-
- return 0;
}
-/* -----------------------------------------------------------------------------
- * V4L2 Subdevice Operations
- */
-
-static const struct v4l2_subdev_video_ops wpf_video_ops = {
- .s_stream = wpf_s_stream,
-};
-
-static const struct v4l2_subdev_ops wpf_ops = {
- .video = &wpf_video_ops,
- .pad = &vsp1_rwpf_pad_ops,
-};
-
-/* -----------------------------------------------------------------------------
- * VSP1 Entity Operations
- */
-
static void vsp1_wpf_destroy(struct vsp1_entity *entity)
{
struct vsp1_rwpf *wpf = entity_to_rwpf(entity);
@@ -583,7 +560,7 @@ struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
wpf->entity.index = index;
sprintf(name, "wpf.%u", index);
- ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &wpf_ops,
+ ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &vsp1_rwpf_subdev_ops,
MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 152dfe593c43..f9a5cffa64b1 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1482,6 +1482,8 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
blk_mq_requeue_request(req, true);
else
__blk_mq_end_request(req, BLK_STS_OK);
+ } else if (mq->in_recovery) {
+ blk_mq_requeue_request(req, true);
} else {
blk_mq_end_request(req, BLK_STS_OK);
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 3d3e0ca52614..a8c17b4cd737 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -551,7 +551,9 @@ int mmc_cqe_recovery(struct mmc_host *host)
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
- mmc_wait_for_cmd(host, &cmd, 0);
+ mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
+ mmc_poll_for_busy(host->card, MMC_CQE_RECOVERY_TIMEOUT, true, MMC_BUSY_IO);
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = MMC_CMDQ_TASK_MGMT;
@@ -559,10 +561,13 @@ int mmc_cqe_recovery(struct mmc_host *host)
cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */
cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT;
- err = mmc_wait_for_cmd(host, &cmd, 0);
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
host->cqe_ops->cqe_recovery_finish(host);
+ if (err)
+ err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
+
mmc_retune_release(host);
return err;
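The recovery flow above now retries its commands, polls the card for busy after the stop, and gives the task-management command one more attempt after recovery_finish if the first try failed. A generic sketch of that "send, settle, retry" shape with placeholder primitives; this is not the MMC core API:

```c
#include <stdio.h>

/* Placeholders standing in for issuing a command and polling busy. */
static int send_cmd(const char *name, int *failures_left)
{
	printf("send %s\n", name);
	return (*failures_left)-- > 0 ? -1 : 0;	/* fail until budget is spent */
}

static void poll_for_busy(void)
{
	printf("poll card busy\n");
}

static int cqe_recovery(void)
{
	int flaky = 1;	/* simulate one failing attempt of the second command */
	int none = 0;
	int err;

	send_cmd("STOP_TRANSMISSION", &none);
	poll_for_busy();			/* give the card time to settle */

	err = send_cmd("CMDQ_TASK_MGMT", &flaky);
	if (err)				/* one more try after finishing recovery */
		err = send_cmd("CMDQ_TASK_MGMT", &flaky);
	return err;
}

int main(void)
{
	printf("recovery %s\n", cqe_recovery() ? "failed" : "ok");
	return 0;
}
```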
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index b3d7d6d8d654..41e94cd14109 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -942,8 +942,8 @@ static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
ret = cqhci_tasks_cleared(cq_host);
if (!ret)
- pr_debug("%s: cqhci: Failed to clear tasks\n",
- mmc_hostname(mmc));
+ pr_warn("%s: cqhci: Failed to clear tasks\n",
+ mmc_hostname(mmc));
return ret;
}
@@ -976,7 +976,7 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
ret = cqhci_halted(cq_host);
if (!ret)
- pr_debug("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
+ pr_warn("%s: cqhci: Failed to halt\n", mmc_hostname(mmc));
return ret;
}
@@ -984,10 +984,10 @@ static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
/*
* After halting we expect to be able to use the command line. We interpret the
* failure to halt to mean the data lines might still be in use (and the upper
- * layers will need to send a STOP command), so we set the timeout based on a
- * generous command timeout.
+ * layers will need to send a STOP command), however failing to halt complicates
+ * the recovery, so set a timeout that would reasonably allow I/O to complete.
*/
-#define CQHCI_START_HALT_TIMEOUT 5
+#define CQHCI_START_HALT_TIMEOUT 500
static void cqhci_recovery_start(struct mmc_host *mmc)
{
@@ -1075,28 +1075,28 @@ static void cqhci_recovery_finish(struct mmc_host *mmc)
ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
- ok = false;
-
/*
* The specification contradicts itself, by saying that tasks cannot be
* cleared if CQHCI does not halt, but if CQHCI does not halt, it should
* be disabled/re-enabled, but not to disable before clearing tasks.
* Have a go anyway.
*/
- if (!ok) {
- pr_debug("%s: cqhci: disable / re-enable\n", mmc_hostname(mmc));
- cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
- cqcfg &= ~CQHCI_ENABLE;
- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
- cqcfg |= CQHCI_ENABLE;
- cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
- /* Be sure that there are no tasks */
- ok = cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
- if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
- ok = false;
- WARN_ON(!ok);
- }
+ if (!cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT))
+ ok = false;
+
+ /* Disable to make sure tasks really are cleared */
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg &= ~CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
+ cqcfg |= CQHCI_ENABLE;
+ cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+
+ cqhci_halt(mmc, CQHCI_FINISH_HALT_TIMEOUT);
+
+ if (!ok)
+ cqhci_clear_all_tasks(mmc, CQHCI_CLEAR_TIMEOUT);
cqhci_recover_mrqs(cq_host);
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index d8a991b349a8..77911a57b12c 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -1189,6 +1189,32 @@ static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
sdhci_writel(host, val, SDHCI_GLI_9763E_HS400_ES_REG);
}
+static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot,
+ bool enable)
+{
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value;
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
+
+ if (enable)
+ value &= ~GLI_9763E_CFG_LPSN_DIS;
+ else
+ value |= GLI_9763E_CFG_LPSN_DIS;
+
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
+
+ pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
+ value &= ~GLI_9763E_VHS_REV;
+ value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
+ pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
+}
+
static void sdhci_set_gl9763e_signaling(struct sdhci_host *host,
unsigned int timing)
{
@@ -1297,6 +1323,9 @@ static int gl9763e_add_host(struct sdhci_pci_slot *slot)
if (ret)
goto cleanup;
+ /* Disable LPM negotiation to avoid entering L1 state. */
+ gl9763e_set_low_power_negotiation(slot, false);
+
return 0;
cleanup:
@@ -1340,31 +1369,6 @@ static void gli_set_gl9763e(struct sdhci_pci_slot *slot)
}
#ifdef CONFIG_PM
-static void gl9763e_set_low_power_negotiation(struct sdhci_pci_slot *slot, bool enable)
-{
- struct pci_dev *pdev = slot->chip->pdev;
- u32 value;
-
- pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
- value &= ~GLI_9763E_VHS_REV;
- value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_W);
- pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
-
- pci_read_config_dword(pdev, PCIE_GLI_9763E_CFG, &value);
-
- if (enable)
- value &= ~GLI_9763E_CFG_LPSN_DIS;
- else
- value |= GLI_9763E_CFG_LPSN_DIS;
-
- pci_write_config_dword(pdev, PCIE_GLI_9763E_CFG, value);
-
- pci_read_config_dword(pdev, PCIE_GLI_9763E_VHS, &value);
- value &= ~GLI_9763E_VHS_REV;
- value |= FIELD_PREP(GLI_9763E_VHS_REV, GLI_9763E_VHS_REV_R);
- pci_write_config_dword(pdev, PCIE_GLI_9763E_VHS, value);
-}
-
static int gl9763e_runtime_suspend(struct sdhci_pci_chip *chip)
{
struct sdhci_pci_slot *slot = chip->slots[0];
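gl9763e_set_low_power_negotiation() follows the usual vendor-register dance: switch the vendor header space to writable, flip the LPSN-disable bit, then switch it back to read-only. A standalone sketch of the same read-modify-write pattern against an in-memory "config space"; the register offsets and field encodings here are invented for illustration:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VHS_REG      0x884
#define CFG_REG      0x8a0
#define VHS_REV_MASK 0x000f0000u
#define VHS_REV_W    (5u << 16)		/* made-up encodings for the sketch */
#define VHS_REV_R    (0u << 16)
#define CFG_LPSN_DIS (1u << 12)

static uint32_t cfg_space[0x1000 / 4];

static uint32_t rd(uint32_t off)             { return cfg_space[off / 4]; }
static void     wr(uint32_t off, uint32_t v) { cfg_space[off / 4] = v; }

static void set_low_power_negotiation(bool enable)
{
	uint32_t v;

	v = rd(VHS_REG);			/* unlock vendor space for writes */
	wr(VHS_REG, (v & ~VHS_REV_MASK) | VHS_REV_W);

	v = rd(CFG_REG);
	if (enable)
		v &= ~CFG_LPSN_DIS;
	else
		v |= CFG_LPSN_DIS;		/* keep the link out of L1 */
	wr(CFG_REG, v);

	v = rd(VHS_REG);			/* lock vendor space again */
	wr(VHS_REG, (v & ~VHS_REV_MASK) | VHS_REV_R);
}

int main(void)
{
	set_low_power_negotiation(false);
	printf("CFG=%#x\n", rd(CFG_REG));
	return 0;
}
```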
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 6b84ba27e6ab..6b8a57e2d20f 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -416,12 +416,33 @@ static void sdhci_sprd_request_done(struct sdhci_host *host,
mmc_request_done(host->mmc, mrq);
}
+static void sdhci_sprd_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ switch (mode) {
+ case MMC_POWER_OFF:
+ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, 0);
+
+ mmc_regulator_disable_vqmmc(mmc);
+ break;
+ case MMC_POWER_ON:
+ mmc_regulator_enable_vqmmc(mmc);
+ break;
+ case MMC_POWER_UP:
+ mmc_regulator_set_ocr(host->mmc, mmc->supply.vmmc, vdd);
+ break;
+ }
+}
+
static struct sdhci_ops sdhci_sprd_ops = {
.read_l = sdhci_sprd_readl,
.write_l = sdhci_sprd_writel,
.write_w = sdhci_sprd_writew,
.write_b = sdhci_sprd_writeb,
.set_clock = sdhci_sprd_set_clock,
+ .set_power = sdhci_sprd_set_power,
.get_max_clock = sdhci_sprd_get_max_clock,
.get_min_clock = sdhci_sprd_get_min_clock,
.set_bus_width = sdhci_set_bus_width,
@@ -823,6 +844,10 @@ static int sdhci_sprd_probe(struct platform_device *pdev)
host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
SDHCI_SUPPORT_DDR50);
+ ret = mmc_regulator_get_supply(host->mmc);
+ if (ret)
+ goto pm_runtime_disable;
+
ret = sdhci_setup_host(host);
if (ret)
goto pm_runtime_disable;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 42b1acaca33a..07a22c74fe81 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -577,6 +577,18 @@ static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}
+static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+}
+
static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
{
u16 reg, val;
@@ -3880,7 +3892,8 @@ static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (chip->info->ops->pcs_ops->pcs_init) {
+ if (chip->info->ops->pcs_ops &&
+ chip->info->ops->pcs_ops->pcs_init) {
err = chip->info->ops->pcs_ops->pcs_init(chip, port);
if (err)
return err;
@@ -3895,7 +3908,8 @@ static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
mv88e6xxx_teardown_devlink_regions_port(ds, port);
- if (chip->info->ops->pcs_ops->pcs_teardown)
+ if (chip->info->ops->pcs_ops &&
+ chip->info->ops->pcs_ops->pcs_teardown)
chip->info->ops->pcs_ops->pcs_teardown(chip, port);
}
@@ -4340,7 +4354,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6172_ops = {
@@ -4440,7 +4454,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -5069,7 +5083,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.stu_getnext = mv88e6352_g1_stu_getnext,
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e6351_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6351_ops = {
@@ -5117,7 +5131,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e6351_phylink_get_caps,
};
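Two things happen in this file: the 6171/6175/6350/6351 families gain a phylink_get_caps that advertises MAC_1000FD, and port setup/teardown stop dereferencing pcs_ops unconditionally, since not every chip supplies that table. A minimal standalone sketch of the optional-ops guard; the types are invented stand-ins:

```c
#include <stdio.h>

struct pcs_ops {
	int  (*pcs_init)(int port);
	void (*pcs_teardown)(int port);
};

struct chip_ops {
	const struct pcs_ops *pcs_ops;	/* may be NULL on older chips */
};

static int port_setup(const struct chip_ops *ops, int port)
{
	/* Guard both the table pointer and the individual hook. */
	if (ops->pcs_ops && ops->pcs_ops->pcs_init)
		return ops->pcs_ops->pcs_init(port);
	return 0;
}

static int demo_pcs_init(int port)
{
	printf("pcs_init(port=%d)\n", port);
	return 0;
}

int main(void)
{
	const struct pcs_ops pcs = { .pcs_init = demo_pcs_init };
	const struct chip_ops with_pcs = { .pcs_ops = &pcs };
	const struct chip_ops without_pcs = { .pcs_ops = NULL };

	port_setup(&with_pcs, 1);
	port_setup(&without_pcs, 1);	/* no crash: ops table absent */
	return 0;
}
```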
static const struct mv88e6xxx_ops mv88e6352_ops = {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 15bab41cee48..888509cf1f21 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -516,8 +516,6 @@ struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
- dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
-
return skb;
}
@@ -589,6 +587,7 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
struct device *dev = priv->net_dev->dev.parent;
+ bool recycle_rx_buf = false;
void *buf_data;
u32 xdp_act;
@@ -618,6 +617,8 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+ } else {
+ recycle_rx_buf = true;
}
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
@@ -637,6 +638,9 @@ void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
goto err_build_skb;
dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
+
+ if (recycle_rx_buf)
+ dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
return;
err_build_skb:
@@ -1073,14 +1077,12 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
dma_addr_t addr;
buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
-
- /* If there's enough room to align the FD address, do it.
- * It will help hardware optimize accesses.
- */
aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
DPAA2_ETH_TX_BUF_ALIGN);
if (aligned_start >= skb->head)
buffer_start = aligned_start;
+ else
+ return -ENOMEM;
/* Store a backpointer to the skb at the beginning of the buffer
* (in the private data area) such that we can release it
@@ -4967,6 +4969,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_dl_port_add;
+ net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
+
err = register_netdev(net_dev);
if (err < 0) {
dev_err(dev, "register_netdev() failed\n");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index bfb6c96c3b2f..834cba8c3a41 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -740,7 +740,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
{
- unsigned int headroom = DPAA2_ETH_SWA_SIZE;
+ unsigned int headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
/* If we don't have an skb (e.g. XDP buffer), we only need space for
* the software annotation area
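The headroom change above means a TX skb must now reserve the software annotation plus the alignment slack up front, and the single-buffer FD path reports an error rather than handing hardware an unaligned buffer start when the headroom cannot absorb the alignment. A deliberately simplified numeric sketch of that check; the constants are placeholders:

```c
#include <stdbool.h>
#include <stdio.h>

#define SWA_SIZE     64		/* software annotation */
#define TX_BUF_ALIGN 64		/* hardware-friendly alignment */

/* Headroom a TX skb is expected to provide: annotation + alignment slack. */
static unsigned int needed_headroom(void)
{
	return SWA_SIZE + TX_BUF_ALIGN;
}

/* Single-buffer FD build: only proceed if stepping the buffer start back for
 * alignment still fits in the available headroom; otherwise report failure. */
static bool build_single_fd(unsigned int headroom_available)
{
	return headroom_available >= needed_headroom();
}

int main(void)
{
	printf("headroom 160: %s\n", build_single_fd(160) ? "ok" : "reject");
	printf("headroom  64: %s\n", build_single_fd(64)  ? "ok" : "reject");
	return 0;
}
```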
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index cd065ec48c87..280994ee5933 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -570,6 +570,50 @@ resume_traffic:
}
/**
+ * ice_lag_build_netdev_list - populate the lag struct's netdev list
+ * @lag: local lag struct
+ * @ndlist: pointer to netdev list to populate
+ */
+static void ice_lag_build_netdev_list(struct ice_lag *lag,
+ struct ice_lag_netdev_list *ndlist)
+{
+ struct ice_lag_netdev_list *nl;
+ struct net_device *tmp_nd;
+
+ INIT_LIST_HEAD(&ndlist->node);
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+ nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
+ if (!nl)
+ break;
+
+ nl->netdev = tmp_nd;
+ list_add(&nl->node, &ndlist->node);
+ }
+ rcu_read_unlock();
+ lag->netdev_head = &ndlist->node;
+}
+
+/**
+ * ice_lag_destroy_netdev_list - free lag struct's netdev list
+ * @lag: pointer to local lag struct
+ * @ndlist: pointer to lag struct netdev list
+ */
+static void ice_lag_destroy_netdev_list(struct ice_lag *lag,
+ struct ice_lag_netdev_list *ndlist)
+{
+ struct ice_lag_netdev_list *entry, *n;
+
+ rcu_read_lock();
+ list_for_each_entry_safe(entry, n, &ndlist->node, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+ rcu_read_unlock();
+ lag->netdev_head = NULL;
+}
+
+/**
* ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF
* @lag: primary interface LAG struct
* @oldport: lport of previous interface
@@ -597,7 +641,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
{
struct ice_lag_netdev_list ndlist;
- struct list_head *tmp, *n;
u8 pri_port, act_port;
struct ice_lag *lag;
struct ice_vsi *vsi;
@@ -621,38 +664,15 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
pri_port = pf->hw.port_info->lport;
act_port = lag->active_port;
- if (lag->upper_netdev) {
- struct ice_lag_netdev_list *nl;
- struct net_device *tmp_nd;
-
- INIT_LIST_HEAD(&ndlist.node);
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
- nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
- if (!nl)
- break;
-
- nl->netdev = tmp_nd;
- list_add(&nl->node, &ndlist.node);
- }
- rcu_read_unlock();
- }
-
- lag->netdev_head = &ndlist.node;
+ if (lag->upper_netdev)
+ ice_lag_build_netdev_list(lag, &ndlist);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
lag->bonded && lag->primary && pri_port != act_port &&
!list_empty(lag->netdev_head))
ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
- list_for_each_safe(tmp, n, &ndlist.node) {
- struct ice_lag_netdev_list *entry;
-
- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
- list_del(&entry->node);
- kfree(entry);
- }
- lag->netdev_head = NULL;
+ ice_lag_destroy_netdev_list(lag, &ndlist);
new_vf_unlock:
mutex_unlock(&pf->lag_mutex);
@@ -679,6 +699,29 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
}
+/**
+ * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context
+ * @lag: local lag struct
+ * @src_prt: lport value for source port
+ * @dst_prt: lport value for destination port
+ *
+ * This function is used to move nodes during an out-of-netdev-event situation,
+ * primarily when the driver needs to reconfigure or recreate resources.
+ *
+ * Must be called while holding the lag_mutex to prevent lag events from
+ * being processed while out-of-sync moves are happening. Also, paired moves,
+ * such as those used in a reset flow, should both be called under the same
+ * mutex lock to avoid changes between the start and the end of the reset.
+ */
+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
+{
+ struct ice_lag_netdev_list ndlist;
+
+ ice_lag_build_netdev_list(lag, &ndlist);
+ ice_lag_move_vf_nodes(lag, src_prt, dst_prt);
+ ice_lag_destroy_netdev_list(lag, &ndlist);
+}
+
#define ICE_LAG_SRIOV_CP_RECIPE 10
#define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
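The kernel-doc above spells out the paired-move contract for ice_lag_move_vf_nodes_cfg(); condensed from the ice_reset_vf() hunk further down, a caller is expected to look roughly like this (sketch only, with error handling and the actual VF reconfiguration elided):

        u8 act_prt = ICE_LAG_INVALID_PORT;
        u8 pri_prt = pf->hw.port_info->lport;
        struct ice_lag *lag = pf->lag;

        mutex_lock(&pf->lag_mutex);
        if (lag && lag->bonded && lag->primary) {
                act_prt = lag->active_port;
                if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
                    lag->upper_netdev)
                        ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
                else
                        act_prt = ICE_LAG_INVALID_PORT;
        }

        /* ... reconfigure or recreate the VF resources here ... */

        if (lag && lag->bonded && lag->primary &&
            act_prt != ICE_LAG_INVALID_PORT)
                ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
        mutex_unlock(&pf->lag_mutex);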
@@ -2051,7 +2094,6 @@ void ice_lag_rebuild(struct ice_pf *pf)
{
struct ice_lag_netdev_list ndlist;
struct ice_lag *lag, *prim_lag;
- struct list_head *tmp, *n;
u8 act_port, loc_port;
if (!pf->lag || !pf->lag->bonded)
@@ -2063,21 +2105,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
if (lag->primary) {
prim_lag = lag;
} else {
- struct ice_lag_netdev_list *nl;
- struct net_device *tmp_nd;
-
- INIT_LIST_HEAD(&ndlist.node);
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
- nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
- if (!nl)
- break;
-
- nl->netdev = tmp_nd;
- list_add(&nl->node, &ndlist.node);
- }
- rcu_read_unlock();
- lag->netdev_head = &ndlist.node;
+ ice_lag_build_netdev_list(lag, &ndlist);
prim_lag = ice_lag_find_primary(lag);
}
@@ -2107,13 +2135,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
ice_clear_rdma_cap(pf);
lag_rebuild_out:
- list_for_each_safe(tmp, n, &ndlist.node) {
- struct ice_lag_netdev_list *entry;
-
- entry = list_entry(tmp, struct ice_lag_netdev_list, node);
- list_del(&entry->node);
- kfree(entry);
- }
+ ice_lag_destroy_netdev_list(lag, &ndlist);
mutex_unlock(&pf->lag_mutex);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index 9557e8605a07..ede833dfa658 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -65,4 +65,5 @@ int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
bool ice_lag_is_switchdev_running(struct ice_pf *pf);
+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
#endif /* _ICE_LAG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index aca1f2ea5034..b7ae09952156 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -829,12 +829,16 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
struct ice_pf *pf = vf->pf;
+ struct ice_lag *lag;
struct ice_vsi *vsi;
+ u8 act_prt, pri_prt;
struct device *dev;
int err = 0;
bool rsd;
dev = ice_pf_to_dev(pf);
+ act_prt = ICE_LAG_INVALID_PORT;
+ pri_prt = pf->hw.port_info->lport;
if (flags & ICE_VF_RESET_NOTIFY)
ice_notify_vf_reset(vf);
@@ -845,6 +849,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return 0;
}
+ lag = pf->lag;
+ mutex_lock(&pf->lag_mutex);
+ if (lag && lag->bonded && lag->primary) {
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
+ lag->upper_netdev)
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ else
+ act_prt = ICE_LAG_INVALID_PORT;
+ }
+
if (flags & ICE_VF_RESET_LOCK)
mutex_lock(&vf->cfg_lock);
else
@@ -937,6 +952,11 @@ out_unlock:
if (flags & ICE_VF_RESET_LOCK)
mutex_unlock(&vf->cfg_lock);
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index cdf17b1e2f25..de11b3186bd7 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -1603,9 +1603,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct ice_pf *pf = vf->pf;
+ struct ice_lag *lag;
struct ice_vsi *vsi;
+ u8 act_prt, pri_prt;
int i = -1, q_idx;
+ lag = pf->lag;
+ mutex_lock(&pf->lag_mutex);
+ act_prt = ICE_LAG_INVALID_PORT;
+ pri_prt = pf->hw.port_info->lport;
+ if (lag && lag->bonded && lag->primary) {
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
+ lag->upper_netdev)
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ else
+ act_prt = ICE_LAG_INVALID_PORT;
+ }
+
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
goto error_param;
@@ -1729,6 +1744,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
}
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
VIRTCHNL_STATUS_SUCCESS, NULL, 0);
@@ -1743,6 +1763,11 @@ error_param:
vf->vf_id, i);
}
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
ice_lag_move_new_vf_nodes(vf);
/* send the response to the VF */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 23c2f2ed2fb8..c112c71ff576 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -5505,6 +5505,8 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
ipolicer = &nix_hw->ipolicer[layer];
for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
prof_idx = req->prof_idx[layer][idx];
if (prof_idx >= ipolicer->band_prof.max ||
ipolicer->pfvf_map[prof_idx] != pcifunc)
@@ -5518,8 +5520,6 @@ int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
ipolicer->pfvf_map[prof_idx] = 0x00;
ipolicer->match_id[prof_idx] = 0;
rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
- if (idx == MAX_BANDPROF_PER_PFFUNC)
- break;
}
}
mutex_unlock(&rvu->rsrc_lock);
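The fix above moves the per-function cap check ahead of the req->prof_idx[layer][idx] read, so the loop never indexes the array with idx == MAX_BANDPROF_PER_PFFUNC. A tiny standalone illustration of the check-before-access ordering (the array size here is an assumption, not the real MAX_BANDPROF_PER_PFFUNC):

#include <stdio.h>

#define MAX_PER_FUNC    8       /* stand-in for MAX_BANDPROF_PER_PFFUNC */

int main(void)
{
        unsigned short prof_idx[MAX_PER_FUNC] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned short prof_count = 12; /* untrusted request may exceed the cap */

        for (int idx = 0; idx < prof_count; idx++) {
                if (idx == MAX_PER_FUNC)        /* bound check before the access */
                        break;
                printf("free profile %u\n", prof_idx[idx]);
        }
        return 0;
}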
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index a4a258da8dd5..c1c99d7054f8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -450,6 +450,9 @@ int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
aq->prof.pebs_mantissa = 0;
aq->prof_mask.pebs_mantissa = 0xFF;
+ aq->prof.hl_en = 0;
+ aq->prof_mask.hl_en = 1;
+
/* Fill AQ info */
aq->qidx = profile;
aq->ctype = NIX_AQ_CTYPE_BANDPROF;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index e7c69b57147e..06910307085e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -1070,6 +1070,8 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
+void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic);
+
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index ba95ac913274..532e324bdcc8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -566,7 +566,9 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
TYPE_PFVF);
- vfs -= 64;
+ if (intr)
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+ vfs = 64;
}
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
@@ -574,7 +576,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
- trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+ if (intr)
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
return IRQ_HANDLED;
}
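With more than 64 VFs the handler services two interrupt registers, INTX(1) for VFs 64 and up and INTX(0) for VFs 0-63, so once the upper range has been queued the bound for the second otx2_queue_work() call has to be capped at 64; the old `vfs -= 64` left VFs (vfs - 64)..63 unserviced. A standalone sketch of the arithmetic (queue_range() is a stand-in for the driver helper and, in this sketch, covers VFs first..bound-1):

#include <stdio.h>

static void queue_range(int first, int bound)
{
        printf("queue mbox work for VFs %d..%d\n", first, bound - 1);
}

int main(void)
{
        int total_vfs = 96;
        int vfs = total_vfs;

        if (vfs > 64) {
                queue_range(64, vfs);   /* upper register: VFs 64..95 */
                vfs = 64;               /* lower register always covers 64 VFs */
        }
        queue_range(0, vfs);            /* lower register: VFs 0..63 */
        return 0;
}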
@@ -1870,6 +1873,8 @@ int otx2_open(struct net_device *netdev)
if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
otx2_dmacflt_reinstall_flows(pf);
+ otx2_tc_apply_ingress_police_rules(pf);
+
err = otx2_rxtx_enable(pf, true);
/* If a mbox communication error happens at this point then interface
* will end up in a state such that it is in down state but hardware
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 8a5e3987a482..db1e0e0e812d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -47,6 +47,9 @@ struct otx2_tc_flow {
bool is_act_police;
u32 prio;
struct npc_install_flow_req req;
+ u64 rate;
+ u32 burst;
+ bool is_pps;
};
static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
@@ -284,21 +287,10 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
return err;
}
-static int otx2_tc_act_set_police(struct otx2_nic *nic,
- struct otx2_tc_flow *node,
- struct flow_cls_offload *f,
- u64 rate, u32 burst, u32 mark,
- struct npc_install_flow_req *req, bool pps)
+static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
+ struct otx2_tc_flow *node)
{
- struct netlink_ext_ack *extack = f->common.extack;
- struct otx2_hw *hw = &nic->hw;
- int rq_idx, rc;
-
- rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
- if (rq_idx >= hw->rx_queues) {
- NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
- return -EINVAL;
- }
+ int rc;
mutex_lock(&nic->mbox.lock);
@@ -308,23 +300,17 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
return rc;
}
- rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
+ rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
+ node->burst, node->rate, node->is_pps);
if (rc)
goto free_leaf;
- rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
+ rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
if (rc)
goto free_leaf;
mutex_unlock(&nic->mbox.lock);
- req->match_id = mark & 0xFFFFULL;
- req->index = rq_idx;
- req->op = NIX_RX_ACTIONOP_UCAST;
- set_bit(rq_idx, &nic->rq_bmap);
- node->is_act_police = true;
- node->rq = rq_idx;
-
return 0;
free_leaf:
@@ -336,6 +322,39 @@ free_leaf:
return rc;
}
+static int otx2_tc_act_set_police(struct otx2_nic *nic,
+ struct otx2_tc_flow *node,
+ struct flow_cls_offload *f,
+ u64 rate, u32 burst, u32 mark,
+ struct npc_install_flow_req *req, bool pps)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct otx2_hw *hw = &nic->hw;
+ int rq_idx, rc;
+
+ rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
+ if (rq_idx >= hw->rx_queues) {
+ NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
+ return -EINVAL;
+ }
+
+ req->match_id = mark & 0xFFFFULL;
+ req->index = rq_idx;
+ req->op = NIX_RX_ACTIONOP_UCAST;
+
+ node->is_act_police = true;
+ node->rq = rq_idx;
+ node->burst = burst;
+ node->rate = rate;
+ node->is_pps = pps;
+
+ rc = otx2_tc_act_set_hw_police(nic, node);
+ if (!rc)
+ set_bit(rq_idx, &nic->rq_bmap);
+
+ return rc;
+}
+
static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct flow_action *flow_action,
struct npc_install_flow_req *req,
@@ -1044,6 +1063,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
}
if (flow_node->is_act_police) {
+ __clear_bit(flow_node->rq, &nic->rq_bmap);
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN)
+ goto free_mcam_flow;
+
mutex_lock(&nic->mbox.lock);
err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
@@ -1059,11 +1083,10 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
"Unable to free leaf bandwidth profile(%d)\n",
flow_node->leaf_profile);
- __clear_bit(flow_node->rq, &nic->rq_bmap);
-
mutex_unlock(&nic->mbox.lock);
}
+free_mcam_flow:
otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
kfree_rcu(flow_node, rcu);
@@ -1083,6 +1106,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
return -ENOMEM;
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
if (flow_cfg->nr_flows == flow_cfg->max_flows) {
NL_SET_ERR_MSG_MOD(extack,
"Free MCAM entry not available to add the flow");
@@ -1442,3 +1470,45 @@ void otx2_shutdown_tc(struct otx2_nic *nic)
otx2_destroy_tc_flow_list(nic);
}
EXPORT_SYMBOL(otx2_shutdown_tc);
+
+static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
+ struct otx2_tc_flow *node)
+{
+ struct npc_install_flow_req *req;
+
+ if (otx2_tc_act_set_hw_police(nic, node))
+ return;
+
+ mutex_lock(&nic->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!req)
+ goto err;
+
+ memcpy(req, &node->req, sizeof(struct npc_install_flow_req));
+
+ if (otx2_sync_mbox_msg(&nic->mbox))
+ netdev_err(nic->netdev,
+ "Failed to install MCAM flow entry for ingress rule");
+err:
+ mutex_unlock(&nic->mbox.lock);
+}
+
+void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
+{
+ struct otx2_flow_config *flow_cfg = nic->flow_cfg;
+ struct otx2_tc_flow *node;
+
+ /* If any ingress policer rules exist for the interface then
+ * apply those rules. Ingress policer rules depend on bandwidth
+ * profiles linked to the receive queues. Since no receive queues
+ * exist when the interface is down, ingress policer rules are stored
+ * and configured in hardware after all receive queues are allocated
+ * in otx2_open.
+ */
+ list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
+ if (node->is_act_police)
+ otx2_tc_config_ingress_rule(nic, node);
+ }
+}
+EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);
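The comment above captures the constraint: leaf bandwidth profiles can only be mapped to receive queues that exist, so each police rule's rate, burst and pps flag now live in its software node and otx2_open() replays them through otx2_tc_act_set_hw_police(). A userspace analogue of that store-then-replay step (the names loosely mirror the driver and are not its API):

#include <stdbool.h>
#include <stdio.h>

/* Loose analogue of the new fields kept in struct otx2_tc_flow. */
struct tc_flow_node {
        bool is_act_police;
        unsigned long long rate;
        unsigned int burst;
        bool is_pps;
};

/* Stand-in for otx2_tc_act_set_hw_police(): (re)program the leaf profile. */
static void set_hw_police(const struct tc_flow_node *node)
{
        printf("program leaf profile: rate=%llu burst=%u pps=%d\n",
               node->rate, node->burst, node->is_pps);
}

int main(void)
{
        struct tc_flow_node flows[] = {
                { .is_act_police = true, .rate = 100000000ULL, .burst = 8192 },
                { .is_act_police = false },
        };

        /* otx2_open(): walk the stored rules now that receive queues exist */
        for (unsigned int i = 0; i < sizeof(flows) / sizeof(flows[0]); i++)
                if (flows[i].is_act_police)
                        set_hw_police(&flows[i]);
        return 0;
}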
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 295366a85c63..62cabeeb842a 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -579,6 +579,7 @@ struct rtl8169_tc_offsets {
enum rtl_flag {
RTL_FLAG_TASK_ENABLED = 0,
RTL_FLAG_TASK_RESET_PENDING,
+ RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
RTL_FLAG_TASK_TX_TIMEOUT,
RTL_FLAG_MAX
};
@@ -4582,6 +4583,8 @@ static void rtl_task(struct work_struct *work)
reset:
rtl_reset_work(tp);
netif_wake_queue(tp->dev);
+ } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE, tp->wk.flags)) {
+ rtl_reset_work(tp);
}
out_unlock:
rtnl_unlock();
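Taken together with the flag added above and the phylink hunk below, the reset path condenses to one pattern: the link-down handler only sets a work flag, and rtl_task(), already running under rtnl_lock, performs the reset without waking the Tx queue for the new flag (condensed sketch, not complete code):

        /* r8169_phylink_handler(), link down on RTL8125: request the reset */
        rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);

        /* rtl_task(), under rtnl_lock */
        if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) {
                rtl_reset_work(tp);
                netif_wake_queue(tp->dev);
        } else if (test_and_clear_bit(RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE,
                                      tp->wk.flags)) {
                rtl_reset_work(tp);     /* reset only, no netif_wake_queue() */
        }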
@@ -4615,7 +4618,7 @@ static void r8169_phylink_handler(struct net_device *ndev)
} else {
/* In few cases rx is broken after link-down otherwise */
if (rtl_is_8125(tp))
- rtl_reset_work(tp);
+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_NO_QUEUE_WAKE);
pm_runtime_idle(d);
}
@@ -4691,7 +4694,7 @@ static int rtl8169_close(struct net_device *dev)
rtl8169_down(tp);
rtl8169_rx_clear(tp);
- cancel_work_sync(&tp->wk.work);
+ cancel_work(&tp->wk.work);
free_irq(tp->irq, tp);
@@ -4925,6 +4928,8 @@ static void rtl_remove_one(struct pci_dev *pdev)
if (pci_dev_run_wake(pdev))
pm_runtime_get_noresume(&pdev->dev);
+ cancel_work_sync(&tp->wk.work);
+
unregister_netdev(tp->dev);
if (tp->dash_type != RTL_DASH_NONE)
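On the cancellation side: rtl_task() takes rtnl_lock (see the out_unlock label above) and rtl8169_close() itself runs under RTNL, so waiting for the work there risks a deadlock; the synchronous cancel therefore moves to rtl_remove_one(), which is not called under RTNL. Condensed from the two hunks above (sketch):

        /* rtl8169_close(), called under RTNL: do not wait for rtl_task() */
        cancel_work(&tp->wk.work);

        /* rtl_remove_one(), RTNL not held: safe to wait before unregistering */
        cancel_work_sync(&tp->wk.work);
        unregister_netdev(tp->dev);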
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c70cff80cc99..664eda4b5a11 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -515,6 +515,15 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
+ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
+ } else {
+ ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35);
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
+ CXR31_SEL_LINK0);
+ }
+
/* Receive frame limit set register */
ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
@@ -537,14 +546,6 @@ static void ravb_emac_init_gbeth(struct net_device *ndev)
/* E-MAC interrupt enable register */
ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
-
- if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0);
- ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35);
- } else {
- ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1,
- CXR31_SEL_LINK0);
- }
}
static void ravb_emac_init_rcar(struct net_device *ndev)
@@ -1811,19 +1812,20 @@ static int ravb_open(struct net_device *ndev)
if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
- netif_tx_start_all_queues(ndev);
-
/* PHY control start */
error = ravb_phy_start(ndev);
if (error)
goto out_ptp_stop;
+ netif_tx_start_all_queues(ndev);
+
return 0;
out_ptp_stop:
/* Stop PTP Clock driver */
if (info->gptp)
ravb_ptp_stop(ndev);
+ ravb_stop_dma(ndev);
out_free_irq_mgmta:
if (!info->multi_irqs)
goto out_free_irq;
@@ -1874,6 +1876,12 @@ static void ravb_tx_timeout_work(struct work_struct *work)
struct net_device *ndev = priv->ndev;
int error;
+ if (!rtnl_trylock()) {
+ usleep_range(1000, 2000);
+ schedule_work(&priv->work);
+ return;
+ }
+
netif_tx_stop_all_queues(ndev);
/* Stop PTP Clock driver */
@@ -1907,7 +1915,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
*/
netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
__func__, error);
- return;
+ goto out_unlock;
}
ravb_emac_init(ndev);
@@ -1917,6 +1925,9 @@ out:
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
+
+out_unlock:
+ rtnl_unlock();
}
/* Packet transmit function for Ethernet AVB */
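ravb_tx_timeout_work() now takes RTNL with rtnl_trylock() and reschedules itself when the lock is contended, the usual way to avoid deadlocking against contexts that already hold RTNL while cancelling or flushing the work. A runnable userspace analogue of the pattern (a pthread mutex stands in for RTNL, and a loop replaces the rescheduling):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;    /* "RTNL" */

static void timeout_work(void)
{
        /* Trylock failed: back off briefly and try again instead of blocking
         * inside the worker (the driver reschedules the work and returns).
         */
        while (pthread_mutex_trylock(&cfg_lock))
                usleep(1000);           /* usleep_range(1000, 2000) above */

        puts("lock held: safe to stop queues and reinit the DMAC/EMAC");
        pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
        timeout_work();
        return 0;
}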
@@ -2645,9 +2656,14 @@ static int ravb_probe(struct platform_device *pdev)
ndev->features = info->net_features;
ndev->hw_features = info->net_hw_features;
- reset_control_deassert(rstc);
+ error = reset_control_deassert(rstc);
+ if (error)
+ goto out_free_netdev;
+
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
+ error = pm_runtime_resume_and_get(&pdev->dev);
+ if (error < 0)
+ goto out_rpm_disable;
if (info->multi_irqs) {
if (info->err_mgmt_irqs)
@@ -2872,11 +2888,12 @@ out_disable_gptp_clk:
out_disable_refclk:
clk_disable_unprepare(priv->refclk);
out_release:
- free_netdev(ndev);
-
pm_runtime_put(&pdev->dev);
+out_rpm_disable:
pm_runtime_disable(&pdev->dev);
reset_control_assert(rstc);
+out_free_netdev:
+ free_netdev(ndev);
return error;
}
@@ -2886,22 +2903,26 @@ static void ravb_remove(struct platform_device *pdev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- /* Stop PTP Clock driver */
- if (info->ccc_gac)
- ravb_ptp_stop(ndev);
-
- clk_disable_unprepare(priv->gptp_clk);
- clk_disable_unprepare(priv->refclk);
-
- /* Set reset mode */
- ravb_write(ndev, CCC_OPC_RESET, CCC);
unregister_netdev(ndev);
if (info->nc_queues)
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
+
ravb_mdio_release(priv);
+
+ /* Stop PTP Clock driver */
+ if (info->ccc_gac)
+ ravb_ptp_stop(ndev);
+
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
+
+ /* Set reset mode */
+ ravb_write(ndev, CCC_OPC_RESET, CCC);
+
+ clk_disable_unprepare(priv->gptp_clk);
+ clk_disable_unprepare(priv->refclk);
+
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
reset_control_assert(priv->rstc);
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 43a7795d6591..e77c6ff93d81 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -1504,8 +1504,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_gwca_queue *gq = rdev->tx_queue;
+ netdev_tx_t ret = NETDEV_TX_OK;
struct rswitch_ext_desc *desc;
- int ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
@@ -1517,10 +1517,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
return ret;
dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
- dev_kfree_skb_any(skb);
- return ret;
- }
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ goto err_kfree;
gq->skbs[gq->cur] = skb;
desc = &gq->tx_ring[gq->cur];
@@ -1533,10 +1531,8 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
struct rswitch_gwca_ts_info *ts_info;
ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
- if (!ts_info) {
- dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
- return -ENOMEM;
- }
+ if (!ts_info)
+ goto err_unmap;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
rdev->ts_tag++;
@@ -1559,6 +1555,14 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
return ret;
+
+err_unmap:
+ dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
+
+err_kfree:
+ dev_kfree_skb_any(skb);
+
+ return ret;
}
static struct net_device_stats *rswitch_get_stats(struct net_device *ndev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index ea4910ae0921..6a7c1d325c46 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -177,8 +177,10 @@
#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
+#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204
#define MMC_XGMAC_TX_FPE_FRAG 0x208
#define MMC_XGMAC_TX_HOLD_REQ 0x20c
+#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224
#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
@@ -352,6 +354,8 @@ static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK);
writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 2823861e5a92..a5a50b5a8816 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -1972,11 +1972,11 @@ void wx_reset_interrupt_capability(struct wx *wx)
if (!pdev->msi_enabled && !pdev->msix_enabled)
return;
- pci_free_irq_vectors(wx->pdev);
if (pdev->msix_enabled) {
kfree(wx->msix_entries);
wx->msix_entries = NULL;
}
+ pci_free_irq_vectors(wx->pdev);
}
EXPORT_SYMBOL(wx_reset_interrupt_capability);
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index f60eb97e3a62..608953d4f98d 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -93,7 +93,7 @@ static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
{
struct nsim_bpf_bound_prog *state;
- if (!prog || !prog->aux->offload)
+ if (!prog || !bpf_prog_is_offloaded(prog->aux))
return;
state = prog->aux->offload->dev_priv;
@@ -311,7 +311,7 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
if (!bpf->prog)
return 0;
- if (!bpf->prog->aux->offload) {
+ if (!bpf_prog_is_offloaded(bpf->prog->aux)) {
NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
return -EINVAL;
}
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index 97bd6705c241..39171380ccf2 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -851,6 +851,12 @@ static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
return -EACCES;
}
+ if (data[IFLA_NETKIT_PEER_INFO]) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_INFO],
+ "netkit peer info cannot be changed after device creation");
+ return -EINVAL;
+ }
+
if (data[IFLA_NETKIT_POLICY]) {
attr = data[IFLA_NETKIT_POLICY];
policy = nla_get_u32(attr);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index e150d82eddb6..0c47be06c153 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -57,8 +57,7 @@ config ATH9K_AHB
config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
- depends on ATH9K && DEBUG_FS
- select MAC80211_DEBUGFS
+ depends on ATH9K && DEBUG_FS && MAC80211_DEBUGFS
select ATH9K_COMMON_DEBUG
help
Say Y, if you need access to ath9k's statistics for
@@ -70,7 +69,6 @@ config ATH9K_DEBUGFS
config ATH9K_STATION_STATISTICS
bool "Detailed station statistics"
depends on ATH9K && ATH9K_DEBUGFS && DEBUG_FS
- select MAC80211_DEBUGFS
default n
help
This option enables detailed statistics for association stations.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index ca5e4fbcf8ce..6af606e5da65 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -707,8 +707,10 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
rcu_dereference_protected(mvm_sta->link[link_id],
lockdep_is_held(&mvm->mutex));
- if (WARN_ON(!link_conf || !mvm_link_sta))
+ if (WARN_ON(!link_conf || !mvm_link_sta)) {
+ ret = -EINVAL;
goto err;
+ }
ret = iwl_mvm_mld_cfg_sta(mvm, sta, vif, link_sta, link_conf,
mvm_link_sta);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 63f3d4a5c9aa..2cc2d2788f83 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -375,6 +375,7 @@ static int mt7921_load_clc(struct mt792x_dev *dev, const char *fw_name)
int ret, i, len, offset = 0;
u8 *clc_base = NULL, hw_encap = 0;
+ dev->phy.clc_chan_conf = 0xff;
if (mt7921_disable_clc ||
mt76_is_usb(&dev->mt76))
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 15c2fb0bcb1b..aa918b9b0469 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -14,7 +14,7 @@
static void
mt7925_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
struct ieee80211_sband_iftype_data *data,
- enum nl80211_iftype iftype)
+ enum nl80211_iftype iftype)
{
struct ieee80211_sta_he_cap *he_cap = &data->he_cap;
struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem;
@@ -53,7 +53,7 @@ mt7925_init_he_caps(struct mt792x_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
- switch (i) {
+ switch (iftype) {
case NL80211_IFTYPE_AP:
he_cap_elem->mac_cap_info[2] |=
IEEE80211_HE_MAC_CAP2_BSR;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 46a4c9c5ea96..1be1ce522896 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1192,8 +1192,16 @@ static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
- queue_delayed_work(nvme_wq, &ctrl->ka_work,
- nvme_keep_alive_work_period(ctrl));
+ unsigned long now = jiffies;
+ unsigned long delay = nvme_keep_alive_work_period(ctrl);
+ unsigned long ka_next_check_tm = ctrl->ka_last_check_time + delay;
+
+ if (time_after(now, ka_next_check_tm))
+ delay = 0;
+ else
+ delay = ka_next_check_tm - now;
+
+ queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
}
static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
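The delay is now anchored to ctrl->ka_last_check_time rather than to the current time, so rescheduling cannot push the next keep-alive check past one full period after the controller was last known alive; if that point has already passed, the work fires immediately. A standalone sketch of the computation (all values are in jiffies; kato_period stands in for nvme_keep_alive_work_period()):

#include <stdio.h>

/* Same wrap-safe comparison as the kernel's time_after() */
#define time_after(a, b)        ((long)((b) - (a)) < 0)

static unsigned long next_ka_delay(unsigned long now,
                                   unsigned long ka_last_check_time,
                                   unsigned long kato_period)
{
        unsigned long ka_next_check_tm = ka_last_check_time + kato_period;

        if (time_after(now, ka_next_check_tm))
                return 0;               /* already overdue: fire now */
        return ka_next_check_tm - now;
}

int main(void)
{
        unsigned long period = 5000;    /* pretend HZ-based period */

        printf("%lu\n", next_ka_delay(1000, 800, period));     /* 4800 */
        printf("%lu\n", next_ka_delay(9000, 800, period));     /* 0, overdue */
        return 0;
}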
@@ -1479,7 +1487,8 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
if (id->ncap == 0) {
/* namespace not allocated or attached */
info->is_removed = true;
- return -ENODEV;
+ ret = -ENODEV;
+ goto error;
}
info->anagrpid = id->anagrpid;
@@ -1497,8 +1506,10 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl,
!memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
memcpy(ids->nguid, id->nguid, sizeof(ids->nguid));
}
+
+error:
kfree(id);
- return 0;
+ return ret;
}
static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl,
@@ -1890,9 +1901,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
/*
* The block layer can't support LBA sizes larger than the page size
- * yet, so catch this early and don't allow block I/O.
+ * or smaller than a sector size yet, so catch this early and don't
+ * allow block I/O.
*/
- if (ns->lba_shift > PAGE_SHIFT) {
+ if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) {
capacity = 0;
bs = (1 << 9);
}
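With the added lower bound, only LBA formats with SECTOR_SHIFT (9) <= lba_shift <= PAGE_SHIFT are usable; anything outside that range gets its capacity forced to zero and the queue falls back to a 512-byte block size. A quick check of the boundary values (PAGE_SHIFT assumed to be 12, i.e. 4 KiB pages, for this demo):

#include <stdio.h>

#define SECTOR_SHIFT    9
#define PAGE_SHIFT      12      /* 4 KiB pages assumed for this demo */

int main(void)
{
        for (unsigned int lba_shift = 8; lba_shift <= 13; lba_shift++) {
                int rejected = lba_shift > PAGE_SHIFT || lba_shift < SECTOR_SHIFT;

                printf("lba_shift=%2u (%5u bytes): %s\n", lba_shift,
                       1u << lba_shift,
                       rejected ? "capacity forced to 0" : "ok");
        }
        return 0;
}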
@@ -2029,6 +2041,13 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if (ret)
return ret;
+ if (id->ncap == 0) {
+ /* namespace not allocated or attached */
+ info->is_removed = true;
+ ret = -ENODEV;
+ goto error;
+ }
+
blk_mq_freeze_queue(ns->disk->queue);
lbaf = nvme_lbaf_index(id->flbas);
ns->lba_shift = id->lbaf[lbaf].ds;
@@ -2090,6 +2109,8 @@ out:
set_bit(NVME_NS_READY, &ns->flags);
ret = 0;
}
+
+error:
kfree(id);
return ret;
}
@@ -4471,6 +4492,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
+ ctrl->ka_last_check_time = jiffies;
BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
PAGE_SIZE);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 787354b849c7..4cef568231bf 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -87,7 +87,6 @@ source "drivers/phy/motorola/Kconfig"
source "drivers/phy/mscc/Kconfig"
source "drivers/phy/qualcomm/Kconfig"
source "drivers/phy/ralink/Kconfig"
-source "drivers/phy/realtek/Kconfig"
source "drivers/phy/renesas/Kconfig"
source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 868a220ed0f6..fb3dc9de6111 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -26,7 +26,6 @@ obj-y += allwinner/ \
mscc/ \
qualcomm/ \
ralink/ \
- realtek/ \
renesas/ \
rockchip/ \
samsung/ \
diff --git a/drivers/phy/realtek/Kconfig b/drivers/phy/realtek/Kconfig
deleted file mode 100644
index 75ac7e7c31ae..000000000000
--- a/drivers/phy/realtek/Kconfig
+++ /dev/null
@@ -1,32 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Phy drivers for Realtek platforms
-#
-
-if ARCH_REALTEK || COMPILE_TEST
-
-config PHY_RTK_RTD_USB2PHY
- tristate "Realtek RTD USB2 PHY Transceiver Driver"
- depends on USB_SUPPORT
- select GENERIC_PHY
- select USB_PHY
- select USB_COMMON
- help
- Enable this to support Realtek SoC USB2 phy transceiver.
- The DHC (digital home center) RTD series SoCs used the Synopsys
- DWC3 USB IP. This driver will do the PHY initialization
- of the parameters.
-
-config PHY_RTK_RTD_USB3PHY
- tristate "Realtek RTD USB3 PHY Transceiver Driver"
- depends on USB_SUPPORT
- select GENERIC_PHY
- select USB_PHY
- select USB_COMMON
- help
- Enable this to support Realtek SoC USB3 phy transceiver.
- The DHC (digital home center) RTD series SoCs used the Synopsys
- DWC3 USB IP. This driver will do the PHY initialization
- of the parameters.
-
-endif # ARCH_REALTEK || COMPILE_TEST
diff --git a/drivers/phy/realtek/Makefile b/drivers/phy/realtek/Makefile
deleted file mode 100644
index ed7b47ff8a26..000000000000
--- a/drivers/phy/realtek/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PHY_RTK_RTD_USB2PHY) += phy-rtk-usb2.o
-obj-$(CONFIG_PHY_RTK_RTD_USB3PHY) += phy-rtk-usb3.o
diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c
deleted file mode 100644
index 0a6426285c67..000000000000
--- a/drivers/phy/realtek/phy-rtk-usb2.c
+++ /dev/null
@@ -1,1325 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * phy-rtk-usb2.c RTK usb2.0 PHY driver
- *
- * Copyright (C) 2023 Realtek Semiconductor Corporation
- *
- */
-
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-#include <linux/nvmem-consumer.h>
-#include <linux/regmap.h>
-#include <linux/sys_soc.h>
-#include <linux/mfd/syscon.h>
-#include <linux/phy/phy.h>
-#include <linux/usb.h>
-#include <linux/usb/phy.h>
-#include <linux/usb/hcd.h>
-
-/* GUSB2PHYACCn register */
-#define PHY_NEW_REG_REQ BIT(25)
-#define PHY_VSTS_BUSY BIT(23)
-#define PHY_VCTRL_SHIFT 8
-#define PHY_REG_DATA_MASK 0xff
-
-#define GET_LOW_NIBBLE(addr) ((addr) & 0x0f)
-#define GET_HIGH_NIBBLE(addr) (((addr) & 0xf0) >> 4)
-
-#define EFUS_USB_DC_CAL_RATE 2
-#define EFUS_USB_DC_CAL_MAX 7
-
-#define EFUS_USB_DC_DIS_RATE 1
-#define EFUS_USB_DC_DIS_MAX 7
-
-#define MAX_PHY_DATA_SIZE 20
-#define OFFEST_PHY_READ 0x20
-
-#define MAX_USB_PHY_NUM 4
-#define MAX_USB_PHY_PAGE0_DATA_SIZE 16
-#define MAX_USB_PHY_PAGE1_DATA_SIZE 16
-#define MAX_USB_PHY_PAGE2_DATA_SIZE 8
-
-#define SET_PAGE_OFFSET 0xf4
-#define SET_PAGE_0 0x9b
-#define SET_PAGE_1 0xbb
-#define SET_PAGE_2 0xdb
-
-#define PAGE_START 0xe0
-#define PAGE0_0XE4 0xe4
-#define PAGE0_0XE6 0xe6
-#define PAGE0_0XE7 0xe7
-#define PAGE1_0XE0 0xe0
-#define PAGE1_0XE2 0xe2
-
-#define SENSITIVITY_CTRL (BIT(4) | BIT(5) | BIT(6))
-#define ENABLE_AUTO_SENSITIVITY_CALIBRATION BIT(2)
-#define DEFAULT_DC_DRIVING_VALUE (0x8)
-#define DEFAULT_DC_DISCONNECTION_VALUE (0x6)
-#define HS_CLK_SELECT BIT(6)
-
-struct phy_reg {
- void __iomem *reg_wrap_vstatus;
- void __iomem *reg_gusb2phyacc0;
- int vstatus_index;
-};
-
-struct phy_data {
- u8 addr;
- u8 data;
-};
-
-struct phy_cfg {
- int page0_size;
- struct phy_data page0[MAX_USB_PHY_PAGE0_DATA_SIZE];
- int page1_size;
- struct phy_data page1[MAX_USB_PHY_PAGE1_DATA_SIZE];
- int page2_size;
- struct phy_data page2[MAX_USB_PHY_PAGE2_DATA_SIZE];
-
- int num_phy;
-
- bool check_efuse;
- int check_efuse_version;
-#define CHECK_EFUSE_V1 1
-#define CHECK_EFUSE_V2 2
- int efuse_dc_driving_rate;
- int efuse_dc_disconnect_rate;
- int dc_driving_mask;
- int dc_disconnect_mask;
- bool usb_dc_disconnect_at_page0;
- int driving_updated_for_dev_dis;
-
- bool do_toggle;
- bool do_toggle_driving;
- bool use_default_parameter;
- bool is_double_sensitivity_mode;
-};
-
-struct phy_parameter {
- struct phy_reg phy_reg;
-
- /* Get from efuse */
- s8 efuse_usb_dc_cal;
- s8 efuse_usb_dc_dis;
-
- /* Get from dts */
- bool inverse_hstx_sync_clock;
- u32 driving_level;
- s32 driving_level_compensate;
- s32 disconnection_compensate;
-};
-
-struct rtk_phy {
- struct usb_phy phy;
- struct device *dev;
-
- struct phy_cfg *phy_cfg;
- int num_phy;
- struct phy_parameter *phy_parameter;
-
- struct dentry *debug_dir;
-};
-
-/* mapping 0xE0 to 0 ... 0xE7 to 7, 0xF0 to 8 ,,, 0xF7 to 15 */
-static inline int page_addr_to_array_index(u8 addr)
-{
- return (int)((((addr) - PAGE_START) & 0x7) +
- ((((addr) - PAGE_START) & 0x10) >> 1));
-}
-
-static inline u8 array_index_to_page_addr(int index)
-{
- return ((((index) + PAGE_START) & 0x7) +
- ((((index) & 0x8) << 1) + PAGE_START));
-}
-
-#define PHY_IO_TIMEOUT_USEC (50000)
-#define PHY_IO_DELAY_US (100)
-
-static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
-{
- int ret;
- unsigned int val;
-
- ret = read_poll_timeout(readl, val, ((val & mask) == result),
- PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
- if (ret) {
- pr_err("%s can't program USB phy\n", __func__);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static char rtk_phy_read(struct phy_reg *phy_reg, char addr)
-{
- void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
- unsigned int val;
- int ret = 0;
-
- addr -= OFFEST_PHY_READ;
-
- /* polling until VBusy == 0 */
- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
- if (ret)
- return (char)ret;
-
- /* VCtrl = low nibble of addr, and set PHY_NEW_REG_REQ */
- val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
- writel(val, reg_gusb2phyacc0);
- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
- if (ret)
- return (char)ret;
-
- /* VCtrl = high nibble of addr, and set PHY_NEW_REG_REQ */
- val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
- writel(val, reg_gusb2phyacc0);
- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
- if (ret)
- return (char)ret;
-
- val = readl(reg_gusb2phyacc0);
-
- return (char)(val & PHY_REG_DATA_MASK);
-}
-
-static int rtk_phy_write(struct phy_reg *phy_reg, char addr, char data)
-{
- unsigned int val;
- void __iomem *reg_wrap_vstatus = phy_reg->reg_wrap_vstatus;
- void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
- int shift_bits = phy_reg->vstatus_index * 8;
- int ret = 0;
-
- /* write data to VStatusOut2 (data output to phy) */
- writel((u32)data << shift_bits, reg_wrap_vstatus);
-
- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
- if (ret)
- return ret;
-
- /* VCtrl = low nibble of addr, set PHY_NEW_REG_REQ */
- val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
-
- writel(val, reg_gusb2phyacc0);
- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
- if (ret)
- return ret;
-
- /* VCtrl = high nibble of addr, set PHY_NEW_REG_REQ */
- val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
-
- writel(val, reg_gusb2phyacc0);
- ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int rtk_phy_set_page(struct phy_reg *phy_reg, int page)
-{
- switch (page) {
- case 0:
- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_0);
- case 1:
- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_1);
- case 2:
- return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_2);
- default:
- pr_err("%s error page=%d\n", __func__, page);
- }
-
- return -EINVAL;
-}
-
-static u8 __updated_dc_disconnect_level_page0_0xe4(struct phy_cfg *phy_cfg,
- struct phy_parameter *phy_parameter, u8 data)
-{
- u8 ret;
- s32 val;
- s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
- int offset = 4;
-
- val = (s32)((data >> offset) & dc_disconnect_mask)
- + phy_parameter->efuse_usb_dc_dis
- + phy_parameter->disconnection_compensate;
-
- if (val > dc_disconnect_mask)
- val = dc_disconnect_mask;
- else if (val < 0)
- val = 0;
-
- ret = (data & (~(dc_disconnect_mask << offset))) |
- (val & dc_disconnect_mask) << offset;
-
- return ret;
-}
-
-/* updated disconnect level at page0 */
-static void update_dc_disconnect_level_at_page0(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter, bool update)
-{
- struct phy_cfg *phy_cfg;
- struct phy_reg *phy_reg;
- struct phy_data *phy_data_page;
- struct phy_data *phy_data;
- u8 addr, data;
- int offset = 4;
- s32 dc_disconnect_mask;
- int i;
-
- phy_cfg = rtk_phy->phy_cfg;
- phy_reg = &phy_parameter->phy_reg;
-
- /* Set page 0 */
- phy_data_page = phy_cfg->page0;
- rtk_phy_set_page(phy_reg, 0);
-
- i = page_addr_to_array_index(PAGE0_0XE4);
- phy_data = phy_data_page + i;
- if (!phy_data->addr) {
- phy_data->addr = PAGE0_0XE4;
- phy_data->data = rtk_phy_read(phy_reg, PAGE0_0XE4);
- }
-
- addr = phy_data->addr;
- data = phy_data->data;
- dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
-
- if (update)
- data = __updated_dc_disconnect_level_page0_0xe4(phy_cfg, phy_parameter, data);
- else
- data = (data & ~(dc_disconnect_mask << offset)) |
- (DEFAULT_DC_DISCONNECTION_VALUE << offset);
-
- if (rtk_phy_write(phy_reg, addr, data))
- dev_err(rtk_phy->dev,
- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
- __func__, addr, data);
-}
-
-static u8 __updated_dc_disconnect_level_page1_0xe2(struct phy_cfg *phy_cfg,
- struct phy_parameter *phy_parameter, u8 data)
-{
- u8 ret;
- s32 val;
- s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
-
- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
- val = (s32)(data & dc_disconnect_mask)
- + phy_parameter->efuse_usb_dc_dis
- + phy_parameter->disconnection_compensate;
- } else { /* for CHECK_EFUSE_V2 or no efuse */
- if (phy_parameter->efuse_usb_dc_dis)
- val = (s32)(phy_parameter->efuse_usb_dc_dis +
- phy_parameter->disconnection_compensate);
- else
- val = (s32)((data & dc_disconnect_mask) +
- phy_parameter->disconnection_compensate);
- }
-
- if (val > dc_disconnect_mask)
- val = dc_disconnect_mask;
- else if (val < 0)
- val = 0;
-
- ret = (data & (~dc_disconnect_mask)) | (val & dc_disconnect_mask);
-
- return ret;
-}
-
-/* updated disconnect level at page1 */
-static void update_dc_disconnect_level_at_page1(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter, bool update)
-{
- struct phy_cfg *phy_cfg;
- struct phy_data *phy_data_page;
- struct phy_data *phy_data;
- struct phy_reg *phy_reg;
- u8 addr, data;
- s32 dc_disconnect_mask;
- int i;
-
- phy_cfg = rtk_phy->phy_cfg;
- phy_reg = &phy_parameter->phy_reg;
-
- /* Set page 1 */
- phy_data_page = phy_cfg->page1;
- rtk_phy_set_page(phy_reg, 1);
-
- i = page_addr_to_array_index(PAGE1_0XE2);
- phy_data = phy_data_page + i;
- if (!phy_data->addr) {
- phy_data->addr = PAGE1_0XE2;
- phy_data->data = rtk_phy_read(phy_reg, PAGE1_0XE2);
- }
-
- addr = phy_data->addr;
- data = phy_data->data;
- dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
-
- if (update)
- data = __updated_dc_disconnect_level_page1_0xe2(phy_cfg, phy_parameter, data);
- else
- data = (data & ~dc_disconnect_mask) | DEFAULT_DC_DISCONNECTION_VALUE;
-
- if (rtk_phy_write(phy_reg, addr, data))
- dev_err(rtk_phy->dev,
- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
- __func__, addr, data);
-}
-
-static void update_dc_disconnect_level(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter, bool update)
-{
- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
-
- if (phy_cfg->usb_dc_disconnect_at_page0)
- update_dc_disconnect_level_at_page0(rtk_phy, phy_parameter, update);
- else
- update_dc_disconnect_level_at_page1(rtk_phy, phy_parameter, update);
-}
-
-static u8 __update_dc_driving_page0_0xe4(struct phy_cfg *phy_cfg,
- struct phy_parameter *phy_parameter, u8 data)
-{
- s32 driving_level_compensate = phy_parameter->driving_level_compensate;
- s32 dc_driving_mask = phy_cfg->dc_driving_mask;
- s32 val;
- u8 ret;
-
- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
- val = (s32)(data & dc_driving_mask) + driving_level_compensate
- + phy_parameter->efuse_usb_dc_cal;
- } else { /* for CHECK_EFUSE_V2 or no efuse */
- if (phy_parameter->efuse_usb_dc_cal)
- val = (s32)((phy_parameter->efuse_usb_dc_cal & dc_driving_mask)
- + driving_level_compensate);
- else
- val = (s32)(data & dc_driving_mask);
- }
-
- if (val > dc_driving_mask)
- val = dc_driving_mask;
- else if (val < 0)
- val = 0;
-
- ret = (data & (~dc_driving_mask)) | (val & dc_driving_mask);
-
- return ret;
-}
-
-static void update_dc_driving_level(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter)
-{
- struct phy_cfg *phy_cfg;
- struct phy_reg *phy_reg;
-
- phy_reg = &phy_parameter->phy_reg;
- phy_cfg = rtk_phy->phy_cfg;
- if (!phy_cfg->page0[4].addr) {
- rtk_phy_set_page(phy_reg, 0);
- phy_cfg->page0[4].addr = PAGE0_0XE4;
- phy_cfg->page0[4].data = rtk_phy_read(phy_reg, PAGE0_0XE4);
- }
-
- if (phy_parameter->driving_level != DEFAULT_DC_DRIVING_VALUE) {
- u32 dc_driving_mask;
- u8 driving_level;
- u8 data;
-
- data = phy_cfg->page0[4].data;
- dc_driving_mask = phy_cfg->dc_driving_mask;
- driving_level = data & dc_driving_mask;
-
- dev_dbg(rtk_phy->dev, "%s driving_level=%d => dts driving_level=%d\n",
- __func__, driving_level, phy_parameter->driving_level);
-
- phy_cfg->page0[4].data = (data & (~dc_driving_mask)) |
- (phy_parameter->driving_level & dc_driving_mask);
- }
-
- phy_cfg->page0[4].data = __update_dc_driving_page0_0xe4(phy_cfg,
- phy_parameter,
- phy_cfg->page0[4].data);
-}
-
-static void update_hs_clk_select(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter)
-{
- struct phy_cfg *phy_cfg;
- struct phy_reg *phy_reg;
-
- phy_cfg = rtk_phy->phy_cfg;
- phy_reg = &phy_parameter->phy_reg;
-
- if (phy_parameter->inverse_hstx_sync_clock) {
- if (!phy_cfg->page0[6].addr) {
- rtk_phy_set_page(phy_reg, 0);
- phy_cfg->page0[6].addr = PAGE0_0XE6;
- phy_cfg->page0[6].data = rtk_phy_read(phy_reg, PAGE0_0XE6);
- }
-
- phy_cfg->page0[6].data = phy_cfg->page0[6].data | HS_CLK_SELECT;
- }
-}
-
-static void do_rtk_phy_toggle(struct rtk_phy *rtk_phy,
- int index, bool connect)
-{
- struct phy_parameter *phy_parameter;
- struct phy_cfg *phy_cfg;
- struct phy_reg *phy_reg;
- struct phy_data *phy_data_page;
- u8 addr, data;
- int i;
-
- phy_cfg = rtk_phy->phy_cfg;
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
- phy_reg = &phy_parameter->phy_reg;
-
- if (!phy_cfg->do_toggle)
- goto out;
-
- if (phy_cfg->is_double_sensitivity_mode)
- goto do_toggle_driving;
-
- /* Set page 0 */
- rtk_phy_set_page(phy_reg, 0);
-
- addr = PAGE0_0XE7;
- data = rtk_phy_read(phy_reg, addr);
-
- if (connect)
- rtk_phy_write(phy_reg, addr, data & (~SENSITIVITY_CTRL));
- else
- rtk_phy_write(phy_reg, addr, data | (SENSITIVITY_CTRL));
-
-do_toggle_driving:
-
- if (!phy_cfg->do_toggle_driving)
- goto do_toggle;
-
- /* Page 0 addr 0xE4 driving capability */
-
- /* Set page 0 */
- phy_data_page = phy_cfg->page0;
- rtk_phy_set_page(phy_reg, 0);
-
- i = page_addr_to_array_index(PAGE0_0XE4);
- addr = phy_data_page[i].addr;
- data = phy_data_page[i].data;
-
- if (connect) {
- rtk_phy_write(phy_reg, addr, data);
- } else {
- u8 value;
- s32 tmp;
- s32 driving_updated =
- phy_cfg->driving_updated_for_dev_dis;
- s32 dc_driving_mask = phy_cfg->dc_driving_mask;
-
- tmp = (s32)(data & dc_driving_mask) + driving_updated;
-
- if (tmp > dc_driving_mask)
- tmp = dc_driving_mask;
- else if (tmp < 0)
- tmp = 0;
-
- value = (data & (~dc_driving_mask)) | (tmp & dc_driving_mask);
-
- rtk_phy_write(phy_reg, addr, value);
- }
-
-do_toggle:
- /* restore dc disconnect level before toggle */
- update_dc_disconnect_level(rtk_phy, phy_parameter, false);
-
- /* Set page 1 */
- rtk_phy_set_page(phy_reg, 1);
-
- addr = PAGE1_0XE0;
- data = rtk_phy_read(phy_reg, addr);
-
- rtk_phy_write(phy_reg, addr, data &
- (~ENABLE_AUTO_SENSITIVITY_CALIBRATION));
- mdelay(1);
- rtk_phy_write(phy_reg, addr, data |
- (ENABLE_AUTO_SENSITIVITY_CALIBRATION));
-
- /* update dc disconnect level after toggle */
- update_dc_disconnect_level(rtk_phy, phy_parameter, true);
-
-out:
- return;
-}
-
-static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
-{
- struct phy_parameter *phy_parameter;
- struct phy_cfg *phy_cfg;
- struct phy_data *phy_data_page;
- struct phy_reg *phy_reg;
- int i;
-
- phy_cfg = rtk_phy->phy_cfg;
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
- phy_reg = &phy_parameter->phy_reg;
-
- if (phy_cfg->use_default_parameter) {
- dev_dbg(rtk_phy->dev, "%s phy#%d use default parameter\n",
- __func__, index);
- goto do_toggle;
- }
-
- /* Set page 0 */
- phy_data_page = phy_cfg->page0;
- rtk_phy_set_page(phy_reg, 0);
-
- for (i = 0; i < phy_cfg->page0_size; i++) {
- struct phy_data *phy_data = phy_data_page + i;
- u8 addr = phy_data->addr;
- u8 data = phy_data->data;
-
- if (!addr)
- continue;
-
- if (rtk_phy_write(phy_reg, addr, data)) {
- dev_err(rtk_phy->dev,
- "%s: Error to set page0 parameter addr=0x%x value=0x%x\n",
- __func__, addr, data);
- return -EINVAL;
- }
- }
-
- /* Set page 1 */
- phy_data_page = phy_cfg->page1;
- rtk_phy_set_page(phy_reg, 1);
-
- for (i = 0; i < phy_cfg->page1_size; i++) {
- struct phy_data *phy_data = phy_data_page + i;
- u8 addr = phy_data->addr;
- u8 data = phy_data->data;
-
- if (!addr)
- continue;
-
- if (rtk_phy_write(phy_reg, addr, data)) {
- dev_err(rtk_phy->dev,
- "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
- __func__, addr, data);
- return -EINVAL;
- }
- }
-
- if (phy_cfg->page2_size == 0)
- goto do_toggle;
-
- /* Set page 2 */
- phy_data_page = phy_cfg->page2;
- rtk_phy_set_page(phy_reg, 2);
-
- for (i = 0; i < phy_cfg->page2_size; i++) {
- struct phy_data *phy_data = phy_data_page + i;
- u8 addr = phy_data->addr;
- u8 data = phy_data->data;
-
- if (!addr)
- continue;
-
- if (rtk_phy_write(phy_reg, addr, data)) {
- dev_err(rtk_phy->dev,
- "%s: Error to set page2 parameter addr=0x%x value=0x%x\n",
- __func__, addr, data);
- return -EINVAL;
- }
- }
-
-do_toggle:
- do_rtk_phy_toggle(rtk_phy, index, false);
-
- return 0;
-}
-
-static int rtk_phy_init(struct phy *phy)
-{
- struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
- unsigned long phy_init_time = jiffies;
- int i, ret = 0;
-
- if (!rtk_phy)
- return -EINVAL;
-
- for (i = 0; i < rtk_phy->num_phy; i++)
- ret = do_rtk_phy_init(rtk_phy, i);
-
- dev_dbg(rtk_phy->dev, "Initialized RTK USB 2.0 PHY (take %dms)\n",
- jiffies_to_msecs(jiffies - phy_init_time));
- return ret;
-}
-
-static int rtk_phy_exit(struct phy *phy)
-{
- return 0;
-}
-
-static const struct phy_ops ops = {
- .init = rtk_phy_init,
- .exit = rtk_phy_exit,
- .owner = THIS_MODULE,
-};
-
-static void rtk_phy_toggle(struct usb_phy *usb2_phy, bool connect, int port)
-{
- int index = port;
- struct rtk_phy *rtk_phy = NULL;
-
- rtk_phy = dev_get_drvdata(usb2_phy->dev);
-
- if (index > rtk_phy->num_phy) {
- dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n",
- __func__, index, rtk_phy->num_phy);
- return;
- }
-
- do_rtk_phy_toggle(rtk_phy, index, connect);
-}
-
-static int rtk_phy_notify_port_status(struct usb_phy *x, int port,
- u16 portstatus, u16 portchange)
-{
- bool connect = false;
-
- pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n",
- __func__, port, (int)portstatus, (int)portchange);
- if (portstatus & USB_PORT_STAT_CONNECTION)
- connect = true;
-
- if (portchange & USB_PORT_STAT_C_CONNECTION)
- rtk_phy_toggle(x, connect, port);
-
- return 0;
-}
-
-#ifdef CONFIG_DEBUG_FS
-static struct dentry *create_phy_debug_root(void)
-{
- struct dentry *phy_debug_root;
-
- phy_debug_root = debugfs_lookup("phy", usb_debug_root);
- if (!phy_debug_root)
- phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
-
- return phy_debug_root;
-}
-
-static int rtk_usb2_parameter_show(struct seq_file *s, void *unused)
-{
- struct rtk_phy *rtk_phy = s->private;
- struct phy_cfg *phy_cfg;
- int i, index;
-
- phy_cfg = rtk_phy->phy_cfg;
-
- seq_puts(s, "Property:\n");
- seq_printf(s, " check_efuse: %s\n",
- phy_cfg->check_efuse ? "Enable" : "Disable");
- seq_printf(s, " check_efuse_version: %d\n",
- phy_cfg->check_efuse_version);
- seq_printf(s, " efuse_dc_driving_rate: %d\n",
- phy_cfg->efuse_dc_driving_rate);
- seq_printf(s, " dc_driving_mask: 0x%x\n",
- phy_cfg->dc_driving_mask);
- seq_printf(s, " efuse_dc_disconnect_rate: %d\n",
- phy_cfg->efuse_dc_disconnect_rate);
- seq_printf(s, " dc_disconnect_mask: 0x%x\n",
- phy_cfg->dc_disconnect_mask);
- seq_printf(s, " usb_dc_disconnect_at_page0: %s\n",
- phy_cfg->usb_dc_disconnect_at_page0 ? "true" : "false");
- seq_printf(s, " do_toggle: %s\n",
- phy_cfg->do_toggle ? "Enable" : "Disable");
- seq_printf(s, " do_toggle_driving: %s\n",
- phy_cfg->do_toggle_driving ? "Enable" : "Disable");
- seq_printf(s, " driving_updated_for_dev_dis: 0x%x\n",
- phy_cfg->driving_updated_for_dev_dis);
- seq_printf(s, " use_default_parameter: %s\n",
- phy_cfg->use_default_parameter ? "Enable" : "Disable");
- seq_printf(s, " is_double_sensitivity_mode: %s\n",
- phy_cfg->is_double_sensitivity_mode ? "Enable" : "Disable");
-
- for (index = 0; index < rtk_phy->num_phy; index++) {
- struct phy_parameter *phy_parameter;
- struct phy_reg *phy_reg;
- struct phy_data *phy_data_page;
-
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
- phy_reg = &phy_parameter->phy_reg;
-
- seq_printf(s, "PHY %d:\n", index);
-
- seq_puts(s, "Page 0:\n");
- /* Set page 0 */
- phy_data_page = phy_cfg->page0;
- rtk_phy_set_page(phy_reg, 0);
-
- for (i = 0; i < phy_cfg->page0_size; i++) {
- struct phy_data *phy_data = phy_data_page + i;
- u8 addr = array_index_to_page_addr(i);
- u8 data = phy_data->data;
- u8 value = rtk_phy_read(phy_reg, addr);
-
- if (phy_data->addr)
- seq_printf(s, " Page 0: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
- addr, data, value);
- else
- seq_printf(s, " Page 0: addr=0x%x data=none ==> read value=0x%02x\n",
- addr, value);
- }
-
- seq_puts(s, "Page 1:\n");
- /* Set page 1 */
- phy_data_page = phy_cfg->page1;
- rtk_phy_set_page(phy_reg, 1);
-
- for (i = 0; i < phy_cfg->page1_size; i++) {
- struct phy_data *phy_data = phy_data_page + i;
- u8 addr = array_index_to_page_addr(i);
- u8 data = phy_data->data;
- u8 value = rtk_phy_read(phy_reg, addr);
-
- if (phy_data->addr)
- seq_printf(s, " Page 1: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
- addr, data, value);
- else
- seq_printf(s, " Page 1: addr=0x%x data=none ==> read value=0x%02x\n",
- addr, value);
- }
-
- if (phy_cfg->page2_size == 0)
- goto out;
-
- seq_puts(s, "Page 2:\n");
- /* Set page 2 */
- phy_data_page = phy_cfg->page2;
- rtk_phy_set_page(phy_reg, 2);
-
- for (i = 0; i < phy_cfg->page2_size; i++) {
- struct phy_data *phy_data = phy_data_page + i;
- u8 addr = array_index_to_page_addr(i);
- u8 data = phy_data->data;
- u8 value = rtk_phy_read(phy_reg, addr);
-
- if (phy_data->addr)
- seq_printf(s, " Page 2: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
- addr, data, value);
- else
- seq_printf(s, " Page 2: addr=0x%x data=none ==> read value=0x%02x\n",
- addr, value);
- }
-
-out:
- seq_puts(s, "PHY Property:\n");
- seq_printf(s, " efuse_usb_dc_cal: %d\n",
- (int)phy_parameter->efuse_usb_dc_cal);
- seq_printf(s, " efuse_usb_dc_dis: %d\n",
- (int)phy_parameter->efuse_usb_dc_dis);
- seq_printf(s, " inverse_hstx_sync_clock: %s\n",
- phy_parameter->inverse_hstx_sync_clock ? "Enable" : "Disable");
- seq_printf(s, " driving_level: %d\n",
- phy_parameter->driving_level);
- seq_printf(s, " driving_level_compensate: %d\n",
- phy_parameter->driving_level_compensate);
- seq_printf(s, " disconnection_compensate: %d\n",
- phy_parameter->disconnection_compensate);
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(rtk_usb2_parameter);
-
-static inline void create_debug_files(struct rtk_phy *rtk_phy)
-{
- struct dentry *phy_debug_root = NULL;
-
- phy_debug_root = create_phy_debug_root();
- if (!phy_debug_root)
- return;
-
- rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev),
- phy_debug_root);
-
- debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
- &rtk_usb2_parameter_fops);
-
- return;
-}
-
-static inline void remove_debug_files(struct rtk_phy *rtk_phy)
-{
- debugfs_remove_recursive(rtk_phy->debug_dir);
-}
-#else
-static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
-static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
-#endif /* CONFIG_DEBUG_FS */
-
-static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter, int index)
-{
- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
- u8 value = 0;
- struct nvmem_cell *cell;
- struct soc_device_attribute rtk_soc_groot[] = {
- { .family = "Realtek Groot",},
- { /* empty */ } };
-
- if (!phy_cfg->check_efuse)
- goto out;
-
- /* Read efuse for usb dc cal */
- cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-cal");
- if (IS_ERR(cell)) {
- dev_dbg(rtk_phy->dev, "%s no usb-dc-cal: %ld\n",
- __func__, PTR_ERR(cell));
- } else {
- unsigned char *buf;
- size_t buf_size;
-
- buf = nvmem_cell_read(cell, &buf_size);
- if (!IS_ERR(buf)) {
- value = buf[0] & phy_cfg->dc_driving_mask;
- kfree(buf);
- }
- nvmem_cell_put(cell);
- }
-
- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
- int rate = phy_cfg->efuse_dc_driving_rate;
-
- if (value <= EFUS_USB_DC_CAL_MAX)
- phy_parameter->efuse_usb_dc_cal = (int8_t)(value * rate);
- else
- phy_parameter->efuse_usb_dc_cal = -(int8_t)
- ((EFUS_USB_DC_CAL_MAX & value) * rate);
-
- if (soc_device_match(rtk_soc_groot)) {
- dev_dbg(rtk_phy->dev, "For groot IC we need a workaround to adjust efuse_usb_dc_cal\n");
-
- /* We don't multiple dc_cal_rate=2 for positive dc cal compensate */
- if (value <= EFUS_USB_DC_CAL_MAX)
- phy_parameter->efuse_usb_dc_cal = (int8_t)(value);
-
- /* We set max dc cal compensate is 0x8 if otp is 0x7 */
- if (value == 0x7)
- phy_parameter->efuse_usb_dc_cal = (int8_t)(value + 1);
- }
- } else { /* for CHECK_EFUSE_V2 */
- phy_parameter->efuse_usb_dc_cal = value & phy_cfg->dc_driving_mask;
- }
-
- /* Read efuse for usb dc disconnect level */
- value = 0;
- cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-dis");
- if (IS_ERR(cell)) {
- dev_dbg(rtk_phy->dev, "%s no usb-dc-dis: %ld\n",
- __func__, PTR_ERR(cell));
- } else {
- unsigned char *buf;
- size_t buf_size;
-
- buf = nvmem_cell_read(cell, &buf_size);
- if (!IS_ERR(buf)) {
- value = buf[0] & phy_cfg->dc_disconnect_mask;
- kfree(buf);
- }
- nvmem_cell_put(cell);
- }
-
- if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
- int rate = phy_cfg->efuse_dc_disconnect_rate;
-
- if (value <= EFUS_USB_DC_DIS_MAX)
- phy_parameter->efuse_usb_dc_dis = (int8_t)(value * rate);
- else
- phy_parameter->efuse_usb_dc_dis = -(int8_t)
- ((EFUS_USB_DC_DIS_MAX & value) * rate);
- } else { /* for CHECK_EFUSE_V2 */
- phy_parameter->efuse_usb_dc_dis = value & phy_cfg->dc_disconnect_mask;
- }
-
-out:
- return 0;
-}
-
-static int parse_phy_data(struct rtk_phy *rtk_phy)
-{
- struct device *dev = rtk_phy->dev;
- struct device_node *np = dev->of_node;
- struct phy_parameter *phy_parameter;
- int ret = 0;
- int index;
-
- rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
- rtk_phy->num_phy, GFP_KERNEL);
- if (!rtk_phy->phy_parameter)
- return -ENOMEM;
-
- for (index = 0; index < rtk_phy->num_phy; index++) {
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
-
- phy_parameter->phy_reg.reg_wrap_vstatus = of_iomap(np, 0);
- phy_parameter->phy_reg.reg_gusb2phyacc0 = of_iomap(np, 1) + index;
- phy_parameter->phy_reg.vstatus_index = index;
-
- if (of_property_read_bool(np, "realtek,inverse-hstx-sync-clock"))
- phy_parameter->inverse_hstx_sync_clock = true;
- else
- phy_parameter->inverse_hstx_sync_clock = false;
-
- if (of_property_read_u32_index(np, "realtek,driving-level",
- index, &phy_parameter->driving_level))
- phy_parameter->driving_level = DEFAULT_DC_DRIVING_VALUE;
-
- if (of_property_read_u32_index(np, "realtek,driving-level-compensate",
- index, &phy_parameter->driving_level_compensate))
- phy_parameter->driving_level_compensate = 0;
-
- if (of_property_read_u32_index(np, "realtek,disconnection-compensate",
- index, &phy_parameter->disconnection_compensate))
- phy_parameter->disconnection_compensate = 0;
-
- get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
-
- update_dc_driving_level(rtk_phy, phy_parameter);
-
- update_hs_clk_select(rtk_phy, phy_parameter);
- }
-
- return ret;
-}
-
-static int rtk_usb2phy_probe(struct platform_device *pdev)
-{
- struct rtk_phy *rtk_phy;
- struct device *dev = &pdev->dev;
- struct phy *generic_phy;
- struct phy_provider *phy_provider;
- const struct phy_cfg *phy_cfg;
- int ret = 0;
-
- phy_cfg = of_device_get_match_data(dev);
- if (!phy_cfg) {
- dev_err(dev, "phy config are not assigned!\n");
- return -EINVAL;
- }
-
- rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
- if (!rtk_phy)
- return -ENOMEM;
-
- rtk_phy->dev = &pdev->dev;
- rtk_phy->phy.dev = rtk_phy->dev;
- rtk_phy->phy.label = "rtk-usb2phy";
- rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status;
-
- rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
-
- memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
-
- rtk_phy->num_phy = phy_cfg->num_phy;
-
- ret = parse_phy_data(rtk_phy);
- if (ret)
- goto err;
-
- platform_set_drvdata(pdev, rtk_phy);
-
- generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
- if (IS_ERR(generic_phy))
- return PTR_ERR(generic_phy);
-
- phy_set_drvdata(generic_phy, rtk_phy);
-
- phy_provider = devm_of_phy_provider_register(rtk_phy->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
- ret = usb_add_phy_dev(&rtk_phy->phy);
- if (ret)
- goto err;
-
- create_debug_files(rtk_phy);
-
-err:
- return ret;
-}
-
-static void rtk_usb2phy_remove(struct platform_device *pdev)
-{
- struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
-
- remove_debug_files(rtk_phy);
-
- usb_remove_phy(&rtk_phy->phy);
-}
-
-static const struct phy_cfg rtd1295_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [0] = {0xe0, 0x90},
- [3] = {0xe3, 0x3a},
- [4] = {0xe4, 0x68},
- [6] = {0xe6, 0x91},
- [13] = {0xf5, 0x81},
- [15] = {0xf7, 0x02}, },
- .page1_size = 8,
- .page1 = { /* default parameter */ },
- .page2_size = 0,
- .page2 = { /* no parameter */ },
- .num_phy = 1,
- .check_efuse = false,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = 1,
- .dc_driving_mask = 0xf,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = true,
- .do_toggle = true,
- .do_toggle_driving = false,
- .driving_updated_for_dev_dis = 0xf,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = false,
-};
-
-static const struct phy_cfg rtd1395_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [4] = {0xe4, 0xac},
- [13] = {0xf5, 0x00},
- [15] = {0xf7, 0x02}, },
- .page1_size = 8,
- .page1 = { /* default parameter */ },
- .page2_size = 0,
- .page2 = { /* no parameter */ },
- .num_phy = 1,
- .check_efuse = false,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = 1,
- .dc_driving_mask = 0xf,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = true,
- .do_toggle = true,
- .do_toggle_driving = false,
- .driving_updated_for_dev_dis = 0xf,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = false,
-};
-
-static const struct phy_cfg rtd1395_phy_cfg_2port = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [4] = {0xe4, 0xac},
- [13] = {0xf5, 0x00},
- [15] = {0xf7, 0x02}, },
- .page1_size = 8,
- .page1 = { /* default parameter */ },
- .page2_size = 0,
- .page2 = { /* no parameter */ },
- .num_phy = 2,
- .check_efuse = false,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = 1,
- .dc_driving_mask = 0xf,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = true,
- .do_toggle = true,
- .do_toggle_driving = false,
- .driving_updated_for_dev_dis = 0xf,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = false,
-};
-
-static const struct phy_cfg rtd1619_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [4] = {0xe4, 0x68}, },
- .page1_size = 8,
- .page1 = { /* default parameter */ },
- .page2_size = 0,
- .page2 = { /* no parameter */ },
- .num_phy = 1,
- .check_efuse = true,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = 1,
- .dc_driving_mask = 0xf,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = true,
- .do_toggle = true,
- .do_toggle_driving = false,
- .driving_updated_for_dev_dis = 0xf,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = false,
-};
-
-static const struct phy_cfg rtd1319_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [0] = {0xe0, 0x18},
- [4] = {0xe4, 0x6a},
- [7] = {0xe7, 0x71},
- [13] = {0xf5, 0x15},
- [15] = {0xf7, 0x32}, },
- .page1_size = 8,
- .page1 = { [3] = {0xe3, 0x44}, },
- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
- .page2 = { [0] = {0xe0, 0x01}, },
- .num_phy = 1,
- .check_efuse = true,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = 1,
- .dc_driving_mask = 0xf,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = true,
- .do_toggle = true,
- .do_toggle_driving = true,
- .driving_updated_for_dev_dis = 0xf,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = true,
-};
-
-static const struct phy_cfg rtd1312c_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [0] = {0xe0, 0x14},
- [4] = {0xe4, 0x67},
- [5] = {0xe5, 0x55}, },
- .page1_size = 8,
- .page1 = { [3] = {0xe3, 0x23},
- [6] = {0xe6, 0x58}, },
- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
- .page2 = { /* default parameter */ },
- .num_phy = 1,
- .check_efuse = true,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = 1,
- .dc_driving_mask = 0xf,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = true,
- .do_toggle = true,
- .do_toggle_driving = true,
- .driving_updated_for_dev_dis = 0xf,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = true,
-};
-
-static const struct phy_cfg rtd1619b_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [0] = {0xe0, 0xa3},
- [4] = {0xe4, 0x88},
- [5] = {0xe5, 0x4f},
- [6] = {0xe6, 0x02}, },
- .page1_size = 8,
- .page1 = { [3] = {0xe3, 0x64}, },
- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
- .page2 = { [7] = {0xe7, 0x45}, },
- .num_phy = 1,
- .check_efuse = true,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
- .dc_driving_mask = 0x1f,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = false,
- .do_toggle = true,
- .do_toggle_driving = true,
- .driving_updated_for_dev_dis = 0x8,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = true,
-};
-
-static const struct phy_cfg rtd1319d_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [0] = {0xe0, 0xa3},
- [4] = {0xe4, 0x8e},
- [5] = {0xe5, 0x4f},
- [6] = {0xe6, 0x02}, },
- .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
- .page1 = { [14] = {0xf5, 0x1}, },
- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
- .page2 = { [7] = {0xe7, 0x44}, },
- .check_efuse = true,
- .num_phy = 1,
- .check_efuse_version = CHECK_EFUSE_V1,
- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
- .dc_driving_mask = 0x1f,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = false,
- .do_toggle = true,
- .do_toggle_driving = false,
- .driving_updated_for_dev_dis = 0x8,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = true,
-};
-
-static const struct phy_cfg rtd1315e_phy_cfg = {
- .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
- .page0 = { [0] = {0xe0, 0xa3},
- [4] = {0xe4, 0x8c},
- [5] = {0xe5, 0x4f},
- [6] = {0xe6, 0x02}, },
- .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
- .page1 = { [3] = {0xe3, 0x7f},
- [14] = {0xf5, 0x01}, },
- .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
- .page2 = { [7] = {0xe7, 0x44}, },
- .num_phy = 1,
- .check_efuse = true,
- .check_efuse_version = CHECK_EFUSE_V2,
- .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
- .dc_driving_mask = 0x1f,
- .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
- .dc_disconnect_mask = 0xf,
- .usb_dc_disconnect_at_page0 = false,
- .do_toggle = true,
- .do_toggle_driving = false,
- .driving_updated_for_dev_dis = 0x8,
- .use_default_parameter = false,
- .is_double_sensitivity_mode = true,
-};
-
-static const struct of_device_id usbphy_rtk_dt_match[] = {
- { .compatible = "realtek,rtd1295-usb2phy", .data = &rtd1295_phy_cfg },
- { .compatible = "realtek,rtd1312c-usb2phy", .data = &rtd1312c_phy_cfg },
- { .compatible = "realtek,rtd1315e-usb2phy", .data = &rtd1315e_phy_cfg },
- { .compatible = "realtek,rtd1319-usb2phy", .data = &rtd1319_phy_cfg },
- { .compatible = "realtek,rtd1319d-usb2phy", .data = &rtd1319d_phy_cfg },
- { .compatible = "realtek,rtd1395-usb2phy", .data = &rtd1395_phy_cfg },
- { .compatible = "realtek,rtd1395-usb2phy-2port", .data = &rtd1395_phy_cfg_2port },
- { .compatible = "realtek,rtd1619-usb2phy", .data = &rtd1619_phy_cfg },
- { .compatible = "realtek,rtd1619b-usb2phy", .data = &rtd1619b_phy_cfg },
- {},
-};
-MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
-
-static struct platform_driver rtk_usb2phy_driver = {
- .probe = rtk_usb2phy_probe,
- .remove_new = rtk_usb2phy_remove,
- .driver = {
- .name = "rtk-usb2phy",
- .of_match_table = usbphy_rtk_dt_match,
- },
-};
-
-module_platform_driver(rtk_usb2phy_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform: rtk-usb2phy");
-MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
-MODULE_DESCRIPTION("Realtek usb 2.0 phy driver");
diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c
deleted file mode 100644
index 67446a85e968..000000000000
--- a/drivers/phy/realtek/phy-rtk-usb3.c
+++ /dev/null
@@ -1,761 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * phy-rtk-usb3.c RTK usb3.0 phy driver
- *
- * copyright (c) 2023 realtek semiconductor corporation
- *
- */
-
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/uaccess.h>
-#include <linux/debugfs.h>
-#include <linux/nvmem-consumer.h>
-#include <linux/regmap.h>
-#include <linux/sys_soc.h>
-#include <linux/mfd/syscon.h>
-#include <linux/phy/phy.h>
-#include <linux/usb.h>
-#include <linux/usb/hcd.h>
-#include <linux/usb/phy.h>
-
-#define USB_MDIO_CTRL_PHY_BUSY BIT(7)
-#define USB_MDIO_CTRL_PHY_WRITE BIT(0)
-#define USB_MDIO_CTRL_PHY_ADDR_SHIFT 8
-#define USB_MDIO_CTRL_PHY_DATA_SHIFT 16
-
-#define MAX_USB_PHY_DATA_SIZE 0x30
-#define PHY_ADDR_0X09 0x09
-#define PHY_ADDR_0X0B 0x0b
-#define PHY_ADDR_0X0D 0x0d
-#define PHY_ADDR_0X10 0x10
-#define PHY_ADDR_0X1F 0x1f
-#define PHY_ADDR_0X20 0x20
-#define PHY_ADDR_0X21 0x21
-#define PHY_ADDR_0X30 0x30
-
-#define REG_0X09_FORCE_CALIBRATION BIT(9)
-#define REG_0X0B_RX_OFFSET_RANGE_MASK 0xc
-#define REG_0X0D_RX_DEBUG_TEST_EN BIT(6)
-#define REG_0X10_DEBUG_MODE_SETTING 0x3c0
-#define REG_0X10_DEBUG_MODE_SETTING_MASK 0x3f8
-#define REG_0X1F_RX_OFFSET_CODE_MASK 0x1e
-
-#define USB_U3_TX_LFPS_SWING_TRIM_SHIFT 4
-#define USB_U3_TX_LFPS_SWING_TRIM_MASK 0xf
-#define AMPLITUDE_CONTROL_COARSE_MASK 0xff
-#define AMPLITUDE_CONTROL_FINE_MASK 0xffff
-#define AMPLITUDE_CONTROL_COARSE_DEFAULT 0xff
-#define AMPLITUDE_CONTROL_FINE_DEFAULT 0xffff
-
-#define PHY_ADDR_MAP_ARRAY_INDEX(addr) (addr)
-#define ARRAY_INDEX_MAP_PHY_ADDR(index) (index)
-
-struct phy_reg {
- void __iomem *reg_mdio_ctl;
-};
-
-struct phy_data {
- u8 addr;
- u16 data;
-};
-
-struct phy_cfg {
- int param_size;
- struct phy_data param[MAX_USB_PHY_DATA_SIZE];
-
- bool check_efuse;
- bool do_toggle;
- bool do_toggle_once;
- bool use_default_parameter;
- bool check_rx_front_end_offset;
-};
-
-struct phy_parameter {
- struct phy_reg phy_reg;
-
- /* Get from efuse */
- u8 efuse_usb_u3_tx_lfps_swing_trim;
-
- /* Get from dts */
- u32 amplitude_control_coarse;
- u32 amplitude_control_fine;
-};
-
-struct rtk_phy {
- struct usb_phy phy;
- struct device *dev;
-
- struct phy_cfg *phy_cfg;
- int num_phy;
- struct phy_parameter *phy_parameter;
-
- struct dentry *debug_dir;
-};
-
-#define PHY_IO_TIMEOUT_USEC (50000)
-#define PHY_IO_DELAY_US (100)
-
-static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
-{
- int ret;
- unsigned int val;
-
- ret = read_poll_timeout(readl, val, ((val & mask) == result),
- PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
- if (ret) {
- pr_err("%s can't program USB phy\n", __func__);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int rtk_phy3_wait_vbusy(struct phy_reg *phy_reg)
-{
- return utmi_wait_register(phy_reg->reg_mdio_ctl, USB_MDIO_CTRL_PHY_BUSY, 0);
-}
-
-static u16 rtk_phy_read(struct phy_reg *phy_reg, char addr)
-{
- unsigned int tmp;
- u32 value;
-
- tmp = (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT);
-
- writel(tmp, phy_reg->reg_mdio_ctl);
-
- rtk_phy3_wait_vbusy(phy_reg);
-
- value = readl(phy_reg->reg_mdio_ctl);
- value = value >> USB_MDIO_CTRL_PHY_DATA_SHIFT;
-
- return (u16)value;
-}
-
-static int rtk_phy_write(struct phy_reg *phy_reg, char addr, u16 data)
-{
- unsigned int val;
-
- val = USB_MDIO_CTRL_PHY_WRITE |
- (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT) |
- (data << USB_MDIO_CTRL_PHY_DATA_SHIFT);
-
- writel(val, phy_reg->reg_mdio_ctl);
-
- rtk_phy3_wait_vbusy(phy_reg);
-
- return 0;
-}
-
-static void do_rtk_usb3_phy_toggle(struct rtk_phy *rtk_phy, int index, bool connect)
-{
- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
- struct phy_reg *phy_reg;
- struct phy_parameter *phy_parameter;
- struct phy_data *phy_data;
- u8 addr;
- u16 data;
- int i;
-
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
- phy_reg = &phy_parameter->phy_reg;
-
- if (!phy_cfg->do_toggle)
- return;
-
- i = PHY_ADDR_MAP_ARRAY_INDEX(PHY_ADDR_0X09);
- phy_data = phy_cfg->param + i;
- addr = phy_data->addr;
- data = phy_data->data;
-
- if (!addr && !data) {
- addr = PHY_ADDR_0X09;
- data = rtk_phy_read(phy_reg, addr);
- phy_data->addr = addr;
- phy_data->data = data;
- }
-
- rtk_phy_write(phy_reg, addr, data & (~REG_0X09_FORCE_CALIBRATION));
- mdelay(1);
- rtk_phy_write(phy_reg, addr, data | REG_0X09_FORCE_CALIBRATION);
-}
-
-static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
-{
- struct phy_cfg *phy_cfg;
- struct phy_reg *phy_reg;
- struct phy_parameter *phy_parameter;
- int i = 0;
-
- phy_cfg = rtk_phy->phy_cfg;
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
- phy_reg = &phy_parameter->phy_reg;
-
- if (phy_cfg->use_default_parameter)
- goto do_toggle;
-
- for (i = 0; i < phy_cfg->param_size; i++) {
- struct phy_data *phy_data = phy_cfg->param + i;
- u8 addr = phy_data->addr;
- u16 data = phy_data->data;
-
- if (!addr && !data)
- continue;
-
- rtk_phy_write(phy_reg, addr, data);
- }
-
-do_toggle:
- if (phy_cfg->do_toggle_once)
- phy_cfg->do_toggle = true;
-
- do_rtk_usb3_phy_toggle(rtk_phy, index, false);
-
- if (phy_cfg->do_toggle_once) {
- u16 check_value = 0;
- int count = 10;
- u16 value_0x0d, value_0x10;
-
- /* Enable Debug mode by set 0x0D and 0x10 */
- value_0x0d = rtk_phy_read(phy_reg, PHY_ADDR_0X0D);
- value_0x10 = rtk_phy_read(phy_reg, PHY_ADDR_0X10);
-
- rtk_phy_write(phy_reg, PHY_ADDR_0X0D,
- value_0x0d | REG_0X0D_RX_DEBUG_TEST_EN);
- rtk_phy_write(phy_reg, PHY_ADDR_0X10,
- (value_0x10 & ~REG_0X10_DEBUG_MODE_SETTING_MASK) |
- REG_0X10_DEBUG_MODE_SETTING);
-
- check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
-
- while (!(check_value & BIT(15))) {
- check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
- mdelay(1);
- if (count-- < 0)
- break;
- }
-
- if (!(check_value & BIT(15)))
- dev_info(rtk_phy->dev, "toggle fail addr=0x%02x, data=0x%04x\n",
- PHY_ADDR_0X30, check_value);
-
- /* Disable Debug mode by set 0x0D and 0x10 to default*/
- rtk_phy_write(phy_reg, PHY_ADDR_0X0D, value_0x0d);
- rtk_phy_write(phy_reg, PHY_ADDR_0X10, value_0x10);
-
- phy_cfg->do_toggle = false;
- }
-
- if (phy_cfg->check_rx_front_end_offset) {
- u16 rx_offset_code, rx_offset_range;
- u16 code_mask = REG_0X1F_RX_OFFSET_CODE_MASK;
- u16 range_mask = REG_0X0B_RX_OFFSET_RANGE_MASK;
- bool do_update = false;
-
- rx_offset_code = rtk_phy_read(phy_reg, PHY_ADDR_0X1F);
- if (((rx_offset_code & code_mask) == 0x0) ||
- ((rx_offset_code & code_mask) == code_mask))
- do_update = true;
-
- rx_offset_range = rtk_phy_read(phy_reg, PHY_ADDR_0X0B);
- if (((rx_offset_range & range_mask) == range_mask) && do_update) {
- dev_warn(rtk_phy->dev, "Don't update rx_offset_range (rx_offset_code=0x%x, rx_offset_range=0x%x)\n",
- rx_offset_code, rx_offset_range);
- do_update = false;
- }
-
- if (do_update) {
- u16 tmp1, tmp2;
-
- tmp1 = rx_offset_range & (~range_mask);
- tmp2 = rx_offset_range & range_mask;
- tmp2 += (1 << 2);
- rx_offset_range = tmp1 | (tmp2 & range_mask);
- rtk_phy_write(phy_reg, PHY_ADDR_0X0B, rx_offset_range);
- goto do_toggle;
- }
- }
-
- return 0;
-}
-
-static int rtk_phy_init(struct phy *phy)
-{
- struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
- int ret = 0;
- int i;
- unsigned long phy_init_time = jiffies;
-
- for (i = 0; i < rtk_phy->num_phy; i++)
- ret = do_rtk_phy_init(rtk_phy, i);
-
- dev_dbg(rtk_phy->dev, "Initialized RTK USB 3.0 PHY (take %dms)\n",
- jiffies_to_msecs(jiffies - phy_init_time));
-
- return ret;
-}
-
-static int rtk_phy_exit(struct phy *phy)
-{
- return 0;
-}
-
-static const struct phy_ops ops = {
- .init = rtk_phy_init,
- .exit = rtk_phy_exit,
- .owner = THIS_MODULE,
-};
-
-static void rtk_phy_toggle(struct usb_phy *usb3_phy, bool connect, int port)
-{
- int index = port;
- struct rtk_phy *rtk_phy = NULL;
-
- rtk_phy = dev_get_drvdata(usb3_phy->dev);
-
- if (index > rtk_phy->num_phy) {
- dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n",
- __func__, index, rtk_phy->num_phy);
- return;
- }
-
- do_rtk_usb3_phy_toggle(rtk_phy, index, connect);
-}
-
-static int rtk_phy_notify_port_status(struct usb_phy *x, int port,
- u16 portstatus, u16 portchange)
-{
- bool connect = false;
-
- pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n",
- __func__, port, (int)portstatus, (int)portchange);
- if (portstatus & USB_PORT_STAT_CONNECTION)
- connect = true;
-
- if (portchange & USB_PORT_STAT_C_CONNECTION)
- rtk_phy_toggle(x, connect, port);
-
- return 0;
-}
-
-#ifdef CONFIG_DEBUG_FS
-static struct dentry *create_phy_debug_root(void)
-{
- struct dentry *phy_debug_root;
-
- phy_debug_root = debugfs_lookup("phy", usb_debug_root);
- if (!phy_debug_root)
- phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
-
- return phy_debug_root;
-}
-
-static int rtk_usb3_parameter_show(struct seq_file *s, void *unused)
-{
- struct rtk_phy *rtk_phy = s->private;
- struct phy_cfg *phy_cfg;
- int i, index;
-
- phy_cfg = rtk_phy->phy_cfg;
-
- seq_puts(s, "Property:\n");
- seq_printf(s, " check_efuse: %s\n",
- phy_cfg->check_efuse ? "Enable" : "Disable");
- seq_printf(s, " do_toggle: %s\n",
- phy_cfg->do_toggle ? "Enable" : "Disable");
- seq_printf(s, " do_toggle_once: %s\n",
- phy_cfg->do_toggle_once ? "Enable" : "Disable");
- seq_printf(s, " use_default_parameter: %s\n",
- phy_cfg->use_default_parameter ? "Enable" : "Disable");
-
- for (index = 0; index < rtk_phy->num_phy; index++) {
- struct phy_reg *phy_reg;
- struct phy_parameter *phy_parameter;
-
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
- phy_reg = &phy_parameter->phy_reg;
-
- seq_printf(s, "PHY %d:\n", index);
-
- for (i = 0; i < phy_cfg->param_size; i++) {
- struct phy_data *phy_data = phy_cfg->param + i;
- u8 addr = ARRAY_INDEX_MAP_PHY_ADDR(i);
- u16 data = phy_data->data;
-
- if (!phy_data->addr && !data)
- seq_printf(s, " addr = 0x%02x, data = none ==> read value = 0x%04x\n",
- addr, rtk_phy_read(phy_reg, addr));
- else
- seq_printf(s, " addr = 0x%02x, data = 0x%04x ==> read value = 0x%04x\n",
- addr, data, rtk_phy_read(phy_reg, addr));
- }
-
- seq_puts(s, "PHY Property:\n");
- seq_printf(s, " efuse_usb_u3_tx_lfps_swing_trim: 0x%x\n",
- (int)phy_parameter->efuse_usb_u3_tx_lfps_swing_trim);
- seq_printf(s, " amplitude_control_coarse: 0x%x\n",
- (int)phy_parameter->amplitude_control_coarse);
- seq_printf(s, " amplitude_control_fine: 0x%x\n",
- (int)phy_parameter->amplitude_control_fine);
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(rtk_usb3_parameter);
-
-static inline void create_debug_files(struct rtk_phy *rtk_phy)
-{
- struct dentry *phy_debug_root = NULL;
-
- phy_debug_root = create_phy_debug_root();
-
- if (!phy_debug_root)
- return;
-
- rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), phy_debug_root);
-
- debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
- &rtk_usb3_parameter_fops);
-
- return;
-}
-
-static inline void remove_debug_files(struct rtk_phy *rtk_phy)
-{
- debugfs_remove_recursive(rtk_phy->debug_dir);
-}
-#else
-static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
-static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
-#endif /* CONFIG_DEBUG_FS */
-
-static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter, int index)
-{
- struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
- u8 value = 0;
- struct nvmem_cell *cell;
-
- if (!phy_cfg->check_efuse)
- goto out;
-
- cell = nvmem_cell_get(rtk_phy->dev, "usb_u3_tx_lfps_swing_trim");
- if (IS_ERR(cell)) {
- dev_dbg(rtk_phy->dev, "%s no usb_u3_tx_lfps_swing_trim: %ld\n",
- __func__, PTR_ERR(cell));
- } else {
- unsigned char *buf;
- size_t buf_size;
-
- buf = nvmem_cell_read(cell, &buf_size);
- if (!IS_ERR(buf)) {
- value = buf[0] & USB_U3_TX_LFPS_SWING_TRIM_MASK;
- kfree(buf);
- }
- nvmem_cell_put(cell);
- }
-
- if (value > 0 && value < 0x8)
- phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = 0x8;
- else
- phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = (u8)value;
-
-out:
- return 0;
-}
-
-static void update_amplitude_control_value(struct rtk_phy *rtk_phy,
- struct phy_parameter *phy_parameter)
-{
- struct phy_cfg *phy_cfg;
- struct phy_reg *phy_reg;
-
- phy_reg = &phy_parameter->phy_reg;
- phy_cfg = rtk_phy->phy_cfg;
-
- if (phy_parameter->amplitude_control_coarse != AMPLITUDE_CONTROL_COARSE_DEFAULT) {
- u16 val_mask = AMPLITUDE_CONTROL_COARSE_MASK;
- u16 data;
-
- if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
- phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
- data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
- } else {
- data = phy_cfg->param[PHY_ADDR_0X20].data;
- }
-
- data &= (~val_mask);
- data |= (phy_parameter->amplitude_control_coarse & val_mask);
-
- phy_cfg->param[PHY_ADDR_0X20].data = data;
- }
-
- if (phy_parameter->efuse_usb_u3_tx_lfps_swing_trim) {
- u8 efuse_val = phy_parameter->efuse_usb_u3_tx_lfps_swing_trim;
- u16 val_mask = USB_U3_TX_LFPS_SWING_TRIM_MASK;
- int val_shift = USB_U3_TX_LFPS_SWING_TRIM_SHIFT;
- u16 data;
-
- if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
- phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
- data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
- } else {
- data = phy_cfg->param[PHY_ADDR_0X20].data;
- }
-
- data &= ~(val_mask << val_shift);
- data |= ((efuse_val & val_mask) << val_shift);
-
- phy_cfg->param[PHY_ADDR_0X20].data = data;
- }
-
- if (phy_parameter->amplitude_control_fine != AMPLITUDE_CONTROL_FINE_DEFAULT) {
- u16 val_mask = AMPLITUDE_CONTROL_FINE_MASK;
-
- if (!phy_cfg->param[PHY_ADDR_0X21].addr && !phy_cfg->param[PHY_ADDR_0X21].data)
- phy_cfg->param[PHY_ADDR_0X21].addr = PHY_ADDR_0X21;
-
- phy_cfg->param[PHY_ADDR_0X21].data =
- phy_parameter->amplitude_control_fine & val_mask;
- }
-}
-
-static int parse_phy_data(struct rtk_phy *rtk_phy)
-{
- struct device *dev = rtk_phy->dev;
- struct phy_parameter *phy_parameter;
- int ret = 0;
- int index;
-
- rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
- rtk_phy->num_phy, GFP_KERNEL);
- if (!rtk_phy->phy_parameter)
- return -ENOMEM;
-
- for (index = 0; index < rtk_phy->num_phy; index++) {
- phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
-
- phy_parameter->phy_reg.reg_mdio_ctl = of_iomap(dev->of_node, 0) + index;
-
- /* Amplitude control address 0x20 bit 0 to bit 7 */
- if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-coarse-tuning",
- &phy_parameter->amplitude_control_coarse))
- phy_parameter->amplitude_control_coarse = AMPLITUDE_CONTROL_COARSE_DEFAULT;
-
- /* Amplitude control address 0x21 bit 0 to bit 16 */
- if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-fine-tuning",
- &phy_parameter->amplitude_control_fine))
- phy_parameter->amplitude_control_fine = AMPLITUDE_CONTROL_FINE_DEFAULT;
-
- get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
-
- update_amplitude_control_value(rtk_phy, phy_parameter);
- }
-
- return ret;
-}
-
-static int rtk_usb3phy_probe(struct platform_device *pdev)
-{
- struct rtk_phy *rtk_phy;
- struct device *dev = &pdev->dev;
- struct phy *generic_phy;
- struct phy_provider *phy_provider;
- const struct phy_cfg *phy_cfg;
- int ret;
-
- phy_cfg = of_device_get_match_data(dev);
- if (!phy_cfg) {
- dev_err(dev, "phy config are not assigned!\n");
- return -EINVAL;
- }
-
- rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
- if (!rtk_phy)
- return -ENOMEM;
-
- rtk_phy->dev = &pdev->dev;
- rtk_phy->phy.dev = rtk_phy->dev;
- rtk_phy->phy.label = "rtk-usb3phy";
- rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status;
-
- rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
-
- memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
-
- rtk_phy->num_phy = 1;
-
- ret = parse_phy_data(rtk_phy);
- if (ret)
- goto err;
-
- platform_set_drvdata(pdev, rtk_phy);
-
- generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
- if (IS_ERR(generic_phy))
- return PTR_ERR(generic_phy);
-
- phy_set_drvdata(generic_phy, rtk_phy);
-
- phy_provider = devm_of_phy_provider_register(rtk_phy->dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
- ret = usb_add_phy_dev(&rtk_phy->phy);
- if (ret)
- goto err;
-
- create_debug_files(rtk_phy);
-
-err:
- return ret;
-}
-
-static void rtk_usb3phy_remove(struct platform_device *pdev)
-{
- struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
-
- remove_debug_files(rtk_phy);
-
- usb_remove_phy(&rtk_phy->phy);
-}
-
-static const struct phy_cfg rtd1295_phy_cfg = {
- .param_size = MAX_USB_PHY_DATA_SIZE,
- .param = { [0] = {0x01, 0x4008}, [1] = {0x01, 0xe046},
- [2] = {0x02, 0x6046}, [3] = {0x03, 0x2779},
- [4] = {0x04, 0x72f5}, [5] = {0x05, 0x2ad3},
- [6] = {0x06, 0x000e}, [7] = {0x07, 0x2e00},
- [8] = {0x08, 0x3591}, [9] = {0x09, 0x525c},
- [10] = {0x0a, 0xa600}, [11] = {0x0b, 0xa904},
- [12] = {0x0c, 0xc000}, [13] = {0x0d, 0xef1c},
- [14] = {0x0e, 0x2000}, [15] = {0x0f, 0x0000},
- [16] = {0x10, 0x000c}, [17] = {0x11, 0x4c00},
- [18] = {0x12, 0xfc00}, [19] = {0x13, 0x0c81},
- [20] = {0x14, 0xde01}, [21] = {0x15, 0x0000},
- [22] = {0x16, 0x0000}, [23] = {0x17, 0x0000},
- [24] = {0x18, 0x0000}, [25] = {0x19, 0x4004},
- [26] = {0x1a, 0x1260}, [27] = {0x1b, 0xff00},
- [28] = {0x1c, 0xcb00}, [29] = {0x1d, 0xa03f},
- [30] = {0x1e, 0xc2e0}, [31] = {0x1f, 0x2807},
- [32] = {0x20, 0x947a}, [33] = {0x21, 0x88aa},
- [34] = {0x22, 0x0057}, [35] = {0x23, 0xab66},
- [36] = {0x24, 0x0800}, [37] = {0x25, 0x0000},
- [38] = {0x26, 0x040a}, [39] = {0x27, 0x01d6},
- [40] = {0x28, 0xf8c2}, [41] = {0x29, 0x3080},
- [42] = {0x2a, 0x3082}, [43] = {0x2b, 0x2078},
- [44] = {0x2c, 0xffff}, [45] = {0x2d, 0xffff},
- [46] = {0x2e, 0x0000}, [47] = {0x2f, 0x0040}, },
- .check_efuse = false,
- .do_toggle = true,
- .do_toggle_once = false,
- .use_default_parameter = false,
- .check_rx_front_end_offset = false,
-};
-
-static const struct phy_cfg rtd1619_phy_cfg = {
- .param_size = MAX_USB_PHY_DATA_SIZE,
- .param = { [8] = {0x08, 0x3591},
- [38] = {0x26, 0x840b},
- [40] = {0x28, 0xf842}, },
- .check_efuse = false,
- .do_toggle = true,
- .do_toggle_once = false,
- .use_default_parameter = false,
- .check_rx_front_end_offset = false,
-};
-
-static const struct phy_cfg rtd1319_phy_cfg = {
- .param_size = MAX_USB_PHY_DATA_SIZE,
- .param = { [1] = {0x01, 0xac86},
- [6] = {0x06, 0x0003},
- [9] = {0x09, 0x924c},
- [10] = {0x0a, 0xa608},
- [11] = {0x0b, 0xb905},
- [14] = {0x0e, 0x2010},
- [32] = {0x20, 0x705a},
- [33] = {0x21, 0xf645},
- [34] = {0x22, 0x0013},
- [35] = {0x23, 0xcb66},
- [41] = {0x29, 0xff00}, },
- .check_efuse = true,
- .do_toggle = true,
- .do_toggle_once = false,
- .use_default_parameter = false,
- .check_rx_front_end_offset = false,
-};
-
-static const struct phy_cfg rtd1619b_phy_cfg = {
- .param_size = MAX_USB_PHY_DATA_SIZE,
- .param = { [1] = {0x01, 0xac8c},
- [6] = {0x06, 0x0017},
- [9] = {0x09, 0x724c},
- [10] = {0x0a, 0xb610},
- [11] = {0x0b, 0xb90d},
- [13] = {0x0d, 0xef2a},
- [15] = {0x0f, 0x9050},
- [16] = {0x10, 0x000c},
- [32] = {0x20, 0x70ff},
- [34] = {0x22, 0x0013},
- [35] = {0x23, 0xdb66},
- [38] = {0x26, 0x8609},
- [41] = {0x29, 0xff13},
- [42] = {0x2a, 0x3070}, },
- .check_efuse = true,
- .do_toggle = false,
- .do_toggle_once = true,
- .use_default_parameter = false,
- .check_rx_front_end_offset = false,
-};
-
-static const struct phy_cfg rtd1319d_phy_cfg = {
- .param_size = MAX_USB_PHY_DATA_SIZE,
- .param = { [1] = {0x01, 0xac89},
- [4] = {0x04, 0xf2f5},
- [6] = {0x06, 0x0017},
- [9] = {0x09, 0x424c},
- [10] = {0x0a, 0x9610},
- [11] = {0x0b, 0x9901},
- [12] = {0x0c, 0xf000},
- [13] = {0x0d, 0xef2a},
- [14] = {0x0e, 0x1000},
- [15] = {0x0f, 0x9050},
- [32] = {0x20, 0x7077},
- [35] = {0x23, 0x0b62},
- [37] = {0x25, 0x10ec},
- [42] = {0x2a, 0x3070}, },
- .check_efuse = true,
- .do_toggle = false,
- .do_toggle_once = true,
- .use_default_parameter = false,
- .check_rx_front_end_offset = true,
-};
-
-static const struct of_device_id usbphy_rtk_dt_match[] = {
- { .compatible = "realtek,rtd1295-usb3phy", .data = &rtd1295_phy_cfg },
- { .compatible = "realtek,rtd1319-usb3phy", .data = &rtd1319_phy_cfg },
- { .compatible = "realtek,rtd1319d-usb3phy", .data = &rtd1319d_phy_cfg },
- { .compatible = "realtek,rtd1619-usb3phy", .data = &rtd1619_phy_cfg },
- { .compatible = "realtek,rtd1619b-usb3phy", .data = &rtd1619b_phy_cfg },
- {},
-};
-MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
-
-static struct platform_driver rtk_usb3phy_driver = {
- .probe = rtk_usb3phy_probe,
- .remove_new = rtk_usb3phy_remove,
- .driver = {
- .name = "rtk-usb3phy",
- .of_match_table = usbphy_rtk_dt_match,
- },
-};
-
-module_platform_driver(rtk_usb3phy_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform: rtk-usb3phy");
-MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
-MODULE_DESCRIPTION("Realtek usb 3.0 phy driver");
diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig
index d6318cb57aff..e7e827a8877a 100644
--- a/drivers/pinctrl/cirrus/Kconfig
+++ b/drivers/pinctrl/cirrus/Kconfig
@@ -12,7 +12,8 @@ config PINCTRL_CS42L43
config PINCTRL_LOCHNAGAR
tristate "Cirrus Logic Lochnagar pinctrl driver"
- depends on MFD_LOCHNAGAR
+ # Avoid clash caused by MIPS defining RST, which is used in the driver
+ depends on MFD_LOCHNAGAR && !MIPS
select GPIOLIB
select PINMUX
select PINCONF
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 1fa89be29b8f..f2977eb65522 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1262,17 +1262,17 @@ static void pinctrl_link_add(struct pinctrl_dev *pctldev,
static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
{
struct pinctrl_setting *setting, *setting2;
- struct pinctrl_state *old_state = p->state;
+ struct pinctrl_state *old_state = READ_ONCE(p->state);
int ret;
- if (p->state) {
+ if (old_state) {
/*
* For each pinmux setting in the old state, forget SW's record
* of mux owner for that pingroup. Any pingroups which are
* still owned by the new state will be re-acquired by the call
* to pinmux_enable_setting() in the loop below.
*/
- list_for_each_entry(setting, &p->state->settings, node) {
+ list_for_each_entry(setting, &old_state->settings, node) {
if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
continue;
pinmux_disable_setting(setting);
diff --git a/drivers/pinctrl/nxp/pinctrl-s32cc.c b/drivers/pinctrl/nxp/pinctrl-s32cc.c
index 7daff9f186cd..f0cad2c501f7 100644
--- a/drivers/pinctrl/nxp/pinctrl-s32cc.c
+++ b/drivers/pinctrl/nxp/pinctrl-s32cc.c
@@ -843,8 +843,8 @@ static int s32_pinctrl_probe_dt(struct platform_device *pdev,
if (!np)
return -ENODEV;
- if (mem_regions == 0) {
- dev_err(&pdev->dev, "mem_regions is 0\n");
+ if (mem_regions == 0 || mem_regions >= 10000) {
+ dev_err(&pdev->dev, "mem_regions is invalid: %u\n", mem_regions);
return -EINVAL;
}
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 04285c930e94..4ccfa99ed93a 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -143,6 +143,7 @@ static const struct dmi_system_id cy8c95x0_dmi_acpi_irq_info[] = {
* @pinctrl_desc: pin controller description
* @name: Chip controller name
* @tpin: Total number of pins
+ * @gpio_reset: GPIO line handler that can reset the IC
*/
struct cy8c95x0_pinctrl {
struct regmap *regmap;
diff --git a/drivers/pinctrl/realtek/pinctrl-rtd.c b/drivers/pinctrl/realtek/pinctrl-rtd.c
index 9c7a1af4ba69..208896593b61 100644
--- a/drivers/pinctrl/realtek/pinctrl-rtd.c
+++ b/drivers/pinctrl/realtek/pinctrl-rtd.c
@@ -146,7 +146,7 @@ static int rtd_pinctrl_get_function_groups(struct pinctrl_dev *pcdev,
static const struct rtd_pin_desc *rtd_pinctrl_find_mux(struct rtd_pinctrl *data, unsigned int pin)
{
- if (!data->info->muxes[pin].name)
+ if (data->info->muxes[pin].name)
return &data->info->muxes[pin];
return NULL;
@@ -249,7 +249,7 @@ static const struct pinctrl_pin_desc
static const struct rtd_pin_config_desc
*rtd_pinctrl_find_config(struct rtd_pinctrl *data, unsigned int pin)
{
- if (!data->info->configs[pin].name)
+ if (data->info->configs[pin].name)
return &data->info->configs[pin];
return NULL;
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 64e8201c7eac..603f900e88c1 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1273,9 +1273,11 @@ static struct stm32_desc_pin *stm32_pctrl_get_desc_pin_from_gpio(struct stm32_pi
int i;
/* With few exceptions (e.g. bank 'Z'), pin number matches with pin index in array */
- pin_desc = pctl->pins + stm32_pin_nb;
- if (pin_desc->pin.number == stm32_pin_nb)
- return pin_desc;
+ if (stm32_pin_nb < pctl->npins) {
+ pin_desc = pctl->pins + stm32_pin_nb;
+ if (pin_desc->pin.number == stm32_pin_nb)
+ return pin_desc;
+ }
/* Otherwise, loop all array to find the pin with the right number */
for (i = 0; i < pctl->npins; i++) {
@@ -1368,6 +1370,11 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode
}
names = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL);
+ if (!names) {
+ err = -ENOMEM;
+ goto err_clk;
+ }
+
for (i = 0; i < npins; i++) {
stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i);
if (stm32_pin && stm32_pin->pin.name)
diff --git a/drivers/pmdomain/arm/scmi_perf_domain.c b/drivers/pmdomain/arm/scmi_perf_domain.c
index bc3f78abb6da..709bbc448fad 100644
--- a/drivers/pmdomain/arm/scmi_perf_domain.c
+++ b/drivers/pmdomain/arm/scmi_perf_domain.c
@@ -35,7 +35,7 @@ scmi_pd_set_perf_state(struct generic_pm_domain *genpd, unsigned int state)
if (!state)
return -EINVAL;
- ret = pd->perf_ops->level_set(pd->ph, pd->domain_id, state, true);
+ ret = pd->perf_ops->level_set(pd->ph, pd->domain_id, state, false);
if (ret)
dev_warn(&genpd->dev, "Failed with %d when trying to set %d perf level",
ret, state);
diff --git a/drivers/pmdomain/qcom/rpmpd.c b/drivers/pmdomain/qcom/rpmpd.c
index 07590a3ef19c..7796d65f96e8 100644
--- a/drivers/pmdomain/qcom/rpmpd.c
+++ b/drivers/pmdomain/qcom/rpmpd.c
@@ -1044,6 +1044,7 @@ static int rpmpd_probe(struct platform_device *pdev)
rpmpds[i]->pd.power_off = rpmpd_power_off;
rpmpds[i]->pd.power_on = rpmpd_power_on;
rpmpds[i]->pd.set_performance_state = rpmpd_set_performance;
+ rpmpds[i]->pd.flags = GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&rpmpds[i]->pd, NULL, true);
data->domains[i] = &rpmpds[i]->pd;
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
index 2ff7717530bf..8a2f18fa3faf 100644
--- a/drivers/powercap/dtpm_cpu.c
+++ b/drivers/powercap/dtpm_cpu.c
@@ -24,7 +24,6 @@
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
-#include <linux/units.h>
struct dtpm_cpu {
struct dtpm dtpm;
@@ -104,8 +103,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
if (pd->table[i].frequency < freq)
continue;
- return scale_pd_power_uw(pd_mask, pd->table[i].power *
- MICROWATT_PER_MILLIWATT);
+ return scale_pd_power_uw(pd_mask, pd->table[i].power);
}
return 0;
@@ -122,11 +120,9 @@ static int update_pd_power_uw(struct dtpm *dtpm)
nr_cpus = cpumask_weight(&cpus);
dtpm->power_min = em->table[0].power;
- dtpm->power_min *= MICROWATT_PER_MILLIWATT;
dtpm->power_min *= nr_cpus;
dtpm->power_max = em->table[em->nr_perf_states - 1].power;
- dtpm->power_max *= MICROWATT_PER_MILLIWATT;
dtpm->power_max *= nr_cpus;
return 0;
diff --git a/drivers/powercap/dtpm_devfreq.c b/drivers/powercap/dtpm_devfreq.c
index 91276761a31d..612c3b59dd5b 100644
--- a/drivers/powercap/dtpm_devfreq.c
+++ b/drivers/powercap/dtpm_devfreq.c
@@ -39,10 +39,8 @@ static int update_pd_power_uw(struct dtpm *dtpm)
struct em_perf_domain *pd = em_pd_get(dev);
dtpm->power_min = pd->table[0].power;
- dtpm->power_min *= MICROWATT_PER_MILLIWATT;
dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;
- dtpm->power_max *= MICROWATT_PER_MILLIWATT;
return 0;
}
@@ -54,13 +52,10 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
struct device *dev = devfreq->dev.parent;
struct em_perf_domain *pd = em_pd_get(dev);
unsigned long freq;
- u64 power;
int i;
for (i = 0; i < pd->nr_perf_states; i++) {
-
- power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
- if (power > power_limit)
+ if (pd->table[i].power > power_limit)
break;
}
@@ -68,7 +63,7 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);
- power_limit = pd->table[i - 1].power * MICROWATT_PER_MILLIWATT;
+ power_limit = pd->table[i - 1].power;
return power_limit;
}
@@ -110,7 +105,7 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
if (pd->table[i].frequency < freq)
continue;
- power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
+ power = pd->table[i].power;
power *= status.busy_time;
power >>= 10;
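
The two powercap/dtpm hunks above drop the MICROWATT_PER_MILLIWATT scaling; that only makes sense if the energy-model tables already report power in micro-watts, which is an assumption not stated in the diff itself. A minimal sketch of the arithmetic under that assumption, with 150000 as a made-up table entry:

	/*
	 * Hypothetical entry: pd->table[i].power == 150000 (i.e. 150 mW expressed in uW).
	 *
	 * Old code: 150000 * MICROWATT_PER_MILLIWATT = 150000000 uW (150 W)
	 * New code: 150000 uW, used as-is
	 */
	dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;	/* already uW */
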
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fa00dd503cbf..542a4bbb21bc 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3949,8 +3949,15 @@ static int sd_resume(struct device *dev, bool runtime)
static int sd_resume_system(struct device *dev)
{
- if (pm_runtime_suspended(dev))
+ if (pm_runtime_suspended(dev)) {
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ struct scsi_device *sdp = sdkp ? sdkp->device : NULL;
+
+ if (sdp && sdp->force_runtime_start_on_system_start)
+ pm_request_resume(dev);
+
return 0;
+ }
return sd_resume(dev, false);
}
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 1e15ffa79295..44e9b09de47a 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -1143,7 +1143,7 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
* Only set bonding if the link was not already bonded. This
* avoids the lane adapter to re-enter bonding state.
*/
- if (width == TB_LINK_WIDTH_SINGLE) {
+ if (width == TB_LINK_WIDTH_SINGLE && !tb_is_upstream_port(port)) {
ret = tb_port_set_lane_bonding(port, true);
if (ret)
goto err_lane1;
@@ -2880,6 +2880,7 @@ static int tb_switch_lane_bonding_disable(struct tb_switch *sw)
return tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
}
+/* Note updating sw->link_width done in tb_switch_update_link_attributes() */
static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width)
{
struct tb_port *up, *down, *port;
@@ -2919,10 +2920,10 @@ static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width)
return ret;
}
- sw->link_width = width;
return 0;
}
+/* Note updating sw->link_width done in tb_switch_update_link_attributes() */
static int tb_switch_asym_disable(struct tb_switch *sw)
{
struct tb_port *up, *down;
@@ -2957,7 +2958,6 @@ static int tb_switch_asym_disable(struct tb_switch *sw)
return ret;
}
- sw->link_width = TB_LINK_WIDTH_DUAL;
return 0;
}
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 5acdeb766860..fd49f86e0353 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -213,7 +213,17 @@ static void tb_add_dp_resources(struct tb_switch *sw)
if (!tb_switch_query_dp_resource(sw, port))
continue;
- list_add(&port->list, &tcm->dp_resources);
+ /*
+ * If DP IN on device router exist, position it at the
+ * beginning of the DP resources list, so that it is used
+ * before DP IN of the host router. This way external GPU(s)
+ * will be prioritized when pairing DP IN to a DP OUT.
+ */
+ if (tb_route(sw))
+ list_add(&port->list, &tcm->dp_resources);
+ else
+ list_add_tail(&port->list, &tcm->dp_resources);
+
tb_port_dbg(port, "DP IN resource available\n");
}
}
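
The comment added in the hunk above states the intent; concretely, list_add() places device-router DP IN adapters at the head of tcm->dp_resources while the host router's adapters go to the tail, so any "first available resource wins" walk naturally prefers an external GPU. A hedged sketch of such a walk (dp_in_available() is a hypothetical helper, not the lookup tb.c actually uses):

	struct tb_port *port;

	list_for_each_entry(port, &tcm->dp_resources, list) {
		/* Device-router entries sit at the head, so they are tried first. */
		if (dp_in_available(port))
			return port;
	}
	return NULL;
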
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 8b1031fb0a44..bce0d2a9a7f3 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -6444,11 +6444,24 @@ static bool ufshcd_abort_one(struct request *rq, void *priv)
struct scsi_device *sdev = cmd->device;
struct Scsi_Host *shost = sdev->host;
struct ufs_hba *hba = shost_priv(shost);
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ struct ufs_hw_queue *hwq;
+ unsigned long flags;
*ret = ufshcd_try_to_abort_task(hba, tag);
dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
*ret ? "failed" : "succeeded");
+
+ /* Release cmd in MCQ mode if abort succeeds */
+ if (is_mcq_enabled(hba) && (*ret == 0)) {
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
+ spin_lock_irqsave(&hwq->cq_lock, flags);
+ if (ufshcd_cmd_inflight(lrbp->cmd))
+ ufshcd_release_scsi_cmd(hba, lrbp);
+ spin_unlock_irqrestore(&hwq->cq_lock, flags);
+ }
+
return *ret == 0;
}
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index af981778382d..02f297f5637d 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -1529,6 +1529,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
unsigned long flags;
int counter = 0;
+ local_bh_disable();
spin_lock_irqsave(&pdev->lock, flags);
if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
@@ -1541,6 +1542,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
cdnsp_died(pdev);
spin_unlock_irqrestore(&pdev->lock, flags);
+ local_bh_enable();
return IRQ_HANDLED;
}
@@ -1557,6 +1559,7 @@ irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
spin_unlock_irqrestore(&pdev->lock, flags);
+ local_bh_enable();
return IRQ_HANDLED;
}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index b19e38d5fd10..7f8d33f92ddb 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1047,7 +1047,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
dev_notice(ddev, "descriptor type invalid, skip\n");
- continue;
+ goto skip_to_next_descriptor;
}
switch (cap_type) {
@@ -1078,6 +1078,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
break;
}
+skip_to_next_descriptor:
total_len -= length;
buffer += length;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b4584a0cd484..87480a6e6d93 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -622,29 +622,6 @@ static int hub_ext_port_status(struct usb_hub *hub, int port1, int type,
ret = 0;
}
mutex_unlock(&hub->status_mutex);
-
- /*
- * There is no need to lock status_mutex here, because status_mutex
- * protects hub->status, and the phy driver only checks the port
- * status without changing the status.
- */
- if (!ret) {
- struct usb_device *hdev = hub->hdev;
-
- /*
- * Only roothub will be notified of port state changes,
- * since the USB PHY only cares about changes at the next
- * level.
- */
- if (is_root_hub(hdev)) {
- struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
-
- if (hcd->usb_phy)
- usb_phy_notify_port_status(hcd->usb_phy,
- port1 - 1, *status, *change);
- }
- }
-
return ret;
}
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index 0144ca8350c3..5c7538d498dd 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -2015,15 +2015,17 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
{
struct dwc2_qtd *qtd;
struct dwc2_host_chan *chan;
- u32 hcint, hcintmsk;
+ u32 hcint, hcintraw, hcintmsk;
chan = hsotg->hc_ptr_array[chnum];
- hcint = dwc2_readl(hsotg, HCINT(chnum));
+ hcintraw = dwc2_readl(hsotg, HCINT(chnum));
hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
+ hcint = hcintraw & hcintmsk;
+ dwc2_writel(hsotg, hcint, HCINT(chnum));
+
if (!chan) {
dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
- dwc2_writel(hsotg, hcint, HCINT(chnum));
return;
}
@@ -2032,11 +2034,9 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
chnum);
dev_vdbg(hsotg->dev,
" hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
- hcint, hcintmsk, hcint & hcintmsk);
+ hcintraw, hcintmsk, hcint);
}
- dwc2_writel(hsotg, hcint, HCINT(chnum));
-
/*
* If we got an interrupt after someone called
* dwc2_hcd_endpoint_disable() we don't want to crash below
@@ -2046,8 +2046,7 @@ static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
return;
}
- chan->hcint = hcint;
- hcint &= hcintmsk;
+ chan->hcint = hcintraw;
/*
* If the channel was halted due to a dequeue, the qtd list might
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 0328c86ef806..b101dbf8c5dc 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -2034,6 +2034,8 @@ static int dwc3_probe(struct platform_device *pdev)
pm_runtime_put(dev);
+ dma_set_max_seg_size(dev, UINT_MAX);
+
return 0;
err_exit_debugfs:
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 039bf241769a..57ddd2e43022 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -505,6 +505,7 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
dwc->role_switch_default_mode = USB_DR_MODE_PERIPHERAL;
mode = DWC3_GCTL_PRTCAP_DEVICE;
}
+ dwc3_set_mode(dwc, mode);
dwc3_role_switch.fwnode = dev_fwnode(dwc->dev);
dwc3_role_switch.set = dwc3_usb_role_switch_set;
@@ -526,7 +527,6 @@ static int dwc3_setup_role_switch(struct dwc3 *dwc)
}
}
- dwc3_set_mode(dwc, mode);
return 0;
}
#else
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 3de43df6bbe8..fdf6d5d3c2ad 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -546,10 +546,9 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
pdata ? pdata->hs_phy_irq_index : -1);
if (irq > 0) {
/* Keep wakeup interrupts disabled until suspend */
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"qcom_dwc3 HS", qcom);
if (ret) {
dev_err(qcom->dev, "hs_phy_irq failed: %d\n", ret);
@@ -561,10 +560,9 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
irq = dwc3_qcom_get_irq(pdev, "dp_hs_phy_irq",
pdata ? pdata->dp_hs_phy_irq_index : -1);
if (irq > 0) {
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"qcom_dwc3 DP_HS", qcom);
if (ret) {
dev_err(qcom->dev, "dp_hs_phy_irq failed: %d\n", ret);
@@ -576,10 +574,9 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
irq = dwc3_qcom_get_irq(pdev, "dm_hs_phy_irq",
pdata ? pdata->dm_hs_phy_irq_index : -1);
if (irq > 0) {
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"qcom_dwc3 DM_HS", qcom);
if (ret) {
dev_err(qcom->dev, "dm_hs_phy_irq failed: %d\n", ret);
@@ -591,10 +588,9 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
irq = dwc3_qcom_get_irq(pdev, "ss_phy_irq",
pdata ? pdata->ss_phy_irq_index : -1);
if (irq > 0) {
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"qcom_dwc3 SS", qcom);
if (ret) {
dev_err(qcom->dev, "ss_phy_irq failed: %d\n", ret);
@@ -758,6 +754,7 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
if (!qcom->dwc3) {
ret = -ENODEV;
dev_err(dev, "failed to get dwc3 platform device\n");
+ of_platform_depopulate(dev);
}
node_put:
@@ -766,9 +763,9 @@ node_put:
return ret;
}
-static struct platform_device *
-dwc3_qcom_create_urs_usb_platdev(struct device *dev)
+static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev)
{
+ struct platform_device *urs_usb = NULL;
struct fwnode_handle *fwh;
struct acpi_device *adev;
char name[8];
@@ -788,9 +785,26 @@ dwc3_qcom_create_urs_usb_platdev(struct device *dev)
adev = to_acpi_device_node(fwh);
if (!adev)
- return NULL;
+ goto err_put_handle;
+
+ urs_usb = acpi_create_platform_device(adev, NULL);
+ if (IS_ERR_OR_NULL(urs_usb))
+ goto err_put_handle;
+
+ return urs_usb;
+
+err_put_handle:
+ fwnode_handle_put(fwh);
+
+ return urs_usb;
+}
- return acpi_create_platform_device(adev, NULL);
+static void dwc3_qcom_destroy_urs_usb_platdev(struct platform_device *urs_usb)
+{
+ struct fwnode_handle *fwh = urs_usb->dev.fwnode;
+
+ platform_device_unregister(urs_usb);
+ fwnode_handle_put(fwh);
}
static int dwc3_qcom_probe(struct platform_device *pdev)
@@ -874,13 +888,13 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
if (IS_ERR(qcom->qscratch_base)) {
ret = PTR_ERR(qcom->qscratch_base);
- goto clk_disable;
+ goto free_urs;
}
ret = dwc3_qcom_setup_irq(pdev);
if (ret) {
dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
- goto clk_disable;
+ goto free_urs;
}
/*
@@ -899,7 +913,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
- goto depopulate;
+ goto free_urs;
}
ret = dwc3_qcom_interconnect_init(qcom);
@@ -931,10 +945,16 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
interconnect_exit:
dwc3_qcom_interconnect_exit(qcom);
depopulate:
- if (np)
+ if (np) {
of_platform_depopulate(&pdev->dev);
- else
- platform_device_put(pdev);
+ } else {
+ device_remove_software_node(&qcom->dwc3->dev);
+ platform_device_del(qcom->dwc3);
+ }
+ platform_device_put(qcom->dwc3);
+free_urs:
+ if (qcom->urs_usb)
+ dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
clk_disable:
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
@@ -953,11 +973,16 @@ static void dwc3_qcom_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int i;
- device_remove_software_node(&qcom->dwc3->dev);
- if (np)
+ if (np) {
of_platform_depopulate(&pdev->dev);
- else
- platform_device_put(pdev);
+ } else {
+ device_remove_software_node(&qcom->dwc3->dev);
+ platform_device_del(qcom->dwc3);
+ }
+ platform_device_put(qcom->dwc3);
+
+ if (qcom->urs_usb)
+ dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
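
The dwc3-qcom hunks above replace the irq_set_status_flags(irq, IRQ_NOAUTOEN) call and the explicit trigger flag with IRQF_ONESHOT | IRQF_NO_AUTOEN at request time, which keeps each wakeup interrupt masked until the driver arms it. A minimal sketch of the pattern with illustrative names (example_wake_handler and the arm/disarm placement are not taken from this driver):

	static irqreturn_t example_wake_handler(int irq, void *data)
	{
		/* A real handler would typically record a wakeup event here. */
		return IRQ_HANDLED;
	}

	/* Probe: the line stays disabled because of IRQF_NO_AUTOEN. */
	ret = devm_request_threaded_irq(dev, irq, NULL, example_wake_handler,
					IRQF_ONESHOT | IRQF_NO_AUTOEN,
					"example wakeup", priv);

	/* Suspend path: arm the wakeup line. */
	enable_irq(irq);

	/* Resume path: disarm it again. */
	disable_irq_nosync(irq);
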
diff --git a/drivers/usb/dwc3/dwc3-rtk.c b/drivers/usb/dwc3/dwc3-rtk.c
index 590028e8fdcb..3cd6b184551c 100644
--- a/drivers/usb/dwc3/dwc3-rtk.c
+++ b/drivers/usb/dwc3/dwc3-rtk.c
@@ -183,10 +183,13 @@ static enum usb_device_speed __get_dwc3_maximum_speed(struct device_node *np)
ret = of_property_read_string(dwc3_np, "maximum-speed", &maximum_speed);
if (ret < 0)
- return USB_SPEED_UNKNOWN;
+ goto out;
ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
+out:
+ of_node_put(dwc3_np);
+
return (ret < 0) ? USB_SPEED_UNKNOWN : ret;
}
@@ -339,6 +342,9 @@ static int dwc3_rtk_probe_dwc3_core(struct dwc3_rtk *rtk)
switch_usb2_role(rtk, rtk->cur_role);
+ platform_device_put(dwc3_pdev);
+ of_node_put(dwc3_node);
+
return 0;
err_pdev_put:
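The dwc3-rtk changes balance the references taken by of_get_*() and of_find_device_by_node(): the child node read for "maximum-speed" and the node/platform device used while probing the core are now released on every exit path. A minimal sketch of the pattern, using hypothetical names:

/*
 * Sketch (hypothetical helper) of the reference-dropping pattern the
 * dwc3-rtk hunks apply: every of_get_*() that returns a device_node must
 * be balanced with of_node_put() on all paths, including early returns.
 */
#include <linux/of.h>
#include <linux/string.h>
#include <linux/usb/ch9.h>

static enum usb_device_speed example_child_max_speed(struct device_node *np)
{
	struct device_node *child;
	const char *speed = NULL;
	enum usb_device_speed ret = USB_SPEED_UNKNOWN;

	child = of_get_child_by_name(np, "dwc3");	/* takes a reference */
	if (!child)
		return USB_SPEED_UNKNOWN;

	if (!of_property_read_string(child, "maximum-speed", &speed) &&
	    !strcmp(speed, "super-speed"))
		ret = USB_SPEED_SUPER;

	of_node_put(child);				/* balanced on every path */
	return ret;
}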
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 5b3cd455adec..61f3f8bbdcea 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -650,9 +650,8 @@ static int check_isoc_ss_overlap(struct mu3h_sch_ep_info *sch_ep, u32 offset)
if (sch_ep->ep_type == ISOC_OUT_EP) {
for (j = 0; j < sch_ep->num_budget_microframes; j++) {
- k = XHCI_MTK_BW_INDEX(base + j + CS_OFFSET);
- /* use cs to indicate existence of in-ss @(base+j) */
- if (tt->fs_bus_bw_in[k])
+ k = XHCI_MTK_BW_INDEX(base + j);
+ if (tt->in_ss_cnt[k])
return -ESCH_SS_OVERLAP;
}
} else if (sch_ep->ep_type == ISOC_IN_EP || sch_ep->ep_type == INT_IN_EP) {
@@ -769,6 +768,14 @@ static void update_sch_tt(struct mu3h_sch_ep_info *sch_ep, bool used)
tt->fs_frame_bw[f] -= (u16)sch_ep->bw_budget_table[j];
}
}
+
+ if (sch_ep->ep_type == ISOC_IN_EP || sch_ep->ep_type == INT_IN_EP) {
+ k = XHCI_MTK_BW_INDEX(base);
+ if (used)
+ tt->in_ss_cnt[k]++;
+ else
+ tt->in_ss_cnt[k]--;
+ }
}
if (used)
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index 865b55e23b15..39f7ae7d3087 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -38,6 +38,7 @@
* @fs_bus_bw_in: save bandwidth used by FS/LS IN eps in each uframes
* @ls_bus_bw: save bandwidth used by LS eps in each uframes
* @fs_frame_bw: save bandwidth used by FS/LS eps in each FS frames
+ * @in_ss_cnt: the count of Start-Split for IN eps
* @ep_list: Endpoints using this TT
*/
struct mu3h_sch_tt {
@@ -45,6 +46,7 @@ struct mu3h_sch_tt {
u16 fs_bus_bw_in[XHCI_MTK_MAX_ESIT];
u8 ls_bus_bw[XHCI_MTK_MAX_ESIT];
u16 fs_frame_bw[XHCI_MTK_FRAMES_CNT];
+ u8 in_ss_cnt[XHCI_MTK_MAX_ESIT];
struct list_head ep_list;
};
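The xhci-mtk hunks add a per-microframe in_ss_cnt[] counter so the scheduler tracks Start-Split transactions of IN endpoints directly, instead of overloading the FS/LS IN bandwidth table as an existence marker. A condensed sketch with simplified, hypothetical types:

/*
 * Simplified book-keeping of Start-Split slots for IN endpoints, so an
 * ISOC OUT endpoint can detect an overlap per microframe. Types and
 * macros here are placeholders for the driver's real ones.
 */
#include <linux/types.h>

#define EXAMPLE_MAX_ESIT	64
#define EXAMPLE_BW_INDEX(x)	((x) & (EXAMPLE_MAX_ESIT - 1))

struct example_tt {
	u8 in_ss_cnt[EXAMPLE_MAX_ESIT];	/* Start-Split count for IN eps */
};

static void example_account_in_ss(struct example_tt *tt, u32 base, bool used)
{
	u32 k = EXAMPLE_BW_INDEX(base);

	if (used)
		tt->in_ss_cnt[k]++;
	else
		tt->in_ss_cnt[k]--;
}

static bool example_isoc_out_overlaps(struct example_tt *tt, u32 base, u32 nr_uframes)
{
	u32 j;

	for (j = 0; j < nr_uframes; j++)
		if (tt->in_ss_cnt[EXAMPLE_BW_INDEX(base + j)])
			return true;	/* an IN Start-Split already sits here */

	return false;
}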
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index b93161374293..732cdeb73920 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>
#include <linux/slab.h>
@@ -148,7 +149,7 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
int ret;
int irq;
struct xhci_plat_priv *priv = NULL;
-
+ bool of_match;
if (usb_disabled())
return -ENODEV;
@@ -253,16 +254,23 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
&xhci->imod_interval);
}
- hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
- if (IS_ERR(hcd->usb_phy)) {
- ret = PTR_ERR(hcd->usb_phy);
- if (ret == -EPROBE_DEFER)
- goto disable_clk;
- hcd->usb_phy = NULL;
- } else {
- ret = usb_phy_init(hcd->usb_phy);
- if (ret)
- goto disable_clk;
+ /*
+ * Drivers such as dwc3 manage PHYs themselves (and rely on driver name
+ * matching for the xhci platform device).
+ */
+ of_match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev);
+ if (of_match) {
+ hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
+ if (IS_ERR(hcd->usb_phy)) {
+ ret = PTR_ERR(hcd->usb_phy);
+ if (ret == -EPROBE_DEFER)
+ goto disable_clk;
+ hcd->usb_phy = NULL;
+ } else {
+ ret = usb_phy_init(hcd->usb_phy);
+ if (ret)
+ goto disable_clk;
+ }
}
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
@@ -285,15 +293,17 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
goto dealloc_usb2_hcd;
}
- xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev,
- "usb-phy", 1);
- if (IS_ERR(xhci->shared_hcd->usb_phy)) {
- xhci->shared_hcd->usb_phy = NULL;
- } else {
- ret = usb_phy_init(xhci->shared_hcd->usb_phy);
- if (ret)
- dev_err(sysdev, "%s init usb3phy fail (ret=%d)\n",
- __func__, ret);
+ if (of_match) {
+ xhci->shared_hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev,
+ "usb-phy", 1);
+ if (IS_ERR(xhci->shared_hcd->usb_phy)) {
+ xhci->shared_hcd->usb_phy = NULL;
+ } else {
+ ret = usb_phy_init(xhci->shared_hcd->usb_phy);
+ if (ret)
+ dev_err(sysdev, "%s init usb3phy fail (ret=%d)\n",
+ __func__, ret);
+ }
}
xhci->shared_hcd->tpl_support = hcd->tpl_support;
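The xhci-plat change gates the "usb-phy" phandle lookup on the platform device actually being matched through its OF table, so glue drivers that create the xHCI child by driver-name matching and manage the PHYs themselves are left alone. A minimal sketch of the gate, simplified and with illustrative names:

/*
 * Sketch: only look up an optional "usb-phy" phandle for OF-matched
 * devices; a name-matched child returns NULL ("no PHY here, parent owns
 * them") and only -EPROBE_DEFER is propagated to the caller.
 */
#include <linux/err.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>

static struct usb_phy *example_optional_phy(struct platform_device *pdev)
{
	struct usb_phy *phy;

	if (!of_match_device(pdev->dev.driver->of_match_table, &pdev->dev))
		return NULL;	/* name-matched child: parent owns the PHYs */

	phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
	if (IS_ERR(phy))
		return PTR_ERR(phy) == -EPROBE_DEFER ? phy : NULL;

	return phy;
}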
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index a341b2fbb7b4..2b45404e9732 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -432,6 +432,8 @@ static const struct usb_device_id onboard_hub_id_table[] = {
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2412) }, /* USB2412 USB 2.0 */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2744) }, /* USB5744 USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x5744) }, /* USB5744 USB 3.0 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
index c4e24a7b9290..292110e64a1d 100644
--- a/drivers/usb/misc/onboard_usb_hub.h
+++ b/drivers/usb/misc/onboard_usb_hub.h
@@ -16,6 +16,11 @@ static const struct onboard_hub_pdata microchip_usb424_data = {
.num_supplies = 1,
};
+static const struct onboard_hub_pdata microchip_usb5744_data = {
+ .reset_us = 0,
+ .num_supplies = 2,
+};
+
static const struct onboard_hub_pdata realtek_rts5411_data = {
.reset_us = 0,
.num_supplies = 1,
@@ -50,6 +55,8 @@ static const struct of_device_id onboard_hub_match[] = {
{ .compatible = "usb424,2412", .data = &microchip_usb424_data, },
{ .compatible = "usb424,2514", .data = &microchip_usb424_data, },
{ .compatible = "usb424,2517", .data = &microchip_usb424_data, },
+ { .compatible = "usb424,2744", .data = &microchip_usb5744_data, },
+ { .compatible = "usb424,5744", .data = &microchip_usb5744_data, },
{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
{ .compatible = "usb4b4,6504", .data = &cypress_hx3_data, },
diff --git a/drivers/usb/misc/usb-ljca.c b/drivers/usb/misc/usb-ljca.c
index c9decd0396d4..35770e608c64 100644
--- a/drivers/usb/misc/usb-ljca.c
+++ b/drivers/usb/misc/usb-ljca.c
@@ -457,8 +457,8 @@ static void ljca_auxdev_acpi_bind(struct ljca_adapter *adap,
u64 adr, u8 id)
{
struct ljca_match_ids_walk_data wd = { 0 };
- struct acpi_device *parent, *adev;
struct device *dev = adap->dev;
+ struct acpi_device *parent;
char uid[4];
parent = ACPI_COMPANION(dev);
@@ -466,17 +466,7 @@ static void ljca_auxdev_acpi_bind(struct ljca_adapter *adap,
return;
/*
- * get auxdev ACPI handle from the ACPI device directly
- * under the parent that matches _ADR.
- */
- adev = acpi_find_child_device(parent, adr, false);
- if (adev) {
- ACPI_COMPANION_SET(&auxdev->dev, adev);
- return;
- }
-
- /*
- * _ADR is a grey area in the ACPI specification, some
+ * Currently LJCA hw doesn't use _ADR; instead, the shipped
* platforms use _HID to distinguish children devices.
*/
switch (adr) {
@@ -656,10 +646,11 @@ static int ljca_enumerate_spi(struct ljca_adapter *adap)
unsigned int i;
int ret;
+ /* Not all LJCA chips implement SPI; a timeout reading the descriptors is normal */
ret = ljca_send(adap, LJCA_CLIENT_MNG, LJCA_MNG_ENUM_SPI, NULL, 0, buf,
sizeof(buf), true, LJCA_ENUM_CLIENT_TIMEOUT_MS);
if (ret < 0)
- return ret;
+ return (ret == -ETIMEDOUT) ? 0 : ret;
/* check firmware response */
desc = (struct ljca_spi_descriptor *)buf;
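The usb-ljca change treats a timed-out SPI enumeration as "function not present" rather than a probe failure, since not all LJCA chips implement SPI. A minimal sketch of that optional-function pattern, with placeholder types and helpers:

/*
 * Sketch: an enumeration request to a sub-block that may simply not exist
 * is allowed to time out; only other errors are propagated.
 */
#include <linux/errno.h>

struct example_adapter;						/* placeholder type */
int example_send_enum_cmd(struct example_adapter *adap);	/* placeholder */
int example_register_children(struct example_adapter *adap);	/* placeholder */

static int example_enumerate_optional(struct example_adapter *adap)
{
	int ret = example_send_enum_cmd(adap);	/* may time out on chips without SPI */

	if (ret < 0)
		return ret == -ETIMEDOUT ? 0 : ret;	/* timeout means "not present" */

	return example_register_children(adap);
}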
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 45dcfaadaf98..4dffcfefd62d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -203,8 +203,8 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5829E_ESIM 0x81e4
#define DELL_PRODUCT_5829E 0x81e6
-#define DELL_PRODUCT_FM101R 0x8213
-#define DELL_PRODUCT_FM101R_ESIM 0x8215
+#define DELL_PRODUCT_FM101R_ESIM 0x8213
+#define DELL_PRODUCT_FM101R 0x8215
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
@@ -609,6 +609,8 @@ static void option_instat_callback(struct urb *urb);
#define UNISOC_VENDOR_ID 0x1782
/* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */
#define TOZED_PRODUCT_LT70C 0x4055
+/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
+#define LUAT_PRODUCT_AIR720U 0x4e00
/* Device flags */
@@ -1546,7 +1548,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
@@ -2249,6 +2252,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x1782, 0x4d11, 0xff) }, /* Fibocom L610 (ECM/RNDIS mode) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x0001, 0xff, 0xff, 0xff) }, /* Fibocom L716-EU (ECM/RNDIS mode) */
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
@@ -2271,6 +2275,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 058d5b853b57..bfb6f9481e87 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -4273,7 +4273,8 @@ static void run_state_machine(struct tcpm_port *port)
current_lim = PD_P_SNK_STDBY_MW / 5;
tcpm_set_current_limit(port, current_lim, 5000);
/* Not sink vbus if operational current is 0mA */
- tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
+ tcpm_set_charge(port, !port->pd_supported ||
+ pdo_max_current(port->snk_pdo[0]));
if (!port->pd_supported)
tcpm_set_state(port, SNK_READY, 0);
@@ -5391,6 +5392,15 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
port->tcpc->set_bist_data(port->tcpc, false);
+ switch (port->state) {
+ case ERROR_RECOVERY:
+ case PORT_RESET:
+ case PORT_RESET_WAIT_OFF:
+ return;
+ default:
+ break;
+ }
+
if (port->ams != NONE_AMS)
port->ams = NONE_AMS;
if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
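The tcpm hunk makes the hard-reset handler bail out when the port is already driving an error recovery or port reset, so the event does not re-arm the state machine mid-reset. A condensed sketch with a simplified, hypothetical state enum:

/*
 * Sketch of the early-return guard: states in which a reset is already in
 * progress are filtered before normal hard-reset processing.
 */
enum example_port_state {
	EXAMPLE_SNK_READY,
	EXAMPLE_ERROR_RECOVERY,
	EXAMPLE_PORT_RESET,
	EXAMPLE_PORT_RESET_WAIT_OFF,
};

static void example_handle_hard_reset(enum example_port_state state)
{
	switch (state) {
	case EXAMPLE_ERROR_RECOVERY:
	case EXAMPLE_PORT_RESET:
	case EXAMPLE_PORT_RESET_WAIT_OFF:
		return;		/* a reset is already being driven; do nothing */
	default:
		break;
	}

	/* ... normal hard-reset processing would continue here ... */
}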
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 0e867f531d34..196535ad996d 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -968,16 +968,17 @@ static int tps25750_start_patch_burst_mode(struct tps6598x *tps)
ret = of_property_match_string(np, "reg-names", "patch-address");
if (ret < 0) {
dev_err(tps->dev, "failed to get patch-address %d\n", ret);
- return ret;
+ goto release_fw;
}
ret = of_property_read_u32_index(np, "reg", ret, &addr);
if (ret)
- return ret;
+ goto release_fw;
if (addr == 0 || (addr >= 0x20 && addr <= 0x23)) {
dev_err(tps->dev, "wrong patch address %u\n", addr);
- return -EINVAL;
+ ret = -EINVAL;
+ goto release_fw;
}
bpms_data.addr = (u8)addr;
@@ -1226,7 +1227,10 @@ static int tps6598x_probe(struct i2c_client *client)
TPS_REG_INT_PLUG_EVENT;
}
- tps->data = device_get_match_data(tps->dev);
+ if (dev_fwnode(tps->dev))
+ tps->data = device_get_match_data(tps->dev);
+ else
+ tps->data = i2c_get_match_data(client);
if (!tps->data)
return -EINVAL;
@@ -1425,7 +1429,7 @@ static const struct of_device_id tps6598x_of_match[] = {
MODULE_DEVICE_TABLE(of, tps6598x_of_match);
static const struct i2c_device_id tps6598x_id[] = {
- { "tps6598x" },
+ { "tps6598x", (kernel_ulong_t)&tps6598x_data },
{ }
};
MODULE_DEVICE_TABLE(i2c, tps6598x_id);
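The tipd change lets the driver probe without firmware nodes: with a fwnode (DT or ACPI) the configuration still comes from the firmware match, otherwise it falls back to the I2C id table, which is why the "tps6598x" id entry now carries a data pointer. A minimal sketch of that fallback, with an illustrative helper name:

/*
 * Sketch: prefer firmware match data when a fwnode exists, otherwise use
 * the data attached to the matching i2c_device_id entry.
 */
#include <linux/i2c.h>
#include <linux/property.h>

static const void *example_get_cfg(struct i2c_client *client)
{
	if (dev_fwnode(&client->dev))
		return device_get_match_data(&client->dev);

	return i2c_get_match_data(client);
}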
diff --git a/drivers/vfio/pci/pds/pci_drv.c b/drivers/vfio/pci/pds/pci_drv.c
index dd8c00c895a2..a34dda516629 100644
--- a/drivers/vfio/pci/pds/pci_drv.c
+++ b/drivers/vfio/pci/pds/pci_drv.c
@@ -55,10 +55,10 @@ static void pds_vfio_recovery(struct pds_vfio_pci_device *pds_vfio)
* VFIO_DEVICE_STATE_RUNNING.
*/
if (deferred_reset_needed) {
- spin_lock(&pds_vfio->reset_lock);
+ mutex_lock(&pds_vfio->reset_mutex);
pds_vfio->deferred_reset = true;
pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_ERROR;
- spin_unlock(&pds_vfio->reset_lock);
+ mutex_unlock(&pds_vfio->reset_mutex);
}
}
diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c
index 649b18ee394b..4c351c59d05a 100644
--- a/drivers/vfio/pci/pds/vfio_dev.c
+++ b/drivers/vfio/pci/pds/vfio_dev.c
@@ -29,7 +29,7 @@ struct pds_vfio_pci_device *pds_vfio_pci_drvdata(struct pci_dev *pdev)
void pds_vfio_state_mutex_unlock(struct pds_vfio_pci_device *pds_vfio)
{
again:
- spin_lock(&pds_vfio->reset_lock);
+ mutex_lock(&pds_vfio->reset_mutex);
if (pds_vfio->deferred_reset) {
pds_vfio->deferred_reset = false;
if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR) {
@@ -39,23 +39,23 @@ again:
}
pds_vfio->state = pds_vfio->deferred_reset_state;
pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
- spin_unlock(&pds_vfio->reset_lock);
+ mutex_unlock(&pds_vfio->reset_mutex);
goto again;
}
mutex_unlock(&pds_vfio->state_mutex);
- spin_unlock(&pds_vfio->reset_lock);
+ mutex_unlock(&pds_vfio->reset_mutex);
}
void pds_vfio_reset(struct pds_vfio_pci_device *pds_vfio)
{
- spin_lock(&pds_vfio->reset_lock);
+ mutex_lock(&pds_vfio->reset_mutex);
pds_vfio->deferred_reset = true;
pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
if (!mutex_trylock(&pds_vfio->state_mutex)) {
- spin_unlock(&pds_vfio->reset_lock);
+ mutex_unlock(&pds_vfio->reset_mutex);
return;
}
- spin_unlock(&pds_vfio->reset_lock);
+ mutex_unlock(&pds_vfio->reset_mutex);
pds_vfio_state_mutex_unlock(pds_vfio);
}
@@ -155,6 +155,9 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
pds_vfio->vf_id = vf_id;
+ mutex_init(&pds_vfio->state_mutex);
+ mutex_init(&pds_vfio->reset_mutex);
+
vdev->migration_flags = VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P;
vdev->mig_ops = &pds_vfio_lm_ops;
vdev->log_ops = &pds_vfio_log_ops;
@@ -168,6 +171,17 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
return 0;
}
+static void pds_vfio_release_device(struct vfio_device *vdev)
+{
+ struct pds_vfio_pci_device *pds_vfio =
+ container_of(vdev, struct pds_vfio_pci_device,
+ vfio_coredev.vdev);
+
+ mutex_destroy(&pds_vfio->state_mutex);
+ mutex_destroy(&pds_vfio->reset_mutex);
+ vfio_pci_core_release_dev(vdev);
+}
+
static int pds_vfio_open_device(struct vfio_device *vdev)
{
struct pds_vfio_pci_device *pds_vfio =
@@ -179,7 +193,6 @@ static int pds_vfio_open_device(struct vfio_device *vdev)
if (err)
return err;
- mutex_init(&pds_vfio->state_mutex);
pds_vfio->state = VFIO_DEVICE_STATE_RUNNING;
pds_vfio->deferred_reset_state = VFIO_DEVICE_STATE_RUNNING;
@@ -199,14 +212,13 @@ static void pds_vfio_close_device(struct vfio_device *vdev)
pds_vfio_put_save_file(pds_vfio);
pds_vfio_dirty_disable(pds_vfio, true);
mutex_unlock(&pds_vfio->state_mutex);
- mutex_destroy(&pds_vfio->state_mutex);
vfio_pci_core_close_device(vdev);
}
static const struct vfio_device_ops pds_vfio_ops = {
.name = "pds-vfio",
.init = pds_vfio_init_device,
- .release = vfio_pci_core_release_dev,
+ .release = pds_vfio_release_device,
.open_device = pds_vfio_open_device,
.close_device = pds_vfio_close_device,
.ioctl = vfio_pci_core_ioctl,
diff --git a/drivers/vfio/pci/pds/vfio_dev.h b/drivers/vfio/pci/pds/vfio_dev.h
index b8f2d667608f..e7b01080a1ec 100644
--- a/drivers/vfio/pci/pds/vfio_dev.h
+++ b/drivers/vfio/pci/pds/vfio_dev.h
@@ -18,7 +18,7 @@ struct pds_vfio_pci_device {
struct pds_vfio_dirty dirty;
struct mutex state_mutex; /* protect migration state */
enum vfio_device_mig_state state;
- spinlock_t reset_lock; /* protect reset_done flow */
+ struct mutex reset_mutex; /* protect reset_done flow */
u8 deferred_reset;
enum vfio_device_mig_state deferred_reset_state;
struct notifier_block nb;
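The pds VFIO hunks convert the reset lock to a mutex, move both mutex_init() calls into the .init callback and their mutex_destroy() into a new .release callback, keeping the deferred-reset handshake intact. A condensed sketch of that handshake with a hypothetical device type:

/*
 * reset_mutex only guards the deferred flags, state_mutex guards the
 * migration state; a reset request that cannot take state_mutex records
 * itself and lets the current state_mutex holder act on it.
 */
#include <linux/mutex.h>
#include <linux/types.h>

struct example_vfio_dev {
	struct mutex state_mutex;	/* protects migration state */
	struct mutex reset_mutex;	/* protects the deferred_reset flag */
	bool deferred_reset;
};

static void example_request_reset(struct example_vfio_dev *d)
{
	mutex_lock(&d->reset_mutex);
	d->deferred_reset = true;
	if (!mutex_trylock(&d->state_mutex)) {
		/* current state_mutex holder will act on deferred_reset */
		mutex_unlock(&d->reset_mutex);
		return;
	}
	mutex_unlock(&d->reset_mutex);

	/* we own state_mutex: handle the reset now */
	mutex_lock(&d->reset_mutex);
	d->deferred_reset = false;
	mutex_unlock(&d->reset_mutex);
	mutex_unlock(&d->state_mutex);
}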
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index f5edb9e27e3c..b8cfea7812d6 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1110,8 +1110,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
for (i = 0; i < nvec; i++) {
info = xen_irq_init(irq + i);
- if (!info)
+ if (!info) {
+ ret = -ENOMEM;
goto error_irq;
+ }
irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
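The events_base fix sets an explicit -ENOMEM before jumping to the shared error label; without it, a stale (possibly zero) return value from earlier in the function would be returned on allocation failure. A minimal, generic illustration of the pattern with a hypothetical allocator loop:

/*
 * Inside a loop that jumps to a common error label, the error code must be
 * set at the failure site; the cleanup path only unwinds.
 */
#include <linux/errno.h>
#include <linux/slab.h>

static int example_alloc_many(void **slots, int nvec)
{
	int i, ret = 0;

	for (i = 0; i < nvec; i++) {
		slots[i] = kzalloc(32, GFP_KERNEL);
		if (!slots[i]) {
			ret = -ENOMEM;	/* without this, ret would still be 0 */
			goto err_free;
		}
	}
	return 0;

err_free:
	while (--i >= 0)
		kfree(slots[i]);
	return ret;
}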
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 1ce7f3c7a950..0eb337a8ec0f 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -1115,7 +1115,7 @@ struct privcmd_kernel_ioreq {
spinlock_t lock; /* Protects ioeventfds list */
struct list_head ioeventfds;
struct list_head list;
- struct ioreq_port ports[0];
+ struct ioreq_port ports[] __counted_by(vcpus);
};
static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
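The privcmd hunk replaces the old zero-length array with a flexible array annotated by __counted_by(), tying ports[] to the member that holds its element count so fortified builds can bounds-check accesses. A short sketch with a hypothetical struct:

/*
 * The count member named in __counted_by() must be assigned before the
 * flexible array is indexed; allocation still sizes the object with
 * struct_size().
 */
#include <linux/overflow.h>
#include <linux/slab.h>

struct example_ioreq {
	unsigned int vcpus;			/* number of entries in ports[] */
	int ports[] __counted_by(vcpus);
};

static struct example_ioreq *example_ioreq_alloc(unsigned int vcpus)
{
	struct example_ioreq *kioreq;

	kioreq = kzalloc(struct_size(kioreq, ports, vcpus), GFP_KERNEL);
	if (!kioreq)
		return NULL;

	kioreq->vcpus = vcpus;	/* set the count before touching ports[] */
	return kioreq;
}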
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 946bd56f0ac5..0e6c6c25d154 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -405,4 +405,5 @@ const struct dma_map_ops xen_swiotlb_dma_ops = {
.get_sgtable = dma_common_get_sgtable,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
+ .max_mapping_size = swiotlb_max_mapping_size,
};
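The swiotlb-xen hunk wires up the .max_mapping_size hook so upper layers (block drivers in particular) never build a DMA mapping larger than a SWIOTLB bounce-buffer slot can satisfy. A sketch of the hook in a reduced ops table (not the full xen-swiotlb structure):

/*
 * A DMA ops implementation that may bounce through SWIOTLB should report
 * the bounce-buffer limit via .max_mapping_size.
 */
#include <linux/dma-map-ops.h>
#include <linux/swiotlb.h>

static const struct dma_map_ops example_bounce_dma_ops = {
	/* ...map/unmap/alloc callbacks elided... */
	.max_mapping_size = swiotlb_max_mapping_size,
};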