Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/glue.c | 25
-rw-r--r--  drivers/acpi/internal.h | 1
-rw-r--r--  drivers/acpi/scan.c | 6
-rw-r--r--  drivers/ata/ahci.c | 1
-rw-r--r--  drivers/ata/libahci.c | 15
-rw-r--r--  drivers/ata/libata-core.c | 11
-rw-r--r--  drivers/ata/libata-sata.c | 2
-rw-r--r--  drivers/bus/tegra-gmi.c | 50
-rw-r--r--  drivers/firmware/ti_sci.c | 2
-rw-r--r--  drivers/firmware/xilinx/zynqmp.c | 5
-rw-r--r--  drivers/gpio/Kconfig | 1
-rw-r--r--  drivers/gpio/gpio-virtio.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c | 22
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 13
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 26
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 27
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 8
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c | 1
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 6
-rw-r--r--  drivers/gpu/drm/sun4i/Kconfig | 1
-rw-r--r--  drivers/hv/hv_balloon.c | 2
-rw-r--r--  drivers/infiniband/core/nldev.c | 3
-rw-r--r--  drivers/infiniband/core/verbs.c | 3
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 18
-rw-r--r--  drivers/memory/renesas-rpc-if.c | 109
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c | 81
-rw-r--r--  drivers/mtd/hyperbus/rpc-if.c | 4
-rw-r--r--  drivers/mtd/nand/raw/tegra_nand.c | 58
-rw-r--r--  drivers/net/amt.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 10
-rw-r--r--  drivers/net/ethernet/asix/ax88796c_main.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 10
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c | 34
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 18
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 160
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 121
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 1
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 30
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 55
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 20
-rw-r--r--  drivers/net/ethernet/marvell/mvmdio.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 56
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 1
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 23
-rw-r--r--  drivers/net/hamradio/6pack.c | 1
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c | 5
-rw-r--r--  drivers/net/ipa/ipa_resource.c | 2
-rw-r--r--  drivers/net/tun.c | 5
-rw-r--r--  drivers/net/usb/r8152.c | 9
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 29
-rw-r--r--  drivers/pinctrl/pinctrl-apple-gpio.c | 12
-rw-r--r--  drivers/pinctrl/qcom/Kconfig | 2
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-sdm845.c | 1
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-sm8350.c | 8
-rw-r--r--  drivers/pinctrl/ralink/pinctrl-mt7620.c | 1
-rw-r--r--  drivers/pinctrl/tegra/pinctrl-tegra.c | 4
-rw-r--r--  drivers/pinctrl/tegra/pinctrl-tegra194.c | 1
-rw-r--r--  drivers/platform/mellanox/mlxreg-lc.c | 5
-rw-r--r--  drivers/platform/x86/Kconfig | 2
-rw-r--r--  drivers/platform/x86/dell/Kconfig | 2
-rw-r--r--  drivers/platform/x86/hp_accel.c | 2
-rw-r--r--  drivers/platform/x86/samsung-laptop.c | 2
-rw-r--r--  drivers/platform/x86/think-lmi.c | 13
-rw-r--r--  drivers/platform/x86/think-lmi.h | 1
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 13
-rw-r--r--  drivers/powercap/dtpm_cpu.c | 9
-rw-r--r--  drivers/ptp/ptp_clockmatrix.c | 5
-rw-r--r--  drivers/ptp/ptp_ocp.c | 9
-rw-r--r--  drivers/pwm/pwm-tegra.c | 82
-rw-r--r--  drivers/s390/block/dasd_devmap.c | 76
-rw-r--r--  drivers/s390/char/raw3270.c | 12
-rw-r--r--  drivers/s390/cio/chp.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 6
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 30
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 6
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 9
-rw-r--r--  drivers/soc/Kconfig | 1
-rw-r--r--  drivers/soc/Makefile | 1
-rw-r--r--  drivers/soc/apple/Kconfig | 22
-rw-r--r--  drivers/soc/apple/Makefile | 2
-rw-r--r--  drivers/soc/apple/apple-pmgr-pwrstate.c | 324
-rw-r--r--  drivers/soc/renesas/Kconfig | 15
-rw-r--r--  drivers/soc/renesas/Makefile | 2
-rw-r--r--  drivers/soc/renesas/r8a779a0-sysc.c | 380
-rw-r--r--  drivers/soc/renesas/r8a779f0-sysc.c | 47
-rw-r--r--  drivers/soc/renesas/rcar-gen4-sysc.c | 376
-rw-r--r--  drivers/soc/renesas/rcar-gen4-sysc.h | 43
-rw-r--r--  drivers/soc/renesas/rcar-rst.c | 50
-rw-r--r--  drivers/soc/renesas/renesas-soc.c | 127
-rw-r--r--  drivers/soc/tegra/common.c | 29
-rw-r--r--  drivers/soc/tegra/fuse/fuse-tegra.c | 51
-rw-r--r--  drivers/soc/tegra/fuse/fuse-tegra20.c | 33
-rw-r--r--  drivers/soc/tegra/fuse/fuse.h | 1
-rw-r--r--  drivers/soc/tegra/pmc.c | 41
-rw-r--r--  drivers/soc/tegra/regulators-tegra20.c | 99
-rw-r--r--  drivers/soc/tegra/regulators-tegra30.c | 122
-rw-r--r--  drivers/soc/ti/k3-socinfo.c | 3
-rw-r--r--  drivers/soc/ti/knav_dma.c | 20
-rw-r--r--  drivers/soc/ti/pruss.c | 2
-rw-r--r--  drivers/soc/xilinx/zynqmp_pm_domains.c | 91
-rw-r--r--  drivers/soc/xilinx/zynqmp_power.c | 1
-rw-r--r--  drivers/spi/spi-cadence-quadspi.c | 24
-rw-r--r--  drivers/spi/spi-fsl-lpspi.c | 2
-rw-r--r--  drivers/spi/spi-geni-qcom.c | 16
-rw-r--r--  drivers/spi/spi-rpc-if.c | 4
-rw-r--r--  drivers/spi/spi-tegra20-slink.c | 9
-rw-r--r--  drivers/spi/spi.c | 12
-rw-r--r--  drivers/staging/media/tegra-vde/vde.c | 63
-rw-r--r--  drivers/tee/optee/Makefile | 1
-rw-r--r--  drivers/tee/optee/core.c | 2
-rw-r--r--  drivers/tee/optee/ffa_abi.c | 6
-rw-r--r--  drivers/tee/optee/notif.c | 125
-rw-r--r--  drivers/tee/optee/optee_msg.h | 9
-rw-r--r--  drivers/tee/optee/optee_private.h | 28
-rw-r--r--  drivers/tee/optee/optee_rpc_cmd.h | 31
-rw-r--r--  drivers/tee/optee/optee_smc.h | 75
-rw-r--r--  drivers/tee/optee/rpc.c | 71
-rw-r--r--  drivers/tee/optee/smc_abi.c | 237
-rw-r--r--  drivers/tee/tee_core.c | 10
-rw-r--r--  drivers/thermal/intel/int340x_thermal/Kconfig | 4
-rw-r--r--  drivers/thermal/thermal_core.c | 2
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_tegra.c | 53
-rw-r--r--  drivers/video/console/sticon.c | 12
-rw-r--r--  drivers/video/fbdev/efifb.c | 11
-rw-r--r--  drivers/video/fbdev/simplefb.c | 11
163 files changed, 3164 insertions, 1283 deletions
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 7cd0009e7ff3..ef104809f27b 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -347,28 +347,3 @@ void acpi_device_notify_remove(struct device *dev)
acpi_unbind_one(dev);
}
-
-int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used)
-{
- struct acpi_device *adev = to_acpi_device(dev);
-
- /*
- * Skip device objects with device IDs, because they may be in use even
- * if they are not companions of any physical device objects.
- */
- if (adev->pnp.type.hardware_id)
- return 0;
-
- mutex_lock(&adev->physical_node_lock);
-
- /*
- * Device objects without device IDs are not in use if they have no
- * corresponding physical device objects.
- */
- if (list_empty(&adev->physical_node_list))
- acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
-
- mutex_unlock(&adev->physical_node_lock);
-
- return 0;
-}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 8fbdc172864b..d91b560e8867 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -117,7 +117,6 @@ bool acpi_device_is_battery(struct acpi_device *adev);
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
const struct device *dev);
int acpi_bus_register_early_device(int type);
-int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used);
/* --------------------------------------------------------------------------
Device Matching and Notification
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index a50f1967c73d..2c80765670bc 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2564,12 +2564,6 @@ int __init acpi_scan_init(void)
}
}
- /*
- * Make sure that power management resources are not blocked by ACPI
- * device objects with no users.
- */
- bus_for_each_dev(&acpi_bus_type, NULL, NULL, acpi_dev_turn_off_if_unused);
-
acpi_turn_off_unused_power_resources();
acpi_scan_initialized = true;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index d60f34718b5d..1e1167e725a4 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -438,6 +438,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* AMD */
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
+ { PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */
/* AMD is using RAID class only for ahci controllers */
{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 8a6835bfd18a..f76b8418e6fb 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2323,6 +2323,18 @@ int ahci_port_resume(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ahci_port_resume);
#ifdef CONFIG_PM
+static void ahci_handle_s2idle(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 devslp;
+
+ if (pm_suspend_via_firmware())
+ return;
+ devslp = readl(port_mmio + PORT_DEVSLP);
+ if ((devslp & PORT_DEVSLP_ADSE))
+ ata_msleep(ap, devslp_idle_timeout);
+}
+
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
const char *emsg = NULL;
@@ -2336,6 +2348,9 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
ata_port_freeze(ap);
}
+ if (acpi_storage_d3(ap->host->dev))
+ ahci_handle_s2idle(ap);
+
ahci_rpm_put_port(ap);
return rc;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8a0ccb190d76..59ad8c979cb3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2031,8 +2031,9 @@ retry:
dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
goto retry;
}
- ata_dev_err(dev, "Read log page 0x%02x failed, Emask 0x%x\n",
- (unsigned int)page, err_mask);
+ ata_dev_err(dev,
+ "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
+ (unsigned int)log, (unsigned int)page, err_mask);
}
return err_mask;
@@ -2177,6 +2178,9 @@ static void ata_dev_config_ncq_prio(struct ata_device *dev)
struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
+ if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
+ return;
+
err_mask = ata_read_log_page(dev,
ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_SATA_SETTINGS,
@@ -2453,7 +2457,8 @@ static void ata_dev_config_devslp(struct ata_device *dev)
* Check device sleep capability. Get DevSlp timing variables
* from SATA Settings page of Identify Device Data Log.
*/
- if (!ata_id_has_devslp(dev->id))
+ if (!ata_id_has_devslp(dev->id) ||
+ !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
return;
err_mask = ata_read_log_page(dev,
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 4e88597aa9df..5b78e86e3459 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -922,7 +922,7 @@ DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
-struct attribute *ata_ncq_sdev_attrs[] = {
+static struct attribute *ata_ncq_sdev_attrs[] = {
&dev_attr_unload_heads.attr,
&dev_attr_ncq_prio_enable.attr,
&dev_attr_ncq_prio_supported.attr,
diff --git a/drivers/bus/tegra-gmi.c b/drivers/bus/tegra-gmi.c
index a6570789f7af..35b59f92fa66 100644
--- a/drivers/bus/tegra-gmi.c
+++ b/drivers/bus/tegra-gmi.c
@@ -13,8 +13,11 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <soc/tegra/common.h>
+
#define TEGRA_GMI_CONFIG 0x00
#define TEGRA_GMI_CONFIG_GO BIT(31)
#define TEGRA_GMI_BUS_WIDTH_32BIT BIT(30)
@@ -54,9 +57,10 @@ static int tegra_gmi_enable(struct tegra_gmi *gmi)
{
int err;
- err = clk_prepare_enable(gmi->clk);
- if (err < 0) {
- dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+ pm_runtime_enable(gmi->dev);
+ err = pm_runtime_resume_and_get(gmi->dev);
+ if (err) {
+ pm_runtime_disable(gmi->dev);
return err;
}
@@ -83,7 +87,9 @@ static void tegra_gmi_disable(struct tegra_gmi *gmi)
writel(config, gmi->base + TEGRA_GMI_CONFIG);
reset_control_assert(gmi->rst);
- clk_disable_unprepare(gmi->clk);
+
+ pm_runtime_put_sync_suspend(gmi->dev);
+ pm_runtime_force_suspend(gmi->dev);
}
static int tegra_gmi_parse_dt(struct tegra_gmi *gmi)
@@ -213,6 +219,7 @@ static int tegra_gmi_probe(struct platform_device *pdev)
if (!gmi)
return -ENOMEM;
+ platform_set_drvdata(pdev, gmi);
gmi->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -232,6 +239,10 @@ static int tegra_gmi_probe(struct platform_device *pdev)
return PTR_ERR(gmi->rst);
}
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
+
err = tegra_gmi_parse_dt(gmi);
if (err)
return err;
@@ -247,8 +258,6 @@ static int tegra_gmi_probe(struct platform_device *pdev)
return err;
}
- platform_set_drvdata(pdev, gmi);
-
return 0;
}
@@ -262,6 +271,34 @@ static int tegra_gmi_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused tegra_gmi_runtime_resume(struct device *dev)
+{
+ struct tegra_gmi *gmi = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(gmi->clk);
+ if (err < 0) {
+ dev_err(gmi->dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_gmi_runtime_suspend(struct device *dev)
+{
+ struct tegra_gmi *gmi = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(gmi->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_gmi_pm = {
+ SET_RUNTIME_PM_OPS(tegra_gmi_runtime_suspend, tegra_gmi_runtime_resume,
+ NULL)
+};
+
static const struct of_device_id tegra_gmi_id_table[] = {
{ .compatible = "nvidia,tegra20-gmi", },
{ .compatible = "nvidia,tegra30-gmi", },
@@ -275,6 +312,7 @@ static struct platform_driver tegra_gmi_driver = {
.driver = {
.name = "tegra-gmi",
.of_match_table = tegra_gmi_id_table,
+ .pm = &tegra_gmi_pm,
},
};
module_platform_driver(tegra_gmi_driver);
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 235c7e7869aa..5ae2040b8b02 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -1759,7 +1759,7 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
desc->num = resp->range_num;
desc->start_sec = resp->range_start_sec;
desc->num_sec = resp->range_num_sec;
- };
+ }
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 3dd45a7420dc..0dd117860b63 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -1434,7 +1434,10 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
return ret;
/* Check PM API version number */
- zynqmp_pm_get_api_version(&pm_api_version);
+ ret = zynqmp_pm_get_api_version(&pm_api_version);
+ if (ret)
+ return ret;
+
if (pm_api_version < ZYNQMP_PM_VERSION) {
panic("%s Platform Management API version error. Expected: v%d.%d - Found: v%d.%d\n",
__func__,
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 072ed610f9c6..60d9374c72c0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -523,6 +523,7 @@ config GPIO_REG
config GPIO_ROCKCHIP
tristate "Rockchip GPIO support"
depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select GENERIC_IRQ_CHIP
select GPIOLIB_IRQCHIP
default ARCH_ROCKCHIP
help
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
index aeec4bf0b625..84f96b78f32a 100644
--- a/drivers/gpio/gpio-virtio.c
+++ b/drivers/gpio/gpio-virtio.c
@@ -434,7 +434,7 @@ static void virtio_gpio_event_vq(struct virtqueue *vq)
ret = generic_handle_domain_irq(vgpio->gc.irq.domain, gpio);
if (ret)
dev_err(dev, "failed to handle interrupt: %d\n", ret);
- };
+ }
}
static void virtio_gpio_request_vq(struct virtqueue *vq)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index b9c11c2b2885..0de66f59adb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -827,6 +827,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
amdgpu_connector_get_edid(connector);
ret = amdgpu_connector_ddc_get_modes(connector);
+ amdgpu_get_native_mode(connector);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5625f7736e37..188accb71249 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3509,6 +3509,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->rmmio_size = pci_resource_len(adev->pdev, 2);
}
+ for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
+ atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
+
adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
if (adev->rmmio == NULL) {
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index ff70bc233489..4e3669407518 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -587,6 +587,9 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add common ip block(GC_HWIP:0x%x)\n",
+ adev->ip_versions[GC_HWIP][0]);
return -EINVAL;
}
return 0;
@@ -619,6 +622,9 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
+ adev->ip_versions[GC_HWIP][0]);
return -EINVAL;
}
return 0;
@@ -648,6 +654,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
+ adev->ip_versions[OSSSYS_HWIP][0]);
return -EINVAL;
}
return 0;
@@ -688,6 +697,9 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
+ adev->ip_versions[MP0_HWIP][0]);
return -EINVAL;
}
return 0;
@@ -726,6 +738,9 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add smu ip block(MP1_HWIP:0x%x)\n",
+ adev->ip_versions[MP1_HWIP][0]);
return -EINVAL;
}
return 0;
@@ -753,6 +768,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &dm_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
+ adev->ip_versions[DCE_HWIP][0]);
return -EINVAL;
}
} else if (adev->ip_versions[DCI_HWIP][0]) {
@@ -763,6 +781,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &dm_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add dm ip block(DCI_HWIP:0x%x)\n",
+ adev->ip_versions[DCI_HWIP][0]);
return -EINVAL;
}
#endif
@@ -796,6 +817,9 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
+ adev->ip_versions[GC_HWIP][0]);
return -EINVAL;
}
return 0;
@@ -829,6 +853,9 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
+ adev->ip_versions[SDMA0_HWIP][0]);
return -EINVAL;
}
return 0;
@@ -845,6 +872,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
+ adev->ip_versions[UVD_HWIP][0]);
return -EINVAL;
}
switch (adev->ip_versions[VCE_HWIP][0]) {
@@ -855,6 +885,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
+ adev->ip_versions[VCE_HWIP][0]);
return -EINVAL;
}
} else {
@@ -893,6 +926,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
+ adev->ip_versions[UVD_HWIP][0]);
return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 0fad2bf854ae..567df2db23ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -386,6 +386,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
"%s", "xgmi_hive_info");
if (ret) {
dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
+ kobject_put(&hive->kobj);
kfree(hive);
hive = NULL;
goto pro_end;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 003ba6a373ff..93e33dd84dd4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1226,6 +1226,11 @@ static int stop_cpsch(struct device_queue_manager *dqm)
bool hanging;
dqm_lock(dqm);
+ if (!dqm->sched_running) {
+ dqm_unlock(dqm);
+ return 0;
+ }
+
if (!dqm->is_hws_hang)
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
hanging = dqm->is_hws_hang || dqm->is_resetting;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c911b30de658..c27cb47bc988 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4242,7 +4242,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
-
+ if (dm->num_of_edps)
+ update_connector_ext_caps(aconnector);
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
}
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index f1a46d16f7ea..4b9e68a79f06 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -98,7 +98,8 @@ enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_ACP,
AMD_IP_BLOCK_TYPE_VCN,
AMD_IP_BLOCK_TYPE_MES,
- AMD_IP_BLOCK_TYPE_JPEG
+ AMD_IP_BLOCK_TYPE_JPEG,
+ AMD_IP_BLOCK_TYPE_NUM,
};
enum amd_clockgating_state {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 03581d5b1836..08362d506534 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -927,6 +927,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
{
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
+
+ if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
+ dev_dbg(adev->dev, "IP block%d already in the target %s state!",
+ block_type, gate ? "gate" : "ungate");
+ return 0;
+ }
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
@@ -979,6 +986,9 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
break;
}
+ if (!ret)
+ atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 98f1b3d8c1d5..16e3f72d31b9 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -417,6 +417,12 @@ struct amdgpu_dpm {
enum amd_dpm_forced_level forced_level;
};
+enum ip_power_state {
+ POWER_STATE_UNKNOWN,
+ POWER_STATE_ON,
+ POWER_STATE_OFF,
+};
+
struct amdgpu_pm {
struct mutex mutex;
u32 current_sclk;
@@ -452,6 +458,8 @@ struct amdgpu_pm {
struct i2c_adapter smu_i2c;
struct mutex smu_i2c_mutex;
struct list_head pm_attr_list;
+
+ atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM];
};
#define R600_SSTU_DFLT 0
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index cbc3f99e8573..2238ee19c222 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -309,6 +309,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
{
int ret = 0, size = 0;
uint32_t cur_value = 0;
+ int i;
smu_cmn_get_sysfs_buf(&buf, &size);
@@ -334,8 +335,6 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n",
CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
break;
- case SMU_GFXCLK:
- case SMU_SCLK:
case SMU_FCLK:
case SMU_MCLK:
case SMU_SOCCLK:
@@ -346,6 +345,25 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
return ret;
size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
break;
+ case SMU_SCLK:
+ case SMU_GFXCLK:
+ ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value);
+ if (ret)
+ return ret;
+ if (cur_value == CYAN_SKILLFISH_SCLK_MAX)
+ i = 2;
+ else if (cur_value == CYAN_SKILLFISH_SCLK_MIN)
+ i = 0;
+ else
+ i = 1;
+ size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MIN,
+ i == 0 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+ i == 1 ? cur_value : cyan_skillfish_sclk_default,
+ i == 1 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MAX,
+ i == 2 ? "*" : "");
+ break;
default:
dev_warn(smu->adev->dev, "Unsupported clock type\n");
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 71161f6b78fe..60a557068ea4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1265,7 +1265,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
uint16_t *curve_settings;
- int i, size = 0, ret = 0;
+ int i, levels, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
@@ -1319,14 +1319,17 @@ static int navi10_print_clk_levels(struct smu_context *smu,
freq_values[1] = cur_value;
mark_index = cur_value == freq_values[0] ? 0 :
cur_value == freq_values[2] ? 2 : 1;
- if (mark_index != 1)
- freq_values[1] = (freq_values[0] + freq_values[2]) / 2;
- for (i = 0; i < 3; i++) {
+ levels = 3;
+ if (mark_index != 1) {
+ levels = 2;
+ freq_values[1] = freq_values[2];
+ }
+
+ for (i = 0; i < levels; i++) {
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i],
i == mark_index ? "*" : "");
}
-
}
break;
case SMU_PCIE:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 421f38e8dada..c02ed65ffa38 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -683,6 +683,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
+ uint32_t min, max;
memset(&metrics, 0, sizeof(metrics));
@@ -743,6 +744,13 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
if (ret)
return ret;
break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value);
+ if (ret) {
+ return ret;
+ }
+ break;
default:
break;
}
@@ -768,6 +776,24 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
if (!cur_value_match_level)
size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value);
break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+ max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+ if (cur_value == max)
+ i = 2;
+ else if (cur_value == min)
+ i = 0;
+ else
+ i = 1;
+ size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
+ i == 0 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+ i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK,
+ i == 1 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
+ i == 2 ? "*" : "");
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 8215bbf5ed7c..caf1775d48ef 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -697,6 +697,11 @@ static int yellow_carp_get_current_clk_freq(struct smu_context *smu,
case SMU_FCLK:
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetFclkFrequency, 0, value);
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetGfxclkFrequency, 0, value);
+ break;
default:
return -EINVAL;
}
@@ -967,6 +972,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
{
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
+ uint32_t min, max;
smu_cmn_get_sysfs_buf(&buf, &size);
@@ -1005,6 +1011,27 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
cur_value == value ? "*" : "");
}
break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value);
+ if (ret)
+ goto print_clk_out;
+ min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+ max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+ if (cur_value == max)
+ i = 2;
+ else if (cur_value == min)
+ i = 0;
+ else
+ i = 1;
+ size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
+ i == 0 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+ i == 1 ? cur_value : YELLOW_CARP_UMD_PSTATE_GFXCLK,
+ i == 1 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
+ i == 2 ? "*" : "");
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h
index b3ad8352c68a..a9205a8ea3ad 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h
@@ -24,5 +24,6 @@
#define __YELLOW_CARP_PPT_H__
extern void yellow_carp_set_ppt_funcs(struct smu_context *smu);
+#define YELLOW_CARP_UMD_PSTATE_GFXCLK 1100
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 843d2cbfc71d..ea6f50c08c5f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -139,9 +139,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
const char *message = smu_get_message_name(smu, msg);
switch (reg_c2pmsg_90) {
- case SMU_RESP_NONE:
+ case SMU_RESP_NONE: {
+ u32 msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ u32 prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
dev_err_ratelimited(adev->dev,
- "SMU: I'm not done with your previous command!");
+ "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
+ msg_idx, prm);
+ }
break;
case SMU_RESP_OK:
/* The SMU executed the command. It completed with a
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index d53388199f34..9d05674550a4 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -210,8 +210,13 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
} else if (cma_obj->vaddr) {
- dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
- cma_obj->vaddr, cma_obj->paddr);
+ if (cma_obj->map_noncoherent)
+ dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size,
+ cma_obj->vaddr, cma_obj->paddr,
+ DMA_TO_DEVICE);
+ else
+ dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
+ cma_obj->vaddr, cma_obj->paddr);
}
drm_gem_object_release(gem_obj);
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 168c84a74d30..71fbdcddd31f 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -696,10 +696,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
for_each_dsi_phy(phy, intel_dsi->phys) {
- if (DISPLAY_VER(dev_priv) >= 12)
- val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- else
- val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
}
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
@@ -1135,8 +1132,6 @@ static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
/* step 4a: power up all lanes of the DDI used by DSI */
gen11_dsi_power_up_lanes(encoder);
@@ -1162,8 +1157,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
gen11_dsi_configure_transcoder(encoder, crtc_state);
/* Step 4l: Gate DDI clocks */
- if (DISPLAY_VER(dev_priv) == 11)
- gen11_dsi_gate_clocks(encoder);
+ gen11_dsi_gate_clocks(encoder);
}
static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
@@ -1271,7 +1265,8 @@ static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder)
if (DISPLAY_VER(i915) == 13) {
for_each_dsi_port(port, intel_dsi->ports)
intel_de_rmw(i915, TGL_DSI_CHKN_REG(port),
- TGL_DSI_CHKN_LSHS_GB, 0x4);
+ TGL_DSI_CHKN_LSHS_GB_MASK,
+ TGL_DSI_CHKN_LSHS_GB(4));
}
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 38b47e73e35d..c48557dfa04c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -3080,8 +3080,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
ce = intel_engine_create_virtual(siblings, num_siblings,
FORCE_VIRTUAL);
- if (!ce) {
- err = ERR_PTR(-ENOMEM);
+ if (IS_ERR(ce)) {
+ err = ERR_CAST(ce);
goto unwind;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index da9055c3ebf0..bcee121bec5a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -11717,7 +11717,9 @@ enum skl_power_gate {
#define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \
_TGL_DSI_CHKN_REG_0, \
_TGL_DSI_CHKN_REG_1)
-#define TGL_DSI_CHKN_LSHS_GB REG_GENMASK(15, 12)
+#define TGL_DSI_CHKN_LSHS_GB_MASK REG_GENMASK(15, 12)
+#define TGL_DSI_CHKN_LSHS_GB(byte_clocks) REG_FIELD_PREP(TGL_DSI_CHKN_LSHS_GB_MASK, \
+ (byte_clocks))
/* Display Stream Splitter Control */
#define DSS_CTL1 _MMIO(0x67400)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
index 6e3c450eaace..3ff49344abc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
@@ -62,7 +62,6 @@ gv100_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
nvkm_wr32(device, 0x6f0108 + hdmi, vendor_infoframe.header);
nvkm_wr32(device, 0x6f010c + hdmi, vendor_infoframe.subpack0_low);
nvkm_wr32(device, 0x6f0110 + hdmi, vendor_infoframe.subpack0_high);
- nvkm_wr32(device, 0x6f0110 + hdmi, 0x00000000);
nvkm_wr32(device, 0x6f0114 + hdmi, 0x00000000);
nvkm_wr32(device, 0x6f0118 + hdmi, 0x00000000);
nvkm_wr32(device, 0x6f011c + hdmi, 0x00000000);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 5bc5f775abe1..f91fb31ab7a7 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -704,9 +704,13 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
int ret;
dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
+ /* Make sure to grab an additional ref on the added fence */
+ dma_fence_get(fence);
ret = drm_sched_job_add_dependency(job, fence);
- if (ret)
+ if (ret) {
+ dma_fence_put(fence);
return ret;
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 5755f0432e77..8c796de53222 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -46,6 +46,7 @@ config DRM_SUN6I_DSI
default MACH_SUN8I
select CRC_CCITT
select DRM_MIPI_DSI
+ select RESET_CONTROLLER
select PHY_SUN6I_MIPI_DPHY
help
Choose this option if you want have an Allwinner SoC with
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 7f11ea07d698..ca873a3b98db 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -480,7 +480,7 @@ module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);
-static int dm_ring_size = 20 * 1024;
+static int dm_ring_size = VMBUS_RING_SIZE(16 * 1024);
/*
* Driver specific state.
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index fedc0fa6ebf9..f5aacaf7fb8e 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1906,7 +1906,8 @@ static int nldev_stat_set_mode_doit(struct sk_buff *msg,
int ret;
/* Currently only counter for QP is supported */
- if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
+ if (!tb[RDMA_NLDEV_ATTR_STAT_RES] ||
+ nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
return -EINVAL;
mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 692d5ff657df..c18634bec212 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1232,6 +1232,9 @@ static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
INIT_LIST_HEAD(&qp->rdma_mrs);
INIT_LIST_HEAD(&qp->sig_mrs);
+ qp->send_cq = attr->send_cq;
+ qp->recv_cq = attr->recv_cq;
+
rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index ed9fa0d84e9e..dc9211f3a009 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1628,8 +1628,7 @@ static int init_cntr_names(const char *names_in, const size_t names_len,
n++;
names_out =
- kmalloc((n + num_extra_names) * sizeof(struct rdma_stat_desc) +
- names_len,
+ kzalloc((n + num_extra_names) * sizeof(*q) + names_len,
GFP_KERNEL);
if (!names_out) {
*num_cntrs = 0;
@@ -1637,7 +1636,7 @@ static int init_cntr_names(const char *names_in, const size_t names_len,
return -ENOMEM;
}
- p = names_out + (n + num_extra_names) * sizeof(struct rdma_stat_desc);
+ p = names_out + (n + num_extra_names) * sizeof(*q);
memcpy(p, names_in, names_len);
q = (struct rdma_stat_desc *)names_out;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ceca05982f61..0d2fa3338784 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2215,6 +2215,11 @@ static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
.get_hw_stats = mlx4_ib_get_hw_stats,
};
+static const struct ib_device_ops mlx4_ib_hw_stats_ops1 = {
+ .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats,
+ .get_hw_stats = mlx4_ib_get_hw_stats,
+};
+
static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
{
struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
@@ -2227,9 +2232,16 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
return 0;
for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
- /* i == 1 means we are building port counters */
- if (i && !per_port)
- continue;
+ /*
+ * i == 1 means we are building port counters, set a different
+ * stats ops without port stats callback.
+ */
+ if (i && !per_port) {
+ ib_set_device_ops(&ibdev->ib_dev,
+ &mlx4_ib_hw_stats_ops1);
+
+ return 0;
+ }
ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs,
&diag[i].offset,
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 7435baad0007..e4cc64f56019 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -19,19 +20,17 @@
#define RPCIF_CMNCR 0x0000 /* R/W */
#define RPCIF_CMNCR_MD BIT(31)
-#define RPCIF_CMNCR_SFDE BIT(24) /* undocumented but must be set */
#define RPCIF_CMNCR_MOIIO3(val) (((val) & 0x3) << 22)
#define RPCIF_CMNCR_MOIIO2(val) (((val) & 0x3) << 20)
#define RPCIF_CMNCR_MOIIO1(val) (((val) & 0x3) << 18)
#define RPCIF_CMNCR_MOIIO0(val) (((val) & 0x3) << 16)
-#define RPCIF_CMNCR_MOIIO_HIZ (RPCIF_CMNCR_MOIIO0(3) | \
- RPCIF_CMNCR_MOIIO1(3) | \
- RPCIF_CMNCR_MOIIO2(3) | RPCIF_CMNCR_MOIIO3(3))
-#define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* undocumented */
-#define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* undocumented */
+#define RPCIF_CMNCR_MOIIO(val) (RPCIF_CMNCR_MOIIO0(val) | RPCIF_CMNCR_MOIIO1(val) | \
+ RPCIF_CMNCR_MOIIO2(val) | RPCIF_CMNCR_MOIIO3(val))
+#define RPCIF_CMNCR_IO3FV(val) (((val) & 0x3) << 14) /* documented for RZ/G2L */
+#define RPCIF_CMNCR_IO2FV(val) (((val) & 0x3) << 12) /* documented for RZ/G2L */
#define RPCIF_CMNCR_IO0FV(val) (((val) & 0x3) << 8)
-#define RPCIF_CMNCR_IOFV_HIZ (RPCIF_CMNCR_IO0FV(3) | RPCIF_CMNCR_IO2FV(3) | \
- RPCIF_CMNCR_IO3FV(3))
+#define RPCIF_CMNCR_IOFV(val) (RPCIF_CMNCR_IO0FV(val) | RPCIF_CMNCR_IO2FV(val) | \
+ RPCIF_CMNCR_IO3FV(val))
#define RPCIF_CMNCR_BSZ(val) (((val) & 0x3) << 0)
#define RPCIF_SSLDR 0x0004 /* R/W */
@@ -126,6 +125,9 @@
#define RPCIF_SMDRENR_OPDRE BIT(4)
#define RPCIF_SMDRENR_SPIDRE BIT(0)
+#define RPCIF_PHYADD 0x0070 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */
+#define RPCIF_PHYWR 0x0074 /* R/W available on R-Car E3/D3/V3M and RZ/G2{E,L} */
+
#define RPCIF_PHYCNT 0x007C /* R/W */
#define RPCIF_PHYCNT_CAL BIT(31)
#define RPCIF_PHYCNT_OCTA(v) (((v) & 0x3) << 22)
@@ -133,10 +135,12 @@
#define RPCIF_PHYCNT_OCT BIT(20)
#define RPCIF_PHYCNT_DDRCAL BIT(19)
#define RPCIF_PHYCNT_HS BIT(18)
-#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15)
+#define RPCIF_PHYCNT_CKSEL(v) (((v) & 0x3) << 16) /* valid only for RZ/G2L */
+#define RPCIF_PHYCNT_STRTIM(v) (((v) & 0x7) << 15) /* valid for R-Car and RZ/G2{E,H,M,N} */
#define RPCIF_PHYCNT_WBUF2 BIT(4)
#define RPCIF_PHYCNT_WBUF BIT(2)
#define RPCIF_PHYCNT_PHYMEM(v) (((v) & 0x3) << 0)
+#define RPCIF_PHYCNT_PHYMEM_MASK GENMASK(1, 0)
#define RPCIF_PHYOFFSET1 0x0080 /* R/W */
#define RPCIF_PHYOFFSET1_DDRTMG(v) (((v) & 0x3) << 28)
@@ -147,8 +151,6 @@
#define RPCIF_PHYINT 0x0088 /* R/W */
#define RPCIF_PHYINT_WPVAL BIT(1)
-#define RPCIF_DIRMAP_SIZE 0x4000000
-
static const struct regmap_range rpcif_volatile_ranges[] = {
regmap_reg_range(RPCIF_SMRDR0, RPCIF_SMRDR1),
regmap_reg_range(RPCIF_SMWDR0, RPCIF_SMWDR1),
@@ -243,50 +245,74 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(rpc->dirmap))
- rpc->dirmap = NULL;
+ return PTR_ERR(rpc->dirmap);
rpc->size = resource_size(res);
+ rpc->type = (uintptr_t)of_device_get_match_data(dev);
rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
return PTR_ERR_OR_ZERO(rpc->rstc);
}
EXPORT_SYMBOL(rpcif_sw_init);
-void rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
+static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif *rpc)
+{
+ regmap_write(rpc->regmap, RPCIF_PHYWR, 0xa5390000);
+ regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000000);
+ regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080);
+ regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000022);
+ regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00008080);
+ regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000024);
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_CKSEL(3),
+ RPCIF_PHYCNT_CKSEL(3));
+ regmap_write(rpc->regmap, RPCIF_PHYWR, 0x00000030);
+ regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032);
+}
+
+int rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
{
u32 dummy;
pm_runtime_get_sync(rpc->dev);
- /*
- * NOTE: The 0x260 are undocumented bits, but they must be set.
- * RPCIF_PHYCNT_STRTIM is strobe timing adjustment bits,
- * 0x0 : the delay is biggest,
- * 0x1 : the delay is 2nd biggest,
- * On H3 ES1.x, the value should be 0, while on others,
- * the value should be 7.
- */
- regmap_write(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_STRTIM(7) |
- RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0) | 0x260);
-
- /*
- * NOTE: The 0x1511144 are undocumented bits, but they must be set
- * for RPCIF_PHYOFFSET1.
- * The 0x31 are undocumented bits, but they must be set
- * for RPCIF_PHYOFFSET2.
- */
- regmap_write(rpc->regmap, RPCIF_PHYOFFSET1, 0x1511144 |
- RPCIF_PHYOFFSET1_DDRTMG(3));
- regmap_write(rpc->regmap, RPCIF_PHYOFFSET2, 0x31 |
- RPCIF_PHYOFFSET2_OCTTMG(4));
+ if (rpc->type == RPCIF_RZ_G2L) {
+ int ret;
+
+ ret = reset_control_reset(rpc->rstc);
+ if (ret)
+ return ret;
+ usleep_range(200, 300);
+ rpcif_rzg2l_timing_adjust_sdr(rpc);
+ }
+
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_PHYMEM_MASK,
+ RPCIF_PHYCNT_PHYMEM(hyperflash ? 3 : 0));
+
+ if (rpc->type == RPCIF_RCAR_GEN3)
+ regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
+ RPCIF_PHYCNT_STRTIM(7), RPCIF_PHYCNT_STRTIM(7));
+
+ regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET1, RPCIF_PHYOFFSET1_DDRTMG(3),
+ RPCIF_PHYOFFSET1_DDRTMG(3));
+ regmap_update_bits(rpc->regmap, RPCIF_PHYOFFSET2, RPCIF_PHYOFFSET2_OCTTMG(7),
+ RPCIF_PHYOFFSET2_OCTTMG(4));
if (hyperflash)
regmap_update_bits(rpc->regmap, RPCIF_PHYINT,
RPCIF_PHYINT_WPVAL, 0);
- regmap_write(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_SFDE |
- RPCIF_CMNCR_MOIIO_HIZ | RPCIF_CMNCR_IOFV_HIZ |
- RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0));
+ if (rpc->type == RPCIF_RCAR_GEN3)
+ regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
+ RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_BSZ(3),
+ RPCIF_CMNCR_MOIIO(3) |
+ RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0));
+ else
+ regmap_update_bits(rpc->regmap, RPCIF_CMNCR,
+ RPCIF_CMNCR_MOIIO(3) | RPCIF_CMNCR_IOFV(3) |
+ RPCIF_CMNCR_BSZ(3),
+ RPCIF_CMNCR_MOIIO(1) | RPCIF_CMNCR_IOFV(2) |
+ RPCIF_CMNCR_BSZ(hyperflash ? 1 : 0));
+
/* Set RCF after BSZ update */
regmap_write(rpc->regmap, RPCIF_DRCR, RPCIF_DRCR_RCF);
/* Dummy read according to spec */
@@ -297,6 +323,8 @@ void rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
pm_runtime_put(rpc->dev);
rpc->bus_size = hyperflash ? 2 : 1;
+
+ return 0;
}
EXPORT_SYMBOL(rpcif_hw_init);
@@ -588,8 +616,8 @@ static void memcpy_fromio_readw(void *to,
ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf)
{
- loff_t from = offs & (RPCIF_DIRMAP_SIZE - 1);
- size_t size = RPCIF_DIRMAP_SIZE - from;
+ loff_t from = offs & (rpc->size - 1);
+ size_t size = rpc->size - from;
if (len > size)
len = size;
@@ -659,7 +687,8 @@ static int rpcif_remove(struct platform_device *pdev)
}
static const struct of_device_id rpcif_of_match[] = {
- { .compatible = "renesas,rcar-gen3-rpc-if", },
+ { .compatible = "renesas,rcar-gen3-rpc-if", .data = (void *)RPCIF_RCAR_GEN3 },
+ { .compatible = "renesas,rzg2l-rpc-if", .data = (void *)RPCIF_RZ_G2L },
{},
};
MODULE_DEVICE_TABLE(of, rpcif_of_match);
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index a5001875876b..6435a75142a6 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -15,6 +15,8 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/mmc/card.h>
@@ -24,6 +26,8 @@
#include <linux/gpio/consumer.h>
#include <linux/ktime.h>
+#include <soc/tegra/common.h>
+
#include "sdhci-pltfm.h"
#include "cqhci.h"
@@ -760,7 +764,9 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
+ struct device *dev = mmc_dev(host->mmc);
unsigned long host_clk;
+ int err;
if (!clock)
return sdhci_set_clock(host, clock);
@@ -778,7 +784,12 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
* from clk_get_rate() is used.
*/
host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
- clk_set_rate(pltfm_host->clk, host_clk);
+
+ err = dev_pm_opp_set_rate(dev, host_clk);
+ if (err)
+ dev_err(dev, "failed to set clk rate to %luHz: %d\n",
+ host_clk, err);
+
tegra_host->curr_clk_rate = host_clk;
if (tegra_host->ddr_signaling)
host->max_clk = host_clk;
@@ -1705,7 +1716,6 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
"failed to get clock\n");
goto err_clk_get;
}
- clk_prepare_enable(clk);
pltfm_host->clk = clk;
tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
@@ -1716,15 +1726,24 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
goto err_rst_get;
}
- rc = reset_control_assert(tegra_host->rst);
+ rc = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (rc)
goto err_rst_get;
+ pm_runtime_enable(&pdev->dev);
+ rc = pm_runtime_resume_and_get(&pdev->dev);
+ if (rc)
+ goto err_pm_get;
+
+ rc = reset_control_assert(tegra_host->rst);
+ if (rc)
+ goto err_rst_assert;
+
usleep_range(2000, 4000);
rc = reset_control_deassert(tegra_host->rst);
if (rc)
- goto err_rst_get;
+ goto err_rst_assert;
usleep_range(2000, 4000);
@@ -1736,8 +1755,11 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
err_add_host:
reset_control_assert(tegra_host->rst);
+err_rst_assert:
+ pm_runtime_put_sync_suspend(&pdev->dev);
+err_pm_get:
+ pm_runtime_disable(&pdev->dev);
err_rst_get:
- clk_disable_unprepare(pltfm_host->clk);
err_clk_get:
clk_disable_unprepare(tegra_host->tmclk);
err_power_req:
@@ -1756,19 +1778,38 @@ static int sdhci_tegra_remove(struct platform_device *pdev)
reset_control_assert(tegra_host->rst);
usleep_range(2000, 4000);
- clk_disable_unprepare(pltfm_host->clk);
- clk_disable_unprepare(tegra_host->tmclk);
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
+
+ clk_disable_unprepare(tegra_host->tmclk);
sdhci_pltfm_free(pdev);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
+static int __maybe_unused sdhci_tegra_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ clk_disable_unprepare(pltfm_host->clk);
+
+ return 0;
+}
+
+static int __maybe_unused sdhci_tegra_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_prepare_enable(pltfm_host->clk);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_tegra_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
int ret;
if (host->mmc->caps2 & MMC_CAP2_CQE) {
@@ -1783,17 +1824,22 @@ static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
return ret;
}
- clk_disable_unprepare(pltfm_host->clk);
+ ret = pm_runtime_force_suspend(dev);
+ if (ret) {
+ sdhci_resume_host(host);
+ cqhci_resume(host->mmc);
+ return ret;
+ }
+
return 0;
}
-static int __maybe_unused sdhci_tegra_resume(struct device *dev)
+static int sdhci_tegra_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
int ret;
- ret = clk_prepare_enable(pltfm_host->clk);
+ ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
@@ -1812,13 +1858,16 @@ static int __maybe_unused sdhci_tegra_resume(struct device *dev)
suspend_host:
sdhci_suspend_host(host);
disable_clk:
- clk_disable_unprepare(pltfm_host->clk);
+ pm_runtime_force_suspend(dev);
return ret;
}
#endif
-static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
- sdhci_tegra_resume);
+static const struct dev_pm_ops sdhci_tegra_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(sdhci_tegra_runtime_suspend, sdhci_tegra_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_tegra_suspend, sdhci_tegra_resume)
+};
static struct platform_driver sdhci_tegra_driver = {
.driver = {
diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c
index ecb050ba95cd..8daa296f6eb6 100644
--- a/drivers/mtd/hyperbus/rpc-if.c
+++ b/drivers/mtd/hyperbus/rpc-if.c
@@ -130,7 +130,9 @@ static int rpcif_hb_probe(struct platform_device *pdev)
rpcif_enable_rpm(&hyperbus->rpc);
- rpcif_hw_init(&hyperbus->rpc, true);
+ error = rpcif_hw_init(&hyperbus->rpc, true);
+ if (error)
+ return error;
hyperbus->hbdev.map.size = hyperbus->rpc.size;
hyperbus->hbdev.map.virt = hyperbus->rpc.dirmap;
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index 32431bbe69b8..b36e5260ae27 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -17,8 +17,11 @@
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <soc/tegra/common.h>
+
#define COMMAND 0x00
#define COMMAND_GO BIT(31)
#define COMMAND_CLE BIT(30)
@@ -1151,6 +1154,7 @@ static int tegra_nand_probe(struct platform_device *pdev)
return -ENOMEM;
ctrl->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ctrl);
nand_controller_init(&ctrl->controller);
ctrl->controller.ops = &tegra_nand_controller_ops;
@@ -1166,14 +1170,23 @@ static int tegra_nand_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->clk))
return PTR_ERR(ctrl->clk);
- err = clk_prepare_enable(ctrl->clk);
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
+
+ /*
+ * This driver doesn't support active power management yet,
+ * so we will simply keep device resumed.
+ */
+ pm_runtime_enable(&pdev->dev);
+ err = pm_runtime_resume_and_get(&pdev->dev);
if (err)
return err;
err = reset_control_reset(rst);
if (err) {
dev_err(ctrl->dev, "Failed to reset HW: %d\n", err);
- goto err_disable_clk;
+ goto err_put_pm;
}
writel_relaxed(HWSTATUS_CMD_DEFAULT, ctrl->regs + HWSTATUS_CMD);
@@ -1188,21 +1201,20 @@ static int tegra_nand_probe(struct platform_device *pdev)
dev_name(&pdev->dev), ctrl);
if (err) {
dev_err(ctrl->dev, "Failed to get IRQ: %d\n", err);
- goto err_disable_clk;
+ goto err_put_pm;
}
writel_relaxed(DMA_MST_CTRL_IS_DONE, ctrl->regs + DMA_MST_CTRL);
err = tegra_nand_chips_init(ctrl->dev, ctrl);
if (err)
- goto err_disable_clk;
-
- platform_set_drvdata(pdev, ctrl);
+ goto err_put_pm;
return 0;
-err_disable_clk:
- clk_disable_unprepare(ctrl->clk);
+err_put_pm:
+ pm_runtime_put_sync_suspend(ctrl->dev);
+ pm_runtime_force_suspend(ctrl->dev);
return err;
}
@@ -1219,11 +1231,40 @@ static int tegra_nand_remove(struct platform_device *pdev)
nand_cleanup(chip);
+ pm_runtime_put_sync_suspend(ctrl->dev);
+ pm_runtime_force_suspend(ctrl->dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_nand_runtime_resume(struct device *dev)
+{
+ struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(ctrl->clk);
+ if (err) {
+ dev_err(dev, "Failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_nand_runtime_suspend(struct device *dev)
+{
+ struct tegra_nand_controller *ctrl = dev_get_drvdata(dev);
+
clk_disable_unprepare(ctrl->clk);
return 0;
}
+static const struct dev_pm_ops tegra_nand_pm = {
+ SET_RUNTIME_PM_OPS(tegra_nand_runtime_suspend, tegra_nand_runtime_resume,
+ NULL)
+};
+
static const struct of_device_id tegra_nand_of_match[] = {
{ .compatible = "nvidia,tegra20-nand" },
{ /* sentinel */ }
@@ -1234,6 +1275,7 @@ static struct platform_driver tegra_nand_driver = {
.driver = {
.name = "tegra-nand",
.of_match_table = tegra_nand_of_match,
+ .pm = &tegra_nand_pm,
},
.probe = tegra_nand_probe,
.remove = tegra_nand_remove,
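
The tegra_nand conversion above keeps the controller powered for the driver's whole lifetime: probe enables runtime PM, takes a reference with pm_runtime_resume_and_get(), and the error and remove paths drop that reference and force the device into suspend. A condensed, hypothetical probe sketch of that balancing; bar_* names are placeholders.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int bar_hw_init(struct platform_device *pdev)
{
	return 0;	/* stand-in for reset/IRQ/child setup */
}

static int bar_probe(struct platform_device *pdev)
{
	int err;

	pm_runtime_enable(&pdev->dev);

	/* Resume the device and hold it active while the driver is bound. */
	err = pm_runtime_resume_and_get(&pdev->dev);
	if (err) {
		pm_runtime_disable(&pdev->dev);
		return err;
	}

	err = bar_hw_init(pdev);
	if (err)
		goto err_put_pm;

	return 0;

err_put_pm:
	/* Drop the reference, then force a runtime suspend (this also disables runtime PM). */
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_force_suspend(&pdev->dev);
	return err;
}
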
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 47a04c330885..b732ee9a50ef 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -3286,7 +3286,7 @@ static void __exit amt_fini(void)
{
rtnl_link_unregister(&amt_link_ops);
unregister_netdevice_notifier(&amt_notifier_block);
- cancel_delayed_work(&source_gc_wq);
+ cancel_delayed_work_sync(&source_gc_wq);
__amt_source_gc_work();
destroy_workqueue(amt_wq);
}
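
The amt fix above matters because cancel_delayed_work() only removes a pending timer; it does not wait for a handler that is already running, so the garbage-collection work could still be executing while the workqueue is torn down. A small, self-contained sketch of the safer teardown ordering follows; all names are hypothetical.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_gc_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_gc, demo_gc_work);

static void demo_gc_work(struct work_struct *work)
{
	/* ... reclaim stale entries, possibly re-arm the work ... */
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo_gc_wq", 0, 0);
	if (!demo_wq)
		return -ENOMEM;

	queue_delayed_work(demo_wq, &demo_gc, HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Waits for a running instance to finish before returning. */
	cancel_delayed_work_sync(&demo_gc);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
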
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index fc0e66006644..3f1704cbe1cb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -559,6 +559,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
goto err_exit;
if (fw.len == 0xFFFFU) {
+ if (sw.len > sizeof(self->rpc)) {
+ printk(KERN_INFO "Invalid sw len: %x\n", sw.len);
+ err = -EINVAL;
+ goto err_exit;
+ }
err = hw_atl_utils_fw_rpc_call(self, sw.len);
if (err < 0)
goto err_exit;
@@ -567,6 +572,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
if (rpc) {
if (fw.len) {
+ if (fw.len > sizeof(self->rpc)) {
+ printk(KERN_INFO "Invalid fw len: %x\n", fw.len);
+ err = -EINVAL;
+ goto err_exit;
+ }
err =
hw_atl_utils_fw_downld_dwords(self,
self->rpc_addr,
diff --git a/drivers/net/ethernet/asix/ax88796c_main.h b/drivers/net/ethernet/asix/ax88796c_main.h
index 80263c3cef75..4a83c991dcbe 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.h
+++ b/drivers/net/ethernet/asix/ax88796c_main.h
@@ -127,9 +127,9 @@ struct ax88796c_device {
#define AX_PRIV_FLAGS_MASK (AX_CAP_COMP)
unsigned long flags;
- #define EVENT_INTR BIT(0)
- #define EVENT_TX BIT(1)
- #define EVENT_SET_MULTI BIT(2)
+ #define EVENT_INTR 0
+ #define EVENT_TX 1
+ #define EVENT_SET_MULTI 2
};
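
Context for the ax88796c change above: these EVENT_* values are passed to the atomic bitops (set_bit(), test_bit(), clear_bit()), which expect a bit number rather than a mask, so BIT(1) (value 2) would silently operate on bit 2. A tiny illustrative sketch, with a hypothetical flags word:

#include <linux/bitops.h>
#include <linux/types.h>

#define EVENT_INTR	0	/* bit numbers, as expected by {set,test,clear}_bit() */
#define EVENT_TX	1
#define EVENT_SET_MULTI	2

static unsigned long demo_flags;

static void demo_mark_tx(void)
{
	/* Correct: pass the index. set_bit(BIT(EVENT_TX), ...) would hit bit 2 instead. */
	set_bit(EVENT_TX, &demo_flags);
}

static bool demo_take_tx(void)
{
	return test_and_clear_bit(EVENT_TX, &demo_flags);
}
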
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 1835d2e451c0..fc7fce642666 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -635,11 +635,13 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
{
int i, rc;
struct bnx2x_ilt *ilt = BP_ILT(bp);
- struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+ struct ilt_client_info *ilt_cli;
if (!ilt || !ilt->lines)
return -1;
+ ilt_cli = &ilt->clients[cli_num];
+
if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index d0d5da9b78f8..4c9507d82fd0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2258,6 +2258,16 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
}
}
+/* Must hold rtnl_lock */
+static inline bool bnxt_sriov_cfg(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+ return BNXT_PF(bp) && (bp->pf.active_vfs || bp->sriov_cfg);
+#else
+ return false;
+#endif
+}
+
extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 5c464ea73576..951c4c569a9b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -360,7 +360,7 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
NL_SET_ERR_MSG_MOD(extack, "Live patch already applied");
break;
default:
- netdev_err(bp->dev, "Unexpected live patch error: %hhd\n", err);
+ netdev_err(bp->dev, "Unexpected live patch error: %d\n", err);
NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch");
break;
}
@@ -441,12 +441,13 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
- if (BNXT_PF(bp) && bp->pf.active_vfs) {
+ rtnl_lock();
+ if (bnxt_sriov_cfg(bp)) {
NL_SET_ERR_MSG_MOD(extack,
- "reload is unsupported when VFs are allocated");
+ "reload is unsupported while VFs are allocated or being configured");
+ rtnl_unlock();
return -EOPNOTSUPP;
}
- rtnl_lock();
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
rtnl_unlock();
return -ENODEV;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index e6a4a768b10b..1471b6130a2b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1868,7 +1868,7 @@ static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type,
struct flow_cls_offload *flower = type_data;
struct bnxt *bp = priv->bp;
- if (flower->common.chain_index)
+ if (!tc_cls_can_offload_and_chain0(bp->dev, type_data))
return -EOPNOTSUPP;
switch (type) {
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 13121c4dcfe6..71730ef4cd57 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -4709,6 +4709,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
lp->ibn = 3;
lp->active = *p++;
if (MOTO_SROM_BUG) lp->active = 0;
+ /* The if (MOTO_SROM_BUG) statement above indicates lp->active could
+ * be 8, i.e. the size of the lp->phy array */
+ if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy)))
+ return -EINVAL;
lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
@@ -5000,19 +5004,23 @@ mii_get_phy(struct net_device *dev)
}
if ((j == limit) && (i < DE4X5_MAX_MII)) {
for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
- lp->phy[k].addr = i;
- lp->phy[k].id = id;
- lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
- lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
- lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
- lp->mii_cnt++;
- lp->active++;
- printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
- j = de4x5_debug;
- de4x5_debug |= DEBUG_MII;
- de4x5_dbg_mii(dev, k);
- de4x5_debug = j;
- printk("\n");
+ if (k < DE4X5_MAX_PHY) {
+ lp->phy[k].addr = i;
+ lp->phy[k].id = id;
+ lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
+ lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
+ lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
+ lp->mii_cnt++;
+ lp->active++;
+ printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
+ j = de4x5_debug;
+ de4x5_debug |= DEBUG_MII;
+ de4x5_dbg_mii(dev, k);
+ de4x5_debug = j;
+ printk("\n");
+ } else {
+ goto purgatory;
+ }
}
}
purgatory:
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 714e961e7a77..6451c8383639 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4550,10 +4550,10 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
fsl_mc_portal_free(priv->mc_io);
- free_netdev(net_dev);
-
dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
+ free_netdev(net_dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 23d9cbf262c3..740850b64aff 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -400,6 +400,10 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
return;
if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+ /* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8.
+ * We need this check to prevent an array overflow */
+ if (port >= DSAF_MAX_PORT_NUM)
+ return;
reg_val_1 = 0x1 << port;
port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
/* there is difference between V1 and V2 in register.*/
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 5039a2536951..0bf3d47bb90d 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -3003,9 +3003,10 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
+ netif_device_detach(netdev);
+
if (netif_running(netdev))
e100_down(nic);
- netif_device_detach(netdev);
if ((nic->flags & wol_magic) | e100_asf(nic)) {
/* enable reverse auto-negotiation */
@@ -3022,7 +3023,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
*enable_wake = false;
}
- pci_clear_master(pdev);
+ pci_disable_device(pdev);
}
static int __e100_power_off(struct pci_dev *pdev, bool wake)
@@ -3042,8 +3043,6 @@ static int __maybe_unused e100_suspend(struct device *dev_d)
__e100_shutdown(to_pci_dev(dev_d), &wake);
- device_wakeup_disable(dev_d);
-
return 0;
}
@@ -3051,6 +3050,14 @@ static int __maybe_unused e100_resume(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct nic *nic = netdev_priv(netdev);
+ int err;
+
+ err = pci_enable_device(to_pci_dev(dev_d));
+ if (err) {
+ netdev_err(netdev, "Resume cannot enable PCI device, aborting\n");
+ return err;
+ }
+ pci_set_master(to_pci_dev(dev_d));
/* disable reverse auto-negotiation */
if (nic->phy == phy_82552_v) {
@@ -3062,10 +3069,11 @@ static int __maybe_unused e100_resume(struct device *dev_d)
smartspeed & ~(E100_82552_REV_ANEG));
}
- netif_device_attach(netdev);
if (netif_running(netdev))
e100_up(nic);
+ netif_device_attach(netdev);
+
return 0;
}
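
The e100 hunks reorder suspend/resume so the netdev is detached before the link is torn down, the PCI device is fully disabled on the way down, and on resume it is re-enabled (with bus mastering) before traffic is allowed again. A generic sketch of that ordering for a PCI network driver; the demo_* helpers are placeholders.

#include <linux/netdevice.h>
#include <linux/pci.h>

static void demo_down(struct net_device *netdev) { /* stop queues, quiesce HW */ }
static void demo_up(struct net_device *netdev) { /* re-init HW, start queues */ }

static int __maybe_unused demo_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);

	/* Detach first so the stack stops transmitting before the HW goes down. */
	netif_device_detach(netdev);
	if (netif_running(netdev))
		demo_down(netdev);

	pci_disable_device(to_pci_dev(dev));
	return 0;
}

static int __maybe_unused demo_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	int err;

	err = pci_enable_device(to_pci_dev(dev));
	if (err)
		return err;
	pci_set_master(to_pci_dev(dev));

	if (netif_running(netdev))
		demo_up(netdev);
	/* Attach only once the device can actually carry traffic again. */
	netif_device_attach(netdev);
	return 0;
}
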
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 3d528fba754b..4d939af0a626 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -161,6 +161,7 @@ enum i40e_vsi_state_t {
__I40E_VSI_OVERFLOW_PROMISC,
__I40E_VSI_REINIT_REQUESTED,
__I40E_VSI_DOWN_REQUESTED,
+ __I40E_VSI_RELEASING,
/* This must be last as it determines the size of the BITMAP */
__I40E_VSI_STATE_SIZE__,
};
@@ -1247,6 +1248,7 @@ void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
void i40e_ptp_init(struct i40e_pf *pf);
void i40e_ptp_stop(struct i40e_pf *pf);
int i40e_ptp_alloc_pins(struct i40e_pf *pf);
+int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ba862131b9bd..e118cf9265c7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1790,6 +1790,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
bool is_add)
{
struct i40e_pf *pf = vsi->back;
+ u16 num_tc_qps = 0;
u16 sections = 0;
u8 netdev_tc = 0;
u16 numtc = 1;
@@ -1797,13 +1798,33 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
u8 offset;
u16 qmap;
int i;
- u16 num_tc_qps = 0;
sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
offset = 0;
+ /* zero out queue mapping; it will be updated at the end of the function */
+ memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));
+
+ if (vsi->type == I40E_VSI_MAIN) {
+ /* This code helps add more queues to the VSI if we have
+ * more cores than RSS can support; the higher cores will
+ * be served by ATR or other filters. Furthermore, a
+ * non-zero req_queue_pairs says that the user requested a new
+ * queue count via ethtool's set_channels, so use this
+ * value for queue distribution across traffic classes
+ */
+ if (vsi->req_queue_pairs > 0)
+ vsi->num_queue_pairs = vsi->req_queue_pairs;
+ else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ vsi->num_queue_pairs = pf->num_lan_msix;
+ }
/* Number of queues per enabled TC */
- num_tc_qps = vsi->alloc_queue_pairs;
+ if (vsi->type == I40E_VSI_MAIN ||
+ (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
+ num_tc_qps = vsi->num_queue_pairs;
+ else
+ num_tc_qps = vsi->alloc_queue_pairs;
+
if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
/* Find numtc from enabled TC bitmap */
for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -1881,15 +1902,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
}
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
}
-
- /* Set actual Tx/Rx queue pairs */
- vsi->num_queue_pairs = offset;
- if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
- if (vsi->req_queue_pairs > 0)
- vsi->num_queue_pairs = vsi->req_queue_pairs;
- else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
- vsi->num_queue_pairs = pf->num_lan_msix;
- }
+ /* Do not change previously set num_queue_pairs for PFs and VFs */
+ if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
+ (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
+ (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
+ vsi->num_queue_pairs = offset;
/* Scheduler section valid can only be set for ADD VSI */
if (is_add) {
@@ -2623,7 +2640,8 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] &&
- (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
+ (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) &&
+ !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) {
int ret = i40e_sync_vsi_filters(pf->vsi[v]);
if (ret) {
@@ -5427,6 +5445,58 @@ static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
}
/**
+ * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
+ * @vsi: the VSI being reconfigured
+ * @vsi_offset: offset from main VF VSI
+ */
+int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
+{
+ struct i40e_vsi_context ctxt = {};
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ int ret;
+
+ if (!vsi)
+ return I40E_ERR_PARAM;
+ pf = vsi->back;
+ hw = &pf->hw;
+
+ ctxt.seid = vsi->seid;
+ ctxt.pf_num = hw->pf_id;
+ ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset;
+ ctxt.uplink_seid = vsi->uplink_seid;
+ ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
+ ctxt.flags = I40E_AQ_VSI_TYPE_VF;
+ ctxt.info = vsi->info;
+
+ i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc,
+ false);
+ if (vsi->reconfig_rss) {
+ vsi->rss_size = min_t(int, pf->alloc_rss_size,
+ vsi->num_queue_pairs);
+ ret = i40e_vsi_config_rss(vsi);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n");
+ return ret;
+ }
+ vsi->reconfig_rss = false;
+ }
+
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+ }
+ /* update the local VSI info with updated queue map */
+ i40e_vsi_update_queue_map(vsi, &ctxt);
+ vsi->info.valid_sections = 0;
+
+ return ret;
+}
+
+/**
* i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
* @vsi: VSI to be configured
* @enabled_tc: TC bitmap
@@ -5717,24 +5787,6 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
}
/**
- * i40e_is_any_channel - channel exist or not
- * @vsi: ptr to VSI to which channels are associated with
- *
- * Returns true or false if channel(s) exist for associated VSI or not
- **/
-static bool i40e_is_any_channel(struct i40e_vsi *vsi)
-{
- struct i40e_channel *ch, *ch_tmp;
-
- list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
- if (ch->initialized)
- return true;
- }
-
- return false;
-}
-
-/**
* i40e_get_max_queues_for_channel
* @vsi: ptr to VSI to which channels are associated with
*
@@ -6240,26 +6292,15 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi,
/* By default we are in VEPA mode, if this is the first VF/VMDq
* VSI to be added switch to VEB mode.
*/
- if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
- (!i40e_is_any_channel(vsi))) {
- if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
- dev_dbg(&pf->pdev->dev,
- "Failed to create channel. Override queues (%u) not power of 2\n",
- vsi->tc_config.tc_info[0].qcount);
- return -EINVAL;
- }
- if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
- pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+ pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
- if (vsi->type == I40E_VSI_MAIN) {
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
- i40e_do_reset(pf, I40E_PF_RESET_FLAG,
- true);
- else
- i40e_do_reset_safe(pf,
- I40E_PF_RESET_FLAG);
- }
+ if (vsi->type == I40E_VSI_MAIN) {
+ if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
+ else
+ i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
}
/* now onwards for main VSI, number of queues will be value
* of TC0's queue count
@@ -7912,12 +7953,20 @@ config_tc:
vsi->seid);
need_reset = true;
goto exit;
- } else {
- dev_info(&vsi->back->pdev->dev,
- "Setup channel (id:%u) utilizing num_queues %d\n",
- vsi->seid, vsi->tc_config.tc_info[0].qcount);
+ } else if (enabled_tc &&
+ (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) {
+ netdev_info(netdev,
+ "Failed to create channel. Override queues (%u) not power of 2\n",
+ vsi->tc_config.tc_info[0].qcount);
+ ret = -EINVAL;
+ need_reset = true;
+ goto exit;
}
+ dev_info(&vsi->back->pdev->dev,
+ "Setup channel (id:%u) utilizing num_queues %d\n",
+ vsi->seid, vsi->tc_config.tc_info[0].qcount);
+
if (pf->flags & I40E_FLAG_TC_MQPRIO) {
if (vsi->mqprio_qopt.max_rate[0]) {
u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
@@ -8482,9 +8531,8 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
err = i40e_add_del_cloud_filter(vsi, filter, true);
if (err) {
- dev_err(&pf->pdev->dev,
- "Failed to add cloud filter, err %s\n",
- i40e_stat_str(&pf->hw, err));
+ dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
+ err);
goto err;
}
@@ -13771,7 +13819,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
return -ENODEV;
}
-
+ set_bit(__I40E_VSI_RELEASING, vsi->state);
uplink_seid = vsi->uplink_seid;
if (vsi->type != I40E_VSI_SRIOV) {
if (vsi->netdev_registered) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 472f56b360b8..80ae264c99ba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -183,17 +183,18 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
/***********************misc routines*****************************/
/**
- * i40e_vc_disable_vf
+ * i40e_vc_reset_vf
* @vf: pointer to the VF info
- *
- * Disable the VF through a SW reset.
+ * @notify_vf: whether to notify the VF about the reset
+ * Reset VF handler.
**/
-static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
+static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
struct i40e_pf *pf = vf->pf;
int i;
- i40e_vc_notify_vf_reset(vf);
+ if (notify_vf)
+ i40e_vc_notify_vf_reset(vf);
/* We want to ensure that an actual reset occurs initiated after this
* function was called. However, we do not want to wait forever, so
@@ -211,9 +212,14 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
usleep_range(10000, 20000);
}
- dev_warn(&vf->pf->pdev->dev,
- "Failed to initiate reset for VF %d after 200 milliseconds\n",
- vf->vf_id);
+ if (notify_vf)
+ dev_warn(&vf->pf->pdev->dev,
+ "Failed to initiate reset for VF %d after 200 milliseconds\n",
+ vf->vf_id);
+ else
+ dev_dbg(&vf->pf->pdev->dev,
+ "Failed to initiate reset for VF %d after 200 milliseconds\n",
+ vf->vf_id);
}
/**
@@ -674,14 +680,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
u16 vsi_queue_id,
struct virtchnl_rxq_info *info)
{
+ u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
struct i40e_hw *hw = &pf->hw;
struct i40e_hmc_obj_rxq rx_ctx;
- u16 pf_queue_id;
int ret = 0;
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
-
/* clear the context structure first */
memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
@@ -719,6 +724,10 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
}
rx_ctx.rxmax = info->max_pkt_size;
+ /* if port VLAN is configured increase the max packet size */
+ if (vsi->info.pvid)
+ rx_ctx.rxmax += VLAN_HLEN;
+
/* enable 32bytes desc always */
rx_ctx.dsize = 1;
@@ -2106,20 +2115,6 @@ err:
}
/**
- * i40e_vc_reset_vf_msg
- * @vf: pointer to the VF info
- *
- * called from the VF to reset itself,
- * unlike other virtchnl messages, PF driver
- * doesn't send the response back to the VF
- **/
-static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
-{
- if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
- i40e_reset_vf(vf, false);
-}
-
-/**
* i40e_vc_config_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2217,11 +2212,12 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_vsi_queue_config_info *qci =
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
- struct i40e_pf *pf = vf->pf;
u16 vsi_id, vsi_queue_id = 0;
- u16 num_qps_all = 0;
+ struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
int i, j = 0, idx = 0;
+ struct i40e_vsi *vsi;
+ u16 num_qps_all = 0;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
@@ -2310,9 +2306,15 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
qci->num_queue_pairs;
} else {
- for (i = 0; i < vf->num_tc; i++)
- pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
- vf->ch[i].num_qps;
+ for (i = 0; i < vf->num_tc; i++) {
+ vsi = pf->vsi[vf->ch[i].vsi_idx];
+ vsi->num_queue_pairs = vf->ch[i].num_qps;
+
+ if (i40e_update_adq_vsi_queues(vsi, i)) {
+ aq_ret = I40E_ERR_CONFIG;
+ goto error_param;
+ }
+ }
}
error_param:
@@ -2607,8 +2609,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
} else {
/* successful request */
vf->num_req_queues = req_pairs;
- i40e_vc_notify_vf_reset(vf);
- i40e_reset_vf(vf, false);
+ i40e_vc_reset_vf(vf, true);
return 0;
}
@@ -3803,8 +3804,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
vf->num_req_queues = 0;
/* reset the VF in order to allocate resources */
- i40e_vc_notify_vf_reset(vf);
- i40e_reset_vf(vf, false);
+ i40e_vc_reset_vf(vf, true);
return I40E_SUCCESS;
@@ -3844,8 +3844,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
}
/* reset the VF in order to allocate resources */
- i40e_vc_notify_vf_reset(vf);
- i40e_reset_vf(vf, false);
+ i40e_vc_reset_vf(vf, true);
return I40E_SUCCESS;
@@ -3907,7 +3906,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
i40e_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
- i40e_vc_reset_vf_msg(vf);
+ i40e_vc_reset_vf(vf, false);
ret = 0;
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
@@ -4161,7 +4160,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* Force the VF interface down so it has to bring up with new MAC
* address
*/
- i40e_vc_disable_vf(vf);
+ i40e_vc_reset_vf(vf, true);
dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
error_param:
@@ -4170,34 +4169,6 @@ error_param:
}
/**
- * i40e_vsi_has_vlans - True if VSI has configured VLANs
- * @vsi: pointer to the vsi
- *
- * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
- * we have no configured VLANs. Do not call while holding the
- * mac_filter_hash_lock.
- */
-static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
-{
- bool have_vlans;
-
- /* If we have a port VLAN, then the VSI cannot have any VLANs
- * configured, as all MAC/VLAN filters will be assigned to the PVID.
- */
- if (vsi->info.pvid)
- return false;
-
- /* Since we don't have a PVID, we know that if the device is in VLAN
- * mode it must be because of a VLAN filter configured on this VSI.
- */
- spin_lock_bh(&vsi->mac_filter_hash_lock);
- have_vlans = i40e_is_vsi_in_vlan(vsi);
- spin_unlock_bh(&vsi->mac_filter_hash_lock);
-
- return have_vlans;
-}
-
-/**
* i40e_ndo_set_vf_port_vlan
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -4253,19 +4224,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
/* duplicate request, so just return success */
goto error_pvid;
- if (i40e_vsi_has_vlans(vsi)) {
- dev_err(&pf->pdev->dev,
- "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
- vf_id);
- /* Administrator Error - knock the VF offline until he does
- * the right thing by reconfiguring his network correctly
- * and then reloading the VF driver.
- */
- i40e_vc_disable_vf(vf);
- /* During reset the VF got a new VSI, so refresh the pointer. */
- vsi = pf->vsi[vf->lan_vsi_idx];
- }
-
+ i40e_vc_reset_vf(vf, true);
+ /* During reset the VF got a new VSI, so refresh the pointer. */
+ vsi = pf->vsi[vf->lan_vsi_idx];
/* Locked once because multiple functions below iterate list */
spin_lock_bh(&vsi->mac_filter_hash_lock);
@@ -4641,7 +4602,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
goto out;
vf->trusted = setting;
- i40e_vc_disable_vf(vf);
+ i40e_vc_reset_vf(vf, true);
dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
vf_id, setting ? "" : "un");
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index e6e7c1da47fb..75635bd57cf6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -39,6 +39,7 @@
#include "iavf_txrx.h"
#include "iavf_fdir.h"
#include "iavf_adv_rss.h"
+#include <linux/bitmap.h>
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "iavf: "
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 5a359a0a20ec..144a77679359 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1776,6 +1776,7 @@ static int iavf_set_channels(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u32 num_req = ch->combined_count;
+ int i;
if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
adapter->num_tc) {
@@ -1786,7 +1787,7 @@ static int iavf_set_channels(struct net_device *netdev,
/* All of these should have already been checked by ethtool before this
* even gets to us, but just to be sure.
*/
- if (num_req > adapter->vsi_res->num_queue_pairs)
+ if (num_req == 0 || num_req > adapter->vsi_res->num_queue_pairs)
return -EINVAL;
if (num_req == adapter->num_active_queues)
@@ -1798,6 +1799,20 @@ static int iavf_set_channels(struct net_device *netdev,
adapter->num_req_queues = num_req;
adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_schedule_reset(adapter);
+
+ /* wait until the reset is done */
+ for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
+ msleep(IAVF_RESET_WAIT_MS);
+ if (adapter->flags & IAVF_FLAG_RESET_PENDING)
+ continue;
+ break;
+ }
+ if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->num_active_queues = num_req;
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
@@ -1844,14 +1859,13 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
- if (!indir)
- return 0;
-
- memcpy(key, adapter->rss_key, adapter->rss_key_size);
+ if (key)
+ memcpy(key, adapter->rss_key, adapter->rss_key_size);
- /* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < adapter->rss_lut_size; i++)
- indir[i] = (u32)adapter->rss_lut[i];
+ if (indir)
+ /* Each 32 bits pointed by 'indir' is stored with a lut entry */
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ indir[i] = (u32)adapter->rss_lut[i];
return 0;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 847d67e32a54..336e6bf95e48 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -697,6 +697,23 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
}
/**
+ * iavf_restore_filters
+ * @adapter: board private structure
+ *
+ * Restore existing non-MAC filters when the VF netdev comes back up
+ **/
+static void iavf_restore_filters(struct iavf_adapter *adapter)
+{
+ /* re-add all VLAN filters */
+ if (VLAN_ALLOWED(adapter)) {
+ u16 vid;
+
+ for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
+ iavf_add_vlan(adapter, vid);
+ }
+}
+
+/**
* iavf_vlan_rx_add_vid - Add a VLAN filter to a device
* @netdev: network device struct
* @proto: unused protocol data
@@ -709,8 +726,11 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
if (!VLAN_ALLOWED(adapter))
return -EIO;
+
if (iavf_add_vlan(adapter, vid) == NULL)
return -ENOMEM;
+
+ set_bit(vid, adapter->vsi.active_vlans);
return 0;
}
@@ -725,11 +745,13 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- if (VLAN_ALLOWED(adapter)) {
- iavf_del_vlan(adapter, vid);
- return 0;
- }
- return -EIO;
+ if (!VLAN_ALLOWED(adapter))
+ return -EIO;
+
+ iavf_del_vlan(adapter, vid);
+ clear_bit(vid, adapter->vsi.active_vlans);
+
+ return 0;
}
/**
@@ -1639,8 +1661,7 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
return 0;
}
-
- if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
+ if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
(adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
iavf_set_promiscuous(adapter, 0);
return 0;
@@ -2123,8 +2144,8 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
- iavf_free_queues(adapter);
iavf_free_q_vectors(adapter);
+ iavf_free_queues(adapter);
memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
iavf_shutdown_adminq(&adapter->hw);
adapter->netdev->flags &= ~IFF_UP;
@@ -2410,7 +2431,7 @@ static void iavf_adminq_task(struct work_struct *work)
/* check for error indications */
val = rd32(hw, hw->aq.arq.len);
- if (val == 0xdeadbeef) /* indicates device in reset */
+ if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
goto freedom;
oldval = val;
if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
@@ -3095,8 +3116,10 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
return -ENOMEM;
while (!mutex_trylock(&adapter->crit_lock)) {
- if (--count == 0)
- goto err;
+ if (--count == 0) {
+ kfree(filter);
+ return err;
+ }
udelay(1);
}
@@ -3107,11 +3130,11 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
/* start out with flow type and eth type IPv4 to begin with */
filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
err = iavf_parse_cls_flower(adapter, cls_flower, filter);
- if (err < 0)
+ if (err)
goto err;
err = iavf_handle_tclass(adapter, tc, filter);
- if (err < 0)
+ if (err)
goto err;
/* add filter to the list */
@@ -3308,6 +3331,9 @@ static int iavf_open(struct net_device *netdev)
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ /* Restore VLAN filters that were removed with IFF_DOWN */
+ iavf_restore_filters(adapter);
+
iavf_configure(adapter);
iavf_up_complete(adapter);
@@ -3503,7 +3529,8 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ if (adapter->vf_res &&
+ !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER);
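
The iavf changes above remember configured VLAN IDs in a bitmap so they can be replayed when the interface comes back up (iavf_restore_filters). A bare-bones sketch of that bookkeeping pattern; the demo_* names are hypothetical.

#include <linux/bitmap.h>
#include <linux/if_vlan.h>

struct demo_adapter {
	DECLARE_BITMAP(active_vlans, VLAN_N_VID);
};

static void demo_vlan_add(struct demo_adapter *ad, u16 vid)
{
	set_bit(vid, ad->active_vlans);	/* remember the VID for replay */
	/* ... program the hardware filter ... */
}

static void demo_vlan_kill(struct demo_adapter *ad, u16 vid)
{
	clear_bit(vid, ad->active_vlans);
	/* ... remove the hardware filter ... */
}

static void demo_restore_vlans(struct demo_adapter *ad)
{
	u16 vid;

	/* Re-program every VLAN the stack still believes is active. */
	for_each_set_bit(vid, ad->active_vlans, VLAN_N_VID)
		demo_vlan_add(ad, vid);
}
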
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 6433c909c6b2..072391c494ce 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -25,6 +25,7 @@
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <asm/checksum.h>
@@ -239,6 +240,7 @@ ltq_etop_hw_init(struct net_device *dev)
{
struct ltq_etop_priv *priv = netdev_priv(dev);
int i;
+ int err;
ltq_pmu_enable(PMU_PPE);
@@ -273,7 +275,13 @@ ltq_etop_hw_init(struct net_device *dev)
if (IS_TX(i)) {
ltq_dma_alloc_tx(&ch->dma);
- request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
+ err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_tx", priv);
+ if (err) {
+ netdev_err(dev,
+ "Unable to get Tx DMA IRQ %d\n",
+ irq);
+ return err;
+ }
} else if (IS_RX(i)) {
ltq_dma_alloc_rx(&ch->dma);
for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
@@ -281,7 +289,13 @@ ltq_etop_hw_init(struct net_device *dev)
if (ltq_etop_alloc_skb(ch))
return -ENOMEM;
ch->dma.desc = 0;
- request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
+ err = request_irq(irq, ltq_etop_dma_irq, 0, "etop_rx", priv);
+ if (err) {
+ netdev_err(dev,
+ "Unable to get Rx DMA IRQ %d\n",
+ irq);
+ return err;
+ }
}
ch->dma.irq = irq;
}
@@ -726,7 +740,7 @@ static struct platform_driver ltq_mii_driver = {
},
};
-int __init
+static int __init
init_ltq_etop(void)
{
int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 62a97c46fba0..ef878973b859 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -429,12 +429,14 @@ static const struct of_device_id orion_mdio_match[] = {
};
MODULE_DEVICE_TABLE(of, orion_mdio_match);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id orion_mdio_acpi_match[] = {
{ "MRVL0100", BUS_TYPE_SMI },
{ "MRVL0101", BUS_TYPE_XSMI },
{ },
};
MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match);
+#endif
static struct platform_driver orion_mdio_driver = {
.probe = orion_mdio_probe,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index c7fd466a0efd..a09a507369ac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -236,10 +236,11 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
u64 lmt_addr, val, tbl_base;
int pf, vf, num_vfs, hw_vfs;
void __iomem *lmt_map_base;
- int index = 0, off = 0;
- int bytes_not_copied;
int buf_size = 10240;
+ size_t off = 0;
+ int index = 0;
char *buf;
+ int ret;
/* don't allow partial reads */
if (*ppos != 0)
@@ -303,15 +304,17 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
}
off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
- bytes_not_copied = copy_to_user(buffer, buf, off);
+ ret = min(off, count);
+ if (copy_to_user(buffer, buf, ret))
+ ret = -EFAULT;
kfree(buf);
iounmap(lmt_map_base);
- if (bytes_not_copied)
- return -EFAULT;
+ if (ret < 0)
+ return ret;
- *ppos = off;
- return off;
+ *ppos = ret;
+ return ret;
}
RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
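
The rvu_debugfs fix above clamps the copy to the caller's buffer and returns -EFAULT only when copy_to_user() really fails, instead of treating "bytes not copied" as the result. A generic sketch of a one-shot debugfs read written the same way; the buffer contents are placeholders, and the handler would be hooked up with debugfs_create_file().

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static ssize_t demo_read(struct file *filp, char __user *buffer,
			 size_t count, loff_t *ppos)
{
	const size_t bufsz = 4096;
	size_t off = 0;
	char *buf;
	int ret;

	if (*ppos != 0)		/* no partial reads of a regenerated buffer */
		return 0;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off += scnprintf(buf + off, bufsz - off, "example line\n");

	/* Never copy more than the caller asked for. */
	ret = min(off, count);
	if (copy_to_user(buffer, buf, ret))
		ret = -EFAULT;

	kfree(buf);
	if (ret < 0)
		return ret;

	*ppos = ret;
	return ret;
}
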
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index f71ec4d9d68e..8eaa24d865c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -339,6 +339,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_PAGE_FAULT_RESUME:
case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
case MLX5_CMD_OP_DEALLOC_SF:
+ case MLX5_CMD_OP_DESTROY_UCTX:
+ case MLX5_CMD_OP_DESTROY_UMEM:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -464,9 +466,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
case MLX5_CMD_OP_CREATE_UCTX:
- case MLX5_CMD_OP_DESTROY_UCTX:
case MLX5_CMD_OP_CREATE_UMEM:
- case MLX5_CMD_OP_DESTROY_UMEM:
case MLX5_CMD_OP_ALLOC_MEMIC:
case MLX5_CMD_OP_MODIFY_XRQ:
case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 02e77ffe5c3e..5371ad0a12eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -164,13 +164,14 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
MLX5_SET(destroy_cq_in, in, uid, cq->uid);
err = mlx5_cmd_exec_in(dev, destroy_cq, in);
+ if (err)
+ return err;
synchronize_irq(cq->irqn);
-
mlx5_cq_put(cq);
wait_for_completion(&cq->free);
- return err;
+ return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 07c8d9811bc8..10d195042ab5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -507,6 +507,8 @@ void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
if (!mlx5_debugfs_root)
return;
- if (cq->dbg)
+ if (cq->dbg) {
rem_res_tree(cq->dbg);
+ cq->dbg = NULL;
+ }
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index c1c6e74c79c4..2445e2ae3324 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -1356,9 +1356,13 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
int
mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
+ bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
+ int err;
+
if (!priv) {
NL_SET_ERR_MSG_MOD(extack,
"offload of ct action isn't available");
@@ -1369,6 +1373,17 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
attr->ct_attr.ct_action = act->ct.action;
attr->ct_attr.nf_ft = act->ct.flow_table;
+ if (!clear_action)
+ goto out;
+
+ err = mlx5_tc_ct_entry_set_registers(priv, mod_acts, 0, 0, 0, 0);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set registers for ct clear");
+ return err;
+ }
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+out:
return 0;
}
@@ -1898,23 +1913,16 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
memcpy(pre_ct_attr, attr, attr_sz);
- err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0);
- if (err) {
- ct_dbg("Failed to set register for ct clear");
- goto err_set_registers;
- }
-
mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
mod_acts->num_actions,
mod_acts->actions);
if (IS_ERR(mod_hdr)) {
err = PTR_ERR(mod_hdr);
ct_dbg("Failed to add create ct clear mod hdr");
- goto err_set_registers;
+ goto err_mod_hdr;
}
pre_ct_attr->modify_hdr = mod_hdr;
- pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
rule = mlx5_tc_rule_insert(priv, orig_spec, pre_ct_attr);
if (IS_ERR(rule)) {
@@ -1930,7 +1938,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
err_insert:
mlx5_modify_header_dealloc(priv->mdev, mod_hdr);
-err_set_registers:
+err_mod_hdr:
netdev_warn(priv->netdev,
"Failed to offload ct clear flow, err %d\n", err);
kfree(pre_ct_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 363329f4aac6..99662af1e41a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -110,6 +110,7 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec);
int
mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack);
@@ -172,6 +173,7 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
static inline int
mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_acts,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 8f64f2c8895a..b689701ac7d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -102,6 +102,7 @@ struct mlx5e_tc_flow {
refcount_t refcnt;
struct rcu_head rcu_head;
struct completion init_done;
+ struct completion del_hw_done;
int tunnel_id; /* the mapped tunnel id of this flow */
struct mlx5_flow_attr *attr;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 660cca73c36c..042b1abe1437 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -245,8 +245,14 @@ static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow,
struct list_head *flow_list,
int index)
{
- if (IS_ERR(mlx5e_flow_get(flow)))
+ if (IS_ERR(mlx5e_flow_get(flow))) {
+ /* Flow is being deleted concurrently. Wait for it to be
+ * unoffloaded from hardware, otherwise deleting encap will
+ * fail.
+ */
+ wait_for_completion(&flow->del_hw_done);
return;
+ }
wait_for_completion(&flow->init_done);
flow->tmp_entry_index = index;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 62abce008c7b..a2a9f68579dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -55,6 +55,7 @@ struct mlx5e_ktls_offload_context_rx {
DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);
/* resync */
+ spinlock_t lock; /* protects resync fields */
struct mlx5e_ktls_rx_resync_ctx resync;
struct list_head list;
};
@@ -386,14 +387,18 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r
struct mlx5e_icosq *sq;
bool trigger_poll;
- memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
-
sq = &c->async_icosq;
ktls_resync = sq->ktls_resync;
+ trigger_poll = false;
spin_lock_bh(&ktls_resync->lock);
- list_add_tail(&priv_rx->list, &ktls_resync->list);
- trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
+ spin_lock_bh(&priv_rx->lock);
+ memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
+ if (list_empty(&priv_rx->list)) {
+ list_add_tail(&priv_rx->list, &ktls_resync->list);
+ trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
+ }
+ spin_unlock_bh(&priv_rx->lock);
spin_unlock_bh(&ktls_resync->lock);
if (!trigger_poll)
@@ -617,6 +622,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
if (err)
goto err_create_key;
+ INIT_LIST_HEAD(&priv_rx->list);
+ spin_lock_init(&priv_rx->lock);
priv_rx->crypto_info =
*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -730,10 +737,14 @@ bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
priv_rx = list_first_entry(&local_list,
struct mlx5e_ktls_offload_context_rx,
list);
+ spin_lock(&priv_rx->lock);
cseg = post_static_params(sq, priv_rx);
- if (IS_ERR(cseg))
+ if (IS_ERR(cseg)) {
+ spin_unlock(&priv_rx->lock);
break;
- list_del(&priv_rx->list);
+ }
+ list_del_init(&priv_rx->list);
+ spin_unlock(&priv_rx->lock);
db_cseg = cseg;
}
if (db_cseg)
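
The ktls_rx hunks above add a per-context spinlock and switch to list_empty()/list_del_init() so a context can sit on the resync list at most once and can later be re-queued safely. A stripped-down sketch of that idempotent queueing pattern; the demo_* types are hypothetical.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_ctx {
	spinlock_t lock;		/* protects list membership */
	struct list_head node;
};

static LIST_HEAD(demo_pending);
static DEFINE_SPINLOCK(demo_pending_lock);

static void demo_ctx_init(struct demo_ctx *ctx)
{
	spin_lock_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->node);	/* list_empty() is true initially */
}

static void demo_queue(struct demo_ctx *ctx)
{
	spin_lock_bh(&demo_pending_lock);
	spin_lock_bh(&ctx->lock);
	/* Only queue once, even if called concurrently. */
	if (list_empty(&ctx->node))
		list_add_tail(&ctx->node, &demo_pending);
	spin_unlock_bh(&ctx->lock);
	spin_unlock_bh(&demo_pending_lock);
}

static void demo_dequeue(struct demo_ctx *ctx)
{
	spin_lock_bh(&demo_pending_lock);
	spin_lock_bh(&ctx->lock);
	list_del_init(&ctx->node);	/* stays re-queueable later */
	spin_unlock_bh(&ctx->lock);
	spin_unlock_bh(&demo_pending_lock);
}
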
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 835caa1c7b74..3d45f4ae80c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1600,6 +1600,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
else
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
}
+ complete_all(&flow->del_hw_done);
if (mlx5_flow_has_geneve_opt(flow))
mlx5_geneve_tlv_option_del(priv->mdev->geneve);
@@ -3607,7 +3608,9 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
- err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
+ err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
+ &parse_attr->mod_hdr_acts,
+ act, extack);
if (err)
return err;
@@ -4276,7 +4279,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
return -EOPNOTSUPP;
}
- err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
+ err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr,
+ &parse_attr->mod_hdr_acts,
+ act, extack);
if (err)
return err;
@@ -4465,6 +4470,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
refcount_set(&flow->refcnt, 1);
init_completion(&flow->init_done);
+ init_completion(&flow->del_hw_done);
*__flow = flow;
*__parse_attr = parse_attr;
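
The del_hw_done completion introduced across tc_priv.h, tc_tun_encap.c, and en_tc.c above lets a path that failed to take a flow reference wait until the concurrent deletion has actually unoffloaded the hardware rules. A minimal wait/signal sketch of the same pattern; the demo_* names are hypothetical.

#include <linux/completion.h>
#include <linux/refcount.h>

struct demo_flow {
	refcount_t refcnt;
	struct completion del_hw_done;
};

static void demo_flow_init(struct demo_flow *flow)
{
	refcount_set(&flow->refcnt, 1);
	init_completion(&flow->del_hw_done);
}

/* Deletion path: unoffload from hardware, then wake any waiters. */
static void demo_flow_del_hw(struct demo_flow *flow)
{
	/* ... remove hardware rules ... */
	complete_all(&flow->del_hw_done);
}

/* Concurrent path that failed to take a reference. */
static void demo_use_or_wait(struct demo_flow *flow)
{
	if (!refcount_inc_not_zero(&flow->refcnt)) {
		/* Flow is going away; wait until its HW rules are gone. */
		wait_for_completion(&flow->del_hw_done);
		return;
	}
	/* ... we hold a reference, safe to use the flow ... */
}
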
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index ec136b499204..51a8cecc4a7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1305,12 +1305,17 @@ abort:
*/
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
+ bool toggle_lag;
int ret;
if (!mlx5_esw_allowed(esw))
return 0;
- mlx5_lag_disable_change(esw->dev);
+ toggle_lag = esw->mode == MLX5_ESWITCH_NONE;
+
+ if (toggle_lag)
+ mlx5_lag_disable_change(esw->dev);
+
down_write(&esw->mode_lock);
if (esw->mode == MLX5_ESWITCH_NONE) {
ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
@@ -1324,7 +1329,10 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
esw->esw_funcs.num_vfs = num_vfs;
}
up_write(&esw->mode_lock);
- mlx5_lag_enable_change(esw->dev);
+
+ if (toggle_lag)
+ mlx5_lag_enable_change(esw->dev);
+
return ret;
}
@@ -1572,6 +1580,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->enabled_vports = 0;
esw->mode = MLX5_ESWITCH_NONE;
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) &&
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
+ esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
+ else
+ esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
dev->priv.eswitch = esw;
BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
@@ -1934,7 +1947,7 @@ free_out:
return err;
}
-u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
+u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -1948,7 +1961,7 @@ mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
struct mlx5_eswitch *esw;
esw = dev->priv.eswitch;
- return mlx5_esw_allowed(esw) ? esw->offloads.encap :
+ return (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) ? esw->offloads.encap :
DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f4eaa5893886..a46455694f7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3183,12 +3183,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
u64 mapping_id;
int err;
- if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
- MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
- esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
- else
- esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
-
mutex_init(&esw->offloads.termtbl_mutex);
mlx5_rdma_enable_roce(esw->dev);
@@ -3286,7 +3280,6 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
esw_offloads_metadata_uninit(esw);
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
- esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
@@ -3630,7 +3623,7 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
*encap = esw->offloads.encap;
unlock:
up_write(&esw->mode_lock);
- return 0;
+ return err;
}
static bool
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 31c99d53faf7..7e0e04cf26f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -40,7 +40,7 @@
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
-#define MLX5_SF_NUM_COUNTERS_BULK 6
+#define MLX5_SF_NUM_COUNTERS_BULK 8
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 48d2ea690d7a..4ddf6b330a44 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -615,6 +615,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
bool is_bonded, is_in_lag, mode_supported;
int bond_status = 0;
int num_slaves = 0;
+ int changed = 0;
int idx;
if (!netif_is_lag_master(upper))
@@ -653,27 +654,27 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
*/
is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;
- if (!mlx5_lag_is_ready(ldev) && is_in_lag) {
- NL_SET_ERR_MSG_MOD(info->info.extack,
- "Can't activate LAG offload, PF is configured with more than 64 VFs");
- return 0;
- }
-
/* Lag mode must be activebackup or hash. */
mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;
- if (is_in_lag && !mode_supported)
- NL_SET_ERR_MSG_MOD(info->info.extack,
- "Can't activate LAG offload, TX type isn't supported");
-
is_bonded = is_in_lag && mode_supported;
if (tracker->is_bonded != is_bonded) {
tracker->is_bonded = is_bonded;
- return 1;
+ changed = 1;
}
- return 0;
+ if (!is_in_lag)
+ return changed;
+
+ if (!mlx5_lag_is_ready(ldev))
+ NL_SET_ERR_MSG_MOD(info->info.extack,
+ "Can't activate LAG offload, PF is configured with more than 64 VFs");
+ else if (!mode_supported)
+ NL_SET_ERR_MSG_MOD(info->info.extack,
+ "Can't activate LAG offload, TX type isn't supported");
+
+ return changed;
}
static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
@@ -716,9 +717,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
ldev = container_of(this, struct mlx5_lag, nb);
- if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE)
- return NOTIFY_DONE;
-
tracker = ldev->tracker;
switch (event) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 49089cbe897c..8cbd36c82b3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -135,25 +135,14 @@ static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
u16 vport_number,
+ bool other_vport,
struct mlx5dr_cmd_vport_cap *vport_caps)
{
- u16 cmd_vport = vport_number;
- bool other_vport = true;
int ret;
- if (vport_number == MLX5_VPORT_UPLINK) {
- dr_domain_fill_uplink_caps(dmn, vport_caps);
- return 0;
- }
-
- if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) {
- other_vport = false;
- cmd_vport = 0;
- }
-
ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
other_vport,
- cmd_vport,
+ vport_number,
&vport_caps->icm_address_rx,
&vport_caps->icm_address_tx);
if (ret)
@@ -161,7 +150,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
other_vport,
- cmd_vport,
+ vport_number,
&vport_caps->vport_gvmi);
if (ret)
return ret;
@@ -176,9 +165,15 @@ static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
{
return dr_domain_query_vport(dmn,
dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
+ false,
&dmn->info.caps.vports.esw_manager_caps);
}
+static void dr_domain_query_uplink(struct mlx5dr_domain *dmn)
+{
+ dr_domain_fill_uplink_caps(dmn, &dmn->info.caps.vports.uplink_caps);
+}
+
static struct mlx5dr_cmd_vport_cap *
dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
{
@@ -190,7 +185,7 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
if (!vport_caps)
return NULL;
- ret = dr_domain_query_vport(dmn, vport, vport_caps);
+ ret = dr_domain_query_vport(dmn, vport, true, vport_caps);
if (ret) {
kvfree(vport_caps);
return NULL;
@@ -207,16 +202,26 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
return vport_caps;
}
+static bool dr_domain_is_esw_mgr_vport(struct mlx5dr_domain *dmn, u16 vport)
+{
+ struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+
+ return (caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
+ (!caps->is_ecpf && vport == 0);
+}
+
struct mlx5dr_cmd_vport_cap *
mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
{
struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
struct mlx5dr_cmd_vport_cap *vport_caps;
- if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
- (!caps->is_ecpf && vport == 0))
+ if (dr_domain_is_esw_mgr_vport(dmn, vport))
return &caps->vports.esw_manager_caps;
+ if (vport == MLX5_VPORT_UPLINK)
+ return &caps->vports.uplink_caps;
+
vport_load:
vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
if (vport_caps)
@@ -241,17 +246,6 @@ static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
}
}
-static int dr_domain_query_uplink(struct mlx5dr_domain *dmn)
-{
- struct mlx5dr_cmd_vport_cap *vport_caps;
-
- vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK);
- if (!vport_caps)
- return -EINVAL;
-
- return 0;
-}
-
static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
struct mlx5dr_domain *dmn)
{
@@ -281,11 +275,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
goto free_vports_caps_xa;
}
- ret = dr_domain_query_uplink(dmn);
- if (ret) {
- mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret);
- goto free_vports_caps_xa;
- }
+ dr_domain_query_uplink(dmn);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 75c775bee351..793365242e85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -924,11 +924,12 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
/* Check that all mask data was consumed */
for (i = 0; i < consumed_mask.match_sz; i++) {
- if (consumed_mask.match_buf[i]) {
- mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n");
- ret = -EOPNOTSUPP;
- goto free_consumed_mask;
- }
+ if (!((u8 *)consumed_mask.match_buf)[i])
+ continue;
+
+ mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n");
+ ret = -EOPNOTSUPP;
+ goto free_consumed_mask;
}
ret = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 3028b776da00..2333c2439c28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -764,6 +764,7 @@ struct mlx5dr_roce_cap {
struct mlx5dr_vports {
struct mlx5dr_cmd_vport_cap esw_manager_caps;
+ struct mlx5dr_cmd_vport_cap uplink_caps;
struct xarray vports_caps_xa;
};
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index cc2d907c4c4b..23a336c5096e 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -392,7 +392,7 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
/* get MAC address from EEPROM */
for (i = 0; i < 3; i++)
addr[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
- eth_hw_addr_set(net_dev, (u8 *)addr);
+ eth_hw_addr_set(net_dev, (u8 *)addr);
rc = 1;
break;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 85208128f135..b7c2579c963b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -485,8 +485,28 @@ static int socfpga_dwmac_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend,
- socfpga_dwmac_resume);
+static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ stmmac_bus_clks_config(priv, false);
+
+ return 0;
+}
+
+static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ return stmmac_bus_clks_config(priv, true);
+}
+
+static const struct dev_pm_ops socfpga_dwmac_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume)
+ SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL)
+};
static const struct socfpga_dwmac_ops socfpga_gen5_ops = {
.set_phy_mode = socfpga_gen5_set_phy_mode,
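
SIMPLE_DEV_PM_OPS() only populates the system-sleep callbacks, so switching to an open-coded dev_pm_ops is what makes room for the new runtime-PM pair that gates the bus clocks. A generic sketch of that combination, with stub example_* callbacks standing in for real ones:

    static int __maybe_unused example_suspend(struct device *dev)
    {
            return 0;       /* system-sleep suspend */
    }

    static int __maybe_unused example_resume(struct device *dev)
    {
            return 0;       /* system-sleep resume */
    }

    static int __maybe_unused example_runtime_suspend(struct device *dev)
    {
            return 0;       /* gate clocks here */
    }

    static int __maybe_unused example_runtime_resume(struct device *dev)
    {
            return 0;       /* ungate clocks here */
    }

    static const struct dev_pm_ops example_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
            SET_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
    };
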
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d3f350c25b9b..2eb284576336 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -511,6 +511,14 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
return true;
}
+static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv)
+{
+ /* Correct the clk domain crossing(CDC) error */
+ if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
+ return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
+ return 0;
+}
+
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
* @priv: driver private structure
* @p : descriptor pointer
@@ -524,7 +532,6 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
{
struct skb_shared_hwtstamps shhwtstamp;
bool found = false;
- s64 adjust = 0;
u64 ns = 0;
if (!priv->hwts_tx_en)
@@ -543,12 +550,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
}
if (found) {
- /* Correct the clk domain crossing(CDC) error */
- if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
- adjust += -(2 * (NSEC_PER_SEC /
- priv->plat->clk_ptp_rate));
- ns += adjust;
- }
+ ns -= stmmac_cdc_adjust(priv);
memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -573,7 +575,6 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
{
struct skb_shared_hwtstamps *shhwtstamp = NULL;
struct dma_desc *desc = p;
- u64 adjust = 0;
u64 ns = 0;
if (!priv->hwts_rx_en)
@@ -586,11 +587,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
- /* Correct the clk domain crossing(CDC) error */
- if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate) {
- adjust += 2 * (NSEC_PER_SEC / priv->plat->clk_ptp_rate);
- ns -= adjust;
- }
+ ns -= stmmac_cdc_adjust(priv);
netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
shhwtstamp = skb_hwtstamps(skb);
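
Factoring the correction into stmmac_cdc_adjust() makes the arithmetic easy to check: the clock-domain-crossing error is two PTP clock periods, so at a 250 MHz clk_ptp_rate (4 ns period) the helper returns 8 ns, and it returns 0 when the platform is not GMAC4 or no PTP clock rate is set; for rates that divide NSEC_PER_SEC evenly this agrees with the per-path computation it replaces. A small standalone check of the same formula (user-space sketch, not driver code):

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Same formula as stmmac_cdc_adjust(): two PTP clock periods, in ns. */
    static unsigned long long cdc_adjust_ns(unsigned long clk_ptp_rate)
    {
            if (!clk_ptp_rate)
                    return 0;
            return (2ULL * NSEC_PER_SEC) / clk_ptp_rate;
    }

    int main(void)
    {
            printf("%llu\n", cdc_adjust_ns(250000000));  /* prints 8: 8 ns at 250 MHz */
            printf("%llu\n", cdc_adjust_ns(62500000));   /* prints 32: 32 ns at 62.5 MHz */
            return 0;
    }
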
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index bfdf89e54752..8a19a06b505d 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -306,7 +306,6 @@ static void sp_setup(struct net_device *dev)
{
/* Finish setting up the DEVICE info. */
dev->netdev_ops = &sp_netdev_ops;
- dev->needs_free_netdev = true;
dev->mtu = SIXP_MTU;
dev->hard_header_len = AX25_MAX_HEADER_LEN;
dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 5528d97110d5..ef790fd0ab56 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -853,6 +853,7 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
u32 offset;
u32 val;
+ /* This should only be changed when HOL_BLOCK_EN is disabled */
offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
val = hol_block_timer_val(ipa, microseconds);
iowrite32(val, ipa->reg_virt + offset);
@@ -868,6 +869,9 @@ ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
val = enable ? HOL_BLOCK_EN_FMASK : 0;
offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
iowrite32(val, endpoint->ipa->reg_virt + offset);
+ /* When enabling, the register must be written twice for IPA v4.5+ */
+ if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
+ iowrite32(val, endpoint->ipa->reg_virt + offset);
}
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
@@ -880,6 +884,7 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
continue;
+ ipa_endpoint_init_hol_block_enable(endpoint, false);
ipa_endpoint_init_hol_block_timer(endpoint, 0);
ipa_endpoint_init_hol_block_enable(endpoint, true);
}
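
The two comments added above encode the ordering rules the clear-all loop now follows: the HOL-block timer may only be reprogrammed while HOL_BLOCK_EN is clear, and on IPA v4.5+ the enable register must be written twice when enabling. A condensed per-endpoint sketch of that sequence (hol_block_reprogram is a hypothetical helper; the functions it calls are the ones in the hunks above):

    static void hol_block_reprogram(struct ipa_endpoint *endpoint, u32 microseconds)
    {
            ipa_endpoint_init_hol_block_enable(endpoint, false); /* timer must change while disabled */
            ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
            ipa_endpoint_init_hol_block_enable(endpoint, true);  /* writes twice internally on IPA v4.5+ */
    }
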
diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c
index e3da95d69409..06cec7199382 100644
--- a/drivers/net/ipa/ipa_resource.c
+++ b/drivers/net/ipa/ipa_resource.c
@@ -52,7 +52,7 @@ static bool ipa_resource_limits_valid(struct ipa *ipa,
return false;
}
- group_count = data->rsrc_group_src_count;
+ group_count = data->rsrc_group_dst_count;
if (!group_count || group_count > IPA_RESOURCE_GROUP_MAX)
return false;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fecc9a1d293a..1572878c3403 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1010,6 +1010,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
int txq = skb->queue_mapping;
+ struct netdev_queue *queue;
struct tun_file *tfile;
int len = skb->len;
@@ -1054,6 +1055,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
if (ptr_ring_produce(&tfile->tx_ring, skb))
goto drop;
+ /* NETIF_F_LLTX requires to do our own update of trans_start */
+ queue = netdev_get_tx_queue(dev, txq);
+ queue->trans_start = jiffies;
+
/* Notify and wake up reader process */
if (tfile->flags & TUN_FASYNC)
kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
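
Because tun sets NETIF_F_LLTX, the networking core skips its per-queue TX lock and therefore never refreshes txq->trans_start, which is why the xmit path now does it explicitly; otherwise the TX watchdog can act on a stale timestamp. A minimal sketch of the same pattern in a hypothetical LLTX ndo_start_xmit (example_* names are illustrative):

    static netdev_tx_t example_lltx_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            int txq_index = skb_get_queue_mapping(skb);
            struct netdev_queue *txq = netdev_get_tx_queue(dev, txq_index);

            /* hypothetical: hand the skb to the device-private ring here */

            /* NETIF_F_LLTX: the core does not touch trans_start, so do it ourselves */
            txq->trans_start = jiffies;

            return NETDEV_TX_OK;
    }
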
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 4a02f33f0643..f9877a3e83ac 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -9603,12 +9603,9 @@ static int rtl8152_probe(struct usb_interface *intf,
netdev->hw_features &= ~NETIF_F_RXCSUM;
}
- if (le16_to_cpu(udev->descriptor.idVendor) == VENDOR_ID_LENOVO) {
- switch (le16_to_cpu(udev->descriptor.idProduct)) {
- case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
- case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
- tp->lenovo_macpassthru = 1;
- }
+ if (udev->parent &&
+ le16_to_cpu(udev->parent->descriptor.idVendor) == VENDOR_ID_LENOVO) {
+ tp->lenovo_macpassthru = 1;
}
if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index bae9d429b813..ecab9064a845 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -598,14 +598,14 @@ static struct irq_chip amd_gpio_irqchip = {
#define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
-static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
+static bool do_amd_gpio_irq_handler(int irq, void *dev_id)
{
struct amd_gpio *gpio_dev = dev_id;
struct gpio_chip *gc = &gpio_dev->gc;
- irqreturn_t ret = IRQ_NONE;
unsigned int i, irqnr;
unsigned long flags;
u32 __iomem *regs;
+ bool ret = false;
u32 regval;
u64 status, mask;
@@ -627,6 +627,14 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
/* Each status bit covers four pins */
for (i = 0; i < 4; i++) {
regval = readl(regs + i);
+ /* caused wake on resume context for shared IRQ */
+ if (irq < 0 && (regval & BIT(WAKE_STS_OFF))) {
+ dev_dbg(&gpio_dev->pdev->dev,
+ "Waking due to GPIO %d: 0x%x",
+ irqnr + i, regval);
+ return true;
+ }
+
if (!(regval & PIN_IRQ_PENDING) ||
!(regval & BIT(INTERRUPT_MASK_OFF)))
continue;
@@ -650,9 +658,12 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
}
writel(regval, regs + i);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
- ret = IRQ_HANDLED;
+ ret = true;
}
}
+ /* did not cause wake on resume context for shared IRQ */
+ if (irq < 0)
+ return false;
/* Signal EOI to the GPIO unit */
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
@@ -664,6 +675,16 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
return ret;
}
+static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
+{
+ return IRQ_RETVAL(do_amd_gpio_irq_handler(irq, dev_id));
+}
+
+static bool __maybe_unused amd_gpio_check_wake(void *dev_id)
+{
+ return do_amd_gpio_irq_handler(-1, dev_id);
+}
+
static int amd_get_groups_count(struct pinctrl_dev *pctldev)
{
struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctldev);
@@ -1033,6 +1054,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
goto out2;
platform_set_drvdata(pdev, gpio_dev);
+ acpi_register_wakeup_handler(gpio_dev->irq, amd_gpio_check_wake, gpio_dev);
dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
return ret;
@@ -1050,6 +1072,7 @@ static int amd_gpio_remove(struct platform_device *pdev)
gpio_dev = platform_get_drvdata(pdev);
gpiochip_remove(&gpio_dev->gc);
+ acpi_unregister_wakeup_handler(amd_gpio_check_wake, gpio_dev);
return 0;
}
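
The refactor above lets one routine serve both the real interrupt handler and the ACPI wakeup check: a negative irq is used as a sentinel meaning "only report whether a wake-status bit is pending, do not signal EOI", and amd_gpio_check_wake() is registered against the GPIO interrupt in probe and unregistered in remove. A stripped-down sketch of the sentinel pattern with hypothetical example_* names:

    static bool do_example_irq_handler(int irq, void *dev_id)
    {
            bool pending = false;

            /* hypothetical: read and clear the controller's status bits, set 'pending' */

            if (irq < 0)
                    return pending;         /* wakeup-check path: report only, no EOI */

            /* hypothetical: signal EOI to the controller here */
            return pending;
    }

    static irqreturn_t example_irq_handler(int irq, void *dev_id)
    {
            return IRQ_RETVAL(do_example_irq_handler(irq, dev_id));
    }

    static bool example_check_wake(void *dev_id)
    {
            return do_example_irq_handler(-1, dev_id);
    }
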
diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c
index 0cc346bfc4c3..a7861079a650 100644
--- a/drivers/pinctrl/pinctrl-apple-gpio.c
+++ b/drivers/pinctrl/pinctrl-apple-gpio.c
@@ -258,7 +258,7 @@ static void apple_gpio_irq_ack(struct irq_data *data)
pctl->base + REG_IRQ(irqgrp, data->hwirq));
}
-static int apple_gpio_irq_type(unsigned int type)
+static unsigned int apple_gpio_irq_type(unsigned int type)
{
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
@@ -272,7 +272,7 @@ static int apple_gpio_irq_type(unsigned int type)
case IRQ_TYPE_LEVEL_LOW:
return REG_GPIOx_IN_IRQ_LO;
default:
- return -EINVAL;
+ return REG_GPIOx_IN_IRQ_OFF;
}
}
@@ -288,7 +288,7 @@ static void apple_gpio_irq_unmask(struct irq_data *data)
{
struct apple_gpio_pinctrl *pctl =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- int irqtype = apple_gpio_irq_type(irqd_get_trigger_type(data));
+ unsigned int irqtype = apple_gpio_irq_type(irqd_get_trigger_type(data));
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
FIELD_PREP(REG_GPIOx_MODE, irqtype));
@@ -313,10 +313,10 @@ static int apple_gpio_irq_set_type(struct irq_data *data,
{
struct apple_gpio_pinctrl *pctl =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- int irqtype = apple_gpio_irq_type(type);
+ unsigned int irqtype = apple_gpio_irq_type(type);
- if (irqtype < 0)
- return irqtype;
+ if (irqtype == REG_GPIOx_IN_IRQ_OFF)
+ return -EINVAL;
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
FIELD_PREP(REG_GPIOx_MODE, irqtype));
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index b9191f1abb1c..3e0c00766f59 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -197,6 +197,7 @@ config PINCTRL_QCOM_SPMI_PMIC
select PINMUX
select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
help
@@ -211,6 +212,7 @@ config PINCTRL_QCOM_SSBI_PMIC
select PINMUX
select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
help
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index c51793f6546f..fdfd7b8f3a76 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -1310,6 +1310,7 @@ static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
.ngpios = 151,
.wakeirq_map = sdm845_pdc_map,
.nwakeirq_map = ARRAY_SIZE(sdm845_pdc_map),
+ .wakeirq_dual_edge_errata = true,
};
static const struct msm_pinctrl_soc_data sdm845_acpi_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c
index 4d8f8636c2b3..1c042d39380c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c
@@ -1597,10 +1597,10 @@ static const struct msm_pingroup sm8350_groups[] = {
[200] = PINGROUP(200, qdss_gpio, _, _, _, _, _, _, _, _),
[201] = PINGROUP(201, _, _, _, _, _, _, _, _, _),
[202] = PINGROUP(202, _, _, _, _, _, _, _, _, _),
- [203] = UFS_RESET(ufs_reset, 0x1d8000),
- [204] = SDC_PINGROUP(sdc2_clk, 0x1cf000, 14, 6),
- [205] = SDC_PINGROUP(sdc2_cmd, 0x1cf000, 11, 3),
- [206] = SDC_PINGROUP(sdc2_data, 0x1cf000, 9, 0),
+ [203] = UFS_RESET(ufs_reset, 0xd8000),
+ [204] = SDC_PINGROUP(sdc2_clk, 0xcf000, 14, 6),
+ [205] = SDC_PINGROUP(sdc2_cmd, 0xcf000, 11, 3),
+ [206] = SDC_PINGROUP(sdc2_data, 0xcf000, 9, 0),
};
static const struct msm_gpio_wakeirq_map sm8350_pdc_map[] = {
diff --git a/drivers/pinctrl/ralink/pinctrl-mt7620.c b/drivers/pinctrl/ralink/pinctrl-mt7620.c
index 425d55a2ee19..6853b5b8b0fe 100644
--- a/drivers/pinctrl/ralink/pinctrl-mt7620.c
+++ b/drivers/pinctrl/ralink/pinctrl-mt7620.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/mt7620.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 8d734bfc33d2..50bd26a30ac0 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -275,7 +275,7 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev,
return 0;
}
-static struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev,
+static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev,
unsigned int offset)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
@@ -289,7 +289,7 @@ static struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctlde
continue;
for (j = 0; j < num_pins; j++) {
if (offset == pins[j])
- return (struct tegra_pingroup *)&pmx->soc->groups[group];
+ return &pmx->soc->groups[group];
}
}
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra194.c b/drivers/pinctrl/tegra/pinctrl-tegra194.c
index b4fef9185d88..5c1dfcb46749 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra194.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra194.c
@@ -1387,7 +1387,6 @@ static struct tegra_function tegra194_functions[] = {
.schmitt_bit = schmitt_b, \
.drvtype_bit = 13, \
.lpdr_bit = e_lpdr, \
- .drv_reg = -1, \
#define drive_touch_clk_pcc4 DRV_PINGROUP_ENTRY_Y(0x2004, 12, 5, 20, 5, -1, -1, -1, -1, 1)
#define drive_uart3_rx_pcc6 DRV_PINGROUP_ENTRY_Y(0x200c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c
index 0b7f58feb701..c897a2f15840 100644
--- a/drivers/platform/mellanox/mlxreg-lc.c
+++ b/drivers/platform/mellanox/mlxreg-lc.c
@@ -413,7 +413,7 @@ mlxreg_lc_create_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotpl
int size)
{
struct mlxreg_hotplug_device *dev = devs;
- int i;
+ int i, ret;
/* Create static I2C device feeding by auxiliary or main power. */
for (i = 0; i < size; i++, dev++) {
@@ -423,6 +423,7 @@ mlxreg_lc_create_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotpl
dev->brdinfo->type, dev->nr, dev->brdinfo->addr);
dev->adapter = NULL;
+ ret = PTR_ERR(dev->client);
goto fail_create_static_devices;
}
}
@@ -435,7 +436,7 @@ fail_create_static_devices:
i2c_unregister_device(dev->client);
dev->client = NULL;
}
- return IS_ERR(dev->client);
+ return ret;
}
static void
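
The old error path returned IS_ERR(dev->client), which is a 0/1 value rather than a negative errno, and because the unwind loop clears dev->client it most likely returned 0 (success) on failure; capturing PTR_ERR() into ret at the point of failure fixes both problems. A generic sketch of the pattern (struct foo and the foo_* helpers are hypothetical):

    struct foo;                                      /* hypothetical object type */
    struct foo *foo_create(int i);                   /* returns a valid pointer or ERR_PTR() */
    void foo_destroy(struct foo *obj);

    static int example_create_all(struct foo **objs, int n)
    {
            int i, ret;

            for (i = 0; i < n; i++) {
                    objs[i] = foo_create(i);
                    if (IS_ERR(objs[i])) {
                            ret = PTR_ERR(objs[i]);  /* capture the errno before the pointer is reused */
                            objs[i] = NULL;
                            goto unwind;
                    }
            }
            return 0;

    unwind:
            while (--i >= 0) {
                    foo_destroy(objs[i]);
                    objs[i] = NULL;
            }
            return ret;                              /* negative errno, never IS_ERR()'s 0/1 */
    }
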
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index d4c079f4afc6..7400bc5da5be 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -185,7 +185,7 @@ config ACER_WMI
config AMD_PMC
tristate "AMD SoC PMC driver"
- depends on ACPI && PCI
+ depends on ACPI && PCI && RTC_CLASS
help
The driver provides support for AMD Power Management Controller
primarily responsible for S2Idle transactions that are driven from
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index 2fffa57e596e..fe224a54f24c 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -187,7 +187,7 @@ config DELL_WMI_AIO
config DELL_WMI_DESCRIPTOR
tristate
- default m
+ default n
depends on ACPI_WMI
config DELL_WMI_LED
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index b183967ecfb7..435a91fe2568 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -331,9 +331,11 @@ static int lis3lv02d_probe(struct platform_device *device)
INIT_WORK(&hpled_led.work, delayed_set_status_worker);
ret = led_classdev_register(NULL, &hpled_led.led_classdev);
if (ret) {
+ i8042_remove_filter(hp_accel_i8042_filter);
lis3lv02d_joystick_disable(&lis3_dev);
lis3lv02d_poweroff(&lis3_dev);
flush_work(&hpled_led.work);
+ lis3lv02d_remove_fs(&lis3_dev);
return ret;
}
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index 7ee010aa740a..c1d9ed9b7b67 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -152,7 +152,7 @@ struct sabi_config {
static const struct sabi_config sabi_configs[] = {
{
- /* I don't know if it is really 2, but it it is
+ /* I don't know if it is really 2, but it is
* less than 3 anyway */
.sabi_version = 2,
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 9472aae72df2..c4d9c45350f7 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -888,8 +888,10 @@ static int tlmi_analyze(void)
break;
if (!item)
break;
- if (!*item)
+ if (!*item) {
+ kfree(item);
continue;
+ }
/* It is not allowed to have '/' for file name. Convert it into '\'. */
strreplace(item, '/', '\\');
@@ -902,6 +904,7 @@ static int tlmi_analyze(void)
setting = kzalloc(sizeof(*setting), GFP_KERNEL);
if (!setting) {
ret = -ENOMEM;
+ kfree(item);
goto fail_clear_attr;
}
setting->index = i;
@@ -916,7 +919,6 @@ static int tlmi_analyze(void)
}
kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
tlmi_priv.setting[i] = setting;
- tlmi_priv.settings_count++;
kfree(item);
}
@@ -983,7 +985,12 @@ static void tlmi_remove(struct wmi_device *wdev)
static int tlmi_probe(struct wmi_device *wdev, const void *context)
{
- tlmi_analyze();
+ int ret;
+
+ ret = tlmi_analyze();
+ if (ret)
+ return ret;
+
return tlmi_sysfs_init();
}
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
index f8e26823075f..2ce5086a5af2 100644
--- a/drivers/platform/x86/think-lmi.h
+++ b/drivers/platform/x86/think-lmi.h
@@ -55,7 +55,6 @@ struct tlmi_attr_setting {
struct think_lmi {
struct wmi_device *wmi_device;
- int settings_count;
bool can_set_bios_settings;
bool can_get_bios_selections;
bool can_set_bios_password;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 9c632df734bb..b3ac9c3f3b7c 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1105,15 +1105,6 @@ static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk)
return status;
}
-/* Query FW and update rfkill sw state for all rfkill switches */
-static void tpacpi_rfk_update_swstate_all(void)
-{
- unsigned int i;
-
- for (i = 0; i < TPACPI_RFK_SW_MAX; i++)
- tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[i]);
-}
-
/*
* Sync the HW-blocking state of all rfkill switches,
* do notice it causes the rfkill core to schedule uevents
@@ -3074,9 +3065,6 @@ static void tpacpi_send_radiosw_update(void)
if (wlsw == TPACPI_RFK_RADIO_OFF)
tpacpi_rfk_update_hwblock_state(true);
- /* Sync sw blocking state */
- tpacpi_rfk_update_swstate_all();
-
/* Sync hw blocking state last if it is hw-unblocked */
if (wlsw == TPACPI_RFK_RADIO_ON)
tpacpi_rfk_update_hwblock_state(false);
@@ -8766,6 +8754,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */
TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (3nd gen) */
+ TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (4nd gen) */
TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */
};
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
index 44faa3a74db6..b740866b228d 100644
--- a/drivers/powercap/dtpm_cpu.c
+++ b/drivers/powercap/dtpm_cpu.c
@@ -166,16 +166,13 @@ static struct dtpm_ops dtpm_ops = {
static int cpuhp_dtpm_cpu_offline(unsigned int cpu)
{
- struct em_perf_domain *pd;
struct dtpm_cpu *dtpm_cpu;
- pd = em_cpu_get(cpu);
- if (!pd)
- return -EINVAL;
-
dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
+ if (dtpm_cpu)
+ dtpm_update_power(&dtpm_cpu->dtpm);
- return dtpm_update_power(&dtpm_cpu->dtpm);
+ return 0;
}
static int cpuhp_dtpm_cpu_online(unsigned int cpu)
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index 6bc5791a7ec5..08e429a06922 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -1699,12 +1699,9 @@ static int initialize_dco_operating_mode(struct idtcm_channel *channel)
/* PTP Hardware Clock interface */
-/**
+/*
* Maximum absolute value for write phase offset in picoseconds
*
- * @channel: channel
- * @delta_ns: delta in nanoseconds
- *
* Destination signed register is 32-bit register in resolution of 50ps
*
* 0x7fffffff * 50 = 2147483647 * 50 = 107374182350
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 34f943c8c9fd..0f1b5a7d2a89 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -1304,10 +1304,11 @@ ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r)
if (!ext)
return -ENOMEM;
- err = -EINVAL;
ext->mem = ptp_ocp_get_mem(bp, r);
- if (!ext->mem)
+ if (IS_ERR(ext->mem)) {
+ err = PTR_ERR(ext->mem);
goto out;
+ }
ext->bp = bp;
ext->info = r->extra;
@@ -1371,8 +1372,8 @@ ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r)
void __iomem *mem;
mem = ptp_ocp_get_mem(bp, r);
- if (!mem)
- return -EINVAL;
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
bp_assign_entry(bp, r, mem);
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 11a10b575ace..18cf974ac776 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -42,12 +42,16 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pm_opp.h>
#include <linux/pwm.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/reset.h>
+#include <soc/tegra/common.h>
+
#define PWM_ENABLE (1 << 31)
#define PWM_DUTY_WIDTH 8
#define PWM_DUTY_SHIFT 16
@@ -145,7 +149,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
required_clk_rate =
(NSEC_PER_SEC / period_ns) << PWM_DUTY_WIDTH;
- err = clk_set_rate(pc->clk, required_clk_rate);
+ err = dev_pm_opp_set_rate(pc->dev, required_clk_rate);
if (err < 0)
return -EINVAL;
@@ -181,8 +185,8 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* before writing the register. Otherwise, keep it enabled.
*/
if (!pwm_is_enabled(pwm)) {
- err = clk_prepare_enable(pc->clk);
- if (err < 0)
+ err = pm_runtime_resume_and_get(pc->dev);
+ if (err)
return err;
} else
val |= PWM_ENABLE;
@@ -193,7 +197,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* If the PWM is not enabled, turn the clock off again to save power.
*/
if (!pwm_is_enabled(pwm))
- clk_disable_unprepare(pc->clk);
+ pm_runtime_put(pc->dev);
return 0;
}
@@ -204,8 +208,8 @@ static int tegra_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
int rc = 0;
u32 val;
- rc = clk_prepare_enable(pc->clk);
- if (rc < 0)
+ rc = pm_runtime_resume_and_get(pc->dev);
+ if (rc)
return rc;
val = pwm_readl(pc, pwm->hwpwm);
@@ -224,7 +228,7 @@ static void tegra_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
val &= ~PWM_ENABLE;
pwm_writel(pc, pwm->hwpwm, val);
- clk_disable_unprepare(pc->clk);
+ pm_runtime_put_sync(pc->dev);
}
static const struct pwm_ops tegra_pwm_ops = {
@@ -256,11 +260,20 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->clk))
return PTR_ERR(pwm->clk);
+ ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
/* Set maximum frequency of the IP */
- ret = clk_set_rate(pwm->clk, pwm->soc->max_frequency);
+ ret = dev_pm_opp_set_rate(pwm->dev, pwm->soc->max_frequency);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to set max frequency: %d\n", ret);
- return ret;
+ goto put_pm;
}
/*
@@ -278,7 +291,7 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (IS_ERR(pwm->rst)) {
ret = PTR_ERR(pwm->rst);
dev_err(&pdev->dev, "Reset control is not found: %d\n", ret);
- return ret;
+ goto put_pm;
}
reset_control_deassert(pwm->rst);
@@ -291,10 +304,16 @@ static int tegra_pwm_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
reset_control_assert(pwm->rst);
- return ret;
+ goto put_pm;
}
+ pm_runtime_put(&pdev->dev);
+
return 0;
+put_pm:
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
+ return ret;
}
static int tegra_pwm_remove(struct platform_device *pdev)
@@ -305,20 +324,44 @@ static int tegra_pwm_remove(struct platform_device *pdev)
reset_control_assert(pc->rst);
+ pm_runtime_force_suspend(&pdev->dev);
+
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int tegra_pwm_suspend(struct device *dev)
+static int __maybe_unused tegra_pwm_runtime_suspend(struct device *dev)
{
- return pinctrl_pm_select_sleep_state(dev);
+ struct tegra_pwm_chip *pc = dev_get_drvdata(dev);
+ int err;
+
+ clk_disable_unprepare(pc->clk);
+
+ err = pinctrl_pm_select_sleep_state(dev);
+ if (err) {
+ clk_prepare_enable(pc->clk);
+ return err;
+ }
+
+ return 0;
}
-static int tegra_pwm_resume(struct device *dev)
+static int __maybe_unused tegra_pwm_runtime_resume(struct device *dev)
{
- return pinctrl_pm_select_default_state(dev);
+ struct tegra_pwm_chip *pc = dev_get_drvdata(dev);
+ int err;
+
+ err = pinctrl_pm_select_default_state(dev);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(pc->clk);
+ if (err) {
+ pinctrl_pm_select_sleep_state(dev);
+ return err;
+ }
+
+ return 0;
}
-#endif
static const struct tegra_pwm_soc tegra20_pwm_soc = {
.num_channels = 4,
@@ -344,7 +387,10 @@ static const struct of_device_id tegra_pwm_of_match[] = {
MODULE_DEVICE_TABLE(of, tegra_pwm_of_match);
static const struct dev_pm_ops tegra_pwm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tegra_pwm_suspend, tegra_pwm_resume)
+ SET_RUNTIME_PM_OPS(tegra_pwm_runtime_suspend, tegra_pwm_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static struct platform_driver tegra_pwm_driver = {
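
The conversion above hands clock handling to runtime PM and routes rate changes through the OPP table, so hardware access is bracketed by pm_runtime_resume_and_get()/pm_runtime_put() and system sleep is implemented by forcing runtime suspend/resume. A condensed sketch of the resulting call pattern around a register write (example_apply_config is a hypothetical helper):

    static int example_apply_config(struct device *dev, unsigned long rate)
    {
            int err;

            err = pm_runtime_resume_and_get(dev);   /* runtime-resumes: clocks on, pins in default state */
            if (err)
                    return err;

            err = dev_pm_opp_set_rate(dev, rate);   /* sets the clock through the OPP table */
            if (!err) {
                    /* hypothetical: program the hardware here; it is powered and clocked */
            }

            pm_runtime_put(dev);                    /* balanced put; clocks may gate again */
            return err;
    }
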
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 2c40fe15da55..6043c832d09e 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -731,7 +731,7 @@ static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
ff_flag = (devmap->features & DASD_FEATURE_FAILFAST) != 0;
else
ff_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_FAILFAST) != 0;
- return snprintf(buf, PAGE_SIZE, ff_flag ? "1\n" : "0\n");
+ return sysfs_emit(buf, ff_flag ? "1\n" : "0\n");
}
static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
@@ -773,7 +773,7 @@ dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf)
spin_unlock(&dasd_devmap_lock);
out:
- return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n");
+ return sysfs_emit(buf, ro_flag ? "1\n" : "0\n");
}
static ssize_t
@@ -834,7 +834,7 @@ dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf)
erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0;
else
erplog = (DASD_FEATURE_DEFAULT & DASD_FEATURE_ERPLOG) != 0;
- return snprintf(buf, PAGE_SIZE, erplog ? "1\n" : "0\n");
+ return sysfs_emit(buf, erplog ? "1\n" : "0\n");
}
static ssize_t
@@ -1033,13 +1033,13 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr,
dasd_put_device(device);
goto out;
} else {
- len = snprintf(buf, PAGE_SIZE, "%s\n",
- device->discipline->name);
+ len = sysfs_emit(buf, "%s\n",
+ device->discipline->name);
dasd_put_device(device);
return len;
}
out:
- len = snprintf(buf, PAGE_SIZE, "none\n");
+ len = sysfs_emit(buf, "none\n");
return len;
}
@@ -1056,30 +1056,30 @@ dasd_device_status_show(struct device *dev, struct device_attribute *attr,
if (!IS_ERR(device)) {
switch (device->state) {
case DASD_STATE_NEW:
- len = snprintf(buf, PAGE_SIZE, "new\n");
+ len = sysfs_emit(buf, "new\n");
break;
case DASD_STATE_KNOWN:
- len = snprintf(buf, PAGE_SIZE, "detected\n");
+ len = sysfs_emit(buf, "detected\n");
break;
case DASD_STATE_BASIC:
- len = snprintf(buf, PAGE_SIZE, "basic\n");
+ len = sysfs_emit(buf, "basic\n");
break;
case DASD_STATE_UNFMT:
- len = snprintf(buf, PAGE_SIZE, "unformatted\n");
+ len = sysfs_emit(buf, "unformatted\n");
break;
case DASD_STATE_READY:
- len = snprintf(buf, PAGE_SIZE, "ready\n");
+ len = sysfs_emit(buf, "ready\n");
break;
case DASD_STATE_ONLINE:
- len = snprintf(buf, PAGE_SIZE, "online\n");
+ len = sysfs_emit(buf, "online\n");
break;
default:
- len = snprintf(buf, PAGE_SIZE, "no stat\n");
+ len = sysfs_emit(buf, "no stat\n");
break;
}
dasd_put_device(device);
} else
- len = snprintf(buf, PAGE_SIZE, "unknown\n");
+ len = sysfs_emit(buf, "unknown\n");
return len;
}
@@ -1120,7 +1120,7 @@ static ssize_t dasd_vendor_show(struct device *dev,
device = dasd_device_from_cdev(to_ccwdev(dev));
vendor = "";
if (IS_ERR(device))
- return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
+ return sysfs_emit(buf, "%s\n", vendor);
if (device->discipline && device->discipline->get_uid &&
!device->discipline->get_uid(device, &uid))
@@ -1128,7 +1128,7 @@ static ssize_t dasd_vendor_show(struct device *dev,
dasd_put_device(device);
- return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
+ return sysfs_emit(buf, "%s\n", vendor);
}
static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
@@ -1148,7 +1148,7 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
device = dasd_device_from_cdev(to_ccwdev(dev));
uid_string[0] = 0;
if (IS_ERR(device))
- return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+ return sysfs_emit(buf, "%s\n", uid_string);
if (device->discipline && device->discipline->get_uid &&
!device->discipline->get_uid(device, &uid)) {
@@ -1183,7 +1183,7 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
}
dasd_put_device(device);
- return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+ return sysfs_emit(buf, "%s\n", uid_string);
}
static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
@@ -1201,7 +1201,7 @@ dasd_eer_show(struct device *dev, struct device_attribute *attr, char *buf)
eer_flag = dasd_eer_enabled(devmap->device);
else
eer_flag = 0;
- return snprintf(buf, PAGE_SIZE, eer_flag ? "1\n" : "0\n");
+ return sysfs_emit(buf, eer_flag ? "1\n" : "0\n");
}
static ssize_t
@@ -1243,7 +1243,7 @@ dasd_expires_show(struct device *dev, struct device_attribute *attr, char *buf)
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
- len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_expires);
+ len = sysfs_emit(buf, "%lu\n", device->default_expires);
dasd_put_device(device);
return len;
}
@@ -1283,7 +1283,7 @@ dasd_retries_show(struct device *dev, struct device_attribute *attr, char *buf)
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
- len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_retries);
+ len = sysfs_emit(buf, "%lu\n", device->default_retries);
dasd_put_device(device);
return len;
}
@@ -1324,7 +1324,7 @@ dasd_timeout_show(struct device *dev, struct device_attribute *attr,
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
- len = snprintf(buf, PAGE_SIZE, "%lu\n", device->blk_timeout);
+ len = sysfs_emit(buf, "%lu\n", device->blk_timeout);
dasd_put_device(device);
return len;
}
@@ -1398,11 +1398,11 @@ static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr,
return -ENODEV;
if (!device->discipline || !device->discipline->hpf_enabled) {
dasd_put_device(device);
- return snprintf(buf, PAGE_SIZE, "%d\n", dasd_nofcx);
+ return sysfs_emit(buf, "%d\n", dasd_nofcx);
}
hpf = device->discipline->hpf_enabled(device);
dasd_put_device(device);
- return snprintf(buf, PAGE_SIZE, "%d\n", hpf);
+ return sysfs_emit(buf, "%d\n", hpf);
}
static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL);
@@ -1416,13 +1416,13 @@ static ssize_t dasd_reservation_policy_show(struct device *dev,
devmap = dasd_find_busid(dev_name(dev));
if (IS_ERR(devmap)) {
- rc = snprintf(buf, PAGE_SIZE, "ignore\n");
+ rc = sysfs_emit(buf, "ignore\n");
} else {
spin_lock(&dasd_devmap_lock);
if (devmap->features & DASD_FEATURE_FAILONSLCK)
- rc = snprintf(buf, PAGE_SIZE, "fail\n");
+ rc = sysfs_emit(buf, "fail\n");
else
- rc = snprintf(buf, PAGE_SIZE, "ignore\n");
+ rc = sysfs_emit(buf, "ignore\n");
spin_unlock(&dasd_devmap_lock);
}
return rc;
@@ -1457,14 +1457,14 @@ static ssize_t dasd_reservation_state_show(struct device *dev,
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
- return snprintf(buf, PAGE_SIZE, "none\n");
+ return sysfs_emit(buf, "none\n");
if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags))
- rc = snprintf(buf, PAGE_SIZE, "reserved\n");
+ rc = sysfs_emit(buf, "reserved\n");
else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags))
- rc = snprintf(buf, PAGE_SIZE, "lost\n");
+ rc = sysfs_emit(buf, "lost\n");
else
- rc = snprintf(buf, PAGE_SIZE, "none\n");
+ rc = sysfs_emit(buf, "none\n");
dasd_put_device(device);
return rc;
}
@@ -1531,7 +1531,7 @@ dasd_path_threshold_show(struct device *dev,
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
- len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_thrhld);
+ len = sysfs_emit(buf, "%lu\n", device->path_thrhld);
dasd_put_device(device);
return len;
}
@@ -1578,7 +1578,7 @@ dasd_path_autodisable_show(struct device *dev,
else
flag = (DASD_FEATURE_DEFAULT &
DASD_FEATURE_PATH_AUTODISABLE) != 0;
- return snprintf(buf, PAGE_SIZE, flag ? "1\n" : "0\n");
+ return sysfs_emit(buf, flag ? "1\n" : "0\n");
}
static ssize_t
@@ -1616,7 +1616,7 @@ dasd_path_interval_show(struct device *dev,
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
- len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_interval);
+ len = sysfs_emit(buf, "%lu\n", device->path_interval);
dasd_put_device(device);
return len;
}
@@ -1662,9 +1662,9 @@ dasd_device_fcs_show(struct device *dev, struct device_attribute *attr,
return -ENODEV;
fc_sec = dasd_path_get_fcs_device(device);
if (fc_sec == -EINVAL)
- rc = snprintf(buf, PAGE_SIZE, "Inconsistent\n");
+ rc = sysfs_emit(buf, "Inconsistent\n");
else
- rc = snprintf(buf, PAGE_SIZE, "%s\n", dasd_path_get_fcs_str(fc_sec));
+ rc = sysfs_emit(buf, "%s\n", dasd_path_get_fcs_str(fc_sec));
dasd_put_device(device);
return rc;
@@ -1677,7 +1677,7 @@ dasd_path_fcs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
struct dasd_path *path = to_dasd_path(kobj);
unsigned int fc_sec = path->fc_security;
- return snprintf(buf, PAGE_SIZE, "%s\n", dasd_path_get_fcs_str(fc_sec));
+ return sysfs_emit(buf, "%s\n", dasd_path_get_fcs_str(fc_sec));
}
static struct kobj_attribute path_fcs_attribute =
@@ -1698,7 +1698,7 @@ static ssize_t dasd_##_name##_show(struct device *dev, \
val = _func(device); \
dasd_put_device(device); \
\
- return snprintf(buf, PAGE_SIZE, "%d\n", val); \
+ return sysfs_emit(buf, "%d\n", val); \
} \
static DEVICE_ATTR(_name, 0444, dasd_##_name##_show, NULL); \
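
sysfs_emit() is the preferred replacement for snprintf(buf, PAGE_SIZE, ...) in sysfs show() callbacks: it knows the buffer is a single page handed in by the sysfs core, so callers stop open-coding the size and get an extra sanity check for free; the conversions above are otherwise mechanical. A minimal sketch of a show() callback written this way (example_flag is a hypothetical attribute):

    static ssize_t example_flag_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
    {
            bool flag = dev_get_drvdata(dev) != NULL;   /* hypothetical condition */

            return sysfs_emit(buf, flag ? "1\n" : "0\n");
    }
    static DEVICE_ATTR_RO(example_flag);
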
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 646ec796bb83..dfde0d941c3c 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1047,24 +1047,24 @@ raw3270_probe (struct ccw_device *cdev)
static ssize_t
raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%i\n",
- ((struct raw3270 *) dev_get_drvdata(dev))->model);
+ return sysfs_emit(buf, "%i\n",
+ ((struct raw3270 *)dev_get_drvdata(dev))->model);
}
static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL);
static ssize_t
raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%i\n",
- ((struct raw3270 *) dev_get_drvdata(dev))->rows);
+ return sysfs_emit(buf, "%i\n",
+ ((struct raw3270 *)dev_get_drvdata(dev))->rows);
}
static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL);
static ssize_t
raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%i\n",
- ((struct raw3270 *) dev_get_drvdata(dev))->cols);
+ return sysfs_emit(buf, "%i\n",
+ ((struct raw3270 *)dev_get_drvdata(dev))->cols);
}
static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL);
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 1097e76982a5..5440f285f349 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -285,7 +285,7 @@ static ssize_t chp_configure_show(struct device *dev,
if (status < 0)
return status;
- return snprintf(buf, PAGE_SIZE, "%d\n", status);
+ return sysfs_emit(buf, "%d\n", status);
}
static int cfg_wait_idle(void);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 73a353153d33..10d2655ef676 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1695,10 +1695,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
if (IS_FWI2_CAPABLE(vha->hw))
mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
- if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
- mcp->in_mb |= MBX_15;
- mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23;
- }
+ if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
+ mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 55addd78fde4..7afcec250f9b 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -792,6 +792,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
int i, ret;
struct scsi_device *sdev = to_scsi_device(dev);
enum scsi_device_state state = 0;
+ bool rescan_dev = false;
for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
const int len = strlen(sdev_states[i].name);
@@ -810,20 +811,27 @@ store_state_field(struct device *dev, struct device_attribute *attr,
}
mutex_lock(&sdev->state_mutex);
- ret = scsi_device_set_state(sdev, state);
- /*
- * If the device state changes to SDEV_RUNNING, we need to
- * run the queue to avoid I/O hang, and rescan the device
- * to revalidate it. Running the queue first is necessary
- * because another thread may be waiting inside
- * blk_mq_freeze_queue_wait() and because that call may be
- * waiting for pending I/O to finish.
- */
- if (ret == 0 && state == SDEV_RUNNING) {
+ if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
+ ret = count;
+ } else {
+ ret = scsi_device_set_state(sdev, state);
+ if (ret == 0 && state == SDEV_RUNNING)
+ rescan_dev = true;
+ }
+ mutex_unlock(&sdev->state_mutex);
+
+ if (rescan_dev) {
+ /*
+ * If the device state changes to SDEV_RUNNING, we need to
+ * run the queue to avoid I/O hang, and rescan the device
+ * to revalidate it. Running the queue first is necessary
+ * because another thread may be waiting inside
+ * blk_mq_freeze_queue_wait() and because that call may be
+ * waiting for pending I/O to finish.
+ */
blk_mq_run_hw_queues(sdev->request_queue, true);
scsi_rescan_device(dev);
}
- mutex_unlock(&sdev->state_mutex);
return ret == 0 ? count : -EINVAL;
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 78343d3f9385..554b6f784223 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1899,12 +1899,12 @@ static void session_recovery_timedout(struct work_struct *work)
}
spin_unlock_irqrestore(&session->lock, flags);
- if (session->transport->session_recovery_timedout)
- session->transport->session_recovery_timedout(session);
-
ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
+
+ if (session->transport->session_recovery_timedout)
+ session->transport->session_recovery_timedout(session);
}
static void __iscsi_unblock_session(struct work_struct *work)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index afd38142b1c0..13c09dbd99b9 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -6453,9 +6453,8 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
irqreturn_t ret = IRQ_NONE;
int tag;
- pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
-
spin_lock_irqsave(hba->host->host_lock, flags);
+ pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
issued = hba->outstanding_tasks & ~pending;
for_each_set_bit(tag, &issued, hba->nutmrs) {
struct request *req = hba->tmf_rqs[tag];
@@ -6616,11 +6615,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
err = wait_for_completion_io_timeout(&wait,
msecs_to_jiffies(TM_CMD_TIMEOUT));
if (!err) {
- /*
- * Make sure that ufshcd_compl_tm() does not trigger a
- * use-after-free.
- */
- req->end_io_data = NULL;
ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
__func__, tm_function);
@@ -7116,6 +7110,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto release;
}
+ lrbp->cmd = NULL;
err = SUCCESS;
release:
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index e8a30c4c5aec..a8562678c437 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -3,6 +3,7 @@ menu "SOC (System On Chip) specific Drivers"
source "drivers/soc/actions/Kconfig"
source "drivers/soc/amlogic/Kconfig"
+source "drivers/soc/apple/Kconfig"
source "drivers/soc/aspeed/Kconfig"
source "drivers/soc/atmel/Kconfig"
source "drivers/soc/bcm/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index a05e9fbcd3e0..adb30c2d4fea 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -4,6 +4,7 @@
#
obj-$(CONFIG_ARCH_ACTIONS) += actions/
+obj-$(CONFIG_ARCH_APPLE) += apple/
obj-y += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
diff --git a/drivers/soc/apple/Kconfig b/drivers/soc/apple/Kconfig
new file mode 100644
index 000000000000..9b8de31d6a8f
--- /dev/null
+++ b/drivers/soc/apple/Kconfig
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+if ARCH_APPLE || COMPILE_TEST
+
+menu "Apple SoC drivers"
+
+config APPLE_PMGR_PWRSTATE
+ bool "Apple SoC PMGR power state control"
+ depends on PM
+ select REGMAP
+ select MFD_SYSCON
+ select PM_GENERIC_DOMAINS
+ select RESET_CONTROLLER
+ default ARCH_APPLE
+ help
+ The PMGR block in Apple SoCs provides high-level power state
+ controls for SoC devices. This driver manages them through the
+ generic power domain framework, and also provides reset support.
+
+endmenu
+
+endif
diff --git a/drivers/soc/apple/Makefile b/drivers/soc/apple/Makefile
new file mode 100644
index 000000000000..c114e84667e4
--- /dev/null
+++ b/drivers/soc/apple/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_APPLE_PMGR_PWRSTATE) += apple-pmgr-pwrstate.o
diff --git a/drivers/soc/apple/apple-pmgr-pwrstate.c b/drivers/soc/apple/apple-pmgr-pwrstate.c
new file mode 100644
index 000000000000..e1122288409a
--- /dev/null
+++ b/drivers/soc/apple/apple-pmgr-pwrstate.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SoC PMGR device power state driver
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/reset-controller.h>
+#include <linux/module.h>
+
+#define APPLE_PMGR_RESET BIT(31)
+#define APPLE_PMGR_AUTO_ENABLE BIT(28)
+#define APPLE_PMGR_PS_AUTO GENMASK(27, 24)
+#define APPLE_PMGR_PS_MIN GENMASK(19, 16)
+#define APPLE_PMGR_PARENT_OFF BIT(11)
+#define APPLE_PMGR_DEV_DISABLE BIT(10)
+#define APPLE_PMGR_WAS_CLKGATED BIT(9)
+#define APPLE_PMGR_WAS_PWRGATED BIT(8)
+#define APPLE_PMGR_PS_ACTUAL GENMASK(7, 4)
+#define APPLE_PMGR_PS_TARGET GENMASK(3, 0)
+
+#define APPLE_PMGR_FLAGS (APPLE_PMGR_WAS_CLKGATED | APPLE_PMGR_WAS_PWRGATED)
+
+#define APPLE_PMGR_PS_ACTIVE 0xf
+#define APPLE_PMGR_PS_CLKGATE 0x4
+#define APPLE_PMGR_PS_PWRGATE 0x0
+
+#define APPLE_PMGR_PS_SET_TIMEOUT 100
+#define APPLE_PMGR_RESET_TIME 1
+
+struct apple_pmgr_ps {
+ struct device *dev;
+ struct generic_pm_domain genpd;
+ struct reset_controller_dev rcdev;
+ struct regmap *regmap;
+ u32 offset;
+ u32 min_state;
+};
+
+#define genpd_to_apple_pmgr_ps(_genpd) container_of(_genpd, struct apple_pmgr_ps, genpd)
+#define rcdev_to_apple_pmgr_ps(_rcdev) container_of(_rcdev, struct apple_pmgr_ps, rcdev)
+
+static int apple_pmgr_ps_set(struct generic_pm_domain *genpd, u32 pstate, bool auto_enable)
+{
+ int ret;
+ struct apple_pmgr_ps *ps = genpd_to_apple_pmgr_ps(genpd);
+ u32 reg;
+
+ ret = regmap_read(ps->regmap, ps->offset, &reg);
+ if (ret < 0)
+ return ret;
+
+ /* Resets are synchronous, and only work if the device is powered and clocked. */
+ if (reg & APPLE_PMGR_RESET && pstate != APPLE_PMGR_PS_ACTIVE)
+ dev_err(ps->dev, "PS %s: powering off with RESET active\n",
+ genpd->name);
+
+ reg &= ~(APPLE_PMGR_AUTO_ENABLE | APPLE_PMGR_FLAGS | APPLE_PMGR_PS_TARGET);
+ reg |= FIELD_PREP(APPLE_PMGR_PS_TARGET, pstate);
+
+ dev_dbg(ps->dev, "PS %s: pwrstate = 0x%x: 0x%x\n", genpd->name, pstate, reg);
+
+ regmap_write(ps->regmap, ps->offset, reg);
+
+ ret = regmap_read_poll_timeout_atomic(
+ ps->regmap, ps->offset, reg,
+ (FIELD_GET(APPLE_PMGR_PS_ACTUAL, reg) == pstate), 1,
+ APPLE_PMGR_PS_SET_TIMEOUT);
+ if (ret < 0)
+ dev_err(ps->dev, "PS %s: Failed to reach power state 0x%x (now: 0x%x)\n",
+ genpd->name, pstate, reg);
+
+ if (auto_enable) {
+ /* Not all devices implement this; this is a no-op where not implemented. */
+ reg &= ~APPLE_PMGR_FLAGS;
+ reg |= APPLE_PMGR_AUTO_ENABLE;
+ regmap_write(ps->regmap, ps->offset, reg);
+ }
+
+ return ret;
+}
+
+static bool apple_pmgr_ps_is_active(struct apple_pmgr_ps *ps)
+{
+ u32 reg = 0;
+
+ regmap_read(ps->regmap, ps->offset, &reg);
+ /*
+ * We consider domains as active if they are actually on, or if they have auto-PM
+ * enabled and the intended target is on.
+ */
+ return (FIELD_GET(APPLE_PMGR_PS_ACTUAL, reg) == APPLE_PMGR_PS_ACTIVE ||
+ (FIELD_GET(APPLE_PMGR_PS_TARGET, reg) == APPLE_PMGR_PS_ACTIVE &&
+ reg & APPLE_PMGR_AUTO_ENABLE));
+}
+
+static int apple_pmgr_ps_power_on(struct generic_pm_domain *genpd)
+{
+ return apple_pmgr_ps_set(genpd, APPLE_PMGR_PS_ACTIVE, true);
+}
+
+static int apple_pmgr_ps_power_off(struct generic_pm_domain *genpd)
+{
+ return apple_pmgr_ps_set(genpd, APPLE_PMGR_PS_PWRGATE, false);
+}
+
+static int apple_pmgr_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev);
+
+ mutex_lock(&ps->genpd.mlock);
+
+ if (ps->genpd.status == GENPD_STATE_OFF)
+ dev_err(ps->dev, "PS 0x%x: asserting RESET while powered down\n", ps->offset);
+
+ dev_dbg(ps->dev, "PS 0x%x: assert reset\n", ps->offset);
+ /* Quiesce device before asserting reset */
+ regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_DEV_DISABLE,
+ APPLE_PMGR_DEV_DISABLE);
+ regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_RESET,
+ APPLE_PMGR_RESET);
+
+ mutex_unlock(&ps->genpd.mlock);
+
+ return 0;
+}
+
+static int apple_pmgr_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev);
+
+ mutex_lock(&ps->genpd.mlock);
+
+ dev_dbg(ps->dev, "PS 0x%x: deassert reset\n", ps->offset);
+ regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_RESET, 0);
+ regmap_update_bits(ps->regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_DEV_DISABLE, 0);
+
+ if (ps->genpd.status == GENPD_STATE_OFF)
+ dev_err(ps->dev, "PS 0x%x: RESET was deasserted while powered down\n", ps->offset);
+
+ mutex_unlock(&ps->genpd.mlock);
+
+ return 0;
+}
+
+static int apple_pmgr_reset_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ int ret;
+
+ ret = apple_pmgr_reset_assert(rcdev, id);
+ if (ret)
+ return ret;
+
+ usleep_range(APPLE_PMGR_RESET_TIME, 2 * APPLE_PMGR_RESET_TIME);
+
+ return apple_pmgr_reset_deassert(rcdev, id);
+}
+
+static int apple_pmgr_reset_status(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct apple_pmgr_ps *ps = rcdev_to_apple_pmgr_ps(rcdev);
+ u32 reg = 0;
+
+ regmap_read(ps->regmap, ps->offset, &reg);
+
+ return !!(reg & APPLE_PMGR_RESET);
+}
+
+const struct reset_control_ops apple_pmgr_reset_ops = {
+ .assert = apple_pmgr_reset_assert,
+ .deassert = apple_pmgr_reset_deassert,
+ .reset = apple_pmgr_reset_reset,
+ .status = apple_pmgr_reset_status,
+};
+
+static int apple_pmgr_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ return 0;
+}
+
+static int apple_pmgr_ps_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct apple_pmgr_ps *ps;
+ struct regmap *regmap;
+ struct of_phandle_iterator it;
+ int ret;
+ const char *name;
+ bool active;
+
+ regmap = syscon_node_to_regmap(node->parent);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ps = devm_kzalloc(dev, sizeof(*ps), GFP_KERNEL);
+ if (!ps)
+ return -ENOMEM;
+
+ ps->dev = dev;
+ ps->regmap = regmap;
+
+ ret = of_property_read_string(node, "label", &name);
+ if (ret < 0) {
+ dev_err(dev, "missing label property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(node, "reg", &ps->offset);
+ if (ret < 0) {
+ dev_err(dev, "missing reg property\n");
+ return ret;
+ }
+
+ ps->genpd.name = name;
+ ps->genpd.power_on = apple_pmgr_ps_power_on;
+ ps->genpd.power_off = apple_pmgr_ps_power_off;
+
+ ret = of_property_read_u32(node, "apple,min-state", &ps->min_state);
+ if (ret == 0 && ps->min_state <= APPLE_PMGR_PS_ACTIVE)
+ regmap_update_bits(regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_PS_MIN,
+ FIELD_PREP(APPLE_PMGR_PS_MIN, ps->min_state));
+
+ active = apple_pmgr_ps_is_active(ps);
+ if (of_property_read_bool(node, "apple,always-on")) {
+ ps->genpd.flags |= GENPD_FLAG_ALWAYS_ON;
+ if (!active) {
+ dev_warn(dev, "always-on domain %s is not on at boot\n", name);
+ /* Turn it on so pm_genpd_init does not fail */
+ active = apple_pmgr_ps_power_on(&ps->genpd) == 0;
+ }
+ }
+
+ /* Turn on auto-PM if the domain is already on */
+ if (active)
+ regmap_update_bits(regmap, ps->offset, APPLE_PMGR_FLAGS | APPLE_PMGR_AUTO_ENABLE,
+ APPLE_PMGR_AUTO_ENABLE);
+
+ ret = pm_genpd_init(&ps->genpd, NULL, !active);
+ if (ret < 0) {
+ dev_err(dev, "pm_genpd_init failed\n");
+ return ret;
+ }
+
+ ret = of_genpd_add_provider_simple(node, &ps->genpd);
+ if (ret < 0) {
+ dev_err(dev, "of_genpd_add_provider_simple failed\n");
+ return ret;
+ }
+
+ of_for_each_phandle(&it, ret, node, "power-domains", "#power-domain-cells", -1) {
+ struct of_phandle_args parent, child;
+
+ parent.np = it.node;
+ parent.args_count = of_phandle_iterator_args(&it, parent.args, MAX_PHANDLE_ARGS);
+ child.np = node;
+ child.args_count = 0;
+ ret = of_genpd_add_subdomain(&parent, &child);
+
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(parent.np);
+ goto err_remove;
+ } else if (ret < 0) {
+ dev_err(dev, "failed to add to parent domain: %d (%s -> %s)\n",
+ ret, it.node->name, node->name);
+ of_node_put(parent.np);
+ goto err_remove;
+ }
+ }
+
+ /*
+ * Do not participate in regular PM; parent power domains are handled via the
+ * genpd hierarchy.
+ */
+ pm_genpd_remove_device(dev);
+
+ ps->rcdev.owner = THIS_MODULE;
+ ps->rcdev.nr_resets = 1;
+ ps->rcdev.ops = &apple_pmgr_reset_ops;
+ ps->rcdev.of_node = dev->of_node;
+ ps->rcdev.of_reset_n_cells = 0;
+ ps->rcdev.of_xlate = apple_pmgr_reset_xlate;
+
+ ret = devm_reset_controller_register(dev, &ps->rcdev);
+ if (ret < 0)
+ goto err_remove;
+
+ return 0;
+err_remove:
+ of_genpd_del_provider(node);
+ pm_genpd_remove(&ps->genpd);
+ return ret;
+}
+
+static const struct of_device_id apple_pmgr_ps_of_match[] = {
+ { .compatible = "apple,pmgr-pwrstate" },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, apple_pmgr_ps_of_match);
+
+static struct platform_driver apple_pmgr_ps_driver = {
+ .probe = apple_pmgr_ps_probe,
+ .driver = {
+ .name = "apple-pmgr-pwrstate",
+ .of_match_table = apple_pmgr_ps_of_match,
+ },
+};
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_DESCRIPTION("PMGR power state driver for Apple SoCs");
+MODULE_LICENSE("GPL v2");
+
+module_platform_driver(apple_pmgr_ps_driver);
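
Each node handled by this driver is both a genpd provider (consumers attach automatically through their power-domains property) and a single-line reset provider registered with apple_pmgr_reset_ops. A hypothetical consumer sketch of the reset side; reset_control_reset() ends up in apple_pmgr_reset_reset(), which asserts, waits briefly, and deasserts:

    static int example_consumer_probe(struct platform_device *pdev)
    {
            struct reset_control *rst;

            rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
            if (IS_ERR(rst))
                    return PTR_ERR(rst);

            /* runs apple_pmgr_reset_assert(), a short delay, then _deassert() */
            return reset_control_reset(rst);
    }
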
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index ce16ef5c939c..2cbd03db2cc7 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -235,6 +235,13 @@ config ARCH_R8A77961
This enables support for the Renesas R-Car M3-W+ SoC.
This includes different gradings like R-Car M3e and M3e-2G.
+config ARCH_R8A779F0
+ bool "ARM64 Platform support for R-Car S4-8"
+ select ARCH_RCAR_GEN3
+ select SYSC_R8A779F0
+ help
+ This enables support for the Renesas R-Car S4-8 SoC.
+
config ARCH_R8A77980
bool "ARM64 Platform support for R-Car V3H"
select ARCH_RCAR_GEN3
@@ -297,6 +304,9 @@ config RST_RCAR
config SYSC_RCAR
bool "System Controller support for R-Car" if COMPILE_TEST
+config SYSC_RCAR_GEN4
+ bool "System Controller support for R-Car Gen4" if COMPILE_TEST
+
config SYSC_R8A77995
bool "System Controller support for R-Car D3" if COMPILE_TEST
select SYSC_RCAR
@@ -337,6 +347,10 @@ config SYSC_R8A77961
bool "System Controller support for R-Car M3-W+" if COMPILE_TEST
select SYSC_RCAR
+config SYSC_R8A779F0
+ bool "System Controller support for R-Car S4-8" if COMPILE_TEST
+ select SYSC_RCAR_GEN4
+
config SYSC_R8A7792
bool "System Controller support for R-Car V2H" if COMPILE_TEST
select SYSC_RCAR
@@ -351,6 +365,7 @@ config SYSC_R8A77970
config SYSC_R8A779A0
bool "System Controller support for R-Car V3U" if COMPILE_TEST
+ select SYSC_RCAR_GEN4
config SYSC_RMOBILE
bool "System Controller support for R-Mobile" if COMPILE_TEST
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 9b29bed2a597..deeb41f84f01 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_SYSC_R8A77980) += r8a77980-sysc.o
obj-$(CONFIG_SYSC_R8A77990) += r8a77990-sysc.o
obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o
obj-$(CONFIG_SYSC_R8A779A0) += r8a779a0-sysc.o
+obj-$(CONFIG_SYSC_R8A779F0) += r8a779f0-sysc.o
ifdef CONFIG_SMP
obj-$(CONFIG_ARCH_R9A06G032) += r9a06g032-smp.o
endif
@@ -32,4 +33,5 @@ endif
# Family
obj-$(CONFIG_RST_RCAR) += rcar-rst.o
obj-$(CONFIG_SYSC_RCAR) += rcar-sysc.o
+obj-$(CONFIG_SYSC_RCAR_GEN4) += rcar-gen4-sysc.o
obj-$(CONFIG_SYSC_RMOBILE) += rmobile-sysc.o
diff --git a/drivers/soc/renesas/r8a779a0-sysc.c b/drivers/soc/renesas/r8a779a0-sysc.c
index 7410b9fa9846..fdfc857df334 100644
--- a/drivers/soc/renesas/r8a779a0-sysc.c
+++ b/drivers/soc/renesas/r8a779a0-sysc.c
@@ -21,35 +21,9 @@
#include <dt-bindings/power/r8a779a0-sysc.h>
-/*
- * Power Domain flags
- */
-#define PD_CPU BIT(0) /* Area contains main CPU core */
-#define PD_SCU BIT(1) /* Area contains SCU and L2 cache */
-#define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */
-
-#define PD_CPU_NOCR PD_CPU | PD_NO_CR /* CPU area lacks CR */
-#define PD_ALWAYS_ON PD_NO_CR /* Always-on area */
-
-/*
- * Description of a Power Area
- */
-struct r8a779a0_sysc_area {
- const char *name;
- u8 pdr; /* PDRn */
- int parent; /* -1 if none */
- unsigned int flags; /* See PD_* */
-};
-
-/*
- * SoC-specific Power Area Description
- */
-struct r8a779a0_sysc_info {
- const struct r8a779a0_sysc_area *areas;
- unsigned int num_areas;
-};
+#include "rcar-gen4-sysc.h"
-static struct r8a779a0_sysc_area r8a779a0_areas[] __initdata = {
+static struct rcar_gen4_sysc_area r8a779a0_areas[] __initdata = {
{ "always-on", R8A779A0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
{ "a3e0", R8A779A0_PD_A3E0, R8A779A0_PD_ALWAYS_ON, PD_SCU },
{ "a3e1", R8A779A0_PD_A3E1, R8A779A0_PD_ALWAYS_ON, PD_SCU },
@@ -96,355 +70,7 @@ static struct r8a779a0_sysc_area r8a779a0_areas[] __initdata = {
{ "a1dsp1", R8A779A0_PD_A1DSP1, R8A779A0_PD_A2CN1 },
};
-static const struct r8a779a0_sysc_info r8a779a0_sysc_info __initconst = {
+const struct rcar_gen4_sysc_info r8a779a0_sysc_info __initconst = {
.areas = r8a779a0_areas,
.num_areas = ARRAY_SIZE(r8a779a0_areas),
};
-
-/* SYSC Common */
-#define SYSCSR 0x000 /* SYSC Status Register */
-#define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register 0 */
-#define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */
-#define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */
-#define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */
-#define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask Register */
-
-/* Power Domain Registers */
-#define PDRSR(n) (0x1000 + ((n) * 0x40))
-#define PDRONCR(n) (0x1004 + ((n) * 0x40))
-#define PDROFFCR(n) (0x1008 + ((n) * 0x40))
-#define PDRESR(n) (0x100C + ((n) * 0x40))
-
-/* PWRON/PWROFF */
-#define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */
-
-/* PDRESR */
-#define PDRESR_ERR BIT(0)
-
-/* PDRSR */
-#define PDRSR_OFF BIT(0) /* Power-OFF state */
-#define PDRSR_ON BIT(4) /* Power-ON state */
-#define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */
-#define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */
-
-#define SYSCSR_BUSY GENMASK(1, 0) /* All bit sets is not busy */
-
-#define SYSCSR_TIMEOUT 10000
-#define SYSCSR_DELAY_US 10
-
-#define PDRESR_RETRIES 1000
-#define PDRESR_DELAY_US 10
-
-#define SYSCISR_TIMEOUT 10000
-#define SYSCISR_DELAY_US 10
-
-#define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32)
-
-static void __iomem *r8a779a0_sysc_base;
-static DEFINE_SPINLOCK(r8a779a0_sysc_lock); /* SMP CPUs + I/O devices */
-
-static int r8a779a0_sysc_pwr_on_off(u8 pdr, bool on)
-{
- unsigned int reg_offs;
- u32 val;
- int ret;
-
- if (on)
- reg_offs = PDRONCR(pdr);
- else
- reg_offs = PDROFFCR(pdr);
-
- /* Wait until SYSC is ready to accept a power request */
- ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCSR, val,
- (val & SYSCSR_BUSY) == SYSCSR_BUSY,
- SYSCSR_DELAY_US, SYSCSR_TIMEOUT);
- if (ret < 0)
- return -EAGAIN;
-
- /* Submit power shutoff or power resume request */
- iowrite32(PWRON_PWROFF, r8a779a0_sysc_base + reg_offs);
-
- return 0;
-}
-
-static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask)
-{
- u32 val;
- int ret;
-
- iowrite32(isr_mask, r8a779a0_sysc_base + SYSCISCR(reg_idx));
-
- ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx),
- val, !(val & isr_mask),
- SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
- if (ret < 0) {
- pr_err("\n %s : Can not clear IRQ flags in SYSCISCR", __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-static int r8a779a0_sysc_power(u8 pdr, bool on)
-{
- unsigned int isr_mask;
- unsigned int reg_idx, bit_idx;
- unsigned int status;
- unsigned long flags;
- int ret = 0;
- u32 val;
- int k;
-
- spin_lock_irqsave(&r8a779a0_sysc_lock, flags);
-
- reg_idx = pdr / NUM_DOMAINS_EACH_REG;
- bit_idx = pdr % NUM_DOMAINS_EACH_REG;
-
- isr_mask = BIT(bit_idx);
-
- /*
- * The interrupt source needs to be enabled, but masked, to prevent the
- * CPU from receiving it.
- */
- iowrite32(ioread32(r8a779a0_sysc_base + SYSCIER(reg_idx)) | isr_mask,
- r8a779a0_sysc_base + SYSCIER(reg_idx));
- iowrite32(ioread32(r8a779a0_sysc_base + SYSCIMR(reg_idx)) | isr_mask,
- r8a779a0_sysc_base + SYSCIMR(reg_idx));
-
- ret = clear_irq_flags(reg_idx, isr_mask);
- if (ret)
- goto out;
-
- /* Submit power shutoff or resume request until it was accepted */
- for (k = 0; k < PDRESR_RETRIES; k++) {
- ret = r8a779a0_sysc_pwr_on_off(pdr, on);
- if (ret)
- goto out;
-
- status = ioread32(r8a779a0_sysc_base + PDRESR(pdr));
- if (!(status & PDRESR_ERR))
- break;
-
- udelay(PDRESR_DELAY_US);
- }
-
- if (k == PDRESR_RETRIES) {
- ret = -EIO;
- goto out;
- }
-
- /* Wait until the power shutoff or resume request has completed * */
- ret = readl_poll_timeout_atomic(r8a779a0_sysc_base + SYSCISCR(reg_idx),
- val, (val & isr_mask),
- SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
- if (ret < 0) {
- ret = -EIO;
- goto out;
- }
-
- /* Clear interrupt flags */
- ret = clear_irq_flags(reg_idx, isr_mask);
- if (ret)
- goto out;
-
- out:
- spin_unlock_irqrestore(&r8a779a0_sysc_lock, flags);
-
- pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
- pdr, ioread32(r8a779a0_sysc_base + SYSCISCR(reg_idx)), ret);
- return ret;
-}
-
-static bool r8a779a0_sysc_power_is_off(u8 pdr)
-{
- unsigned int st;
-
- st = ioread32(r8a779a0_sysc_base + PDRSR(pdr));
-
- if (st & PDRSR_OFF)
- return true;
-
- return false;
-}
-
-struct r8a779a0_sysc_pd {
- struct generic_pm_domain genpd;
- u8 pdr;
- unsigned int flags;
- char name[];
-};
-
-static inline struct r8a779a0_sysc_pd *to_r8a779a0_pd(struct generic_pm_domain *d)
-{
- return container_of(d, struct r8a779a0_sysc_pd, genpd);
-}
-
-static int r8a779a0_sysc_pd_power_off(struct generic_pm_domain *genpd)
-{
- struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd);
-
- pr_debug("%s: %s\n", __func__, genpd->name);
- return r8a779a0_sysc_power(pd->pdr, false);
-}
-
-static int r8a779a0_sysc_pd_power_on(struct generic_pm_domain *genpd)
-{
- struct r8a779a0_sysc_pd *pd = to_r8a779a0_pd(genpd);
-
- pr_debug("%s: %s\n", __func__, genpd->name);
- return r8a779a0_sysc_power(pd->pdr, true);
-}
-
-static int __init r8a779a0_sysc_pd_setup(struct r8a779a0_sysc_pd *pd)
-{
- struct generic_pm_domain *genpd = &pd->genpd;
- const char *name = pd->genpd.name;
- int error;
-
- if (pd->flags & PD_CPU) {
- /*
- * This domain contains a CPU core and therefore it should
- * only be turned off if the CPU is not in use.
- */
- pr_debug("PM domain %s contains %s\n", name, "CPU");
- genpd->flags |= GENPD_FLAG_ALWAYS_ON;
- } else if (pd->flags & PD_SCU) {
- /*
- * This domain contains an SCU and cache-controller, and
- * therefore it should only be turned off if the CPU cores are
- * not in use.
- */
- pr_debug("PM domain %s contains %s\n", name, "SCU");
- genpd->flags |= GENPD_FLAG_ALWAYS_ON;
- } else if (pd->flags & PD_NO_CR) {
- /*
- * This domain cannot be turned off.
- */
- genpd->flags |= GENPD_FLAG_ALWAYS_ON;
- }
-
- if (!(pd->flags & (PD_CPU | PD_SCU))) {
- /* Enable Clock Domain for I/O devices */
- genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
- genpd->attach_dev = cpg_mssr_attach_dev;
- genpd->detach_dev = cpg_mssr_detach_dev;
- }
-
- genpd->power_off = r8a779a0_sysc_pd_power_off;
- genpd->power_on = r8a779a0_sysc_pd_power_on;
-
- if (pd->flags & (PD_CPU | PD_NO_CR)) {
- /* Skip CPUs (handled by SMP code) and areas without control */
- pr_debug("%s: Not touching %s\n", __func__, genpd->name);
- goto finalize;
- }
-
- if (!r8a779a0_sysc_power_is_off(pd->pdr)) {
- pr_debug("%s: %s is already powered\n", __func__, genpd->name);
- goto finalize;
- }
-
- r8a779a0_sysc_power(pd->pdr, true);
-
-finalize:
- error = pm_genpd_init(genpd, &simple_qos_governor, false);
- if (error)
- pr_err("Failed to init PM domain %s: %d\n", name, error);
-
- return error;
-}
-
-static const struct of_device_id r8a779a0_sysc_matches[] __initconst = {
- { .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info },
- { /* sentinel */ }
-};
-
-struct r8a779a0_pm_domains {
- struct genpd_onecell_data onecell_data;
- struct generic_pm_domain *domains[R8A779A0_PD_ALWAYS_ON + 1];
-};
-
-static struct genpd_onecell_data *r8a779a0_sysc_onecell_data;
-
-static int __init r8a779a0_sysc_pd_init(void)
-{
- const struct r8a779a0_sysc_info *info;
- const struct of_device_id *match;
- struct r8a779a0_pm_domains *domains;
- struct device_node *np;
- void __iomem *base;
- unsigned int i;
- int error;
-
- np = of_find_matching_node_and_match(NULL, r8a779a0_sysc_matches, &match);
- if (!np)
- return -ENODEV;
-
- info = match->data;
-
- base = of_iomap(np, 0);
- if (!base) {
- pr_warn("%pOF: Cannot map regs\n", np);
- error = -ENOMEM;
- goto out_put;
- }
-
- r8a779a0_sysc_base = base;
-
- domains = kzalloc(sizeof(*domains), GFP_KERNEL);
- if (!domains) {
- error = -ENOMEM;
- goto out_put;
- }
-
- domains->onecell_data.domains = domains->domains;
- domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
- r8a779a0_sysc_onecell_data = &domains->onecell_data;
-
- for (i = 0; i < info->num_areas; i++) {
- const struct r8a779a0_sysc_area *area = &info->areas[i];
- struct r8a779a0_sysc_pd *pd;
- size_t n;
-
- if (!area->name) {
- /* Skip NULLified area */
- continue;
- }
-
- n = strlen(area->name) + 1;
- pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
- if (!pd) {
- error = -ENOMEM;
- goto out_put;
- }
-
- memcpy(pd->name, area->name, n);
- pd->genpd.name = pd->name;
- pd->pdr = area->pdr;
- pd->flags = area->flags;
-
- error = r8a779a0_sysc_pd_setup(pd);
- if (error)
- goto out_put;
-
- domains->domains[area->pdr] = &pd->genpd;
-
- if (area->parent < 0)
- continue;
-
- error = pm_genpd_add_subdomain(domains->domains[area->parent],
- &pd->genpd);
- if (error) {
- pr_warn("Failed to add PM subdomain %s to parent %u\n",
- area->name, area->parent);
- goto out_put;
- }
- }
-
- error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
-
-out_put:
- of_node_put(np);
- return error;
-}
-early_initcall(r8a779a0_sysc_pd_init);
diff --git a/drivers/soc/renesas/r8a779f0-sysc.c b/drivers/soc/renesas/r8a779f0-sysc.c
new file mode 100644
index 000000000000..5602aa6bd7ed
--- /dev/null
+++ b/drivers/soc/renesas/r8a779f0-sysc.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas R-Car S4-8 System Controller
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk/renesas.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <dt-bindings/power/r8a779f0-sysc.h>
+
+#include "rcar-gen4-sysc.h"
+
+static struct rcar_gen4_sysc_area r8a779f0_areas[] __initdata = {
+ { "always-on", R8A779F0_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "a3e0", R8A779F0_PD_A3E0, R8A779F0_PD_ALWAYS_ON, PD_SCU },
+ { "a3e1", R8A779F0_PD_A3E1, R8A779F0_PD_ALWAYS_ON, PD_SCU },
+ { "a2e0d0", R8A779F0_PD_A2E0D0, R8A779F0_PD_A3E0, PD_SCU },
+ { "a2e0d1", R8A779F0_PD_A2E0D1, R8A779F0_PD_A3E0, PD_SCU },
+ { "a2e1d0", R8A779F0_PD_A2E1D0, R8A779F0_PD_A3E1, PD_SCU },
+ { "a2e1d1", R8A779F0_PD_A2E1D1, R8A779F0_PD_A3E1, PD_SCU },
+ { "a1e0d0c0", R8A779F0_PD_A1E0D0C0, R8A779F0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d0c1", R8A779F0_PD_A1E0D0C1, R8A779F0_PD_A2E0D0, PD_CPU_NOCR },
+ { "a1e0d1c0", R8A779F0_PD_A1E0D1C0, R8A779F0_PD_A2E0D1, PD_CPU_NOCR },
+ { "a1e0d1c1", R8A779F0_PD_A1E0D1C1, R8A779F0_PD_A2E0D1, PD_CPU_NOCR },
+ { "a1e1d0c0", R8A779F0_PD_A1E1D0C0, R8A779F0_PD_A2E1D0, PD_CPU_NOCR },
+ { "a1e1d0c1", R8A779F0_PD_A1E1D0C1, R8A779F0_PD_A2E1D0, PD_CPU_NOCR },
+ { "a1e1d1c0", R8A779F0_PD_A1E1D1C0, R8A779F0_PD_A2E1D1, PD_CPU_NOCR },
+ { "a1e1d1c1", R8A779F0_PD_A1E1D1C1, R8A779F0_PD_A2E1D1, PD_CPU_NOCR },
+};
+
+const struct rcar_gen4_sysc_info r8a779f0_sysc_info __initconst = {
+ .areas = r8a779f0_areas,
+ .num_areas = ARRAY_SIZE(r8a779f0_areas),
+};
diff --git a/drivers/soc/renesas/rcar-gen4-sysc.c b/drivers/soc/renesas/rcar-gen4-sysc.c
new file mode 100644
index 000000000000..831162a57f9a
--- /dev/null
+++ b/drivers/soc/renesas/rcar-gen4-sysc.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R-Car Gen4 SYSC Power management support
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk/renesas.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/of_address.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "rcar-gen4-sysc.h"
+
+/* SYSC Common */
+#define SYSCSR 0x000 /* SYSC Status Register */
+#define SYSCPONSR(x) (0x800 + ((x) * 0x4)) /* Power-ON Status Register 0 */
+#define SYSCPOFFSR(x) (0x808 + ((x) * 0x4)) /* Power-OFF Status Register */
+#define SYSCISCR(x) (0x810 + ((x) * 0x4)) /* Interrupt Status/Clear Register */
+#define SYSCIER(x) (0x820 + ((x) * 0x4)) /* Interrupt Enable Register */
+#define SYSCIMR(x) (0x830 + ((x) * 0x4)) /* Interrupt Mask Register */
+
+/* Power Domain Registers */
+#define PDRSR(n) (0x1000 + ((n) * 0x40))
+#define PDRONCR(n) (0x1004 + ((n) * 0x40))
+#define PDROFFCR(n) (0x1008 + ((n) * 0x40))
+#define PDRESR(n) (0x100C + ((n) * 0x40))
+
+/* PWRON/PWROFF */
+#define PWRON_PWROFF BIT(0) /* Power-ON/OFF request */
+
+/* PDRESR */
+#define PDRESR_ERR BIT(0)
+
+/* PDRSR */
+#define PDRSR_OFF BIT(0) /* Power-OFF state */
+#define PDRSR_ON BIT(4) /* Power-ON state */
+#define PDRSR_OFF_STATE BIT(8) /* Processing Power-OFF sequence */
+#define PDRSR_ON_STATE BIT(12) /* Processing Power-ON sequence */
+
+#define SYSCSR_BUSY GENMASK(1, 0) /* All bits set means not busy */
+
+#define SYSCSR_TIMEOUT 10000
+#define SYSCSR_DELAY_US 10
+
+#define PDRESR_RETRIES 1000
+#define PDRESR_DELAY_US 10
+
+#define SYSCISR_TIMEOUT 10000
+#define SYSCISR_DELAY_US 10
+
+#define RCAR_GEN4_PD_ALWAYS_ON 64
+#define NUM_DOMAINS_EACH_REG BITS_PER_TYPE(u32)
+
+static void __iomem *rcar_gen4_sysc_base;
+static DEFINE_SPINLOCK(rcar_gen4_sysc_lock); /* SMP CPUs + I/O devices */
+
+static int rcar_gen4_sysc_pwr_on_off(u8 pdr, bool on)
+{
+ unsigned int reg_offs;
+ u32 val;
+ int ret;
+
+ if (on)
+ reg_offs = PDRONCR(pdr);
+ else
+ reg_offs = PDROFFCR(pdr);
+
+ /* Wait until SYSC is ready to accept a power request */
+ ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCSR, val,
+ (val & SYSCSR_BUSY) == SYSCSR_BUSY,
+ SYSCSR_DELAY_US, SYSCSR_TIMEOUT);
+ if (ret < 0)
+ return -EAGAIN;
+
+ /* Submit power shutoff or power resume request */
+ iowrite32(PWRON_PWROFF, rcar_gen4_sysc_base + reg_offs);
+
+ return 0;
+}
+
+static int clear_irq_flags(unsigned int reg_idx, unsigned int isr_mask)
+{
+ u32 val;
+ int ret;
+
+ iowrite32(isr_mask, rcar_gen4_sysc_base + SYSCISCR(reg_idx));
+
+ ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx),
+ val, !(val & isr_mask),
+ SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
+ if (ret < 0) {
+ pr_err("\n %s : Can not clear IRQ flags in SYSCISCR", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int rcar_gen4_sysc_power(u8 pdr, bool on)
+{
+ unsigned int isr_mask;
+ unsigned int reg_idx, bit_idx;
+ unsigned int status;
+ unsigned long flags;
+ int ret = 0;
+ u32 val;
+ int k;
+
+ spin_lock_irqsave(&rcar_gen4_sysc_lock, flags);
+
+ reg_idx = pdr / NUM_DOMAINS_EACH_REG;
+ bit_idx = pdr % NUM_DOMAINS_EACH_REG;
+
+ isr_mask = BIT(bit_idx);
+
+ /*
+ * The interrupt source needs to be enabled, but masked, to prevent the
+ * CPU from receiving it.
+ */
+ iowrite32(ioread32(rcar_gen4_sysc_base + SYSCIER(reg_idx)) | isr_mask,
+ rcar_gen4_sysc_base + SYSCIER(reg_idx));
+ iowrite32(ioread32(rcar_gen4_sysc_base + SYSCIMR(reg_idx)) | isr_mask,
+ rcar_gen4_sysc_base + SYSCIMR(reg_idx));
+
+ ret = clear_irq_flags(reg_idx, isr_mask);
+ if (ret)
+ goto out;
+
+ /* Submit power shutoff or resume request until it is accepted */
+ for (k = 0; k < PDRESR_RETRIES; k++) {
+ ret = rcar_gen4_sysc_pwr_on_off(pdr, on);
+ if (ret)
+ goto out;
+
+ status = ioread32(rcar_gen4_sysc_base + PDRESR(pdr));
+ if (!(status & PDRESR_ERR))
+ break;
+
+ udelay(PDRESR_DELAY_US);
+ }
+
+ if (k == PDRESR_RETRIES) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Wait until the power shutoff or resume request has completed */
+ ret = readl_poll_timeout_atomic(rcar_gen4_sysc_base + SYSCISCR(reg_idx),
+ val, (val & isr_mask),
+ SYSCISR_DELAY_US, SYSCISR_TIMEOUT);
+ if (ret < 0) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Clear interrupt flags */
+ ret = clear_irq_flags(reg_idx, isr_mask);
+ if (ret)
+ goto out;
+
+ out:
+ spin_unlock_irqrestore(&rcar_gen4_sysc_lock, flags);
+
+ pr_debug("sysc power %s domain %d: %08x -> %d\n", on ? "on" : "off",
+ pdr, ioread32(rcar_gen4_sysc_base + SYSCISCR(reg_idx)), ret);
+ return ret;
+}
+
+static bool rcar_gen4_sysc_power_is_off(u8 pdr)
+{
+ unsigned int st;
+
+ st = ioread32(rcar_gen4_sysc_base + PDRSR(pdr));
+
+ if (st & PDRSR_OFF)
+ return true;
+
+ return false;
+}
+
+struct rcar_gen4_sysc_pd {
+ struct generic_pm_domain genpd;
+ u8 pdr;
+ unsigned int flags;
+ char name[];
+};
+
+static inline struct rcar_gen4_sysc_pd *to_rcar_gen4_pd(struct generic_pm_domain *d)
+{
+ return container_of(d, struct rcar_gen4_sysc_pd, genpd);
+}
+
+static int rcar_gen4_sysc_pd_power_off(struct generic_pm_domain *genpd)
+{
+ struct rcar_gen4_sysc_pd *pd = to_rcar_gen4_pd(genpd);
+
+ pr_debug("%s: %s\n", __func__, genpd->name);
+ return rcar_gen4_sysc_power(pd->pdr, false);
+}
+
+static int rcar_gen4_sysc_pd_power_on(struct generic_pm_domain *genpd)
+{
+ struct rcar_gen4_sysc_pd *pd = to_rcar_gen4_pd(genpd);
+
+ pr_debug("%s: %s\n", __func__, genpd->name);
+ return rcar_gen4_sysc_power(pd->pdr, true);
+}
+
+static int __init rcar_gen4_sysc_pd_setup(struct rcar_gen4_sysc_pd *pd)
+{
+ struct generic_pm_domain *genpd = &pd->genpd;
+ const char *name = pd->genpd.name;
+ int error;
+
+ if (pd->flags & PD_CPU) {
+ /*
+ * This domain contains a CPU core and therefore it should
+ * only be turned off if the CPU is not in use.
+ */
+ pr_debug("PM domain %s contains %s\n", name, "CPU");
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ } else if (pd->flags & PD_SCU) {
+ /*
+ * This domain contains an SCU and cache-controller, and
+ * therefore it should only be turned off if the CPU cores are
+ * not in use.
+ */
+ pr_debug("PM domain %s contains %s\n", name, "SCU");
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ } else if (pd->flags & PD_NO_CR) {
+ /*
+ * This domain cannot be turned off.
+ */
+ genpd->flags |= GENPD_FLAG_ALWAYS_ON;
+ }
+
+ if (!(pd->flags & (PD_CPU | PD_SCU))) {
+ /* Enable Clock Domain for I/O devices */
+ genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+ genpd->attach_dev = cpg_mssr_attach_dev;
+ genpd->detach_dev = cpg_mssr_detach_dev;
+ }
+
+ genpd->power_off = rcar_gen4_sysc_pd_power_off;
+ genpd->power_on = rcar_gen4_sysc_pd_power_on;
+
+ if (pd->flags & (PD_CPU | PD_NO_CR)) {
+ /* Skip CPUs (handled by SMP code) and areas without control */
+ pr_debug("%s: Not touching %s\n", __func__, genpd->name);
+ goto finalize;
+ }
+
+ if (!rcar_gen4_sysc_power_is_off(pd->pdr)) {
+ pr_debug("%s: %s is already powered\n", __func__, genpd->name);
+ goto finalize;
+ }
+
+ rcar_gen4_sysc_power(pd->pdr, true);
+
+finalize:
+ error = pm_genpd_init(genpd, &simple_qos_governor, false);
+ if (error)
+ pr_err("Failed to init PM domain %s: %d\n", name, error);
+
+ return error;
+}
+
+static const struct of_device_id rcar_gen4_sysc_matches[] __initconst = {
+#ifdef CONFIG_SYSC_R8A779A0
+ { .compatible = "renesas,r8a779a0-sysc", .data = &r8a779a0_sysc_info },
+#endif
+#ifdef CONFIG_SYSC_R8A779F0
+ { .compatible = "renesas,r8a779f0-sysc", .data = &r8a779f0_sysc_info },
+#endif
+ { /* sentinel */ }
+};
+
+struct rcar_gen4_pm_domains {
+ struct genpd_onecell_data onecell_data;
+ struct generic_pm_domain *domains[RCAR_GEN4_PD_ALWAYS_ON + 1];
+};
+
+static struct genpd_onecell_data *rcar_gen4_sysc_onecell_data;
+
+static int __init rcar_gen4_sysc_pd_init(void)
+{
+ const struct rcar_gen4_sysc_info *info;
+ const struct of_device_id *match;
+ struct rcar_gen4_pm_domains *domains;
+ struct device_node *np;
+ void __iomem *base;
+ unsigned int i;
+ int error;
+
+ np = of_find_matching_node_and_match(NULL, rcar_gen4_sysc_matches, &match);
+ if (!np)
+ return -ENODEV;
+
+ info = match->data;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_warn("%pOF: Cannot map regs\n", np);
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ rcar_gen4_sysc_base = base;
+
+ domains = kzalloc(sizeof(*domains), GFP_KERNEL);
+ if (!domains) {
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ domains->onecell_data.domains = domains->domains;
+ domains->onecell_data.num_domains = ARRAY_SIZE(domains->domains);
+ rcar_gen4_sysc_onecell_data = &domains->onecell_data;
+
+ for (i = 0; i < info->num_areas; i++) {
+ const struct rcar_gen4_sysc_area *area = &info->areas[i];
+ struct rcar_gen4_sysc_pd *pd;
+ size_t n;
+
+ if (!area->name) {
+ /* Skip NULLified area */
+ continue;
+ }
+
+ n = strlen(area->name) + 1;
+ pd = kzalloc(sizeof(*pd) + n, GFP_KERNEL);
+ if (!pd) {
+ error = -ENOMEM;
+ goto out_put;
+ }
+
+ memcpy(pd->name, area->name, n);
+ pd->genpd.name = pd->name;
+ pd->pdr = area->pdr;
+ pd->flags = area->flags;
+
+ error = rcar_gen4_sysc_pd_setup(pd);
+ if (error)
+ goto out_put;
+
+ domains->domains[area->pdr] = &pd->genpd;
+
+ if (area->parent < 0)
+ continue;
+
+ error = pm_genpd_add_subdomain(domains->domains[area->parent],
+ &pd->genpd);
+ if (error) {
+ pr_warn("Failed to add PM subdomain %s to parent %u\n",
+ area->name, area->parent);
+ goto out_put;
+ }
+ }
+
+ error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
+
+out_put:
+ of_node_put(np);
+ return error;
+}
+early_initcall(rcar_gen4_sysc_pd_init);
diff --git a/drivers/soc/renesas/rcar-gen4-sysc.h b/drivers/soc/renesas/rcar-gen4-sysc.h
new file mode 100644
index 000000000000..0e0bd102b1f9
--- /dev/null
+++ b/drivers/soc/renesas/rcar-gen4-sysc.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * R-Car Gen4 System Controller
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ */
+#ifndef __SOC_RENESAS_RCAR_GEN4_SYSC_H__
+#define __SOC_RENESAS_RCAR_GEN4_SYSC_H__
+
+#include <linux/types.h>
+
+/*
+ * Power Domain flags
+ */
+#define PD_CPU BIT(0) /* Area contains main CPU core */
+#define PD_SCU BIT(1) /* Area contains SCU and L2 cache */
+#define PD_NO_CR BIT(2) /* Area lacks PWR{ON,OFF}CR registers */
+
+#define PD_CPU_NOCR (PD_CPU | PD_NO_CR) /* CPU area lacks CR */
+#define PD_ALWAYS_ON PD_NO_CR /* Always-on area */
+
+/*
+ * Description of a Power Area
+ */
+struct rcar_gen4_sysc_area {
+ const char *name;
+ u8 pdr; /* PDRn */
+ int parent; /* -1 if none */
+ unsigned int flags; /* See PD_* */
+};
+
+/*
+ * SoC-specific Power Area Description
+ */
+struct rcar_gen4_sysc_info {
+ const struct rcar_gen4_sysc_area *areas;
+ unsigned int num_areas;
+};
+
+extern const struct rcar_gen4_sysc_info r8a779a0_sysc_info;
+extern const struct rcar_gen4_sysc_info r8a779f0_sysc_info;
+
+#endif /* __SOC_RENESAS_RCAR_GEN4_SYSC_H__ */
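
For reference, adding support for a further Gen4 SoC only requires an area table and an info descriptor built on this header. A hypothetical sketch (SoC name, PDR numbers and area names are placeholders, not a real part):

#include <linux/init.h>
#include <linux/kernel.h>

#include "rcar-gen4-sysc.h"

/* { name, PDR number, parent PDR (-1 = none), flags } */
static struct rcar_gen4_sysc_area example_gen4_areas[] __initdata = {
	{ "always-on", 64, -1, PD_ALWAYS_ON },
	{ "a3xx",       0, 64, PD_SCU },
	{ "a1xxc0",     1,  0, PD_CPU_NOCR },
};

const struct rcar_gen4_sysc_info example_gen4_sysc_info __initconst = {
	.areas = example_gen4_areas,
	.num_areas = ARRAY_SIZE(example_gen4_areas),
};

The matching compatible entry would then be added to rcar_gen4_sysc_matches in rcar-gen4-sysc.c, as done for r8a779a0 and r8a779f0 above.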
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 8a1e402ea799..4d293eb2d8f3 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -13,15 +13,43 @@
#define WDTRSTCR_RESET 0xA55A0002
#define WDTRSTCR 0x0054
+#define CR7BAR 0x0070
+#define CR7BAREN BIT(4)
+#define CR7BAR_MASK 0xFFFC0000
+
+static void __iomem *rcar_rst_base;
+static u32 saved_mode __initdata;
+static int (*rcar_rst_set_rproc_boot_addr_func)(u64 boot_addr);
+
static int rcar_rst_enable_wdt_reset(void __iomem *base)
{
iowrite32(WDTRSTCR_RESET, base + WDTRSTCR);
return 0;
}
+/*
+ * Most R-Car Gen3 SoCs have an ARM realtime core. The firmware boot
+ * address has to be written to CR7BAR before the realtime core is
+ * started, and it must be aligned to a 256 KiB boundary.
+ */
+static int rcar_rst_set_gen3_rproc_boot_addr(u64 boot_addr)
+{
+ if (boot_addr & ~(u64)CR7BAR_MASK) {
+ pr_err("Invalid boot address got %llx\n", boot_addr);
+ return -EINVAL;
+ }
+
+ iowrite32(boot_addr, rcar_rst_base + CR7BAR);
+ iowrite32(boot_addr | CR7BAREN, rcar_rst_base + CR7BAR);
+
+ return 0;
+}
+
struct rst_config {
unsigned int modemr; /* Mode Monitoring Register Offset */
int (*configure)(void __iomem *base); /* Platform specific config */
+ int (*set_rproc_boot_addr)(u64 boot_addr);
};
static const struct rst_config rcar_rst_gen1 __initconst = {
@@ -35,9 +63,10 @@ static const struct rst_config rcar_rst_gen2 __initconst = {
static const struct rst_config rcar_rst_gen3 __initconst = {
.modemr = 0x60,
+ .set_rproc_boot_addr = rcar_rst_set_gen3_rproc_boot_addr,
};
-static const struct rst_config rcar_rst_r8a779a0 __initconst = {
+static const struct rst_config rcar_rst_gen4 __initconst = {
.modemr = 0x00, /* MODEMR0 and it has CPG related bits */
};
@@ -71,14 +100,12 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
{ .compatible = "renesas,r8a77980-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a77990-rst", .data = &rcar_rst_gen3 },
{ .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen3 },
- /* R-Car V3U */
- { .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_r8a779a0 },
+ /* R-Car Gen4 */
+ { .compatible = "renesas,r8a779a0-rst", .data = &rcar_rst_gen4 },
+ { .compatible = "renesas,r8a779f0-rst", .data = &rcar_rst_gen4 },
{ /* sentinel */ }
};
-static void __iomem *rcar_rst_base __initdata;
-static u32 saved_mode __initdata;
-
static int __init rcar_rst_init(void)
{
const struct of_device_id *match;
@@ -100,6 +127,8 @@ static int __init rcar_rst_init(void)
rcar_rst_base = base;
cfg = match->data;
+ rcar_rst_set_rproc_boot_addr_func = cfg->set_rproc_boot_addr;
+
saved_mode = ioread32(base + cfg->modemr);
if (cfg->configure) {
error = cfg->configure(base);
@@ -130,3 +159,12 @@ int __init rcar_rst_read_mode_pins(u32 *mode)
*mode = saved_mode;
return 0;
}
+
+int rcar_rst_set_rproc_boot_addr(u64 boot_addr)
+{
+ if (!rcar_rst_set_rproc_boot_addr_func)
+ return -EIO;
+
+ return rcar_rst_set_rproc_boot_addr_func(boot_addr);
+}
+EXPORT_SYMBOL_GPL(rcar_rst_set_rproc_boot_addr);
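
Sketch of how a caller (for example a remoteproc driver) would use the new helper. The prototype is assumed to be declared in include/linux/soc/renesas/rcar-rst.h, and the firmware address is a placeholder that must satisfy CR7BAR_MASK (256 KiB aligned, below 4 GiB):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/soc/renesas/rcar-rst.h>

static int example_program_cr7_boot_addr(u64 fw_paddr)
{
	int ret;

	/* Returns -EIO when the SoC variant provides no boot-address hook,
	 * -EINVAL when the address does not fit the register. */
	ret = rcar_rst_set_rproc_boot_addr(fw_paddr);
	if (ret)
		return ret;

	/* The realtime core can now be released from reset. */
	return 0;
}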
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 7961b0be1850..62540ffc581a 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -33,6 +33,10 @@ static const struct renesas_family fam_rcar_gen3 __initconst __maybe_unused = {
.reg = 0xfff00044, /* PRR (Product Register) */
};
+static const struct renesas_family fam_rcar_gen4 __initconst __maybe_unused = {
+ .name = "R-Car Gen4",
+};
+
static const struct renesas_family fam_rmobile __initconst __maybe_unused = {
.name = "R-Mobile",
.reg = 0xe600101c, /* CCCR (Common Chip Code Register) */
@@ -214,6 +218,11 @@ static const struct renesas_soc soc_rcar_v3u __initconst __maybe_unused = {
.id = 0x59,
};
+static const struct renesas_soc soc_rcar_s4 __initconst __maybe_unused = {
+ .family = &fam_rcar_gen4,
+ .id = 0x5a,
+};
+
static const struct renesas_soc soc_shmobile_ag5 __initconst __maybe_unused = {
.family = &fam_shmobile,
.id = 0x37,
@@ -319,6 +328,9 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A779A0
{ .compatible = "renesas,r8a779a0", .data = &soc_rcar_v3u },
#endif
+#ifdef CONFIG_ARCH_R8A779F0
+ { .compatible = "renesas,r8a779f0", .data = &soc_rcar_s4 },
+#endif
#if defined(CONFIG_ARCH_R9A07G044)
{ .compatible = "renesas,r9a07g044", .data = &soc_rz_g2l },
#endif
@@ -328,94 +340,92 @@ static const struct of_device_id renesas_socs[] __initconst = {
{ /* sentinel */ }
};
+struct renesas_id {
+ unsigned int offset;
+ u32 mask;
+};
+
+static const struct renesas_id id_bsid __initconst = {
+ .offset = 0,
+ .mask = 0xff0000,
+ /*
+ * TODO: Upper 4 bits of BSID are for chip version, but the format is
+ * not known at this time so we don't know how to specify eshi and eslo
+ */
+};
+
+static const struct renesas_id id_rzg2l __initconst = {
+ .offset = 0xa04,
+ .mask = 0xfffffff,
+};
+
+static const struct renesas_id id_prr __initconst = {
+ .offset = 0,
+ .mask = 0xff00,
+};
+
+static const struct of_device_id renesas_ids[] __initconst = {
+ { .compatible = "renesas,bsid", .data = &id_bsid },
+ { .compatible = "renesas,r9a07g044-sysc", .data = &id_rzg2l },
+ { .compatible = "renesas,prr", .data = &id_prr },
+ { /* sentinel */ }
+};
+
static int __init renesas_soc_init(void)
{
struct soc_device_attribute *soc_dev_attr;
+ unsigned int product, eshi = 0, eslo;
const struct renesas_family *family;
const struct of_device_id *match;
const struct renesas_soc *soc;
+ const struct renesas_id *id;
void __iomem *chipid = NULL;
struct soc_device *soc_dev;
struct device_node *np;
- unsigned int product, eshi = 0, eslo;
+ const char *soc_id;
match = of_match_node(renesas_socs, of_root);
if (!match)
return -ENODEV;
+ soc_id = strchr(match->compatible, ',') + 1;
soc = match->data;
family = soc->family;
- np = of_find_compatible_node(NULL, NULL, "renesas,bsid");
+ np = of_find_matching_node_and_match(NULL, renesas_ids, &match);
if (np) {
+ id = match->data;
chipid = of_iomap(np, 0);
of_node_put(np);
-
- if (chipid) {
- product = readl(chipid);
- iounmap(chipid);
-
- if (soc->id && ((product >> 16) & 0xff) != soc->id) {
- pr_warn("SoC mismatch (product = 0x%x)\n",
- product);
- return -ENODEV;
- }
- }
-
- /*
- * TODO: Upper 4 bits of BSID are for chip version, but the
- * format is not known at this time so we don't know how to
- * specify eshi and eslo
- */
-
- goto done;
+ } else if (soc->id && family->reg) {
+ /* Try hardcoded CCCR/PRR fallback */
+ id = &id_prr;
+ chipid = ioremap(family->reg, 4);
}
- np = of_find_compatible_node(NULL, NULL, "renesas,r9a07g044-sysc");
- if (np) {
- chipid = of_iomap(np, 0);
- of_node_put(np);
+ if (chipid) {
+ product = readl(chipid + id->offset);
+ iounmap(chipid);
- if (chipid) {
- product = readl(chipid + 0x0a04);
- iounmap(chipid);
+ if (id == &id_prr) {
+ /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */
+ if ((product & 0x7fff) == 0x5210)
+ product ^= 0x11;
+ /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */
+ if ((product & 0x7fff) == 0x5211)
+ product ^= 0x12;
- if (soc->id && (product & 0xfffffff) != soc->id) {
- pr_warn("SoC mismatch (product = 0x%x)\n",
- product);
- return -ENODEV;
- }
+ eshi = ((product >> 4) & 0x0f) + 1;
+ eslo = product & 0xf;
}
- goto done;
- }
-
- /* Try PRR first, then hardcoded fallback */
- np = of_find_compatible_node(NULL, NULL, "renesas,prr");
- if (np) {
- chipid = of_iomap(np, 0);
- of_node_put(np);
- } else if (soc->id && family->reg) {
- chipid = ioremap(family->reg, 4);
- }
- if (chipid) {
- product = readl(chipid);
- iounmap(chipid);
- /* R-Car M3-W ES1.1 incorrectly identifies as ES2.0 */
- if ((product & 0x7fff) == 0x5210)
- product ^= 0x11;
- /* R-Car M3-W ES1.3 incorrectly identifies as ES2.1 */
- if ((product & 0x7fff) == 0x5211)
- product ^= 0x12;
- if (soc->id && ((product >> 8) & 0xff) != soc->id) {
+ if (soc->id &&
+ ((product & id->mask) >> __ffs(id->mask)) != soc->id) {
pr_warn("SoC mismatch (product = 0x%x)\n", product);
return -ENODEV;
}
- eshi = ((product >> 4) & 0x0f) + 1;
- eslo = product & 0xf;
}
-done:
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
@@ -425,8 +435,7 @@ done:
of_node_put(np);
soc_dev_attr->family = kstrdup_const(family->name, GFP_KERNEL);
- soc_dev_attr->soc_id = kstrdup_const(strchr(match->compatible, ',') + 1,
- GFP_KERNEL);
+ soc_dev_attr->soc_id = kstrdup_const(soc_id, GFP_KERNEL);
if (eshi)
soc_dev_attr->revision = kasprintf(GFP_KERNEL, "ES%u.%u", eshi,
eslo);
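
The rework above replaces three near-identical chip-ID code paths with a table of { offset, mask } descriptors and one generic mask-and-shift extraction. A standalone illustration of that extraction (values are examples, not taken from the patch):

#include <linux/bitops.h>
#include <linux/types.h>

/* Isolate the product field selected by the descriptor's mask and shift
 * it down to bit 0, as renesas_soc_init() does before comparing soc->id. */
static unsigned int example_extract_product(u32 raw, u32 mask)
{
	return (raw & mask) >> __ffs(mask);
}

/* A PRR-style readout of 0x5a04 with mask 0xff00 yields 0x5a, which
 * would match soc_rcar_s4.id above; the revision bits stay in the low byte. */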
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
index cd33e99249c3..32c346b72635 100644
--- a/drivers/soc/tegra/common.c
+++ b/drivers/soc/tegra/common.c
@@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
@@ -43,6 +44,7 @@ static int tegra_core_dev_init_opp_state(struct device *dev)
{
unsigned long rate;
struct clk *clk;
+ bool rpm_enabled;
int err;
clk = devm_clk_get(dev, NULL);
@@ -57,8 +59,31 @@ static int tegra_core_dev_init_opp_state(struct device *dev)
return -EINVAL;
}
+ /*
+ * Runtime PM of the device must be enabled in order to set up
+ * GENPD's performance state properly, because the GENPD core checks
+ * whether the device is suspended and that check doesn't work while
+ * RPM is disabled. This makes sure the OPP vote below gets cached in
+ * GENPD for the device; the vote is then applied the next time the
+ * device gets runtime-resumed.
+ */
+ rpm_enabled = pm_runtime_enabled(dev);
+ if (!rpm_enabled)
+ pm_runtime_enable(dev);
+
+ /* should never happen in practice */
+ if (!pm_runtime_enabled(dev)) {
+ dev_WARN(dev, "failed to enable runtime PM\n");
+ pm_runtime_disable(dev);
+ return -EINVAL;
+ }
+
/* first dummy rate-setting initializes voltage vote */
err = dev_pm_opp_set_rate(dev, rate);
+
+ if (!rpm_enabled)
+ pm_runtime_disable(dev);
+
if (err) {
dev_err(dev, "failed to initialize OPP clock: %d\n", err);
return err;
@@ -111,9 +136,7 @@ int devm_tegra_core_dev_init_opp_table(struct device *dev,
*/
err = devm_pm_opp_of_add_table(dev);
if (err) {
- if (err == -ENODEV)
- dev_err_once(dev, "OPP table not found, please update device-tree\n");
- else
+ if (err != -ENODEV)
dev_err(dev, "failed to add OPP table: %d\n", err);
return err;
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index f2151815db58..fe4f935ce73a 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -14,6 +14,7 @@
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
@@ -181,6 +182,12 @@ static const struct nvmem_cell_info tegra_fuse_cells[] = {
},
};
+static void tegra_fuse_restore(void *base)
+{
+ fuse->clk = NULL;
+ fuse->base = base;
+}
+
static int tegra_fuse_probe(struct platform_device *pdev)
{
void __iomem *base = fuse->base;
@@ -188,13 +195,16 @@ static int tegra_fuse_probe(struct platform_device *pdev)
struct resource *res;
int err;
+ err = devm_add_action(&pdev->dev, tegra_fuse_restore, base);
+ if (err)
+ return err;
+
/* take over the memory region from the early initialization */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fuse->phys = res->start;
fuse->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(fuse->base)) {
err = PTR_ERR(fuse->base);
- fuse->base = base;
return err;
}
@@ -204,19 +214,20 @@ static int tegra_fuse_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
PTR_ERR(fuse->clk));
- fuse->base = base;
return PTR_ERR(fuse->clk);
}
platform_set_drvdata(pdev, fuse);
fuse->dev = &pdev->dev;
- pm_runtime_enable(&pdev->dev);
+ err = devm_pm_runtime_enable(&pdev->dev);
+ if (err)
+ return err;
if (fuse->soc->probe) {
err = fuse->soc->probe(fuse);
if (err < 0)
- goto restore;
+ return err;
}
memset(&nvmem, 0, sizeof(nvmem));
@@ -240,19 +251,37 @@ static int tegra_fuse_probe(struct platform_device *pdev)
err = PTR_ERR(fuse->nvmem);
dev_err(&pdev->dev, "failed to register NVMEM device: %d\n",
err);
- goto restore;
+ return err;
+ }
+
+ fuse->rst = devm_reset_control_get_optional(&pdev->dev, "fuse");
+ if (IS_ERR(fuse->rst)) {
+ err = PTR_ERR(fuse->rst);
+ dev_err(&pdev->dev, "failed to get FUSE reset: %pe\n",
+ fuse->rst);
+ return err;
+ }
+
+ /*
+ * The FUSE clock is enabled at boot time, so this runtime resume/suspend
+ * cycle disables the clock again in addition to resetting the hardware.
+ */
+ err = pm_runtime_resume_and_get(&pdev->dev);
+ if (err)
+ return err;
+
+ err = reset_control_reset(fuse->rst);
+ pm_runtime_put(&pdev->dev);
+
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to reset FUSE: %d\n", err);
+ return err;
}
/* release the early I/O memory mapping */
iounmap(base);
return 0;
-
-restore:
- fuse->clk = NULL;
- fuse->base = base;
- pm_runtime_disable(&pdev->dev);
- return err;
}
static int __maybe_unused tegra_fuse_runtime_resume(struct device *dev)
diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c
index 8ec9fc5e5e4b..12503f563e36 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra20.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra20.c
@@ -94,9 +94,28 @@ static bool dma_filter(struct dma_chan *chan, void *filter_param)
return of_device_is_compatible(np, "nvidia,tegra20-apbdma");
}
+static void tegra20_fuse_release_channel(void *data)
+{
+ struct tegra_fuse *fuse = data;
+
+ dma_release_channel(fuse->apbdma.chan);
+ fuse->apbdma.chan = NULL;
+}
+
+static void tegra20_fuse_free_coherent(void *data)
+{
+ struct tegra_fuse *fuse = data;
+
+ dma_free_coherent(fuse->dev, sizeof(u32), fuse->apbdma.virt,
+ fuse->apbdma.phys);
+ fuse->apbdma.virt = NULL;
+ fuse->apbdma.phys = 0x0;
+}
+
static int tegra20_fuse_probe(struct tegra_fuse *fuse)
{
dma_cap_mask_t mask;
+ int err;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -105,13 +124,21 @@ static int tegra20_fuse_probe(struct tegra_fuse *fuse)
if (!fuse->apbdma.chan)
return -EPROBE_DEFER;
+ err = devm_add_action_or_reset(fuse->dev, tegra20_fuse_release_channel,
+ fuse);
+ if (err)
+ return err;
+
fuse->apbdma.virt = dma_alloc_coherent(fuse->dev, sizeof(u32),
&fuse->apbdma.phys,
GFP_KERNEL);
- if (!fuse->apbdma.virt) {
- dma_release_channel(fuse->apbdma.chan);
+ if (!fuse->apbdma.virt)
return -ENOMEM;
- }
+
+ err = devm_add_action_or_reset(fuse->dev, tegra20_fuse_free_coherent,
+ fuse);
+ if (err)
+ return err;
fuse->apbdma.config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
fuse->apbdma.config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index de58feba0435..1b719d85bd04 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -43,6 +43,7 @@ struct tegra_fuse {
void __iomem *base;
phys_addr_t phys;
struct clk *clk;
+ struct reset_control *rst;
u32 (*read_early)(struct tegra_fuse *fuse, unsigned int offset);
u32 (*read)(struct tegra_fuse *fuse, unsigned int offset);
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 575d6d5b4294..5aceacbd8ce0 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -1064,10 +1064,8 @@ int tegra_pmc_cpu_remove_clamping(unsigned int cpuid)
return tegra_powergate_remove_clamping(id);
}
-static int tegra_pmc_restart_notify(struct notifier_block *this,
- unsigned long action, void *data)
+static void tegra_pmc_program_reboot_reason(const char *cmd)
{
- const char *cmd = data;
u32 value;
value = tegra_pmc_scratch_readl(pmc, pmc->soc->regs->scratch0);
@@ -1085,6 +1083,25 @@ static int tegra_pmc_restart_notify(struct notifier_block *this,
}
tegra_pmc_scratch_writel(pmc, value, pmc->soc->regs->scratch0);
+}
+
+static int tegra_pmc_reboot_notify(struct notifier_block *this,
+ unsigned long action, void *data)
+{
+ if (action == SYS_RESTART)
+ tegra_pmc_program_reboot_reason(data);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block tegra_pmc_reboot_notifier = {
+ .notifier_call = tegra_pmc_reboot_notify,
+};
+
+static int tegra_pmc_restart_notify(struct notifier_block *this,
+ unsigned long action, void *data)
+{
+ u32 value;
/* reset everything but PMC_SCRATCH0 and PMC_RST_STATUS */
value = tegra_pmc_readl(pmc, PMC_CNTRL);
@@ -1353,7 +1370,7 @@ static int tegra_pmc_core_pd_add(struct tegra_pmc *pmc, struct device_node *np)
if (!genpd)
return -ENOMEM;
- genpd->name = np->name;
+ genpd->name = "core";
genpd->set_performance_state = tegra_pmc_core_pd_set_performance_state;
genpd->opp_to_performance_state = tegra_pmc_core_pd_opp_to_performance_state;
@@ -2890,6 +2907,14 @@ static int tegra_pmc_probe(struct platform_device *pdev)
goto cleanup_sysfs;
}
+ err = devm_register_reboot_notifier(&pdev->dev,
+ &tegra_pmc_reboot_notifier);
+ if (err) {
+ dev_err(&pdev->dev, "unable to register reboot notifier, %d\n",
+ err);
+ goto cleanup_debugfs;
+ }
+
err = register_restart_handler(&tegra_pmc_restart_handler);
if (err) {
dev_err(&pdev->dev, "unable to register restart handler, %d\n",
@@ -2963,7 +2988,7 @@ static SIMPLE_DEV_PM_OPS(tegra_pmc_pm_ops, tegra_pmc_suspend, tegra_pmc_resume);
static const char * const tegra20_powergates[] = {
[TEGRA_POWERGATE_CPU] = "cpu",
- [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_3D] = "td",
[TEGRA_POWERGATE_VENC] = "venc",
[TEGRA_POWERGATE_VDEC] = "vdec",
[TEGRA_POWERGATE_PCIE] = "pcie",
@@ -3071,7 +3096,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
static const char * const tegra30_powergates[] = {
[TEGRA_POWERGATE_CPU] = "cpu0",
- [TEGRA_POWERGATE_3D] = "3d0",
+ [TEGRA_POWERGATE_3D] = "td",
[TEGRA_POWERGATE_VENC] = "venc",
[TEGRA_POWERGATE_VDEC] = "vdec",
[TEGRA_POWERGATE_PCIE] = "pcie",
@@ -3083,7 +3108,7 @@ static const char * const tegra30_powergates[] = {
[TEGRA_POWERGATE_CPU2] = "cpu2",
[TEGRA_POWERGATE_CPU3] = "cpu3",
[TEGRA_POWERGATE_CELP] = "celp",
- [TEGRA_POWERGATE_3D1] = "3d1",
+ [TEGRA_POWERGATE_3D1] = "td2",
};
static const u8 tegra30_cpu_powergates[] = {
@@ -3132,7 +3157,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
static const char * const tegra114_powergates[] = {
[TEGRA_POWERGATE_CPU] = "crail",
- [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_3D] = "td",
[TEGRA_POWERGATE_VENC] = "venc",
[TEGRA_POWERGATE_VDEC] = "vdec",
[TEGRA_POWERGATE_MPE] = "mpe",
diff --git a/drivers/soc/tegra/regulators-tegra20.c b/drivers/soc/tegra/regulators-tegra20.c
index b8ce9fd0650d..6a2f90ab9d3e 100644
--- a/drivers/soc/tegra/regulators-tegra20.c
+++ b/drivers/soc/tegra/regulators-tegra20.c
@@ -16,7 +16,9 @@
#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/suspend.h>
+#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
struct tegra_regulator_coupler {
@@ -25,9 +27,12 @@ struct tegra_regulator_coupler {
struct regulator_dev *cpu_rdev;
struct regulator_dev *rtc_rdev;
struct notifier_block reboot_notifier;
+ struct notifier_block suspend_notifier;
int core_min_uV, cpu_min_uV;
bool sys_reboot_mode_req;
bool sys_reboot_mode;
+ bool sys_suspend_mode_req;
+ bool sys_suspend_mode;
};
static inline struct tegra_regulator_coupler *
@@ -105,6 +110,28 @@ static int tegra20_core_rtc_max_spread(struct regulator_dev *core_rdev,
return 150000;
}
+static int tegra20_cpu_nominal_uV(void)
+{
+ switch (tegra_sku_info.soc_speedo_id) {
+ case 0:
+ return 1100000;
+ case 1:
+ return 1025000;
+ default:
+ return 1125000;
+ }
+}
+
+static int tegra20_core_nominal_uV(void)
+{
+ switch (tegra_sku_info.soc_speedo_id) {
+ default:
+ return 1225000;
+ case 2:
+ return 1300000;
+ }
+}
+
static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
struct regulator_dev *core_rdev,
struct regulator_dev *rtc_rdev,
@@ -144,6 +171,11 @@ static int tegra20_core_rtc_update(struct tegra_regulator_coupler *tegra,
if (err)
return err;
+ /* prepare voltage level for suspend */
+ if (tegra->sys_suspend_mode)
+ core_min_uV = clamp(tegra20_core_nominal_uV(),
+ core_min_uV, core_max_uV);
+
core_uV = regulator_get_voltage_rdev(core_rdev);
if (core_uV < 0)
return core_uV;
@@ -279,6 +311,11 @@ static int tegra20_cpu_voltage_update(struct tegra_regulator_coupler *tegra,
if (tegra->sys_reboot_mode)
cpu_min_uV = max(cpu_min_uV, tegra->cpu_min_uV);
+ /* prepare voltage level for suspend */
+ if (tegra->sys_suspend_mode)
+ cpu_min_uV = clamp(tegra20_cpu_nominal_uV(),
+ cpu_min_uV, cpu_max_uV);
+
if (cpu_min_uV > cpu_uV) {
err = tegra20_core_rtc_update(tegra, core_rdev, rtc_rdev,
cpu_uV, cpu_min_uV);
@@ -320,6 +357,7 @@ static int tegra20_regulator_balance_voltage(struct regulator_coupler *coupler,
}
tegra->sys_reboot_mode = READ_ONCE(tegra->sys_reboot_mode_req);
+ tegra->sys_suspend_mode = READ_ONCE(tegra->sys_suspend_mode_req);
if (rdev == cpu_rdev)
return tegra20_cpu_voltage_update(tegra, cpu_rdev,
@@ -334,6 +372,63 @@ static int tegra20_regulator_balance_voltage(struct regulator_coupler *coupler,
return -EPERM;
}
+static int tegra20_regulator_prepare_suspend(struct tegra_regulator_coupler *tegra,
+ bool sys_suspend_mode)
+{
+ int err;
+
+ if (!tegra->core_rdev || !tegra->rtc_rdev || !tegra->cpu_rdev)
+ return 0;
+
+ /*
+ * All power domains are enabled early during resume from suspend by
+ * the GENPD core. Domains like VENC may require a higher voltage when
+ * they are enabled during resume, so bump the rails to their nominal
+ * levels here. This also prepares the hardware for resuming from LP0.
+ */
+
+ WRITE_ONCE(tegra->sys_suspend_mode_req, sys_suspend_mode);
+
+ err = regulator_sync_voltage_rdev(tegra->cpu_rdev);
+ if (err)
+ return err;
+
+ err = regulator_sync_voltage_rdev(tegra->core_rdev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra20_regulator_suspend(struct notifier_block *notifier,
+ unsigned long mode, void *arg)
+{
+ struct tegra_regulator_coupler *tegra;
+ int ret = 0;
+
+ tegra = container_of(notifier, struct tegra_regulator_coupler,
+ suspend_notifier);
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ ret = tegra20_regulator_prepare_suspend(tegra, true);
+ break;
+
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ ret = tegra20_regulator_prepare_suspend(tegra, false);
+ break;
+ }
+
+ if (ret)
+ pr_err("failed to prepare regulators: %d\n", ret);
+
+ return notifier_from_errno(ret);
+}
+
static int tegra20_regulator_prepare_reboot(struct tegra_regulator_coupler *tegra,
bool sys_reboot_mode)
{
@@ -444,6 +539,7 @@ static struct tegra_regulator_coupler tegra20_coupler = {
.balance_voltage = tegra20_regulator_balance_voltage,
},
.reboot_notifier.notifier_call = tegra20_regulator_reboot,
+ .suspend_notifier.notifier_call = tegra20_regulator_suspend,
};
static int __init tegra_regulator_coupler_init(void)
@@ -456,6 +552,9 @@ static int __init tegra_regulator_coupler_init(void)
err = register_reboot_notifier(&tegra20_coupler.reboot_notifier);
WARN_ON(err);
+ err = register_pm_notifier(&tegra20_coupler.suspend_notifier);
+ WARN_ON(err);
+
return regulator_coupler_register(&tegra20_coupler.coupler);
}
arch_initcall(tegra_regulator_coupler_init);
diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
index e74bbc9c7859..8fd43c689134 100644
--- a/drivers/soc/tegra/regulators-tegra30.c
+++ b/drivers/soc/tegra/regulators-tegra30.c
@@ -16,6 +16,7 @@
#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/suspend.h>
#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>
@@ -25,9 +26,12 @@ struct tegra_regulator_coupler {
struct regulator_dev *core_rdev;
struct regulator_dev *cpu_rdev;
struct notifier_block reboot_notifier;
+ struct notifier_block suspend_notifier;
int core_min_uV, cpu_min_uV;
bool sys_reboot_mode_req;
bool sys_reboot_mode;
+ bool sys_suspend_mode_req;
+ bool sys_suspend_mode;
};
static inline struct tegra_regulator_coupler *
@@ -113,6 +117,52 @@ static int tegra30_core_cpu_limit(int cpu_uV)
return -EINVAL;
}
+static int tegra30_cpu_nominal_uV(void)
+{
+ switch (tegra_sku_info.cpu_speedo_id) {
+ case 10 ... 11:
+ return 850000;
+
+ case 9:
+ return 912000;
+
+ case 1 ... 3:
+ case 7 ... 8:
+ return 1050000;
+
+ default:
+ return 1125000;
+
+ case 4 ... 6:
+ case 12 ... 13:
+ return 1237000;
+ }
+}
+
+static int tegra30_core_nominal_uV(void)
+{
+ switch (tegra_sku_info.soc_speedo_id) {
+ case 0:
+ return 1200000;
+
+ case 1:
+ if (tegra_sku_info.cpu_speedo_id != 7 &&
+ tegra_sku_info.cpu_speedo_id != 8)
+ return 1200000;
+
+ fallthrough;
+
+ case 2:
+ if (tegra_sku_info.cpu_speedo_id != 13)
+ return 1300000;
+
+ return 1350000;
+
+ default:
+ return 1250000;
+ }
+}
+
static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
struct regulator_dev *cpu_rdev,
struct regulator_dev *core_rdev)
@@ -168,6 +218,11 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
if (err)
return err;
+ /* prepare voltage level for suspend */
+ if (tegra->sys_suspend_mode)
+ core_min_uV = clamp(tegra30_core_nominal_uV(),
+ core_min_uV, core_max_uV);
+
core_uV = regulator_get_voltage_rdev(core_rdev);
if (core_uV < 0)
return core_uV;
@@ -223,6 +278,11 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
if (tegra->sys_reboot_mode)
cpu_min_uV = max(cpu_min_uV, tegra->cpu_min_uV);
+ /* prepare voltage level for suspend */
+ if (tegra->sys_suspend_mode)
+ cpu_min_uV = clamp(tegra30_cpu_nominal_uV(),
+ cpu_min_uV, cpu_max_uV);
+
if (core_min_limited_uV > core_uV) {
pr_err("core voltage constraint violated: %d %d %d\n",
core_uV, core_min_limited_uV, cpu_uV);
@@ -292,10 +352,68 @@ static int tegra30_regulator_balance_voltage(struct regulator_coupler *coupler,
}
tegra->sys_reboot_mode = READ_ONCE(tegra->sys_reboot_mode_req);
+ tegra->sys_suspend_mode = READ_ONCE(tegra->sys_suspend_mode_req);
return tegra30_voltage_update(tegra, cpu_rdev, core_rdev);
}
+static int tegra30_regulator_prepare_suspend(struct tegra_regulator_coupler *tegra,
+ bool sys_suspend_mode)
+{
+ int err;
+
+ if (!tegra->core_rdev || !tegra->cpu_rdev)
+ return 0;
+
+ /*
+ * All power domains are enabled early during resume from suspend by
+ * the GENPD core. Domains like VENC may require a higher voltage when
+ * they are enabled during resume, so bump the rails to their nominal
+ * levels here. This also prepares the hardware for resuming from LP0.
+ */
+
+ WRITE_ONCE(tegra->sys_suspend_mode_req, sys_suspend_mode);
+
+ err = regulator_sync_voltage_rdev(tegra->cpu_rdev);
+ if (err)
+ return err;
+
+ err = regulator_sync_voltage_rdev(tegra->core_rdev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra30_regulator_suspend(struct notifier_block *notifier,
+ unsigned long mode, void *arg)
+{
+ struct tegra_regulator_coupler *tegra;
+ int ret = 0;
+
+ tegra = container_of(notifier, struct tegra_regulator_coupler,
+ suspend_notifier);
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_RESTORE_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ ret = tegra30_regulator_prepare_suspend(tegra, true);
+ break;
+
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ ret = tegra30_regulator_prepare_suspend(tegra, false);
+ break;
+ }
+
+ if (ret)
+ pr_err("failed to prepare regulators: %d\n", ret);
+
+ return notifier_from_errno(ret);
+}
+
static int tegra30_regulator_prepare_reboot(struct tegra_regulator_coupler *tegra,
bool sys_reboot_mode)
{
@@ -395,6 +513,7 @@ static struct tegra_regulator_coupler tegra30_coupler = {
.balance_voltage = tegra30_regulator_balance_voltage,
},
.reboot_notifier.notifier_call = tegra30_regulator_reboot,
+ .suspend_notifier.notifier_call = tegra30_regulator_suspend,
};
static int __init tegra_regulator_coupler_init(void)
@@ -407,6 +526,9 @@ static int __init tegra_regulator_coupler_init(void)
err = register_reboot_notifier(&tegra30_coupler.reboot_notifier);
WARN_ON(err);
+ err = register_pm_notifier(&tegra30_coupler.suspend_notifier);
+ WARN_ON(err);
+
return regulator_coupler_register(&tegra30_coupler.coupler);
}
arch_initcall(tegra_regulator_coupler_init);
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index fd91129de6e5..b6b2150aca4e 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -40,7 +40,8 @@ static const struct k3_soc_id {
{ 0xBB5A, "AM65X" },
{ 0xBB64, "J721E" },
{ 0xBB6D, "J7200" },
- { 0xBB38, "AM64X" }
+ { 0xBB38, "AM64X" },
+ { 0xBB75, "J721S2" },
};
static int
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 591d14ebcb11..700d8eecd8c4 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -646,31 +646,31 @@ static int dma_init(struct device_node *cloud, struct device_node *dma_node)
}
dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
- if (!dma->reg_global)
- return -ENODEV;
+ if (IS_ERR(dma->reg_global))
+ return PTR_ERR(dma->reg_global);
if (size < sizeof(struct reg_global)) {
dev_err(kdev->dev, "bad size %pa for global regs\n", &size);
return -ENODEV;
}
dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
- if (!dma->reg_tx_chan)
- return -ENODEV;
+ if (IS_ERR(dma->reg_tx_chan))
+ return PTR_ERR(dma->reg_tx_chan);
max_tx_chan = size / sizeof(struct reg_chan);
dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
- if (!dma->reg_rx_chan)
- return -ENODEV;
+ if (IS_ERR(dma->reg_rx_chan))
+ return PTR_ERR(dma->reg_rx_chan);
max_rx_chan = size / sizeof(struct reg_chan);
dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
- if (!dma->reg_tx_sched)
- return -ENODEV;
+ if (IS_ERR(dma->reg_tx_sched))
+ return PTR_ERR(dma->reg_tx_sched);
max_tx_sched = size / sizeof(struct reg_tx_sched);
dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
- if (!dma->reg_rx_flow)
- return -ENODEV;
+ if (IS_ERR(dma->reg_rx_flow))
+ return PTR_ERR(dma->reg_rx_flow);
max_rx_flow = size / sizeof(struct reg_rx_flow);
dma->rx_priority = DMA_PRIO_DEFAULT;
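
The conversion above relies on pktdma_get_regs() returning ERR_PTR()-encoded errors instead of NULL, so dma_init() can forward the real error code. A minimal sketch of that convention (helper name and error value are illustrative):

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* Return a valid __iomem pointer, or an ERR_PTR() that the caller
 * propagates unchanged via PTR_ERR(). */
static void __iomem *example_get_regs(struct device_node *node, int index)
{
	void __iomem *regs = of_iomap(node, index);

	return regs ?: IOMEM_ERR_PTR(-ENODEV);
}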
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index 49da387d7749..b36779309e49 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -129,7 +129,7 @@ static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
clks_np = of_get_child_by_name(cfg_node, "clocks");
if (!clks_np) {
- dev_err(dev, "%pOF is missing its 'clocks' node\n", clks_np);
+ dev_err(dev, "%pOF is missing its 'clocks' node\n", cfg_node);
return -ENODEV;
}
diff --git a/drivers/soc/xilinx/zynqmp_pm_domains.c b/drivers/soc/xilinx/zynqmp_pm_domains.c
index 226d343f0a6a..fcce2433bd6d 100644
--- a/drivers/soc/xilinx/zynqmp_pm_domains.c
+++ b/drivers/soc/xilinx/zynqmp_pm_domains.c
@@ -20,8 +20,6 @@
#include <linux/firmware/xlnx-zynqmp.h>
#define ZYNQMP_NUM_DOMAINS (100)
-/* Flag stating if PM nodes mapped to the PM domain has been requested */
-#define ZYNQMP_PM_DOMAIN_REQUESTED BIT(0)
static int min_capability;
@@ -29,14 +27,17 @@ static int min_capability;
* struct zynqmp_pm_domain - Wrapper around struct generic_pm_domain
* @gpd: Generic power domain
* @node_id: PM node ID corresponding to device inside PM domain
- * @flags: ZynqMP PM domain flags
+ * @requested: The PM node mapped to the PM domain has been requested
*/
struct zynqmp_pm_domain {
struct generic_pm_domain gpd;
u32 node_id;
- u8 flags;
+ bool requested;
};
+#define to_zynqmp_pm_domain(pm_domain) \
+ container_of(pm_domain, struct zynqmp_pm_domain, gpd)
+
/**
* zynqmp_gpd_is_active_wakeup_path() - Check if device is in wakeup source
* path
@@ -71,21 +72,23 @@ static int zynqmp_gpd_is_active_wakeup_path(struct device *dev, void *not_used)
*/
static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
{
+ struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
int ret;
- struct zynqmp_pm_domain *pd;
- pd = container_of(domain, struct zynqmp_pm_domain, gpd);
ret = zynqmp_pm_set_requirement(pd->node_id,
ZYNQMP_PM_CAPABILITY_ACCESS,
ZYNQMP_PM_MAX_QOS,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
if (ret) {
- pr_err("%s() %s set requirement for node %d failed: %d\n",
- __func__, domain->name, pd->node_id, ret);
+ dev_err(&domain->dev,
+ "failed to set requirement to 0x%x for PM node id %d: %d\n",
+ ZYNQMP_PM_CAPABILITY_ACCESS, pd->node_id, ret);
return ret;
}
- pr_debug("%s() Powered on %s domain\n", __func__, domain->name);
+ dev_dbg(&domain->dev, "set requirement to 0x%x for PM node id %d\n",
+ ZYNQMP_PM_CAPABILITY_ACCESS, pd->node_id);
+
return 0;
}
@@ -100,18 +103,16 @@ static int zynqmp_gpd_power_on(struct generic_pm_domain *domain)
*/
static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
{
+ struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
int ret;
struct pm_domain_data *pdd, *tmp;
- struct zynqmp_pm_domain *pd;
u32 capabilities = min_capability;
bool may_wakeup;
- pd = container_of(domain, struct zynqmp_pm_domain, gpd);
-
/* If domain is already released there is nothing to be done */
- if (!(pd->flags & ZYNQMP_PM_DOMAIN_REQUESTED)) {
- pr_debug("%s() %s domain is already released\n",
- __func__, domain->name);
+ if (!pd->requested) {
+ dev_dbg(&domain->dev, "PM node id %d is already released\n",
+ pd->node_id);
return 0;
}
@@ -128,17 +129,16 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
ret = zynqmp_pm_set_requirement(pd->node_id, capabilities, 0,
ZYNQMP_PM_REQUEST_ACK_NO);
- /**
- * If powering down of any node inside this domain fails,
- * report and return the error
- */
if (ret) {
- pr_err("%s() %s set requirement for node %d failed: %d\n",
- __func__, domain->name, pd->node_id, ret);
+ dev_err(&domain->dev,
+ "failed to set requirement to 0x%x for PM node id %d: %d\n",
+ capabilities, pd->node_id, ret);
return ret;
}
- pr_debug("%s() Powered off %s domain\n", __func__, domain->name);
+ dev_dbg(&domain->dev, "set requirement to 0x%x for PM node id %d\n",
+ capabilities, pd->node_id);
+
return 0;
}
@@ -152,10 +152,14 @@ static int zynqmp_gpd_power_off(struct generic_pm_domain *domain)
static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
+ struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
+ struct device_link *link;
int ret;
- struct zynqmp_pm_domain *pd;
- pd = container_of(domain, struct zynqmp_pm_domain, gpd);
+ link = device_link_add(dev, &domain->dev, DL_FLAG_SYNC_STATE_ONLY);
+ if (!link)
+ dev_dbg(&domain->dev, "failed to create device link for %s\n",
+ dev_name(dev));
/* If this is not the first device to attach there is nothing to do */
if (domain->device_count)
@@ -163,17 +167,17 @@ static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
ret = zynqmp_pm_request_node(pd->node_id, 0, 0,
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
- /* If requesting a node fails print and return the error */
if (ret) {
- pr_err("%s() %s request failed for node %d: %d\n",
- __func__, domain->name, pd->node_id, ret);
+ dev_err(&domain->dev, "%s request failed for node %d: %d\n",
+ domain->name, pd->node_id, ret);
return ret;
}
- pd->flags |= ZYNQMP_PM_DOMAIN_REQUESTED;
+ pd->requested = true;
+
+ dev_dbg(&domain->dev, "%s requested PM node id %d\n",
+ dev_name(dev), pd->node_id);
- pr_debug("%s() %s attached to %s domain\n", __func__,
- dev_name(dev), domain->name);
return 0;
}
@@ -185,27 +189,24 @@ static int zynqmp_gpd_attach_dev(struct generic_pm_domain *domain,
static void zynqmp_gpd_detach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
+ struct zynqmp_pm_domain *pd = to_zynqmp_pm_domain(domain);
int ret;
- struct zynqmp_pm_domain *pd;
-
- pd = container_of(domain, struct zynqmp_pm_domain, gpd);
/* If this is not the last device to detach there is nothing to do */
if (domain->device_count)
return;
ret = zynqmp_pm_release_node(pd->node_id);
- /* If releasing a node fails print the error and return */
if (ret) {
- pr_err("%s() %s release failed for node %d: %d\n",
- __func__, domain->name, pd->node_id, ret);
+ dev_err(&domain->dev, "failed to release PM node id %d: %d\n",
+ pd->node_id, ret);
return;
}
- pd->flags &= ~ZYNQMP_PM_DOMAIN_REQUESTED;
+ pd->requested = false;
- pr_debug("%s() %s detached from %s domain\n", __func__,
- dev_name(dev), domain->name);
+ dev_dbg(&domain->dev, "%s released PM node id %d\n",
+ dev_name(dev), pd->node_id);
}
static struct generic_pm_domain *zynqmp_gpd_xlate
@@ -215,7 +216,7 @@ static struct generic_pm_domain *zynqmp_gpd_xlate
unsigned int i, idx = genpdspec->args[0];
struct zynqmp_pm_domain *pd;
- pd = container_of(genpd_data->domains[0], struct zynqmp_pm_domain, gpd);
+ pd = to_zynqmp_pm_domain(genpd_data->domains[0]);
if (genpdspec->args_count != 1)
return ERR_PTR(-EINVAL);
@@ -299,9 +300,19 @@ static int zynqmp_gpd_remove(struct platform_device *pdev)
return 0;
}
+static void zynqmp_gpd_sync_state(struct device *dev)
+{
+ int ret;
+
+ ret = zynqmp_pm_init_finalize();
+ if (ret)
+ dev_warn(dev, "failed to release power management to firmware\n");
+}
+
static struct platform_driver zynqmp_power_domain_driver = {
.driver = {
.name = "zynqmp_power_controller",
+ .sync_state = zynqmp_gpd_sync_state,
},
.probe = zynqmp_gpd_probe,
.remove = zynqmp_gpd_remove,
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index c556623dae02..f8c301984d4f 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -178,7 +178,6 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
u32 pm_api_version;
struct mbox_client *client;
- zynqmp_pm_init_finalize();
zynqmp_pm_get_api_version(&pm_api_version);
/* Check PM API version number */
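The two hunks above move zynqmp_pm_init_finalize() out of the probe path and behind a .sync_state() callback, so power management is handed over to the firmware only after every consumer of the PM domains has probed. A minimal sketch of that pattern, with a hypothetical provider "foo" standing in for the real zynqmp_gpd_attach_dev()/zynqmp_gpd_sync_state() callbacks:

static void foo_sync_state(struct device *dev)
{
	/* Runs once all consumers linked below have probed; a one-time
	 * firmware handoff such as zynqmp_pm_init_finalize() belongs here. */
}

static int foo_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	/* SYNC_STATE_ONLY links carry no probe or PM ordering; they only
	 * tell the driver core when to invoke foo_sync_state(). */
	if (!device_link_add(dev, &domain->dev, DL_FLAG_SYNC_STATE_ONLY))
		dev_dbg(&domain->dev, "no device link for %s\n", dev_name(dev));

	return 0;
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo-power-controller",
		.sync_state = foo_sync_state,
	},
};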
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 8b3d268ac63c..b808c94641fa 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -37,6 +37,7 @@
#define CQSPI_NEEDS_WR_DELAY BIT(0)
#define CQSPI_DISABLE_DAC_MODE BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
+#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
@@ -86,6 +87,7 @@ struct cqspi_st {
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
bool use_dma_read;
u32 pd_dev_id;
+ bool wr_completion;
};
struct cqspi_driver_platdata {
@@ -996,9 +998,11 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
* polling on the controller's side. spinand and spi-nor will take
* care of polling the status register.
*/
- reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
- reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
- writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+ if (cqspi->wr_completion) {
+ reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+ reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
+ writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+ }
reg = readl(reg_base + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
@@ -1736,6 +1740,10 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
master->max_speed_hz = cqspi->master_ref_clk_hz;
+
+ /* write completion is supported by default */
+ cqspi->wr_completion = true;
+
ddata = of_device_get_match_data(dev);
if (ddata) {
if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
@@ -1747,6 +1755,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->use_direct_mode = true;
if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
cqspi->use_dma_read = true;
+ if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
+ cqspi->wr_completion = false;
if (of_device_is_compatible(pdev->dev.of_node,
"xlnx,versal-ospi-1.0"))
@@ -1859,6 +1869,10 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
};
+static const struct cqspi_driver_platdata socfpga_qspi = {
+ .quirks = CQSPI_NO_SUPPORT_WR_COMPLETION,
+};
+
static const struct cqspi_driver_platdata versal_ospi = {
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA,
@@ -1887,6 +1901,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
.compatible = "xlnx,versal-ospi-1.0",
.data = (void *)&versal_ospi,
},
+ {
+ .compatible = "intel,socfpga-qspi",
+ .data = (void *)&socfpga_qspi,
+ },
{ /* end of table */ }
};
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 5d98611dd999..c72e501c270f 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -912,7 +912,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_controller error.\n");
+ dev_err_probe(&pdev->dev, ret, "spi_register_controller error: %i\n", ret);
goto out_pm_get;
}
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 27a446faf143..e2affaee4e76 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -491,22 +491,26 @@ static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
int ret;
mas->tx = dma_request_chan(mas->dev, "tx");
- ret = dev_err_probe(mas->dev, IS_ERR(mas->tx), "Failed to get tx DMA ch\n");
- if (ret < 0)
+ if (IS_ERR(mas->tx)) {
+ ret = dev_err_probe(mas->dev, PTR_ERR(mas->tx),
+ "Failed to get tx DMA ch\n");
goto err_tx;
+ }
mas->rx = dma_request_chan(mas->dev, "rx");
- ret = dev_err_probe(mas->dev, IS_ERR(mas->rx), "Failed to get rx DMA ch\n");
- if (ret < 0)
+ if (IS_ERR(mas->rx)) {
+ ret = dev_err_probe(mas->dev, PTR_ERR(mas->rx),
+ "Failed to get rx DMA ch\n");
goto err_rx;
+ }
return 0;
err_rx:
+ mas->rx = NULL;
dma_release_channel(mas->tx);
- mas->tx = NULL;
err_tx:
- mas->rx = NULL;
+ mas->tx = NULL;
return ret;
}
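The hunk above fixes a misuse of dev_err_probe(): its second argument must be the error code itself (which dev_err_probe() also returns), while IS_ERR() only evaluates to 0 or 1, so the old checks never saw the failure. A minimal sketch of the intended idiom, using a hypothetical channel name:

	chan = dma_request_chan(dev, "hypothetical-ch");
	if (IS_ERR(chan))
		/* Prints the message (or silently defers on -EPROBE_DEFER)
		 * and returns the error code passed in. */
		return dev_err_probe(dev, PTR_ERR(chan),
				     "failed to get DMA channel\n");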
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
index 83796a4ead34..fe82f3575df4 100644
--- a/drivers/spi/spi-rpc-if.c
+++ b/drivers/spi/spi-rpc-if.c
@@ -156,7 +156,9 @@ static int rpcif_spi_probe(struct platform_device *pdev)
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_QUAD | SPI_RX_QUAD;
ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX;
- rpcif_hw_init(rpc, false);
+ error = rpcif_hw_init(rpc, false);
+ if (error)
+ return error;
error = spi_register_controller(ctlr);
if (error) {
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index e8204e155484..2a03739a0c60 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -18,12 +18,15 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/spi/spi.h>
+#include <soc/tegra/common.h>
+
#define SLINK_COMMAND 0x000
#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
@@ -680,7 +683,7 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi,
bits_per_word = t->bits_per_word;
speed = t->speed_hz;
if (speed != tspi->cur_speed) {
- clk_set_rate(tspi->clk, speed * 4);
+ dev_pm_opp_set_rate(tspi->dev, speed * 4);
tspi->cur_speed = speed;
}
@@ -1066,6 +1069,10 @@ static int tegra_slink_probe(struct platform_device *pdev)
goto exit_free_master;
}
+ ret = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (ret)
+ goto exit_free_master;
+
tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b23e675953e1..fdd530b150a7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -3099,12 +3099,6 @@ void spi_unregister_controller(struct spi_controller *ctlr)
device_del(&ctlr->dev);
- /* Release the last reference on the controller if its driver
- * has not yet been converted to devm_spi_alloc_master/slave().
- */
- if (!ctlr->devm_allocated)
- put_device(&ctlr->dev);
-
/* free bus id */
mutex_lock(&board_lock);
if (found == ctlr)
@@ -3113,6 +3107,12 @@ void spi_unregister_controller(struct spi_controller *ctlr)
if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
mutex_unlock(&ctlr->add_lock);
+
+ /* Release the last reference on the controller if its driver
+ * has not yet been converted to devm_spi_alloc_master/slave().
+ */
+ if (!ctlr->devm_allocated)
+ put_device(&ctlr->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
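The reordering above matters because add_lock is embedded in the controller structure, so dropping the last reference before the unlock can free the memory the mutex lives in. The hazard, in a simplified sketch:

	mutex_lock(&ctlr->add_lock);
	put_device(&ctlr->dev);		/* may free ctlr, and add_lock with it */
	mutex_unlock(&ctlr->add_lock);	/* would then touch freed memory */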
diff --git a/drivers/staging/media/tegra-vde/vde.c b/drivers/staging/media/tegra-vde/vde.c
index ed4c1250b303..859f60a70904 100644
--- a/drivers/staging/media/tegra-vde/vde.c
+++ b/drivers/staging/media/tegra-vde/vde.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <soc/tegra/common.h>
#include <soc/tegra/pmc.h>
#include "uapi.h"
@@ -920,13 +921,17 @@ static __maybe_unused int tegra_vde_runtime_suspend(struct device *dev)
struct tegra_vde *vde = dev_get_drvdata(dev);
int err;
- err = tegra_powergate_power_off(TEGRA_POWERGATE_VDEC);
- if (err) {
- dev_err(dev, "Failed to power down HW: %d\n", err);
- return err;
+ if (!dev->pm_domain) {
+ err = tegra_powergate_power_off(TEGRA_POWERGATE_VDEC);
+ if (err) {
+ dev_err(dev, "Failed to power down HW: %d\n", err);
+ return err;
+ }
}
clk_disable_unprepare(vde->clk);
+ reset_control_release(vde->rst);
+ reset_control_release(vde->rst_mc);
return 0;
}
@@ -936,14 +941,45 @@ static __maybe_unused int tegra_vde_runtime_resume(struct device *dev)
struct tegra_vde *vde = dev_get_drvdata(dev);
int err;
- err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VDEC,
- vde->clk, vde->rst);
+ err = reset_control_acquire(vde->rst_mc);
if (err) {
- dev_err(dev, "Failed to power up HW : %d\n", err);
+ dev_err(dev, "Failed to acquire mc reset: %d\n", err);
return err;
}
+ err = reset_control_acquire(vde->rst);
+ if (err) {
+ dev_err(dev, "Failed to acquire reset: %d\n", err);
+ goto release_mc_reset;
+ }
+
+ if (!dev->pm_domain) {
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VDEC,
+ vde->clk, vde->rst);
+ if (err) {
+ dev_err(dev, "Failed to power up HW : %d\n", err);
+ goto release_reset;
+ }
+ } else {
+ /*
+ * tegra_powergate_sequence_power_up() leaves clocks enabled,
+ * while GENPD does not.
+ */
+ err = clk_prepare_enable(vde->clk);
+ if (err) {
+ dev_err(dev, "Failed to enable clock: %d\n", err);
+ goto release_reset;
+ }
+ }
+
return 0;
+
+release_reset:
+ reset_control_release(vde->rst);
+release_mc_reset:
+ reset_control_release(vde->rst_mc);
+
+ return err;
}
static int tegra_vde_probe(struct platform_device *pdev)
@@ -1001,14 +1037,14 @@ static int tegra_vde_probe(struct platform_device *pdev)
return err;
}
- vde->rst = devm_reset_control_get(dev, NULL);
+ vde->rst = devm_reset_control_get_exclusive_released(dev, NULL);
if (IS_ERR(vde->rst)) {
err = PTR_ERR(vde->rst);
dev_err(dev, "Could not get VDE reset %d\n", err);
return err;
}
- vde->rst_mc = devm_reset_control_get_optional(dev, "mc");
+ vde->rst_mc = devm_reset_control_get_optional_exclusive_released(dev, "mc");
if (IS_ERR(vde->rst_mc)) {
err = PTR_ERR(vde->rst_mc);
dev_err(dev, "Could not get MC reset %d\n", err);
@@ -1026,6 +1062,12 @@ static int tegra_vde_probe(struct platform_device *pdev)
return err;
}
+ err = devm_tegra_core_dev_init_opp_table_common(dev);
+ if (err) {
+ dev_err(dev, "Could not initialize OPP table %d\n", err);
+ return err;
+ }
+
vde->iram_pool = of_gen_pool_get(dev->of_node, "iram", 0);
if (!vde->iram_pool) {
dev_err(dev, "Could not get IRAM pool\n");
@@ -1133,8 +1175,7 @@ static void tegra_vde_shutdown(struct platform_device *pdev)
* On some devices bootloader isn't ready to a power-gated VDE on
* a warm-reboot, machine will hang in that case.
*/
- if (pm_runtime_status_suspended(&pdev->dev))
- tegra_vde_runtime_resume(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
}
static __maybe_unused int tegra_vde_pm_suspend(struct device *dev)
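The VDE hunks above switch to "released" reset controls so the resets can also be managed by the power domain: the driver only owns them between reset_control_acquire() and reset_control_release(). A minimal sketch of that contract (error handling trimmed):

	rst = devm_reset_control_get_exclusive_released(dev, NULL);

	/* Take temporary exclusive ownership only while the reset is used. */
	err = reset_control_acquire(rst);
	if (!err) {
		reset_control_assert(rst);
		reset_control_deassert(rst);
		/* Hand the reset back so another owner may acquire it. */
		reset_control_release(rst);
	}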
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index 66b8a17f14c4..a6eff388d300 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_OPTEE) += optee.o
optee-objs += core.o
optee-objs += call.o
+optee-objs += notif.o
optee-objs += rpc.o
optee-objs += supp.o
optee-objs += device.o
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index ab2edfcc6c70..ba7300ca9ddf 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -159,6 +159,7 @@ void optee_remove_common(struct optee *optee)
/* Unregister OP-TEE specific client devices on TEE bus */
optee_unregister_devices();
+ optee_notif_uninit(optee);
/*
* The two devices have to be unregistered before we can free the
* other resources.
@@ -167,7 +168,6 @@ void optee_remove_common(struct optee *optee)
tee_device_unregister(optee->teedev);
tee_shm_pool_free(optee->pool);
- optee_wait_queue_exit(&optee->wait_queue);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
}
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index 45424824e0f9..3577781e5df7 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -856,9 +856,13 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
mutex_init(&optee->ffa.mutex);
mutex_init(&optee->call_queue.mutex);
INIT_LIST_HEAD(&optee->call_queue.waiters);
- optee_wait_queue_init(&optee->wait_queue);
optee_supp_init(&optee->supp);
ffa_dev_set_drvdata(ffa_dev, optee);
+ rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
+ if (rc) {
+ optee_ffa_remove(ffa_dev);
+ return rc;
+ }
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
if (rc) {
diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c
new file mode 100644
index 000000000000..a28fa03dcd0e
--- /dev/null
+++ b/drivers/tee/optee/notif.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+
+struct notif_entry {
+ struct list_head link;
+ struct completion c;
+ u_int key;
+};
+
+static bool have_key(struct optee *optee, u_int key)
+{
+ struct notif_entry *entry;
+
+ list_for_each_entry(entry, &optee->notif.db, link)
+ if (entry->key == key)
+ return true;
+
+ return false;
+}
+
+int optee_notif_wait(struct optee *optee, u_int key)
+{
+ unsigned long flags;
+ struct notif_entry *entry;
+ int rc = 0;
+
+ if (key > optee->notif.max_key)
+ return -EINVAL;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ init_completion(&entry->c);
+ entry->key = key;
+
+ spin_lock_irqsave(&optee->notif.lock, flags);
+
+ /*
+ * If the bit is already set it means that the key has already
+ * been posted and we must not wait.
+ */
+ if (test_bit(key, optee->notif.bitmap)) {
+ clear_bit(key, optee->notif.bitmap);
+ goto out;
+ }
+
+ /*
+ * Check if someone is already waiting for this key. If there is
+ * it's a programming error.
+ */
+ if (have_key(optee, key)) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ list_add_tail(&entry->link, &optee->notif.db);
+
+ /*
+ * Unlock temporarily and wait for completion.
+ */
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
+ wait_for_completion(&entry->c);
+ spin_lock_irqsave(&optee->notif.lock, flags);
+
+ list_del(&entry->link);
+out:
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
+
+ kfree(entry);
+
+ return rc;
+}
+
+int optee_notif_send(struct optee *optee, u_int key)
+{
+ unsigned long flags;
+ struct notif_entry *entry;
+
+ if (key > optee->notif.max_key)
+ return -EINVAL;
+
+ spin_lock_irqsave(&optee->notif.lock, flags);
+
+ list_for_each_entry(entry, &optee->notif.db, link)
+ if (entry->key == key) {
+ complete(&entry->c);
+ goto out;
+ }
+
+ /* Only set the bit in case there was nobody waiting */
+ set_bit(key, optee->notif.bitmap);
+out:
+ spin_unlock_irqrestore(&optee->notif.lock, flags);
+
+ return 0;
+}
+
+int optee_notif_init(struct optee *optee, u_int max_key)
+{
+ spin_lock_init(&optee->notif.lock);
+ INIT_LIST_HEAD(&optee->notif.db);
+ optee->notif.bitmap = bitmap_zalloc(max_key, GFP_KERNEL);
+ if (!optee->notif.bitmap)
+ return -ENOMEM;
+
+ optee->notif.max_key = max_key;
+
+ return 0;
+}
+
+void optee_notif_uninit(struct optee *optee)
+{
+ kfree(optee->notif.bitmap);
+}
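A hypothetical usage sketch of the two primitives above (not part of the file): a waiter and a sender rendezvous on the same key, and if the send arrives first the bit recorded in optee->notif.bitmap makes the later wait return without blocking.

	/* Context A, e.g. serving an OPTEE_RPC_NOTIFICATION_WAIT request: */
	rc = optee_notif_wait(optee, key);	/* sleeps until the key is posted */

	/* Context B, e.g. the asynchronous notification interrupt handler: */
	rc = optee_notif_send(optee, key);	/* wakes the waiter or records the key */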
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index 2422e185d400..70e9cc2ee96b 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -318,6 +318,13 @@ struct optee_msg_arg {
* [in] param[0].u.rmem.shm_ref holds shared memory reference
* [in] param[0].u.rmem.offs 0
* [in] param[0].u.rmem.size 0
+ *
+ * OPTEE_MSG_CMD_DO_BOTTOM_HALF does the scheduled bottom half processing
+ * of a driver.
+ *
+ * OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that from now on
+ * normal world is unable to process asynchronous notifications. Typically
+ * used when the driver is shut down.
*/
#define OPTEE_MSG_CMD_OPEN_SESSION 0
#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
@@ -325,6 +332,8 @@ struct optee_msg_arg {
#define OPTEE_MSG_CMD_CANCEL 3
#define OPTEE_MSG_CMD_REGISTER_SHM 4
#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
+#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6
+#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7
#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
#endif /* _OPTEE_MSG_H */
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 6660e05298db..46f74ab07c7e 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -28,6 +28,13 @@
#define TEEC_ORIGIN_COMMS 0x00000002
+/*
+ * This value should be larger than the number of threads in secure world
+ * to meet the need from secure world. The number of threads in secure
+ * world is usually not even close to 255, so we should be safe for now.
+ */
+#define OPTEE_DEFAULT_MAX_NOTIF_VALUE 255
+
typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long,
@@ -44,10 +51,13 @@ struct optee_call_queue {
struct list_head waiters;
};
-struct optee_wait_queue {
- /* Serializes access to this struct */
- struct mutex mu;
+struct optee_notif {
+ u_int max_key;
+ struct tee_context *ctx;
+ /* Serializes access to the elements below in this struct */
+ spinlock_t lock;
struct list_head db;
+ u_long *bitmap;
};
/**
@@ -79,6 +89,7 @@ struct optee_smc {
optee_invoke_fn *invoke_fn;
void *memremaped_shm;
u32 sec_caps;
+ unsigned int notif_irq;
};
/**
@@ -129,8 +140,7 @@ struct optee_ops {
* @smc: specific to SMC ABI
* @ffa: specific to FF-A ABI
* @call_queue: queue of threads waiting to call @invoke_fn
- * @wait_queue: queue of threads from secure world waiting for a
- * secure world sync object
+ * @notif: notification synchronization struct
* @supp: supplicant synchronization struct for RPC to supplicant
* @pool: shared memory pool
* @rpc_arg_count: If > 0 number of RPC parameters to make room for
@@ -147,7 +157,7 @@ struct optee {
struct optee_ffa ffa;
};
struct optee_call_queue call_queue;
- struct optee_wait_queue wait_queue;
+ struct optee_notif notif;
struct optee_supp supp;
struct tee_shm_pool *pool;
unsigned int rpc_arg_count;
@@ -185,8 +195,10 @@ struct optee_call_ctx {
size_t num_entries;
};
-void optee_wait_queue_init(struct optee_wait_queue *wq);
-void optee_wait_queue_exit(struct optee_wait_queue *wq);
+int optee_notif_init(struct optee *optee, u_int max_key);
+void optee_notif_uninit(struct optee *optee);
+int optee_notif_wait(struct optee *optee, u_int key);
+int optee_notif_send(struct optee *optee, u_int key);
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct tee_param *param);
diff --git a/drivers/tee/optee/optee_rpc_cmd.h b/drivers/tee/optee/optee_rpc_cmd.h
index b8275140cef8..f3f06e0994a7 100644
--- a/drivers/tee/optee/optee_rpc_cmd.h
+++ b/drivers/tee/optee/optee_rpc_cmd.h
@@ -28,24 +28,27 @@
#define OPTEE_RPC_CMD_GET_TIME 3
/*
- * Wait queue primitive, helper for secure world to implement a wait queue.
+ * Notification from/to secure world.
*
- * If secure world needs to wait for a secure world mutex it issues a sleep
- * request instead of spinning in secure world. Conversely is a wakeup
- * request issued when a secure world mutex with a thread waiting thread is
- * unlocked.
+ * If secure world needs to wait for something, for instance a mutex, it
+ * does a notification wait request instead of spinning in secure world.
+ * Conversely, a synchronous notification can be sent when a secure
+ * world mutex with a waiting thread is unlocked.
*
- * Waiting on a key
- * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_SLEEP
- * [in] value[0].b Wait key
+ * This interface can also be used to wait for an asynchronous notification
+ * which instead is sent via a non-secure interrupt.
*
- * Waking up a key
- * [in] value[0].a OPTEE_RPC_WAIT_QUEUE_WAKEUP
- * [in] value[0].b Wakeup key
+ * Waiting on notification
+ * [in] value[0].a OPTEE_RPC_NOTIFICATION_WAIT
+ * [in] value[0].b notification value
+ *
+ * Sending a synchronous notification
+ * [in] value[0].a OPTEE_RPC_NOTIFICATION_SEND
+ * [in] value[0].b notification value
*/
-#define OPTEE_RPC_CMD_WAIT_QUEUE 4
-#define OPTEE_RPC_WAIT_QUEUE_SLEEP 0
-#define OPTEE_RPC_WAIT_QUEUE_WAKEUP 1
+#define OPTEE_RPC_CMD_NOTIFICATION 4
+#define OPTEE_RPC_NOTIFICATION_WAIT 0
+#define OPTEE_RPC_NOTIFICATION_SEND 1
/*
* Suspend execution
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index 80eb763a8a80..d44a6ae994f8 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -107,6 +107,12 @@ struct optee_smc_call_get_os_revision_result {
/*
* Call with struct optee_msg_arg as argument
*
+ * When calling this function normal world has a few responsibilities:
+ * 1. It must be able to handle eventual RPCs
+ * 2. Non-secure interrupts should not be masked
+ * 3. If asynchronous notifications have been negotiated successfully, then
+ * asynchronous notifications should be unmasked during this call.
+ *
* Call register usage:
* a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
* a1 Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
@@ -195,7 +201,8 @@ struct optee_smc_get_shm_config_result {
* Normal return register usage:
* a0 OPTEE_SMC_RETURN_OK
* a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
- * a2-7 Preserved
+ * a2 The maximum secure world notification number
+ * a3-7 Preserved
*
* Error return register usage:
* a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
@@ -218,6 +225,8 @@ struct optee_smc_get_shm_config_result {
#define OPTEE_SMC_SEC_CAP_VIRTUALIZATION BIT(3)
/* Secure world supports Shared Memory with a NULL reference */
#define OPTEE_SMC_SEC_CAP_MEMREF_NULL BIT(4)
+/* Secure world supports asynchronous notification of normal world */
+#define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF BIT(5)
#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
@@ -226,8 +235,8 @@ struct optee_smc_get_shm_config_result {
struct optee_smc_exchange_capabilities_result {
unsigned long status;
unsigned long capabilities;
+ unsigned long max_notif_value;
unsigned long reserved0;
- unsigned long reserved1;
};
/*
@@ -320,6 +329,68 @@ struct optee_smc_disable_shm_cache_result {
OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_THREAD_COUNT)
/*
+ * Inform OP-TEE that normal world is able to receive asynchronous
+ * notifications.
+ *
+ * Call requests usage:
+ * a0 SMC Function ID, OPTEE_SMC_ENABLE_ASYNC_NOTIF
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1-7 Preserved
+ *
+ * Not supported return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF 16
+#define OPTEE_SMC_ENABLE_ASYNC_NOTIF \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_ASYNC_NOTIF)
+
+/*
+ * Retrieve a value of notifications pending since the last call of this
+ * function.
+ *
+ * OP-TEE keeps a record of all posted values. When an interrupt is
+ * received which indicates that there are posted values, this function
+ * should be called until all pending values have been retrieved. When a
+ * value is retrieved, it's cleared from the record in secure world.
+ *
+ * Call requests usage:
+ * a0 SMC Function ID, OPTEE_SMC_GET_ASYNC_NOTIF_VALUE
+ * a1-6 Not used
+ * a7 Hypervisor Client ID register
+ *
+ * Normal return register usage:
+ * a0 OPTEE_SMC_RETURN_OK
+ * a1 value
+ * a2 Bit[0]: OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID if the value in a1 is
+ * valid, else 0 if no values were pending
+ * a2 Bit[1]: OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING if another value is
+ * pending, else 0.
+ * Bit[31:2]: MBZ
+ * a3-7 Preserved
+ *
+ * Not supported return register usage:
+ * a0 OPTEE_SMC_RETURN_ENOTAVAIL
+ * a1-7 Preserved
+ */
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID BIT(0)
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING BIT(1)
+
+/*
+ * Notification that OP-TEE expects a yielding call to do some bottom half
+ * work in a driver.
+ */
+#define OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF 0
+
+#define OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE 17
+#define OPTEE_SMC_GET_ASYNC_NOTIF_VALUE \
+ OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE)
+
+/*
* Resume from RPC (for example after processing a foreign interrupt)
*
* Call register usage:
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index cd642e340eaf..e69bc6380683 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -12,23 +12,6 @@
#include "optee_private.h"
#include "optee_rpc_cmd.h"
-struct wq_entry {
- struct list_head link;
- struct completion c;
- u32 key;
-};
-
-void optee_wait_queue_init(struct optee_wait_queue *priv)
-{
- mutex_init(&priv->mu);
- INIT_LIST_HEAD(&priv->db);
-}
-
-void optee_wait_queue_exit(struct optee_wait_queue *priv)
-{
- mutex_destroy(&priv->mu);
-}
-
static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
{
struct timespec64 ts;
@@ -144,48 +127,6 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
}
#endif
-static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
-{
- struct wq_entry *w;
-
- mutex_lock(&wq->mu);
-
- list_for_each_entry(w, &wq->db, link)
- if (w->key == key)
- goto out;
-
- w = kmalloc(sizeof(*w), GFP_KERNEL);
- if (w) {
- init_completion(&w->c);
- w->key = key;
- list_add_tail(&w->link, &wq->db);
- }
-out:
- mutex_unlock(&wq->mu);
- return w;
-}
-
-static void wq_sleep(struct optee_wait_queue *wq, u32 key)
-{
- struct wq_entry *w = wq_entry_get(wq, key);
-
- if (w) {
- wait_for_completion(&w->c);
- mutex_lock(&wq->mu);
- list_del(&w->link);
- mutex_unlock(&wq->mu);
- kfree(w);
- }
-}
-
-static void wq_wakeup(struct optee_wait_queue *wq, u32 key)
-{
- struct wq_entry *w = wq_entry_get(wq, key);
-
- if (w)
- complete(&w->c);
-}
-
static void handle_rpc_func_cmd_wq(struct optee *optee,
struct optee_msg_arg *arg)
{
@@ -197,11 +138,13 @@ static void handle_rpc_func_cmd_wq(struct optee *optee,
goto bad;
switch (arg->params[0].u.value.a) {
- case OPTEE_RPC_WAIT_QUEUE_SLEEP:
- wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
+ case OPTEE_RPC_NOTIFICATION_WAIT:
+ if (optee_notif_wait(optee, arg->params[0].u.value.b))
+ goto bad;
break;
- case OPTEE_RPC_WAIT_QUEUE_WAKEUP:
- wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
+ case OPTEE_RPC_NOTIFICATION_SEND:
+ if (optee_notif_send(optee, arg->params[0].u.value.b))
+ goto bad;
break;
default:
goto bad;
@@ -319,7 +262,7 @@ void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
case OPTEE_RPC_CMD_GET_TIME:
handle_rpc_func_cmd_get_time(arg);
break;
- case OPTEE_RPC_CMD_WAIT_QUEUE:
+ case OPTEE_RPC_CMD_NOTIFICATION:
handle_rpc_func_cmd_wq(optee, arg);
break;
case OPTEE_RPC_CMD_SUSPEND:
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 6196d7c3888f..d7c8235c1c42 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -8,13 +8,16 @@
#include <linux/arm-smccc.h>
#include <linux/errno.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/sched.h>
+#include <linux/irqdomain.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
@@ -34,7 +37,8 @@
* 2. Low level support functions to register shared memory in secure world
* 3. Dynamic shared memory pool based on alloc_pages()
* 4. Do a normal scheduled call into secure world
- * 5. Driver initialization.
+ * 5. Asynchronous notification
+ * 6. Driver initialization.
*/
#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
@@ -875,10 +879,137 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
return rc;
}
+static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
+{
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm;
+
+ shm = optee_get_msg_arg(ctx, 0, &msg_arg);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ msg_arg->cmd = cmd;
+ optee_smc_do_call_with_arg(ctx, shm);
+
+ tee_shm_free(shm);
+ return 0;
+}
+
+static int optee_smc_do_bottom_half(struct tee_context *ctx)
+{
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
+}
+
+static int optee_smc_stop_async_notif(struct tee_context *ctx)
+{
+ return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
+}
+
/*
- * 5. Driver initialization
+ * 5. Asynchronous notification
+ */
+
+static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
+ bool *value_pending)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0)
+ return 0;
+ *value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
+ *value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
+ return res.a1;
+}
+
+static irqreturn_t notif_irq_handler(int irq, void *dev_id)
+{
+ struct optee *optee = dev_id;
+ bool do_bottom_half = false;
+ bool value_valid;
+ bool value_pending;
+ u32 value;
+
+ do {
+ value = get_async_notif_value(optee->smc.invoke_fn,
+ &value_valid, &value_pending);
+ if (!value_valid)
+ break;
+
+ if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
+ do_bottom_half = true;
+ else
+ optee_notif_send(optee, value);
+ } while (value_pending);
+
+ if (do_bottom_half)
+ return IRQ_WAKE_THREAD;
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
+{
+ struct optee *optee = dev_id;
+
+ optee_smc_do_bottom_half(optee->notif.ctx);
+
+ return IRQ_HANDLED;
+}
+
+static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
+{
+ struct tee_context *ctx;
+ int rc;
+
+ ctx = teedev_open(optee->teedev);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ optee->notif.ctx = ctx;
+ rc = request_threaded_irq(irq, notif_irq_handler,
+ notif_irq_thread_fn,
+ 0, "optee_notification", optee);
+ if (rc)
+ goto err_close_ctx;
+
+ optee->smc.notif_irq = irq;
+
+ return 0;
+
+err_close_ctx:
+ teedev_close_context(optee->notif.ctx);
+ optee->notif.ctx = NULL;
+
+ return rc;
+}
+
+static void optee_smc_notif_uninit_irq(struct optee *optee)
+{
+ if (optee->notif.ctx) {
+ optee_smc_stop_async_notif(optee->notif.ctx);
+ if (optee->smc.notif_irq) {
+ free_irq(optee->smc.notif_irq, optee);
+ irq_dispose_mapping(optee->smc.notif_irq);
+ }
+
+ /*
+ * The thread normally working with optee->notif.ctx was
+ * stopped with free_irq() above.
+ *
+ * Note we're not using teedev_close_context() or
+ * tee_client_close_context() since we have already called
+ * tee_device_put() while initializing to avoid a circular
+ * reference counting.
+ */
+ teedev_close_context(optee->notif.ctx);
+ }
+}
+
+/*
+ * 6. Driver initialization
*
- * During driver inititialization is secure world probed to find out which
+ * During driver initialization the secure world is probed to find out which
* features it supports so the driver can be initialized with a matching
* configuration. This involves for instance support for dynamic shared
* memory instead of a static memory carvout.
@@ -950,6 +1081,17 @@ static const struct optee_ops optee_ops = {
.from_msg_param = optee_from_msg_param,
};
+static int enable_async_notif(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0)
+ return -EINVAL;
+ return 0;
+}
+
static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
{
struct arm_smccc_res res;
@@ -999,7 +1141,7 @@ static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
}
static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
- u32 *sec_caps)
+ u32 *sec_caps, u32 *max_notif_value)
{
union {
struct arm_smccc_res smccc;
@@ -1022,6 +1164,11 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
return false;
*sec_caps = res.result.capabilities;
+ if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
+ *max_notif_value = res.result.max_notif_value;
+ else
+ *max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
+
return true;
}
@@ -1186,6 +1333,8 @@ static int optee_smc_remove(struct platform_device *pdev)
*/
optee_disable_shm_cache(optee);
+ optee_smc_notif_uninit_irq(optee);
+
optee_remove_common(optee);
if (optee->smc.memremaped_shm)
@@ -1215,6 +1364,7 @@ static int optee_probe(struct platform_device *pdev)
struct optee *optee = NULL;
void *memremaped_shm = NULL;
struct tee_device *teedev;
+ u32 max_notif_value;
u32 sec_caps;
int rc;
@@ -1234,7 +1384,8 @@ static int optee_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
+ if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
+ &max_notif_value)) {
pr_warn("capabilities mismatch\n");
return -EINVAL;
}
@@ -1257,7 +1408,7 @@ static int optee_probe(struct platform_device *pdev)
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
if (!optee) {
rc = -ENOMEM;
- goto err;
+ goto err_free_pool;
}
optee->ops = &optee_ops;
@@ -1267,32 +1418,55 @@ static int optee_probe(struct platform_device *pdev)
teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err;
+ goto err_free_optee;
}
optee->teedev = teedev;
teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err;
+ goto err_unreg_teedev;
}
optee->supp_teedev = teedev;
rc = tee_device_register(optee->teedev);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
rc = tee_device_register(optee->supp_teedev);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
mutex_init(&optee->call_queue.mutex);
INIT_LIST_HEAD(&optee->call_queue.waiters);
- optee_wait_queue_init(&optee->wait_queue);
optee_supp_init(&optee->supp);
optee->smc.memremaped_shm = memremaped_shm;
optee->pool = pool;
+ platform_set_drvdata(pdev, optee);
+ rc = optee_notif_init(optee, max_notif_value);
+ if (rc)
+ goto err_supp_uninit;
+
+ if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
+ unsigned int irq;
+
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0) {
+ pr_err("platform_get_irq: ret %d\n", rc);
+ goto err_notif_uninit;
+ }
+ irq = rc;
+
+ rc = optee_smc_notif_init_irq(optee, irq);
+ if (rc) {
+ irq_dispose_mapping(irq);
+ goto err_notif_uninit;
+ }
+ enable_async_notif(optee->smc.invoke_fn);
+ pr_info("Asynchronous notifications enabled\n");
+ }
+
/*
* Ensure that there are no pre-existing shm objects before enabling
* the shm cache so that there's no chance of receiving an invalid
@@ -1307,29 +1481,30 @@ static int optee_probe(struct platform_device *pdev)
if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
pr_info("dynamic shared memory is enabled\n");
- platform_set_drvdata(pdev, optee);
-
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
- if (rc) {
- optee_smc_remove(pdev);
- return rc;
- }
+ if (rc)
+ goto err_disable_shm_cache;
pr_info("initialized driver\n");
return 0;
-err:
- if (optee) {
- /*
- * tee_device_unregister() is safe to call even if the
- * devices hasn't been registered with
- * tee_device_register() yet.
- */
- tee_device_unregister(optee->supp_teedev);
- tee_device_unregister(optee->teedev);
- kfree(optee);
- }
- if (pool)
- tee_shm_pool_free(pool);
+
+err_disable_shm_cache:
+ optee_disable_shm_cache(optee);
+ optee_smc_notif_uninit_irq(optee);
+ optee_unregister_devices();
+err_notif_uninit:
+ optee_notif_uninit(optee);
+err_supp_uninit:
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+err_unreg_supp_teedev:
+ tee_device_unregister(optee->supp_teedev);
+err_unreg_teedev:
+ tee_device_unregister(optee->teedev);
+err_free_optee:
+ kfree(optee);
+err_free_pool:
+ tee_shm_pool_free(pool);
if (memremaped_shm)
memunmap(memremaped_shm);
return rc;
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 2b37bc408fc3..3fc426dad2df 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -43,7 +43,7 @@ static DEFINE_SPINLOCK(driver_lock);
static struct class *tee_class;
static dev_t tee_devt;
-static struct tee_context *teedev_open(struct tee_device *teedev)
+struct tee_context *teedev_open(struct tee_device *teedev)
{
int rc;
struct tee_context *ctx;
@@ -70,6 +70,7 @@ err:
return ERR_PTR(rc);
}
+EXPORT_SYMBOL_GPL(teedev_open);
void teedev_ctx_get(struct tee_context *ctx)
{
@@ -96,11 +97,14 @@ void teedev_ctx_put(struct tee_context *ctx)
kref_put(&ctx->refcount, teedev_ctx_release);
}
-static void teedev_close_context(struct tee_context *ctx)
+void teedev_close_context(struct tee_context *ctx)
{
- tee_device_put(ctx->teedev);
+ struct tee_device *teedev = ctx->teedev;
+
teedev_ctx_put(ctx);
+ tee_device_put(teedev);
}
+EXPORT_SYMBOL_GPL(teedev_close_context);
static int tee_open(struct inode *inode, struct file *filp)
{
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig
index 45c31f3d6054..5d046de96a5d 100644
--- a/drivers/thermal/intel/int340x_thermal/Kconfig
+++ b/drivers/thermal/intel/int340x_thermal/Kconfig
@@ -5,12 +5,12 @@
config INT340X_THERMAL
tristate "ACPI INT340X thermal drivers"
- depends on X86 && ACPI && PCI
+ depends on X86_64 && ACPI && PCI
select THERMAL_GOV_USER_SPACE
select ACPI_THERMAL_REL
select ACPI_FAN
select INTEL_SOC_DTS_IOSF_CORE
- select PROC_THERMAL_MMIO_RAPL if X86_64 && POWERCAP
+ select PROC_THERMAL_MMIO_RAPL if POWERCAP
help
Newer laptops and tablets that use ACPI may have thermal sensors and
other devices with thermal control capabilities outside the core
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 648829ab79ff..82654dc8382b 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -421,6 +421,8 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz)
{
struct thermal_instance *pos;
tz->temperature = THERMAL_TEMP_INVALID;
+ tz->prev_low_trip = -INT_MAX;
+ tz->prev_high_trip = INT_MAX;
list_for_each_entry(pos, &tz->thermal_instances, tz_node)
pos->initialized = false;
}
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 60361141ac04..a72a9474afea 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -7,6 +7,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/usb.h>
@@ -15,6 +16,8 @@
#include <linux/usb/of.h>
#include <linux/usb/phy.h>
+#include <soc/tegra/common.h>
+
#include "../host/ehci.h"
#include "ci.h"
@@ -278,6 +281,8 @@ static int tegra_usb_probe(struct platform_device *pdev)
if (!usb)
return -ENOMEM;
+ platform_set_drvdata(pdev, usb);
+
soc = of_device_get_match_data(&pdev->dev);
if (!soc) {
dev_err(&pdev->dev, "failed to match OF data\n");
@@ -296,11 +301,14 @@ static int tegra_usb_probe(struct platform_device *pdev)
return err;
}
- err = clk_prepare_enable(usb->clk);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to enable clock: %d\n", err);
+ err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
+ if (err)
+ return err;
+
+ pm_runtime_enable(&pdev->dev);
+ err = pm_runtime_resume_and_get(&pdev->dev);
+ if (err)
return err;
- }
if (device_property_present(&pdev->dev, "nvidia,needs-double-reset"))
usb->needs_double_reset = true;
@@ -320,8 +328,6 @@ static int tegra_usb_probe(struct platform_device *pdev)
if (err)
goto fail_power_off;
- platform_set_drvdata(pdev, usb);
-
/* setup and register ChipIdea HDRC device */
usb->soc = soc;
usb->data.name = "tegra-usb";
@@ -350,7 +356,9 @@ static int tegra_usb_probe(struct platform_device *pdev)
phy_shutdown:
usb_phy_shutdown(usb->phy);
fail_power_off:
- clk_disable_unprepare(usb->clk);
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
+
return err;
}
@@ -360,15 +368,46 @@ static int tegra_usb_remove(struct platform_device *pdev)
ci_hdrc_remove_device(usb->dev);
usb_phy_shutdown(usb->phy);
+
+ pm_runtime_put_sync_suspend(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
+
+ return 0;
+}
+
+static int __maybe_unused tegra_usb_runtime_resume(struct device *dev)
+{
+ struct tegra_usb *usb = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(usb->clk);
+ if (err < 0) {
+ dev_err(dev, "failed to enable clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tegra_usb_runtime_suspend(struct device *dev)
+{
+ struct tegra_usb *usb = dev_get_drvdata(dev);
+
clk_disable_unprepare(usb->clk);
return 0;
}
+static const struct dev_pm_ops tegra_usb_pm = {
+ SET_RUNTIME_PM_OPS(tegra_usb_runtime_suspend, tegra_usb_runtime_resume,
+ NULL)
+};
+
static struct platform_driver tegra_usb_driver = {
.driver = {
.name = "tegra-usb",
.of_match_table = tegra_usb_of_match,
+ .pm = &tegra_usb_pm,
},
.probe = tegra_usb_probe,
.remove = tegra_usb_remove,
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 1b451165311c..40496e9e9b43 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -332,13 +332,13 @@ static u8 sticon_build_attr(struct vc_data *conp, u8 color,
bool blink, bool underline, bool reverse,
bool italic)
{
- u8 attr = ((color & 0x70) >> 1) | ((color & 7));
+ u8 fg = color & 7;
+ u8 bg = (color & 0x70) >> 4;
- if (reverse) {
- color = ((color >> 3) & 0x7) | ((color & 0x7) << 3);
- }
-
- return attr;
+ if (reverse)
+ return (fg << 3) | bg;
+ else
+ return (bg << 3) | fg;
}
static void sticon_invert_region(struct vc_data *conp, u16 *p, int count)
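In the old code the attribute was computed before the reverse swap and then returned unchanged, so reverse video had no effect. A short worked example of the corrected packing, with an illustrative color byte:

	/* color = 0x47  ->  bg = (0x47 & 0x70) >> 4 = 4,  fg = 0x47 & 7 = 7
	 * normal video:  attr = (bg << 3) | fg = 0x27
	 * reverse video: attr = (fg << 3) | bg = 0x3c
	 */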
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index edca3703b964..ea42ba6445b2 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -351,6 +351,17 @@ static int efifb_probe(struct platform_device *dev)
char *option = NULL;
efi_memory_desc_t md;
+ /*
+ * Generic drivers must not be registered if a framebuffer exists.
+ * If a native driver was probed, the display hardware was already
+ * taken and attempting to use the system framebuffer is dangerous.
+ */
+ if (num_registered_fb > 0) {
+ dev_err(&dev->dev,
+ "efifb: a framebuffer is already registered\n");
+ return -EINVAL;
+ }
+
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
return -ENODEV;
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 62f0ded70681..b63074fd892e 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -407,6 +407,17 @@ static int simplefb_probe(struct platform_device *pdev)
struct simplefb_par *par;
struct resource *mem;
+ /*
+ * Generic drivers must not be registered if a framebuffer exists.
+ * If a native driver was probed, the display hardware was already
+ * taken and attempting to use the system framebuffer is dangerous.
+ */
+ if (num_registered_fb > 0) {
+ dev_err(&pdev->dev,
+ "simplefb: a framebuffer is already registered\n");
+ return -EINVAL;
+ }
+
if (fb_get_options("simplefb", NULL))
return -ENODEV;